From: Linus Torvalds
Date: Sat, 15 Jan 2011 20:33:40 +0000 (-0800)
Subject: Merge branch 'devel-stable' of master.kernel.org:/home/rmk/linux-2.6-arm
X-Git-Tag: v2.6.38-rc1~38
X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=commitdiff_plain;h=16c1020362083b320868c0deef492249089c3cd3;hp=bbba75606963c82febf7bd2761ea848ac5d1a1bb

Merge branch 'devel-stable' of /home/rmk/linux-2.6-arm

* 'devel-stable' of master.kernel.org:/home/rmk/linux-2.6-arm: (161 commits)
  ARM: pxa: fix building issue of missing physmap.h
  ARM: mmp: PXA910 drive strength FAST using wrong value
  ARM: mmp: MMP2 drive strength FAST using wrong value
  ARM: pxa: fix recursive calls in pxa_low_gpio_chip
  AT91: Support for gsia18s board
  AT91: Acme Systems FOX Board G20 board files
  AT91: board-sam9m10g45ek.c: Remove duplicate inclusion of mach/hardware.h
  ARM: pxa: fix suspend/resume array index miscalculation
  ARM: pxa: use cpu_has_ipr() consistently in irq.c
  ARM: pxa: remove unused variable in clock-pxa3xx.c
  ARM: pxa: fix warning in zeus.c
  ARM: sa1111: fix typo in sa1111_retrigger_lowirq()
  ARM mxs: clkdev related compile fixes
  ARM i.MX mx31_3ds: Fix MC13783 regulator names
  ARM: plat-stmp3xxx: irq_data conversion.
  ARM: plat-spear: irq_data conversion.
  ARM: plat-orion: irq_data conversion.
  ARM: plat-omap: irq_data conversion.
  ARM: plat-nomadik: irq_data conversion.
  ARM: plat-mxc: irq_data conversion.
  ...

Fix up trivial conflict in arch/arm/plat-omap/gpio.c (Lennert Buytenhek's
irq_data conversion clashing with some omap irq updates)
---

diff --git a/CREDITS b/CREDITS
index 494b6e4746d7..1d39a6d0a510 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2811,8 +2811,8 @@ D: CDROM driver "sonycd535" (Sony CDU-535/531)
 N: Stelian Pop
 E: stelian@popies.net
 P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0 D3F7 7185 9E7A EDBB 6147
-D: sonypi, meye drivers, mct_u232 usb serial hacks
-S: Paris, France
+D: random kernel hacks
+S: Paimpont, France
 
 N: Pete Popov
 E: pete_popov@yahoo.com
diff --git a/Documentation/ABI/stable/thermal-notification b/Documentation/ABI/stable/thermal-notification
new file mode 100644
index 000000000000..9723e8b7aeb3
--- /dev/null
+++ b/Documentation/ABI/stable/thermal-notification
@@ -0,0 +1,4 @@
+What:		A notification mechanism for thermal related events
+Description:
+	This interface enables notification for thermal related events.
+	The notification is in the form of a netlink event.
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 9e4541d71cb6..edff6630c805 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -26,3 +26,12 @@ Description:
 		scheduler is chosen. Trigger specific parameters can appear in
 		/sys/class/leds/<led> once a given trigger is selected.
 
+What:		/sys/class/leds/<led>/inverted
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Richard Purdie
+Description:
+		Invert the LED on/off state. This parameter is specific to
+		gpio and backlight triggers. In case of the backlight trigger,
+		it is useful when driving a LED which is intended to indicate
+		a device in a standby-like state.
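As a rough user-space illustration of the trigger and "inverted" attributes
described above (the LED name "status" is made up; real names are board
specific):

#include <stdio.h>

int main(void)
{
	FILE *f;

	/* Route the hypothetical "status" LED to the backlight trigger. */
	f = fopen("/sys/class/leds/status/trigger", "w");
	if (!f)
		return 1;
	fputs("backlight", f);
	fclose(f);

	/* Invert it, e.g. to indicate a standby-like state. */
	f = fopen("/sys/class/leds/status/inverted", "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);
	return 0;
}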
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index 03641a08e275..8906648f962b 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -268,10 +268,6 @@ !Finclude/net/mac80211.h ieee80211_ops !Finclude/net/mac80211.h ieee80211_alloc_hw !Finclude/net/mac80211.h ieee80211_register_hw -!Finclude/net/mac80211.h ieee80211_get_tx_led_name -!Finclude/net/mac80211.h ieee80211_get_rx_led_name -!Finclude/net/mac80211.h ieee80211_get_assoc_led_name -!Finclude/net/mac80211.h ieee80211_get_radio_led_name !Finclude/net/mac80211.h ieee80211_unregister_hw !Finclude/net/mac80211.h ieee80211_free_hw @@ -382,6 +378,23 @@ + + LED support + + Mac80211 supports various ways of blinking LEDs. Wherever possible, + device LEDs should be exposed as LED class devices and hooked up to + the appropriate trigger, which will then be triggered appropriately + by mac80211. + +!Finclude/net/mac80211.h ieee80211_get_tx_led_name +!Finclude/net/mac80211.h ieee80211_get_rx_led_name +!Finclude/net/mac80211.h ieee80211_get_assoc_led_name +!Finclude/net/mac80211.h ieee80211_get_radio_led_name +!Finclude/net/mac80211.h ieee80211_tpt_blink +!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags +!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger + + Hardware crypto acceleration !Pinclude/net/mac80211.h Hardware crypto acceleration diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index 020ac80d4682..620eb3f6a90a 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -250,7 +250,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd) Device ready function If the hardware interface has the ready busy pin of the NAND chip connected to a - GPIO or other accesible I/O pin, this function is used to read back the state of the + GPIO or other accessible I/O pin, this function is used to read back the state of the pin. The function has no arguments and should return 0, if the device is busy (R/B pin is low) and 1, if the device is ready (R/B pin is high). If the hardware interface does not give access to the ready busy pin, then diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index 69dd29ed824e..b2bea15137d2 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt @@ -533,6 +533,33 @@ completion during sending a panic event. Other Pieces ------------ +Get the detailed info related with the IPMI device +-------------------------------------------------- + +Some users need more detailed information about a device, like where +the address came from or the raw base device for the IPMI interface. +You can use the IPMI smi_watcher to catch the IPMI interfaces as they +come or go, and to grab the information, you can use the function +ipmi_get_smi_info(), which returns the following structure: + +struct ipmi_smi_info { + enum ipmi_addr_src addr_src; + struct device *dev; + union { + struct { + void *acpi_handle; + } acpi_info; + } addr_info; +}; + +Currently special info for only for SI_ACPI address sources is +returned. Others may be added as necessary. + +Note that the dev pointer is included in the above structure, and +assuming ipmi_smi_get_info returns success, you must call put_device +on the dev pointer. 
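A minimal sketch of how a registered ipmi_smi_watcher might use this call,
assuming the 2.6.38-era prototype ipmi_get_smi_info(int, struct ipmi_smi_info *)
returning 0 on success; the callback body is illustrative, not taken from the
patch:

static void example_new_smi(int if_num, struct device *dev)
{
	struct ipmi_smi_info info;

	if (ipmi_get_smi_info(if_num, &info))
		return;

	printk(KERN_INFO "IPMI interface %d, addr_src %d\n",
	       if_num, info.addr_src);

	/* The structure holds a device reference; drop it when done. */
	put_device(info.dev);
}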
+
+
 Watchdog
 --------
 
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
new file mode 100644
index 000000000000..9146952c612a
--- /dev/null
+++ b/Documentation/acpi/apei/output_format.txt
@@ -0,0 +1,122 @@
+                     APEI output format
+                     ~~~~~~~~~~~~~~~~~~
+
+APEI uses printk as the hardware error reporting interface; the output
+format is as follows.
+
+<error record> :=
+APEI generic hardware error status
+severity: <integer>, <severity string>
+section: <integer>, severity: <integer>, <severity string>
+flags: <integer>
+<section flags strings>
+fru_id: <uuid string>
+fru_text: <string>
+section_type: <section type string>
+<section data>
+
+<severity string>* := recoverable | fatal | corrected | info
+
+<section flags strings># :=
+[primary][, containment warning][, reset][, threshold exceeded]\
+[, resource not accessible][, latent error]
+
+<section type string> := generic processor error | memory error | \
+PCIe error | unknown, <uuid string>
+
+<section data> :=
+<generic processor section data> | <memory section data> | \
+<pcie section data> | <null>
+
+<generic processor section data> :=
+[processor_type: <integer>, <proc type string>]
+[processor_isa: <integer>, <proc isa string>]
+[error_type: <integer>
+<proc error type strings>]
+[operation: <integer>, <proc operation string>]
+[flags: <integer>
+<proc flags strings>]
+[level: <integer>]
+[version_info: <integer>]
+[processor_id: <integer>]
+[target_address: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[IP: <integer>]
+
+<proc type string>* := IA32/X64 | IA64
+
+<proc isa string>* := IA32 | IA64 | X64
+
+<proc error type strings># :=
+[cache error][, TLB error][, bus error][, micro-architectural error]
+
+<proc operation string>* := unknown or generic | data read | data write | \
+instruction execution
+
+<proc flags strings># :=
+[restartable][, precise IP][, overflow][, corrected]
+
+<memory section data> :=
+[error_status: <integer>]
+[physical_address: <integer>]
+[physical_address_mask: <integer>]
+[node: <integer>]
+[card: <integer>]
+[module: <integer>]
+[bank: <integer>]
+[device: <integer>]
+[row: <integer>]
+[column: <integer>]
+[bit_position: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[target_id: <integer>]
+[error_type: <integer>, <mem error type string>]
+
+<mem error type string>* :=
+unknown | no error | single-bit ECC | multi-bit ECC | \
+single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+target abort | parity error | watchdog timeout | invalid address | \
+mirror Broken | memory sparing | scrub corrected error | \
+scrub uncorrected error
+
+<pcie section data> :=
+[port_type: <integer>, <pcie port type string>]
+[version: <integer>.<integer>]
+[command: <integer>, status: <integer>]
+[device_id: <segment>:<bus>:<device>.<function>
+slot: <integer>
+secondary_bus: <integer>
+vendor_id: <integer>, device_id: <integer>
+class_code: <integer>]
+[serial number: <integer>, <integer>]
+[bridge: secondary_status: <integer>, control: <integer>]
+
+<pcie port type string>* := PCIe end point | legacy PCI end point | \
+unknown | unknown | root port | upstream switch port | \
+downstream switch port | PCIe to PCI/PCI-X bridge | \
+PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+root complex event collector
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions with * have the following format:
+
+field: <integer>, <field string>
+
+Where the value of <integer> should be the position of "string" in the
+<field string> description. Otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions with # have the following format:
+
+field: <integer>
+<field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of "string" in the
+<field strings> description.
+
+For more detailed explanation of every field, please refer to UEFI
+specification version 2.3 or later, section Appendix N: Common
+Platform Error Record.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index d6da611f8f63..4ed7b5ceeed2 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -89,6 +89,33 @@ Throttling/Upper Limit policy
 
 Limits for writes can be put using blkio.write_bps_device file.
 
+Hierarchical Cgroups
+====================
+- Currently none of the IO control policies support hierarchical groups. But
+  the cgroup interface does allow creation of hierarchical cgroups, and
+  internally the IO policies treat them as a flat hierarchy.
+
+  So this patch will allow creation of a cgroup hierarchy, but at the backend
+  everything will be treated as flat. So if somebody creates a hierarchy as
+  follows:
+
+			root
+			/  \
+		     test1  test2
+			|
+		      test3
+
+  CFQ and throttling will practically treat all groups at the same level.
+
+			     pivot
+			  /   |   \   \
+		      root test1 test2 test3
+
+  Down the line we can implement hierarchical accounting/control support
+  and also introduce a new cgroup file "use_hierarchy" which will control
+  whether the cgroup hierarchy is viewed as flat or hierarchical by the
+  policy. This is how the memory controller has implemented it.
+ Various user visible config options =================================== CONFIG_BLK_CGROUP diff --git a/Documentation/cgroups/cgroup_event_listener.c b/Documentation/cgroups/cgroup_event_listener.c index 8c2bfc4a6358..3e082f96dc12 100644 --- a/Documentation/cgroups/cgroup_event_listener.c +++ b/Documentation/cgroups/cgroup_event_listener.c @@ -91,7 +91,7 @@ int main(int argc, char **argv) if (ret == -1) { perror("cgroup.event_control " - "is not accessable any more"); + "is not accessible any more"); break; } diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt index 190018b0c649..44b8b7af8019 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroups/cgroups.txt @@ -355,13 +355,13 @@ subsystems, type: To change the set of subsystems bound to a mounted hierarchy, just remount with different options: -# mount -o remount,cpuset,ns hier1 /dev/cgroup +# mount -o remount,cpuset,blkio hier1 /dev/cgroup -Now memory is removed from the hierarchy and ns is added. +Now memory is removed from the hierarchy and blkio is added. -Note this will add ns to the hierarchy but won't remove memory or +Note this will add blkio to the hierarchy but won't remove memory or cpuset, because the new options are appended to the old ones: -# mount -o remount,ns /dev/cgroup +# mount -o remount,blkio /dev/cgroup To Specify a hierarchy's release_agent: # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \ diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt index b7eececfb195..fc8fa97a09ac 100644 --- a/Documentation/cgroups/memcg_test.txt +++ b/Documentation/cgroups/memcg_test.txt @@ -398,7 +398,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y. written to move_charge_at_immigrate. 9.10 Memory thresholds - Memory controler implements memory thresholds using cgroups notification + Memory controller implements memory thresholds using cgroups notification API. You can use Documentation/cgroups/cgroup_event_listener.c to test it. diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt index 524de926290d..59293ac4a5d0 100644 --- a/Documentation/device-mapper/dm-crypt.txt +++ b/Documentation/device-mapper/dm-crypt.txt @@ -8,7 +8,7 @@ Parameters: Encryption cipher and an optional IV generation mode. - (In format cipher-chainmode-ivopts:ivmode). + (In format cipher[:keycount]-chainmode-ivopts:ivmode). Examples: des aes-cbc-essiv:sha256 @@ -20,6 +20,11 @@ Parameters: Key used for encryption. It is encoded as a hexadecimal number. You can only use key sizes that are valid for the selected cipher. + + Multi-key compatibility mode. You can define keys and + then sectors are encrypted according to their offsets (sector 0 uses key0; + sector 1 uses key1 etc.). must be a power of two. + The IV offset is a sector count that is added to the sector number before creating the IV. diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt new file mode 100644 index 000000000000..33b6b7071ac8 --- /dev/null +++ b/Documentation/device-mapper/dm-raid.txt @@ -0,0 +1,70 @@ +Device-mapper RAID (dm-raid) is a bridge from DM to MD. It +provides a way to use device-mapper interfaces to access the MD RAID +drivers. + +As with all device-mapper targets, the nominal public interfaces are the +constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO +and STATUSTYPE_TABLE). 
The CTR table looks like the following: + +1: raid \ +2: <#raid_params> \ +3: <#raid_devs> .. + +Line 1 contains the standard first three arguments to any device-mapper +target - the start, length, and target type fields. The target type in +this case is "raid". + +Line 2 contains the arguments that define the particular raid +type/personality/level, the required arguments for that raid type, and +any optional arguments. Possible raid types include: raid4, raid5_la, +raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc. (raid1 is +planned for the future.) The list of required and optional parameters +is the same for all the current raid types. The required parameters are +positional, while the optional parameters are given as key/value pairs. +The possible parameters are as follows: + Chunk size in sectors. + [[no]sync] Force/Prevent RAID initialization + [rebuild ] Rebuild the drive indicated by the index + [daemon_sleep ] Time between bitmap daemon work to clear bits + [min_recovery_rate ] Throttle RAID initialization + [max_recovery_rate ] Throttle RAID initialization + [max_write_behind ] See '-write-behind=' (man mdadm) + [stripe_cache ] Stripe cache size for higher RAIDs + +Line 3 contains the list of devices that compose the array in +metadata/data device pairs. If the metadata is stored separately, a '-' +is given for the metadata device position. If a drive has failed or is +missing at creation time, a '-' can be given for both the metadata and +data drives for a given position. + +NB. Currently all metadata devices must be specified as '-'. + +Examples: +# RAID4 - 4 data drives, 1 parity +# No metadata devices specified to hold superblock/bitmap info +# Chunk size of 1MiB +# (Lines separated for easy reading) +0 1960893648 raid \ + raid4 1 2048 \ + 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81 + +# RAID4 - 4 data drives, 1 parity (no metadata devices) +# Chunk size of 1MiB, force RAID initialization, +# min recovery rate at 20 kiB/sec/disk +0 1960893648 raid \ + raid4 4 2048 min_recovery_rate 20 sync\ + 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81 + +Performing a 'dmsetup table' should display the CTR table used to +construct the mapping (with possible reordering of optional +parameters). + +Performing a 'dmsetup status' will yield information on the state and +health of the array. The output is as follows: +1: raid \ +2: <#devices> <1 health char for each dev> + +Line 1 is standard DM output. Line 2 is best shown by example: + 0 1960893648 raid raid4 5 AAAAA 2/490221568 +Here we can see the RAID type is raid4, there are 5 devices - all of +which are 'A'live, and the array is 2/490221568 complete with recovery. diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt index 945ff3fda433..a0b58e29f911 100644 --- a/Documentation/email-clients.txt +++ b/Documentation/email-clients.txt @@ -104,6 +104,13 @@ Then from the "Message" menu item, select insert file and choose your patch. As an added bonus you can customise the message creation toolbar menu and put the "insert file" icon there. +Make the the composer window wide enough so that no lines wrap. As of +KMail 1.13.5 (KDE 4.5.4), KMail will apply word wrapping when sending +the email if the lines wrap in the composer window. Having word wrapping +disabled in the Options menu isn't enough. Thus, if your patch has very +long lines, you must make the composer window very wide before sending +the email. 
See: https://bugs.kde.org/show_bug.cgi?id=174034 + You can safely GPG sign attachments, but inlined text is preferred for patches so do not GPG sign them. Signing patches that have been inserted as inlined text will make them tricky to extract from their 7-bit encoding. @@ -179,26 +186,8 @@ Sylpheed (GUI) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Thunderbird (GUI) -By default, thunderbird likes to mangle text, but there are ways to -coerce it into being nice. - -- Under account settings, composition and addressing, uncheck "Compose - messages in HTML format". - -- Edit your Thunderbird config settings to tell it not to wrap lines: - user_pref("mailnews.wraplength", 0); - -- Edit your Thunderbird config settings so that it won't use format=flowed: - user_pref("mailnews.send_plaintext_flowed", false); - -- You need to get Thunderbird into preformat mode: -. If you compose HTML messages by default, it's not too hard. Just select - "Preformat" from the drop-down box just under the subject line. -. If you compose in text by default, you have to tell it to compose a new - message in HTML (just as a one-off), and then force it from there back to - text, else it will wrap lines. To do this, use shift-click on the Write - icon to compose to get HTML compose mode, then select "Preformat" from - the drop-down box just under the subject line. +Thunderbird is an Outlook clone that likes to mangle text, but there are ways +to coerce it into behaving. - Allows use of an external editor: The easiest thing to do with Thunderbird and patches is to use an @@ -208,6 +197,27 @@ coerce it into being nice. View->Toolbars->Customize... and finally just click on it when in the Compose dialog. +To beat some sense out of the internal editor, do this: + +- Under account settings, composition and addressing, uncheck "Compose + messages in HTML format". + +- Edit your Thunderbird config settings so that it won't use format=flowed. + Go to "edit->preferences->advanced->config editor" to bring up the + thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to + "false". + +- Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML + composer, select "Preformat" from the drop-down box just under the subject + line, then close the message without saving. (This setting also applies to + the text composer, but the only control for it is in the HTML composer.) + +- Install the "toggle wordwrap" extension. Download the file from: + https://addons.mozilla.org/thunderbird/addon/2351/ + Then go to "tools->add ons", select "install" at the bottom of the screen, + and browse to where you saved the .xul file. This adds an "Enable + Wordwrap" entry under the Options menu of the message composer. + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TkRat (GUI) diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 22f10818c2b3..8c594c45b6a1 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -193,6 +193,20 @@ Why: /proc//oom_adj allows userspace to influence the oom killer's --------------------------- +What: CS5535/CS5536 obsolete GPIO driver +When: June 2011 +Files: drivers/staging/cs5535_gpio/* +Check: drivers/staging/cs5535_gpio/cs5535_gpio.c +Why: A newer driver replaces this; it is drivers/gpio/cs5535-gpio.c, and + integrates with the Linux GPIO subsystem. The old driver has been + moved to staging, and will be removed altogether around 2.6.40. 
+ Please test the new driver, and ensure that the functionality you + need and any bugfixes from the old driver are available in the new + one. +Who: Andres Salomon + +-------------------------- + What: remove EXPORT_SYMBOL(kernel_thread) When: August 2006 Files: arch/*/kernel/*_ksyms.c @@ -234,6 +248,17 @@ Who: Zhang Rui --------------------------- +What: CONFIG_ACPI_PROCFS_POWER +When: 2.6.39 +Why: sysfs I/F for ACPI power devices, including AC and Battery, + has been working in upstream kenrel since 2.6.24, Sep 2007. + In 2.6.37, we make the sysfs I/F always built in and this option + disabled by default. + Remove this option and the ACPI power procfs interface in 2.6.39. +Who: Zhang Rui + +--------------------------- + What: /proc/acpi/button When: August 2007 Why: /proc/acpi/button has been replaced by events to the input layer @@ -576,3 +601,13 @@ Why: The functions have been superceded by cancel_delayed_work_sync() Who: Tejun Heo ---------------------------- + +What: Legacy, non-standard chassis intrusion detection interface. +When: June 2011 +Why: The adm9240, w83792d and w83793 hardware monitoring drivers have + legacy interfaces for chassis intrusion detection. A standard + interface has been added to each driver, so the legacy interface + can be removed. +Who: Jean Delvare + +---------------------------- diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 977d8919cc69..ef9349a4b5d1 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -343,7 +343,6 @@ prototypes: int (*fl_grant)(struct file_lock *, struct file_lock *, int); void (*fl_release_private)(struct file_lock *); void (*fl_break)(struct file_lock *); /* break_lease callback */ - int (*fl_mylease)(struct file_lock *, struct file_lock *); int (*fl_change)(struct file_lock **, int); locking rules: @@ -353,7 +352,6 @@ fl_notify: yes no fl_grant: no no fl_release_private: maybe no fl_break: yes no -fl_mylease: yes no fl_change yes no --------------------------- buffer_head ----------------------------------- diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 07a32b42cf9c..dfbcd1b00b0a 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -365,8 +365,8 @@ must be done in the RCU callback. [recommended] vfs now tries to do path walking in "rcu-walk mode", which avoids atomic operations and scalability hazards on dentries and inodes (see -Documentation/filesystems/path-walk.txt). d_hash and d_compare changes (above) -are examples of the changes required to support this. For more complex +Documentation/filesystems/path-lookup.txt). d_hash and d_compare changes +(above) are examples of the changes required to support this. For more complex filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so no changes are required to the filesystem. However, this is costly and loses the benefits of rcu-walk mode. We will begin to add filesystem callbacks that @@ -383,5 +383,14 @@ Documentation/filesystems/vfs.txt for more details. permission and check_acl are inode permission checks that are called on many or all directory inodes on the way down a path walk (to check for -exec permission). These must now be rcu-walk aware (flags & IPERM_RCU). See -Documentation/filesystems/vfs.txt for more details. +exec permission). These must now be rcu-walk aware (flags & IPERM_FLAG_RCU). +See Documentation/filesystems/vfs.txt for more details. 
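A minimal sketch of an rcu-walk aware ->permission() under these rules; the
filesystem name is made up and the generic_permission() prototype is the one
assumed for this kernel series, not taken from the patch:

static int examplefs_permission(struct inode *inode, int mask,
				unsigned int flags)
{
	/*
	 * rcu-walk mode: no blocking and no stores to the inode.  If we
	 * would need either, return -ECHILD so the VFS retries the lookup
	 * in ref-walk mode.
	 */
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	/* ref-walk mode: a normal, possibly blocking, check. */
	return generic_permission(inode, mask, flags, NULL);
}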
+ +-- +[mandatory] + In ->fallocate() you must check the mode option passed in. If your +filesystem does not support hole punching (deallocating space in the middle of a +file) you must return -EOPNOTSUPP if FALLOC_FL_PUNCH_HOLE is set in mode. +Currently you can only have FALLOC_FL_PUNCH_HOLE with FALLOC_FL_KEEP_SIZE set, +so the i_size should not change when hole punching, even when puching the end of +a file off. diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 9471225212c4..23cae6548d3a 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -375,6 +375,7 @@ Anonymous: 0 kB Swap: 0 kB KernelPageSize: 4 kB MMUPageSize: 4 kB +Locked: 374 kB The first of these lines shows the same information as is displayed for the mapping in /proc/PID/maps. The remaining lines show the size of the mapping @@ -670,6 +671,8 @@ varies by architecture and compile options. The following is from a > cat /proc/meminfo +The "Locked" indicates whether the mapping is locked in memory or not. + MemTotal: 16344972 kB MemFree: 13634064 kB @@ -1320,6 +1323,10 @@ scaled linearly with /proc//oom_score_adj. Writing to /proc//oom_score_adj or /proc//oom_adj will change the other with its scaled value. +The value of /proc//oom_score_adj may be reduced no lower than the last +value set by a CAP_SYS_RESOURCE process. To reduce the value any lower +requires CAP_SYS_RESOURCE. + NOTICE: /proc//oom_adj is deprecated and will be removed, please see Documentation/feature-removal-schedule.txt. diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index fbb324e2bd43..cae6d27c9f5b 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -415,8 +415,8 @@ otherwise noted. permission: called by the VFS to check for access rights on a POSIX-like filesystem. - May be called in rcu-walk mode (flags & IPERM_RCU). If in rcu-walk - mode, the filesystem must check the permission without blocking or + May be called in rcu-walk mode (flags & IPERM_FLAG_RCU). If in rcu-walk + mode, the filesystem must check the permission without blocking or storing to the inode. If a situation is encountered that rcu-walk cannot handle, return diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240 index 2c6f1fed4618..36e8ec6aa868 100644 --- a/Documentation/hwmon/adm9240 +++ b/Documentation/hwmon/adm9240 @@ -155,7 +155,7 @@ connected to a normally open switch. The ADM9240 provides an internal open drain on this line, and may output a 20 ms active low pulse to reset an external Chassis Intrusion latch. -Clear the CI latch by writing value 1 to the sysfs chassis_clear file. +Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file. Alarm flags reported as 16-bit word diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828 index 75bc4beaf447..2bbebe6f771f 100644 --- a/Documentation/hwmon/ads7828 +++ b/Documentation/hwmon/ads7828 @@ -9,7 +9,7 @@ Supported chips: http://focus.ti.com/lit/ds/symlink/ads7828.pdf Authors: - Steve Hardy + Steve Hardy Module Parameters ----------------- diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737 index fc5df7654d63..4d2935145a1c 100644 --- a/Documentation/hwmon/dme1737 +++ b/Documentation/hwmon/dme1737 @@ -42,7 +42,7 @@ Description This driver implements support for the hardware monitoring capabilities of the SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x, and SCH5127 Super-I/O chips. 
These chips feature monitoring of 3 temp sensors -temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and +temp[1-3] (2 remote diodes and 1 internal), 8 voltages in[0-7] (7 external and 1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and automatically. @@ -105,6 +105,7 @@ SCH5127: in4: V1_IN 0V - 1.5V in5: VTR (+3.3V standby) 0V - 4.38V in6: Vbat (+3.0V) 0V - 4.38V + in7: Vtrip (+1.5V) 0V - 1.99V Each voltage input has associated min and max limits which trigger an alarm when crossed. @@ -217,10 +218,10 @@ cpu0_vid RO CPU core reference voltage in vrm RW Voltage regulator module version number. -in[0-6]_input RO Measured voltage in millivolts. -in[0-6]_min RW Low limit for voltage input. -in[0-6]_max RW High limit for voltage input. -in[0-6]_alarm RO Voltage input alarm. Returns 1 if +in[0-7]_input RO Measured voltage in millivolts. +in[0-7]_min RW Low limit for voltage input. +in[0-7]_max RW High limit for voltage input. +in[0-7]_alarm RO Voltage input alarm. Returns 1 if voltage input is or went outside the associated min-max range, 0 otherwise. @@ -324,3 +325,4 @@ fan5 opt opt pwm5 opt opt fan6 opt opt pwm6 opt opt +in7 yes diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf index fb145e5e722a..8432e1118173 100644 --- a/Documentation/hwmon/w83627hf +++ b/Documentation/hwmon/w83627hf @@ -91,3 +91,25 @@ isaset -y -f 0x2e 0xaa The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but 0x4e/0x4f is also possible. + +Voltage pin mapping +------------------- + +Here is a summary of the voltage pin mapping for the W83627THF. This +can be useful to convert data provided by board manufacturers into +working libsensors configuration statements. + + W83627THF | + Pin | Name | Register | Sysfs attribute +----------------------------------------------------- + 100 | CPUVCORE | 20h | in0 + 99 | VIN0 | 21h | in1 + 98 | VIN1 | 22h | in2 + 97 | VIN2 | 24h | in4 + 114 | AVCC | 23h | in3 + 61 | 5VSB | 50h (bank 5) | in7 + 74 | VBAT | 51h (bank 5) | in8 + +For other supported devices, you'll have to take the hard path and +look up the information in the datasheet yourself (and then add it +to this document please.) diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793 index 51171a83165b..6cc5f639b721 100644 --- a/Documentation/hwmon/w83793 +++ b/Documentation/hwmon/w83793 @@ -92,7 +92,7 @@ This driver implements support for Winbond W83793G/W83793R chips. * Chassis If the case open alarm triggers, it will stay in this state unless cleared - by any write to the sysfs file "chassis". + by writing 0 to the sysfs file "intrusion0_alarm". * VID and VRM The VRM version is detected automatically, don't modify the it unless you diff --git a/Documentation/input/ff.txt b/Documentation/input/ff.txt index ded4d5f53109..b3867bf49f8f 100644 --- a/Documentation/input/ff.txt +++ b/Documentation/input/ff.txt @@ -49,7 +49,9 @@ This information is subject to change. 
#include #include -unsigned long features[1 + FF_MAX/sizeof(unsigned long)]; +#define BITS_TO_LONGS(x) \ + (((x) + 8 * sizeof (unsigned long) - 1) / (8 * sizeof (unsigned long))) +unsigned long features[BITS_TO_LONGS(FF_CNT)]; int ioctl(int file_descriptor, int request, unsigned long *features); "request" must be EVIOCGBIT(EV_FF, size of features array in bytes ) diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index d6a63c7b4478..ac293e955308 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -247,7 +247,7 @@ Code Seq#(hex) Include File Comments 'p' 40-7F linux/nvram.h 'p' 80-9F linux/ppdev.h user-space parport -'p' A1-A4 linux/pps.h LinuxPPS +'p' A1-A5 linux/pps.h LinuxPPS 'q' 00-1F linux/serio.h 'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt index 59a69ec67c40..f6dece5b7014 100644 --- a/Documentation/iostats.txt +++ b/Documentation/iostats.txt @@ -81,7 +81,7 @@ Field 9 -- # of I/Os currently in progress The only field that should go to zero. Incremented as requests are given to appropriate struct request_queue and decremented as they finish. Field 10 -- # of milliseconds spent doing I/Os - This field is increases so long as field 9 is nonzero. + This field increases so long as field 9 is nonzero. Field 11 -- weighted # of milliseconds spent doing I/Os This field is incremented at each I/O start, I/O completion, I/O merge, or read of these stats by the number of I/Os in progress diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index cab61d842259..7a9e0b4b2903 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt @@ -65,18 +65,21 @@ Install kexec-tools 2) Download the kexec-tools user-space package from the following URL: -http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools.tar.gz +http://kernel.org/pub/linux/utils/kernel/kexec/kexec-tools.tar.gz This is a symlink to the latest version. The latest kexec-tools git tree is available at: -git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools.git -or -http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools.git +git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git +and +http://www.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git + +There is also a gitweb interface available at +http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git More information about kexec-tools can be found at -http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/README.html +http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html 3) Unpack the tarball with the tar command, as follows: @@ -439,6 +442,6 @@ To Do Contact ======= -Vivek Goyal (vgoyal@in.ibm.com) +Vivek Goyal (vgoyal@redhat.com) Maneesh Soni (maneesh@in.ibm.com) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index ed3708f8d0db..b72e071a3e5b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -199,11 +199,6 @@ and is between 256 and 4096 characters. It is defined in the file unusable. The "log_buf_len" parameter may be useful if you need to capture more output. - acpi_display_output= [HW,ACPI] - acpi_display_output=vendor - acpi_display_output=video - See above. - acpi_irq_balance [HW,ACPI] ACPI will balance active IRQs default in APIC mode @@ -659,11 +654,6 @@ and is between 256 and 4096 characters. 
It is defined in the file dscc4.setup= [NET] - dynamic_printk Enables pr_debug()/dev_dbg() calls if - CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled. - These can also be switched on/off via - /dynamic_printk/modules - earlycon= [KNL] Output early console device and options. uart[8250],io,[,options] uart[8250],mmio,[,options] @@ -888,6 +878,7 @@ and is between 256 and 4096 characters. It is defined in the file controller i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX controllers + i8042.notimeout [HW] Ignore timeout condition signalled by conroller i8042.reset [HW] Reset the controller during init and cleanup i8042.unlock [HW] Unlock (ignore) the keylock @@ -1709,6 +1700,9 @@ and is between 256 and 4096 characters. It is defined in the file no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver + no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page + fault handling. + nolapic [X86-32,APIC] Do not enable or use the local APIC. nolapic_timer [X86-32,APIC] Do not use the local APIC timer. diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO index e3a55b6091e9..ab5189ae3428 100644 --- a/Documentation/ko_KR/HOWTO +++ b/Documentation/ko_KR/HOWTO @@ -391,8 +391,8 @@ bugme-new 메일링 리스트나(새로운 버그 리포트들만이 이곳에 bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메일로 전해진다) 에 등록하면 된다. - http://lists.osdl.org/mailman/listinfo/bugme-new - http://lists.osdl.org/mailman/listinfo/bugme-janitors + https://lists.linux-foundation.org/mailman/listinfo/bugme-new + https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 741fe66d6eca..0cfb00fd86ff 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -598,7 +598,7 @@ a 5-byte jump instruction. So there are several limitations. a) The instructions in DCR must be relocatable. b) The instructions in DCR must not include a call instruction. c) JTPR must not be targeted by any jump or call instruction. -d) DCR must not straddle the border betweeen functions. +d) DCR must not straddle the border between functions. Anyway, these limitations are checked by the in-kernel instruction decoder, so you don't need to worry about that. diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt index b336266bea5e..ad85797c1cf0 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/kvm/api.txt @@ -874,7 +874,7 @@ Possible values are: - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and is waiting for an interrupt - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector - accesible via KVM_GET_VCPU_EVENTS) + accessible via KVM_GET_VCPU_EVENTS) This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace. @@ -1085,6 +1085,184 @@ of 4 instructions that make up a hypercall. If any additional field gets added to this structure later on, a bit for that additional piece of information will be set in the flags bitmap. +4.47 KVM_ASSIGN_PCI_DEVICE + +Capability: KVM_CAP_DEVICE_ASSIGNMENT +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_pci_dev (in) +Returns: 0 on success, -1 on error + +Assigns a host PCI device to the VM. + +struct kvm_assigned_pci_dev { + __u32 assigned_dev_id; + __u32 busnr; + __u32 devfn; + __u32 flags; + __u32 segnr; + union { + __u32 reserved[11]; + }; +}; + +The PCI device is specified by the triple segnr, busnr, and devfn. 
+Identification in succeeding service requests is done via assigned_dev_id. The +following flags are specified: + +/* Depends on KVM_CAP_IOMMU */ +#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) + +4.48 KVM_DEASSIGN_PCI_DEVICE + +Capability: KVM_CAP_DEVICE_DEASSIGNMENT +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_pci_dev (in) +Returns: 0 on success, -1 on error + +Ends PCI device assignment, releasing all associated resources. + +See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is +used in kvm_assigned_pci_dev to identify the device. + +4.49 KVM_ASSIGN_DEV_IRQ + +Capability: KVM_CAP_ASSIGN_DEV_IRQ +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_irq (in) +Returns: 0 on success, -1 on error + +Assigns an IRQ to a passed-through device. + +struct kvm_assigned_irq { + __u32 assigned_dev_id; + __u32 host_irq; + __u32 guest_irq; + __u32 flags; + union { + struct { + __u32 addr_lo; + __u32 addr_hi; + __u32 data; + } guest_msi; + __u32 reserved[12]; + }; +}; + +The following flags are defined: + +#define KVM_DEV_IRQ_HOST_INTX (1 << 0) +#define KVM_DEV_IRQ_HOST_MSI (1 << 1) +#define KVM_DEV_IRQ_HOST_MSIX (1 << 2) + +#define KVM_DEV_IRQ_GUEST_INTX (1 << 8) +#define KVM_DEV_IRQ_GUEST_MSI (1 << 9) +#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) + +It is not valid to specify multiple types per host or guest IRQ. However, the +IRQ type of host and guest can differ or can even be null. + +4.50 KVM_DEASSIGN_DEV_IRQ + +Capability: KVM_CAP_ASSIGN_DEV_IRQ +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_irq (in) +Returns: 0 on success, -1 on error + +Ends an IRQ assignment to a passed-through device. + +See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified +by assigned_dev_id, flags must correspond to the IRQ type specified on +KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed. + +4.51 KVM_SET_GSI_ROUTING + +Capability: KVM_CAP_IRQ_ROUTING +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_irq_routing (in) +Returns: 0 on success, -1 on error + +Sets the GSI routing table entries, overwriting any previously set entries. + +struct kvm_irq_routing { + __u32 nr; + __u32 flags; + struct kvm_irq_routing_entry entries[0]; +}; + +No flags are specified so far, the corresponding field must be set to zero. + +struct kvm_irq_routing_entry { + __u32 gsi; + __u32 type; + __u32 flags; + __u32 pad; + union { + struct kvm_irq_routing_irqchip irqchip; + struct kvm_irq_routing_msi msi; + __u32 pad[8]; + } u; +}; + +/* gsi routing entry types */ +#define KVM_IRQ_ROUTING_IRQCHIP 1 +#define KVM_IRQ_ROUTING_MSI 2 + +No flags are specified so far, the corresponding field must be set to zero. + +struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +}; + +struct kvm_irq_routing_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + __u32 pad; +}; + +4.52 KVM_ASSIGN_SET_MSIX_NR + +Capability: KVM_CAP_DEVICE_MSIX +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_msix_nr (in) +Returns: 0 on success, -1 on error + +Set the number of MSI-X interrupts for an assigned device. This service can +only be called once in the lifetime of an assigned device. 
+ +struct kvm_assigned_msix_nr { + __u32 assigned_dev_id; + __u16 entry_nr; + __u16 padding; +}; + +#define KVM_MAX_MSIX_PER_DEV 256 + +4.53 KVM_ASSIGN_SET_MSIX_ENTRY + +Capability: KVM_CAP_DEVICE_MSIX +Architectures: x86 ia64 +Type: vm ioctl +Parameters: struct kvm_assigned_msix_entry (in) +Returns: 0 on success, -1 on error + +Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting +the GSI vector to zero means disabling the interrupt. + +struct kvm_assigned_msix_entry { + __u32 assigned_dev_id; + __u32 gsi; + __u16 entry; /* The index of entry in the MSI-X table */ + __u16 padding[3]; +}; + 5. The kvm_run structure Application code obtains a pointer to the kvm_run structure by diff --git a/Documentation/kvm/cpuid.txt b/Documentation/kvm/cpuid.txt index 14a12ea92b7f..882068538c9c 100644 --- a/Documentation/kvm/cpuid.txt +++ b/Documentation/kvm/cpuid.txt @@ -36,6 +36,9 @@ KVM_FEATURE_MMU_OP || 2 || deprecated. KVM_FEATURE_CLOCKSOURCE2 || 3 || kvmclock available at msrs || || 0x4b564d00 and 0x4b564d01 ------------------------------------------------------------------------------ +KVM_FEATURE_ASYNC_PF || 4 || async pf can be enabled by + || || writing to msr 0x4b564d02 +------------------------------------------------------------------------------ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side || || per-cpu warps are expected in || || kvmclock. diff --git a/Documentation/kvm/msr.txt b/Documentation/kvm/msr.txt index 8ddcfe84c09a..d079aed27e03 100644 --- a/Documentation/kvm/msr.txt +++ b/Documentation/kvm/msr.txt @@ -3,7 +3,6 @@ Glauber Costa , Red Hat Inc, 2010 ===================================================== KVM makes use of some custom MSRs to service some requests. -At present, this facility is only used by kvmclock. Custom MSRs have a range reserved for them, that goes from 0x4b564d00 to 0x4b564dff. There are MSRs outside this area, @@ -151,3 +150,38 @@ MSR_KVM_SYSTEM_TIME: 0x12 return PRESENT; } else return NON_PRESENT; + +MSR_KVM_ASYNC_PF_EN: 0x4b564d02 + data: Bits 63-6 hold 64-byte aligned physical address of a + 64 byte memory area which must be in guest RAM and must be + zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1 + when asynchronous page faults are enabled on the vcpu 0 when + disabled. Bit 2 is 1 if asynchronous page faults can be injected + when vcpu is in cpl == 0. + + First 4 byte of 64 byte memory location will be written to by + the hypervisor at the time of asynchronous page fault (APF) + injection to indicate type of asynchronous page fault. Value + of 1 means that the page referred to by the page fault is not + present. Value 2 means that the page is now available. Disabling + interrupt inhibits APFs. Guest must not enable interrupt + before the reason is read, or it may be overwritten by another + APF. Since APF uses the same exception vector as regular page + fault guest must reset the reason to 0 before it does + something that can generate normal page fault. If during page + fault APF reason is 0 it means that this is regular page + fault. + + During delivery of type 1 APF cr2 contains a token that will + be used to notify a guest when missing page becomes + available. When page becomes available type 2 APF is sent with + cr2 set to the token associated with the page. There is special + kind of token 0xffffffff which tells vcpu that it should wake + up all processes waiting for APFs and no individual type 2 APFs + will be sent. 
+ + If APF is disabled while there are outstanding APFs, they will + not be delivered. + + Currently type 2 APF will be always delivered on the same vcpu as + type 1 was, but guest should not rely on that. diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt index efb3a6a045a2..6ccaf8e1a00e 100644 --- a/Documentation/lguest/lguest.txt +++ b/Documentation/lguest/lguest.txt @@ -111,8 +111,11 @@ Running Lguest: Then use --tunnet=bridge:lg0 when launching the guest. - See http://linux-net.osdl.org/index.php/Bridge for general information - on how to get bridging working. + See: + + http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge + + for general information on how to get bridging to work. There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt index 505f19607542..4b12abcb2ad3 100644 --- a/Documentation/magic-number.txt +++ b/Documentation/magic-number.txt @@ -150,7 +150,7 @@ NBD_REPLY_MAGIC 0x96744668 nbd_reply include/linux/nbd.h STL_BOARDMAGIC 0xa2267f52 stlbrd include/linux/stallion.h ENI155_MAGIC 0xa54b872d midway_eprom drivers/atm/eni.h SCI_MAGIC 0xbabeface gs_port drivers/char/sh-sci.h -CODA_MAGIC 0xC0DAC0DA coda_file_info include/linux/coda_fs_i.h +CODA_MAGIC 0xC0DAC0DA coda_file_info fs/coda/coda_fs_i.h DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram drivers/scsi/gdth.h STLI_PORTMAGIC 0xe671c7a1 stliport include/linux/istallion.h YAM_MAGIC 0xF10A7654 yam_port drivers/net/hamradio/yam.c diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt index bec69a8a1697..a7ba5e4e2c91 100644 --- a/Documentation/networking/bridge.txt +++ b/Documentation/networking/bridge.txt @@ -1,8 +1,8 @@ In order to use the Ethernet bridging functionality, you'll need the userspace tools. These programs and documentation are available -at http://www.linux-foundation.org/en/Net:Bridge. The download page is +at http://www.linuxfoundation.org/en/Net:Bridge. The download page is http://prdownloads.sourceforge.net/bridge. If you still have questions, don't hesitate to post to the mailing list -(more info http://lists.osdl.org/mailman/listinfo/bridge). +(more info https://lists.linux-foundation.org/mailman/listinfo/bridge). diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt index 61d7c9247453..0cb8cb9098f4 100644 --- a/Documentation/networking/caif/spi_porting.txt +++ b/Documentation/networking/caif/spi_porting.txt @@ -32,7 +32,7 @@ the physical hardware, both with regard to SPI and to GPIOs. This function is called by the CAIF SPI interface to give you a chance to set up your hardware to be ready to receive a stream of data from the master. The xfer structure contains - both physical and logical adresses, as well as the total length + both physical and logical addresses, as well as the total length of the transfer in both directions.The dev parameter can be used to map to different CAIF SPI slave devices. diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt index 811872b45bee..d718bc2ff1cf 100644 --- a/Documentation/networking/dccp.txt +++ b/Documentation/networking/dccp.txt @@ -38,11 +38,11 @@ The Linux DCCP implementation does not currently support all the features that a specified in RFCs 4340...42. 
The known bugs are at: - http://linux-net.osdl.org/index.php/TODO#DCCP + http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP For more up-to-date versions of the DCCP implementation, please consider using the experimental DCCP test tree; instructions for checking this out are on: -http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree +http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree Socket options diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt index d4f8b8b9b53c..3e071115ca90 100644 --- a/Documentation/networking/generic_netlink.txt +++ b/Documentation/networking/generic_netlink.txt @@ -1,3 +1,3 @@ A wiki document on how to use Generic Netlink can be found here: - * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO + * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto diff --git a/Documentation/nfc/nfc-pn544.txt b/Documentation/nfc/nfc-pn544.txt new file mode 100644 index 000000000000..2fcac9f5996e --- /dev/null +++ b/Documentation/nfc/nfc-pn544.txt @@ -0,0 +1,114 @@ +Kernel driver for the NXP Semiconductors PN544 Near Field +Communication chip + +Author: Jari Vanhala +Contact: Matti Aaltonen (matti.j.aaltonen at nokia.com) + +General +------- + +The PN544 is an integrated transmission module for contactless +communication. The driver goes under drives/nfc/ and is compiled as a +module named "pn544". It registers a misc device and creates a device +file named "/dev/pn544". + +Host Interfaces: I2C, SPI and HSU, this driver supports currently only I2C. + +The Interface +------------- + +The driver offers a sysfs interface for a hardware test and an IOCTL +interface for selecting between two operating modes. There are read, +write and poll functions for transferring messages. The two operating +modes are the normal (HCI) mode and the firmware update mode. + +PN544 is controlled by sending messages from the userspace to the +chip. The main function of the driver is just to pass those messages +without caring about the message content. + + +Protocols +--------- + +In the normal (HCI) mode and in the firmware update mode read and +write functions behave a bit differently because the message formats +or the protocols are different. + +In the normal (HCI) mode the protocol used is derived from the ETSI +HCI specification. The firmware is updated using a specific protocol, +which is different from HCI. + +HCI messages consist of an eight bit header and the message body. The +header contains the message length. Maximum size for an HCI message is +33. In HCI mode sent messages are tested for a correct +checksum. Firmware update messages have the length in the second (MSB) +and third (LSB) bytes of the message. The maximum FW message length is +1024 bytes. + +For the ETSI HCI specification see +http://www.etsi.org/WebSite/Technologies/ProtocolSpecification.aspx + +The Hardware Test +----------------- + +The idea of the test is that it can performed by reading from the +corresponding sysfs file. The test is implemented in the board file +and it should test that PN544 can be put into the firmware update +mode. If the test is not implemented the sysfs file does not get +created. + +Example: +> cat /sys/module/pn544/drivers/i2c\:pn544/3-002b/nfc_test +1 + +Normal Operation +---------------- + +PN544 is powered up when the device file is opened, otherwise it's +turned off. 
Only one instance can use the device at a time. + +Userspace applications control PN544 with HCI messages. The hardware +sends an interrupt when data is available for reading. Data is +physically read when the read function is called by a userspace +application. Poll() checks the read interrupt state. Configuration and +self testing are also done from the userspace using read and write. + +Example platform data: + +static int rx71_pn544_nfc_request_resources(struct i2c_client *client) +{ + /* Get and setup the HW resources for the device */ +} + +static void rx71_pn544_nfc_free_resources(void) +{ + /* Release the HW resources */ +} + +static void rx71_pn544_nfc_enable(int fw) +{ + /* Turn the device on */ +} + +static int rx71_pn544_nfc_test(void) +{ + /* + * Put the device into the FW update mode + * and then back to the normal mode. + * Check the behavior and return one on success, + * zero on failure. + */ +} + +static void rx71_pn544_nfc_disable(void) +{ + /* turn the power off */ +} + +static struct pn544_nfc_platform_data rx71_nfc_data = { + .request_resources = rx71_pn544_nfc_request_resources, + .free_resources = rx71_pn544_nfc_free_resources, + .enable = rx71_pn544_nfc_enable, + .test = rx71_pn544_nfc_test, + .disable = rx71_pn544_nfc_disable, +}; diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt index 3272ed59dec7..7400d7555dc3 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/powerpc/booting-without-of.txt @@ -1098,7 +1098,7 @@ supported currently at the toplevel. * an arbitrary array of bytes */ - childnode@addresss { /* define a child node named "childnode" + childnode@address { /* define a child node named "childnode" * whose unit name is "childnode at * address" */ diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt index 125f4ab48998..d35dcdd82ff6 100644 --- a/Documentation/pps/pps.txt +++ b/Documentation/pps/pps.txt @@ -170,3 +170,49 @@ and the run ppstest as follow: Please, note that to compile userland programs you need the file timepps.h (see Documentation/pps/). + + +Generators +---------- + +Sometimes one needs to be able not only to catch PPS signals but to produce +them also. For example, running a distributed simulation, which requires +computers' clock to be synchronized very tightly. One way to do this is to +invent some complicated hardware solutions but it may be neither necessary +nor affordable. The cheap way is to load a PPS generator on one of the +computers (master) and PPS clients on others (slaves), and use very simple +cables to deliver signals using parallel ports, for example. + +Parallel port cable pinout: +pin name master slave +1 STROBE *------ * +2 D0 * | * +3 D1 * | * +4 D2 * | * +5 D3 * | * +6 D4 * | * +7 D5 * | * +8 D6 * | * +9 D7 * | * +10 ACK * ------* +11 BUSY * * +12 PE * * +13 SEL * * +14 AUTOFD * * +15 ERROR * * +16 INIT * * +17 SELIN * * +18-25 GND *-----------* + +Please note that parallel port interrupt occurs only on high->low transition, +so it is used for PPS assert edge. PPS clear edge can be determined only +using polling in the interrupt handler which actually can be done way more +precisely because interrupt handling delays can be quite big and random. So +current parport PPS generator implementation (pps_gen_parport module) is +geared towards using the clear edge for time synchronization. 
+ +Clear edge polling is done with disabled interrupts so it's better to select +delay between assert and clear edge as small as possible to reduce system +latencies. But if it is too small slave won't be able to capture clear edge +transition. The default of 30us should be good enough in most situations. +The delay can be selected using 'delay' pps_gen_parport module parameter. diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX index 3c00c9c3219e..d2651c47ae27 100644 --- a/Documentation/scheduler/00-INDEX +++ b/Documentation/scheduler/00-INDEX @@ -3,7 +3,7 @@ sched-arch.txt - CPU Scheduler implementation hints for architecture specific code. sched-design-CFS.txt - - goals, design and implementation of the Complete Fair Scheduler. + - goals, design and implementation of the Completely Fair Scheduler. sched-domains.txt - information on scheduling domains. sched-nice-design.txt diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc index 337c924cc81f..5e83769c6aa9 100644 --- a/Documentation/scsi/ChangeLog.lpfc +++ b/Documentation/scsi/ChangeLog.lpfc @@ -573,7 +573,7 @@ Changes from 20041018 to 20041123 * Backround nodev_timeout processing to DPC This enables us to unblock (stop dev_loss_tmo) when appopriate. * Fix array discovery with multiple luns. The max_luns was 0 at - the time the host structure was intialized. lpfc_cfg_params + the time the host structure was initialized. lpfc_cfg_params then set the max_luns to the correct value afterwards. * Remove unused define LPFC_MAX_LUN and set the default value of lpfc_max_lun parameter to 512. diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt index 7c900507279f..540db41dfd5d 100644 --- a/Documentation/serial/tty.txt +++ b/Documentation/serial/tty.txt @@ -107,7 +107,7 @@ write_wakeup() - May be called at any point between open and close. dcd_change() - Report to the tty line the current DCD pin status changes and the relative timestamp. The timestamp - can be NULL. + cannot be NULL. Driver Access diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index d0eb696d32e8..3c1eddd9fcc7 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@ -974,13 +974,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. See hdspm.txt for details. - Module snd-hifier - ----------------- - - Module for the MediaTek/TempoTec HiFier Fantasia sound card. - - This module supports autoprobe and multiple cards. - Module snd-ice1712 ------------------ @@ -1531,15 +1524,20 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. Module snd-oxygen ----------------- - Module for sound cards based on the C-Media CMI8788 chip: + Module for sound cards based on the C-Media CMI8786/8787/8788 chip: * Asound A-8788 + * Asus Xonar DG * AuzenTech X-Meridian + * AuzenTech X-Meridian 2G * Bgears b-Enspirer * Club3D Theatron DTS * HT-Omega Claro (plus) * HT-Omega Claro halo (XT) + * Kuroutoshikou CMI8787-HG2PCI * Razer Barracuda AC-1 * Sondigo Inferno + * TempoTec HiFier Fantasia + * TempoTec HiFier Serenade This module supports autoprobe and multiple cards. @@ -2006,9 +2004,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. Module snd-virtuoso ------------------- - Module for sound cards based on the Asus AV100/AV200 chips, - i.e., Xonar D1, DX, D2, D2X, DS, HDAV1.3 (Deluxe), Essence ST - (Deluxe) and Essence STX. 
+ Module for sound cards based on the Asus AV66/AV100/AV200 chips, + i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX, + HDAV1.3 (Deluxe), and HDAV1.3 Slim. This module supports autoprobe and multiple cards. diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 37c6aad5e590..16ae4300c747 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -149,7 +149,6 @@ ALC882/883/885/888/889 acer-aspire-7730g Acer Aspire 7730G acer-aspire-8930g Acer Aspire 8930G medion Medion Laptops - medion-md2 Medion MD2 targa-dig Targa/MSI targa-2ch-dig Targa/MSI with 2-channel targa-8ch-dig Targa/MSI with 8-channel (MSI GX620) diff --git a/Documentation/sysctl/00-INDEX b/Documentation/sysctl/00-INDEX index 1286f455992f..8cf5d493fd03 100644 --- a/Documentation/sysctl/00-INDEX +++ b/Documentation/sysctl/00-INDEX @@ -4,8 +4,6 @@ README - general information about /proc/sys/ sysctl files. abi.txt - documentation for /proc/sys/abi/*. -ctl_unnumbered.txt - - explanation of why one should not add new binary sysctl numbers. fs.txt - documentation for /proc/sys/fs/*. kernel.txt diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 574067194f38..11d5ceda5bb0 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -34,6 +34,7 @@ show up in /proc/sys/kernel: - hotplug - java-appletviewer [ binfmt_java, obsolete ] - java-interpreter [ binfmt_java, obsolete ] +- kptr_restrict - kstack_depth_to_print [ X86 only ] - l2cr [ PPC only ] - modprobe ==> Documentation/debugging-modules.txt @@ -261,6 +262,19 @@ This flag controls the L2 cache of G3 processor boards. If ============================================================== +kptr_restrict: + +This toggle indicates whether restrictions are placed on +exposing kernel addresses via /proc and other interfaces. When +kptr_restrict is set to (0), there are no restrictions. When +kptr_restrict is set to (1), the default, kernel pointers +printed using the %pK format specifier will be replaced with 0's +unless the user has CAP_SYSLOG. When kptr_restrict is set to +(2), kernel pointers printed using %pK will be replaced with 0's +regardless of privileges. 
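For illustration, a driver's /proc or debugfs show routine might print an
address with %pK like this (foo_show is just an example name, not an
existing kernel function):

    #include <linux/seq_file.h>

    /* The printed address is zeroed according to kptr_restrict and
     * the reading process' CAP_SYSLOG capability. */
    static int foo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "object at %pK\n", m->private);
        return 0;
    }

With kptr_restrict set to (1), an unprivileged reader of such a file sees
zeroes while a reader with CAP_SYSLOG sees the real address; with (2)
everyone sees zeroes.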
+ +============================================================== + kstack_depth_to_print: (X86 only) Controls the number of words to print when dumping the raw diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py new file mode 100755 index 000000000000..dbeb8a0d7175 --- /dev/null +++ b/Documentation/target/tcm_mod_builder.py @@ -0,0 +1,1094 @@ +#!/usr/bin/python +# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD +# +# Copyright (c) 2010 Rising Tide Systems +# Copyright (c) 2010 Linux-iSCSI.org +# +# Author: nab@kernel.org +# +import os, sys +import subprocess as sub +import string +import re +import optparse + +tcm_dir = "" + +fabric_ops = [] +fabric_mod_dir = "" +fabric_mod_port = "" +fabric_mod_init_port = "" + +def tcm_mod_err(msg): + print msg + sys.exit(1) + +def tcm_mod_create_module_subdir(fabric_mod_dir_var): + + if os.path.isdir(fabric_mod_dir_var) == True: + return 1 + + print "Creating fabric_mod_dir: " + fabric_mod_dir_var + ret = os.mkdir(fabric_mod_dir_var) + if ret: + tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var) + + return + +def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): + global fabric_mod_port + global fabric_mod_init_port + buf = "" + + f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" + print "Writing file: " + f + + p = open(f, 'w'); + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" + buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" + buf += "\n" + buf += "struct " + fabric_mod_name + "_nacl {\n" + buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n" + buf += " u64 nport_wwpn;\n" + buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n" + buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" + buf += " struct se_node_acl se_node_acl;\n" + buf += "};\n" + buf += "\n" + buf += "struct " + fabric_mod_name + "_tpg {\n" + buf += " /* FC lport target portal group tag for TCM */\n" + buf += " u16 lport_tpgt;\n" + buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n" + buf += " struct " + fabric_mod_name + "_lport *lport;\n" + buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" + buf += " struct se_portal_group se_tpg;\n" + buf += "};\n" + buf += "\n" + buf += "struct " + fabric_mod_name + "_lport {\n" + buf += " /* SCSI protocol the lport is providing */\n" + buf += " u8 lport_proto_id;\n" + buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" + buf += " u64 lport_wwpn;\n" + buf += " /* ASCII formatted WWPN for FC Target Lport */\n" + buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n" + buf += " struct se_wwn lport_wwn;\n" + buf += "};\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + + fabric_mod_port = "lport" + fabric_mod_init_port = "nport" + + return + +def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): + global fabric_mod_port + global fabric_mod_init_port + buf = "" + + f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" + print "Writing file: " + f + + p = open(f, 'w'); + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" + buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" 
+ buf += "\n" + buf += "struct " + fabric_mod_name + "_nacl {\n" + buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n" + buf += " u64 iport_wwpn;\n" + buf += " /* ASCII formatted WWPN for Sas Initiator port */\n" + buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" + buf += " struct se_node_acl se_node_acl;\n" + buf += "};\n\n" + buf += "struct " + fabric_mod_name + "_tpg {\n" + buf += " /* SAS port target portal group tag for TCM */\n" + buf += " u16 tport_tpgt;\n" + buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" + buf += " struct " + fabric_mod_name + "_tport *tport;\n" + buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" + buf += " struct se_portal_group se_tpg;\n" + buf += "};\n\n" + buf += "struct " + fabric_mod_name + "_tport {\n" + buf += " /* SCSI protocol the tport is providing */\n" + buf += " u8 tport_proto_id;\n" + buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" + buf += " u64 tport_wwpn;\n" + buf += " /* ASCII formatted WWPN for SAS Target port */\n" + buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" + buf += " struct se_wwn tport_wwn;\n" + buf += "};\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + + fabric_mod_port = "tport" + fabric_mod_init_port = "iport" + + return + +def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): + global fabric_mod_port + global fabric_mod_init_port + buf = "" + + f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" + print "Writing file: " + f + + p = open(f, 'w'); + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" + buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" + buf += "\n" + buf += "struct " + fabric_mod_name + "_nacl {\n" + buf += " /* ASCII formatted InitiatorName */\n" + buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" + buf += " struct se_node_acl se_node_acl;\n" + buf += "};\n\n" + buf += "struct " + fabric_mod_name + "_tpg {\n" + buf += " /* iSCSI target portal group tag for TCM */\n" + buf += " u16 tport_tpgt;\n" + buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" + buf += " struct " + fabric_mod_name + "_tport *tport;\n" + buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" + buf += " struct se_portal_group se_tpg;\n" + buf += "};\n\n" + buf += "struct " + fabric_mod_name + "_tport {\n" + buf += " /* SCSI protocol the tport is providing */\n" + buf += " u8 tport_proto_id;\n" + buf += " /* ASCII formatted TargetName for IQN */\n" + buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" + buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" + buf += " struct se_wwn tport_wwn;\n" + buf += "};\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + + fabric_mod_port = "tport" + fabric_mod_init_port = "iport" + + return + +def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name): + + if proto_ident == "FC": + tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name) + elif proto_ident == "SAS": + tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name) + elif proto_ident == "iSCSI": + 
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name) + else: + print "Unsupported proto_ident: " + proto_ident + sys.exit(1) + + return + +def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): + buf = "" + + f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c" + print "Writing file: " + f + + p = open(f, 'w'); + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n\n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n\n" + buf += "#include <" + fabric_mod_name + "_base.h>\n" + buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n" + + buf += "/* Local pointer to allocated TCM configfs fabric module */\n" + buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n" + + buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" + buf += " struct se_portal_group *se_tpg,\n" + buf += " struct config_group *group,\n" + buf += " const char *name)\n" + buf += "{\n" + buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n" + buf += " struct " + fabric_mod_name + "_nacl *nacl;\n" + + if proto_ident == "FC" or proto_ident == "SAS": + buf += " u64 wwpn = 0;\n" + + buf += " u32 nexus_depth;\n\n" + buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" + buf += " return ERR_PTR(-EINVAL); */\n" + buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n" + buf += " if (!(se_nacl_new))\n" + buf += " return ERR_PTR(-ENOMEM);\n" + buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n" + buf += " nexus_depth = 1;\n" + buf += " /*\n" + buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n" + buf += " * when converting a NodeACL from demo mode -> explict\n" + buf += " */\n" + buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n" + buf += " name, nexus_depth);\n" + buf += " if (IS_ERR(se_nacl)) {\n" + buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n" + buf += " return se_nacl;\n" + buf += " }\n" + buf += " /*\n" + buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n" + buf += " */\n" + buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n" + + if proto_ident == "FC" or proto_ident == "SAS": + buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n" + + buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" + buf += " return se_nacl;\n" + buf += "}\n\n" + buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n" + buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" + buf += " kfree(nacl);\n" + buf += "}\n\n" + + buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" + buf += " struct se_wwn *wwn,\n" + buf += " struct config_group *group,\n" + buf += " const char *name)\n" + buf += "{\n" + buf += " 
struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg;\n" + buf += " unsigned long tpgt;\n" + buf += " int ret;\n\n" + buf += " if (strstr(name, \"tpgt_\") != name)\n" + buf += " return ERR_PTR(-EINVAL);\n" + buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n" + buf += " return ERR_PTR(-EINVAL);\n\n" + buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n" + buf += " if (!(tpg)) {\n" + buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n" + buf += " return ERR_PTR(-ENOMEM);\n" + buf += " }\n" + buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" + buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" + buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n" + buf += " &tpg->se_tpg, (void *)tpg,\n" + buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" + buf += " if (ret < 0) {\n" + buf += " kfree(tpg);\n" + buf += " return NULL;\n" + buf += " }\n" + buf += " return &tpg->se_tpg;\n" + buf += "}\n\n" + buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n" + buf += " core_tpg_deregister(se_tpg);\n" + buf += " kfree(tpg);\n" + buf += "}\n\n" + + buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n" + buf += " struct target_fabric_configfs *tf,\n" + buf += " struct config_group *group,\n" + buf += " const char *name)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n" + + if proto_ident == "FC" or proto_ident == "SAS": + buf += " u64 wwpn = 0;\n\n" + + buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" + buf += " return ERR_PTR(-EINVAL); */\n\n" + buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n" + buf += " if (!(" + fabric_mod_port + ")) {\n" + buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n" + buf += " return ERR_PTR(-ENOMEM);\n" + buf += " }\n" + + if proto_ident == "FC" or proto_ident == "SAS": + buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n" + + buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n" + buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n" + buf += "}\n\n" + buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n" + buf += " kfree(" + fabric_mod_port + ");\n" + buf += "}\n\n" + buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n" + buf += " struct target_fabric_configfs *tf,\n" + buf += " char *page)\n" + buf += "{\n" + buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" + buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, 
utsname()->sysname,\n" + buf += " utsname()->machine);\n" + buf += "}\n\n" + buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n" + buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n" + buf += " &" + fabric_mod_name + "_wwn_version.attr,\n" + buf += " NULL,\n" + buf += "};\n\n" + + buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" + buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" + buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" + buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" + buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" + buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n" + buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n" + buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n" + buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n" + buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" + buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" + buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" + buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" + buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n" + buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n" + buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" + buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n" + buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n" + buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" + buf += " .close_session = " + fabric_mod_name + "_close_session,\n" + buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n" + buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n" + buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n" + buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" + buf += " .sess_get_initiator_sid = NULL,\n" + buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" + buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" + buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" + buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" + buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" + buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n" + buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" + buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" + buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" + buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n" + buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n" + buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" + buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n" + buf += " /*\n" + buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" + buf += " */\n" + buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n" + buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" + buf += " .fabric_make_tpg = " + 
fabric_mod_name + "_make_tpg,\n" + buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" + buf += " .fabric_post_link = NULL,\n" + buf += " .fabric_pre_unlink = NULL,\n" + buf += " .fabric_make_np = NULL,\n" + buf += " .fabric_drop_np = NULL,\n" + buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" + buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" + buf += "};\n\n" + + buf += "static int " + fabric_mod_name + "_register_configfs(void)\n" + buf += "{\n" + buf += " struct target_fabric_configfs *fabric;\n" + buf += " int ret;\n\n" + buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" + buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" + buf += " utsname()->machine);\n" + buf += " /*\n" + buf += " * Register the top level struct config_item_type with TCM core\n" + buf += " */\n" + buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" + buf += " if (!(fabric)) {\n" + buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" + buf += " return -ENOMEM;\n" + buf += " }\n" + buf += " /*\n" + buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" + buf += " */\n" + buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n" + buf += " /*\n" + buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" + buf += " */\n" + buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" + buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" + buf += " /*\n" + buf += " * Register the fabric for use within TCM\n" + buf += " */\n" + buf += " ret = target_fabric_configfs_register(fabric);\n" + buf += " if (ret < 0) {\n" + buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n" + buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n" + buf += " return ret;\n" + buf += " }\n" + buf += " /*\n" + buf += " * Setup our local pointer to *fabric\n" + buf += " */\n" + buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n" + buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" + buf += " return 0;\n" + buf += "};\n\n" + buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n" + buf += "{\n" + buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n" + buf += " return;\n\n" + buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" + buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" + buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n" + buf += "};\n\n" + + buf += "static int __init " + fabric_mod_name + "_init(void)\n" + buf += "{\n" + buf += " int ret;\n\n" + buf += " ret = " + fabric_mod_name + "_register_configfs();\n" + buf += " if (ret < 0)\n" + buf += " return ret;\n\n" + buf += " return 0;\n" + buf += 
"};\n\n" + buf += "static void " + fabric_mod_name + "_exit(void)\n" + buf += "{\n" + buf += " " + fabric_mod_name + "_deregister_configfs();\n" + buf += "};\n\n" + + buf += "#ifdef MODULE\n" + buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" + buf += "MODULE_LICENSE(\"GPL\");\n" + buf += "module_init(" + fabric_mod_name + "_init);\n" + buf += "module_exit(" + fabric_mod_name + "_exit);\n" + buf += "#endif\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + + return + +def tcm_mod_scan_fabric_ops(tcm_dir): + + fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h" + + print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api + process_fo = 0; + + p = open(fabric_ops_api, 'r') + + line = p.readline() + while line: + if process_fo == 0 and re.search('struct target_core_fabric_ops {', line): + line = p.readline() + continue + + if process_fo == 0: + process_fo = 1; + line = p.readline() + # Search for function pointer + if not re.search('\(\*', line): + continue + + fabric_ops.append(line.rstrip()) + continue + + line = p.readline() + # Search for function pointer + if not re.search('\(\*', line): + continue + + fabric_ops.append(line.rstrip()) + + p.close() + return + +def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): + buf = "" + bufi = "" + + f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c" + print "Writing file: " + f + + p = open(f, 'w') + if not p: + tcm_mod_err("Unable to open file: " + f) + + fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h" + print "Writing file: " + fi + + pi = open(fi, 'w') + if not pi: + tcm_mod_err("Unable to open file: " + fi) + + buf = "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n\n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include \n" + buf += "#include <" + fabric_mod_name + "_base.h>\n" + buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n" + + buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " return 1;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n" + + buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n" + + total_fabric_ops = len(fabric_ops) + i = 0 + + while i < total_fabric_ops: + fo = fabric_ops[i] + i += 1 +# print "fabric_ops: " + fo + + if re.search('get_fabric_name', fo): + buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" + buf += "{\n" + buf += " return \"" + fabric_mod_name[4:] + "\";\n" + buf += "}\n\n" + bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" + continue + + if re.search('get_fabric_proto_ident', fo): + buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + 
fabric_mod_port + ";\n" + buf += " u8 proto_id;\n\n" + buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" + if proto_ident == "FC": + buf += " case SCSI_PROTOCOL_FCP:\n" + buf += " default:\n" + buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n" + buf += " break;\n" + elif proto_ident == "SAS": + buf += " case SCSI_PROTOCOL_SAS:\n" + buf += " default:\n" + buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n" + buf += " break;\n" + elif proto_ident == "iSCSI": + buf += " case SCSI_PROTOCOL_ISCSI:\n" + buf += " default:\n" + buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n" + buf += " break;\n" + + buf += " }\n\n" + buf += " return proto_id;\n" + buf += "}\n\n" + bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n" + + if re.search('get_wwn', fo): + buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n" + buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n" + buf += "}\n\n" + bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n" + + if re.search('get_tag', fo): + buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " return tpg->" + fabric_mod_port + "_tpgt;\n" + buf += "}\n\n" + bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" + + if re.search('get_default_depth', fo): + buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " return 1;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n" + + if re.search('get_pr_transport_id\)\(', fo): + buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n" + buf += " struct se_portal_group *se_tpg,\n" + buf += " struct se_node_acl *se_nacl,\n" + buf += " struct t10_pr_registration *pr_reg,\n" + buf += " int *format_code,\n" + buf += " unsigned char *buf)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" + buf += " int ret = 0;\n\n" + buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" + if proto_ident == "FC": + buf += " case SCSI_PROTOCOL_FCP:\n" + buf += " default:\n" + buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code, buf);\n" + buf += " break;\n" + elif proto_ident == "SAS": + buf += " case SCSI_PROTOCOL_SAS:\n" + buf += " default:\n" + buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code, buf);\n" + buf += " break;\n" + elif proto_ident == "iSCSI": + buf += " case SCSI_PROTOCOL_ISCSI:\n" + buf += " default:\n" + buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code, buf);\n" + buf += " break;\n" + + buf += " }\n\n" + buf += " return ret;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + 
"_get_pr_transport_id(struct se_portal_group *,\n" + bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" + bufi += " int *, unsigned char *);\n" + + if re.search('get_pr_transport_id_len\)\(', fo): + buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" + buf += " struct se_portal_group *se_tpg,\n" + buf += " struct se_node_acl *se_nacl,\n" + buf += " struct t10_pr_registration *pr_reg,\n" + buf += " int *format_code)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" + buf += " int ret = 0;\n\n" + buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" + if proto_ident == "FC": + buf += " case SCSI_PROTOCOL_FCP:\n" + buf += " default:\n" + buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code);\n" + buf += " break;\n" + elif proto_ident == "SAS": + buf += " case SCSI_PROTOCOL_SAS:\n" + buf += " default:\n" + buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code);\n" + buf += " break;\n" + elif proto_ident == "iSCSI": + buf += " case SCSI_PROTOCOL_ISCSI:\n" + buf += " default:\n" + buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" + buf += " format_code);\n" + buf += " break;\n" + + + buf += " }\n\n" + buf += " return ret;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" + bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" + bufi += " int *);\n" + + if re.search('parse_pr_out_transport_id\)\(', fo): + buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" + buf += " struct se_portal_group *se_tpg,\n" + buf += " const char *buf,\n" + buf += " u32 *out_tid_len,\n" + buf += " char **port_nexus_ptr)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" + buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" + buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" + buf += " char *tid = NULL;\n\n" + buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" + if proto_ident == "FC": + buf += " case SCSI_PROTOCOL_FCP:\n" + buf += " default:\n" + buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" + buf += " port_nexus_ptr);\n" + elif proto_ident == "SAS": + buf += " case SCSI_PROTOCOL_SAS:\n" + buf += " default:\n" + buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" + buf += " port_nexus_ptr);\n" + elif proto_ident == "iSCSI": + buf += " case SCSI_PROTOCOL_ISCSI:\n" + buf += " default:\n" + buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" + buf += " port_nexus_ptr);\n" + + buf += " }\n\n" + buf += " return tid;\n" + buf += "}\n\n" + bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" + bufi += " const char *, u32 *, char **);\n" + + if re.search('alloc_fabric_acl\)\(', fo): + buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" + buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" + buf += " if (!(nacl)) {\n" + buf += " printk(KERN_ERR 
\"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n" + buf += " return NULL;\n" + buf += " }\n\n" + buf += " return &nacl->se_node_acl;\n" + buf += "}\n\n" + bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" + + if re.search('release_fabric_acl\)\(', fo): + buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" + buf += " struct se_portal_group *se_tpg,\n" + buf += " struct se_node_acl *se_nacl)\n" + buf += "{\n" + buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" + buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" + buf += " kfree(nacl);\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" + bufi += " struct se_node_acl *);\n" + + if re.search('tpg_get_inst_index\)\(', fo): + buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" + buf += "{\n" + buf += " return 1;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" + + if re.search('release_cmd_to_pool', fo): + buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" + + if re.search('shutdown_session\)\(', fo): + buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n" + + if re.search('close_session\)\(', fo): + buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" + + if re.search('stop_session\)\(', fo): + buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" + + if re.search('fall_back_to_erl0\)\(', fo): + buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" + + if re.search('sess_logged_in\)\(', fo): + buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" + + if re.search('sess_get_index\)\(', fo): + buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n" + + if re.search('write_pending\)\(', fo): + buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" + + if re.search('write_pending_status\)\(', fo): + buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" + + if re.search('set_default_node_attributes\)\(', fo): + buf += "void " + 
fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" + + if re.search('get_task_tag\)\(', fo): + buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" + + if re.search('get_cmd_state\)\(', fo): + buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" + + if re.search('new_cmd_failure\)\(', fo): + buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return;\n" + buf += "}\n\n" + bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n" + + if re.search('queue_data_in\)\(', fo): + buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n" + + if re.search('queue_status\)\(', fo): + buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" + + if re.search('queue_tm_rsp\)\(', fo): + buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" + + if re.search('get_fabric_sense_len\)\(', fo): + buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n" + + if re.search('set_fabric_sense_len\)\(', fo): + buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n" + + if re.search('is_state_remove\)\(', fo): + buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" + buf += "{\n" + buf += " return 0;\n" + buf += "}\n\n" + bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" + + if re.search('pack_lun\)\(', fo): + buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n" + buf += "{\n" + buf += " WARN_ON(lun >= 256);\n" + buf += " /* Caller wants this byte-swapped */\n" + buf += " return cpu_to_le64((lun & 0xff) << 8);\n" + buf += "}\n\n" + bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n" + + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + + ret = pi.write(bufi) + if ret: + tcm_mod_err("Unable to write fi: " + fi) + + pi.close() + return + +def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name): + + buf = "" + f = fabric_mod_dir_var + "/Kbuild" + print "Writing file: " + f + + p = open(f, 'w') + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n" + buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n" + buf += " " + 
fabric_mod_name + "_configfs.o\n" + buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + return + +def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name): + + buf = "" + f = fabric_mod_dir_var + "/Kconfig" + print "Writing file: " + f + + p = open(f, 'w') + if not p: + tcm_mod_err("Unable to open file: " + f) + + buf = "config " + fabric_mod_name.upper() + "\n" + buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n" + buf += " depends on TARGET_CORE && CONFIGFS_FS\n" + buf += " default n\n" + buf += " ---help---\n" + buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n" + + ret = p.write(buf) + if ret: + tcm_mod_err("Unable to write f: " + f) + + p.close() + return + +def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name): + buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n" + kbuild = tcm_dir + "/drivers/target/Kbuild" + + f = open(kbuild, 'a') + f.write(buf) + f.close() + return + +def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name): + buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n" + kconfig = tcm_dir + "/drivers/target/Kconfig" + + f = open(kconfig, 'a') + f.write(buf) + f.close() + return + +def main(modname, proto_ident): +# proto_ident = "FC" +# proto_ident = "SAS" +# proto_ident = "iSCSI" + + tcm_dir = os.getcwd(); + tcm_dir += "/../../" + print "tcm_dir: " + tcm_dir + fabric_mod_name = modname + fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name + print "Set fabric_mod_name: " + fabric_mod_name + print "Set fabric_mod_dir: " + fabric_mod_dir + print "Using proto_ident: " + proto_ident + + if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI": + print "Unsupported proto_ident: " + proto_ident + sys.exit(1) + + ret = tcm_mod_create_module_subdir(fabric_mod_dir) + if ret: + print "tcm_mod_create_module_subdir() failed because module already exists!" + sys.exit(1) + + tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name) + tcm_mod_scan_fabric_ops(tcm_dir) + tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name) + tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name) + tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) + tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) + + input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ") + if input == "yes" or input == "y": + tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) + + input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? 
[yes,no]: ") + if input == "yes" or input == "y": + tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) + + return + +parser = optparse.OptionParser() +parser.add_option('-m', '--modulename', help='Module name', dest='modname', + action='store', nargs=1, type='string') +parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident', + action='store', nargs=1, type='string') + +(opts, args) = parser.parse_args() + +mandatories = ['modname', 'protoident'] +for m in mandatories: + if not opts.__dict__[m]: + print "mandatory option is missing\n" + parser.print_help() + exit(-1) + +if __name__ == "__main__": + + main(str(opts.modname), opts.protoident) diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt new file mode 100644 index 000000000000..84533d8e747f --- /dev/null +++ b/Documentation/target/tcm_mod_builder.txt @@ -0,0 +1,145 @@ +>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<< + +Greetings all, + +This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py +script to generate a brand new functional TCM v4 fabric .ko module of your very own, +that once built can be immediately be loaded to start access the new TCM/ConfigFS +fabric skeleton, by simply using: + + modprobe $TCM_NEW_MOD + mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD + +This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following + + *) Generate new API callers for drivers/target/target_core_fabric_configs.c logic + ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg() + ->make_wwn(), ->drop_wwn(). These are created into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c + *) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module + using a skeleton struct target_core_fabric_ops API template. + *) Based on user defined T10 Proto_Ident for the new fabric module being built, + the TransportID / Initiator and Target WWPN related handlers for + SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c + using drivers/target/target_core_fabric_lib.c logic. 
+ *) NOP API calls for all other Data I/O path and fabric dependent attribute logic + in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c + +tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m +$FABRIC_MOD_name' parameters, and actually running the script looks like: + +target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000 +tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../ +Set fabric_mod_name: tcm_nab5000 +Set fabric_mod_dir: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000 +Using proto_ident: iSCSI +Creating fabric_mod_dir: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000 +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h +Using tcm_mod_scan_fabric_ops: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild +Writing file: +/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig +Would you like to add tcm_nab5000to drivers/target/Kbuild..? [yes,no]: yes +Would you like to add tcm_nab5000to drivers/target/Kconfig..? [yes,no]: yes + +At the end of tcm_mod_builder.py. the script will ask to add the following +line to drivers/target/Kbuild: + + obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/ + +and the same for drivers/target/Kconfig: + + source "drivers/target/tcm_nab5000/Kconfig" + +*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item: + + TCM_NAB5000 fabric module + +*) Build using 'make modules', once completed you will have: + +target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/ +total 1348 +drwxr-xr-x 2 root root 4096 2010-10-05 03:23 . +drwxr-xr-x 9 root root 4096 2010-10-05 03:22 .. 
+-rw-r--r-- 1 root root 282 2010-10-05 03:22 Kbuild +-rw-r--r-- 1 root root 171 2010-10-05 03:22 Kconfig +-rw-r--r-- 1 root root 49 2010-10-05 03:23 modules.order +-rw-r--r-- 1 root root 738 2010-10-05 03:22 tcm_nab5000_base.h +-rw-r--r-- 1 root root 9096 2010-10-05 03:22 tcm_nab5000_configfs.c +-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o +-rw-r--r-- 1 root root 40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd +-rw-r--r-- 1 root root 5414 2010-10-05 03:22 tcm_nab5000_fabric.c +-rw-r--r-- 1 root root 2016 2010-10-05 03:22 tcm_nab5000_fabric.h +-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o +-rw-r--r-- 1 root root 40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd +-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko +-rw-r--r-- 1 root root 265 2010-10-05 03:23 .tcm_nab5000.ko.cmd +-rw-r--r-- 1 root root 459 2010-10-05 03:23 tcm_nab5000.mod.c +-rw-r--r-- 1 root root 23896 2010-10-05 03:23 tcm_nab5000.mod.o +-rw-r--r-- 1 root root 22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd +-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o +-rw-r--r-- 1 root root 211 2010-10-05 03:23 .tcm_nab5000.o.cmd + +*) Load the new module, create a lun_0 configfs group, and add new TCM Core + IBLOCK backstore symlink to port: + +target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko +target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0 +target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/ +target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port + +target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd - +target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/ +/sys/kernel/config/target/nab5000/ +|-- discovery_auth +|-- iqn.foo +| `-- tpgt_1 +| |-- acls +| |-- attrib +| |-- lun +| | `-- lun_0 +| | |-- alua_tg_pt_gp +| | |-- alua_tg_pt_offline +| | |-- alua_tg_pt_status +| | |-- alua_tg_pt_write_md +| | `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0 +| |-- np +| `-- param +`-- version + +target:/mnt/sdb/lio-core-2.6.git# lsmod +Module Size Used by +tcm_nab5000 3935 4 +iscsi_target_mod 193211 0 +target_core_stgt 8090 0 +target_core_pscsi 11122 1 +target_core_file 9172 2 +target_core_iblock 9280 1 +target_core_mod 228575 31 +tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock +libfc 73681 0 +scsi_debug 56265 0 +scsi_tgt 8666 1 target_core_stgt +configfs 20644 2 target_core_mod + +---------------------------------------------------------------------- + +Future TODO items: + + *) Add more T10 proto_idents + *) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer + defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops + structure members. + +October 5th, 2010 +Nicholas A. Bellinger diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt index cb3d15bc1aeb..b61e46f449aa 100644 --- a/Documentation/thermal/sysfs-api.txt +++ b/Documentation/thermal/sysfs-api.txt @@ -278,3 +278,15 @@ method, the sys I/F structure will be built like this: |---name: acpitz |---temp1_input: 37000 |---temp1_crit: 100000 + +4. Event Notification + +The framework includes a simple notification mechanism, in the form of a +netlink event. Netlink socket initialization is done during the _init_ +of the framework. 
Drivers which intend to use the notification mechanism +just need to call generate_netlink_event() with two arguments viz +(originator, event). Typically the originator will be an integer assigned +to a thermal_zone_device when it registers itself with the framework. The +event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL, +THERMAL_DEV_FAULT}. Notification can be sent when the current temperature +crosses any of the configured thresholds. diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt index 9bd00fc2e823..8abd40b22b7f 100644 --- a/Documentation/timers/timer_stats.txt +++ b/Documentation/timers/timer_stats.txt @@ -19,7 +19,7 @@ Linux system over a sample period: - the pid of the task(process) which initialized the timer - the name of the process which initialized the timer -- the function where the timer was intialized +- the function where the timer was initialized - the callback function which is associated to the timer - the number of events (callbacks) diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt index 09bd8e902989..b510564aac7e 100644 --- a/Documentation/trace/events.txt +++ b/Documentation/trace/events.txt @@ -125,7 +125,7 @@ is the size of the data item, in bytes. For example, here's the information displayed for the 'sched_wakeup' event: -# cat /debug/tracing/events/sched/sched_wakeup/format +# cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format name: sched_wakeup ID: 60 @@ -201,19 +201,19 @@ to the 'filter' file for the given event. For example: -# cd /debug/tracing/events/sched/sched_wakeup +# cd /sys/kernel/debug/tracing/events/sched/sched_wakeup # echo "common_preempt_count > 4" > filter A slightly more involved example: -# cd /debug/tracing/events/sched/sched_signal_send +# cd /sys/kernel/debug/tracing/events/signal/signal_generate # echo "((sig >= 10 && sig < 15) || sig == 17) && comm != bash" > filter If there is an error in the expression, you'll get an 'Invalid argument' error when setting it, and the erroneous string along with an error message can be seen by looking at the filter e.g.: -# cd /debug/tracing/events/sched/sched_signal_send +# cd /sys/kernel/debug/tracing/events/signal/signal_generate # echo "((sig >= 10 && sig < 15) || dsig == 17) && comm != bash" > filter -bash: echo: write error: Invalid argument # cat filter diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt new file mode 100644 index 000000000000..0924aaca3302 --- /dev/null +++ b/Documentation/vm/transhuge.txt @@ -0,0 +1,298 @@ += Transparent Hugepage Support = + +== Objective == + +Performance critical computing applications dealing with large memory +working sets are already running on top of libhugetlbfs and in turn +hugetlbfs. Transparent Hugepage Support is an alternative means of +using huge pages for the backing of virtual memory with huge pages +that supports the automatic promotion and demotion of page sizes and +without the shortcomings of hugetlbfs. + +Currently it only works for anonymous memory mappings but in the +future it can expand over the pagecache layer starting with tmpfs. + +The reason applications are running faster is because of two +factors. The first factor is almost completely irrelevant and it's not +of significant interest because it'll also have the downside of +requiring larger clear-page copy-page in page faults which is a +potentially negative effect. 
The first factor consists in taking a +single page fault for each 2M virtual region touched by userland (so +reducing the enter/exit kernel frequency by a 512 times factor). This +only matters the first time the memory is accessed for the lifetime of +a memory mapping. The second long lasting and much more important +factor will affect all subsequent accesses to the memory for the whole +runtime of the application. The second factor consist of two +components: 1) the TLB miss will run faster (especially with +virtualization using nested pagetables but almost always also on bare +metal without virtualization) and 2) a single TLB entry will be +mapping a much larger amount of virtual memory in turn reducing the +number of TLB misses. With virtualization and nested pagetables the +TLB can be mapped of larger size only if both KVM and the Linux guest +are using hugepages but a significant speedup already happens if only +one of the two is using hugepages just because of the fact the TLB +miss is going to run faster. + +== Design == + +- "graceful fallback": mm components which don't have transparent + hugepage knowledge fall back to breaking a transparent hugepage and + working on the regular pages and their respective regular pmd/pte + mappings + +- if a hugepage allocation fails because of memory fragmentation, + regular pages should be gracefully allocated instead and mixed in + the same vma without any failure or significant delay and without + userland noticing + +- if some task quits and more hugepages become available (either + immediately in the buddy or through the VM), guest physical memory + backed by regular pages should be relocated on hugepages + automatically (with khugepaged) + +- it doesn't require memory reservation and in turn it uses hugepages + whenever possible (the only possible reservation here is kernelcore= + to avoid unmovable pages to fragment all the memory but such a tweak + is not specific to transparent hugepage support and it's a generic + feature that applies to all dynamic high order allocations in the + kernel) + +- this initial support only offers the feature in the anonymous memory + regions but it'd be ideal to move it to tmpfs and the pagecache + later + +Transparent Hugepage Support maximizes the usefulness of free memory +if compared to the reservation approach of hugetlbfs by allowing all +unused memory to be used as cache or other movable (or even unmovable +entities). It doesn't require reservation to prevent hugepage +allocation failures to be noticeable from userland. It allows paging +and all other advanced VM features to be available on the +hugepages. It requires no modifications for applications to take +advantage of it. + +Applications however can be further optimized to take advantage of +this feature, like for example they've been optimized before to avoid +a flood of mmap system calls for every malloc(4k). Optimizing userland +is by far not mandatory and khugepaged already can take care of long +lived page allocations even for hugepage unaware applications that +deals with large amounts of memory. + +In certain cases when hugepages are enabled system wide, application +may end up allocating more memory resources. An application may mmap a +large region but only touch 1 byte of it, in that case a 2M page might +be allocated instead of a 4k page for no good. This is why it's +possible to disable hugepages system-wide and to only have them inside +MADV_HUGEPAGE madvise regions. 
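A minimal userland sketch of the madvise approach, assuming a libc that
already defines MADV_HUGEPAGE (the region size here is arbitrary):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    #define HPAGE_SIZE (2UL * 1024 * 1024)

    int main(void)
    {
        void *buf;

        /* hugepage aligned allocation, see also the "Optimizing the
           applications" section below */
        if (posix_memalign(&buf, HPAGE_SIZE, 8 * HPAGE_SIZE))
            return 1;

        /* request transparent hugepages for this region only; this has
           no effect unless THP is enabled system wide or in madvise
           mode */
        madvise(buf, 8 * HPAGE_SIZE, MADV_HUGEPAGE);

        /* touching the memory lets the page fault handler (or later
           khugepaged) back it with 2M pages */
        memset(buf, 0, 8 * HPAGE_SIZE);
        return 0;
    }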
+ +Embedded systems should enable hugepages only inside madvise regions +to eliminate any risk of wasting precious bytes of memory, so that they +only run faster. + +Applications that get a lot of benefit from hugepages, and that don't +risk losing memory by using them, should use +madvise(MADV_HUGEPAGE) on their critical mmapped regions. + +== sysfs == + +Transparent Hugepage Support can be entirely disabled (mostly for +debugging purposes) or only enabled inside MADV_HUGEPAGE regions (to +avoid the risk of consuming more memory resources) or enabled system +wide. This can be achieved with one of: + +echo always >/sys/kernel/mm/transparent_hugepage/enabled +echo madvise >/sys/kernel/mm/transparent_hugepage/enabled +echo never >/sys/kernel/mm/transparent_hugepage/enabled + +It's also possible to limit defrag efforts in the VM (used to generate +hugepages in case they're not immediately free) to madvise regions, or +to never try to defrag memory and simply fall back to regular pages +unless hugepages are immediately available. Clearly, if we spend CPU +time to defrag memory, we would expect to gain even more from the fact that +we use hugepages later instead of regular pages. This isn't always +guaranteed, but it may be more likely when the allocation is for a +MADV_HUGEPAGE region. + +echo always >/sys/kernel/mm/transparent_hugepage/defrag +echo madvise >/sys/kernel/mm/transparent_hugepage/defrag +echo never >/sys/kernel/mm/transparent_hugepage/defrag + +khugepaged will be automatically started when +transparent_hugepage/enabled is set to "always" or "madvise", and it'll +be automatically shut down if it's set to "never". + +khugepaged usually runs at low frequency, so while one may not want to +invoke defrag algorithms synchronously during the page faults, it +should be worth invoking defrag at least in khugepaged. However it's +also possible to disable defrag in khugepaged: + +echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag +echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag + +You can also control how many pages khugepaged should scan at each +pass: + +/sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan + +and how many milliseconds to wait in khugepaged between each pass (you +can set this to 0 to run khugepaged at 100% utilization of one core): + +/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs + +and how many milliseconds to wait in khugepaged if there's a hugepage +allocation failure, to throttle the next allocation attempt: + +/sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs + +The khugepaged progress can be seen in the number of pages collapsed: + +/sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed + +and in the number of full scans completed, one for each pass: + +/sys/kernel/mm/transparent_hugepage/khugepaged/full_scans + +== Boot parameter == + +You can change the sysfs boot time defaults of Transparent Hugepage +Support by passing the parameter "transparent_hugepage=always" or +"transparent_hugepage=madvise" or "transparent_hugepage=never" +(without the quotes) to the kernel command line. + +== Need of application restart == + +The transparent_hugepage/enabled values only affect future +behavior. So to make them effective you need to restart any +application that could have been using hugepages. This also applies to +the regions registered in khugepaged. + +== get_user_pages and follow_page == + +get_user_pages and follow_page, if run on a hugepage, will return the +head or tail pages as usual (exactly as they would do on +hugetlbfs).
Most gup users will only care about the actual physical +address of the page and its temporary pinning to release after the I/O +is complete, so they won't ever notice the fact that the page is huge. But +if any driver is going to mangle over the page structure of the tail +page (like for checking page->mapping or other bits that are relevant +for the head page and not the tail page), it should be updated to +check the head page instead (while serializing properly against +split_huge_page() to prevent the head and tail pages from disappearing from +under it; see the futex code for an example of that, hugetlbfs also +needed special handling in futex code for similar reasons). + +NOTE: these aren't new constraints to the GUP API, and they match the +same constraints that apply to hugetlbfs too, so any driver capable +of handling GUP on hugetlbfs will also work fine on transparent +hugepage backed mappings. + +In case you can't handle compound pages returned by +follow_page, the FOLL_SPLIT bit can be specified as a parameter to +follow_page, so that it will split the hugepages before returning +them. Migration for example passes FOLL_SPLIT as a parameter to +follow_page because it's not hugepage aware and in fact it can't work +at all on hugetlbfs (but it instead works fine on transparent +hugepages thanks to FOLL_SPLIT). Migration simply can't deal with +hugepages being returned (as it not only checks the pfn of the +page and pins it during the copy, but it also expects to migrate the +memory in regular page sizes and with regular pte/pmd mappings). + +== Optimizing the applications == + +To be guaranteed that the kernel will map a 2M page immediately in any +memory region, the mmap region has to be hugepage naturally +aligned. posix_memalign() can provide that guarantee. + +== Hugetlbfs == + +You can use hugetlbfs on a kernel that has transparent hugepage +support enabled just fine as always. No difference can be noted in +hugetlbfs other than there will be less overall fragmentation. All +usual features belonging to hugetlbfs are preserved and +unaffected. libhugetlbfs will also work fine as usual. + +== Graceful fallback == + +Code walking pagetables but unaware of huge pmds can simply call +split_huge_page_pmd(mm, pmd) where the pmd is the one returned by +pmd_offset. It's trivial to make the code transparent hugepage aware +by just grepping for "pmd_offset" and adding split_huge_page_pmd where +missing after pmd_offset returns the pmd. Thanks to the graceful +fallback design, with a one-liner change, you can avoid writing +hundreds if not thousands of lines of complex code to make your code +hugepage aware. + +If you're not walking pagetables but you run into a physical hugepage +that you can't handle natively in your code, you can split it by +calling split_huge_page(page). This is what the Linux VM does before +it tries to swap out the hugepage, for example. + +Example to make mremap.c transparent hugepage aware with a one-liner +change: + +diff --git a/mm/mremap.c b/mm/mremap.c +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_stru + return NULL; + + pmd = pmd_offset(pud, addr); ++ split_huge_page_pmd(mm, pmd); + if (pmd_none_or_clear_bad(pmd)) + return NULL; + +== Locking in hugepage aware code == + +We want as much code as possible hugepage aware, as calling +split_huge_page() or split_huge_page_pmd() has a cost.
+ +To make pagetable walks huge pmd aware, all you need to do is to call +pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the +mmap_sem in read (or write) mode to be sure a huge pmd cannot be +created from under you by khugepaged (khugepaged collapse_huge_page +takes the mmap_sem in write mode in addition to the anon_vma lock). If +pmd_trans_huge returns false, you just fall back to the old code +paths. If instead pmd_trans_huge returns true, you have to take the +mm->page_table_lock and re-run pmd_trans_huge. Taking the +page_table_lock will prevent the huge pmd from being converted into a +regular pmd from under you (split_huge_page can run in parallel to the +pagetable walk). If the second pmd_trans_huge returns false, you +should just drop the page_table_lock and fall back to the old code as +before. Otherwise you should run pmd_trans_splitting on the pmd. In +case pmd_trans_splitting returns true, it means split_huge_page is +already in the middle of splitting the page. So if pmd_trans_splitting +returns true it's enough to drop the page_table_lock and call +wait_split_huge_page and then fall back to the old code paths. You are +guaranteed that by the time wait_split_huge_page returns, the pmd isn't +huge anymore. If pmd_trans_splitting returns false, you can proceed to +process the huge pmd and the hugepage natively. Once finished you can +drop the page_table_lock. + +== compound_lock, get_user_pages and put_page == + +split_huge_page internally has to distribute the refcounts in the head +page to the tail pages before clearing all PG_head/tail bits from the +page structures. It can do that easily for refcounts taken by huge pmd +mappings. But the gup API as created by hugetlbfs (that returns head +and tail pages if running get_user_pages on an address backed by any +hugepage), requires the refcount to be accounted on the tail pages and +not only on the head pages, if we want to be able to run +split_huge_page while there are gup pins established on any tail +page. Not being able to run split_huge_page if there's any gup pin +on any tail page would mean having to split all hugepages upfront in +get_user_pages, which is unacceptable as too many gup users are +performance critical and they must work natively on hugepages like +they work natively on hugetlbfs already (hugetlbfs is simpler because +hugetlbfs pages cannot be split, so there is no requirement to +account the pins on the tail pages for hugetlbfs). If we didn't +account the gup refcounts on the tail pages during gup, we wouldn't know +which tail page is pinned by gup and which is not while we run +split_huge_page. But we still have to add the gup pin to the head page +too, to know when we can free the compound page in case it's never +split during its lifetime. That requires changing not just +get_page, but put_page as well so that when put_page runs on a tail +page (and only on a tail page) it will find its respective head page, +and then it will decrease the head page refcount in addition to the +tail page refcount. To obtain a head page reliably and to decrease its +refcount without race conditions, put_page has to serialize against +__split_huge_page_refcount using a special per-page lock called +compound_lock.
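The pagetable walk protocol described under "Locking in hugepage aware code" above can be condensed into a sketch along these lines (kernel-flavoured pseudo-code: the helpers are the ones named in this document, handle_huge_pmd() and handle_ptes() are hypothetical placeholders, and the exact wait_split_huge_page() calling convention should be double-checked against the source):

/* caller holds mmap_sem for read (or write) */
static int walk_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr)
{
	if (pmd_trans_huge(*pmd)) {
		spin_lock(&mm->page_table_lock);
		if (pmd_trans_huge(*pmd)) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				/* split_huge_page() is underway: wait for it,
				 * then use the regular pte paths */
				spin_unlock(&mm->page_table_lock);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				/* stable huge pmd: handle it natively */
				handle_huge_pmd(mm, pmd, addr);	/* hypothetical */
				spin_unlock(&mm->page_table_lock);
				return 0;
			}
		} else {
			/* raced with split_huge_page(): not huge anymore */
			spin_unlock(&mm->page_table_lock);
		}
	}
	return handle_ptes(mm, pmd, addr);	/* hypothetical regular path */
}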
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX index f8101d6b07b7..75613c9ac4db 100644 --- a/Documentation/w1/slaves/00-INDEX +++ b/Documentation/w1/slaves/00-INDEX @@ -2,3 +2,5 @@ - This file w1_therm - The Maxim/Dallas Semiconductor ds18*20 temperature sensor. +w1_ds2423 + - The Maxim/Dallas Semiconductor ds2423 counter device. diff --git a/Documentation/w1/slaves/w1_ds2423 b/Documentation/w1/slaves/w1_ds2423 new file mode 100644 index 000000000000..90a65d23cf59 --- /dev/null +++ b/Documentation/w1/slaves/w1_ds2423 @@ -0,0 +1,47 @@ +Kernel driver w1_ds2423 +======================= + +Supported chips: + * Maxim DS2423 based counter devices. + +supported family codes: + W1_THERM_DS2423 0x1D + +Author: Mika Laitio + +Description +----------- + +Support is provided through the sysfs w1_slave file. Each open and +read sequence of the w1_slave file initiates a read of the counters and ram +available in DS2423 pages 12 - 15. + +The result of each page is provided as ASCII output where each counter +value and its associated ram buffer is output on its own line. + +Each line will contain the values of the 42 bytes read from the counter and +memory page, along with crc=YES or NO indicating whether the read operation +was successful and the CRC matched. +If the operation was successful, there is also, at the end of each line, +a counter value expressed as an integer after c= + +The meaning of the 42 bytes represented is the following: + - 1 byte from ram page + - 4 bytes for the counter value + - 4 zero bytes + - 2 bytes for crc16 which was calculated from the data read since the previous crc bytes + - 31 remaining bytes from the ram page + - crc=YES/NO indicating whether the read was ok and the crc matched + - c= current counter value + +example of a successful read: +00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2 +00 02 00 00 00 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2 +00 29 c6 5d 18 00 00 00 00 04 37 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=408798761 +00 05 00 00 00 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=YES c=5 + +example of a read with crc errors: +00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2 +00 02 00 00 22 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO +00 e1 61 5d 19 00 00 00 00 df 0b 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO +00 05 00 00 20 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=NO diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index bdeb81ccb5f6..9b7221a86df2 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt @@ -622,9 +622,9 @@ Protocol: 2.08+ The payload may be compressed. The format of both the compressed and uncompressed data should be determined using the standard magic numbers. The currently supported compression formats are gzip - (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA - (magic number 5D 00). The uncompressed payload is currently always ELF - (magic number 7F 45 4C 46).
+ (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA + (magic number 5D 00), and XZ (magic number FD 37). The uncompressed + payload is currently always ELF (magic number 7F 45 4C 46). Field name: payload_length Type: read diff --git a/Documentation/xz.txt b/Documentation/xz.txt new file mode 100644 index 000000000000..2cf3e2608de3 --- /dev/null +++ b/Documentation/xz.txt @@ -0,0 +1,121 @@ + +XZ data compression in Linux +============================ + +Introduction + + XZ is a general purpose data compression format with high compression + ratio and relatively fast decompression. The primary compression + algorithm (filter) is LZMA2. Additional filters can be used to improve + compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters + improve the compression ratio of executable data. + + The XZ decompressor in Linux is called XZ Embedded. It supports + the LZMA2 filter and optionally also BCJ filters. CRC32 is supported + for integrity checking. The home page of XZ Embedded is at + , where you can find the + latest version and also information about using the code outside + the Linux kernel. + + For userspace, XZ Utils provide a zlib-like compression library + and a gzip-like command line tool. XZ Utils can be downloaded from + . + +XZ related components in the kernel + + The xz_dec module provides an XZ decompressor with single-call (buffer + to buffer) and multi-call (stateful) APIs. The usage of the xz_dec + module is documented in include/linux/xz.h. + + The xz_dec_test module is for testing xz_dec. xz_dec_test is not + useful unless you are hacking the XZ decompressor. xz_dec_test + allocates a char device major dynamically to which one can write + .xz files from userspace. The decompressed output is thrown away. + Keep an eye on dmesg to see diagnostics printed by xz_dec_test. + See the xz_dec_test source code for the details. + + For decompressing the kernel image, initramfs, and initrd, there + is a wrapper function in lib/decompress_unxz.c. Its API is the + same as in other decompress_*.c files, which is defined in + include/linux/decompress/generic.h. + + scripts/xz_wrap.sh is a wrapper for the xz command line tool found + in XZ Utils. The wrapper sets compression options to values suitable + for compressing the kernel image. + + For kernel makefiles, two commands are provided for use with + $(call if_changed). The kernel image should be compressed with + $(call if_changed,xzkern) which will use a BCJ filter and a big LZMA2 + dictionary. It will also append a four-byte trailer containing the + uncompressed size of the file, which is needed by the boot code. + Other things should be compressed with $(call if_changed,xzmisc) + which will use no BCJ filter and a 1 MiB LZMA2 dictionary. + +Notes on compression options + + Since XZ Embedded supports only streams with no integrity check or + CRC32, make sure that you don't use some other integrity check type + when encoding files that are supposed to be decoded by the kernel. With + liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 + when encoding. With the xz command line tool, use --check=none or + --check=crc32. + + Using CRC32 is strongly recommended unless there is some other layer + which will verify the integrity of the uncompressed data anyway. + Double checking the integrity would probably be a waste of CPU cycles.
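As a rough userspace sketch of the liblzma advice above (XZ Utils API; preset 6 is an arbitrary choice here, and tuning such as a smaller dictionary via a custom filter chain is left out):

#include <lzma.h>
#include <stdio.h>

/* Set up an encoder whose output the in-kernel decompressor can handle. */
static int init_kernel_friendly_encoder(lzma_stream *strm)
{
	/* only LZMA_CHECK_CRC32 or LZMA_CHECK_NONE are decodable by XZ Embedded */
	lzma_ret ret = lzma_easy_encoder(strm, 6, LZMA_CHECK_CRC32);

	if (ret != LZMA_OK) {
		fprintf(stderr, "lzma_easy_encoder failed: %d\n", (int)ret);
		return -1;
	}
	return 0;
}

The stream is then driven as usual with lzma_code() (LZMA_RUN on input chunks, LZMA_FINISH at the end), starting from an lzma_stream initialized with LZMA_STREAM_INIT.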
+ Note that the headers will always have a CRC32 which will be validated + by the decoder; you can only change the integrity check type (or + disable it) for the actual uncompressed data. + + In userspace, LZMA2 is typically used with dictionary sizes of several + megabytes. The decoder needs to have the dictionary in RAM, thus big + dictionaries cannot be used for files that are intended to be decoded + by the kernel. 1 MiB is probably the maximum reasonable dictionary + size for in-kernel use (maybe more is OK for initramfs). The presets + in XZ Utils may not be optimal when creating files for the kernel, + so don't hesitate to use custom settings. Example: + + xz --check=crc32 --lzma2=dict=512KiB inputfile + + An exception to the above dictionary size limitation is when the decoder + is used in single-call mode. Decompressing the kernel itself is an + example of this situation. In single-call mode, the memory usage + doesn't depend on the dictionary size, and it is perfectly fine to + use a big dictionary: for maximum compression, the dictionary should + be at least as big as the uncompressed data itself. + +Future plans + + Creating a limited XZ encoder may be considered if people think it is + useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at + the fastest settings, so it isn't clear if an LZMA2 encoder is wanted + in the kernel. + + Support for limited random-access reading is planned for the + decompression code. I don't know if it could have any use in the + kernel, but I know that it would be useful in some embedded projects + outside the Linux kernel. + +Conformance to the .xz file format specification + + There are a couple of corner cases where things have been simplified + at the expense of detecting errors as early as possible. These should not + matter in practice at all, since they don't cause security issues. But + it is good to know this if testing the code e.g. with the test files + from XZ Utils. + +Reporting bugs + + Before reporting a bug, please check that it's not already fixed + upstream. See to get the + latest code. + + Report bugs to or visit #tukaani on + Freenode and talk to Larhzu. I don't actively read LKML or other + kernel-related mailing lists, so if there's something I should know, + you should email me personally or use IRC. + + Don't bother Igor Pavlov with questions about the XZ implementation + in the kernel or about XZ Utils. While these two implementations + include essential code that is directly based on Igor Pavlov's code, + these implementations aren't maintained or supported by him.
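To tie this back to the xz_dec API mentioned under "XZ related components in the kernel", a decompression call might be sketched as below (a rough illustration only: the structure and function names follow include/linux/xz.h as referenced above, and details such as looping over streamed input or the exact error handling should be checked against that header):

#include <linux/errno.h>
#include <linux/xz.h>

/* Decompress in_buf into out_buf, which must be large enough; 0 on success. */
static int xz_decompress_buf(const uint8_t *in_buf, size_t in_len,
			     uint8_t *out_buf, size_t out_len)
{
	struct xz_buf b = {
		.in = in_buf, .in_pos = 0, .in_size = in_len,
		.out = out_buf, .out_pos = 0, .out_size = out_len,
	};
	struct xz_dec *s;
	enum xz_ret ret;

	/* multi-call mode with a 1 MiB dictionary cap, per the advice above */
	s = xz_dec_init(XZ_PREALLOC, 1 << 20);
	if (!s)
		return -ENOMEM;

	ret = xz_dec_run(s, &b);	/* whole input and output supplied at once */
	xz_dec_end(s);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}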
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO index 69160779e432..faf976c0c731 100644 --- a/Documentation/zh_CN/HOWTO +++ b/Documentation/zh_CN/HOWTO @@ -347,8 +347,8 @@ bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。 最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里) 或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。 - http://lists.osdl.org/mailman/listinfo/bugme-new - http://lists.osdl.org/mailman/listinfo/bugme-janitors + https://lists.linux-foundation.org/mailman/listinfo/bugme-new + https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors 邮件列表 diff --git a/Documentation/zh_CN/SubmittingDrivers b/Documentation/zh_CN/SubmittingDrivers index c27b0f6cdd39..5889f8df6312 100644 --- a/Documentation/zh_CN/SubmittingDrivers +++ b/Documentation/zh_CN/SubmittingDrivers @@ -61,7 +61,7 @@ Linux 2.4: Linux 2.6: 除了遵循和 2.4 版内核同样的规则外,你还需要在 linux-kernel 邮件 列表上跟踪最新的 API 变化。向 Linux 2.6 内核提交驱动的顶级联系人 - 是 Andrew Morton 。 + 是 Andrew Morton 。 决定设备驱动能否被接受的条件 ---------------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 64d7621ab35b..89e4d4b145bb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -496,7 +496,6 @@ S: Supported F: arch/x86/kernel/microcode_amd.c AMS (Apple Motion Sensor) DRIVER -M: Stelian Pop M: Michael Hanselmann S: Supported F: drivers/macintosh/ams/ @@ -1524,6 +1523,14 @@ S: Supported F: block/bsg.c F: include/linux/bsg.h +BT87X AUDIO DRIVER +M: Clemens Ladisch +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.alsa-project.org/alsa-kernel.git +S: Maintained +F: Documentation/sound/alsa/Bt87x.txt +F: sound/pci/bt87x.c + BT8XXGPIO DRIVER M: Michael Buesch W: http://bu3sch.de/btgpio.php @@ -1549,6 +1556,13 @@ S: Maintained F: Documentation/video4linux/bttv/ F: drivers/media/video/bt8xx/bttv* +C-MEDIA CMI8788 DRIVER +M: Clemens Ladisch +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.alsa-project.org/alsa-kernel.git +S: Maintained +F: sound/pci/oxygen/ + CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS M: David Howells L: linux-cachefs@redhat.com @@ -2008,7 +2022,7 @@ F: drivers/scsi/dc395x.* DCCP PROTOCOL M: Arnaldo Carvalho de Melo L: dccp@vger.kernel.org -W: http://linux-net.osdl.org/index.php/DCCP +W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp S: Maintained F: include/linux/dccp.h F: include/linux/tfrc.h @@ -2340,6 +2354,13 @@ W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/r82600_edac.c +EDIROL UA-101/UA-1000 DRIVER +M: Clemens Ladisch +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.alsa-project.org/alsa-kernel.git +S: Maintained +F: sound/usb/misc/ua101.c + EEEPC LAPTOP EXTRAS DRIVER M: Corentin Chary L: acpi4asus-user@lists.sourceforge.net @@ -2430,7 +2451,7 @@ ETHERNET BRIDGE M: Stephen Hemminger L: bridge@lists.linux-foundation.org L: netdev@vger.kernel.org -W: http://www.linux-foundation.org/en/Net:Bridge +W: http://www.linuxfoundation.org/en/Net:Bridge S: Maintained F: include/linux/netfilter_bridge/ F: net/bridge/ @@ -3508,6 +3529,13 @@ L: linux-serial@vger.kernel.org S: Maintained F: drivers/serial/jsm/ +K10TEMP HARDWARE MONITORING DRIVER +M: Clemens Ladisch +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/k10temp +F: drivers/hwmon/k10temp.c + K8TEMP HARDWARE MONITORING DRIVER M: Rudolf Marek L: lm-sensors@lm-sensors.org @@ -4093,9 +4121,8 @@ F: include/linux/module.h F: kernel/module.c MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER -M: Stelian Pop W: http://popies.net/meye/ -S: Maintained +S: 
Orphan F: Documentation/video4linux/meye.txt F: drivers/media/video/meye.* F: include/linux/meye.h @@ -4539,6 +4566,13 @@ F: drivers/of F: include/linux/of*.h K: of_get_property +OPL4 DRIVER +M: Clemens Ladisch +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.alsa-project.org/alsa-kernel.git +S: Maintained +F: sound/drivers/opl4/ + OPROFILE M: Robert Richter L: oprofile-list@lists.sf.net @@ -4635,7 +4669,7 @@ M: Jeremy Fitzhardinge M: Chris Wright M: Alok Kataria M: Rusty Russell -L: virtualization@lists.osdl.org +L: virtualization@lists.linux-foundation.org S: Supported F: Documentation/ia64/paravirt_ops.txt F: arch/*/kernel/paravirt* @@ -5133,11 +5167,6 @@ F: kernel/rcu* F: kernel/srcu* X: kernel/rcutorture.c -REAL TIME CLOCK DRIVER (LEGACY) -M: Paul Gortmaker -S: Maintained -F: drivers/char/rtc.c - REAL TIME CLOCK (RTC) SUBSYSTEM M: Alessandro Zummo L: rtc-linux@googlegroups.com @@ -5243,8 +5272,7 @@ S: Supported F: drivers/s390/net/ S390 ZCRYPT DRIVER -M: Felix Beck -M: Ralph Wuerthner +M: Holger Dengler M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -5290,7 +5318,7 @@ SAMSUNG AUDIO (ASoC) DRIVERS M: Jassi Brar L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported -F: sound/soc/s3c24xx +F: sound/soc/samsung TIMEKEEPING, NTP M: John Stultz @@ -6298,6 +6326,13 @@ S: Maintained W: http://www.one-eyed-alien.net/~mdharm/linux-usb/ F: drivers/usb/storage/ +USB MIDI DRIVER +M: Clemens Ladisch +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.alsa-project.org/alsa-kernel.git +S: Maintained +F: sound/usb/midi.* + USB OHCI DRIVER M: David Brownell L: linux-usb@vger.kernel.org @@ -6537,7 +6572,7 @@ F: include/linux/virtio_console.h VIRTIO HOST (VHOST) M: "Michael S. 
Tsirkin" L: kvm@vger.kernel.org -L: virtualization@lists.osdl.org +L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org S: Maintained F: drivers/vhost/ @@ -6556,13 +6591,12 @@ F: Documentation/i2c/busses/i2c-viapro F: drivers/i2c/busses/i2c-viapro.c VIA SD/MMC CARD CONTROLLER DRIVER -M: Joseph Chan +M: Bruce Chang M: Harald Welte S: Maintained F: drivers/mmc/host/via-sdmmc.c VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER -M: Joseph Chan M: Florian Tobias Schandinat L: linux-fbdev@vger.kernel.org S: Maintained @@ -6587,7 +6621,7 @@ F: net/8021q/ VLYNQ BUS M: Florian Fainelli -L: openwrt-devel@lists.openwrt.org +L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Maintained F: drivers/vlynq/vlynq.c F: include/linux/vlynq.h @@ -6807,7 +6841,7 @@ XEN HYPERVISOR INTERFACE M: Jeremy Fitzhardinge M: Konrad Rzeszutek Wilk L: xen-devel@lists.xensource.com (moderated for non-subscribers) -L: virtualization@lists.osdl.org +L: virtualization@lists.linux-foundation.org S: Supported F: arch/x86/xen/ F: drivers/*/xen-*front.c diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h index 99c56d47879d..72db984f8781 100644 --- a/arch/alpha/include/asm/mman.h +++ b/arch/alpha/include/asm/mman.h @@ -53,6 +53,9 @@ #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 0f1d8493cfca..c1f3e7cb82a4 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -506,7 +506,7 @@ set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2aef8a5a3661..5cff165b7eb0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1277,7 +1277,7 @@ config SMP config SMP_ON_UP bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" depends on EXPERIMENTAL - depends on SMP && !XIP + depends on SMP && !XIP_KERNEL default y help SMP kernels contain instructions which fail on non-SMP processors. diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 978e83217311..fcddd48fe9da 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -240,7 +240,7 @@ static struct resource it8152_mem = { /* * The following functions are needed for DMA bouncing. - * ITE8152 chip can addrees up to 64MByte, so all the devices + * ITE8152 chip can address up to 64MByte, so all the devices * connected to ITE8152 (PCI and USB) should have limited DMA window */ diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 9dce36e372ff..ae5fe7292e0d 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c @@ -70,7 +70,7 @@ static inline struct vic_device *to_vic(struct sys_device *sys) * vic_init2 - common initialisation code * @base: Base of the VIC. * - * Common initialisation code for registeration + * Common initialisation code for registration * and resume. 
*/ static void vic_init2(void __iomem *base) diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 338ff19ae447..7b1bb2bbaf88 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -285,7 +285,7 @@ static inline int fls(int x) if (__builtin_constant_p(x)) return constant_fls(x); - asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc"); + asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); ret = 32 - ret; return ret; } diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index a84628be1a7b..c8e6ddf3e860 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -115,4 +115,6 @@ static inline void init_fixed_sched_clock(struct clock_data *cd, } } +extern void sched_clock_postinit(void); + #endif diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index bbecaac1e013..8f57515bbdb0 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -60,6 +60,8 @@ str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x" str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n" str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n" .align +#else + b __error #endif /* diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 0c1bb68ff4a8..2cfe8161b478 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -38,17 +38,9 @@ #ifdef CONFIG_MMU void *module_alloc(unsigned long size) { - struct vm_struct *area; - - size = PAGE_ALIGN(size); - if (!size) - return NULL; - - area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); - if (!area) - return NULL; - - return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, -1, + __builtin_return_address(0)); } #else /* CONFIG_MMU */ void *module_alloc(unsigned long size) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index e76fcaadce03..94bbedbed639 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -483,6 +483,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } +#ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the * atomic helpers and the signal restart code. Let's declare a mapping @@ -503,3 +504,4 @@ const char *arch_vma_name(struct vm_area_struct *vma) { return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; } +#endif diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 2cdcc9287c74..9a46370fe9da 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -34,7 +34,7 @@ void __init init_sched_clock(struct clock_data *cd, void (*update)(void), sched_clock_update_fn = update; /* calculate the mult/shift to convert counter ticks to ns. */ - clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60); + clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0); r = rate; if (r >= 4000000) { @@ -60,10 +60,15 @@ void __init init_sched_clock(struct clock_data *cd, void (*update)(void), * sets the initial epoch. 
*/ sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); - sched_clock_poll(sched_clock_timer.data); + update(); /* * Ensure that sched_clock() starts off at 0ns */ cd->epoch_ns = 0; } + +void __init sched_clock_postinit(void) +{ + sched_clock_poll(sched_clock_timer.data); +} diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3455ad33de4c..420b8d6485d6 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -518,25 +518,21 @@ setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) #endif } -static void __init -request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) +static void __init request_standard_resources(struct machine_desc *mdesc) { + struct memblock_region *region; struct resource *res; - int i; kernel_code.start = virt_to_phys(_text); kernel_code.end = virt_to_phys(_etext - 1); kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); - for (i = 0; i < mi->nr_banks; i++) { - if (mi->bank[i].size == 0) - continue; - + for_each_memblock(memory, region) { res = alloc_bootmem_low(sizeof(*res)); res->name = "System RAM"; - res->start = mi->bank[i].start; - res->end = mi->bank[i].start + mi->bank[i].size - 1; + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); @@ -650,15 +646,17 @@ static int __init parse_tag_revision(const struct tag *tag) __tagtable(ATAG_REVISION, parse_tag_revision); -#ifndef CONFIG_CMDLINE_FORCE static int __init parse_tag_cmdline(const struct tag *tag) { +#ifndef CONFIG_CMDLINE_FORCE strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); +#else + pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); +#endif /* CONFIG_CMDLINE_FORCE */ return 0; } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); -#endif /* CONFIG_CMDLINE_FORCE */ /* * Scan the tag table for this tag, and call its parse function. @@ -857,7 +855,7 @@ void __init setup_arch(char **cmdline_p) arm_memblock_init(&meminfo, mdesc); paging_init(mdesc); - request_standard_resources(&meminfo, mdesc); + request_standard_resources(mdesc); #ifdef CONFIG_SMP if (is_smp()) diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index dd790745b3ef..fd9156698ab9 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -114,7 +114,7 @@ static void __cpuinit twd_calibrate_rate(void) twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, - (twd_timer_rate / 100000) % 100); + (twd_timer_rate / 1000000) % 100); } load = twd_timer_rate / HZ; diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index c2e112e1a05f..381d23a497c1 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -94,10 +94,13 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) if (tsk != current) { #ifdef CONFIG_SMP /* - * What guarantees do we have here that 'tsk' - * is not running on another CPU? + * What guarantees do we have here that 'tsk' is not + * running on another CPU? For now, ignore it as we + * can't guarantee we won't explode. 
*/ - BUG(); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; + return; #else data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index f1e2eb19a67d..3d76bf233734 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -163,5 +164,8 @@ void __init time_init(void) { system_timer = machine_desc->timer; system_timer->init(); +#ifdef CONFIG_HAVE_SCHED_CLOCK + sched_clock_postinit(); +#endif } diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S index 8d6a8762ab88..3c9a05c8d20b 100644 --- a/arch/arm/lib/delay.S +++ b/arch/arm/lib/delay.S @@ -25,11 +25,15 @@ ENTRY(__udelay) ldr r2, .LC1 mul r0, r2, r0 ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06 + mov r1, #-1 ldr r2, .LC0 ldr r2, [r2] @ max = 0x01ffffff + add r0, r0, r1, lsr #32-14 mov r0, r0, lsr #14 @ max = 0x0001ffff + add r2, r2, r1, lsr #32-10 mov r2, r2, lsr #10 @ max = 0x00007fff mul r0, r2, r0 @ max = 2^32-1 + add r0, r0, r1, lsr #32-6 movs r0, r0, lsr #6 moveq pc, lr diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c index 7b58c948a957..de2fd04e7c8a 100644 --- a/arch/arm/mach-at91/board-ecbat91.c +++ b/arch/arm/mach-at91/board-ecbat91.c @@ -128,17 +128,17 @@ static struct spi_board_info __initdata ecb_at91spi_devices[] = { .platform_data = &my_flash0_platform, #endif }, - { /* User accessable spi - cs1 (250KHz) */ + { /* User accessible spi - cs1 (250KHz) */ .modalias = "spi-cs1", .chip_select = 1, .max_speed_hz = 250 * 1000, }, - { /* User accessable spi - cs2 (1MHz) */ + { /* User accessible spi - cs2 (1MHz) */ .modalias = "spi-cs2", .chip_select = 2, .max_speed_hz = 1 * 1000 * 1000, }, - { /* User accessable spi - cs3 (10MHz) */ + { /* User accessible spi - cs3 (10MHz) */ .modalias = "spi-cs3", .chip_select = 3, .max_speed_hz = 10 * 1000 * 1000, diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index dafbacc25eb1..ea53f4d9b283 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -301,7 +301,7 @@ static void at91_pm_end(void) } -static struct platform_suspend_ops at91_pm_ops ={ +static const struct platform_suspend_ops at91_pm_ops = { .valid = at91_pm_valid_state, .begin = at91_pm_begin, .enter = at91_pm_enter, diff --git a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c index b3a61d860c65..96273ff34956 100644 --- a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c +++ b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c @@ -757,7 +757,7 @@ static int chipcHw_divide(int num, int denom) t = t << 1; } - /* Intialize the result */ + /* Initialize the result */ r = 0; do { diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c index 7b9bac2d79a5..6b9be2e98e51 100644 --- a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c +++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c @@ -893,7 +893,7 @@ int dmacHw_setDataDescriptor(dmacHw_CONFIG_t *pConfig, /* [ IN ] Configuration */ /****************************************************************************/ uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle, /* [ IN ] DMA Channel handle */ - dmacHw_CONTROLLER_ATTRIB_e attr /* [ IN ] DMA Controler attribute of type dmacHw_CONTROLLER_ATTRIB_e */ + dmacHw_CONTROLLER_ATTRIB_e attr /* [ IN ] DMA Controller attribute of type dmacHw_CONTROLLER_ATTRIB_e */ ) { dmacHw_CBLK_t *pCblk = dmacHw_HANDLE_TO_CBLK(handle); diff --git 
a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c index ff7b436d0935..77f84b40dda9 100644 --- a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c +++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c @@ -316,7 +316,7 @@ static void DisplayDescRing(void *pDescriptor, /* [ IN ] Descriptor buffer */ /** * @brief Check if DMA channel is the flow controller * -* @return 1 : If DMA is a flow controler +* @return 1 : If DMA is a flow controller * 0 : Peripheral is the flow controller * * @note diff --git a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c index 5c1c9a0e5ed2..16225e43f3c3 100644 --- a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c +++ b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c @@ -558,7 +558,7 @@ static int tmrHw_divide(int num, int denom) t = t << 1; } - /* Intialize the result */ + /* Initialize the result */ r = 0; do { diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c index 77eb35c89cd0..8d1baf3f4683 100644 --- a/arch/arm/mach-bcmring/dma.c +++ b/arch/arm/mach-bcmring/dma.c @@ -671,7 +671,7 @@ static int ConfigChannel(DMA_Handle_t handle) /****************************************************************************/ /** -* Intializes all of the data structures associated with the DMA. +* Initializes all of the data structures associated with the DMA. * @return * >= 0 - Initialization was successfull. * diff --git a/arch/arm/mach-bcmring/include/csp/dmacHw.h b/arch/arm/mach-bcmring/include/csp/dmacHw.h index 5d510130a25f..6c8da2b9fc1f 100644 --- a/arch/arm/mach-bcmring/include/csp/dmacHw.h +++ b/arch/arm/mach-bcmring/include/csp/dmacHw.h @@ -590,7 +590,7 @@ void dmacHw_printDebugInfo(dmacHw_HANDLE_t handle, /* [ IN ] DMA Channel handle */ /****************************************************************************/ uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle, /* [ IN ] DMA Channel handle */ - dmacHw_CONTROLLER_ATTRIB_e attr /* [ IN ] DMA Controler attribute of type dmacHw_CONTROLLER_ATTRIB_e */ + dmacHw_CONTROLLER_ATTRIB_e attr /* [ IN ] DMA Controller attribute of type dmacHw_CONTROLLER_ATTRIB_e */ ); #endif /* _DMACHW_H */ diff --git a/arch/arm/mach-bcmring/include/csp/tmrHw.h b/arch/arm/mach-bcmring/include/csp/tmrHw.h index f1236d00cb97..2cbb530db8ea 100644 --- a/arch/arm/mach-bcmring/include/csp/tmrHw.h +++ b/arch/arm/mach-bcmring/include/csp/tmrHw.h @@ -76,7 +76,7 @@ tmrHw_RATE_t tmrHw_setPeriodicTimerRate(tmrHw_ID_t timerId, /* [ IN ] Timer Id * certain time interval * * This function initializes a periodic timer to generate timer interrupt -* after every time interval in milisecond +* after every time interval in millisecond * * @return On success: Effective interval set in mili-second * On failure: 0 @@ -93,7 +93,7 @@ tmrHw_INTERVAL_t tmrHw_setPeriodicTimerInterval(tmrHw_ID_t timerId, /* [ IN ] T * after certain time interval * * This function initializes a periodic timer to generate a single ticks after -* certain time interval in milisecond +* certain time interval in millisecond * * @return On success: Effective interval set in mili-second * On failure: 0 diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h index cbf334d1c761..d67e2f8c22de 100644 --- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h +++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h @@ -28,7 +28,7 @@ /* Data type for DMA Link List Item */ typedef struct { - uint32_t sar; /* Source Adress Register. 
+ uint32_t sar; /* Source Address Register. Address must be aligned to CTLx.SRC_TR_WIDTH. */ uint32_t dar; /* Destination Address Register. Address must be aligned to CTLx.DST_TR_WIDTH. */ diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h index 891cea87e333..f1ecf96f2da5 100644 --- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h +++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h @@ -35,7 +35,7 @@ typedef struct { /* Data type representing DMA channel registers */ typedef struct { - dmacHw_REG64_t ChannelSar; /* Source Adress Register. 64 bits (upper 32 bits are reserved) + dmacHw_REG64_t ChannelSar; /* Source Address Register. 64 bits (upper 32 bits are reserved) Address must be aligned to CTLx.SRC_TR_WIDTH. */ dmacHw_REG64_t ChannelDar; /* Destination Address Register.64 bits (upper 32 bits are reserved) diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c index fab953b43dea..1bd73a04be20 100644 --- a/arch/arm/mach-davinci/pm.c +++ b/arch/arm/mach-davinci/pm.c @@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_state_t state) return ret; } -static struct platform_suspend_ops davinci_pm_ops = { +static const struct platform_suspend_ops davinci_pm_ops = { .enter = davinci_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h index 213a4fcfeb1c..8c950e1d06be 100644 --- a/arch/arm/mach-gemini/include/mach/hardware.h +++ b/arch/arm/mach-gemini/include/mach/hardware.h @@ -33,7 +33,7 @@ #define GEMINI_LPC_HOST_BASE 0x47000000 #define GEMINI_LPC_IO_BASE 0x47800000 #define GEMINI_INTERRUPT_BASE 0x48000000 -/* TODO: Different interrupt controlers when SMP +/* TODO: Different interrupt controllers when SMP * #define GEMINI_INTERRUPT0_BASE 0x48000000 * #define GEMINI_INTERRUPT1_BASE 0x49000000 */ diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c index 79f0b896e446..629454d71c8d 100644 --- a/arch/arm/mach-h720x/h7201-eval.c +++ b/arch/arm/mach-h720x/h7201-eval.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include "common.h" diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c index cc28b1efe047..e9f46b696354 100644 --- a/arch/arm/mach-h720x/h7202-eval.c +++ b/arch/arm/mach-h720x/h7202-eval.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c index f667a262dfc1..505614803bc6 100644 --- a/arch/arm/mach-imx/mach-pcm038.c +++ b/arch/arm/mach-imx/mach-pcm038.c @@ -254,10 +254,10 @@ static struct regulator_init_data cam_data = { static struct mc13783_regulator_init_data pcm038_regulators[] = { { - .id = MC13783_REGU_VCAM, + .id = MC13783_REG_VCAM, .init_data = &cam_data, }, { - .id = MC13783_REGU_VMMC1, + .id = MC13783_REG_VMMC1, .init_data = &sdhc1_data, }, }; diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c index 6bf81ceea137..acf17691d2cc 100644 --- a/arch/arm/mach-imx/pm-imx27.c +++ b/arch/arm/mach-imx/pm-imx27.c @@ -32,7 +32,7 @@ static int mx27_suspend_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops mx27_suspend_ops = { +static const struct platform_suspend_ops mx27_suspend_ops = { .enter = mx27_suspend_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c index a3fbcb3adc29..fbb457779895 
100644 --- a/arch/arm/mach-integrator/cpu.c +++ b/arch/arm/mach-integrator/cpu.c @@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu) if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; - } else if (machine_is_cintegrator()) { + } else { vco.s = 1; } vco.v = cm_osc & 255; diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index c9d77fad10ab..cfcca4174e25 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -171,7 +171,7 @@ static void __init openrd_init(void) kirkwood_i2c_init(); - if (machine_is_openrd_client()) { + if (machine_is_openrd_client() || machine_is_openrd_ultimate()) { i2c_register_board_info(0, i2c_board_info, ARRAY_SIZE(i2c_board_info)); kirkwood_audio_init(); diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c index a6e2aed9a49f..e76d41bb7056 100644 --- a/arch/arm/mach-lpc32xx/pm.c +++ b/arch/arm/mach-lpc32xx/pm.c @@ -123,7 +123,7 @@ static int lpc32xx_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops lpc32xx_pm_ops = { +static const struct platform_suspend_ops lpc32xx_pm_ops = { .valid = suspend_valid_only_mem, .enter = lpc32xx_pm_enter, }; diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S index 4dc99aa65d07..12467157afb9 100644 --- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S +++ b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S @@ -26,7 +26,7 @@ * The interrupt numbering scheme is defined in the * interrupt controller spec. To wit: * - * Migrated the code from ARM MP port to be more consistant + * Migrated the code from ARM MP port to be more consistent * with interrupt processing , the following still holds true * however, all interrupts are treated the same regardless of * if they are local IPI or PPI diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 800f327a7ecc..1260007a9dd1 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c @@ -154,7 +154,7 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { if (mtype == MT_DEVICE) { /* The peripherals in the 88000000 - D0000000 range - * are only accessable by type MT_DEVICE_NONSHARED. + * are only accessible by type MT_DEVICE_NONSHARED. * Adjust mtype as necessary to make this "just work." */ if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000)) diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c index 6b4aa2c47107..0d65db885be7 100644 --- a/arch/arm/mach-mx3/mach-mx31_3ds.c +++ b/arch/arm/mach-mx3/mach-mx31_3ds.c @@ -140,10 +140,10 @@ static struct regulator_init_data gpo_init = { static struct mc13783_regulator_init_data mx31_3ds_regulators[] = { { - .id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */ + .id = MC13783_REG_PWGT1SPI, /* Power Gate for ARM core. */ .init_data = &pwgtx_init, }, { - .id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */ + .id = MC13783_REG_PWGT2SPI, /* Power Gate for L2 Cache. 
*/ .init_data = &pwgtx_init, }, { diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c index 203d21a510aa..1aa8d65fccbb 100644 --- a/arch/arm/mach-mx3/mach-mx31moboard.c +++ b/arch/arm/mach-mx3/mach-mx31moboard.c @@ -216,11 +216,11 @@ static struct regulator_init_data cam_vreg_data = { static struct mc13783_regulator_init_data moboard_regulators[] = { { - .id = MC13783_REGU_VMMC1, + .id = MC13783_REG_VMMC1, .init_data = &sdhc_vreg_data, }, { - .id = MC13783_REGU_VCAM, + .id = MC13783_REG_VCAM, .init_data = &cam_vreg_data, }, }; diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c index 0cca23a85175..98ba9784aa15 100644 --- a/arch/arm/mach-omap1/pm.c +++ b/arch/arm/mach-omap1/pm.c @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = { -static struct platform_suspend_ops omap_pm_ops ={ +static const struct platform_suspend_ops omap_pm_ops = { .prepare = omap_pm_prepare, .enter = omap_pm_enter, .finish = omap_pm_finish, diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index cd7332f50b2d..1c0c2b02d870 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -187,16 +187,19 @@ obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \ hsmmc.o obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o \ board-zoom-peripherals.o \ + board-zoom-display.o \ board-flash.o \ hsmmc.o \ board-zoom-debugboard.o obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o \ board-zoom-peripherals.o \ + board-zoom-display.o \ board-flash.o \ hsmmc.o \ board-zoom-debugboard.o obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o \ board-zoom-peripherals.o \ + board-zoom-display.o \ board-flash.o \ hsmmc.o obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o \ diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index 3b39ef1a680a..d4e41ef86aa5 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -38,6 +38,7 @@ #include #include #include +#include #include @@ -270,13 +271,18 @@ static struct omap_dss_device sdp3430_lcd_device = { .platform_disable = sdp3430_panel_disable_lcd, }; +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = sdp3430_panel_enable_dvi, + .platform_disable = sdp3430_panel_disable_dvi, +}; + static struct omap_dss_device sdp3430_dvi_device = { .name = "dvi", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = sdp3430_panel_enable_dvi, - .platform_disable = sdp3430_panel_disable_dvi, }; static struct omap_dss_device sdp3430_tv_device = { diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c index 5d41dbe059a3..62645640f5e4 100644 --- a/arch/arm/mach-omap2/board-3630sdp.c +++ b/arch/arm/mach-omap2/board-3630sdp.c @@ -207,6 +207,7 @@ static void __init omap_sdp_init(void) { omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); zoom_peripherals_init(); + zoom_display_init(); board_smc91x_init(); board_flash_init(sdp_flash_partitions, chip_sel_sdp); enable_board_wakeup_source(); diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index a70bdf28e2bc..07d1b20b1148 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -554,6 +554,7 @@ static void __init omap_sfh7741prox_init(void) #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | 
OMAP_PIN_OUTPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #else @@ -576,11 +577,12 @@ static void __init omap_4430sdp_init(void) omap4_twl6030_hsmmc_init(mmc); /* Power on the ULPI PHY */ - if (gpio_is_valid(OMAP4SDP_MDM_PWR_EN_GPIO)) { - /* FIXME: Assumes pad is already muxed for GPIO mode */ - gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3"); + status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3"); + if (status) + pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__); + else gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1); - } + usb_ehci_init(&ehci_pdata); usb_musb_init(&musb_board_data); diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c index bc1562648020..10d60b7743cf 100644 --- a/arch/arm/mach-omap2/board-am3517evm.c +++ b/arch/arm/mach-omap2/board-am3517evm.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mux.h" #include "control.h" @@ -303,13 +304,18 @@ static void am3517_evm_panel_disable_lcd(struct omap_dss_device *dssdev) lcd_enabled = 0; } +static struct panel_generic_dpi_data lcd_panel = { + .name = "sharp_lq", + .platform_enable = am3517_evm_panel_enable_lcd, + .platform_disable = am3517_evm_panel_disable_lcd, +}; + static struct omap_dss_device am3517_evm_lcd_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "lcd", - .driver_name = "sharp_lq_panel", + .driver_name = "generic_dpi_panel", + .data = &lcd_panel, .phy.dpi.data_lines = 16, - .platform_enable = am3517_evm_panel_enable_lcd, - .platform_disable = am3517_evm_panel_disable_lcd, }; static int am3517_evm_panel_enable_tv(struct omap_dss_device *dssdev) @@ -346,13 +352,18 @@ static void am3517_evm_panel_disable_dvi(struct omap_dss_device *dssdev) dvi_enabled = 0; } +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = am3517_evm_panel_enable_dvi, + .platform_disable = am3517_evm_panel_disable_dvi, +}; + static struct omap_dss_device am3517_evm_dvi_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "dvi", - .driver_name = "generic_panel", + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = am3517_evm_panel_enable_dvi, - .platform_disable = am3517_evm_panel_disable_dvi, }; static struct omap_dss_device *am3517_evm_dss_devices[] = { diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 486a3de5f401..dac141610666 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -351,22 +352,32 @@ static void cm_t35_panel_disable_tv(struct omap_dss_device *dssdev) { } +static struct panel_generic_dpi_data lcd_panel = { + .name = "toppoly_tdo35s", + .platform_enable = cm_t35_panel_enable_lcd, + .platform_disable = cm_t35_panel_disable_lcd, +}; + static struct omap_dss_device cm_t35_lcd_device = { .name = "lcd", - .driver_name = "toppoly_tdo35s_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &lcd_panel, .phy.dpi.data_lines = 18, - .platform_enable = cm_t35_panel_enable_lcd, - .platform_disable = cm_t35_panel_disable_lcd, +}; + +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = cm_t35_panel_enable_dvi, + .platform_disable = cm_t35_panel_disable_dvi, }; static struct omap_dss_device cm_t35_dvi_device = { .name = "dvi", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = 
&dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = cm_t35_panel_enable_dvi, - .platform_disable = cm_t35_panel_disable_dvi, }; static struct omap_dss_device cm_t35_tv_device = { diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 451e7ff08b18..00bb1fc5e017 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -149,23 +150,32 @@ static struct regulator_consumer_supply devkit8000_vmmc1_supply = static struct regulator_consumer_supply devkit8000_vio_supply = REGULATOR_SUPPLY("vcc", "spi2.0"); +static struct panel_generic_dpi_data lcd_panel = { + .name = "generic", + .platform_enable = devkit8000_panel_enable_lcd, + .platform_disable = devkit8000_panel_disable_lcd, +}; + static struct omap_dss_device devkit8000_lcd_device = { .name = "lcd", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &lcd_panel, .phy.dpi.data_lines = 24, - .reset_gpio = -EINVAL, /* will be replaced */ - .platform_enable = devkit8000_panel_enable_lcd, - .platform_disable = devkit8000_panel_disable_lcd, }; + +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = devkit8000_panel_enable_dvi, + .platform_disable = devkit8000_panel_disable_dvi, +}; + static struct omap_dss_device devkit8000_dvi_device = { .name = "dvi", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .reset_gpio = -EINVAL, /* will be replaced */ - .platform_enable = devkit8000_panel_enable_dvi, - .platform_disable = devkit8000_panel_disable_dvi, }; static struct omap_dss_device devkit8000_tv_device = { diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 0afa3011db0f..3be85a1f55f4 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include #include "mux.h" @@ -459,13 +461,18 @@ static void igep2_disable_dvi(struct omap_dss_device *dssdev) gpio_direction_output(IGEP2_GPIO_DVI_PUP, 0); } +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = igep2_enable_dvi, + .platform_disable = igep2_disable_dvi, +}; + static struct omap_dss_device igep2_dvi_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "dvi", - .driver_name = "generic_panel", + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = igep2_enable_dvi, - .platform_disable = igep2_disable_dvi, }; static struct omap_dss_device *igep2_dss_devices[] = { @@ -535,6 +542,37 @@ static struct twl4030_codec_data igep2_codec_data = { .audio = &igep2_audio_data, }; +static int igep2_keymap[] = { + KEY(0, 0, KEY_LEFT), + KEY(0, 1, KEY_RIGHT), + KEY(0, 2, KEY_A), + KEY(0, 3, KEY_B), + KEY(1, 0, KEY_DOWN), + KEY(1, 1, KEY_UP), + KEY(1, 2, KEY_E), + KEY(1, 3, KEY_F), + KEY(2, 0, KEY_ENTER), + KEY(2, 1, KEY_I), + KEY(2, 2, KEY_J), + KEY(2, 3, KEY_K), + KEY(3, 0, KEY_M), + KEY(3, 1, KEY_N), + KEY(3, 2, KEY_O), + KEY(3, 3, KEY_P) +}; + +static struct matrix_keymap_data igep2_keymap_data = { + .keymap = igep2_keymap, + .keymap_size = ARRAY_SIZE(igep2_keymap), +}; + +static struct twl4030_keypad_data igep2_keypad_pdata = { + .keymap_data = 
&igep2_keymap_data, + .rows = 4, + .cols = 4, + .rep = 1, +}; + static struct twl4030_platform_data igep2_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = TWL4030_IRQ_END, @@ -543,6 +581,7 @@ static struct twl4030_platform_data igep2_twldata = { .usb = &igep2_usb_data, .codec = &igep2_codec_data, .gpio = &igep2_twl4030_gpio_pdata, + .keypad = &igep2_keypad_pdata, .vmmc1 = &igep2_vmmc1, .vpll2 = &igep2_vpll2, .vio = &igep2_vio, diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c index bcccd68f1856..4dc62a9b9cb2 100644 --- a/arch/arm/mach-omap2/board-igep0030.c +++ b/arch/arm/mach-omap2/board-igep0030.c @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -43,7 +44,7 @@ #define IGEP3_GPIO_WIFI_NRESET 139 #define IGEP3_GPIO_BT_NRESET 137 -#define IGEP3_GPIO_USBH_NRESET 115 +#define IGEP3_GPIO_USBH_NRESET 183 #if defined(CONFIG_MTD_ONENAND_OMAP2) || \ @@ -103,7 +104,7 @@ static struct platform_device igep3_onenand_device = { }, }; -void __init igep3_flash_init(void) +static void __init igep3_flash_init(void) { u8 cs = 0; u8 onenandcs = GPMC_CS_NUM + 1; @@ -137,12 +138,11 @@ void __init igep3_flash_init(void) } #else -void __init igep3_flash_init(void) {} +static void __init igep3_flash_init(void) {} #endif -static struct regulator_consumer_supply igep3_vmmc1_supply = { - .supply = "vmmc", -}; +static struct regulator_consumer_supply igep3_vmmc1_supply = + REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"); /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ static struct regulator_init_data igep3_vmmc1 = { @@ -159,6 +159,52 @@ static struct regulator_init_data igep3_vmmc1 = { .consumer_supplies = &igep3_vmmc1_supply, }; +static struct regulator_consumer_supply igep3_vio_supply = + REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1"); + +static struct regulator_init_data igep3_vio = { + .constraints = { + .min_uV = 1800000, + .max_uV = 1800000, + .apply_uV = 1, + .valid_modes_mask = REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE + | REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &igep3_vio_supply, +}; + +static struct regulator_consumer_supply igep3_vmmc2_supply = + REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"); + +static struct regulator_init_data igep3_vmmc2 = { + .constraints = { + .valid_modes_mask = REGULATOR_MODE_NORMAL, + .always_on = 1, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &igep3_vmmc2_supply, +}; + +static struct fixed_voltage_config igep3_vwlan = { + .supply_name = "vwlan", + .microvolts = 3300000, + .gpio = -EINVAL, + .enabled_at_boot = 1, + .init_data = &igep3_vmmc2, +}; + +static struct platform_device igep3_vwlan_device = { + .name = "reg-fixed-voltage", + .id = 0, + .dev = { + .platform_data = &igep3_vwlan, + }, +}; + static struct omap2_hsmmc_info mmc[] = { [0] = { .mmc = 1, @@ -254,12 +300,6 @@ static int igep3_twl4030_gpio_setup(struct device *dev, mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); - /* - * link regulators to MMC adapters ... we "know" the - * regulators will be set up only *after* we return. 
- */ - igep3_vmmc1_supply.dev = mmc[0].dev; - /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0) @@ -287,6 +327,10 @@ static struct twl4030_usb_data igep3_twl4030_usb_data = { .usb_mode = T2_USB_MODE_ULPI, }; +static struct platform_device *igep3_devices[] __initdata = { + &igep3_vwlan_device, +}; + static void __init igep3_init_irq(void) { omap2_init_common_infrastructure(); @@ -303,6 +347,7 @@ static struct twl4030_platform_data igep3_twl4030_pdata = { .usb = &igep3_twl4030_usb_data, .gpio = &igep3_twl4030_gpio_pdata, .vmmc1 = &igep3_vmmc1, + .vio = &igep3_vio, }; static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = { @@ -363,8 +408,20 @@ static void __init igep3_wifi_bt_init(void) void __init igep3_wifi_bt_init(void) {} #endif +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { + .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, + .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + + .phy_reset = true, + .reset_gpio_port[0] = -EINVAL, + .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET, + .reset_gpio_port[2] = -EINVAL, +}; + #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #endif @@ -375,9 +432,10 @@ static void __init igep3_init(void) /* Register I2C busses and drivers */ igep3_i2c_init(); - + platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices)); omap_serial_init(); usb_musb_init(&musb_board_data); + usb_ehci_init(&ehci_pdata); igep3_flash_init(); igep3_leds_init(); @@ -392,6 +450,7 @@ static void __init igep3_init(void) MACHINE_START(IGEP0030, "IGEP OMAP3 module") .boot_params = 0x80000100, + .reserve = omap_reserve, .map_io = omap3_map_io, .init_irq = igep3_init_irq, .init_machine = igep3_init, diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 6c127605942f..46d814ab5656 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -194,14 +195,19 @@ static void beagle_disable_dvi(struct omap_dss_device *dssdev) gpio_set_value(dssdev->reset_gpio, 0); } +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = beagle_enable_dvi, + .platform_disable = beagle_disable_dvi, +}; + static struct omap_dss_device beagle_dvi_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "dvi", - .driver_name = "generic_panel", + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .reset_gpio = 170, - .platform_enable = beagle_enable_dvi, - .platform_disable = beagle_disable_dvi, + .reset_gpio = -EINVAL, }; static struct omap_dss_device beagle_tv_device = { @@ -273,6 +279,8 @@ static struct gpio_led gpio_leds[]; static int beagle_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int r; + if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { mmc[0].gpio_wp = -EINVAL; } else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) || @@ -293,17 +301,63 @@ static int beagle_twl_gpio_setup(struct device *dev, /* REVISIT: need ehci-omap hooks for external VBUS * power switch and overcurrent detect */ + if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) { + r = gpio_request(gpio + 1, "EHCI_nOC"); + if (!r) { + r = 
gpio_direction_input(gpio + 1); + if (r) + gpio_free(gpio + 1); + } + if (r) + pr_err("%s: unable to configure EHCI_nOC\n", __func__); + } - gpio_request(gpio + 1, "EHCI_nOC"); - gpio_direction_input(gpio + 1); - - /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */ + /* + * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active + * high / others active low) + */ gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR"); - gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); + if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) + gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1); + else + gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); + + /* DVI reset GPIO is different between beagle revisions */ + if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) + beagle_dvi_device.reset_gpio = 129; + else + beagle_dvi_device.reset_gpio = 170; /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; + /* + * gpio + 1 on Xm controls the TFP410's enable line (active low) + * gpio + 2 control varies depending on the board rev as follows: + * P7/P8 revisions(prototype): Camera EN + * A2+ revisions (production): LDO (supplies DVI, serial, led blocks) + */ + if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { + r = gpio_request(gpio + 1, "nDVI_PWR_EN"); + if (!r) { + r = gpio_direction_output(gpio + 1, 0); + if (r) + gpio_free(gpio + 1); + } + if (r) + pr_err("%s: unable to configure nDVI_PWR_EN\n", + __func__); + r = gpio_request(gpio + 2, "DVI_LDO_EN"); + if (!r) { + r = gpio_direction_output(gpio + 2, 1); + if (r) + gpio_free(gpio + 2); + } + if (r) + pr_err("%s: unable to configure DVI_LDO_EN\n", + __func__); + } + return 0; } diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 3de8d9b8ec76..323c3809ce39 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "mux.h" #include "sdram-micron-mt46h32m32lf-6.h" @@ -301,13 +302,18 @@ static void omap3_evm_disable_dvi(struct omap_dss_device *dssdev) dvi_enabled = 0; } +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = omap3_evm_enable_dvi, + .platform_disable = omap3_evm_disable_dvi, +}; + static struct omap_dss_device omap3_evm_dvi_device = { .name = "dvi", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = omap3_evm_enable_dvi, - .platform_disable = omap3_evm_disable_dvi, }; static struct omap_dss_device *omap3_evm_dss_devices[] = { diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index 9df9d9367608..2a2dad447e86 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -160,13 +161,18 @@ static void omap3_stalker_disable_lcd(struct omap_dss_device *dssdev) lcd_enabled = 0; } +static struct panel_generic_dpi_data lcd_panel = { + .name = "generic", + .platform_enable = omap3_stalker_enable_lcd, + .platform_disable = omap3_stalker_disable_lcd, +}; + static struct omap_dss_device omap3_stalker_lcd_device = { .name = "lcd", - .driver_name = "generic_panel", + .driver_name = "generic_dpi_panel", + .data = &lcd_panel, .phy.dpi.data_lines = 24, .type = OMAP_DISPLAY_TYPE_DPI, - .platform_enable = 
omap3_stalker_enable_lcd, - .platform_disable = omap3_stalker_disable_lcd, }; static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev) @@ -208,13 +214,18 @@ static void omap3_stalker_disable_dvi(struct omap_dss_device *dssdev) dvi_enabled = 0; } +static struct panel_generic_dpi_data dvi_panel = { + .name = "generic", + .platform_enable = omap3_stalker_enable_dvi, + .platform_disable = omap3_stalker_disable_dvi, +}; + static struct omap_dss_device omap3_stalker_dvi_device = { .name = "dvi", - .driver_name = "generic_panel", .type = OMAP_DISPLAY_TYPE_DPI, + .driver_name = "generic_dpi_panel", + .data = &dvi_panel, .phy.dpi.data_lines = 24, - .platform_enable = omap3_stalker_enable_dvi, - .platform_disable = omap3_stalker_disable_dvi, }; static struct omap_dss_device *omap3_stalker_dss_devices[] = { diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 3094e2007844..e001a048dc0c 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -95,7 +96,16 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { static void __init omap4_ehci_init(void) { int ret; + struct clk *phy_ref_clk; + /* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */ + phy_ref_clk = clk_get(NULL, "auxclk3_ck"); + if (IS_ERR(phy_ref_clk)) { + pr_err("Cannot request auxclk3\n"); + goto error1; + } + clk_set_rate(phy_ref_clk, 19200000); + clk_enable(phy_ref_clk); /* disable the power to the usb hub prior to init */ ret = gpio_request(GPIO_HUB_POWER, "hub_power"); diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c new file mode 100644 index 000000000000..6bcd43657aed --- /dev/null +++ b/arch/arm/mach-omap2/board-zoom-display.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2010 Texas Instruments Inc. + * + * Modified from mach-omap2/board-zoom-peripherals.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define LCD_PANEL_RESET_GPIO_PROD 96 +#define LCD_PANEL_RESET_GPIO_PILOT 55 +#define LCD_PANEL_QVGA_GPIO 56 + +static void zoom_lcd_panel_init(void) +{ + int ret; + unsigned char lcd_panel_reset_gpio; + + lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? 
+ LCD_PANEL_RESET_GPIO_PROD : + LCD_PANEL_RESET_GPIO_PILOT; + + ret = gpio_request(lcd_panel_reset_gpio, "lcd reset"); + if (ret) { + pr_err("Failed to get LCD reset GPIO (gpio%d).\n", + lcd_panel_reset_gpio); + return; + } + gpio_direction_output(lcd_panel_reset_gpio, 1); + + ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga"); + if (ret) { + pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n", + LCD_PANEL_QVGA_GPIO); + goto err0; + } + gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1); + + return; +err0: + gpio_free(lcd_panel_reset_gpio); +} + +static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev) +{ + return 0; +} + +static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev) +{ +} + +/* + * PWMA/B register offsets (TWL4030_MODULE_PWMA) + */ +#define TWL_INTBR_PMBR1 0xD +#define TWL_INTBR_GPBR1 0xC +#define TWL_LED_PWMON 0x0 +#define TWL_LED_PWMOFF 0x1 + +static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level) +{ + unsigned char c; + u8 mux_pwm, enb_pwm; + + if (level > 100) + return -1; + + twl_i2c_read_u8(TWL4030_MODULE_INTBR, &mux_pwm, TWL_INTBR_PMBR1); + twl_i2c_read_u8(TWL4030_MODULE_INTBR, &enb_pwm, TWL_INTBR_GPBR1); + + if (level == 0) { + /* disable pwm1 output and clock */ + enb_pwm = enb_pwm & 0xF5; + /* change pwm1 pin to gpio pin */ + mux_pwm = mux_pwm & 0xCF; + twl_i2c_write_u8(TWL4030_MODULE_INTBR, + enb_pwm, TWL_INTBR_GPBR1); + twl_i2c_write_u8(TWL4030_MODULE_INTBR, + mux_pwm, TWL_INTBR_PMBR1); + return 0; + } + + if (!((enb_pwm & 0xA) && (mux_pwm & 0x30))) { + /* change gpio pin to pwm1 pin */ + mux_pwm = mux_pwm | 0x30; + /* enable pwm1 output and clock*/ + enb_pwm = enb_pwm | 0x0A; + twl_i2c_write_u8(TWL4030_MODULE_INTBR, + mux_pwm, TWL_INTBR_PMBR1); + twl_i2c_write_u8(TWL4030_MODULE_INTBR, + enb_pwm, TWL_INTBR_GPBR1); + } + + c = ((50 * (100 - level)) / 100) + 1; + twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF); + twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON); + + return 0; +} + +static struct omap_dss_device zoom_lcd_device = { + .name = "lcd", + .driver_name = "NEC_8048_panel", + .type = OMAP_DISPLAY_TYPE_DPI, + .phy.dpi.data_lines = 24, + .platform_enable = zoom_panel_enable_lcd, + .platform_disable = zoom_panel_disable_lcd, + .max_backlight_level = 100, + .set_backlight = zoom_set_bl_intensity, +}; + +static struct omap_dss_device *zoom_dss_devices[] = { + &zoom_lcd_device, +}; + +static struct omap_dss_board_info zoom_dss_data = { + .num_devices = ARRAY_SIZE(zoom_dss_devices), + .devices = zoom_dss_devices, + .default_device = &zoom_lcd_device, +}; + +static struct platform_device zoom_dss_device = { + .name = "omapdss", + .id = -1, + .dev = { + .platform_data = &zoom_dss_data, + }, +}; + +static struct omap2_mcspi_device_config dss_lcd_mcspi_config = { + .turbo_mode = 1, + .single_channel = 1, /* 0: slave, 1: master */ +}; + +static struct spi_board_info nec_8048_spi_board_info[] __initdata = { + [0] = { + .modalias = "nec_8048_spi", + .bus_num = 1, + .chip_select = 2, + .max_speed_hz = 375000, + .controller_data = &dss_lcd_mcspi_config, + }, +}; + +static struct platform_device *zoom_display_devices[] __initdata = { + &zoom_dss_device, +}; + +void __init zoom_display_init(void) +{ + platform_add_devices(zoom_display_devices, + ARRAY_SIZE(zoom_display_devices)); + spi_register_board_info(nec_8048_spi_board_info, + ARRAY_SIZE(nec_8048_spi_board_info)); + zoom_lcd_panel_init(); +} + diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c index 
3fbd0edd712e..e0e040f34c68 100644 --- a/arch/arm/mach-omap2/board-zoom-peripherals.c +++ b/arch/arm/mach-omap2/board-zoom-peripherals.c @@ -35,6 +35,8 @@ #define OMAP_ZOOM_WLAN_PMENA_GPIO (101) #define OMAP_ZOOM_WLAN_IRQ_GPIO (162) +#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES) + /* Zoom2 has Qwerty keyboard*/ static uint32_t board_keymap[] = { KEY(0, 0, KEY_E), @@ -190,7 +192,7 @@ static struct platform_device omap_vwlan_device = { }, }; -struct wl12xx_platform_data omap_zoom_wlan_data __initdata = { +static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = { .irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO), /* ZOOM ref clock is 26 MHz */ .board_ref_clock = 1, @@ -224,9 +226,43 @@ static struct omap2_hsmmc_info mmc[] = { {} /* Terminator */ }; +static struct regulator_consumer_supply zoom_vpll2_supply = + REGULATOR_SUPPLY("vdds_dsi", "omapdss"); + +static struct regulator_consumer_supply zoom_vdda_dac_supply = + REGULATOR_SUPPLY("vdda_dac", "omapdss"); + +static struct regulator_init_data zoom_vpll2 = { + .constraints = { + .min_uV = 1800000, + .max_uV = 1800000, + .valid_modes_mask = REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .valid_ops_mask = REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &zoom_vpll2_supply, +}; + +static struct regulator_init_data zoom_vdac = { + .constraints = { + .min_uV = 1800000, + .max_uV = 1800000, + .valid_modes_mask = REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .valid_ops_mask = REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &zoom_vdda_dac_supply, +}; + static int zoom_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int ret; + /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); @@ -238,11 +274,19 @@ static int zoom_twl_gpio_setup(struct device *dev, zoom_vsim_supply.dev = mmc[0].dev; zoom_vmmc2_supply.dev = mmc[1].dev; - return 0; + ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable"); + if (ret) { + pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n", + LCD_PANEL_ENABLE_GPIO); + return ret; + } + gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0); + + return ret; } /* EXTMUTE callback function */ -void zoom2_set_hs_extmute(int mute) +static void zoom2_set_hs_extmute(int mute) { gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute); } @@ -301,7 +345,8 @@ static struct twl4030_platform_data zoom_twldata = { .vmmc1 = &zoom_vmmc1, .vmmc2 = &zoom_vmmc2, .vsim = &zoom_vsim, - + .vpll2 = &zoom_vpll2, + .vdac = &zoom_vdac, }; static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = { diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c index e041c537ea37..e26754c24ee8 100644 --- a/arch/arm/mach-omap2/board-zoom.c +++ b/arch/arm/mach-omap2/board-zoom.c @@ -130,6 +130,7 @@ static void __init omap_zoom_init(void) ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS); zoom_debugboard_init(); zoom_peripherals_init(); + zoom_display_init(); } MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board") diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index d3ab1c9e50b0..403a4a1d3f9c 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -3286,7 +3286,7 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "usbtll_fck", 
&usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2 | CK_AM35XX), + CLK("ehci-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX), CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX), CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX), diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h index de3faa20b46b..9b459c26fb85 100644 --- a/arch/arm/mach-omap2/clockdomain.h +++ b/arch/arm/mach-omap2/clockdomain.h @@ -103,9 +103,7 @@ struct clockdomain { const char *name; struct powerdomain *ptr; } pwrdm; -#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) const u16 clktrctrl_mask; -#endif const u8 flags; const u8 dep_bit; const u8 prcm_partition; diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index f3e043fe5eb8..f7b22a16f385 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -47,6 +47,8 @@ #define OMAP3_STATE_MAX OMAP3_STATE_C7 +#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */ + struct omap3_processor_cx { u8 valid; u8 type; @@ -252,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, * FIXME: we currently manage device-specific idle states * for PER and CORE in combination with CPU-specific * idle states. This is wrong, and device-specific - * idle managment needs to be separated out into + * idle management needs to be separated out into * its own code. */ diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 381f4eb92352..2c9c912f2c42 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -978,7 +978,7 @@ static int __init omap2_init_devices(void) arch_initcall(omap2_init_devices); #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE) -struct omap_device_pm_latency omap_wdt_latency[] = { +static struct omap_device_pm_latency omap_wdt_latency[] = { [0] = { .deactivate_func = omap_device_idle_hwmods, .activate_func = omap_device_enable_hwmods, diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h index f93ca3928c3b..d20bd9c1a106 100644 --- a/arch/arm/mach-omap2/include/mach/board-zoom.h +++ b/arch/arm/mach-omap2/include/mach/board-zoom.h @@ -1,9 +1,12 @@ /* * Defines for zoom boards */ +#include + #define ZOOM_NAND_CS 0 extern int __init zoom_debugboard_init(void); extern void __init zoom_peripherals_init(void); +extern void __init zoom_display_init(void); #define ZOOM2_HEADSET_EXTMUTE_GPIO 153 diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 17bd6394d224..df8d2f2872c6 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -893,7 +893,7 @@ static struct omap_mux * __init omap_mux_list_add( return NULL; m = &entry->mux; - memcpy(m, src, sizeof(struct omap_mux_entry)); + entry->mux = *src; #ifdef CONFIG_OMAP_MUX if (omap_mux_copy_names(src, m)) { diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c index 440c98e9a510..17f80e4ab162 100644 --- a/arch/arm/mach-omap2/mux34xx.c +++ b/arch/arm/mach-omap2/mux34xx.c @@ -703,7 +703,7 @@ static struct omap_mux __initdata omap3_muxmodes[] = { * Signals different on CBC package compared to the superset */ #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC) -struct omap_mux __initdata omap3_cbc_subset[] = { +static struct omap_mux __initdata omap3_cbc_subset[] = { { 
.reg_offset = OMAP_MUX_TERMINATOR }, }; #else @@ -721,7 +721,7 @@ struct omap_mux __initdata omap3_cbc_subset[] = { */ #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \ && defined(CONFIG_OMAP_PACKAGE_CBC) -struct omap_ball __initdata omap3_cbc_ball[] = { +static struct omap_ball __initdata omap3_cbc_ball[] = { _OMAP3_BALLENTRY(CAM_D0, "ae16", NULL), _OMAP3_BALLENTRY(CAM_D1, "ae15", NULL), _OMAP3_BALLENTRY(CAM_D10, "d25", NULL), diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c index 980f11d45c79..c322e7bdaa17 100644 --- a/arch/arm/mach-omap2/mux44xx.c +++ b/arch/arm/mach-omap2/mux44xx.c @@ -544,7 +544,7 @@ static struct omap_mux __initdata omap4_core_muxmodes[] = { */ #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \ && defined(CONFIG_OMAP_PACKAGE_CBL) -struct omap_ball __initdata omap4_core_cbl_ball[] = { +static struct omap_ball __initdata omap4_core_cbl_ball[] = { _OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL), _OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL), _OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL), @@ -1262,7 +1262,7 @@ static struct omap_mux __initdata omap4_es2_core_muxmodes[] = { */ #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \ && defined(CONFIG_OMAP_PACKAGE_CBS) -struct omap_ball __initdata omap4_core_cbs_ball[] = { +static struct omap_ball __initdata omap4_core_cbs_ball[] = { _OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL), _OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL), _OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL), @@ -1546,7 +1546,7 @@ static struct omap_mux __initdata omap4_wkup_muxmodes[] = { */ #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \ && defined(CONFIG_OMAP_PACKAGE_CBL) -struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = { +static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = { _OMAP4_BALLENTRY(SIM_IO, "h4", NULL), _OMAP4_BALLENTRY(SIM_CLK, "j2", NULL), _OMAP4_BALLENTRY(SIM_RESET, "g2", NULL), diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c index 15f8c6c1bb0f..00e1d2b53683 100644 --- a/arch/arm/mach-omap2/omap_twl.c +++ b/arch/arm/mach-omap2/omap_twl.c @@ -20,6 +20,8 @@ #include +#include "pm.h" + #define OMAP3_SRI2C_SLAVE_ADDR 0x12 #define OMAP3_VDD_MPU_SR_CONTROL_REG 0x00 #define OMAP3_VDD_CORE_SR_CONTROL_REG 0x01 @@ -60,17 +62,17 @@ static u8 smps_offset; #define REG_SMPS_OFFSET 0xE0 -unsigned long twl4030_vsel_to_uv(const u8 vsel) +static unsigned long twl4030_vsel_to_uv(const u8 vsel) { return (((vsel * 125) + 6000)) * 100; } -u8 twl4030_uv_to_vsel(unsigned long uv) +static u8 twl4030_uv_to_vsel(unsigned long uv) { return DIV_ROUND_UP(uv - 600000, 12500); } -unsigned long twl6030_vsel_to_uv(const u8 vsel) +static unsigned long twl6030_vsel_to_uv(const u8 vsel) { /* * In TWL6030 depending on the value of SMPS_OFFSET @@ -102,7 +104,7 @@ unsigned long twl6030_vsel_to_uv(const u8 vsel) return ((((vsel - 1) * 125) + 6000)) * 100; } -u8 twl6030_uv_to_vsel(unsigned long uv) +static u8 twl6030_uv_to_vsel(unsigned long uv) { /* * In TWL6030 depending on the value of SMPS_OFFSET diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index dac2d1d9987d..9e5dc8ed51e9 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -350,7 +350,7 @@ static void omap2_pm_end(void) enable_hlt(); } -static struct platform_suspend_ops omap_pm_ops = { +static const struct platform_suspend_ops omap_pm_ops = { .begin = omap2_pm_begin, .enter = omap2_pm_enter, .end = omap2_pm_end, diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 
5b323f28da2d..8cbbeade4b8a 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -605,7 +605,7 @@ static void omap3_pm_end(void) return; } -static struct platform_suspend_ops omap_pm_ops = { +static const struct platform_suspend_ops omap_pm_ops = { .begin = omap3_pm_begin, .end = omap3_pm_end, .enter = omap3_pm_enter, diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index e9f4862c4de4..76cfff2db514 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -65,7 +65,7 @@ static void omap4_pm_end(void) return; } -static struct platform_suspend_ops omap_pm_ops = { +static const struct platform_suspend_ops omap_pm_ops = { .begin = omap4_pm_begin, .end = omap4_pm_end, .enter = omap4_pm_enter, diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c index 784989f8f2f5..5acd2ab298b1 100644 --- a/arch/arm/mach-omap2/pm_bus.c +++ b/arch/arm/mach-omap2/pm_bus.c @@ -20,7 +20,7 @@ #include #ifdef CONFIG_PM_RUNTIME -int omap_pm_runtime_suspend(struct device *dev) +static int omap_pm_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); int r, ret = 0; @@ -37,7 +37,7 @@ int omap_pm_runtime_suspend(struct device *dev) return ret; }; -int omap_pm_runtime_resume(struct device *dev) +static int omap_pm_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); int r; diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h index 53d44f6e3736..49654c8d18f5 100644 --- a/arch/arm/mach-omap2/prm2xxx_3xxx.h +++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h @@ -228,7 +228,67 @@ #ifndef __ASSEMBLER__ - +/* + * Stub omap2xxx/omap3xxx functions so that common files + * continue to build when custom builds are used + */ +#if defined(CONFIG_ARCH_OMAP4) && !(defined(CONFIG_ARCH_OMAP2) || \ + defined(CONFIG_ARCH_OMAP3)) +static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); +} +static inline u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits, + s16 module, s16 idx) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + "not suppose to be used on omap4\n"); + return 0; +} +static inline int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift) +{ + WARN(1, "prm: omap2xxx/omap3xxx specific function and " + 
"not suppose to be used on omap4\n"); + return 0; +} +#else /* Power/reset management domain register get/set */ extern u32 omap2_prm_read_mod_reg(s16 module, u16 idx); extern void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx); @@ -242,6 +302,7 @@ extern int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift); extern int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift); extern int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift); +#endif /* CONFIG_ARCH_OMAP4 */ #endif /* diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index c64578853a8d..302da7403a10 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -852,7 +852,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata) } /** - * omap_serial_init() - intialize all supported serial ports + * omap_serial_init() - initialize all supported serial ports * * Initializes all available UARTs as serial ports. Platforms * can call this function when they want to have default behaviour diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c index 786d685c09a9..b1e0af18a26a 100644 --- a/arch/arm/mach-omap2/sr_device.c +++ b/arch/arm/mach-omap2/sr_device.c @@ -27,6 +27,7 @@ #include #include "control.h" +#include "pm.h" static bool sr_enable_on_init; diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c index b0c4907ab3ca..4067669d96c4 100644 --- a/arch/arm/mach-omap2/wd_timer.c +++ b/arch/arm/mach-omap2/wd_timer.c @@ -13,6 +13,8 @@ #include +#include "wd_timer.h" + /* * In order to avoid any assumptions from bootloader regarding WDT * settings, WDT module is reset during init. This enables the watchdog diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c index ee3c29c57ae3..f3e60a049f98 100644 --- a/arch/arm/mach-pnx4008/pm.c +++ b/arch/arm/mach-pnx4008/pm.c @@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_state_t state) (state == PM_SUSPEND_MEM); } -static struct platform_suspend_ops pnx4008_pm_ops = { +static const struct platform_suspend_ops pnx4008_pm_ops = { .enter = pnx4008_pm_enter, .valid = pnx4008_pm_valid, }; diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index d6e15f71fc09..f5d91efc2965 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -22,7 +22,6 @@ #include #include -#include #include #include diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c index 462167ac05f9..cdf7f41e2bb3 100644 --- a/arch/arm/mach-pxa/mxm8x10.c +++ b/arch/arm/mach-pxa/mxm8x10.c @@ -337,7 +337,7 @@ void __init mxm_8x10_mmc_init(void) } #endif -/* USB Open Host Controler Interface */ +/* USB Open Host Controller Interface */ static struct pxaohci_platform_data mxm_8x10_ohci_platform_data = { .port_mode = PMM_NPS_MODE, .flags = ENABLE_PORT_ALL diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 166c15f62916..978e1b289544 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -96,7 +96,7 @@ void pxa_pm_finish(void) pxa_cpu_pm_fns->finish(); } -static struct platform_suspend_ops pxa_pm_ops = { +static const struct platform_suspend_ops pxa_pm_ops = { .valid = pxa_pm_valid, .enter = pxa_pm_enter, .prepare = pxa_pm_prepare, diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c index e68d46d415f3..785880f67b60 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c @@ -869,7 +869,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info) } #ifdef CONFIG_PM -static 
struct platform_suspend_ops sharpsl_pm_ops = { +static const struct platform_suspend_ops sharpsl_pm_ops = { .prepare = pxa_pm_prepare, .finish = pxa_pm_finish, .enter = corgi_pxa_pm_enter, diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c index d7ada8c7e41f..1a81fe12ccd7 100644 --- a/arch/arm/mach-s3c2410/mach-h1940.c +++ b/arch/arm/mach-s3c2410/mach-h1940.c @@ -387,7 +387,7 @@ static struct platform_device *h1940_devices[] __initdata = { &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, - &s3c_device_pcm, + &samsung_asoc_dma, &s3c_device_usbgadget, &h1940_device_leds, &h1940_device_bluetooth, diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c index e0622bbb6dfa..eab6ae50683c 100644 --- a/arch/arm/mach-s3c2440/mach-rx1950.c +++ b/arch/arm/mach-s3c2440/mach-rx1950.c @@ -692,7 +692,7 @@ static struct platform_device *rx1950_devices[] __initdata = { &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, - &s3c_device_pcm, + &samsung_asoc_dma, &s3c_device_usbgadget, &s3c_device_rtc, &s3c_device_nand, diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c index 7618627b98f5..cad67022fa25 100644 --- a/arch/arm/mach-s3c64xx/dev-audio.c +++ b/arch/arm/mach-s3c64xx/dev-audio.c @@ -296,7 +296,7 @@ static struct s3c_audio_pdata s3c_ac97_pdata; static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32); struct platform_device s3c64xx_device_ac97 = { - .name = "s3c-ac97", + .name = "samsung-ac97", .id = -1, .num_resources = ARRAY_SIZE(s3c64xx_ac97_resource), .resource = s3c64xx_ac97_resource, @@ -315,16 +315,3 @@ void __init s3c64xx_ac97_setup_gpio(int num) else s3c_ac97_pdata.cfg_gpio = s3c64xx_ac97_cfg_gpe; } - -static u64 s3c_device_audio_dmamask = 0xffffffffUL; - -struct platform_device s3c_device_pcm = { - .name = "s3c24xx-pcm-audio", - .id = -1, - .dev = { - .dma_mask = &s3c_device_audio_dmamask, - .coherent_dma_mask = 0xffffffffUL - } -}; -EXPORT_SYMBOL(s3c_device_pcm); - diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index d9aa072ecee3..135db1b41252 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c @@ -741,7 +741,7 @@ static int __init s3c64xx_dma_init(void) /* Set all DMA configuration to be DMA, not SDMA */ writel(0xffffff, S3C_SYSREG(0x110)); - /* Register standard DMA controlers */ + /* Register standard DMA controllers */ s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000); s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000); diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index 77488facfe4c..e85192a86fbe 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -283,7 +283,7 @@ static struct platform_device *smdk6410_devices[] __initdata = { &s3c_device_fb, &s3c_device_ohci, &s3c_device_usb_hsotg, - &s3c_device_pcm, + &samsung_asoc_dma, &s3c64xx_device_iisv4, &samsung_device_keypad, diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c index 10ab275ebd63..ab2d27172cbc 100644 --- a/arch/arm/mach-s5pc100/dev-audio.c +++ b/arch/arm/mach-s5pc100/dev-audio.c @@ -281,7 +281,7 @@ static struct s3c_audio_pdata s3c_ac97_pdata = { static u64 s5pc100_ac97_dmamask = DMA_BIT_MASK(32); struct platform_device s5pc100_device_ac97 = { - .name = "s3c-ac97", + .name = "samsung-ac97", .id = -1, .num_resources = ARRAY_SIZE(s5pc100_ac97_resource), .resource = s5pc100_ac97_resource, diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c 
b/arch/arm/mach-s5pc100/mach-smdkc100.c index 18b405d514d6..dd192a27524d 100644 --- a/arch/arm/mach-s5pc100/mach-smdkc100.c +++ b/arch/arm/mach-s5pc100/mach-smdkc100.c @@ -96,6 +96,7 @@ static struct s3c2410_uartcfg smdkc100_uartcfgs[] __initdata = { /* I2C0 */ static struct i2c_board_info i2c_devs0[] __initdata = { + {I2C_BOARD_INFO("wm8580", 0x1b),}, }; /* I2C1 */ @@ -190,6 +191,7 @@ static struct platform_device *smdkc100_devices[] __initdata = { &s3c_device_ts, &s3c_device_wdt, &smdkc100_lcd_powerdev, + &samsung_asoc_dma, &s5pc100_device_iis0, &samsung_device_keypad, &s5pc100_device_ac97, diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c index ddd2704b3467..8d58f1926241 100644 --- a/arch/arm/mach-s5pv210/dev-audio.c +++ b/arch/arm/mach-s5pv210/dev-audio.c @@ -311,7 +311,7 @@ static struct s3c_audio_pdata s3c_ac97_pdata = { static u64 s5pv210_ac97_dmamask = DMA_BIT_MASK(32); struct platform_device s5pv210_device_ac97 = { - .name = "s3c-ac97", + .name = "samsung-ac97", .id = -1, .num_resources = ARRAY_SIZE(s5pv210_ac97_resource), .resource = s5pv210_ac97_resource, diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-s5pv310/hotplug.c index afa5392d9fc0..c24235c89eed 100644 --- a/arch/arm/mach-s5pv310/hotplug.c +++ b/arch/arm/mach-s5pv310/hotplug.c @@ -30,10 +30,10 @@ static inline void cpu_enter_lowpower(void) * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" - " bic %0, %0, %2\n" + " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" - " bic %0, %0, #0x04\n" + " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 59d14f0fdcf8..e21f3470eece 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c83fdc80edfd..ab9fc4470d36 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp) return virt_to_phys(sp); } -static struct platform_suspend_ops sa11x0_pm_ops = { +static const struct platform_suspend_ops sa11x0_pm_ops = { .enter = sa11x0_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index cd79d7c1ba0d..3cf0951caa2d 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -247,10 +247,7 @@ static struct platform_device smc911x_device = { */ static int slot_cn7_get_cd(struct platform_device *pdev) { - if (gpio_is_valid(GPIO_PORT41)) - return !gpio_get_value(GPIO_PORT41); - else - return -ENXIO; + return !gpio_get_value(GPIO_PORT41); } /* SH_MMCIF */ @@ -308,6 +305,7 @@ static struct platform_device sh_mmcif_device = { static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct resource sdhi0_resources[] = { @@ -339,7 +337,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_ocr_mask = MMC_VDD_165_195, .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, - .tmio_caps = MMC_CAP_NEEDS_POLL, + .tmio_caps = MMC_CAP_NEEDS_POLL | MMC_CAP_SDIO_IRQ, .get_cd = slot_cn7_get_cd, }; @@ -711,6 +709,10 @@ static struct platform_device fsi_device = { }, }; +static struct platform_device 
fsi_ak4643_device = { + .name = "sh_fsi2_a_ak4643", +}; + static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = { .clock_source = LCDC_CLK_EXTERNAL, .ch[0] = { @@ -933,6 +935,7 @@ static struct platform_device *ap4evb_devices[] __initdata = { &sdhi1_device, &usb1_host_device, &fsi_device, + &fsi_ak4643_device, &sh_mmcif_device, &lcdc1_device, &lcdc_device, diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c index c13f01280b7e..dee3e9231fb9 100644 --- a/arch/arm/mach-shmobile/board-g4evm.c +++ b/arch/arm/mach-shmobile/board-g4evm.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -196,6 +197,10 @@ static struct platform_device keysc_device = { }; /* SDHI */ +static struct sh_mobile_sdhi_info sdhi0_info = { + .tmio_caps = MMC_CAP_SDIO_IRQ, +}; + static struct resource sdhi0_resources[] = { [0] = { .name = "SDHI0", @@ -214,6 +219,13 @@ static struct platform_device sdhi0_device = { .num_resources = ARRAY_SIZE(sdhi0_resources), .resource = sdhi0_resources, .id = 0, + .dev = { + .platform_data = &sdhi0_info, + }, +}; + +static struct sh_mobile_sdhi_info sdhi1_info = { + .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, }; static struct resource sdhi1_resources[] = { @@ -234,6 +246,9 @@ static struct platform_device sdhi1_device = { .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, .id = 1, + .dev = { + .platform_data = &sdhi1_info, + }, }; static struct platform_device *g4evm_devices[] __initdata = { diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 5bcf5c1e1399..7b15d21f0f68 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -657,17 +657,14 @@ static struct platform_device fsi_ak4643_device = { */ static int slot_cn7_get_cd(struct platform_device *pdev) { - if (gpio_is_valid(GPIO_PORT41)) - return !gpio_get_value(GPIO_PORT41); - else - return -ENXIO; + return !gpio_get_value(GPIO_PORT41); } /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, - .tmio_caps = MMC_CAP_SD_HIGHSPEED, + .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, }; static struct resource sdhi0_resources[] = { @@ -700,7 +697,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_ocr_mask = MMC_VDD_165_195, .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, - .tmio_caps = MMC_CAP_SD_HIGHSPEED | + .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL, .get_cd = slot_cn7_get_cd, }; @@ -729,13 +726,23 @@ static struct platform_device sdhi1_device = { }; #endif +/* + * The card detect pin of the top SD/MMC slot (CN23) is active low and is + * connected to GPIO SCIFB_SCK of SH7372 (GPIO_PORT162). 
+ */ +static int slot_cn23_get_cd(struct platform_device *pdev) +{ + return !gpio_get_value(GPIO_PORT162); +} + /* SDHI2 */ static struct sh_mobile_sdhi_info sdhi2_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI2_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI2_RX, .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, - .tmio_caps = MMC_CAP_SD_HIGHSPEED | + .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL, + .get_cd = slot_cn23_get_cd, }; static struct resource sdhi2_resources[] = { @@ -953,6 +960,7 @@ static struct tca6416_keys_platform_data mackerel_tca6416_keys_info = { }; /* I2C */ +#define IRQ7 evt2irq(0x02e0) #define IRQ9 evt2irq(0x0320) static struct i2c_board_info i2c0_devices[] = { @@ -965,6 +973,11 @@ static struct i2c_board_info i2c0_devices[] = { .platform_data = &mackerel_tca6416_keys_info, .irq = IRQ9, }, + /* Touchscreen */ + { + I2C_BOARD_INFO("st1232-ts", 0x55), + .irq = IRQ7, + }, }; #define IRQ21 evt2irq(0x32a0) @@ -1092,6 +1105,10 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_IRQ9_42, NULL); set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH); + /* enable Touchscreen */ + gpio_request(GPIO_FN_IRQ7_40, NULL); + set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); + /* enable Accelerometer */ gpio_request(GPIO_FN_IRQ21, NULL); set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); @@ -1127,6 +1144,10 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID2_1, NULL); gpio_request(GPIO_FN_SDHID2_0, NULL); + /* card detect pin for microSD slot (CN23) */ + gpio_request(GPIO_PORT162, NULL); + gpio_direction_input(GPIO_PORT162); + /* MMCIF */ gpio_request(GPIO_FN_MMCD0_0, NULL); gpio_request(GPIO_FN_MMCD0_1, NULL); diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt index e3ebfa73956e..efd3687ba190 100644 --- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt +++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt @@ -85,3 +85,10 @@ ED 0xE6150004, 0x80331050 WAIT 1, 0xFE40009C ED 0xE6150354, 0x00000002 + +LIST "SCIF0 - Serial port for earlyprintk" +EB 0xE6053098, 0x11 +EB 0xE6053098, 0xe1 +EW 0xE6C40000, 0x0000 +EB 0xE6C40004, 0x19 +EW 0xE6C40008, 0x3000 diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt index e3ebfa73956e..efd3687ba190 100644 --- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt +++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt @@ -85,3 +85,10 @@ ED 0xE6150004, 0x80331050 WAIT 1, 0xFE40009C ED 0xE6150354, 0x00000002 + +LIST "SCIF0 - Serial port for earlyprintk" +EB 0xE6053098, 0x11 +EB 0xE6053098, 0xe1 +EW 0xE6C40000, 0x0000 +EB 0xE6C40004, 0x19 +EW 0xE6C40008, 0x3000 diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c index 1a20c489b20d..2fe9704d5ea1 100644 --- a/arch/arm/mach-shmobile/intc-sh7367.c +++ b/arch/arm/mach-shmobile/intc-sh7367.c @@ -189,10 +189,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, SPU, SIU } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -207,7 +207,7 @@ static struct 
intc_mask_reg intca_mask_registers[] __initdata = { { 0, 0, TPU0, TPU1, TPU2, TPU3, TPU4, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, MISTY, CMT3, RWDT1, RWDT0 } }, }; diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index 30b2f400666a..f78a1ead71a5 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c @@ -230,10 +230,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { 0, DISABLED, ENABLED, ENABLED, + { 0, ENABLED, ENABLED, ENABLED, TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c index 2cdeb8ccd821..dd568382cc9f 100644 --- a/arch/arm/mach-shmobile/intc-sh7377.c +++ b/arch/arm/mach-shmobile/intc-sh7377.c @@ -234,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, 0, MSUG } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c index 003008c18360..ce28141662da 100644 --- a/arch/arm/mach-shmobile/setup-sh7367.c +++ b/arch/arm/mach-shmobile/setup-sh7367.c @@ -35,6 +35,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xe6c40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00) }, @@ -52,6 +54,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xe6c50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20) }, @@ -69,6 +73,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xe6c60000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40) }, @@ -86,6 +92,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xe6c70000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60) }, @@ -103,6 +111,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xe6c80000, .flags = UPF_BOOT_AUTOCONF, 
+ .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20) }, @@ -120,6 +130,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xe6cb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40) }, @@ -137,6 +149,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xe6c30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60) }, diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 2e3e11ee7c43..ff0494f3d00c 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -38,6 +38,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xe6c40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0c00), evt2irq(0x0c00), evt2irq(0x0c00), evt2irq(0x0c00) }, @@ -55,6 +57,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xe6c50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0c20), evt2irq(0x0c20), evt2irq(0x0c20), evt2irq(0x0c20) }, @@ -72,6 +76,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xe6c60000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0c40), evt2irq(0x0c40), evt2irq(0x0c40), evt2irq(0x0c40) }, @@ -89,6 +95,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xe6c70000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0c60), evt2irq(0x0c60), evt2irq(0x0c60), evt2irq(0x0c60) }, @@ -106,6 +114,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xe6c80000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0d20), evt2irq(0x0d20), evt2irq(0x0d20), evt2irq(0x0d20) }, @@ -123,6 +133,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xe6cb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0x0d40), evt2irq(0x0d40), evt2irq(0x0d40), evt2irq(0x0d40) }, @@ -140,6 +152,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xe6c30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFB, .irqs = { evt2irq(0x0d60), evt2irq(0x0d60), evt2irq(0x0d60), evt2irq(0x0d60) }, diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c index 575dbd6c2f1d..8099b0b8a934 100644 --- a/arch/arm/mach-shmobile/setup-sh7377.c +++ b/arch/arm/mach-shmobile/setup-sh7377.c @@ 
-36,6 +36,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xe6c40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00) }, @@ -53,6 +55,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xe6c50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20) }, @@ -70,6 +74,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xe6c60000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40) }, @@ -87,6 +93,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xe6c70000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60) }, @@ -104,6 +112,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xe6c80000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20) }, @@ -121,6 +131,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xe6cb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40) }, @@ -138,6 +150,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xe6cc0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) }, @@ -155,6 +169,8 @@ static struct platform_device scif6_device = { static struct plat_sci_port scif7_platform_data = { .mapbase = 0xe6c30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60) }, diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index f1eff8b37bd6..685c40a2f5e6 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -36,6 +36,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xe6c40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(72), gic_spi(72), gic_spi(72), gic_spi(72) }, @@ -52,6 +54,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xe6c50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(73), gic_spi(73), gic_spi(73), gic_spi(73) }, @@ -68,6 +72,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 
0xe6c60000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(74), gic_spi(74), gic_spi(74), gic_spi(74) }, @@ -84,6 +90,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xe6c70000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(75), gic_spi(75), gic_spi(75), gic_spi(75) }, @@ -100,6 +108,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xe6c80000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(78), gic_spi(78), gic_spi(78), gic_spi(78) }, @@ -116,6 +126,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xe6cb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(79), gic_spi(79), gic_spi(79), gic_spi(79) }, @@ -132,6 +144,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xe6cc0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(156), gic_spi(156), gic_spi(156), gic_spi(156) }, @@ -148,6 +162,8 @@ static struct platform_device scif6_device = { static struct plat_sci_port scif7_platform_data = { .mapbase = 0xe6cd0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { gic_spi(143), gic_spi(143), gic_spi(143), gic_spi(143) }, @@ -164,6 +180,8 @@ static struct platform_device scif7_device = { static struct plat_sci_port scif8_platform_data = { .mapbase = 0xe6c30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFB, .irqs = { gic_spi(80), gic_spi(80), gic_spi(80), gic_spi(80) }, diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c index 3560f8c1e723..5aa2d54ebfaa 100644 --- a/arch/arm/mach-spear3xx/spear300.c +++ b/arch/arm/mach-spear3xx/spear300.c @@ -371,7 +371,7 @@ struct pmx_driver pmx_driver = { }; /* Add spear300 specific devices here */ -/* arm gpio1 device registeration */ +/* arm gpio1 device registration */ static struct pl061_platform_data gpio1_plat_data = { .gpio_base = 8, .irq_base = SPEAR_GPIO1_INT_BASE, @@ -451,7 +451,7 @@ void __init spear300_init(void) /* call spear3xx family common init function */ spear3xx_init(); - /* shared irq registeration */ + /* shared irq registration */ shirq_ras1.regs.base = ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE); if (shirq_ras1.regs.base) { diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c index 96a1ab824bac..53b41b52d7ee 100644 --- a/arch/arm/mach-spear3xx/spear310.c +++ b/arch/arm/mach-spear3xx/spear310.c @@ -266,7 +266,7 @@ void __init spear310_init(void) /* call spear3xx family common init function */ spear3xx_init(); - /* shared irq registeration */ + /* shared irq registration */ base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE); if (base) { /* shirq 1 */ diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c index 6a1219549369..88b465284c36 100644 --- a/arch/arm/mach-spear3xx/spear320.c +++ 
b/arch/arm/mach-spear3xx/spear320.c @@ -519,7 +519,7 @@ void __init spear320_init(void) /* call spear3xx family common init function */ spear3xx_init(); - /* shared irq registeration */ + /* shared irq registration */ base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE); if (base) { /* shirq 1 */ diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index e87313aeae20..52f553c8c46d 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c @@ -22,7 +22,7 @@ #include /* Add spear3xx machines common devices here */ -/* gpio device registeration */ +/* gpio device registration */ static struct pl061_platform_data gpio_plat_data = { .gpio_base = 0, .irq_base = SPEAR_GPIO_INT_BASE, @@ -41,7 +41,7 @@ struct amba_device gpio_device = { .irq = {IRQ_BASIC_GPIO, NO_IRQ}, }; -/* uart device registeration */ +/* uart device registration */ struct amba_device uart_device = { .dev = { .init_name = "uart", @@ -543,6 +543,6 @@ void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size) pmx_fail: if (ret) - printk(KERN_ERR "padmux: registeration failed. err no: %d\n", + printk(KERN_ERR "padmux: registration failed. err no: %d\n", ret); } diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c index baf6bcc3169c..f2fe14e8471d 100644 --- a/arch/arm/mach-spear6xx/spear6xx.c +++ b/arch/arm/mach-spear6xx/spear6xx.c @@ -23,7 +23,7 @@ #include /* Add spear6xx machines common devices here */ -/* uart device registeration */ +/* uart device registration */ struct amba_device uart_device[] = { { .dev = { @@ -50,7 +50,7 @@ struct amba_device uart_device[] = { } }; -/* gpio device registeration */ +/* gpio device registration */ static struct pl061_platform_data gpio_plat_data[] = { { .gpio_base = 0, diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c index a5cb1ce76ff2..f3294040d357 100644 --- a/arch/arm/mach-tegra/hotplug.c +++ b/arch/arm/mach-tegra/hotplug.c @@ -26,10 +26,10 @@ static inline void cpu_enter_lowpower(void) * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" - " bic %0, %0, %2\n" + " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" - " bic %0, %0, #0x04\n" + " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig index 801b21e7f677..32a7b0f7e9f7 100644 --- a/arch/arm/mach-u300/Kconfig +++ b/arch/arm/mach-u300/Kconfig @@ -64,7 +64,7 @@ config MACH_U300_DUAL_RAM bool "Dual RAM" help Select this if you want support for Dual RAM phones. - This is two RAM memorys on different EMIFs. + This is two RAM memories on different EMIFs. 
endchoice config U300_DEBUG diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h index 193da2df732c..6193aaa47794 100644 --- a/arch/arm/mach-u300/include/mach/coh901318.h +++ b/arch/arm/mach-u300/include/mach/coh901318.h @@ -24,7 +24,7 @@ * @src_addr: transfer source address * @dst_addr: transfer destination address * @link_addr: physical address to next lli - * @virt_link_addr: virtual addres of next lli (only used by pool_free) + * @virt_link_addr: virtual address of next lli (only used by pool_free) * @phy_this: physical address of current lli (only used by pool_free) */ struct coh901318_lli { @@ -90,7 +90,7 @@ struct powersave { * struct coh901318_platform - platform arch structure * @chans_slave: specifying dma slave channels * @chans_memcpy: specifying dma memcpy channels - * @access_memory_state: requesting DMA memeory access (on / off) + * @access_memory_state: requesting DMA memory access (on / off) * @chan_conf: dma channel configurations * @max_channels: max number of dma chanenls */ diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c index 1187f1fc2e53..533967c2d095 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/arch/arm/mach-ux500/board-mop500-regulators.c @@ -3,99 +3,94 @@ * * License Terms: GNU General Public License v2 * - * Author: Sundar Iyer + * Authors: Sundar Iyer + * Bengt Jonsson * * MOP500 board specific initialization for regulators */ #include #include +#include -/* supplies to the display/camera */ -static struct regulator_init_data ab8500_vaux1_regulator = { - .constraints = { - .name = "V-DISPLAY", - .min_uV = 2500000, - .max_uV = 2900000, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE| - REGULATOR_CHANGE_STATUS, +/* AB8500 regulators */ +struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { + /* supplies to the display/camera */ + [AB8500_LDO_AUX1] = { + .constraints = { + .name = "V-DISPLAY", + .min_uV = 2500000, + .max_uV = 2900000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supplies to the on-board eMMC */ -static struct regulator_init_data ab8500_vaux2_regulator = { - .constraints = { - .name = "V-eMMC1", - .min_uV = 1100000, - .max_uV = 3300000, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE| - REGULATOR_CHANGE_STATUS, + /* supplies to the on-board eMMC */ + [AB8500_LDO_AUX2] = { + .constraints = { + .name = "V-eMMC1", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for VAUX3, supplies to SDcard slots */ -static struct regulator_init_data ab8500_vaux3_regulator = { - .constraints = { - .name = "V-MMC-SD", - .min_uV = 1100000, - .max_uV = 3300000, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE| - REGULATOR_CHANGE_STATUS, + /* supply for VAUX3, supplies to SDcard slots */ + [AB8500_LDO_AUX3] = { + .constraints = { + .name = "V-MMC-SD", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for tvout, gpadc, TVOUT LDO */ -static struct regulator_init_data ab8500_vtvout_init = { - .constraints = { - .name = "V-TVOUT", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for tvout, gpadc, TVOUT LDO */ + [AB8500_LDO_TVOUT] = { + .constraints = { + .name = "V-TVOUT", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for ab8500-vaudio, VAUDIO LDO */ -static struct regulator_init_data 
ab8500_vaudio_init = { - .constraints = { - .name = "V-AUD", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for ab8500-vaudio, VAUDIO LDO */ + [AB8500_LDO_AUDIO] = { + .constraints = { + .name = "V-AUD", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for v-anamic1 VAMic1-LDO */ -static struct regulator_init_data ab8500_vamic1_init = { - .constraints = { - .name = "V-AMIC1", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for v-anamic1 VAMic1-LDO */ + [AB8500_LDO_ANAMIC1] = { + .constraints = { + .name = "V-AMIC1", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */ -static struct regulator_init_data ab8500_vamic2_init = { - .constraints = { - .name = "V-AMIC2", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */ + [AB8500_LDO_ANAMIC2] = { + .constraints = { + .name = "V-AMIC2", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for v-dmic, VDMIC LDO */ -static struct regulator_init_data ab8500_vdmic_init = { - .constraints = { - .name = "V-DMIC", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for v-dmic, VDMIC LDO */ + [AB8500_LDO_DMIC] = { + .constraints = { + .name = "V-DMIC", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for v-intcore12, VINTCORE12 LDO */ -static struct regulator_init_data ab8500_vintcore_init = { - .constraints = { - .name = "V-INTCORE", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for v-intcore12, VINTCORE12 LDO */ + [AB8500_LDO_INTCORE] = { + .constraints = { + .name = "V-INTCORE", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, -}; - -/* supply for U8500 CSI/DSI, VANA LDO */ -static struct regulator_init_data ab8500_vana_init = { - .constraints = { - .name = "V-CSI/DSI", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + /* supply for U8500 CSI/DSI, VANA LDO */ + [AB8500_LDO_ANA] = { + .constraints = { + .name = "V-CSI/DSI", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }, }; - diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h new file mode 100644 index 000000000000..2675fae52537 --- /dev/null +++ b/arch/arm/mach-ux500/board-mop500-regulators.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Author: Bengt Jonsson for ST-Ericsson + * + * MOP500 board specific initialization for regulators + */ + +#ifndef __BOARD_MOP500_REGULATORS_H +#define __BOARD_MOP500_REGULATORS_H + +#include +#include + +extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS]; + +#endif diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index a1c9ea1a66df..a393f57ed2a8 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -35,6 +35,7 @@ #include "devices-db8500.h" #include "pins-db8500.h" #include "board-mop500.h" +#include "board-mop500-regulators.h" static pin_cfg_t mop500_pins[] = { /* SSP0 */ @@ -80,6 +81,8 @@ static pin_cfg_t mop500_pins[] = { static struct ab8500_platform_data ab8500_platdata = { .irq_base = MOP500_AB8500_IRQ_BASE, + .regulator = ab8500_regulators, + .num_regulator = ARRAY_SIZE(ab8500_regulators), }; static struct resource ab8500_resources[] = { diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index fcc1e628e050..9d30c6f804b9 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -644,7 +644,7 @@ config 
ARM_THUMBEE config SWP_EMULATE bool "Emulate SWP/SWPB instructions" - depends on CPU_V7 + depends on CPU_V7 && !CPU_V6 select HAVE_PROC_CPU if PROC_FS default y if SMP help diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6b48e0a3d7aa..4771dba61448 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -577,7 +577,7 @@ EXPORT_SYMBOL(dma_map_sg); * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers - * @nents: number of buffers to unmap (returned from dma_map_sg) + * @nents: number of buffers to unmap (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) * * Unmap a set of streaming mode DMA translations. Again, CPU access diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index c29f2839f1d2..2b269c955524 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "mm.h" diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 93292a18cf77..709244c66fa3 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -50,7 +50,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!new_pmd) goto no_pmd; - new_pte = pte_alloc_map(mm, new_pmd, 0); + new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); if (!new_pte) goto no_pte; diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b49fab21517c..0c1172b56b4e 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -159,7 +159,9 @@ ENTRY(cpu_v7_set_pte_ext) tstne r1, #L_PTE_PRESENT moveq r3, #0 - str r3, [r0, #2048]! + ARM( str r3, [r0, #2048]! ) + THUMB( add r0, r0, #2048 ) + THUMB( str r3, [r0] ) mcr p15, 0, r0, c7, c10, 1 @ flush_pte #endif mov pc, lr diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h index 58a49cc83797..ba65c9231a78 100644 --- a/arch/arm/plat-mxc/include/mach/irqs.h +++ b/arch/arm/plat-mxc/include/mach/irqs.h @@ -70,7 +70,7 @@ extern int imx_irq_set_priority(unsigned char irq, unsigned char prio); /* all normal IRQs can be FIQs */ #define FIQ_START 0 -/* switch betwean IRQ and FIQ */ +/* switch between IRQ and FIQ */ extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type); #endif /* __ASM_ARCH_MXC_IRQS_H__ */ diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 99f57b9c7fa3..971d18636942 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -718,7 +718,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) case METHOD_GPIO_24XX: case METHOD_GPIO_44XX: set_24xx_gpio_triggering(bank, gpio, trigger); - break; + return 0; #endif default: goto bad; @@ -756,8 +756,10 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&bank->lock, flags); retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type); if (retval == 0) { - irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK; - irq_desc[d->irq].status |= type; + struct irq_desc *desc = irq_to_desc(d->irq); + + desc->status &= ~IRQ_TYPE_SENSE_MASK; + desc->status |= type; } spin_unlock_irqrestore(&bank->lock, flags); @@ -1670,7 +1672,9 @@ static void __init omap_gpio_chip_init(struct gpio_bank *bank) for (j = bank->virtual_irq_start; j < bank->virtual_irq_start + bank_width; j++) { - lockdep_set_class(&irq_desc[j].lock, &gpio_lock_class); + struct irq_desc *d = irq_to_desc(j); + + lockdep_set_class(&d->lock, &gpio_lock_class); set_irq_chip_data(j, bank); if (bank_is_mpuio(bank)) 
set_irq_chip(j, &mpuio_irq_chip); diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h index c915a661f1f5..537f4e449f50 100644 --- a/arch/arm/plat-omap/include/plat/display.h +++ b/arch/arm/plat-omap/include/plat/display.h @@ -42,6 +42,10 @@ #define DISPC_IRQ_SYNC_LOST (1 << 14) #define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) #define DISPC_IRQ_WAKEUP (1 << 16) +#define DISPC_IRQ_SYNC_LOST2 (1 << 17) +#define DISPC_IRQ_VSYNC2 (1 << 18) +#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21) +#define DISPC_IRQ_FRAMEDONE2 (1 << 22) struct omap_dss_device; struct omap_overlay_manager; @@ -64,6 +68,7 @@ enum omap_plane { enum omap_channel { OMAP_DSS_CHANNEL_LCD = 0, OMAP_DSS_CHANNEL_DIGIT = 1, + OMAP_DSS_CHANNEL_LCD2 = 2, }; enum omap_color_mode { @@ -142,6 +147,7 @@ enum omap_dss_display_state { enum omap_dss_overlay_managers { OMAP_DSS_OVL_MGR_LCD, OMAP_DSS_OVL_MGR_TV, + OMAP_DSS_OVL_MGR_LCD2, }; enum omap_dss_rotation_type { @@ -268,6 +274,7 @@ struct omap_overlay_info { u16 out_width; /* if 0, out_width == width */ u16 out_height; /* if 0, out_height == height */ u8 global_alpha; + u8 pre_mult_alpha; }; struct omap_overlay { @@ -351,6 +358,8 @@ struct omap_dss_device { enum omap_display_type type; + enum omap_channel channel; + union { struct { u8 data_lines; diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index 6864a997f2ca..1eee85a8abb3 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -351,7 +351,7 @@ struct omap_hwmod_omap2_prcm { /** * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data * @clkctrl_reg: PRCM address of the clock control register - * @rstctrl_reg: adress of the XXX_RSTCTRL register located in the PRM + * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM * @submodule_wkdep_bit: bit shift of the WKDEP range */ struct omap_hwmod_omap4_prcm { diff --git a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h new file mode 100644 index 000000000000..790619734bcd --- /dev/null +++ b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h @@ -0,0 +1,37 @@ +/* + * Header for generic DPI panel driver + * + * Copyright (C) 2010 Canonical Ltd. + * Author: Bryan Wu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H +#define __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H + +#include "display.h" + +/** + * struct panel_generic_dpi_data - panel driver configuration data + * @name: panel name + * @platform_enable: platform specific panel enable function + * @platform_disable: platform specific panel disable function + */ +struct panel_generic_dpi_data { + const char *name; + int (*platform_enable)(struct omap_dss_device *dssdev); + void (*platform_disable)(struct omap_dss_device *dssdev); +}; + +#endif /* __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H */ diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h index 0ff123399f3b..5bd204e55c32 100644 --- a/arch/arm/plat-omap/include/plat/voltage.h +++ b/arch/arm/plat-omap/include/plat/voltage.h @@ -14,6 +14,8 @@ #ifndef __ARCH_ARM_MACH_OMAP2_VOLTAGE_H #define __ARCH_ARM_MACH_OMAP2_VOLTAGE_H +#include + #define VOLTSCALE_VPFORCEUPDATE 1 #define VOLTSCALE_VCBYPASS 2 @@ -65,9 +67,6 @@ struct voltagedomain { char *name; }; -/* API to get the voltagedomain pointer */ -struct voltagedomain *omap_voltage_domain_lookup(char *name); - /** * struct omap_volt_data - Omap voltage specific data. * @voltage_nominal: The possible voltage value in uV @@ -131,16 +130,26 @@ int omap_voltage_register_pmic(struct voltagedomain *voltdm, struct omap_volt_pmic_info *pmic_info); void omap_change_voltscale_method(struct voltagedomain *voltdm, int voltscale_method); +/* API to get the voltagedomain pointer */ +struct voltagedomain *omap_voltage_domain_lookup(char *name); + int omap_voltage_late_init(void); #else static inline int omap_voltage_register_pmic(struct voltagedomain *voltdm, - struct omap_volt_pmic_info *pmic_info) {} + struct omap_volt_pmic_info *pmic_info) +{ + return -EINVAL; +} static inline void omap_change_voltscale_method(struct voltagedomain *voltdm, int voltscale_method) {} static inline int omap_voltage_late_init(void) { return -EINVAL; } +static inline struct voltagedomain *omap_voltage_domain_lookup(char *name) +{ + return ERR_PTR(-EINVAL); +} #endif #endif diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c index 10ce6666687e..268f3ed0a105 100644 --- a/arch/arm/plat-s3c24xx/devs.c +++ b/arch/arm/plat-s3c24xx/devs.c @@ -258,21 +258,6 @@ struct platform_device s3c_device_iis = { EXPORT_SYMBOL(s3c_device_iis); -/* ASoC PCM DMA */ - -static u64 s3c_device_audio_dmamask = 0xffffffffUL; - -struct platform_device s3c_device_pcm = { - .name = "s3c24xx-pcm-audio", - .id = -1, - .dev = { - .dma_mask = &s3c_device_audio_dmamask, - .coherent_dma_mask = 0xffffffffUL - } -}; - -EXPORT_SYMBOL(s3c_device_pcm); - /* RTC */ static struct resource s3c_rtc_resource[] = { @@ -495,8 +480,10 @@ static struct resource s3c_ac97_resource[] = { }, }; +static u64 s3c_device_audio_dmamask = 0xffffffffUL; + struct platform_device s3c_device_ac97 = { - .name = "s3c-ac97", + .name = "samsung-ac97", .id = -1, .num_resources = ARRAY_SIZE(s3c_ac97_resource), .resource = s3c_ac97_resource, diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile index 09dbd78b56f5..29932f88a8d6 100644 --- a/arch/arm/plat-samsung/Makefile +++ b/arch/arm/plat-samsung/Makefile @@ -17,6 +17,7 @@ obj-y += clock.o obj-y += pwm-clock.o obj-y += gpio.o obj-y += gpio-config.o +obj-y += dev-asocdma.o obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT) += gpiolib.o obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o diff --git a/arch/arm/plat-samsung/dev-asocdma.c b/arch/arm/plat-samsung/dev-asocdma.c new file mode 
100644 index 000000000000..a068c4f42d56 --- /dev/null +++ b/arch/arm/plat-samsung/dev-asocdma.c @@ -0,0 +1,25 @@ +/* linux/arch/arm/plat-samsung/dev-asocdma.c + * + * Copyright (c) 2010 Samsung Electronics Co. Ltd + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +static u64 audio_dmamask = DMA_BIT_MASK(32); + +struct platform_device samsung_asoc_dma = { + .name = "samsung-audio", + .id = -1, + .dev = { + .dma_mask = &audio_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + } +}; +EXPORT_SYMBOL(samsung_asoc_dma); diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h index 1be192209a7e..b4d208b42957 100644 --- a/arch/arm/plat-samsung/include/plat/devs.h +++ b/arch/arm/plat-samsung/include/plat/devs.h @@ -32,7 +32,7 @@ extern struct platform_device s3c64xx_device_iisv4; extern struct platform_device s3c64xx_device_spi0; extern struct platform_device s3c64xx_device_spi1; -extern struct platform_device s3c_device_pcm; +extern struct platform_device samsung_asoc_dma; extern struct platform_device s3c64xx_device_pcm0; extern struct platform_device s3c64xx_device_pcm1; diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index eaa57dc969ae..02d531fb3f81 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -355,7 +355,7 @@ static void s3c_pm_finish(void) s3c_pm_check_cleanup(); } -static struct platform_suspend_ops s3c_pm_ops = { +static const struct platform_suspend_ops s3c_pm_ops = { .enter = s3c_pm_enter, .prepare = s3c_pm_prepare, .finish = s3c_pm_finish, diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index 8c6a2440e345..659d119ce712 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c @@ -188,7 +188,7 @@ static void __init set_hw_addr(struct platform_device *pdev) */ regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index 2adc261c9e3d..6ce30fb2ec94 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c @@ -203,7 +203,7 @@ static void __init set_hw_addr(struct platform_device *pdev) */ regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c index 75f19f47fb2f..86fab77a5a00 100644 --- a/arch/avr32/boards/favr-32/setup.c +++ b/arch/avr32/boards/favr-32/setup.c @@ -206,7 +206,7 @@ static void __init set_hw_addr(struct platform_device *pdev) */ regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c index dd009875a405..da14fbdd4e8e 100644 --- a/arch/avr32/boards/hammerhead/setup.c +++ b/arch/avr32/boards/hammerhead/setup.c @@ -150,7 +150,7 @@ static void __init set_hw_addr(struct platform_device *pdev) regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/boards/merisc/setup.c 
b/arch/avr32/boards/merisc/setup.c index 623b077594fc..e61bc948f959 100644 --- a/arch/avr32/boards/merisc/setup.c +++ b/arch/avr32/boards/merisc/setup.c @@ -134,7 +134,7 @@ static void __init set_hw_addr(struct platform_device *pdev) regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c index 523d8e183bef..c4da5cba2dbf 100644 --- a/arch/avr32/boards/mimc200/setup.c +++ b/arch/avr32/boards/mimc200/setup.c @@ -162,7 +162,7 @@ static void __init set_hw_addr(struct platform_device *pdev) */ regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); - if (!pclk) + if (IS_ERR(pclk)) return; clk_enable(pclk); diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index 9854013d2728..6f9ca56de1f6 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -29,6 +26,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -72,8 +70,8 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y -CONFIG_EEPROM_AT24=m CONFIG_NETDEVICES=y CONFIG_TUN=m CONFIG_NET_ETHERNET=y @@ -106,6 +104,7 @@ CONFIG_GPIO_SYSFS=y CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=350 CONFIG_USB_ZERO=m CONFIG_USB_ETH=m CONFIG_USB_GADGETFS=m @@ -115,14 +114,12 @@ CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y -CONFIG_MMC_SPI=m CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y -CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AT32AP700X=y CONFIG_DMADEVICES=y @@ -130,21 +127,23 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_CONFIGFS_FS=m +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y -CONFIG_UFS_FS=y +CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -155,5 +154,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_PCBC=m diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index 7ceda354597b..7eece0af34c9 100644 --- 
a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -31,6 +28,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -74,8 +72,10 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_NETDEVICES=y +CONFIG_TUN=m CONFIG_NET_ETHERNET=y CONFIG_MACB=y # CONFIG_NETDEV_1000 is not set @@ -104,6 +104,7 @@ CONFIG_I2C_GPIO=m CONFIG_SPI=y CONFIG_SPI_ATMEL=y CONFIG_SPI_SPIDEV=m +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y @@ -127,6 +128,7 @@ CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -141,11 +143,14 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y @@ -155,7 +160,6 @@ CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -166,4 +170,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 7bc5b2ce68d5..387eb9d6e423 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -30,6 +27,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -73,8 +71,10 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_NETDEVICES=y +CONFIG_TUN=m CONFIG_NET_ETHERNET=y CONFIG_MACB=y # CONFIG_NETDEV_1000 is not set @@ 
-103,6 +103,7 @@ CONFIG_I2C_GPIO=m CONFIG_SPI=y CONFIG_SPI_ATMEL=y CONFIG_SPI_SPIDEV=m +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y @@ -126,6 +127,7 @@ CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -140,11 +142,14 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y @@ -154,7 +159,6 @@ CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -165,4 +169,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index 4bd36821d4a2..f0fe237133a9 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -29,6 +26,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -74,6 +72,7 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_NETDEVICES=y CONFIG_TUN=m @@ -107,6 +106,7 @@ CONFIG_GPIO_SYSFS=y CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=350 CONFIG_USB_ZERO=m CONFIG_USB_ETH=m CONFIG_USB_GADGETFS=m @@ -116,14 +116,12 @@ CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y -CONFIG_MMC_SPI=m CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y -CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AT32AP700X=y CONFIG_DMADEVICES=y @@ -131,21 +129,23 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_CONFIGFS_FS=m +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y -CONFIG_UFS_FS=y +CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -156,5 +156,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# 
CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_PCBC=m diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig index f8437ef3237f..e4a7c1dc8380 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -32,6 +29,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -77,8 +75,10 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_NETDEVICES=y +CONFIG_TUN=m CONFIG_NET_ETHERNET=y CONFIG_MACB=y # CONFIG_NETDEV_1000 is not set @@ -107,6 +107,7 @@ CONFIG_I2C_GPIO=m CONFIG_SPI=y CONFIG_SPI_ATMEL=y CONFIG_SPI_SPIDEV=m +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y @@ -130,6 +131,7 @@ CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -144,11 +146,14 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y @@ -158,7 +163,6 @@ CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -169,4 +173,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index 7f58f996d945..6f37f70c2c37 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -2,20 +2,17 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_NO_HZ=y @@ -31,6 +28,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y 
@@ -76,8 +74,10 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_NETDEVICES=y +CONFIG_TUN=m CONFIG_NET_ETHERNET=y CONFIG_MACB=y # CONFIG_NETDEV_1000 is not set @@ -106,6 +106,7 @@ CONFIG_I2C_GPIO=m CONFIG_SPI=y CONFIG_SPI_ATMEL=y CONFIG_SPI_SPIDEV=m +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y @@ -129,6 +130,7 @@ CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -143,11 +145,14 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y @@ -157,7 +162,6 @@ CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_SMB_FS=m CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -168,4 +172,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index aec4c43a75da..4fb01f5ab42f 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set @@ -11,7 +10,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -26,6 +25,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -35,6 +35,7 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -58,16 +59,14 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_DATAFLASH=m -CONFIG_MTD_M25P80=m CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_PWM=m CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_SSC=m -CONFIG_EEPROM_AT24=m # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m @@ -120,7 +119,6 @@ CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m # CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set -# CONFIG_SND_DRIVERS is not set CONFIG_SND_AT73C213=m # CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y @@ -131,16 +129,15 @@ CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y -CONFIG_MMC_SPI=m CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=m +CONFIG_LEDS_CLASS=y CONFIG_LEDS_ATMEL_PWM=m CONFIG_LEDS_GPIO=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AT32AP700X=y CONFIG_DMADEVICES=y @@ -149,20 +146,23 @@ CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # 
CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y -# CONFIG_JFFS2_FS_WRITEBUFFER is not set CONFIG_UBIFS_FS=y -CONFIG_MINIX_FS=m CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y +CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y @@ -170,6 +170,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -# CONFIG_CRYPTO_HW is not set -CONFIG_CRC_T10DIF=m diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index 50ba3db682ca..9faaf9b900f2 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -2,22 +2,15 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_AUDIT=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set -# CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -33,6 +26,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -54,18 +48,18 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_DATAFLASH=m -CONFIG_MTD_M25P80=m +CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_PWM=m CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_SSC=m -CONFIG_EEPROM_AT24=m # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m +# CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=m # CONFIG_SATA_PMP is not set CONFIG_PATA_AT32=m @@ -77,6 +71,7 @@ CONFIG_PPP_ASYNC=m CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m CONFIG_INPUT=m +CONFIG_INPUT_EVDEV=m # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=m # CONFIG_MOUSE_PS2 is not set @@ -106,7 +101,6 @@ CONFIG_SND_PCM_OSS=m CONFIG_SND_AT73C213=m # CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_DEBUG_FS=y CONFIG_USB_ZERO=m CONFIG_USB_ETH=m CONFIG_USB_GADGETFS=m @@ -116,36 +110,39 @@ CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y -CONFIG_MMC_SPI=m CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_ATMEL_PWM=m -CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO=m CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_LEDS_TRIGGER_HEARTBEAT=y -CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AT32AP700X=y CONFIG_DMADEVICES=y -CONFIG_DW_DMAC=y -CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=m +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 CONFIG_PROC_KCORE=y CONFIG_TMPFS=y 
-CONFIG_CONFIGFS_FS=m +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y +CONFIG_UBIFS_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -CONFIG_CRC_T10DIF=m diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 329e10ba3b54..3d2a5d85f970 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -1,19 +1,32 @@ CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set -# CONFIG_FUTEX is not set -# CONFIG_EPOLL is not set -# CONFIG_SIGNALFD is not set -# CONFIG_TIMERFD is not set -# CONFIG_EVENTFD is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y -# CONFIG_BLOCK is not set +CONFIG_PROFILING=y +CONFIG_OPROFILE=m +# CONFIG_KPROBES is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BOARD_ATSTK1004=y # CONFIG_OWNERSHIP_TRACE is not set +CONFIG_NMI_DEBUGGING=y +CONFIG_PM=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_AT32AP=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -31,40 +44,104 @@ CONFIG_MTD=y CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y -# CONFIG_MISC_DEVICES is not set -# CONFIG_INPUT is not set +CONFIG_MTD_UBI=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y +CONFIG_ATMEL_PWM=m +CONFIG_ATMEL_TCLIB=y +CONFIG_ATMEL_SSC=m +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SR=m +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=m +# CONFIG_SATA_PMP is not set +CONFIG_PATA_AT32=m +CONFIG_NETDEVICES=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_PPP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_INPUT=m +CONFIG_INPUT_EVDEV=m +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_GPIO=m # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y -# CONFIG_SERIAL_ATMEL_PDC is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set +CONFIG_I2C=m +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_GPIO=m CONFIG_SPI=y CONFIG_SPI_ATMEL=y +CONFIG_SPI_SPIDEV=m +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT32AP700X_WDT=y CONFIG_FB=y CONFIG_FB_ATMEL=y CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_LTV350QV=y # CONFIG_BACKLIGHT_CLASS_DEVICE is not set CONFIG_USB_GADGET=y -CONFIG_USB_ETH=y -# CONFIG_USB_ETH_RNDIS is not set +CONFIG_USB_ZERO=m +CONFIG_USB_ETH=m +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +CONFIG_USB_G_SERIAL=m +CONFIG_USB_CDC_COMPOSITE=m +CONFIG_MMC=y +CONFIG_MMC_TEST=m +CONFIG_MMC_ATMELMCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_ATMEL_PWM=m +CONFIG_LEDS_GPIO=m +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m 
CONFIG_RTC_CLASS=y -# CONFIG_RTC_INTF_PROC is not set CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set +CONFIG_FUSE_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 CONFIG_PROC_KCORE=y -# CONFIG_PROC_PAGE_MONITOR is not set CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y -# CONFIG_JFFS2_FS_WRITEBUFFER is not set +CONFIG_UBIFS_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_FRAME_POINTER=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index dbcc1b51e506..1ed8f22d4fe2 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_SYSCTL_SYSCALL is not set @@ -11,7 +10,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -37,6 +36,7 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,15 +60,13 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_DATAFLASH=m -CONFIG_MTD_DATAFLASH_OTP=y -CONFIG_MTD_M25P80=m CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ATMEL=y CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_MISC_DEVICES=y CONFIG_ATMEL_PWM=m CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_SSC=m @@ -132,17 +130,17 @@ CONFIG_USB_ETH=m CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m +CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y +CONFIG_MMC_TEST=m CONFIG_MMC_ATMELMCI=y -CONFIG_MMC_SPI=m CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=m +CONFIG_LEDS_CLASS=y CONFIG_LEDS_ATMEL_PWM=m CONFIG_LEDS_GPIO=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AT32AP700X=y CONFIG_DMADEVICES=y @@ -156,15 +154,18 @@ CONFIG_EXT4_FS=y CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=y -CONFIG_MINIX_FS=m CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y +CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y @@ -172,7 +173,3 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_FIPS=y -# CONFIG_CRYPTO_HW is not set -CONFIG_CRC_T10DIF=m diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index 0c813b661a0a..aeadc955db32 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -11,7 +11,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y 
CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index dcc01f0eb294..1692beeb7ed3 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -12,7 +12,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=m -CONFIG_KPROBES=y +# CONFIG_KPROBES is not set CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h index ab608b70b24d..244f2acab546 100644 --- a/arch/avr32/include/asm/syscalls.h +++ b/arch/avr32/include/asm/syscalls.h @@ -15,20 +15,6 @@ #include #include -/* kernel/process.c */ -asmlinkage int sys_fork(struct pt_regs *); -asmlinkage int sys_clone(unsigned long, unsigned long, - unsigned long, unsigned long, - struct pt_regs *); -asmlinkage int sys_vfork(struct pt_regs *); -asmlinkage int sys_execve(const char __user *, char __user *__user *, - char __user *__user *, struct pt_regs *); - -/* kernel/signal.c */ -asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *, - struct pt_regs *); -asmlinkage int sys_rt_sigreturn(struct pt_regs *); - /* mm/cache.c */ asmlinkage int sys_cacheflush(int, void __user *, size_t); diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 9c46aaad11ce..ef5a2a08fcca 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -367,14 +367,13 @@ asmlinkage int sys_fork(struct pt_regs *regs) } asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, - unsigned long parent_tidptr, - unsigned long child_tidptr, struct pt_regs *regs) + void __user *parent_tidptr, void __user *child_tidptr, + struct pt_regs *regs) { if (!newsp) newsp = regs->sp; - return do_fork(clone_flags, newsp, regs, 0, - (int __user *)parent_tidptr, - (int __user *)child_tidptr); + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, + child_tidptr); } asmlinkage int sys_vfork(struct pt_regs *regs) diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 668ed2817e51..05ad29112ff4 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -35,7 +35,6 @@ static struct clocksource counter = { .rating = 50, .read = read_cycle_count, .mask = CLOCKSOURCE_MASK(32), - .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -123,9 +122,7 @@ void __init time_init(void) /* figure rate for counter */ counter_hz = clk_get_rate(boot_cpu_data.clk); - counter.mult = clocksource_hz2mult(counter_hz, counter.shift); - - ret = clocksource_register(&counter); + ret = clocksource_register_hz(&counter, counter_hz); if (ret) pr_debug("timer: could not register clocksource: %d\n", ret); diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c index f021edfeaab0..32d680eb6f48 100644 --- a/arch/avr32/mach-at32ap/pm.c +++ b/arch/avr32/mach-at32ap/pm.c @@ -176,7 +176,7 @@ out: return 0; } -static struct platform_suspend_ops avr32_pm_ops = { +static const struct platform_suspend_ops avr32_pm_ops = { .valid = avr32_pm_valid_state, .enter = avr32_pm_enter, }; diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h index 32529a03b266..725bb35f3aaa 100644 --- a/arch/blackfin/mach-bf537/include/mach/defBF534.h +++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h @@ -1418,7 +1418,7 @@ #define SADD_LEN 0x0002 /* Slave Address Length */ #define STDVAL 0x0004 /* Slave 
Transmit Data Valid */ #define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */ -#define GEN 0x0010 /* General Call Adrress Matching Enabled */ +#define GEN 0x0010 /* General Call Address Matching Enabled */ /* TWI_SLAVE_STAT Masks */ #define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */ diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index 42fa87e8375c..3c648a077e75 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -223,7 +223,7 @@ static int bfin_pm_enter(suspend_state_t state) return 0; } -struct platform_suspend_ops bfin_pm_ops = { +static const struct platform_suspend_ops bfin_pm_ops = { .enter = bfin_pm_enter, .valid = bfin_pm_valid, }; diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S index e705f5cce969..d671fed451c9 100644 --- a/arch/cris/arch-v32/lib/nand_init.S +++ b/arch/cris/arch-v32/lib/nand_init.S @@ -139,7 +139,7 @@ copy_nand_to_ram: lsrq 8, $r4 move.b $r4, [$r1] ; Row address lsrq 8, $r4 - move.b $r4, [$r1] ; Row adddress + move.b $r4, [$r1] ; Row address moveq 20, $r4 2: bne 2b subq 1, $r4 diff --git a/arch/cris/include/asm/etraxgpio.h b/arch/cris/include/asm/etraxgpio.h index d474818a537e..461c089db765 100644 --- a/arch/cris/include/asm/etraxgpio.h +++ b/arch/cris/include/asm/etraxgpio.h @@ -1,5 +1,5 @@ /* - * The following devices are accessable using this driver using + * The following devices are accessible using this driver using * GPIO_MAJOR (120) and a couple of minor numbers. * * For ETRAX 100LX (CONFIG_ETRAX_ARCH_V10): diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index b5096430ce1c..4e73092e85c0 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -86,7 +86,7 @@ int set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug index ee671c3f2c74..e8d1b236ad8c 100644 --- a/arch/h8300/Kconfig.debug +++ b/arch/h8300/Kconfig.debug @@ -48,7 +48,7 @@ config DEFAULT_CMDLINE builtin kernel commandline enabled. config KERNEL_COMMAND - string "Buildin commmand string" + string "Buildin command string" depends on DEFAULT_CMDLINE help builtin kernel commandline strings. 
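
The avr32 time.c hunk above is one instance of a tree-wide conversion: instead of computing .mult/.shift by hand with clocksource_hz2mult() and calling clocksource_register(), the driver passes the raw counter rate to clocksource_register_hz() and lets the clocksource core derive both values. A minimal sketch of that pattern follows; the counter, register names and the 12.5 MHz rate are illustrative assumptions, not taken from this merge.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

/* hypothetical counter read-out; a real driver would read its hardware here */
static u32 example_hw_count;

static cycle_t example_read_cycles(struct clocksource *cs)
{
	return (cycle_t)example_hw_count;
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 50,
	.read	= example_read_cycles,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_time_init(void)
{
	u32 counter_hz = 12500000;	/* assumed counter rate in Hz */

	/*
	 * Old style (what the hunk above removes):
	 *   example_cs.shift = 16;
	 *   example_cs.mult  = clocksource_hz2mult(counter_hz, example_cs.shift);
	 *   return clocksource_register(&example_cs);
	 *
	 * New style: the core computes mult/shift from the rate.
	 */
	return clocksource_register_hz(&example_cs, counter_hz);
}

The same "push policy into the core" idea shows up elsewhere in this series, e.g. the repeated platform_suspend_ops constification and the module_alloc() switch to __vmalloc_node_range().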
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index cc8335eb3110..e5a6c3530c6c 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -426,6 +426,11 @@ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size) extern void iounmap (volatile void __iomem *addr); extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); extern void early_iounmap (volatile void __iomem *addr, unsigned long size); +static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) +{ + return ioremap(phys_addr, size); +} + /* * String version of IO memory access ops: diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 2f229e5de498..2689ee54a1c9 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -590,6 +590,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu); int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); void kvm_sal_emul(struct kvm_vcpu *vcpu); +#define __KVM_HAVE_ARCH_VM_ALLOC 1 +struct kvm *kvm_arch_alloc_vm(void); +void kvm_arch_free_vm(struct kvm *kvm); + #endif /* __ASSEMBLY__*/ #endif diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index 41b6d31110fd..961a16f43e6b 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -189,6 +189,7 @@ get_order (unsigned long size) # define pgprot_val(x) ((x).pgprot) # define __pte(x) ((pte_t) { (x) } ) +# define __pmd(x) ((pmd_t) { (x) } ) # define __pgprot(x) ((pgprot_t) { (x) } ) #else /* !STRICT_MM_TYPECHECKS */ diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 348e44d08ce3..03afe7970748 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -717,8 +717,9 @@ prefetchw (const void *x) #define spin_lock_prefetch(x) prefetchw(x) extern unsigned long boot_option_idle_override; -extern unsigned long idle_halt; -extern unsigned long idle_nomwait; + +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, + IDLE_NOMWAIT, IDLE_POLL}; #endif /* !__ASSEMBLY__ */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 7b897b7b0ae6..90ebceb899a0 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -479,7 +479,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) { printk_once(KERN_WARNING - "node_cpuid[%d] is too small, may not be able to use all cpus\n", + "node_cpuid[%ld] is too small, may not be able to use all cpus\n", ARRAY_SIZE(node_cpuid)); return; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index d92d5b5161fc..89accc626b86 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -617,11 +617,14 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, return get_unmapped_area(file, addr, len, pgoff, flags); } +/* forward declaration */ +static const struct dentry_operations pfmfs_dentry_operations; static struct dentry * pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC); + return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations, + PFMFS_MAGIC); } static struct file_system_type pfm_fs_type = { @@ -2232,7 +2235,6 @@ pfm_alloc_file(pfm_context_t *ctx) } path.mnt = mntget(pfmfs_mnt); - d_set_d_op(path.dentry, &pfmfs_dentry_operations); d_add(path.dentry, inode); 
file = alloc_file(&path, FMODE_READ, &pfm_file_ops); diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 16f1c7b04c69..6d33c5cc94f0 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -53,12 +53,8 @@ void (*ia64_mark_idle)(int); -unsigned long boot_option_idle_override = 0; +unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); -unsigned long idle_halt; -EXPORT_SYMBOL(idle_halt); -unsigned long idle_nomwait; -EXPORT_SYMBOL(idle_nomwait); void (*pm_idle) (void); EXPORT_SYMBOL(pm_idle); void (*pm_power_off) (void); diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f56a6316e134..70d224d4264c 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -749,7 +749,7 @@ out: return r; } -static struct kvm *kvm_alloc_kvm(void) +struct kvm *kvm_arch_alloc_vm(void) { struct kvm *kvm; @@ -760,7 +760,7 @@ static struct kvm *kvm_alloc_kvm(void) vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); if (!vm_base) - return ERR_PTR(-ENOMEM); + return NULL; memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); kvm = (struct kvm *)(vm_base + @@ -806,10 +806,12 @@ static void kvm_build_io_pmt(struct kvm *kvm) #define GUEST_PHYSICAL_RR4 0x2739 #define VMM_INIT_RR 0x1660 -static void kvm_init_vm(struct kvm *kvm) +int kvm_arch_init_vm(struct kvm *kvm) { BUG_ON(!kvm); + kvm->arch.is_sn2 = ia64_platform_is("sn2"); + kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; kvm->arch.vmm_init_rr = VMM_INIT_RR; @@ -823,21 +825,8 @@ static void kvm_init_vm(struct kvm *kvm) /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); -} - -struct kvm *kvm_arch_create_vm(void) -{ - struct kvm *kvm = kvm_alloc_kvm(); - - if (IS_ERR(kvm)) - return ERR_PTR(-ENOMEM); - - kvm->arch.is_sn2 = ia64_platform_is("sn2"); - - kvm_init_vm(kvm); - - return kvm; + return 0; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, @@ -962,7 +951,9 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { + mutex_lock(&kvm->slots_lock); kvm_ioapic_destroy(kvm); + mutex_unlock(&kvm->slots_lock); goto out; } break; @@ -1357,7 +1348,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return -EINVAL; } -static void free_kvm(struct kvm *kvm) +void kvm_arch_free_vm(struct kvm *kvm) { unsigned long vm_base = kvm->arch.vm_base; @@ -1399,9 +1390,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) #endif kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); - kvm_free_physmem(kvm); - cleanup_srcu_struct(&kvm->srcu); - free_kvm(kvm); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index fb8f9f59a1ed..f1e17d3d6cd9 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c @@ -130,7 +130,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, local_irq_save(psr); - /*Intercept the acces for PIB range*/ + /*Intercept the access for PIB range*/ if (iot == GPFN_PIB) { if (!dir) lsapic_write(vcpu, src_pa, s, *dest); diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 1841ee7e65f9..5ca674b74737 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -38,7 +38,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) if (pud) { pmd = pmd_alloc(mm, pud, taddr); if (pmd) - pte = pte_alloc_map(mm, pmd, taddr); + pte = 
pte_alloc_map(mm, NULL, pmd, taddr); } return pte; } diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S index 73613b5f1ee5..26e85e2b7a5e 100644 --- a/arch/m68k/ifpsp060/src/fpsp.S +++ b/arch/m68k/ifpsp060/src/fpsp.S @@ -3881,7 +3881,7 @@ _fpsp_fline: # FP Unimplemented Instruction stack frame and jump to that entry # point. # -# but, if the FPU is disabled, then we need to jump to the FPU diabled +# but, if the FPU is disabled, then we need to jump to the FPU disabled # entry point. movc %pcr,%d0 btst &0x1,%d0 diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m548xgpt.h index c8ef158a1c4e..33b2eef90f0a 100644 --- a/arch/m68k/include/asm/m548xgpt.h +++ b/arch/m68k/include/asm/m548xgpt.h @@ -59,11 +59,13 @@ #define MCF_GPT_GMS_GPIO_INPUT (0x00000000) #define MCF_GPT_GMS_GPIO_OUTLO (0x00000020) #define MCF_GPT_GMS_GPIO_OUTHI (0x00000030) +#define MCF_GPT_GMS_GPIO_MASK (0x00000030) #define MCF_GPT_GMS_TMS_DISABLE (0x00000000) #define MCF_GPT_GMS_TMS_INCAPT (0x00000001) #define MCF_GPT_GMS_TMS_OUTCAPT (0x00000002) #define MCF_GPT_GMS_TMS_PWM (0x00000003) #define MCF_GPT_GMS_TMS_GPIO (0x00000004) +#define MCF_GPT_GMS_TMS_MASK (0x00000007) /* Bit definitions and macros for MCF_GPT_GCIR */ #define MCF_GPT_GCIR_CNT(x) (((x)&0x0000FFFF)<<0) diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index ba6ccab64018..a4c3eb60706e 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -88,7 +88,7 @@ void __init psc_init(void) /* * The PSC is always at the same spot, but using psc - * keeps things consisant with the psc_xxxx functions. + * keeps things consistent with the psc_xxxx functions. */ psc = (void *) PSC_BASE; diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c index e5916a516e58..647e518c90bc 100644 --- a/arch/mips/alchemy/common/power.c +++ b/arch/mips/alchemy/common/power.c @@ -130,7 +130,7 @@ static void restore_core_regs(void) au_writel(sleep_usb[1], USBD_ENABLE); au_sync(); #else - /* enable accces to OTG memory */ + /* enable access to OTG memory */ au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4); au_sync(); diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c index 4bbd3133e451..acaf91b5e461 100644 --- a/arch/mips/alchemy/devboards/pm.c +++ b/arch/mips/alchemy/devboards/pm.c @@ -110,7 +110,7 @@ static void db1x_pm_end(void) } -static struct platform_suspend_ops db1x_pm_ops = { +static const struct platform_suspend_ops db1x_pm_ops = { .valid = suspend_valid_only_mem, .begin = db1x_pm_begin, .enter = db1x_pm_enter, diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 02f505f23c32..ea57f39e6736 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -104,7 +104,7 @@ int rtc_mips_set_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds, RTC_SECONDS); CMOS_WRITE(real_minutes, RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h index 076f2eeaa575..c86ef094ec37 100644 --- a/arch/mips/include/asm/mach-powertv/ioremap.h +++ b/arch/mips/include/asm/mach-powertv/ioremap.h @@ -88,7 +88,7 @@ static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma) } /* These are not portable and should not be used in drivers. 
Drivers should - * be using ioremap() and friends to map physical addreses to virtual + * be using ioremap() and friends to map physical addresses to virtual * addresses and dma_map*() and friends to map virtual addresses into DMA * addresses and back. */ diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h index 199b45733a95..4a08dbe37db8 100644 --- a/arch/mips/include/asm/mc146818-time.h +++ b/arch/mips/include/asm/mc146818-time.h @@ -66,7 +66,7 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds, RTC_SECONDS); CMOS_WRITE(real_minutes, RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h index c892bfb3e2c1..785b4ea4ec3f 100644 --- a/arch/mips/include/asm/mman.h +++ b/arch/mips/include/asm/mman.h @@ -77,6 +77,9 @@ #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ #define MADV_HWPOISON 100 /* poison a page for testing */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 5c0a3575877c..2c0e107966ad 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -65,7 +65,7 @@ static struct nand_ecclayout qi_lb60_ecclayout_1gb = { /* Early prototypes of the QI LB60 had only 1GB of NAND. * In order to support these devices aswell the partition and ecc layout is - * initalized depending on the NAND size */ + * initialized depending on the NAND size */ static struct mtd_partition qi_lb60_partitions_1gb[] = { { .name = "NAND BOOT partition", @@ -464,7 +464,7 @@ static int __init qi_lb60_board_setup(void) board_gpio_setup(); if (qi_lb60_init_platform_devices()) - panic("Failed to initalize platform devices\n"); + panic("Failed to initialize platform devices\n"); return 0; } diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 38f60f35156c..88e6aeda5bf1 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -546,7 +546,7 @@ static int __init jz4740_gpio_init(void) for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); - printk(KERN_INFO "JZ4740 GPIO initalized\n"); + printk(KERN_INFO "JZ4740 GPIO initialized\n"); return 0; } diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c index a9994585424d..902d5b50124c 100644 --- a/arch/mips/jz4740/pm.c +++ b/arch/mips/jz4740/pm.c @@ -42,7 +42,7 @@ static int jz4740_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops jz4740_pm_ops = { +static const struct platform_suspend_ops jz4740_pm_ops = { .valid = suspend_valid_only_mem, .enter = jz4740_pm_enter, }; diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 6f51dda87fce..d87a72e9fac7 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -46,17 +46,9 @@ static DEFINE_SPINLOCK(dbe_lock); void *module_alloc(unsigned long size) { #ifdef MODULE_START - struct vm_struct *area; - - size = PAGE_ALIGN(size); - if (!size) - return NULL; - - area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END); - if (!area) - return NULL; - - return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); + return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, + GFP_KERNEL, 
PAGE_KERNEL, -1, + __builtin_return_address(0)); #else if (size == 0) return NULL; diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c index 6c1fd9001712..f55e07aee071 100644 --- a/arch/mips/loongson/common/pm.c +++ b/arch/mips/loongson/common/pm.c @@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspend_state_t state) } } -static struct platform_suspend_ops loongson_pm_ops = { +static const struct platform_suspend_ops loongson_pm_ops = { .valid = loongson_pm_valid_state, .enter = loongson_pm_enter, }; diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index b27419c84919..a96d281f9221 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -43,7 +43,7 @@ static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS]; static char *mtypes[3] = { "Dont use memory", "YAMON PROM memory", - "Free memmory", + "Free memory", }; #endif diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index 385f035b24e4..0583c463e5f1 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -900,7 +900,7 @@ static int cvmx_pcie_rc_initialize(int pcie_port) mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Writes. */ mem_access_subid.s.row = 0; - /* PCIe Adddress Bits <63:34>. */ + /* PCIe Address Bits <63:34>. */ mem_access_subid.s.ba = 0; /* diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c index 73880ad29bc2..fb3d29660c42 100644 --- a/arch/mips/powertv/memory.c +++ b/arch/mips/powertv/memory.c @@ -57,7 +57,7 @@ unsigned long ptv_memsize; /* - * struct low_mem_reserved - Items in low memmory that are reserved + * struct low_mem_reserved - Items in low memory that are reserved * @start: Physical address of item * @size: Size, in bytes, of this item * @is_aliased: True if this is RAM aliased from another location. If false, diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index 9a0be810cafa..96e69a00ffc8 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c @@ -107,7 +107,7 @@ int txx9_pci_mem_high __initdata; /* * allocate pci_controller and resources. - * mem_base, io_base: physical addresss. 0 for auto assignment. + * mem_base, io_base: physical address. 0 for auto assignment. * mem_size and io_size means max size on auto assignment. * pcic must be &txx9_primary_pcic or NULL. */ diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 41ba38513c89..8ed41cf2b08d 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -203,6 +203,7 @@ endmenu config SMP bool "Symmetric multi-processing support" default y + select USE_GENERIC_SMP_HELPERS depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 ---help--- This enables support for systems with more than one CPU. 
If you have @@ -226,11 +227,6 @@ config NR_CPUS depends on SMP default "2" -config USE_GENERIC_SMP_HELPERS - bool - depends on SMP - default y - source "kernel/Kconfig.preempt" config MN10300_CURRENT_IN_E2 diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c index e9e20f9a4dd3..48d7058b3295 100644 --- a/arch/mn10300/kernel/rtc.c +++ b/arch/mn10300/kernel/rtc.c @@ -89,7 +89,7 @@ static int set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds, RTC_SECONDS); CMOS_WRITE(real_minutes, RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index 9749c8afe83a..f5b7bf5fba68 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -59,6 +59,9 @@ #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ +#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */ + /* compatibility flags */ #define MAP_FILE 0 #define MAP_VARIABLE 0 diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h index 4b0e15206006..6b6dc20b0beb 100644 --- a/arch/powerpc/include/asm/8xx_immap.h +++ b/arch/powerpc/include/asm/8xx_immap.h @@ -93,7 +93,7 @@ typedef struct mem_ctlr { } memctl8xx_t; /*----------------------------------------------------------------------- - * BR - Memory Controler: Base Register 16-9 + * BR - Memory Controller: Base Register 16-9 */ #define BR_BA_MSK 0xffff8000 /* Base Address Mask */ #define BR_AT_MSK 0x00007000 /* Address Type Mask */ @@ -110,7 +110,7 @@ typedef struct mem_ctlr { #define BR_V 0x00000001 /* Bank Valid */ /*----------------------------------------------------------------------- - * OR - Memory Controler: Option Register 16-11 + * OR - Memory Controller: Option Register 16-11 */ #define OR_AM_MSK 0xffff8000 /* Address Mask Mask */ #define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index e316847c08c0..badc983031b3 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -1307,12 +1307,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) int err = -ENOMEM; unsigned long p; - vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); + vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); if (!vcpu_book3s) goto out; - memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s)); - vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); if (!vcpu_book3s->shadow_vcpu) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 38f756f25053..99758460efde 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -145,18 +145,12 @@ void kvm_arch_check_processor_compat(void *rtn) *(int *)rtn = kvmppc_core_check_processor_compat(); } -struct kvm *kvm_arch_create_vm(void) +int kvm_arch_init_vm(struct kvm *kvm) { - struct kvm *kvm; - - kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); - if (!kvm) - return ERR_PTR(-ENOMEM); - - return kvm; + return 0; } -static void kvmppc_free_vcpus(struct kvm *kvm) +void kvm_arch_destroy_vm(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; @@ -176,14 +170,6 @@ void kvm_arch_sync_events(struct kvm *kvm) { } -void kvm_arch_destroy_vm(struct kvm *kvm) 
-{ - kvmppc_free_vcpus(kvm); - kvm_free_physmem(kvm); - cleanup_srcu_struct(&kvm->srcu); - kfree(kvm); -} - int kvm_dev_ioctl_check_extension(long ext) { int r; diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index d7efdbf640c7..fec13200868f 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c @@ -16,6 +16,16 @@ #ifdef __HAVE_ARCH_PTE_SPECIAL +static inline void get_huge_page_tail(struct page *page) +{ + /* + * __split_huge_page_refcount() cannot run + * from under us. + */ + VM_BUG_ON(atomic_read(&page->_count) < 0); + atomic_inc(&page->_count); +} + /* * The performance critical leaf functions are made noinline otherwise gcc * inlines everything into a single function which results in too much @@ -47,6 +57,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, put_page(page); return 0; } + if (PageTail(page)) + get_huge_page_tail(page); pages[*nr] = page; (*nr)++; diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 7fd90d02d8c6..c4d2b7167568 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -1469,7 +1469,7 @@ static int cell_global_start(struct op_counter_config *ctr) * The pm_interval register is setup to write the SPU PC value into the * trace buffer at the maximum rate possible. The trace buffer is configured * to store the PCs, wrapping when it is full. The performance counter is - * intialized to the max hardware count minus the number of events, N, between + * initialized to the max hardware count minus the number of events, N, between * samples. Once the N events have occured, a HW counter overflow occurs * causing the generation of a HW counter interrupt which also stops the * writing of the SPU PC values to the trace buffer. 
Hence the last PC diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index 80234e5921f5..eda0fc2a3914 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -232,7 +232,7 @@ static void lite5200_pm_end(void) lite5200_pm_target_state = PM_SUSPEND_ON; } -static struct platform_suspend_ops lite5200_pm_ops = { +static const struct platform_suspend_ops lite5200_pm_ops = { .valid = lite5200_pm_valid, .begin = lite5200_pm_begin, .prepare = lite5200_pm_prepare, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index 568cef636275..8310e8b5b57f 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -186,7 +186,7 @@ void mpc52xx_pm_finish(void) iounmap(mbar); } -static struct platform_suspend_ops mpc52xx_pm_ops = { +static const struct platform_suspend_ops mpc52xx_pm_ops = { .valid = mpc52xx_pm_valid, .prepare = mpc52xx_pm_prepare, .enter = mpc52xx_pm_enter, diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 1930543c98d3..3d1ecd211776 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -231,7 +231,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) ori r4, r4, 0x002a mtspr SPRN_DBAT0L, r4 lis r8, TMP_VIRT_IMMR@h - ori r4, r8, 0x001e /* 1 MByte accessable from Kernel Space only */ + ori r4, r8, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT0U, r4 isync @@ -241,7 +241,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) ori r4, r4, 0x002a mtspr SPRN_DBAT1L, r4 lis r9, (TMP_VIRT_IMMR + 0x01000000)@h - ori r4, r9, 0x001e /* 1 MByte accessable from Kernel Space only */ + ori r4, r9, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT1U, r4 isync @@ -253,7 +253,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) li r4, 0x0002 mtspr SPRN_DBAT2L, r4 lis r4, KERNELBASE@h - ori r4, r4, 0x001e /* 1 MByte accessable from Kernel Space only */ + ori r4, r4, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT2U, r4 isync diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 75ae77f1af6a..fd4f2f2f19e6 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void) return ret; } -static struct platform_suspend_ops mpc83xx_suspend_ops = { +static const struct platform_suspend_ops mpc83xx_suspend_ops = { .valid = mpc83xx_suspend_valid, .begin = mpc83xx_suspend_begin, .enter = mpc83xx_suspend_enter, diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index aa34cac4eb5c..747d1ee661fd 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -309,7 +309,7 @@ static void __init mpc85xx_mds_qe_init(void) /* P1021 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 - * and QE12 for QE MII management singals in PMUXCR + * and QE12 for QE MII management signals in PMUXCR * register. 
*/ setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index b341018326df..6c4b5837fc8a 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -566,10 +566,10 @@ static int ps3_setup_dynamic_device(const struct ps3_repository_device *repo) case PS3_DEV_TYPE_STOR_DISK: result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); - /* Some devices are not accessable from the Other OS lpar. */ + /* Some devices are not accessible from the Other OS lpar. */ if (result == -ENODEV) { result = 0; - pr_debug("%s:%u: not accessable\n", __func__, + pr_debug("%s:%u: not accessible\n", __func__, __LINE__); } diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 59d9712d7364..92290ff4761a 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -44,7 +44,7 @@ * @lock: * @ipi_debug_brk_mask: * - * The HV mantains per SMT thread mappings of HV outlet to HV plug on + * The HV maintains per SMT thread mappings of HV outlet to HV plug on * behalf of the guest. These mappings are implemented as 256 bit guest * supplied bitmaps indexed by plug number. The addresses of the bitmaps * are registered with the HV through lv1_configure_irq_state_bitmap(). diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index e19ff021e711..f106662f4381 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -55,7 +55,7 @@ static void hc_stop(struct seq_file *m, void *p) static int hc_show(struct seq_file *m, void *p) { unsigned long h_num = (unsigned long)p; - struct hcall_stats *hs = (struct hcall_stats *)m->private; + struct hcall_stats *hs = m->private; if (hs[h_num].num_calls) { if (cpu_has_feature(CPU_FTR_PURR)) diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index ed72098bb4e3..a8ca289ff267 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -153,7 +153,7 @@ static struct sysdev_class suspend_sysdev_class = { .name = "power", }; -static struct platform_suspend_ops pseries_suspend_ops = { +static const struct platform_suspend_ops pseries_suspend_ops = { .valid = suspend_valid_only_mem, .begin = pseries_suspend_begin, .prepare_late = pseries_prepare_late, diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 44de8559c975..e9381bfefb21 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_state_t state) return 1; } -static struct platform_suspend_ops pmc_suspend_ops = { +static const struct platform_suspend_ops pmc_suspend_ops = { .valid = pmc_suspend_valid, .enter = pmc_suspend_enter, }; diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 9725369d432a..9f99bef2adec 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -973,7 +973,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance) if (dsr & DOORBELL_DSR_QFI) { pr_info("RIO: doorbell queue full\n"); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); - goto out; } /* XXX Need to check/dispatch until queue empty */ diff --git a/arch/s390/defconfig b/arch/s390/defconfig index d79697157ac0..29c82c640a88 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -5,10 +5,21 @@ 
CONFIG_AUDIT=y CONFIG_RCU_TRACE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_PERF_EVENTS=y +# CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -19,7 +30,9 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y CONFIG_BINFMT_MISC=m +CONFIG_CMM=m CONFIG_HZ_100=y CONFIG_KEXEC=y CONFIG_PM=y @@ -105,6 +118,7 @@ CONFIG_DEBUG_LIST=y CONFIG_DEBUG_NOTIFIERS=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_KPROBES_SANITY_TEST=y +CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y CONFIG_CPU_NOTIFIER_ERROR_INJECT=m CONFIG_LATENCYTOP=y CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index a875c2f542e1..da359ca6fe55 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -169,7 +169,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) static inline int is_compat_task(void) { - return test_thread_flag(TIF_31BIT); + return is_32bit_task(); } #else diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 354d42616c7e..10c029cfcc7d 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -161,7 +161,9 @@ extern unsigned int vdso_enabled; use of this is to invoke "./ld.so someprog" to test out a new version of the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) + +extern unsigned long randomize_et_dyn(unsigned long base); +#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
*/ @@ -206,6 +208,8 @@ do { \ current->mm->context.noexec == 0; \ }) +#define STACK_RND_MASK 0x7ffUL + #define ARCH_DLINFO \ do { \ if (vdso_enabled) \ @@ -218,4 +222,7 @@ struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 int arch_setup_additional_pages(struct linux_binprm *, int); +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk + #endif diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 6710b0eac165..8f8d759f6a7b 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -449,7 +449,7 @@ extern void (*_machine_restart)(char *command); extern void (*_machine_halt)(void); extern void (*_machine_power_off)(void); -#define arch_align_stack(x) (x) +extern unsigned long arch_align_stack(unsigned long sp); static inline int tprot(unsigned long addr) { diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ebc77091466f..ad1382f7932e 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -118,6 +118,12 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SINGLE_STEP (1< #include #include +#include #include #include #include +#include #include #include #include @@ -332,3 +334,39 @@ unsigned long get_wchan(struct task_struct *p) } return 0; } + +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_int() & ~PAGE_MASK; + return sp & ~0xf; +} + +static inline unsigned long brk_rnd(void) +{ + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) + return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; + else + return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + + if (ret < mm->brk) + return mm->brk; + return ret; +} + +unsigned long randomize_et_dyn(unsigned long base) +{ + unsigned long ret = PAGE_ALIGN(base + brk_rnd()); + + if (!(current->flags & PF_RANDOMIZE)) + return base; + if (ret < base) + return base; + return ret; +} diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index e3150dd2fe74..f438d74dedbd 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -203,7 +203,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (!uses_interp) return 0; - vdso_base = mm->mmap_base; #ifdef CONFIG_64BIT vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; @@ -233,8 +232,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * fail and end up putting it elsewhere. 
*/ down_write(&mm->mmap_sem); - vdso_base = get_unmapped_area(NULL, vdso_base, - vdso_pages << PAGE_SHIFT, 0, 0); + vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto out_up; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 985d825494f1..bade533ba288 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -164,24 +164,18 @@ long kvm_arch_vm_ioctl(struct file *filp, return r; } -struct kvm *kvm_arch_create_vm(void) +int kvm_arch_init_vm(struct kvm *kvm) { - struct kvm *kvm; int rc; char debug_name[16]; rc = s390_enable_sie(); if (rc) - goto out_nokvm; - - rc = -ENOMEM; - kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); - if (!kvm) - goto out_nokvm; + goto out_err; kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); if (!kvm->arch.sca) - goto out_nosca; + goto out_err; sprintf(debug_name, "kvm-%u", current->pid); @@ -195,13 +189,11 @@ struct kvm *kvm_arch_create_vm(void) debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); - return kvm; + return 0; out_nodbf: free_page((unsigned long)(kvm->arch.sca)); -out_nosca: - kfree(kvm); -out_nokvm: - return ERR_PTR(rc); +out_err: + return rc; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -240,11 +232,8 @@ void kvm_arch_sync_events(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_free_vcpus(kvm); - kvm_free_physmem(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); - cleanup_srcu_struct(&kvm->srcu); - kfree(kvm); } /* Section: vcpu related */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 869efbaed3ea..c9a9f7f18188 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -27,17 +27,44 @@ #include #include #include +#include #include #include +static unsigned long stack_maxrandom_size(void) +{ + if (!(current->flags & PF_RANDOMIZE)) + return 0; + if (current->personality & ADDR_NO_RANDOMIZE) + return 0; + return STACK_RND_MASK << PAGE_SHIFT; +} + /* * Top of mmap area (just below the process stack). * - * Leave an at least ~128 MB hole. + * Leave at least a ~32 MB hole. */ -#define MIN_GAP (128*1024*1024) +#define MIN_GAP (32*1024*1024) #define MAX_GAP (STACK_TOP/6*5) +static inline int mmap_is_legacy(void) +{ + if (current->personality & ADDR_COMPAT_LAYOUT) + return 1; + if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + return 1; + return sysctl_legacy_va_layout; +} + +static unsigned long mmap_rnd(void) +{ + if (!(current->flags & PF_RANDOMIZE)) + return 0; + /* 8MB randomization for mmap_base */ + return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; +} + static inline unsigned long mmap_base(void) { unsigned long gap = rlimit(RLIMIT_STACK); @@ -46,22 +73,8 @@ static inline unsigned long mmap_base(void) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; - - return STACK_TOP - (gap & PAGE_MASK); -} - -static inline int mmap_is_legacy(void) -{ -#ifdef CONFIG_64BIT - /* - * Force standard allocation for 64 bit programs. 
- */ - if (!is_compat_task()) - return 1; -#endif - return sysctl_legacy_va_layout || - (current->personality & ADDR_COMPAT_LAYOUT) || - rlimit(RLIMIT_STACK) == RLIM_INFINITY; + gap &= PAGE_MASK; + return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap; } #ifndef CONFIG_64BIT diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 2018c7ea4c93..d893411022d5 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -3,6 +3,9 @@ menu "Board support" config SOLUTION_ENGINE bool +config SH_ALPHA_BOARD + bool + config SH_SOLUTION_ENGINE bool "SolutionEngine" select SOLUTION_ENGINE @@ -320,6 +323,21 @@ config SH_SH2007 Compact Flash socket, two serial ports and PC-104 bus. More information at . +config SH_APSH4A3A + bool "AP-SH4A-3A" + select SH_ALPHA_BOARD + depends on CPU_SUBTYPE_SH7785 + help + Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A. + +config SH_APSH4AD0A + bool "AP-SH4AD-0A" + select SH_ALPHA_BOARD + select SYS_SUPPORTS_PCI + depends on CPU_SUBTYPE_SH7786 + help + Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A. + endmenu source "arch/sh/boards/mach-r2d/Kconfig" diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index be7d11d04b26..975a0f64ff20 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_SH_ESPT) += board-espt.o obj-$(CONFIG_SH_POLARIS) += board-polaris.o obj-$(CONFIG_SH_TITAN) += board-titan.o obj-$(CONFIG_SH_SH7757LCR) += board-sh7757lcr.o +obj-$(CONFIG_SH_APSH4A3A) += board-apsh4a3a.o +obj-$(CONFIG_SH_APSH4AD0A) += board-apsh4ad0a.o diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c new file mode 100644 index 000000000000..8e2a27057bc9 --- /dev/null +++ b/arch/sh/boards/board-apsh4a3a.c @@ -0,0 +1,175 @@ +/* + * ALPHAPROJECT AP-SH4A-3A Support. + * + * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. + * Copyright (C) 2008 Yoshihiro Shimoda + * Copyright (C) 2009 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct mtd_partition nor_flash_partitions[] = { + { + .name = "loader", + .offset = 0x00000000, + .size = 512 * 1024, + }, + { + .name = "bootenv", + .offset = MTDPART_OFS_APPEND, + .size = 512 * 1024, + }, + { + .name = "kernel", + .offset = MTDPART_OFS_APPEND, + .size = 4 * 1024 * 1024, + }, + { + .name = "data", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct physmap_flash_data nor_flash_data = { + .width = 4, + .parts = nor_flash_partitions, + .nr_parts = ARRAY_SIZE(nor_flash_partitions), +}; + +static struct resource nor_flash_resources[] = { + [0] = { + .start = 0x00000000, + .end = 0x01000000 - 1, + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device nor_flash_device = { + .name = "physmap-flash", + .dev = { + .platform_data = &nor_flash_data, + }, + .num_resources = ARRAY_SIZE(nor_flash_resources), + .resource = nor_flash_resources, +}; + +static struct resource smsc911x_resources[] = { + [0] = { + .name = "smsc911x-memory", + .start = 0xA4000000, + .end = 0xA4000000 + SZ_256 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "smsc911x-irq", + .start = evt2irq(0x200), + .end = evt2irq(0x200), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct smsc911x_platform_config smsc911x_config = { + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, + .flags = SMSC911X_USE_16BIT, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + +static struct platform_device smsc911x_device = { + .name = "smsc911x", + .id = -1, + .num_resources = ARRAY_SIZE(smsc911x_resources), + .resource = smsc911x_resources, + .dev = { + .platform_data = &smsc911x_config, + }, +}; + +static struct platform_device *apsh4a3a_devices[] __initdata = { + &nor_flash_device, + &smsc911x_device, +}; + +static int __init apsh4a3a_devices_setup(void) +{ + return platform_add_devices(apsh4a3a_devices, + ARRAY_SIZE(apsh4a3a_devices)); +} +device_initcall(apsh4a3a_devices_setup); + +static int apsh4a3a_clk_init(void) +{ + struct clk *clk; + int ret; + + clk = clk_get(NULL, "extal"); + if (!clk || IS_ERR(clk)) + return PTR_ERR(clk); + ret = clk_set_rate(clk, 33333000); + clk_put(clk); + + return ret; +} + +/* Initialize the board */ +static void __init apsh4a3a_setup(char **cmdline_p) +{ + printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n"); +} + +static void __init apsh4a3a_init_irq(void) +{ + plat_irq_setup_pins(IRQ_MODE_IRQ7654); +} + +/* Return the board specific boot mode pin configuration */ +static int apsh4a3a_mode_pins(void) +{ + int value = 0; + + /* These are the factory default settings of SW1 and SW2. + * If you change these dip switches then you will need to + * adjust the values below as well. 
+ */ + value &= ~MODE_PIN0; /* Clock Mode 16 */ + value &= ~MODE_PIN1; + value &= ~MODE_PIN2; + value &= ~MODE_PIN3; + value |= MODE_PIN4; + value &= ~MODE_PIN5; /* 16-bit Area0 bus width */ + value |= MODE_PIN6; /* Area 0 SRAM interface */ + value |= MODE_PIN7; + value |= MODE_PIN8; /* Little Endian */ + value |= MODE_PIN9; /* Master Mode */ + value |= MODE_PIN10; /* Crystal resonator */ + value |= MODE_PIN11; /* Display Unit */ + value |= MODE_PIN12; + value &= ~MODE_PIN13; /* 29-bit address mode */ + value |= MODE_PIN14; /* No PLL step-up */ + + return value; +} + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_apsh4a3a __initmv = { + .mv_name = "AP-SH4A-3A", + .mv_setup = apsh4a3a_setup, + .mv_clk_init = apsh4a3a_clk_init, + .mv_init_irq = apsh4a3a_init_irq, + .mv_mode_pins = apsh4a3a_mode_pins, +}; diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c new file mode 100644 index 000000000000..e2bd218a054e --- /dev/null +++ b/arch/sh/boards/board-apsh4ad0a.c @@ -0,0 +1,125 @@ +/* + * ALPHAPROJECT AP-SH4AD-0A Support. + * + * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. + * Copyright (C) 2010 Matt Fleming + * Copyright (C) 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +static struct resource smsc911x_resources[] = { + [0] = { + .name = "smsc911x-memory", + .start = 0xA4000000, + .end = 0xA4000000 + SZ_256 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "smsc911x-irq", + .start = evt2irq(0x200), + .end = evt2irq(0x200), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct smsc911x_platform_config smsc911x_config = { + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, + .flags = SMSC911X_USE_16BIT, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + +static struct platform_device smsc911x_device = { + .name = "smsc911x", + .id = -1, + .num_resources = ARRAY_SIZE(smsc911x_resources), + .resource = smsc911x_resources, + .dev = { + .platform_data = &smsc911x_config, + }, +}; + +static struct platform_device *apsh4ad0a_devices[] __initdata = { + &smsc911x_device, +}; + +static int __init apsh4ad0a_devices_setup(void) +{ + return platform_add_devices(apsh4ad0a_devices, + ARRAY_SIZE(apsh4ad0a_devices)); +} +device_initcall(apsh4ad0a_devices_setup); + +static int apsh4ad0a_mode_pins(void) +{ + int value = 0; + + /* These are the factory default settings of SW1 and SW2. + * If you change these dip switches then you will need to + * adjust the values below as well. 
+ */ + value |= MODE_PIN0; /* Clock Mode 3 */ + value |= MODE_PIN1; + value &= ~MODE_PIN2; + value &= ~MODE_PIN3; + value &= ~MODE_PIN4; /* 16-bit Area0 bus width */ + value |= MODE_PIN5; + value |= MODE_PIN6; + value |= MODE_PIN7; /* Normal mode */ + value |= MODE_PIN8; /* Little Endian */ + value |= MODE_PIN9; /* Crystal resonator */ + value &= ~MODE_PIN10; /* 29-bit address mode */ + value &= ~MODE_PIN11; /* PCI-E Root port */ + value &= ~MODE_PIN12; /* 4 lane + 1 lane */ + value |= MODE_PIN13; /* AUD Enable */ + value &= ~MODE_PIN14; /* Normal Operation */ + + return value; +} + +static int apsh4ad0a_clk_init(void) +{ + struct clk *clk; + int ret; + + clk = clk_get(NULL, "extal"); + if (!clk || IS_ERR(clk)) + return PTR_ERR(clk); + ret = clk_set_rate(clk, 33333000); + clk_put(clk); + + return ret; +} + +/* Initialize the board */ +static void __init apsh4ad0a_setup(char **cmdline_p) +{ + pr_info("Alpha Project AP-SH4AD-0A support:\n"); +} + +static void __init apsh4ad0a_init_irq(void) +{ + plat_irq_setup_pins(IRQ_MODE_IRQ3210); +} + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_apsh4ad0a __initmv = { + .mv_name = "AP-SH4AD-0A", + .mv_setup = apsh4ad0a_setup, + .mv_mode_pins = apsh4ad0a_mode_pins, + .mv_clk_init = apsh4ad0a_clk_init, + .mv_init_irq = apsh4ad0a_init_irq, +}; diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c index 4cb3bb74c36f..541d8a281035 100644 --- a/arch/sh/boards/board-edosk7705.c +++ b/arch/sh/boards/board-edosk7705.c @@ -66,7 +66,7 @@ static int __init init_edosk7705_devices(void) return platform_add_devices(edosk7705_devices, ARRAY_SIZE(edosk7705_devices)); } -__initcall(init_edosk7705_devices); +device_initcall(init_edosk7705_devices); /* * The Machine Vector diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c index 35dc0994875d..f47ac82da876 100644 --- a/arch/sh/boards/board-edosk7760.c +++ b/arch/sh/boards/board-edosk7760.c @@ -182,7 +182,7 @@ static int __init init_edosk7760_devices(void) return platform_add_devices(edosk7760_devices, ARRAY_SIZE(edosk7760_devices)); } -__initcall(init_edosk7760_devices); +device_initcall(init_edosk7760_devices); /* * The Machine Vector diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index fe7e686c94ac..ee65ff05c558 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -284,7 +284,7 @@ static int __init sh7785lcr_devices_setup(void) return platform_add_devices(sh7785lcr_devices, ARRAY_SIZE(sh7785lcr_devices)); } -__initcall(sh7785lcr_devices_setup); +device_initcall(sh7785lcr_devices_setup); /* Initialize IRQ setting */ void __init init_sh7785lcr_IRQ(void) diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 07ea908c510d..3e5fc3bbf3ed 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include @@ -430,11 +432,18 @@ static struct resource sdhi0_cn3_resources[] = { }, }; +static struct sh_mobile_sdhi_info sdhi0_cn3_data = { + .tmio_caps = MMC_CAP_SDIO_IRQ, +}; + static struct platform_device sdhi0_cn3_device = { .name = "sh_mobile_sdhi", .id = 0, /* "sdhi0" clock */ .num_resources = ARRAY_SIZE(sdhi0_cn3_resources), .resource = sdhi0_cn3_resources, + .dev = { + .platform_data = &sdhi0_cn3_data, + }, .archdata = { .hwblk_id = HWBLK_SDHI0, }, @@ -453,11 +462,18 @@ static struct resource sdhi1_cn7_resources[] = { }, }; 
+static struct sh_mobile_sdhi_info sdhi1_cn7_data = { + .tmio_caps = MMC_CAP_SDIO_IRQ, +}; + static struct platform_device sdhi1_cn7_device = { .name = "sh_mobile_sdhi", .id = 1, /* "sdhi1" clock */ .num_resources = ARRAY_SIZE(sdhi1_cn7_resources), .resource = sdhi1_cn7_resources, + .dev = { + .platform_data = &sdhi1_cn7_data, + }, .archdata = { .hwblk_id = HWBLK_SDHI1, }, diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c index 7e8216ac31bd..e89e8e122a26 100644 --- a/arch/sh/boards/mach-cayman/setup.c +++ b/arch/sh/boards/mach-cayman/setup.c @@ -165,7 +165,7 @@ static int __init smsc_superio_setup(void) return 0; } -__initcall(smsc_superio_setup); +device_initcall(smsc_superio_setup); static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len) { diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index f48c492a68d3..33b662999fc6 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -473,6 +473,7 @@ static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .set_pwr = sdhi0_set_pwr, + .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, }; static struct resource sdhi0_resources[] = { @@ -511,6 +512,7 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state) static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, + .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, .set_pwr = sdhi1_set_pwr, }; diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c index 4499a3749d40..adc9b4bba828 100644 --- a/arch/sh/boards/mach-hp6xx/pm.c +++ b/arch/sh/boards/mach-hp6xx/pm.c @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops hp6x0_pm_ops = { +static const struct platform_suspend_ops hp6x0_pm_ops = { .enter = hp6x0_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 9b60eaabf8f3..7504daaa85da 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -366,6 +367,7 @@ static struct sh_mobile_sdhi_info sh7724_sdhi0_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, + .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device kfr2r09_sh_sdhi0_device = { diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c index e79412a40490..c00ace38db3f 100644 --- a/arch/sh/boards/mach-landisk/irq.c +++ b/arch/sh/boards/mach-landisk/irq.c @@ -1,9 +1,10 @@ /* - * arch/sh/boards/landisk/irq.c + * arch/sh/boards/mach-landisk/irq.c * * I-O DATA Device, Inc. LANDISK Support * * Copyright (C) 2005-2007 kogiidena + * Copyright (C) 2011 Nobuhiro Iwamatsu * * Copyright (C) 2001 Ian da Silva, Jeremy Siegel * Based largely on io_se.c. @@ -12,44 +13,54 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ + #include #include #include #include #include -static void disable_landisk_irq(struct irq_data *data) -{ - unsigned char mask = 0xff ^ (0x01 << (data->irq - 5)); +enum { + UNUSED = 0, - __raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK); -} - -static void enable_landisk_irq(struct irq_data *data) -{ - unsigned char value = (0x01 << (data->irq - 5)); + PCI_INTA, /* PCI int A */ + PCI_INTB, /* PCI int B */ + PCI_INTC, /* PCI int C */ + PCI_INTD, /* PCI int D */ + ATA, /* ATA */ + FATA, /* CF */ + POWER, /* Power swtich */ + BUTTON, /* Button swtich */ +}; - __raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK); -} +/* Vectors for LANDISK */ +static struct intc_vect vectors_landisk[] __initdata = { + INTC_IRQ(PCI_INTA, IRQ_PCIINTA), + INTC_IRQ(PCI_INTB, IRQ_PCIINTB), + INTC_IRQ(PCI_INTC, IRQ_PCIINTC), + INTC_IRQ(PCI_INTD, IRQ_PCIINTD), + INTC_IRQ(ATA, IRQ_ATA), + INTC_IRQ(FATA, IRQ_FATA), + INTC_IRQ(POWER, IRQ_POWER), + INTC_IRQ(BUTTON, IRQ_BUTTON), +}; -static struct irq_chip landisk_irq_chip __read_mostly = { - .name = "LANDISK", - .irq_mask = disable_landisk_irq, - .irq_unmask = enable_landisk_irq, +/* IRLMSK mask register layout for LANDISK */ +static struct intc_mask_reg mask_registers_landisk[] __initdata = { + { PA_IMASK, 0, 8, /* IRLMSK */ + { BUTTON, POWER, FATA, ATA, + PCI_INTD, PCI_INTC, PCI_INTB, PCI_INTA, + } + }, }; +static DECLARE_INTC_DESC(intc_desc_landisk, "landisk", vectors_landisk, NULL, + mask_registers_landisk, NULL, NULL); /* * Initialize IRQ setting */ void __init init_landisk_IRQ(void) { - int i; - - for (i = 5; i < 14; i++) { - disable_irq_nosync(i); - set_irq_chip_and_handler_name(i, &landisk_irq_chip, - handle_level_irq, "level"); - enable_landisk_irq(irq_get_irq_data(i)); - } + register_intc_controller(&intc_desc_landisk); __raw_writeb(0x00, PA_PWRINT_CLR); } diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 50337acc18c5..94186cf079b6 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -21,8 +21,6 @@ #include #include -void init_landisk_IRQ(void); - static void landisk_power_off(void) { __raw_writeb(0x01, PA_SHUTDOWN); @@ -83,7 +81,7 @@ static int __init landisk_devices_setup(void) ARRAY_SIZE(landisk_devices)); } -__initcall(landisk_devices_setup); +device_initcall(landisk_devices_setup); static void __init landisk_setup(char **cmdline_p) { diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index c8acfec98695..03a7ffe729d5 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -410,6 +411,7 @@ static struct resource sdhi_cn9_resources[] = { static struct sh_mobile_sdhi_info sh7724_sdhi_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi_cn9_device = { diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c index b84df6a3a93c..4b98a5251f83 100644 --- a/arch/sh/boards/mach-r2d/setup.c +++ b/arch/sh/boards/mach-r2d/setup.c @@ -258,7 +258,7 @@ static int __init rts7751r2d_devices_setup(void) return platform_add_devices(rts7751r2d_devices, ARRAY_SIZE(rts7751r2d_devices)); } -__initcall(rts7751r2d_devices_setup); +device_initcall(rts7751r2d_devices_setup); static void rts7751r2d_power_off(void) { diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c index 
75e4ddbbec3e..1521aa75ee3a 100644 --- a/arch/sh/boards/mach-sdk7786/setup.c +++ b/arch/sh/boards/mach-sdk7786/setup.c @@ -15,13 +15,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include @@ -135,7 +135,7 @@ static int __init sdk7786_devices_setup(void) return sdk7786_i2c_setup(); } -__initcall(sdk7786_devices_setup); +device_initcall(sdk7786_devices_setup); static int sdk7786_mode_pins(void) { diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c index 33039e0dc568..8ab8330e3fd1 100644 --- a/arch/sh/boards/mach-se/7206/setup.c +++ b/arch/sh/boards/mach-se/7206/setup.c @@ -77,7 +77,7 @@ static int __init se7206_devices_setup(void) { return platform_add_devices(se7206_devices, ARRAY_SIZE(se7206_devices)); } -__initcall(se7206_devices_setup); +device_initcall(se7206_devices_setup); static int se7206_mode_pins(void) { diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 527a0cd956b5..527679394a25 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -318,6 +319,10 @@ static struct platform_device fsi_device = { }, }; +static struct platform_device fsi_ak4642_device = { + .name = "sh_fsi_a_ak4642", +}; + /* KEYSC in SoC (Needs SW33-2 set to ON) */ static struct sh_keysc_info keysc_info = { .mode = SH_KEYSC_MODE_1, @@ -467,6 +472,7 @@ static struct resource sdhi0_cn7_resources[] = { static struct sh_mobile_sdhi_info sh7724_sdhi0_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi0_cn7_device = { @@ -498,6 +504,7 @@ static struct resource sdhi1_cn8_resources[] = { static struct sh_mobile_sdhi_info sh7724_sdhi1_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, + .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi1_cn8_device = { @@ -590,6 +597,7 @@ static struct platform_device *ms7724se_devices[] __initdata = { &sh7724_usb0_host_device, &sh7724_usb1_gadget_device, &fsi_device, + &fsi_ak4642_device, &sdhi0_cn7_device, &sdhi1_cn8_device, &irda_device, diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c index 9fbc51beb181..4ed60c5e221f 100644 --- a/arch/sh/boards/mach-se/7751/setup.c +++ b/arch/sh/boards/mach-se/7751/setup.c @@ -48,7 +48,7 @@ static int __init se7751_devices_setup(void) { return platform_add_devices(se7751_devices, ARRAY_SIZE(se7751_devices)); } -__initcall(se7751_devices_setup); +device_initcall(se7751_devices_setup); /* * The Machine Vector diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c index 1b200990500c..f83ac7995d0f 100644 --- a/arch/sh/boards/mach-sh03/rtc.c +++ b/arch/sh/boards/mach-sh03/rtc.c @@ -108,7 +108,7 @@ static int set_rtc_mmss(unsigned long nowtime) __raw_writeb(real_minutes % 10, RTC_MIN1); __raw_writeb(real_minutes / 10, RTC_MIN10); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c index af4a0c012a96..d4f79b2a6514 100644 --- a/arch/sh/boards/mach-sh03/setup.c +++ b/arch/sh/boards/mach-sh03/setup.c @@ -96,7 +96,7 @@ static int __init sh03_devices_setup(void) return platform_add_devices(sh03_devices, 
ARRAY_SIZE(sh03_devices)); } -__initcall(sh03_devices_setup); +device_initcall(sh03_devices_setup); static struct sh_machine_vector mv_sh03 __initmv = { .mv_name = "Interface (CTP/PCI-SH03)", diff --git a/arch/sh/boot/romimage/mmcif-sh7724.c b/arch/sh/boot/romimage/mmcif-sh7724.c index 14863d7292cb..c84e7831018d 100644 --- a/arch/sh/boot/romimage/mmcif-sh7724.c +++ b/arch/sh/boot/romimage/mmcif-sh7724.c @@ -21,9 +21,6 @@ #define HIZCRC 0xa405015c #define DRVCRA 0xa405018a -enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT, - MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE }; - /* SH7724 specific MMCIF loader * * loads the romImage from an MMC card starting from block 512 @@ -63,7 +60,9 @@ asmlinkage void mmcif_loader(unsigned char *buf, unsigned long no_bytes) mmcif_update_progress(MMCIF_PROGRESS_LOAD); /* load kernel via MMCIF interface */ - sh_mmcif_boot_slurp(MMCIF_BASE, buf, no_bytes); + sh_mmcif_boot_do_read(MMCIF_BASE, 512, + (no_bytes + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, + buf); /* disable clock to the MMCIF hardware block */ __raw_writel(__raw_readl(MSTPCR2) | 0x20000000, MSTPCR2); diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig new file mode 100644 index 000000000000..6cb327977d13 --- /dev/null +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -0,0 +1,102 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_SYSVIPC=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_SLAB=y +CONFIG_PROFILING=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_CPU_SUBTYPE_SH7785=y +CONFIG_MEMORY_START=0x0C000000 +CONFIG_FLATMEM_MANUAL=y +CONFIG_SH_STORE_QUEUES=y +CONFIG_SH_APSH4A3A=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_KEXEC=y +CONFIG_PREEMPT=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_INET_LRO is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_FW_LOADER is not set +CONFIG_MTD=y +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_PHYSMAP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_NETDEVICES=y +CONFIG_NET_ETHERNET=y +CONFIG_SMSC911X=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=6 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_SH7785FB=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_JFFS2_FS=y +CONFIG_CRAMFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V4=y +CONFIG_CIFS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# 
CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_INFO=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_FTRACE is not set +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig new file mode 100644 index 000000000000..e71a531f1e31 --- /dev/null +++ b/arch/sh/configs/apsh4ad0a_defconfig @@ -0,0 +1,133 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_RCU_TRACE=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_NS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_BLK_CGROUP=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_PROFILING=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_CPU_SUBTYPE_SH7786=y +CONFIG_MEMORY_SIZE=0x10000000 +CONFIG_HUGETLB_PAGE_SIZE_1MB=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_SH_STORE_QUEUES=y +CONFIG_SH_APSH4AD0A=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_SH_CPU_FREQ=y +CONFIG_KEXEC=y +CONFIG_SECCOMP=y +CONFIG_PREEMPT=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_VERBOSE=y +CONFIG_PM_RUNTIME=y +CONFIG_CPU_IDLE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_INET_LRO is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_FW_LOADER is not set +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_SCSI_MULTI_LUN=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +CONFIG_MDIO_BITBANG=y +CONFIG_NET_ETHERNET=y +CONFIG_SMSC911X=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=6 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +CONFIG_FB_SH7785FB=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_DEBUG=y +CONFIG_USB_MON=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_JFFS2_FS=y +CONFIG_CRAMFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V4=y +CONFIG_CIFS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y 
+CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_VM=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DWARF_UNWINDER=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index 273f3fa198f7..5f7f667b9f3b 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -39,21 +39,15 @@ CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_PHYLIB=y CONFIG_VITESSE_PHY=y -CONFIG_MDIO_BITBANG=y CONFIG_NET_ETHERNET=y -CONFIG_MII=y +CONFIG_SH_ETH=y # CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=3 CONFIG_SERIAL_SH_SCI_CONSOLE=y @@ -63,7 +57,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -CONFIG_INOTIFY=y CONFIG_ISO9660_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y @@ -76,10 +69,8 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_KERNEL=y -# CONFIG_DETECT_SOFTLOCKUP is not set # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_FTRACE is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/drivers/pci/fixups-landisk.c b/arch/sh/drivers/pci/fixups-landisk.c index bb1a6bb5149e..95c6e2d94a0a 100644 --- a/arch/sh/drivers/pci/fixups-landisk.c +++ b/arch/sh/drivers/pci/fixups-landisk.c @@ -1,9 +1,10 @@ /* - * arch/sh/drivers/pci/ops-landisk.c + * arch/sh/drivers/pci/fixups-landisk.c * * PCI initialization for the I-O DATA Device, Inc. LANDISK board * * Copyright (C) 2006 kogiidena + * Copyright (C) 2010 Nobuhiro Iwamatsu * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. @@ -15,6 +16,9 @@ #include #include "pci-sh4.h" +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + int pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) { /* @@ -26,9 +30,29 @@ int pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) int irq = ((slot + pin - 1) & 0x3) + 5; if ((slot | (pin - 1)) > 0x3) { - printk("PCI: Bad IRQ mapping request for slot %d pin %c\n", + printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n", slot, pin - 1 + 'A'); return -1; } return irq; } + +int pci_fixup_pcic(struct pci_channel *chan) +{ + unsigned long bcr1, mcr; + + bcr1 = __raw_readl(SH7751_BCR1); + bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ + pci_write_reg(chan, bcr1, SH4_PCIBCR1); + + mcr = __raw_readl(SH7751_MCR); + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + pci_write_reg(chan, mcr, SH4_PCIMCR); + + pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5); + pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6); + pci_write_reg(chan, 0x0c000000, SH4_PCILAR0); + pci_write_reg(chan, 0x00000000, SH4_PCILAR1); + + return 0; +} diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 89ab2c57a4c2..28c5aa58bb45 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -11,11 +11,6 @@ * * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers * automatically, there are also __raw versions, which do not. 
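As the comment kept at the top of io.h says, the read{b,w,l,q}/write{b,w,l,q} accessors include memory barriers while the __raw variants do not, and the legacy ctrl_in*/ctrl_out* wrappers removed in the remainder of this hunk were nothing more than aliases for the __raw forms. A caller therefore converts roughly as in this sketch (illustrative only, not part of the patch; the regs pointer and register offsets are invented):

/* Illustrative sketch only, not part of this merge. */
#include <linux/io.h>
#include <linux/types.h>

static void example_mmio_access(void __iomem *regs)
{
	u32 status;

	/* Ordered accessors: carry the implicit barriers described in io.h. */
	writel(0x1, regs + 0x00);
	status = readl(regs + 0x04);

	/*
	 * Raw accessors: no barriers. This is exactly what the removed
	 * ctrl_outl()/ctrl_inl() wrappers expanded to, so legacy callers
	 * convert one-to-one.
	 */
	__raw_writel(0x1, regs + 0x00);
	status = __raw_readl(regs + 0x04);

	(void)status;
}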
- * - * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for - * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice - * these have the same semantics as the __raw variants, and as such, all - * new code should be using the __raw versions. */ #include #include @@ -231,52 +226,6 @@ __BUILD_IOPORT_STRING(q, u64) #endif -/* - * Legacy SuperH on-chip I/O functions - * - * These are all deprecated, all new (and especially cross-platform) code - * should be using the __raw_xxx() routines directly. - */ -static inline u8 __deprecated ctrl_inb(unsigned long addr) -{ - return __raw_readb(addr); -} - -static inline u16 __deprecated ctrl_inw(unsigned long addr) -{ - return __raw_readw(addr); -} - -static inline u32 __deprecated ctrl_inl(unsigned long addr) -{ - return __raw_readl(addr); -} - -static inline u64 __deprecated ctrl_inq(unsigned long addr) -{ - return __raw_readq(addr); -} - -static inline void __deprecated ctrl_outb(u8 v, unsigned long addr) -{ - __raw_writeb(v, addr); -} - -static inline void __deprecated ctrl_outw(u16 v, unsigned long addr) -{ - __raw_writew(v, addr); -} - -static inline void __deprecated ctrl_outl(u32 v, unsigned long addr) -{ - __raw_writel(v, addr); -} - -static inline void __deprecated ctrl_outq(u64 v, unsigned long addr) -{ - __raw_writeq(v, addr); -} - #define IO_SPACE_LIMIT 0xffffffff /* synco on SH-4A, otherwise a nop */ @@ -341,7 +290,15 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) * mapping must be done by the PMB or by using page tables. */ if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { - if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE)) + u64 flags = pgprot_val(prot); + + /* + * Anything using the legacy PTEA space attributes needs + * to be kicked down to page table mappings. 
+ */ + if (unlikely(flags & _PAGE_PCC_MASK)) + return NULL; + if (unlikely(flags & _PAGE_CACHABLE)) return (void __iomem *)P1SEGADDR(offset); return (void __iomem *)P2SEGADDR(offset); diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index dd5d6e5bf204..57c5c3d0f39f 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -31,6 +31,7 @@ struct sh_machine_vector { int (*mv_mode_pins)(void); void (*mv_mem_init)(void); + void (*mv_mem_reserve)(void); }; extern struct sh_machine_vector sh_mv; diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index 43528ec656ba..b799fe71114c 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -76,6 +76,10 @@ /* Wrapper for extended mode pgprot twiddling */ #define _PAGE_EXT(x) ((unsigned long long)(x) << 32) +#ifdef CONFIG_X2TLB +#define _PAGE_PCC_MASK 0x00000000 /* No legacy PTEA support */ +#else + /* software: moves to PTEA.TC (Timing Control) */ #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ @@ -89,7 +93,8 @@ #define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ -#ifndef CONFIG_X2TLB +#define _PAGE_PCC_MASK 0xe0000001 + /* copy the ptea attributes */ static inline unsigned long copy_ptea_attributes(unsigned long x) { @@ -231,13 +236,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) _PAGE_EXT_KERN_EXEC)) #define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC) \ - (slot ? 
_PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ - (type)) + __pgprot(0) #elif defined(CONFIG_MMU) /* SH-X TLB */ #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index c9e7cbc4768a..9c7bdfcaebbd 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -35,7 +35,7 @@ enum cpu_type { CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SHX3, /* SH4AL-DSP types */ - CPU_SH7343, CPU_SH7722, CPU_SH7366, + CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372, /* SH-5 types */ CPU_SH5_101, CPU_SH5_103, diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index e3c73cdd8c90..900f8d72ffe2 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -194,15 +194,17 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) + #define PREFETCH_STRIDE L1_CACHE_BYTES #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -static inline void prefetch(void *x) + +static inline void prefetch(const void *x) { __builtin_prefetch(x, 0, 3); } -static inline void prefetchw(void *x) +static inline void prefetchw(const void *x) { __builtin_prefetch(x, 1, 3); } diff --git a/arch/sh/include/mach-landisk/mach/iodata_landisk.h b/arch/sh/include/mach-landisk/mach/iodata_landisk.h index 6fb04ab38b9f..f432773a9571 100644 --- a/arch/sh/include/mach-landisk/mach/iodata_landisk.h +++ b/arch/sh/include/mach-landisk/mach/iodata_landisk.h @@ -2,7 +2,7 @@ #define __ASM_SH_IODATA_LANDISK_H /* - * linux/include/asm-sh/landisk/iodata_landisk.h + * arch/sh/include/mach-landisk/mach/iodata_landisk.h * * Copyright (C) 2000 Atom Create Engineering Co., Ltd. 
* @@ -27,7 +27,7 @@ #define IRQ_PCIINTA 5 /* PCI INTA IRQ */ #define IRQ_PCIINTB 6 /* PCI INTB IRQ */ -#define IRQ_PCIINDC 7 /* PCI INTC IRQ */ +#define IRQ_PCIINTC 7 /* PCI INTC IRQ */ #define IRQ_PCIINTD 8 /* PCI INTD IRQ */ #define IRQ_ATA 9 /* ATA IRQ */ #define IRQ_FATA 10 /* FATA IRQ */ @@ -35,6 +35,8 @@ #define IRQ_BUTTON 12 /* USL-5P Button IRQ */ #define IRQ_FAULT 13 /* USL-5P Fault IRQ */ +void init_landisk_IRQ(void); + #define __IO_PREFIX landisk #include diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c index e80a936f409a..f47be8727b3b 100644 --- a/arch/sh/kernel/cpu/proc.c +++ b/arch/sh/kernel/cpu/proc.c @@ -25,7 +25,7 @@ static const char *cpu_name[] = { [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103", [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723", [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724", - [CPU_SH_NONE] = "Unknown" + [CPU_SH7372] = "SH7372", [CPU_SH_NONE] = "Unknown" }; const char *get_cpu_subtype(struct sh_cpuinfo *c) diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index c3638516bffc..0f8befccf9fa 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -62,6 +62,8 @@ static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xf8400000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 88, 88, 88, 88 }, }; @@ -77,6 +79,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xf8410000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 92, 92, 92, 92 }, }; @@ -92,6 +96,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xf8420000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 96, 96, 96, 96 }, }; diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 6c96ea02bf8d..949bf2bac28c 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -201,6 +201,8 @@ static struct platform_device mtu2_2_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xff804000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 220, 220, 220, 220 }, }; diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index d08bf4c07d60..9df558dcdb86 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -180,6 +180,8 @@ static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 180, 180, 180, 180 } }; @@ -195,6 +197,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 184, 184, 184, 184 } }; @@ -210,6 +214,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port 
scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 188, 188, 188, 188 } }; @@ -225,6 +231,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 192, 192, 192, 192 } }; @@ -240,6 +248,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xfffea000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 196, 196, 196, 196 } }; @@ -255,6 +265,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xfffea800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 200, 200, 200, 200 } }; @@ -270,6 +282,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xfffeb000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 204, 204, 204, 204 } }; @@ -285,6 +299,8 @@ static struct platform_device scif6_device = { static struct plat_sci_port scif7_platform_data = { .mapbase = 0xfffeb800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 208, 208, 208, 208 } }; diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 832f401b5860..a43124e608c3 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -176,6 +176,8 @@ static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 192, 192, 192, 192 }, }; @@ -191,6 +193,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 196, 196, 196, 196 }, }; @@ -206,6 +210,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 200, 200, 200, 200 }, }; @@ -221,6 +227,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 204, 204, 204, 204 }, }; diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index dc47b04e1049..5d14f849aea3 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -136,6 +136,8 @@ static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = 
UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 240, 240, 240, 240 }, }; @@ -151,6 +153,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 244, 244, 244, 244 }, }; @@ -166,6 +170,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 248, 248, 248, 248 }, }; @@ -181,6 +187,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 252, 252, 252, 252 }, }; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index baadd7f54d94..cd2e702feb7e 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -70,6 +70,9 @@ static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xa4410000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | + SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 56, 56, 56 }, }; @@ -85,6 +88,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xa4400000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 52, 52, 52 }, }; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 3cf8c8ef7b32..4551ad647c2c 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -109,6 +109,8 @@ static struct platform_device rtc_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffffe80, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 23, 23, 23, 0 }, }; @@ -126,6 +128,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xa4000150, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 56, 56, 56, 56 }, }; @@ -143,6 +147,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xa4000140, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_IRDA, .irqs = { 52, 52, 52, 52 }, }; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index b0c2fb4ab479..78f6b01d42c3 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -99,6 +99,9 @@ static struct platform_device rtc_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xa4400000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE | + SCSCR_CKE1 | SCSCR_CKE0, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 52, 52, 52, 52 }, }; @@ -114,6 +117,9 @@ 
static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xa4410000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE | + SCSCR_CKE1 | SCSCR_CKE0, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 56, 56, 56, 56 }, }; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 24b17135d5d2..365b94a6fcb7 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -1,5 +1,5 @@ /* - * SH7720 Setup + * Setup code for SH7720, SH7721. * * Copyright (C) 2007 Markus Brunner, Mark Jonas * Copyright (C) 2009 Paul Mundt @@ -51,6 +51,8 @@ static struct platform_device rtc_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xa4430000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -66,6 +68,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xa4438000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index b93458f33b74..971cf0fce4f5 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -151,8 +151,14 @@ void __cpuinit cpu_probe(void) boot_cpu_data.flags |= CPU_HAS_L2_CACHE; break; case 0x10: + case 0x11: boot_cpu_data.type = CPU_SH7757; break; + case 0xd0: + case 0x40: /* yon-ten-go */ + boot_cpu_data.type = CPU_SH7372; + break; + } break; case 0x4000: /* 1st cut */ diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index e916b18e1f7c..5b2833159b7d 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -18,6 +18,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe80000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 41, 43, 42 }, }; diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 911d196e86b5..c2b0aaaedcae 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -14,6 +14,7 @@ #include #include #include +#include static struct resource rtc_resources[] = { [0] = { @@ -35,33 +36,37 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; -static struct plat_sci_port scif0_platform_data = { +static struct plat_sci_port sci_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 23, 23, 23, 0 }, }; -static struct platform_device scif0_device = { +static struct platform_device sci_device = { .name = "sh-sci", .id = 0, .dev = { - .platform_data = &scif0_platform_data, + .platform_data = &sci_platform_data, }, }; -static struct plat_sci_port scif1_platform_data = { +static struct plat_sci_port scif_platform_data = { .mapbase = 0xffe80000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; -static struct platform_device scif1_device = { +static struct platform_device scif_device = { .name = "sh-sci", .id = 1, .dev = { - .platform_data = &scif1_platform_data, 
+ .platform_data = &scif_platform_data, }, }; @@ -210,8 +215,6 @@ static struct platform_device tmu4_device = { #endif static struct platform_device *sh7750_devices[] __initdata = { - &scif0_device, - &scif1_device, &rtc_device, &tmu0_device, &tmu1_device, @@ -226,14 +229,19 @@ static struct platform_device *sh7750_devices[] __initdata = { static int __init sh7750_devices_setup(void) { + if (mach_is_rts7751r2d()) { + platform_register_device(&scif_device); + } else { + platform_register_device(&sci_device); + platform_register_device(&scif_device); + } + return platform_add_devices(sh7750_devices, ARRAY_SIZE(sh7750_devices)); } arch_initcall(sh7750_devices_setup); static struct platform_device *sh7750_early_devices[] __initdata = { - &scif0_device, - &scif1_device, &tmu0_device, &tmu1_device, &tmu2_device, @@ -247,6 +255,14 @@ static struct platform_device *sh7750_early_devices[] __initdata = { void __init plat_early_device_setup(void) { + if (mach_is_rts7751r2d()) { + scif_platform_data.scscr |= SCSCR_CKE1; + early_platform_add_devices(&scif_device, 1); + } else { + early_platform_add_devices(&sci_device, 1); + early_platform_add_devices(&scif_device, 1); + } + early_platform_add_devices(sh7750_early_devices, ARRAY_SIZE(sh7750_early_devices)); } diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 48ea8fe85dc5..78bbf232e391 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -129,6 +129,8 @@ static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfe600000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 52, 53, 55, 54 }, }; @@ -145,6 +147,8 @@ static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfe610000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .irqs = { 72, 73, 75, 74 }, }; @@ -159,6 +163,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfe620000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 76, 77, 79, 78 }, }; @@ -174,6 +180,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfe480000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 80, 81, 82, 0 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 3681cafdb4af..1b8848317e9c 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -19,6 +19,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -34,6 +36,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; @@ -49,6 +53,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 
0xffe20000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, }; @@ -64,6 +70,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xffe30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 83, 83, 83, 83 }, }; @@ -360,6 +368,8 @@ void __init plat_early_device_setup(void) enum { UNUSED = 0, + ENABLED, + DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, @@ -375,15 +385,13 @@ enum { I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, - IRDA, - SDHI0, SDHI1, SDHI2, SDHI3, - CMT, TSIF, SIU, + IRDA, SDHI, CMT, TSIF, SIU, TMU0, TMU1, TMU2, JPU, LCDC, /* interrupt groups */ - DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, SDHI, USB, + DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB, }; static struct intc_vect vectors[] __initdata = { @@ -412,8 +420,8 @@ static struct intc_vect vectors[] __initdata = { INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20), INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60), - INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0), - INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0), + INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), + INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), INTC_VECT(SIU, 0xf80), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), @@ -431,7 +439,6 @@ static struct intc_group groups[] __initdata = { INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI), - INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3), INTC_GROUP(USB, USBI0, USBI1), }; @@ -452,7 +459,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ - { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } }, + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USBI1, USBI0 } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ @@ -488,9 +495,13 @@ static struct intc_mask_reg ack_registers[] __initdata = { { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; -static DECLARE_INTC_DESC_ACK(intc_desc, "sh7343", vectors, groups, - mask_registers, prio_registers, sense_registers, - ack_registers); +static struct intc_desc intc_desc __initdata = { + .name = "sh7343", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index 8dab9e1bbd89..82616af64d62 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -21,6 +21,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -319,6 +321,8 @@ void __init 
plat_early_device_setup(void) enum { UNUSED=0, + ENABLED, + DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, @@ -332,14 +336,13 @@ enum { DENC, MSIOF, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, - SDHI0, SDHI1, SDHI2, SDHI3, - CMT, TSIF, SIU, + SDHI, CMT, TSIF, SIU, TMU0, TMU1, TMU2, VEU2, LCDC, /* interrupt groups */ - DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI, + DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, }; static struct intc_vect vectors[] __initdata = { @@ -364,8 +367,8 @@ static struct intc_vect vectors[] __initdata = { INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), - INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0), - INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0), + INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), + INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), INTC_VECT(SIU, 0xf80), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), @@ -381,7 +384,6 @@ static struct intc_group groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), - INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3), }; static struct intc_mask_reg mask_registers[] __initdata = { @@ -403,7 +405,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ - { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } }, + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB, } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ @@ -441,9 +443,13 @@ static struct intc_mask_reg ack_registers[] __initdata = { { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; -static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups, - mask_registers, prio_registers, sense_registers, - ack_registers); +static struct intc_desc intc_desc __initdata = { + .name = "sh7366", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index d551ed8dea95..5813d8023619 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -181,6 +181,8 @@ struct platform_device dma_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -196,6 +198,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; @@ -211,6 +215,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 
82, 82, 82 }, }; @@ -699,7 +705,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ - { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } }, + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 0eadefdbbba1..072382280f96 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -24,6 +24,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -39,6 +41,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; @@ -54,6 +58,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, }; @@ -69,6 +75,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 56, 56, 56, 56 }, }; @@ -84,6 +92,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 88, 88, 88, 88 }, }; @@ -99,6 +109,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 109, 109, 109, 109 }, }; @@ -719,7 +731,7 @@ static struct intc_group groups[] __initdata = { static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, - 0, DISABLED, ENABLED, ENABLED } }, + 0, ENABLED, ENABLED, ENABLED } }, { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } }, { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ @@ -736,7 +748,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ - { 0, DISABLED, ENABLED, ENABLED, + { 0, ENABLED, ENABLED, ENABLED, 0, 0, SCIFA_SCIFA2, SIU_SIUI } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } }, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index 828c9657eb52..0333fe9e3881 100644 --- 
a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -257,6 +257,8 @@ static struct platform_device dma1_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; @@ -272,6 +274,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; @@ -287,6 +291,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, }; @@ -302,6 +308,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 56, 56, 56, 56 }, }; @@ -317,6 +325,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 88, 88, 88, 88 }, }; @@ -332,6 +342,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE, + .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 109, 109, 109, 109 }, }; @@ -1144,7 +1156,7 @@ static struct intc_group groups[] __initdata = { static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, - 0, DISABLED, ENABLED, ENABLED } }, + 0, ENABLED, ENABLED, ENABLED } }, { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, @@ -1166,7 +1178,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ - { DISABLED, DISABLED, ENABLED, ENABLED, + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, SCIFA5, FSI } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index 749c6388d5a5..9c1de2633ac3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -20,6 +20,8 @@ static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfe4b0000, /* SCIF2 */ .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; @@ -35,6 +37,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfe4c0000, /* SCIF3 */ .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, }; @@ -50,6 +54,8 @@ static struct platform_device scif3_device = { 
static struct plat_sci_port scif4_platform_data = { .mapbase = 0xfe4d0000, /* SCIF4 */ .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 104, 104, 104, 104 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 5b5f6b005fc5..593eca6509b5 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -19,6 +19,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; @@ -34,6 +36,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe08000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, }; @@ -49,6 +53,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 104, 104, 104, 104 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 7270d7fd6761..2c6aa22cf5f6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c @@ -17,6 +17,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xff923000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 61, 61, 61, 61 }, }; @@ -32,6 +34,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xff924000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 62, 62, 62, 62 }, }; @@ -47,6 +51,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xff925000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 63, 63, 63, 63 }, }; @@ -62,6 +68,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xff926000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 64, 64, 64, 64 }, }; @@ -77,6 +85,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xff927000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 65, 65, 65, 65 }, }; @@ -92,6 +102,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xff928000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 66, 66, 66, 66 }, }; @@ -107,6 +119,8 @@ static struct platform_device scif5_device = { static struct plat_sci_port scif6_platform_data = { .mapbase = 0xff929000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | 
SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 67, 67, 67, 67 }, }; @@ -122,6 +136,8 @@ static struct platform_device scif6_device = { static struct plat_sci_port scif7_platform_data = { .mapbase = 0xff92a000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 68, 68, 68, 68 }, }; @@ -137,6 +153,8 @@ static struct platform_device scif7_device = { static struct plat_sci_port scif8_platform_data = { .mapbase = 0xff92b000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 69, 69, 69, 69 }, }; @@ -152,6 +170,8 @@ static struct platform_device scif8_device = { static struct plat_sci_port scif9_platform_data = { .mapbase = 0xff92c000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 70, 70, 70, 70 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 0f414864f76b..08add7fa6849 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -20,6 +20,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; @@ -35,6 +37,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, }; @@ -379,6 +383,7 @@ static int __init sh7780_devices_setup(void) ARRAY_SIZE(sh7780_devices)); } arch_initcall(sh7780_devices_setup); + static struct platform_device *sh7780_early_devices[] __initdata = { &scif0_device, &scif1_device, @@ -392,6 +397,13 @@ static struct platform_device *sh7780_early_devices[] __initdata = { void __init plat_early_device_setup(void) { + if (mach_is_sh2007()) { + scif0_platform_data.scscr &= ~SCSCR_CKE1; + scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2; + scif1_platform_data.scscr &= ~SCSCR_CKE1; + scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2; + } + early_platform_add_devices(sh7780_early_devices, ARRAY_SIZE(sh7780_early_devices)); } diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index c9a572bc6dc8..18d8fc136fb2 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -23,6 +23,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffea0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; @@ -38,6 +40,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffeb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 44, 44, 44, 44 }, }; @@ -53,6 +57,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffec0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = 
SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 60, 60, 60, 60 }, }; @@ -68,6 +74,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xffed0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 61, 61, 61, 61 }, }; @@ -83,6 +91,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xffee0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 62, 62, 62, 62 }, }; @@ -98,6 +108,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xffef0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 63, 63, 63, 63 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index 0170dbda1d00..1656b8c91faf 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -29,6 +29,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffea0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 41, 43, 42 }, }; @@ -47,6 +49,8 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffeb0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 44, 44, 44, 44 }, }; @@ -62,6 +66,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffec0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 50, 50, 50, 50 }, }; @@ -77,6 +83,8 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xffed0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 51, 51, 51, 51 }, }; @@ -92,6 +100,8 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xffee0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 52, 52, 52, 52 }, }; @@ -107,6 +117,8 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xffef0000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, + .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 53, 53, 53, 53 }, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 013f0b144489..bb208806dc1a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -29,6 +29,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffc30000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 41, 43, 42 }, }; @@ -44,6 +46,8 @@ static struct platform_device scif0_device = { 
static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffc40000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 44, 45, 47, 46 }, }; @@ -59,6 +63,8 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffc60000, .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 52, 53, 55, 54 }, }; diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c index d910666142b1..18419f1de963 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c @@ -19,6 +19,8 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 39, 40, 42, 0 }, }; diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 83972aa319c2..c19e2a940e3f 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -81,7 +81,6 @@ void sh_mobile_setup_cpuidle(void) state->target_residency = 1 * 2; state->power_usage = 3; state->flags = 0; - state->flags |= CPUIDLE_FLAG_SHALLOW; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index e55968712706..a6f95ae4aae7 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops sh_pm_ops = { +static const struct platform_suspend_ops sh_pm_ops = { .enter = sh_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index eb4cc4ec7952..d1bffbcd9d52 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -568,7 +568,7 @@ static void sh5_flush_dcache_page(void *page) } /* - * Flush the range [start,end] of kernel virtual adddress space from + * Flush the range [start,end] of kernel virtual address space from * the I-cache. The corresponding range must be purged from the * D-cache also because the SH-5 doesn't have cache snooping between * the caches. The addresses will be visible through the superpage diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 9163db3e8d15..d7762349ea48 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -35,7 +35,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, if (pud) { pmd = pmd_alloc(mm, pud, addr); if (pmd) - pte = pte_alloc_map(mm, pmd, addr); + pte = pte_alloc_map(mm, NULL, pmd, addr); } } diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 3385b28acaac..0d3f912e3334 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -2,7 +2,7 @@ * linux/arch/sh/mm/init.c * * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2002 - 2010 Paul Mundt + * Copyright (C) 2002 - 2011 Paul Mundt * * Based on linux/arch/i386/mm/init.c: * Copyright (C) 1995 Linus Torvalds @@ -325,11 +325,17 @@ void __init paging_init(void) int nid; memblock_init(); - sh_mv.mv_mem_init(); early_reserve_mem(); + /* + * Once the early reservations are out of the way, give the + * platforms a chance to kick out some memory. 
+ */ + if (sh_mv.mv_mem_reserve) + sh_mv.mv_mem_reserve(); + memblock_enforce_memory_limit(memory_limit); memblock_analyze(); diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 0e68465e7b50..6dd56c4d0054 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -9,6 +9,7 @@ SE SH_SOLUTION_ENGINE HIGHLANDER SH_HIGHLANDER RTS7751R2D SH_RTS7751R2D RSK SH_RSK +ALPHA_BOARD SH_ALPHA_BOARD # # List of companion chips / MFDs. @@ -61,3 +62,5 @@ ESPT SH_ESPT POLARIS SH_POLARIS KFR2R09 SH_KFR2R09 ECOVEC SH_ECOVEC +APSH4A3A SH_APSH4A3A +APSH4AD0A SH_APSH4AD0A diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index ee3c7dde8d9f..8d348c474a2f 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -23,17 +23,11 @@ static void *module_map(unsigned long size) { - struct vm_struct *area; - - size = PAGE_ALIGN(size); - if (!size || size > MODULES_LEN) - return NULL; - - area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); - if (!area) + if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; - - return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL, -1, + __builtin_return_address(0)); } static char *dot2underscore(char *name) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 42ad2ba85010..1e9770936c3b 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -622,7 +622,7 @@ static const char CHAFSR_PERR_msg[] = static const char CHAFSR_IERR_msg[] = "Internal processor error"; static const char CHAFSR_ISAP_msg[] = - "System request parity error on incoming addresss"; + "System request parity error on incoming address"; static const char CHAFSR_UCU_msg[] = "Uncorrectable E-cache ECC error for ifetch/data"; static const char CHAFSR_UCC_msg[] = diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c index 5edcac184eaf..e6067b75f11c 100644 --- a/arch/sparc/mm/generic_32.c +++ b/arch/sparc/mm/generic_32.c @@ -50,7 +50,7 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned end = PGDIR_SIZE; offset -= address; do { - pte_t * pte = pte_alloc_map(mm, pmd, address); + pte_t *pte = pte_alloc_map(mm, NULL, pmd, address); if (!pte) return -ENOMEM; io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c index 04f2bf4cd571..3cb00dfd4bd6 100644 --- a/arch/sparc/mm/generic_64.c +++ b/arch/sparc/mm/generic_64.c @@ -92,7 +92,7 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned end = PGDIR_SIZE; offset -= address; do { - pte_t * pte = pte_alloc_map(mm, pmd, address); + pte_t *pte = pte_alloc_map(mm, NULL, pmd, address); if (!pte) return -ENOMEM; io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 5fdddf134caa..f4e97646ce23 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -214,7 +214,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, if (pud) { pmd = pmd_alloc(mm, pud, addr); if (pmd) - pte = pte_alloc_map(mm, pmd, addr); + pte = pte_alloc_map(mm, NULL, pmd, addr); } return pte; } diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um index 50d6aa20c353..f8d1d0d47fe6 100644 --- a/arch/um/Kconfig.um +++ b/arch/um/Kconfig.um @@ -131,7 +131,7 @@ config NR_CPUS config HIGHMEM bool "Highmem support 
(EXPERIMENTAL)" - depends on !64BIT && EXPERIMENTAL + depends on !64BIT && BROKEN default n help This was used to allow UML to run with big amounts of memory. diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 1664cce7b0ac..050e4ddbbb65 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -821,12 +821,12 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, static void unregister_winch(struct tty_struct *tty) { - struct list_head *ele; + struct list_head *ele, *next; struct winch *winch; spin_lock(&winch_handler_lock); - list_for_each(ele, &winch_handlers) { + list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); if (winch->tty == tty) { free_winch(winch, 1); diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c index 8501e7d0015c..7e0619c2c2c6 100644 --- a/arch/um/drivers/mmapper_kern.c +++ b/arch/um/drivers/mmapper_kern.c @@ -37,13 +37,7 @@ static ssize_t mmapper_write(struct file *file, const char __user *buf, if (*ppos > mmapper_size) return -EINVAL; - if (count > mmapper_size - *ppos) - count = mmapper_size - *ppos; - - if (copy_from_user(&v_buf[*ppos], buf, count)) - return -EFAULT; - - return count; + return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count); } static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -137,3 +131,4 @@ module_exit(mmapper_exit); MODULE_AUTHOR("Greg Lonnon "); MODULE_DESCRIPTION("DSPLinux simulator mmapper driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 3d099f974785..1aee587e9c5d 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -31,7 +31,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, if (!pmd) goto out_pmd; - pte = pte_alloc_map(mm, pmd, proc); + pte = pte_alloc_map(mm, NULL, pmd, proc); if (!pte) goto out_pte; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b6fccb07123e..47ae4a751a59 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -51,6 +51,7 @@ config X86 select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_KERNEL_XZ select HAVE_KERNEL_LZO select HAVE_HW_BREAKPOINT select HAVE_MIXED_BREAKPOINTS_REGS @@ -65,6 +66,7 @@ config X86 select HAVE_SPARSE_IRQ select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP + select USE_GENERIC_SMP_HELPERS if SMP config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) @@ -203,10 +205,6 @@ config HAVE_INTEL_TXT def_bool y depends on EXPERIMENTAL && DMAR && ACPI -config USE_GENERIC_SMP_HELPERS - def_bool y - depends on SMP - config X86_32_SMP def_bool y depends on X86_32 && SMP @@ -1936,13 +1934,19 @@ config PCI_MMCONFIG depends on X86_64 && PCI && ACPI config PCI_CNB20LE_QUIRK - bool "Read CNB20LE Host Bridge Windows" - depends on PCI + bool "Read CNB20LE Host Bridge Windows" if EMBEDDED + default n + depends on PCI && EXPERIMENTAL help Read the PCI windows out of the CNB20LE host bridge. This allows PCI hotplug to work on systems with the CNB20LE chipset which do not have ACPI. + There's no public spec for this chipset, and this functionality + is known to be incomplete. + + You should say N unless you know you need this. 
+ config DMAR bool "Support for DMA Remapping Devices (EXPERIMENTAL)" depends on PCI_MSI && ACPI && EXPERIMENTAL @@ -2070,7 +2074,7 @@ config OLPC config OLPC_XO1 tristate "OLPC XO-1 support" - depends on OLPC && PCI + depends on OLPC && MFD_CS5535 ---help--- Add support for non-essential features of the OLPC XO-1 laptop. @@ -2078,11 +2082,17 @@ config OLPC_OPENFIRMWARE bool "Support for OLPC's Open Firmware" depends on !X86_64 && !X86_PAE default n + select OF help This option adds support for the implementation of Open Firmware that is used on the OLPC XO-1 Children's Machine. If unsure, say N here. +config OLPC_OPENFIRMWARE_DT + bool + default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE + select OF_PROMTREE + endif # X86_32 config AMD_NB diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 0c229551eead..09664efb9cee 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -49,12 +49,15 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE $(call if_changed,bzip2) $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzma) +$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE + $(call if_changed,xzkern) $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzo) suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZMA) := lzma +suffix-$(CONFIG_KERNEL_XZ) := xz suffix-$(CONFIG_KERNEL_LZO) := lzo quiet_cmd_mkpiggy = MKPIGGY $@ diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 325c05294fc4..3a19d04cebeb 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -139,6 +139,10 @@ static int lines, cols; #include "../../../../lib/decompress_unlzma.c" #endif +#ifdef CONFIG_KERNEL_XZ +#include "../../../../lib/decompress_unxz.c" +#endif + #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 5c228129d175..646aa78ba5fd 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -74,7 +74,7 @@ int main(int argc, char *argv[]) offs = (olen > ilen) ? olen - ilen : 0; offs += olen >> 12; /* Add 8 bytes for each 32K block */ - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ + offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ printf(".section \".rodata..compressed\",\"a\",@progbits\n"); diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index ff16756a51c1..8fe2a4966b7a 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -9,6 +9,20 @@ * Vinodh Gopal * Kahraman Akdemir * + * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD + * interface for 64-bit kernels. 
+ * Authors: Erdinc Ozturk (erdinc.ozturk@intel.com) + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Adrian Hoban + * James Guilford (james.guilford@intel.com) + * Gabriele Paoloni + * Tadeusz Struk (tadeusz.struk@intel.com) + * Wajdi Feghali (wajdi.k.feghali@intel.com) + * Copyright (c) 2010, Intel Corporation. + * + * Ported x86_64 version to x86: + * Author: Mathias Krause + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -18,8 +32,62 @@ #include #include +#ifdef __x86_64__ +.data +POLY: .octa 0xC2000000000000000000000000000001 +TWOONE: .octa 0x00000001000000000000000000000001 + +# order of these constants should not change. +# more specifically, ALL_F should follow SHIFT_MASK, +# and ZERO should follow ALL_F + +SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F +MASK1: .octa 0x0000000000000000ffffffffffffffff +MASK2: .octa 0xffffffffffffffff0000000000000000 +SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 +ALL_F: .octa 0xffffffffffffffffffffffffffffffff +ZERO: .octa 0x00000000000000000000000000000000 +ONE: .octa 0x00000000000000000000000000000001 +F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0 +dec: .octa 0x1 +enc: .octa 0x2 + + .text + +#define STACK_OFFSET 8*3 +#define HashKey 16*0 // store HashKey <<1 mod poly here +#define HashKey_2 16*1 // store HashKey^2 <<1 mod poly here +#define HashKey_3 16*2 // store HashKey^3 <<1 mod poly here +#define HashKey_4 16*3 // store HashKey^4 <<1 mod poly here +#define HashKey_k 16*4 // store XOR of High 64 bits and Low 64 + // bits of HashKey <<1 mod poly here + //(for Karatsuba purposes) +#define HashKey_2_k 16*5 // store XOR of High 64 bits and Low 64 + // bits of HashKey^2 <<1 mod poly here + // (for Karatsuba purposes) +#define HashKey_3_k 16*6 // store XOR of High 64 bits and Low 64 + // bits of HashKey^3 <<1 mod poly here + // (for Karatsuba purposes) +#define HashKey_4_k 16*7 // store XOR of High 64 bits and Low 64 + // bits of HashKey^4 <<1 mod poly here + // (for Karatsuba purposes) +#define VARIABLE_OFFSET 16*8 + +#define arg1 rdi +#define arg2 rsi +#define arg3 rdx +#define arg4 rcx +#define arg5 r8 +#define arg6 r9 +#define arg7 STACK_OFFSET+8(%r14) +#define arg8 STACK_OFFSET+16(%r14) +#define arg9 STACK_OFFSET+24(%r14) +#define arg10 STACK_OFFSET+32(%r14) +#endif + + #define STATE1 %xmm0 #define STATE2 %xmm4 #define STATE3 %xmm5 @@ -32,12 +100,16 @@ #define IN IN1 #define KEY %xmm2 #define IV %xmm3 + #define BSWAP_MASK %xmm10 #define CTR %xmm11 #define INC %xmm12 +#ifdef __x86_64__ +#define AREG %rax #define KEYP %rdi #define OUTP %rsi +#define UKEYP OUTP #define INP %rdx #define LEN %rcx #define IVP %r8 @@ -46,6 +118,1588 @@ #define TKEYP T1 #define T2 %r11 #define TCTR_LOW T2 +#else +#define AREG %eax +#define KEYP %edi +#define OUTP AREG +#define UKEYP OUTP +#define INP %edx +#define LEN %esi +#define IVP %ebp +#define KLEN %ebx +#define T1 %ecx +#define TKEYP T1 +#endif + + +#ifdef __x86_64__ +/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) +* +* +* Input: A and B (128-bits each, bit-reflected) +* Output: C = A*B*x mod poly, (i.e. >>1 ) +* To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input +* GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly. 
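The GHASH_MUL macro defined next computes this GF(2^128) product with three PCLMULQDQ instructions, using the Karatsuba identity a*b = a1*b1*x^128 + ((a1+a0)*(b1+b0) + a1*b1 + a0*b0)*x^64 + a0*b0 (addition being XOR), followed by a two-phase shift reduction. As a reference point, here is a minimal, unoptimized C sketch of the same field multiplication in the bit ordering used by the GCM specification; note that the assembly operates on byte-reflected data, which is why its constants (POLY, the 31/30/25 and 1/2/7 shifts) look different. This is only an illustration, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* One GF(2^128) element in the GCM spec's convention: bit 0 of the field
 * element is the most significant bit of .hi. */
struct gf128 {
	uint64_t hi;
	uint64_t lo;
};

/* Shift-and-add multiply from the GCM specification: Z = X * Y modulo
 * x^128 + x^7 + x^2 + x + 1 (reduction constant 0xE1 in the top byte). */
static struct gf128 gf128_mul(struct gf128 x, struct gf128 y)
{
	struct gf128 z = { 0, 0 }, v = y;
	int i;

	for (i = 0; i < 128; i++) {
		/* bit i of x, counting from the most significant bit */
		uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
					: (x.lo >> (127 - i)) & 1;
		uint64_t carry;

		if (bit) {
			z.hi ^= v.hi;
			z.lo ^= v.lo;
		}
		/* v = v * x: shift right one bit, reduce if a bit fell off */
		carry = v.lo & 1;
		v.lo = (v.lo >> 1) | (v.hi << 63);
		v.hi >>= 1;
		if (carry)
			v.hi ^= 0xE100000000000000ULL;
	}
	return z;
}

int main(void)
{
	/* example operands only (a hash key and one data block); the
	 * printed product is not asserted against anything here */
	struct gf128 h = { 0x66e94bd4ef8a2c3bULL, 0x884cfa59ca342b2eULL };
	struct gf128 c = { 0x0388dace60b6a392ULL, 0xf328c2b971b2fe78ULL };
	struct gf128 p = gf128_mul(c, h);

	printf("%016llx%016llx\n",
	       (unsigned long long)p.hi, (unsigned long long)p.lo);
	return 0;
}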
+* +*/ +.macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5 + movdqa \GH, \TMP1 + pshufd $78, \GH, \TMP2 + pshufd $78, \HK, \TMP3 + pxor \GH, \TMP2 # TMP2 = a1+a0 + pxor \HK, \TMP3 # TMP3 = b1+b0 + PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1 + PCLMULQDQ 0x00, \HK, \GH # GH = a0*b0 + PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) + pxor \GH, \TMP2 + pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0) + movdqa \TMP2, \TMP3 + pslldq $8, \TMP3 # left shift TMP3 2 DWs + psrldq $8, \TMP2 # right shift TMP2 2 DWs + pxor \TMP3, \GH + pxor \TMP2, \TMP1 # TMP2:GH holds the result of GH*HK + + # first phase of the reduction + + movdqa \GH, \TMP2 + movdqa \GH, \TMP3 + movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4 + # in in order to perform + # independent shifts + pslld $31, \TMP2 # packed right shift <<31 + pslld $30, \TMP3 # packed right shift <<30 + pslld $25, \TMP4 # packed right shift <<25 + pxor \TMP3, \TMP2 # xor the shifted versions + pxor \TMP4, \TMP2 + movdqa \TMP2, \TMP5 + psrldq $4, \TMP5 # right shift TMP5 1 DW + pslldq $12, \TMP2 # left shift TMP2 3 DWs + pxor \TMP2, \GH + + # second phase of the reduction + + movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4 + # in in order to perform + # independent shifts + movdqa \GH,\TMP3 + movdqa \GH,\TMP4 + psrld $1,\TMP2 # packed left shift >>1 + psrld $2,\TMP3 # packed left shift >>2 + psrld $7,\TMP4 # packed left shift >>7 + pxor \TMP3,\TMP2 # xor the shifted versions + pxor \TMP4,\TMP2 + pxor \TMP5, \TMP2 + pxor \TMP2, \GH + pxor \TMP1, \GH # result is in TMP1 +.endm + +/* +* if a = number of total plaintext bytes +* b = floor(a/16) +* num_initial_blocks = b mod 4 +* encrypt the initial num_initial_blocks blocks and apply ghash on +* the ciphertext +* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers +* are clobbered +* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified +*/ + + +.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ +XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation + mov arg7, %r10 # %r10 = AAD + mov arg8, %r12 # %r12 = aadLen + mov %r12, %r11 + pxor %xmm\i, %xmm\i +_get_AAD_loop\num_initial_blocks\operation: + movd (%r10), \TMP1 + pslldq $12, \TMP1 + psrldq $4, %xmm\i + pxor \TMP1, %xmm\i + add $4, %r10 + sub $4, %r12 + jne _get_AAD_loop\num_initial_blocks\operation + cmp $16, %r11 + je _get_AAD_loop2_done\num_initial_blocks\operation + mov $16, %r12 +_get_AAD_loop2\num_initial_blocks\operation: + psrldq $4, %xmm\i + sub $4, %r12 + cmp %r11, %r12 + jne _get_AAD_loop2\num_initial_blocks\operation +_get_AAD_loop2_done\num_initial_blocks\operation: + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data + + xor %r11, %r11 # initialise the data pointer offset as zero + + # start AES for num_initial_blocks blocks + + mov %arg5, %rax # %rax = *Y0 + movdqu (%rax), \XMM0 # XMM0 = Y0 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM0 + +.if (\i == 5) || (\i == 6) || (\i == 7) +.irpc index, \i_seq + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, %xmm\index + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap + +.endr +.irpc index, \i_seq + pxor 16*0(%arg1), %xmm\index +.endr +.irpc index, \i_seq + movaps 0x10(%rdi), \TMP1 + AESENC \TMP1, %xmm\index # Round 1 +.endr +.irpc index, \i_seq + movaps 0x20(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x30(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x40(%arg1), \TMP1 + 
AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x50(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x60(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x70(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x80(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x90(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0xa0(%arg1), \TMP1 + AESENCLAST \TMP1, %xmm\index # Round 10 +.endr +.irpc index, \i_seq + movdqu (%arg3 , %r11, 1), \TMP1 + pxor \TMP1, %xmm\index + movdqu %xmm\index, (%arg2 , %r11, 1) + # write back plaintext/ciphertext for num_initial_blocks + add $16, %r11 + + movdqa \TMP1, %xmm\index + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\index + + # prepare plaintext/ciphertext for GHASH computation +.endr +.endif + GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + # apply GHASH on num_initial_blocks blocks + +.if \i == 5 + pxor %xmm5, %xmm6 + GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm6, %xmm7 + GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.elseif \i == 6 + pxor %xmm6, %xmm7 + GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.elseif \i == 7 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.endif + cmp $64, %r13 + jl _initial_blocks_done\num_initial_blocks\operation + # no need for precomputed values +/* +* +* Precomputations for HashKey parallel with encryption of first 4 blocks. 
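For reference, the reason HashKey^2..HashKey^4 are precomputed here is the usual aggregated-reduction rewrite of the GHASH recurrence: when four blocks are processed at once,

  Y_(i+4) = ((((Y_i + C_(i+1))*H + C_(i+2))*H + C_(i+3))*H + C_(i+4))*H
          = Y_i*H^4 + C_(i+1)*H^4 + C_(i+2)*H^3 + C_(i+3)*H^2 + C_(i+4)*H

(all additions are XOR), so the four multiplications become independent and a single reduction can be shared, instead of reducing after every block.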
+* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i +*/ + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM1 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM2 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM3 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM4 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + + pxor 16*0(%arg1), \XMM1 + pxor 16*0(%arg1), \XMM2 + pxor 16*0(%arg1), \XMM3 + pxor 16*0(%arg1), \XMM4 + movdqa \TMP3, \TMP5 + pshufd $78, \TMP3, \TMP1 + pxor \TMP3, \TMP1 + movdqa \TMP1, HashKey_k(%rsp) + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^2<<1 (mod poly) + movdqa \TMP5, HashKey_2(%rsp) +# HashKey_2 = HashKey^2<<1 (mod poly) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_2_k(%rsp) +.irpc index, 1234 # do 4 rounds + movaps 0x10*\index(%arg1), \TMP1 + AESENC \TMP1, \XMM1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 +.endr + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^3<<1 (mod poly) + movdqa \TMP5, HashKey_3(%rsp) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_3_k(%rsp) +.irpc index, 56789 # do next 5 rounds + movaps 0x10*\index(%arg1), \TMP1 + AESENC \TMP1, \XMM1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 +.endr + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^3<<1 (mod poly) + movdqa \TMP5, HashKey_4(%rsp) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_4_k(%rsp) + movaps 0xa0(%arg1), \TMP2 + AESENCLAST \TMP2, \XMM1 + AESENCLAST \TMP2, \XMM2 + AESENCLAST \TMP2, \XMM3 + AESENCLAST \TMP2, \XMM4 + movdqu 16*0(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM1 + movdqu \XMM1, 16*0(%arg2 , %r11 , 1) + movdqa \TMP1, \XMM1 + movdqu 16*1(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM2 + movdqu \XMM2, 16*1(%arg2 , %r11 , 1) + movdqa \TMP1, \XMM2 + movdqu 16*2(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM3 + movdqu \XMM3, 16*2(%arg2 , %r11 , 1) + movdqa \TMP1, \XMM3 + movdqu 16*3(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM4 + movdqu \XMM4, 16*3(%arg2 , %r11 , 1) + movdqa \TMP1, \XMM4 + add $64, %r11 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pxor \XMMDst, \XMM1 +# combine GHASHed value with the corresponding ciphertext + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + +_initial_blocks_done\num_initial_blocks\operation: + +.endm + + +/* +* if a = number of total plaintext bytes +* b = floor(a/16) +* num_initial_blocks = b mod 4 +* encrypt the initial num_initial_blocks blocks and apply ghash on +* the ciphertext +* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers +* are clobbered +* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified +*/ + + +.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ +XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation + mov arg7, %r10 # %r10 = AAD + mov arg8, %r12 # %r12 = aadLen + mov %r12, %r11 + pxor %xmm\i, %xmm\i 
+_get_AAD_loop\num_initial_blocks\operation: + movd (%r10), \TMP1 + pslldq $12, \TMP1 + psrldq $4, %xmm\i + pxor \TMP1, %xmm\i + add $4, %r10 + sub $4, %r12 + jne _get_AAD_loop\num_initial_blocks\operation + cmp $16, %r11 + je _get_AAD_loop2_done\num_initial_blocks\operation + mov $16, %r12 +_get_AAD_loop2\num_initial_blocks\operation: + psrldq $4, %xmm\i + sub $4, %r12 + cmp %r11, %r12 + jne _get_AAD_loop2\num_initial_blocks\operation +_get_AAD_loop2_done\num_initial_blocks\operation: + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data + + xor %r11, %r11 # initialise the data pointer offset as zero + + # start AES for num_initial_blocks blocks + + mov %arg5, %rax # %rax = *Y0 + movdqu (%rax), \XMM0 # XMM0 = Y0 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM0 + +.if (\i == 5) || (\i == 6) || (\i == 7) +.irpc index, \i_seq + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, %xmm\index + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap + +.endr +.irpc index, \i_seq + pxor 16*0(%arg1), %xmm\index +.endr +.irpc index, \i_seq + movaps 0x10(%rdi), \TMP1 + AESENC \TMP1, %xmm\index # Round 1 +.endr +.irpc index, \i_seq + movaps 0x20(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x30(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x40(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x50(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x60(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x70(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x80(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0x90(%arg1), \TMP1 + AESENC \TMP1, %xmm\index # Round 2 +.endr +.irpc index, \i_seq + movaps 0xa0(%arg1), \TMP1 + AESENCLAST \TMP1, %xmm\index # Round 10 +.endr +.irpc index, \i_seq + movdqu (%arg3 , %r11, 1), \TMP1 + pxor \TMP1, %xmm\index + movdqu %xmm\index, (%arg2 , %r11, 1) + # write back plaintext/ciphertext for num_initial_blocks + add $16, %r11 + + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, %xmm\index + + # prepare plaintext/ciphertext for GHASH computation +.endr +.endif + GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + # apply GHASH on num_initial_blocks blocks + +.if \i == 5 + pxor %xmm5, %xmm6 + GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm6, %xmm7 + GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.elseif \i == 6 + pxor %xmm6, %xmm7 + GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.elseif \i == 7 + pxor %xmm7, %xmm8 + GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 +.endif + cmp $64, %r13 + jl _initial_blocks_done\num_initial_blocks\operation + # no need for precomputed values +/* +* +* Precomputations for HashKey parallel with encryption of first 4 blocks. 
+* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i +*/ + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM1 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM2 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM3 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + + paddd ONE(%rip), \XMM0 # INCR Y0 + movdqa \XMM0, \XMM4 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + + pxor 16*0(%arg1), \XMM1 + pxor 16*0(%arg1), \XMM2 + pxor 16*0(%arg1), \XMM3 + pxor 16*0(%arg1), \XMM4 + movdqa \TMP3, \TMP5 + pshufd $78, \TMP3, \TMP1 + pxor \TMP3, \TMP1 + movdqa \TMP1, HashKey_k(%rsp) + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^2<<1 (mod poly) + movdqa \TMP5, HashKey_2(%rsp) +# HashKey_2 = HashKey^2<<1 (mod poly) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_2_k(%rsp) +.irpc index, 1234 # do 4 rounds + movaps 0x10*\index(%arg1), \TMP1 + AESENC \TMP1, \XMM1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 +.endr + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^3<<1 (mod poly) + movdqa \TMP5, HashKey_3(%rsp) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_3_k(%rsp) +.irpc index, 56789 # do next 5 rounds + movaps 0x10*\index(%arg1), \TMP1 + AESENC \TMP1, \XMM1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 +.endr + GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 +# TMP5 = HashKey^3<<1 (mod poly) + movdqa \TMP5, HashKey_4(%rsp) + pshufd $78, \TMP5, \TMP1 + pxor \TMP5, \TMP1 + movdqa \TMP1, HashKey_4_k(%rsp) + movaps 0xa0(%arg1), \TMP2 + AESENCLAST \TMP2, \XMM1 + AESENCLAST \TMP2, \XMM2 + AESENCLAST \TMP2, \XMM3 + AESENCLAST \TMP2, \XMM4 + movdqu 16*0(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM1 + movdqu 16*1(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM2 + movdqu 16*2(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM3 + movdqu 16*3(%arg3 , %r11 , 1), \TMP1 + pxor \TMP1, \XMM4 + movdqu \XMM1, 16*0(%arg2 , %r11 , 1) + movdqu \XMM2, 16*1(%arg2 , %r11 , 1) + movdqu \XMM3, 16*2(%arg2 , %r11 , 1) + movdqu \XMM4, 16*3(%arg2 , %r11 , 1) + + add $64, %r11 + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pxor \XMMDst, \XMM1 +# combine GHASHed value with the corresponding ciphertext + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + movdqa SHUF_MASK(%rip), %xmm14 + PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + +_initial_blocks_done\num_initial_blocks\operation: + +.endm + +/* +* encrypt 4 blocks at a time +* ghash the 4 previously encrypted ciphertext blocks +* arg1, %arg2, %arg3 are used as pointers only, not modified +* %r11 is the data offset value +*/ +.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation + + movdqa \XMM1, \XMM5 + movdqa \XMM2, \XMM6 + movdqa \XMM3, \XMM7 + movdqa \XMM4, \XMM8 + + movdqa SHUF_MASK(%rip), %xmm15 + # multiply TMP5 * HashKey using karatsuba + + movdqa \XMM5, \TMP4 + pshufd $78, \XMM5, \TMP6 + pxor \XMM5, \TMP6 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa HashKey_4(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + 
movdqa \XMM0, \XMM1 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM2 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM3 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM4 + PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap + PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + + pxor (%arg1), \XMM1 + pxor (%arg1), \XMM2 + pxor (%arg1), \XMM3 + pxor (%arg1), \XMM4 + movdqa HashKey_4_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + movaps 0x10(%arg1), \TMP1 + AESENC \TMP1, \XMM1 # Round 1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 + movaps 0x20(%arg1), \TMP1 + AESENC \TMP1, \XMM1 # Round 2 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 + movdqa \XMM6, \TMP1 + pshufd $78, \XMM6, \TMP2 + pxor \XMM6, \TMP2 + movdqa HashKey_3(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + movaps 0x30(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 3 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + movaps 0x40(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 4 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + movdqa HashKey_3_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movaps 0x50(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 5 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + pxor \TMP1, \TMP4 +# accumulate the results in TMP4:XMM5, TMP6 holds the middle part + pxor \XMM6, \XMM5 + pxor \TMP2, \TMP6 + movdqa \XMM7, \TMP1 + pshufd $78, \XMM7, \TMP2 + pxor \XMM7, \TMP2 + movdqa HashKey_2(%rsp ), \TMP5 + + # Multiply TMP5 * HashKey using karatsuba + + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + movaps 0x60(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 6 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + movaps 0x70(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 7 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + movdqa HashKey_2_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movaps 0x80(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 8 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + pxor \TMP1, \TMP4 +# accumulate the results in TMP4:XMM5, TMP6 holds the middle part + pxor \XMM7, \XMM5 + pxor \TMP2, \TMP6 + + # Multiply XMM8 * HashKey + # XMM8 and TMP5 hold the values for the two operands + + movdqa \XMM8, \TMP1 + pshufd $78, \XMM8, \TMP2 + pxor \XMM8, \TMP2 + movdqa HashKey(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + movaps 0x90(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 9 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + movaps 0xa0(%arg1), \TMP3 + AESENCLAST \TMP3, \XMM1 # Round 10 + AESENCLAST \TMP3, \XMM2 + AESENCLAST \TMP3, \XMM3 + AESENCLAST \TMP3, \XMM4 + movdqa HashKey_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movdqu (%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK + movdqu 16(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK + movdqu 32(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK + movdqu 48(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK + movdqu \XMM1, (%arg2,%r11,1) # Write to the ciphertext buffer + movdqu \XMM2, 
16(%arg2,%r11,1) # Write to the ciphertext buffer + movdqu \XMM3, 32(%arg2,%r11,1) # Write to the ciphertext buffer + movdqu \XMM4, 48(%arg2,%r11,1) # Write to the ciphertext buffer + PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + + pxor \TMP4, \TMP1 + pxor \XMM8, \XMM5 + pxor \TMP6, \TMP2 + pxor \TMP1, \TMP2 + pxor \XMM5, \TMP2 + movdqa \TMP2, \TMP3 + pslldq $8, \TMP3 # left shift TMP3 2 DWs + psrldq $8, \TMP2 # right shift TMP2 2 DWs + pxor \TMP3, \XMM5 + pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5 + + # first phase of reduction + + movdqa \XMM5, \TMP2 + movdqa \XMM5, \TMP3 + movdqa \XMM5, \TMP4 +# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently + pslld $31, \TMP2 # packed right shift << 31 + pslld $30, \TMP3 # packed right shift << 30 + pslld $25, \TMP4 # packed right shift << 25 + pxor \TMP3, \TMP2 # xor the shifted versions + pxor \TMP4, \TMP2 + movdqa \TMP2, \TMP5 + psrldq $4, \TMP5 # right shift T5 1 DW + pslldq $12, \TMP2 # left shift T2 3 DWs + pxor \TMP2, \XMM5 + + # second phase of reduction + + movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4 + movdqa \XMM5,\TMP3 + movdqa \XMM5,\TMP4 + psrld $1, \TMP2 # packed left shift >>1 + psrld $2, \TMP3 # packed left shift >>2 + psrld $7, \TMP4 # packed left shift >>7 + pxor \TMP3,\TMP2 # xor the shifted versions + pxor \TMP4,\TMP2 + pxor \TMP5, \TMP2 + pxor \TMP2, \XMM5 + pxor \TMP1, \XMM5 # result is in TMP1 + + pxor \XMM5, \XMM1 +.endm + +/* +* decrypt 4 blocks at a time +* ghash the 4 previously decrypted ciphertext blocks +* arg1, %arg2, %arg3 are used as pointers only, not modified +* %r11 is the data offset value +*/ +.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation + + movdqa \XMM1, \XMM5 + movdqa \XMM2, \XMM6 + movdqa \XMM3, \XMM7 + movdqa \XMM4, \XMM8 + + movdqa SHUF_MASK(%rip), %xmm15 + # multiply TMP5 * HashKey using karatsuba + + movdqa \XMM5, \TMP4 + pshufd $78, \XMM5, \TMP6 + pxor \XMM5, \TMP6 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa HashKey_4(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + movdqa \XMM0, \XMM1 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM2 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM3 + paddd ONE(%rip), \XMM0 # INCR CNT + movdqa \XMM0, \XMM4 + PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap + PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + + pxor (%arg1), \XMM1 + pxor (%arg1), \XMM2 + pxor (%arg1), \XMM3 + pxor (%arg1), \XMM4 + movdqa HashKey_4_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + movaps 0x10(%arg1), \TMP1 + AESENC \TMP1, \XMM1 # Round 1 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 + movaps 0x20(%arg1), \TMP1 + AESENC \TMP1, \XMM1 # Round 2 + AESENC \TMP1, \XMM2 + AESENC \TMP1, \XMM3 + AESENC \TMP1, \XMM4 + movdqa \XMM6, \TMP1 + pshufd $78, \XMM6, \TMP2 + pxor \XMM6, \TMP2 + movdqa HashKey_3(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + movaps 0x30(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 3 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + movaps 0x40(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # 
Round 4 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + movdqa HashKey_3_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movaps 0x50(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 5 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + pxor \TMP1, \TMP4 +# accumulate the results in TMP4:XMM5, TMP6 holds the middle part + pxor \XMM6, \XMM5 + pxor \TMP2, \TMP6 + movdqa \XMM7, \TMP1 + pshufd $78, \XMM7, \TMP2 + pxor \XMM7, \TMP2 + movdqa HashKey_2(%rsp ), \TMP5 + + # Multiply TMP5 * HashKey using karatsuba + + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + movaps 0x60(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 6 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + movaps 0x70(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 7 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + movdqa HashKey_2_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movaps 0x80(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 8 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + pxor \TMP1, \TMP4 +# accumulate the results in TMP4:XMM5, TMP6 holds the middle part + pxor \XMM7, \XMM5 + pxor \TMP2, \TMP6 + + # Multiply XMM8 * HashKey + # XMM8 and TMP5 hold the values for the two operands + + movdqa \XMM8, \TMP1 + pshufd $78, \XMM8, \TMP2 + pxor \XMM8, \TMP2 + movdqa HashKey(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + movaps 0x90(%arg1), \TMP3 + AESENC \TMP3, \XMM1 # Round 9 + AESENC \TMP3, \XMM2 + AESENC \TMP3, \XMM3 + AESENC \TMP3, \XMM4 + PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + movaps 0xa0(%arg1), \TMP3 + AESENCLAST \TMP3, \XMM1 # Round 10 + AESENCLAST \TMP3, \XMM2 + AESENCLAST \TMP3, \XMM3 + AESENCLAST \TMP3, \XMM4 + movdqa HashKey_k(%rsp), \TMP5 + PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movdqu (%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK + movdqu \XMM1, (%arg2,%r11,1) # Write to plaintext buffer + movdqa \TMP3, \XMM1 + movdqu 16(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK + movdqu \XMM2, 16(%arg2,%r11,1) # Write to plaintext buffer + movdqa \TMP3, \XMM2 + movdqu 32(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK + movdqu \XMM3, 32(%arg2,%r11,1) # Write to plaintext buffer + movdqa \TMP3, \XMM3 + movdqu 48(%arg3,%r11,1), \TMP3 + pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK + movdqu \XMM4, 48(%arg2,%r11,1) # Write to plaintext buffer + movdqa \TMP3, \XMM4 + PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap + PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + + pxor \TMP4, \TMP1 + pxor \XMM8, \XMM5 + pxor \TMP6, \TMP2 + pxor \TMP1, \TMP2 + pxor \XMM5, \TMP2 + movdqa \TMP2, \TMP3 + pslldq $8, \TMP3 # left shift TMP3 2 DWs + psrldq $8, \TMP2 # right shift TMP2 2 DWs + pxor \TMP3, \XMM5 + pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5 + + # first phase of reduction + + movdqa \XMM5, \TMP2 + movdqa \XMM5, \TMP3 + movdqa \XMM5, \TMP4 +# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently + pslld $31, \TMP2 # packed right shift << 31 + pslld $30, \TMP3 # packed right shift << 30 + pslld $25, \TMP4 # packed right shift << 25 + pxor \TMP3, \TMP2 # xor the shifted versions + pxor \TMP4, \TMP2 + movdqa \TMP2, \TMP5 + psrldq $4, \TMP5 # right shift T5 1 DW + pslldq $12, \TMP2 # left shift T2 3 DWs + pxor \TMP2, \XMM5 + + 
# second phase of reduction + + movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4 + movdqa \XMM5,\TMP3 + movdqa \XMM5,\TMP4 + psrld $1, \TMP2 # packed left shift >>1 + psrld $2, \TMP3 # packed left shift >>2 + psrld $7, \TMP4 # packed left shift >>7 + pxor \TMP3,\TMP2 # xor the shifted versions + pxor \TMP4,\TMP2 + pxor \TMP5, \TMP2 + pxor \TMP2, \XMM5 + pxor \TMP1, \XMM5 # result is in TMP1 + + pxor \XMM5, \XMM1 +.endm + +/* GHASH the last 4 ciphertext blocks. */ +.macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \ +TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst + + # Multiply TMP6 * HashKey (using Karatsuba) + + movdqa \XMM1, \TMP6 + pshufd $78, \XMM1, \TMP2 + pxor \XMM1, \TMP2 + movdqa HashKey_4(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 + PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 + movdqa HashKey_4_k(%rsp), \TMP4 + PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + movdqa \XMM1, \XMMDst + movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 + + # Multiply TMP1 * HashKey (using Karatsuba) + + movdqa \XMM2, \TMP1 + pshufd $78, \XMM2, \TMP2 + pxor \XMM2, \TMP2 + movdqa HashKey_3(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 + movdqa HashKey_3_k(%rsp), \TMP4 + PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pxor \TMP1, \TMP6 + pxor \XMM2, \XMMDst + pxor \TMP2, \XMM1 +# results accumulated in TMP6, XMMDst, XMM1 + + # Multiply TMP1 * HashKey (using Karatsuba) + + movdqa \XMM3, \TMP1 + pshufd $78, \XMM3, \TMP2 + pxor \XMM3, \TMP2 + movdqa HashKey_2(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 + movdqa HashKey_2_k(%rsp), \TMP4 + PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pxor \TMP1, \TMP6 + pxor \XMM3, \XMMDst + pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1 + + # Multiply TMP1 * HashKey (using Karatsuba) + movdqa \XMM4, \TMP1 + pshufd $78, \XMM4, \TMP2 + pxor \XMM4, \TMP2 + movdqa HashKey(%rsp), \TMP5 + PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 + movdqa HashKey_k(%rsp), \TMP4 + PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pxor \TMP1, \TMP6 + pxor \XMM4, \XMMDst + pxor \XMM1, \TMP2 + pxor \TMP6, \TMP2 + pxor \XMMDst, \TMP2 + # middle section of the temp results combined as in karatsuba algorithm + movdqa \TMP2, \TMP4 + pslldq $8, \TMP4 # left shift TMP4 2 DWs + psrldq $8, \TMP2 # right shift TMP2 2 DWs + pxor \TMP4, \XMMDst + pxor \TMP2, \TMP6 +# TMP6:XMMDst holds the result of the accumulated carry-less multiplications + # first phase of the reduction + movdqa \XMMDst, \TMP2 + movdqa \XMMDst, \TMP3 + movdqa \XMMDst, \TMP4 +# move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently + pslld $31, \TMP2 # packed right shifting << 31 + pslld $30, \TMP3 # packed right shifting << 30 + pslld $25, \TMP4 # packed right shifting << 25 + pxor \TMP3, \TMP2 # xor the shifted versions + pxor \TMP4, \TMP2 + movdqa \TMP2, \TMP7 + psrldq $4, \TMP7 # right shift TMP7 1 DW + pslldq $12, \TMP2 # left shift TMP2 3 DWs + pxor \TMP2, \XMMDst + + # second phase of the reduction + movdqa \XMMDst, \TMP2 + # make 3 copies of XMMDst for doing 3 shift operations + movdqa \XMMDst, \TMP3 + movdqa \XMMDst, \TMP4 + psrld $1, \TMP2 # packed left shift >> 1 + psrld $2, \TMP3 # packed left shift >> 2 + psrld $7, \TMP4 # packed left shift >> 7 + pxor \TMP3, \TMP2 # xor the shifted versions + pxor \TMP4, \TMP2 + pxor \TMP7, \TMP2 + pxor \TMP2, \XMMDst + pxor \TMP6, \XMMDst # 
reduced result is in XMMDst +.endm + +/* Encryption of a single block done*/ +.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1 + + pxor (%arg1), \XMM0 + movaps 16(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 32(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 48(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 64(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 80(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 96(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 112(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 128(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 144(%arg1), \TMP1 + AESENC \TMP1, \XMM0 + movaps 160(%arg1), \TMP1 + AESENCLAST \TMP1, \XMM0 +.endm + + +/***************************************************************************** +* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. +* u8 *out, // Plaintext output. Encrypt in-place is allowed. +* const u8 *in, // Ciphertext input +* u64 plaintext_len, // Length of data in bytes for decryption. +* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) +* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) +* // concatenated with 0x00000001. 16-byte aligned pointer. +* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. +* const u8 *aad, // Additional Authentication Data (AAD) +* u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes +* u8 *auth_tag, // Authenticated Tag output. The driver will compare this to the +* // given authentication tag and only return the plaintext if they match. +* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 +* // (most likely), 12 or 8. +* +* Assumptions: +* +* keys: +* keys are pre-expanded and aligned to 16 bytes. we are using the first +* set of 11 keys in the data structure void *aes_ctx +* +* iv: +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | Salt (From the SA) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | Initialization Vector | +* | (This is the sequence number from IPSec header) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x1 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* +* +* AAD: +* AAD padded to 128 bits with 0 +* for example, assume AAD is a u32 vector +* +* if AAD is 8 bytes: +* AAD[3] = {A0, A1}; +* padded AAD in xmm register = {A1 A0 0 0} +* +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | SPI (A1) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 32-bit Sequence Number (A0) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x0 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* AAD Format with 32-bit Sequence Number +* +* if AAD is 12 bytes: +* AAD[3] = {A0, A1, A2}; +* padded AAD in xmm register = {A2 A1 A0 0} +* +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | SPI (A2) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 64-bit Extended Sequence Number {A1,A0} | +* | | +* 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x0 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* AAD Format with 64-bit Extended Sequence Number +* +* aadLen: +* from the definition of the spec, aadLen can only be 8 or 12 bytes. +* The code supports 16 too but for other sizes, the code will fail. +* +* TLen: +* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. +* For other sizes, the code will fail. +* +* poly = x^128 + x^127 + x^126 + x^121 + 1 +* +*****************************************************************************/ + +ENTRY(aesni_gcm_dec) + push %r12 + push %r13 + push %r14 + mov %rsp, %r14 +/* +* states of %xmm registers %xmm6:%xmm15 not saved +* all %xmm registers are clobbered +*/ + sub $VARIABLE_OFFSET, %rsp + and $~63, %rsp # align rsp to 64 bytes + mov %arg6, %r12 + movdqu (%r12), %xmm13 # %xmm13 = HashKey + movdqa SHUF_MASK(%rip), %xmm2 + PSHUFB_XMM %xmm2, %xmm13 + + +# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH) + + movdqa %xmm13, %xmm2 + psllq $1, %xmm13 + psrlq $63, %xmm2 + movdqa %xmm2, %xmm1 + pslldq $8, %xmm2 + psrldq $8, %xmm1 + por %xmm2, %xmm13 + + # Reduction + + pshufd $0x24, %xmm1, %xmm2 + pcmpeqd TWOONE(%rip), %xmm2 + pand POLY(%rip), %xmm2 + pxor %xmm2, %xmm13 # %xmm13 holds the HashKey<<1 (mod poly) + + + # Decrypt first few blocks + + movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly) + mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext + and $-16, %r13 # %r13 = %r13 - (%r13 mod 16) + mov %r13, %r12 + and $(3<<4), %r12 + jz _initial_num_blocks_is_0_decrypt + cmp $(2<<4), %r12 + jb _initial_num_blocks_is_1_decrypt + je _initial_num_blocks_is_2_decrypt +_initial_num_blocks_is_3_decrypt: + INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec + sub $48, %r13 + jmp _initial_blocks_decrypted +_initial_num_blocks_is_2_decrypt: + INITIAL_BLOCKS_DEC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec + sub $32, %r13 + jmp _initial_blocks_decrypted +_initial_num_blocks_is_1_decrypt: + INITIAL_BLOCKS_DEC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec + sub $16, %r13 + jmp _initial_blocks_decrypted +_initial_num_blocks_is_0_decrypt: + INITIAL_BLOCKS_DEC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec +_initial_blocks_decrypted: + cmp $0, %r13 + je _zero_cipher_left_decrypt + sub $64, %r13 + je _four_cipher_left_decrypt +_decrypt_by_4: + GHASH_4_ENCRYPT_4_PARALLEL_DEC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \ +%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec + add $64, %r11 + sub $64, %r13 + jne _decrypt_by_4 +_four_cipher_left_decrypt: + GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \ +%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8 +_zero_cipher_left_decrypt: + mov %arg4, %r13 + and $15, %r13 # %r13 = arg4 (mod 16) + je _multiple_of_16_bytes_decrypt + + # Handle the last <16 byte block seperately + + paddd ONE(%rip), %xmm0 # increment CNT to get Yn + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm0 + + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) + sub $16, %r11 + add %r13, %r11 + movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block + lea SHIFT_MASK+16(%rip), %r12 + sub %r13, %r12 +# adjust the shuffle mask pointer to be able to 
shift 16-%r13 bytes +# (%r13 is the number of bytes in plaintext mod 16) + movdqu (%r12), %xmm2 # get the appropriate shuffle mask + PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes + + movdqa %xmm1, %xmm2 + pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) + movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 + pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 + pand %xmm1, %xmm2 + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10 ,%xmm2 + + pxor %xmm2, %xmm8 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 + # GHASH computation for the last <16 byte block + sub %r13, %r11 + add $16, %r11 + + # output %r13 bytes + MOVQ_R64_XMM %xmm0, %rax + cmp $8, %r13 + jle _less_than_8_bytes_left_decrypt + mov %rax, (%arg2 , %r11, 1) + add $8, %r11 + psrldq $8, %xmm0 + MOVQ_R64_XMM %xmm0, %rax + sub $8, %r13 +_less_than_8_bytes_left_decrypt: + mov %al, (%arg2, %r11, 1) + add $1, %r11 + shr $8, %rax + sub $1, %r13 + jne _less_than_8_bytes_left_decrypt +_multiple_of_16_bytes_decrypt: + mov arg8, %r12 # %r12 = aadLen (number of bytes) + shl $3, %r12 # convert into number of bits + movd %r12d, %xmm15 # len(A) in %xmm15 + shl $3, %arg4 # len(C) in bits (*128) + MOVQ_R64_XMM %arg4, %xmm1 + pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 + pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C) + pxor %xmm15, %xmm8 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 + # final GHASH computation + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm8 + + mov %arg5, %rax # %rax = *Y0 + movdqu (%rax), %xmm0 # %xmm0 = Y0 + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0) + pxor %xmm8, %xmm0 +_return_T_decrypt: + mov arg9, %r10 # %r10 = authTag + mov arg10, %r11 # %r11 = auth_tag_len + cmp $16, %r11 + je _T_16_decrypt + cmp $12, %r11 + je _T_12_decrypt +_T_8_decrypt: + MOVQ_R64_XMM %xmm0, %rax + mov %rax, (%r10) + jmp _return_T_done_decrypt +_T_12_decrypt: + MOVQ_R64_XMM %xmm0, %rax + mov %rax, (%r10) + psrldq $8, %xmm0 + movd %xmm0, %eax + mov %eax, 8(%r10) + jmp _return_T_done_decrypt +_T_16_decrypt: + movdqu %xmm0, (%r10) +_return_T_done_decrypt: + mov %r14, %rsp + pop %r14 + pop %r13 + pop %r12 + ret + + +/***************************************************************************** +* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. +* u8 *out, // Ciphertext output. Encrypt in-place is allowed. +* const u8 *in, // Plaintext input +* u64 plaintext_len, // Length of data in bytes for encryption. +* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) +* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) +* // concatenated with 0x00000001. 16-byte aligned pointer. +* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. +* const u8 *aad, // Additional Authentication Data (AAD) +* u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes +* u8 *auth_tag, // Authenticated Tag output. +* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), +* // 12 or 8. +* +* Assumptions: +* +* keys: +* keys are pre-expanded and aligned to 16 bytes.
we are using the +* first set of 11 keys in the data structure void *aes_ctx +* +* +* iv: +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | Salt (From the SA) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | Initialization Vector | +* | (This is the sequence number from IPSec header) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x1 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* +* +* AAD: +* AAD padded to 128 bits with 0 +* for example, assume AAD is a u32 vector +* +* if AAD is 8 bytes: +* AAD[3] = {A0, A1}; +* padded AAD in xmm register = {A1 A0 0 0} +* +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | SPI (A1) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 32-bit Sequence Number (A0) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x0 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* AAD Format with 32-bit Sequence Number +* +* if AAD is 12 bytes: +* AAD[3] = {A0, A1, A2}; +* padded AAD in xmm register = {A2 A1 A0 0} +* +* 0 1 2 3 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | SPI (A2) | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 64-bit Extended Sequence Number {A1,A0} | +* | | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* | 0x0 | +* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +* +* AAD Format with 64-bit Extended Sequence Number +* +* aadLen: +* from the definition of the spec, aadLen can only be 8 or 12 bytes. +* The code supports 16 too but for other sizes, the code will fail. +* +* TLen: +* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. +* For other sizes, the code will fail. 
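The polynomial quoted at the end of this header, poly = x^128 + x^127 + x^126 + x^121 + 1, is what the HashKey<<1 (mod poly) precomputation inside aesni_gcm_enc and aesni_gcm_dec reduces against. Ignoring the byte-order shuffle done with SHUF_MASK, the net effect of that psllq/psrlq/pcmpeqd/pand sequence is roughly the plain-C doubling sketched here; the struct and helper are illustrative, not part of the driver.

    #include <stdint.h>

    /* 128-bit value as two 64-bit halves; hi holds the most significant bits. */
    struct u128 {
        uint64_t hi;
        uint64_t lo;
    };

    /* HashKey<<1 (mod poly), poly = x^128 + x^127 + x^126 + x^121 + 1. */
    static struct u128 hashkey_shift_left(struct u128 h)
    {
        uint64_t carry = h.lo >> 63;   /* bit moving from the low half into the high half */
        uint64_t msb   = h.hi >> 63;   /* bit shifted out of the 128-bit value */

        h.hi = (h.hi << 1) | carry;
        h.lo <<= 1;

        if (msb) {                     /* conditional reduction: XOR with POLY = 0xC2000...0001 */
            h.hi ^= 0xC200000000000000ULL;
            h.lo ^= 0x0000000000000001ULL;
        }
        return h;
    }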
+* +* poly = x^128 + x^127 + x^126 + x^121 + 1 +***************************************************************************/ +ENTRY(aesni_gcm_enc) + push %r12 + push %r13 + push %r14 + mov %rsp, %r14 +# +# states of %xmm registers %xmm6:%xmm15 not saved +# all %xmm registers are clobbered +# + sub $VARIABLE_OFFSET, %rsp + and $~63, %rsp + mov %arg6, %r12 + movdqu (%r12), %xmm13 + movdqa SHUF_MASK(%rip), %xmm2 + PSHUFB_XMM %xmm2, %xmm13 + + +# precompute HashKey<<1 mod poly from the HashKey (required for GHASH) + + movdqa %xmm13, %xmm2 + psllq $1, %xmm13 + psrlq $63, %xmm2 + movdqa %xmm2, %xmm1 + pslldq $8, %xmm2 + psrldq $8, %xmm1 + por %xmm2, %xmm13 + + # reduce HashKey<<1 + + pshufd $0x24, %xmm1, %xmm2 + pcmpeqd TWOONE(%rip), %xmm2 + pand POLY(%rip), %xmm2 + pxor %xmm2, %xmm13 + movdqa %xmm13, HashKey(%rsp) + mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly) + and $-16, %r13 + mov %r13, %r12 + + # Encrypt first few blocks + + and $(3<<4), %r12 + jz _initial_num_blocks_is_0_encrypt + cmp $(2<<4), %r12 + jb _initial_num_blocks_is_1_encrypt + je _initial_num_blocks_is_2_encrypt +_initial_num_blocks_is_3_encrypt: + INITIAL_BLOCKS_ENC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc + sub $48, %r13 + jmp _initial_blocks_encrypted +_initial_num_blocks_is_2_encrypt: + INITIAL_BLOCKS_ENC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc + sub $32, %r13 + jmp _initial_blocks_encrypted +_initial_num_blocks_is_1_encrypt: + INITIAL_BLOCKS_ENC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc + sub $16, %r13 + jmp _initial_blocks_encrypted +_initial_num_blocks_is_0_encrypt: + INITIAL_BLOCKS_ENC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ +%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc +_initial_blocks_encrypted: + + # Main loop - Encrypt remaining blocks + + cmp $0, %r13 + je _zero_cipher_left_encrypt + sub $64, %r13 + je _four_cipher_left_encrypt +_encrypt_by_4_encrypt: + GHASH_4_ENCRYPT_4_PARALLEL_ENC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \ +%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc + add $64, %r11 + sub $64, %r13 + jne _encrypt_by_4_encrypt +_four_cipher_left_encrypt: + GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \ +%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8 +_zero_cipher_left_encrypt: + mov %arg4, %r13 + and $15, %r13 # %r13 = arg4 (mod 16) + je _multiple_of_16_bytes_encrypt + + # Handle the last <16 Byte block seperately + paddd ONE(%rip), %xmm0 # INCR CNT to get Yn + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm0 + + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) + sub $16, %r11 + add %r13, %r11 + movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks + lea SHIFT_MASK+16(%rip), %r12 + sub %r13, %r12 + # adjust the shuffle mask pointer to be able to shift 16-r13 bytes + # (%r13 is the number of bytes in plaintext mod 16) + movdqu (%r12), %xmm2 # get the appropriate shuffle mask + PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte + pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) + movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + # get the appropriate mask to mask out top 16-r13 bytes of xmm0 + pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10,%xmm0 + + pxor %xmm0, %xmm8 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 + # GHASH computation for the last <16 
byte block + sub %r13, %r11 + add $16, %r11 + PSHUFB_XMM %xmm10, %xmm1 + + # shuffle xmm0 back to output as ciphertext + + # Output %r13 bytes + MOVQ_R64_XMM %xmm0, %rax + cmp $8, %r13 + jle _less_than_8_bytes_left_encrypt + mov %rax, (%arg2 , %r11, 1) + add $8, %r11 + psrldq $8, %xmm0 + MOVQ_R64_XMM %xmm0, %rax + sub $8, %r13 +_less_than_8_bytes_left_encrypt: + mov %al, (%arg2, %r11, 1) + add $1, %r11 + shr $8, %rax + sub $1, %r13 + jne _less_than_8_bytes_left_encrypt +_multiple_of_16_bytes_encrypt: + mov arg8, %r12 # %r12 = addLen (number of bytes) + shl $3, %r12 + movd %r12d, %xmm15 # len(A) in %xmm15 + shl $3, %arg4 # len(C) in bits (*128) + MOVQ_R64_XMM %arg4, %xmm1 + pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 + pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C) + pxor %xmm15, %xmm8 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 + # final GHASH computation + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm8 # perform a 16 byte swap + + mov %arg5, %rax # %rax = *Y0 + movdqu (%rax), %xmm0 # %xmm0 = Y0 + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm15 # Encrypt(K, Y0) + pxor %xmm8, %xmm0 +_return_T_encrypt: + mov arg9, %r10 # %r10 = authTag + mov arg10, %r11 # %r11 = auth_tag_len + cmp $16, %r11 + je _T_16_encrypt + cmp $12, %r11 + je _T_12_encrypt +_T_8_encrypt: + MOVQ_R64_XMM %xmm0, %rax + mov %rax, (%r10) + jmp _return_T_done_encrypt +_T_12_encrypt: + MOVQ_R64_XMM %xmm0, %rax + mov %rax, (%r10) + psrldq $8, %xmm0 + movd %xmm0, %eax + mov %eax, 8(%r10) + jmp _return_T_done_encrypt +_T_16_encrypt: + movdqu %xmm0, (%r10) +_return_T_done_encrypt: + mov %r14, %rsp + pop %r14 + pop %r13 + pop %r12 + ret + +#endif + _key_expansion_128: _key_expansion_256a: @@ -55,10 +1709,11 @@ _key_expansion_256a: shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 - movaps %xmm0, (%rcx) - add $0x10, %rcx + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ret +.align 4 _key_expansion_192a: pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 @@ -76,12 +1731,13 @@ _key_expansion_192a: movaps %xmm0, %xmm1 shufps $0b01000100, %xmm0, %xmm6 - movaps %xmm6, (%rcx) + movaps %xmm6, (TKEYP) shufps $0b01001110, %xmm2, %xmm1 - movaps %xmm1, 16(%rcx) - add $0x20, %rcx + movaps %xmm1, 0x10(TKEYP) + add $0x20, TKEYP ret +.align 4 _key_expansion_192b: pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 @@ -96,10 +1752,11 @@ _key_expansion_192b: pxor %xmm3, %xmm2 pxor %xmm5, %xmm2 - movaps %xmm0, (%rcx) - add $0x10, %rcx + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ret +.align 4 _key_expansion_256b: pshufd $0b10101010, %xmm1, %xmm1 shufps $0b00010000, %xmm2, %xmm4 @@ -107,8 +1764,8 @@ _key_expansion_256b: shufps $0b10001100, %xmm2, %xmm4 pxor %xmm4, %xmm2 pxor %xmm1, %xmm2 - movaps %xmm2, (%rcx) - add $0x10, %rcx + movaps %xmm2, (TKEYP) + add $0x10, TKEYP ret /* @@ -116,17 +1773,23 @@ _key_expansion_256b: * unsigned int key_len) */ ENTRY(aesni_set_key) - movups (%rsi), %xmm0 # user key (first 16 bytes) - movaps %xmm0, (%rdi) - lea 0x10(%rdi), %rcx # key addr - movl %edx, 480(%rdi) +#ifndef __x86_64__ + pushl KEYP + movl 8(%esp), KEYP # ctx + movl 12(%esp), UKEYP # in_key + movl 16(%esp), %edx # key_len +#endif + movups (UKEYP), %xmm0 # user key (first 16 bytes) + movaps %xmm0, (KEYP) + lea 0x10(KEYP), TKEYP # key addr + movl %edx, 480(KEYP) pxor %xmm4, %xmm4 # xmm4 is assumed 0 in _key_expansion_x cmp $24, %dl jb .Lenc_key128 je .Lenc_key192 - movups 0x10(%rsi), %xmm2 # other user key - movaps %xmm2, (%rcx) - add $0x10, %rcx + movups 0x10(UKEYP), %xmm2 # other user key + 
movaps %xmm2, (TKEYP) + add $0x10, TKEYP AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 call _key_expansion_256a AESKEYGENASSIST 0x1 %xmm0 %xmm1 @@ -155,7 +1818,7 @@ ENTRY(aesni_set_key) call _key_expansion_256a jmp .Ldec_key .Lenc_key192: - movq 0x10(%rsi), %xmm2 # other user key + movq 0x10(UKEYP), %xmm2 # other user key AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 call _key_expansion_192a AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 @@ -195,33 +1858,47 @@ ENTRY(aesni_set_key) AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10 call _key_expansion_128 .Ldec_key: - sub $0x10, %rcx - movaps (%rdi), %xmm0 - movaps (%rcx), %xmm1 - movaps %xmm0, 240(%rcx) - movaps %xmm1, 240(%rdi) - add $0x10, %rdi - lea 240-16(%rcx), %rsi + sub $0x10, TKEYP + movaps (KEYP), %xmm0 + movaps (TKEYP), %xmm1 + movaps %xmm0, 240(TKEYP) + movaps %xmm1, 240(KEYP) + add $0x10, KEYP + lea 240-16(TKEYP), UKEYP .align 4 .Ldec_key_loop: - movaps (%rdi), %xmm0 + movaps (KEYP), %xmm0 AESIMC %xmm0 %xmm1 - movaps %xmm1, (%rsi) - add $0x10, %rdi - sub $0x10, %rsi - cmp %rcx, %rdi + movaps %xmm1, (UKEYP) + add $0x10, KEYP + sub $0x10, UKEYP + cmp TKEYP, KEYP jb .Ldec_key_loop - xor %rax, %rax + xor AREG, AREG +#ifndef __x86_64__ + popl KEYP +#endif ret /* * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_enc) +#ifndef __x86_64__ + pushl KEYP + pushl KLEN + movl 12(%esp), KEYP + movl 16(%esp), OUTP + movl 20(%esp), INP +#endif movl 480(KEYP), KLEN # key length movups (INP), STATE # input call _aesni_enc1 movups STATE, (OUTP) # output +#ifndef __x86_64__ + popl KLEN + popl KEYP +#endif ret /* @@ -236,6 +1913,7 @@ ENTRY(aesni_enc) * KEY * TKEYP (T1) */ +.align 4 _aesni_enc1: movaps (KEYP), KEY # key mov KEYP, TKEYP @@ -298,6 +1976,7 @@ _aesni_enc1: * KEY * TKEYP (T1) */ +.align 4 _aesni_enc4: movaps (KEYP), KEY # key mov KEYP, TKEYP @@ -391,11 +2070,22 @@ _aesni_enc4: * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_dec) +#ifndef __x86_64__ + pushl KEYP + pushl KLEN + movl 12(%esp), KEYP + movl 16(%esp), OUTP + movl 20(%esp), INP +#endif mov 480(KEYP), KLEN # key length add $240, KEYP movups (INP), STATE # input call _aesni_dec1 movups STATE, (OUTP) #output +#ifndef __x86_64__ + popl KLEN + popl KEYP +#endif ret /* @@ -410,6 +2100,7 @@ ENTRY(aesni_dec) * KEY * TKEYP (T1) */ +.align 4 _aesni_dec1: movaps (KEYP), KEY # key mov KEYP, TKEYP @@ -472,6 +2163,7 @@ _aesni_dec1: * KEY * TKEYP (T1) */ +.align 4 _aesni_dec4: movaps (KEYP), KEY # key mov KEYP, TKEYP @@ -566,6 +2258,15 @@ _aesni_dec4: * size_t len) */ ENTRY(aesni_ecb_enc) +#ifndef __x86_64__ + pushl LEN + pushl KEYP + pushl KLEN + movl 16(%esp), KEYP + movl 20(%esp), OUTP + movl 24(%esp), INP + movl 28(%esp), LEN +#endif test LEN, LEN # check length jz .Lecb_enc_ret mov 480(KEYP), KLEN @@ -602,6 +2303,11 @@ ENTRY(aesni_ecb_enc) cmp $16, LEN jge .Lecb_enc_loop1 .Lecb_enc_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN +#endif ret /* @@ -609,6 +2315,15 @@ ENTRY(aesni_ecb_enc) * size_t len); */ ENTRY(aesni_ecb_dec) +#ifndef __x86_64__ + pushl LEN + pushl KEYP + pushl KLEN + movl 16(%esp), KEYP + movl 20(%esp), OUTP + movl 24(%esp), INP + movl 28(%esp), LEN +#endif test LEN, LEN jz .Lecb_dec_ret mov 480(KEYP), KLEN @@ -646,6 +2361,11 @@ ENTRY(aesni_ecb_dec) cmp $16, LEN jge .Lecb_dec_loop1 .Lecb_dec_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN +#endif ret /* @@ -653,6 +2373,17 @@ ENTRY(aesni_ecb_dec) * size_t len, u8 *iv) */ ENTRY(aesni_cbc_enc) +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + 
pushl KLEN + movl 20(%esp), KEYP + movl 24(%esp), OUTP + movl 28(%esp), INP + movl 32(%esp), LEN + movl 36(%esp), IVP +#endif cmp $16, LEN jb .Lcbc_enc_ret mov 480(KEYP), KLEN @@ -670,6 +2401,12 @@ ENTRY(aesni_cbc_enc) jge .Lcbc_enc_loop movups STATE, (IVP) .Lcbc_enc_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif ret /* @@ -677,6 +2414,17 @@ ENTRY(aesni_cbc_enc) * size_t len, u8 *iv) */ ENTRY(aesni_cbc_dec) +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl 20(%esp), KEYP + movl 24(%esp), OUTP + movl 28(%esp), INP + movl 32(%esp), LEN + movl 36(%esp), IVP +#endif cmp $16, LEN jb .Lcbc_dec_just_ret mov 480(KEYP), KLEN @@ -690,16 +2438,30 @@ ENTRY(aesni_cbc_dec) movaps IN1, STATE1 movups 0x10(INP), IN2 movaps IN2, STATE2 +#ifdef __x86_64__ movups 0x20(INP), IN3 movaps IN3, STATE3 movups 0x30(INP), IN4 movaps IN4, STATE4 +#else + movups 0x20(INP), IN1 + movaps IN1, STATE3 + movups 0x30(INP), IN2 + movaps IN2, STATE4 +#endif call _aesni_dec4 pxor IV, STATE1 +#ifdef __x86_64__ pxor IN1, STATE2 pxor IN2, STATE3 pxor IN3, STATE4 movaps IN4, IV +#else + pxor (INP), STATE2 + pxor 0x10(INP), STATE3 + pxor IN1, STATE4 + movaps IN2, IV +#endif movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) @@ -727,8 +2489,15 @@ ENTRY(aesni_cbc_dec) .Lcbc_dec_ret: movups IV, (IVP) .Lcbc_dec_just_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif ret +#ifdef __x86_64__ .align 16 .Lbswap_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 @@ -744,6 +2513,7 @@ ENTRY(aesni_cbc_dec) * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask */ +.align 4 _aesni_inc_init: movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR @@ -768,6 +2538,7 @@ _aesni_inc_init: * CTR: == output IV, in little endian * TCTR_LOW: == lower qword of CTR */ +.align 4 _aesni_inc: paddq INC, CTR add $1, TCTR_LOW @@ -839,3 +2610,4 @@ ENTRY(aesni_ctr_enc) movups IV, (IVP) .Lctr_enc_just_ret: ret +#endif diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 2cb3dcc4490a..e1e60c7d5813 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -5,6 +5,14 @@ * Copyright (C) 2008, Intel Corp. * Author: Huang Ying * + * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD + * interface for 64-bit kernels. + * Authors: Adrian Hoban + * Gabriele Paoloni + * Tadeusz Struk (tadeusz.struk@intel.com) + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Copyright (c) 2010, Intel Corporation. + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -21,6 +29,10 @@ #include #include #include +#include +#include +#include +#include #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE) #define HAS_CTR @@ -42,8 +54,31 @@ struct async_aes_ctx { struct cryptd_ablkcipher *cryptd_tfm; }; -#define AESNI_ALIGN 16 +/* This data is stored at the end of the crypto_tfm struct. + * It's a type of per "session" data storage location. + * This needs to be 16 byte aligned. 
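Later in this diff the algorithm registrations pass cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN and aesni_rfc4106_gcm_ctx_get() recovers the context with PTR_ALIGN(), so the 16-byte requirement is met by over-allocating and rounding the pointer up. A minimal user-space illustration of that idiom; PTR_ALIGN is re-defined here only for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define AESNI_ALIGN 16
    /* Stand-in for the kernel's PTR_ALIGN(): round a pointer up to a power-of-two boundary. */
    #define PTR_ALIGN(p, a) ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

    int main(void)
    {
        unsigned char raw[64 + AESNI_ALIGN];        /* plays the role of the crypto_tfm ctx storage */
        void *ctx = PTR_ALIGN(raw, AESNI_ALIGN);    /* 16-byte aligned and still inside raw[] */

        printf("raw=%p ctx=%p\n", (void *)raw, ctx);
        return 0;
    }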
+ */ +struct aesni_rfc4106_gcm_ctx { + u8 hash_subkey[16]; + struct crypto_aes_ctx aes_key_expanded; + u8 nonce[4]; + struct cryptd_aead *cryptd_tfm; +}; + +struct aesni_gcm_set_hash_subkey_result { + int err; + struct completion completion; +}; + +struct aesni_hash_subkey_req_data { + u8 iv[16]; + struct aesni_gcm_set_hash_subkey_result result; + struct scatterlist sg; +}; + +#define AESNI_ALIGN (16) #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) +#define RFC4106_HASH_SUBKEY_SIZE 16 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); @@ -59,9 +94,62 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); +#ifdef CONFIG_X86_64 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); +/* asmlinkage void aesni_gcm_enc() + * void *ctx, AES Key schedule. Starts on a 16 byte boundary. + * u8 *out, Ciphertext output. Encrypt in-place is allowed. + * const u8 *in, Plaintext input + * unsigned long plaintext_len, Length of data in bytes for encryption. + * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) + * concatenated with 8 byte Initialisation Vector (from IPSec ESP + * Payload) concatenated with 0x00000001. 16-byte aligned pointer. + * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. + * const u8 *aad, Additional Authentication Data (AAD) + * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this + * is going to be 8 or 12 bytes + * u8 *auth_tag, Authenticated Tag output. + * unsigned long auth_tag_len), Authenticated Tag Length in bytes. + * Valid values are 16 (most likely), 12 or 8. + */ +asmlinkage void aesni_gcm_enc(void *ctx, u8 *out, + const u8 *in, unsigned long plaintext_len, u8 *iv, + u8 *hash_subkey, const u8 *aad, unsigned long aad_len, + u8 *auth_tag, unsigned long auth_tag_len); + +/* asmlinkage void aesni_gcm_dec() + * void *ctx, AES Key schedule. Starts on a 16 byte boundary. + * u8 *out, Plaintext output. Decrypt in-place is allowed. + * const u8 *in, Ciphertext input + * unsigned long ciphertext_len, Length of data in bytes for decryption. + * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) + * concatenated with 8 byte Initialisation Vector (from IPSec ESP + * Payload) concatenated with 0x00000001. 16-byte aligned pointer. + * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. + * const u8 *aad, Additional Authentication Data (AAD) + * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going + * to be 8 or 12 bytes + * u8 *auth_tag, Authenticated Tag output. + * unsigned long auth_tag_len) Authenticated Tag Length in bytes. + * Valid values are 16 (most likely), 12 or 8. 
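The pre-counter block j0 named in the two prototypes above is assembled by the glue code further down from the 4-byte salt saved at setkey() time, the 8-byte explicit IV carried with each request, and a big-endian 32-bit counter of 1. A stand-alone sketch of that layout; the helper name is ours, not the driver's.

    #include <stdint.h>
    #include <string.h>

    /* j0 = salt (4 bytes) || explicit IV (8 bytes) || 0x00000001 (big endian) */
    static void rfc4106_build_j0(uint8_t j0[16], const uint8_t salt[4], const uint8_t iv[8])
    {
        memcpy(j0, salt, 4);      /* nonce taken from the tail of the key material */
        memcpy(j0 + 4, iv, 8);    /* per-packet IV from the ESP payload */
        j0[12] = 0;
        j0[13] = 0;
        j0[14] = 0;
        j0[15] = 1;               /* initial counter value, big endian */
    }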
+ */ +asmlinkage void aesni_gcm_dec(void *ctx, u8 *out, + const u8 *in, unsigned long ciphertext_len, u8 *iv, + u8 *hash_subkey, const u8 *aad, unsigned long aad_len, + u8 *auth_tag, unsigned long auth_tag_len); + +static inline struct +aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) +{ + return + (struct aesni_rfc4106_gcm_ctx *) + PTR_ALIGN((u8 *) + crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN); +} +#endif + static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) { unsigned long addr = (unsigned long)raw_ctx; @@ -324,6 +412,7 @@ static struct crypto_alg blk_cbc_alg = { }, }; +#ifdef CONFIG_X86_64 static void ctr_crypt_final(struct crypto_aes_ctx *ctx, struct blkcipher_walk *walk) { @@ -389,6 +478,7 @@ static struct crypto_alg blk_ctr_alg = { }, }, }; +#endif static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int key_len) @@ -536,6 +626,7 @@ static struct crypto_alg ablk_cbc_alg = { }, }; +#ifdef CONFIG_X86_64 static int ablk_ctr_init(struct crypto_tfm *tfm) { struct cryptd_ablkcipher *cryptd_tfm; @@ -612,6 +703,7 @@ static struct crypto_alg ablk_rfc3686_ctr_alg = { }, }; #endif +#endif #ifdef HAS_LRW static int ablk_lrw_init(struct crypto_tfm *tfm) @@ -730,6 +822,424 @@ static struct crypto_alg ablk_xts_alg = { }; #endif +#ifdef CONFIG_X86_64 +static int rfc4106_init(struct crypto_tfm *tfm) +{ + struct cryptd_aead *cryptd_tfm; + struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) + PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); + cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + crypto_aead_reqsize(&cryptd_tfm->base); + return 0; +} + +static void rfc4106_exit(struct crypto_tfm *tfm) +{ + struct aesni_rfc4106_gcm_ctx *ctx = + (struct aesni_rfc4106_gcm_ctx *) + PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); + if (!IS_ERR(ctx->cryptd_tfm)) + cryptd_free_aead(ctx->cryptd_tfm); + return; +} + +static void +rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) +{ + struct aesni_gcm_set_hash_subkey_result *result = req->data; + + if (err == -EINPROGRESS) + return; + result->err = err; + complete(&result->completion); +} + +static int +rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) +{ + struct crypto_ablkcipher *ctr_tfm; + struct ablkcipher_request *req; + int ret = -EINVAL; + struct aesni_hash_subkey_req_data *req_data; + + ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); + if (IS_ERR(ctr_tfm)) + return PTR_ERR(ctr_tfm); + + crypto_ablkcipher_clear_flags(ctr_tfm, ~0); + + ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); + if (ret) { + crypto_free_ablkcipher(ctr_tfm); + return ret; + } + + req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); + if (!req) { + crypto_free_ablkcipher(ctr_tfm); + return -EINVAL; + } + + req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + crypto_free_ablkcipher(ctr_tfm); + return -ENOMEM; + } + memset(req_data->iv, 0, sizeof(req_data->iv)); + + /* Clear the data in the hash sub key container to zero.*/ + /* We want to cipher all zeros to create the hash sub key. 
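rfc4106_set_hash_subkey() reaches H = E_K(0^128) indirectly: it runs one block of ctr(aes) with an all-zero IV over an all-zero buffer, and since CTR output is plaintext XOR E_K(counter block), the result is exactly the cipher applied to the zero block. A sketch of the equivalent direct computation, with the single-block primitive left abstract.

    #include <stdint.h>

    /* Any single-block AES encryption primitive will do here; the driver gets the
     * same value out of a one-block ctr(aes) request with a zero IV. */
    typedef void (*aes_block_fn)(const void *key_schedule,
                                 uint8_t out[16], const uint8_t in[16]);

    static void derive_hash_subkey(aes_block_fn encrypt_block,
                                   const void *key_schedule, uint8_t h[16])
    {
        static const uint8_t zero_block[16];        /* all-zero plaintext block */

        encrypt_block(key_schedule, h, zero_block); /* H = E_K(0^128) */
    }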
*/ + memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); + + init_completion(&req_data->result.completion); + sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); + ablkcipher_request_set_tfm(req, ctr_tfm); + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + rfc4106_set_hash_subkey_done, + &req_data->result); + + ablkcipher_request_set_crypt(req, &req_data->sg, + &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); + + ret = crypto_ablkcipher_encrypt(req); + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = wait_for_completion_interruptible + (&req_data->result.completion); + if (!ret) + ret = req_data->result.err; + } + ablkcipher_request_free(req); + kfree(req_data); + crypto_free_ablkcipher(ctr_tfm); + return ret; +} + +static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, + unsigned int key_len) +{ + int ret = 0; + struct crypto_tfm *tfm = crypto_aead_tfm(parent); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); + u8 *new_key_mem = NULL; + + if (key_len < 4) { + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + /*Account for 4 byte nonce at the end.*/ + key_len -= 4; + if (key_len != AES_KEYSIZE_128) { + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); + /*This must be on a 16 byte boundary!*/ + if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) + return -EINVAL; + + if ((unsigned long)key % AESNI_ALIGN) { + /*key is not aligned: use an auxiliary aligned pointer*/ + new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL); + if (!new_key_mem) + return -ENOMEM; + + new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN); + memcpy(new_key_mem, key, key_len); + key = new_key_mem; + } + + if (!irq_fpu_usable()) + ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), + key, key_len); + else { + kernel_fpu_begin(); + ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); + kernel_fpu_end(); + } + /*This must be on a 16 byte boundary!*/ + if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { + ret = -EINVAL; + goto exit; + } + ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); +exit: + kfree(new_key_mem); + return ret; +} + +/* This is the Integrity Check Value (aka the authentication tag) length and can + * be 8, 12 or 16 bytes long.
*/ +static int rfc4106_set_authsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + crypto_aead_crt(parent)->authsize = authsize; + crypto_aead_crt(cryptd_child)->authsize = authsize; + return 0; +} + +static int rfc4106_encrypt(struct aead_request *req) +{ + int ret; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + + if (!irq_fpu_usable()) { + struct aead_request *cryptd_req = + (struct aead_request *) aead_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_aead_encrypt(cryptd_req); + } else { + kernel_fpu_begin(); + ret = cryptd_child->base.crt_aead.encrypt(req); + kernel_fpu_end(); + return ret; + } +} + +static int rfc4106_decrypt(struct aead_request *req) +{ + int ret; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + + if (!irq_fpu_usable()) { + struct aead_request *cryptd_req = + (struct aead_request *) aead_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_aead_decrypt(cryptd_req); + } else { + kernel_fpu_begin(); + ret = cryptd_child->base.crt_aead.decrypt(req); + kernel_fpu_end(); + return ret; + } +} + +static struct crypto_alg rfc4106_alg = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, + .cra_alignmask = 0, + .cra_type = &crypto_nivaead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list), + .cra_init = rfc4106_init, + .cra_exit = rfc4106_exit, + .cra_u = { + .aead = { + .setkey = rfc4106_set_key, + .setauthsize = rfc4106_set_authsize, + .encrypt = rfc4106_encrypt, + .decrypt = rfc4106_decrypt, + .geniv = "seqiv", + .ivsize = 8, + .maxauthsize = 16, + }, + }, +}; + +static int __driver_rfc4106_encrypt(struct aead_request *req) +{ + u8 one_entry_in_sg = 0; + u8 *src, *dst, *assoc; + __be32 counter = cpu_to_be32(1); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + void *aes_ctx = &(ctx->aes_key_expanded); + unsigned long auth_tag_len = crypto_aead_authsize(tfm); + u8 iv_tab[16+AESNI_ALIGN]; + u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN); + struct scatter_walk src_sg_walk; + struct scatter_walk assoc_sg_walk; + struct scatter_walk dst_sg_walk; + unsigned int i; + + /* Assuming we are supporting rfc4106 64-bit extended */ + /* sequence numbers We need to have the AAD length equal */ + /* to 8 or 12 bytes */ + if (unlikely(req->assoclen != 8 && req->assoclen != 12)) + return -EINVAL; + /* IV below built */ + for (i = 0; i < 4; i++) + *(iv+i) = ctx->nonce[i]; + for (i = 0; i < 8; i++) + *(iv+4+i) = req->iv[i]; + *((__be32 *)(iv+12)) = counter; + + if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { + one_entry_in_sg = 1; + scatterwalk_start(&src_sg_walk, 
req->src); + scatterwalk_start(&assoc_sg_walk, req->assoc); + src = scatterwalk_map(&src_sg_walk, 0); + assoc = scatterwalk_map(&assoc_sg_walk, 0); + dst = src; + if (unlikely(req->src != req->dst)) { + scatterwalk_start(&dst_sg_walk, req->dst); + dst = scatterwalk_map(&dst_sg_walk, 0); + } + + } else { + /* Allocate memory for src, dst, assoc */ + src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, + GFP_ATOMIC); + if (unlikely(!src)) + return -ENOMEM; + assoc = (src + req->cryptlen + auth_tag_len); + scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); + scatterwalk_map_and_copy(assoc, req->assoc, 0, + req->assoclen, 0); + dst = src; + } + + aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, + ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst + + ((unsigned long)req->cryptlen), auth_tag_len); + + /* The authTag (aka the Integrity Check Value) needs to be written + * back to the packet. */ + if (one_entry_in_sg) { + if (unlikely(req->src != req->dst)) { + scatterwalk_unmap(dst, 0); + scatterwalk_done(&dst_sg_walk, 0, 0); + } + scatterwalk_unmap(src, 0); + scatterwalk_unmap(assoc, 0); + scatterwalk_done(&src_sg_walk, 0, 0); + scatterwalk_done(&assoc_sg_walk, 0, 0); + } else { + scatterwalk_map_and_copy(dst, req->dst, 0, + req->cryptlen + auth_tag_len, 1); + kfree(src); + } + return 0; +} + +static int __driver_rfc4106_decrypt(struct aead_request *req) +{ + u8 one_entry_in_sg = 0; + u8 *src, *dst, *assoc; + unsigned long tempCipherLen = 0; + __be32 counter = cpu_to_be32(1); + int retval = 0; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + void *aes_ctx = &(ctx->aes_key_expanded); + unsigned long auth_tag_len = crypto_aead_authsize(tfm); + u8 iv_and_authTag[32+AESNI_ALIGN]; + u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN); + u8 *authTag = iv + 16; + struct scatter_walk src_sg_walk; + struct scatter_walk assoc_sg_walk; + struct scatter_walk dst_sg_walk; + unsigned int i; + + if (unlikely((req->cryptlen < auth_tag_len) || + (req->assoclen != 8 && req->assoclen != 12))) + return -EINVAL; + /* Assuming we are supporting rfc4106 64-bit extended */ + /* sequence numbers We need to have the AAD length */ + /* equal to 8 or 12 bytes */ + + tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); + /* IV below built */ + for (i = 0; i < 4; i++) + *(iv+i) = ctx->nonce[i]; + for (i = 0; i < 8; i++) + *(iv+4+i) = req->iv[i]; + *((__be32 *)(iv+12)) = counter; + + if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { + one_entry_in_sg = 1; + scatterwalk_start(&src_sg_walk, req->src); + scatterwalk_start(&assoc_sg_walk, req->assoc); + src = scatterwalk_map(&src_sg_walk, 0); + assoc = scatterwalk_map(&assoc_sg_walk, 0); + dst = src; + if (unlikely(req->src != req->dst)) { + scatterwalk_start(&dst_sg_walk, req->dst); + dst = scatterwalk_map(&dst_sg_walk, 0); + } + + } else { + /* Allocate memory for src, dst, assoc */ + src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); + if (!src) + return -ENOMEM; + assoc = (src + req->cryptlen + auth_tag_len); + scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); + scatterwalk_map_and_copy(assoc, req->assoc, 0, + req->assoclen, 0); + dst = src; + } + + aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv, + ctx->hash_subkey, assoc, (unsigned long)req->assoclen, + authTag, auth_tag_len); + + /* Compare generated tag with passed in tag. */ + retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ? 
+ -EBADMSG : 0; + + if (one_entry_in_sg) { + if (unlikely(req->src != req->dst)) { + scatterwalk_unmap(dst, 0); + scatterwalk_done(&dst_sg_walk, 0, 0); + } + scatterwalk_unmap(src, 0); + scatterwalk_unmap(assoc, 0); + scatterwalk_done(&src_sg_walk, 0, 0); + scatterwalk_done(&assoc_sg_walk, 0, 0); + } else { + scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); + kfree(src); + } + return retval; +} + +static struct crypto_alg __rfc4106_alg = { + .cra_name = "__gcm-aes-aesni", + .cra_driver_name = "__driver-gcm-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list), + .cra_u = { + .aead = { + .encrypt = __driver_rfc4106_encrypt, + .decrypt = __driver_rfc4106_decrypt, + }, + }, +}; +#endif + static int __init aesni_init(void) { int err; @@ -738,6 +1248,7 @@ static int __init aesni_init(void) printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); return -ENODEV; } + if ((err = crypto_register_alg(&aesni_alg))) goto aes_err; if ((err = crypto_register_alg(&__aesni_alg))) @@ -746,18 +1257,24 @@ static int __init aesni_init(void) goto blk_ecb_err; if ((err = crypto_register_alg(&blk_cbc_alg))) goto blk_cbc_err; - if ((err = crypto_register_alg(&blk_ctr_alg))) - goto blk_ctr_err; if ((err = crypto_register_alg(&ablk_ecb_alg))) goto ablk_ecb_err; if ((err = crypto_register_alg(&ablk_cbc_alg))) goto ablk_cbc_err; +#ifdef CONFIG_X86_64 + if ((err = crypto_register_alg(&blk_ctr_alg))) + goto blk_ctr_err; if ((err = crypto_register_alg(&ablk_ctr_alg))) goto ablk_ctr_err; + if ((err = crypto_register_alg(&__rfc4106_alg))) + goto __aead_gcm_err; + if ((err = crypto_register_alg(&rfc4106_alg))) + goto aead_gcm_err; #ifdef HAS_CTR if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg))) goto ablk_rfc3686_ctr_err; #endif +#endif #ifdef HAS_LRW if ((err = crypto_register_alg(&ablk_lrw_alg))) goto ablk_lrw_err; @@ -770,7 +1287,6 @@ static int __init aesni_init(void) if ((err = crypto_register_alg(&ablk_xts_alg))) goto ablk_xts_err; #endif - return err; #ifdef HAS_XTS @@ -784,18 +1300,24 @@ ablk_pcbc_err: crypto_unregister_alg(&ablk_lrw_alg); ablk_lrw_err: #endif +#ifdef CONFIG_X86_64 #ifdef HAS_CTR crypto_unregister_alg(&ablk_rfc3686_ctr_alg); ablk_rfc3686_ctr_err: #endif + crypto_unregister_alg(&rfc4106_alg); +aead_gcm_err: + crypto_unregister_alg(&__rfc4106_alg); +__aead_gcm_err: crypto_unregister_alg(&ablk_ctr_alg); ablk_ctr_err: + crypto_unregister_alg(&blk_ctr_alg); +blk_ctr_err: +#endif crypto_unregister_alg(&ablk_cbc_alg); ablk_cbc_err: crypto_unregister_alg(&ablk_ecb_alg); ablk_ecb_err: - crypto_unregister_alg(&blk_ctr_alg); -blk_ctr_err: crypto_unregister_alg(&blk_cbc_alg); blk_cbc_err: crypto_unregister_alg(&blk_ecb_alg); @@ -818,13 +1340,17 @@ static void __exit aesni_exit(void) #ifdef HAS_LRW crypto_unregister_alg(&ablk_lrw_alg); #endif +#ifdef CONFIG_X86_64 #ifdef HAS_CTR crypto_unregister_alg(&ablk_rfc3686_ctr_alg); #endif + crypto_unregister_alg(&rfc4106_alg); + crypto_unregister_alg(&__rfc4106_alg); crypto_unregister_alg(&ablk_ctr_alg); + crypto_unregister_alg(&blk_ctr_alg); +#endif crypto_unregister_alg(&ablk_cbc_alg); crypto_unregister_alg(&ablk_ecb_alg); - crypto_unregister_alg(&blk_ctr_alg); crypto_unregister_alg(&blk_cbc_alg); crypto_unregister_alg(&blk_ecb_alg); crypto_unregister_alg(&__aesni_alg); diff --git 
a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 3b62ab56c7a0..5e1a2eef3e7c 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -32,11 +32,7 @@ #define BOOT_HEAP_SIZE 0x400000 #else /* !CONFIG_KERNEL_BZIP2 */ -#ifdef CONFIG_X86_64 -#define BOOT_HEAP_SIZE 0x7000 -#else -#define BOOT_HEAP_SIZE 0x4000 -#endif +#define BOOT_HEAP_SIZE 0x8000 #endif /* !CONFIG_KERNEL_BZIP2 */ diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ba870bb6dd8e..c704b38c57a2 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -10,6 +10,9 @@ #include #include +/* Even though we don't support this, supply it to appease OF */ +static inline void irq_dispose_mapping(unsigned int virq) { } + static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index b36c6b3fe144..8e37deb1eb38 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -15,6 +15,14 @@ struct x86_emulate_ctxt; +struct x86_exception { + u8 vector; + bool error_code_valid; + u16 error_code; + bool nested_page_fault; + u64 address; /* cr2 or nested page fault gpa */ +}; + /* * x86_emulate_ops: * @@ -64,7 +72,8 @@ struct x86_emulate_ops { * @bytes: [IN ] Number of bytes to read from memory. */ int (*read_std)(unsigned long addr, void *val, - unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); + unsigned int bytes, struct kvm_vcpu *vcpu, + struct x86_exception *fault); /* * write_std: Write bytes of standard (non-emulated/special) memory. @@ -74,7 +83,8 @@ struct x86_emulate_ops { * @bytes: [IN ] Number of bytes to write to memory. */ int (*write_std)(unsigned long addr, void *val, - unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); + unsigned int bytes, struct kvm_vcpu *vcpu, + struct x86_exception *fault); /* * fetch: Read bytes of standard (non-emulated/special) memory. * Used for instruction fetch. @@ -83,7 +93,8 @@ struct x86_emulate_ops { * @bytes: [IN ] Number of bytes to read from memory. */ int (*fetch)(unsigned long addr, void *val, - unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); + unsigned int bytes, struct kvm_vcpu *vcpu, + struct x86_exception *fault); /* * read_emulated: Read bytes from emulated/special memory area. 
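The kvm_emulate.h changes in this hunk and the next replace the bare u32 *error out-parameter with the struct x86_exception defined above, so a callback can report the vector, the error code and the faulting address together. Roughly, a memory-access callback that hits a page fault would now fill the structure along these lines; the helper is illustrative, and the kernel's u8/u16/u64 types are swapped for stdint types so the sketch builds on its own.

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the struct added to kvm_emulate.h in this patch. */
    struct x86_exception {
        uint8_t  vector;
        bool     error_code_valid;
        uint16_t error_code;
        bool     nested_page_fault;
        uint64_t address;          /* cr2 or nested page fault gpa */
    };

    #define PF_VECTOR 14

    static void report_page_fault(struct x86_exception *fault,
                                  uint64_t address, uint16_t error_code)
    {
        fault->vector = PF_VECTOR;
        fault->error_code_valid = true;
        fault->error_code = error_code;
        fault->nested_page_fault = false;
        fault->address = address;
    }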
@@ -94,7 +105,7 @@ struct x86_emulate_ops { int (*read_emulated)(unsigned long addr, void *val, unsigned int bytes, - unsigned int *error, + struct x86_exception *fault, struct kvm_vcpu *vcpu); /* @@ -107,7 +118,7 @@ struct x86_emulate_ops { int (*write_emulated)(unsigned long addr, const void *val, unsigned int bytes, - unsigned int *error, + struct x86_exception *fault, struct kvm_vcpu *vcpu); /* @@ -122,7 +133,7 @@ struct x86_emulate_ops { const void *old, const void *new, unsigned int bytes, - unsigned int *error, + struct x86_exception *fault, struct kvm_vcpu *vcpu); int (*pio_in_emulated)(int size, unsigned short port, void *val, @@ -159,7 +170,10 @@ struct operand { }; union { unsigned long *reg; - unsigned long mem; + struct segmented_address { + ulong ea; + unsigned seg; + } mem; } addr; union { unsigned long val; @@ -226,9 +240,8 @@ struct x86_emulate_ctxt { bool perm_ok; /* do not check permissions if true */ - int exception; /* exception that happens during emulation or -1 */ - u32 error_code; /* error code for exception */ - bool error_code_valid; + bool have_exception; + struct x86_exception exception; /* decode cache */ struct decode_cache decode; @@ -252,7 +265,7 @@ struct x86_emulate_ctxt { #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 #endif -int x86_decode_insn(struct x86_emulate_ctxt *ctxt); +int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len); #define EMULATION_FAILED -1 #define EMULATION_OK 0 #define EMULATION_RESTART 1 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f702f82aa1eb..ffd7f8d29187 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -83,11 +83,14 @@ #define KVM_NR_FIXED_MTRR_REGION 88 #define KVM_NR_VAR_MTRR 8 +#define ASYNC_PF_PER_VCPU 64 + extern spinlock_t kvm_lock; extern struct list_head vm_list; struct kvm_vcpu; struct kvm; +struct kvm_async_pf; enum kvm_reg { VCPU_REGS_RAX = 0, @@ -114,6 +117,7 @@ enum kvm_reg { enum kvm_reg_ex { VCPU_EXREG_PDPTR = NR_VCPU_REGS, + VCPU_EXREG_CR3, }; enum { @@ -238,16 +242,18 @@ struct kvm_mmu { void (*new_cr3)(struct kvm_vcpu *vcpu); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); unsigned long (*get_cr3)(struct kvm_vcpu *vcpu); - int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); - void (*inject_page_fault)(struct kvm_vcpu *vcpu); + int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, + bool prefault); + void (*inject_page_fault)(struct kvm_vcpu *vcpu, + struct x86_exception *fault); void (*free)(struct kvm_vcpu *vcpu); gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, - u32 *error); + struct x86_exception *exception); gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access); void (*prefetch_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page); int (*sync_page)(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, bool clear_unsync); + struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva); hpa_t root_hpa; int root_level; @@ -315,16 +321,6 @@ struct kvm_vcpu_arch { */ struct kvm_mmu *walk_mmu; - /* - * This struct is filled with the necessary information to propagate a - * page fault into the guest - */ - struct { - u64 address; - unsigned error_code; - bool nested; - } fault; - /* only needed in kvm_pv_mmu_op() path, but it's hot so * put it here to avoid allocation */ struct kvm_pv_mmu_op_buffer mmu_op_buffer; @@ -412,6 +408,15 @@ struct kvm_vcpu_arch { u64 hv_vapic; cpumask_var_t wbinvd_dirty_mask; + + struct { + bool halted; + gfn_t 
gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)]; + struct gfn_to_hva_cache data; + u64 msr_val; + u32 id; + bool send_user_only; + } apf; }; struct kvm_arch { @@ -456,6 +461,10 @@ struct kvm_arch { /* fields used by HYPER-V emulation */ u64 hv_guest_os_id; u64 hv_hypercall; + + #ifdef CONFIG_KVM_MMU_AUDIT + int audit_point; + #endif }; struct kvm_vm_stat { @@ -529,6 +538,7 @@ struct kvm_x86_ops { struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu); + void (*decache_cr3)(struct kvm_vcpu *vcpu); void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); @@ -582,9 +592,17 @@ struct kvm_x86_ops { void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); + void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); const struct trace_print_flags *exit_reasons_str; }; +struct kvm_arch_async_pf { + u32 token; + gfn_t gfn; + unsigned long cr3; + bool direct_map; +}; + extern struct kvm_x86_ops *kvm_x86_ops; int kvm_mmu_module_init(void); @@ -594,7 +612,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); int kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); -void kvm_mmu_set_base_ptes(u64 base_pte); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask); @@ -623,8 +640,15 @@ enum emulation_result { #define EMULTYPE_NO_DECODE (1 << 0) #define EMULTYPE_TRAP_UD (1 << 1) #define EMULTYPE_SKIP (1 << 2) -int emulate_instruction(struct kvm_vcpu *vcpu, - unsigned long cr2, u16 error_code, int emulation_type); +int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, + int emulation_type, void *insn, int insn_len); + +static inline int emulate_instruction(struct kvm_vcpu *vcpu, + int emulation_type) +{ + return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); +} + void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); @@ -650,7 +674,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); -void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); +int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); @@ -668,11 +692,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); -void kvm_inject_page_fault(struct kvm_vcpu *vcpu); +void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t gfn, void *data, int offset, int len, u32 access); -void kvm_propagate_fault(struct kvm_vcpu *vcpu); +void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); 
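The apf.gfns[] array introduced earlier in this header is sized with roundup_pow_of_two(ASYNC_PF_PER_VCPU) so that an entry can be chosen with a mask instead of a division. The actual lookup lives elsewhere in KVM; the snippet below only illustrates the sizing trick, and the hash constant and function are ours.

    #include <stdint.h>

    #define ASYNC_PF_PER_VCPU 64    /* already a power of two in this patch */

    /* Map a guest frame number to a slot in a power-of-two sized table. */
    static uint32_t gfn_to_slot(uint64_t gfn, uint32_t table_size)
    {
        uint64_t h = gfn * 0x9E3779B97F4A7C15ULL;      /* any decent 64-bit mix works */

        return (uint32_t)(h >> 32) & (table_size - 1); /* mask instead of '% table_size' */
    }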
int kvm_pic_set_irq(void *opaque, int irq, int level); @@ -690,16 +714,21 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); -gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); -gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); -gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); -gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception); +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception); +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception); +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception); int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); int kvm_fix_hypercall(struct kvm_vcpu *vcpu); -int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, + void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); void kvm_enable_tdp(void); @@ -766,20 +795,25 @@ enum { #define HF_VINTR_MASK (1 << 2) #define HF_NMI_MASK (1 << 3) #define HF_IRET_MASK (1 << 4) +#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */ /* * Hardware virtualization extension instructions may fault if a * reboot turns off virtualization while processes are running. * Trap the fault and ignore the instruction if that happens. */ -asmlinkage void kvm_handle_fault_on_reboot(void); +asmlinkage void kvm_spurious_fault(void); +extern bool kvm_rebooting; #define __kvm_handle_fault_on_reboot(insn) \ "666: " insn "\n\t" \ + "668: \n\t" \ ".pushsection .fixup, \"ax\" \n" \ "667: \n\t" \ + "cmpb $0, kvm_rebooting \n\t" \ + "jne 668b \n\t" \ __ASM_SIZE(push) " $666b \n\t" \ - "jmp kvm_handle_fault_on_reboot \n\t" \ + "call kvm_spurious_fault \n\t" \ ".popsection \n\t" \ ".pushsection __ex_table, \"a\" \n\t" \ _ASM_PTR " 666b, 667b \n\t" \ @@ -788,6 +822,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void); #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_age_hva(struct kvm *kvm, unsigned long hva); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); @@ -799,4 +834,15 @@ void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); +void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); +void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); +extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); + +void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 7b562b6184bc..a427bf77a93d 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -20,6 +20,7 @@ * are available. 
The use of 0x11 and 0x12 is deprecated */ #define KVM_FEATURE_CLOCKSOURCE2 3 +#define KVM_FEATURE_ASYNC_PF 4 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. @@ -32,9 +33,13 @@ /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */ #define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01 +#define MSR_KVM_ASYNC_PF_EN 0x4b564d02 #define KVM_MAX_MMU_OP_BATCH 32 +#define KVM_ASYNC_PF_ENABLED (1 << 0) +#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1) + /* Operations for KVM_HC_MMU_OP */ #define KVM_MMU_OP_WRITE_PTE 1 #define KVM_MMU_OP_FLUSH_TLB 2 @@ -61,10 +66,20 @@ struct kvm_mmu_op_release_pt { __u64 pt_phys; }; +#define KVM_PV_REASON_PAGE_NOT_PRESENT 1 +#define KVM_PV_REASON_PAGE_READY 2 + +struct kvm_vcpu_pv_apf_data { + __u32 reason; + __u8 pad[60]; + __u32 enabled; +}; + #ifdef __KERNEL__ #include extern void kvmclock_init(void); +extern int kvm_register_clock(char *txt); /* This instruction is vmcall. On non-VT architectures, it will generate a @@ -160,8 +175,17 @@ static inline unsigned int kvm_arch_para_features(void) #ifdef CONFIG_KVM_GUEST void __init kvm_guest_init(void); +void kvm_async_pf_task_wait(u32 token); +void kvm_async_pf_task_wake(u32 token); +u32 kvm_read_and_reset_pf_reason(void); #else #define kvm_guest_init() do { } while (0) +#define kvm_async_pf_task_wait(T) do {} while(0) +#define kvm_async_pf_task_wake(T) do {} while(0) +static inline u32 kvm_read_and_reset_pf_reason(void) +{ + return 0; +} #endif #endif /* __KERNEL__ */ diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 42a978c0c1b3..f482010350fb 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h @@ -107,10 +107,14 @@ extern int olpc_ec_mask_unset(uint8_t bits); /* GPIO assignments */ #define OLPC_GPIO_MIC_AC 1 -#define OLPC_GPIO_DCON_IRQ geode_gpio(7) +#define OLPC_GPIO_DCON_STAT0 5 +#define OLPC_GPIO_DCON_STAT1 6 +#define OLPC_GPIO_DCON_IRQ 7 #define OLPC_GPIO_THRM_ALRM geode_gpio(10) -#define OLPC_GPIO_SMB_CLK geode_gpio(14) -#define OLPC_GPIO_SMB_DATA geode_gpio(15) +#define OLPC_GPIO_DCON_LOAD 11 +#define OLPC_GPIO_DCON_BLANK 12 +#define OLPC_GPIO_SMB_CLK 14 +#define OLPC_GPIO_SMB_DATA 15 #define OLPC_GPIO_WORKAUX geode_gpio(24) #define OLPC_GPIO_LID geode_gpio(26) #define OLPC_GPIO_ECSCI geode_gpio(27) diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 2a8478140bb3..641988efe063 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h @@ -8,6 +8,8 @@ #ifdef CONFIG_OLPC_OPENFIRMWARE +extern bool olpc_ofw_is_installed(void); + /* run an OFW command by calling into the firmware */ #define olpc_ofw(name, args, res) \ __olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res) @@ -26,10 +28,17 @@ extern bool olpc_ofw_present(void); #else /* !CONFIG_OLPC_OPENFIRMWARE */ +static inline bool olpc_ofw_is_installed(void) { return false; } static inline void olpc_ofw_detect(void) { } static inline void setup_olpc_ofw_pgd(void) { } static inline bool olpc_ofw_present(void) { return false; } #endif /* !CONFIG_OLPC_OPENFIRMWARE */ +#ifdef CONFIG_OLPC_OPENFIRMWARE_DT +extern void olpc_dt_build_devicetree(void); +#else +static inline void olpc_dt_build_devicetree(void) { } +#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */ + #endif /* _ASM_X86_OLPC_OFW_H */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 7709c12431b8..2071a8b2b32f 100644 --- 
a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -435,6 +435,11 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr, { PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); } +static inline void pmd_update(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp); +} static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -442,6 +447,12 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); } +static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp); +} + static inline pte_t __pte(pteval_t val) { pteval_t ret; @@ -543,6 +554,20 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ +#if PAGETABLE_LEVELS >= 3 + if (sizeof(pmdval_t) > sizeof(long)) + /* 5 arg words */ + pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd); + else + PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd); +#endif +} +#endif + static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { pmdval_t val = native_pmd_val(pmd); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index b82bac975250..82885099c869 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -265,10 +265,16 @@ struct pv_mmu_ops { void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); + void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval); void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (*pmd_update)(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp); + void (*pmd_update_defer)(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 271de94c3810..b4389a468fb6 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -92,7 +92,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, - unsigned long adddress) + unsigned long address) { ___pmd_free_tlb(tlb, pmd); } diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 2334982b339e..98391db840c6 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -46,6 +46,15 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) #endif +#ifdef CONFIG_SMP +static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) +{ + return __pmd(xchg((pmdval_t *)xp, 0)); +} +#else +#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) +#endif + /* * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, * split up the 29 bits of offset into this range: diff 
--git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 177b0165ea01..94b979d1b58d 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -104,6 +104,29 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) #endif +#ifdef CONFIG_SMP +union split_pmd { + struct { + u32 pmd_low; + u32 pmd_high; + }; + pmd_t pmd; +}; +static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) +{ + union split_pmd res, *orig = (union split_pmd *)pmdp; + + /* xchg acts as a barrier before setting of the high bits */ + res.pmd_low = xchg(&orig->pmd_low, 0); + res.pmd_high = orig->pmd_high; + orig->pmd_high = 0; + + return res.pmd; +} +#else +#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) +#endif + /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index ada823a13c7c..18601c86fab1 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -35,6 +35,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #else /* !CONFIG_PARAVIRT */ #define set_pte(ptep, pte) native_set_pte(ptep, pte) #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) +#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) #define set_pte_atomic(ptep, pte) \ native_set_pte_atomic(ptep, pte) @@ -59,6 +60,8 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #define pte_update(mm, addr, ptep) do { } while (0) #define pte_update_defer(mm, addr, ptep) do { } while (0) +#define pmd_update(mm, addr, ptep) do { } while (0) +#define pmd_update_defer(mm, addr, ptep) do { } while (0) #define pgd_val(x) native_pgd_val(x) #define __pgd(x) native_make_pgd(x) @@ -94,6 +97,11 @@ static inline int pte_young(pte_t pte) return pte_flags(pte) & _PAGE_ACCESSED; } +static inline int pmd_young(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_ACCESSED; +} + static inline int pte_write(pte_t pte) { return pte_flags(pte) & _PAGE_RW; @@ -142,6 +150,23 @@ static inline int pmd_large(pmd_t pte) (_PAGE_PSE | _PAGE_PRESENT); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return pmd_val(pmd) & _PAGE_SPLITTING; +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_val(pmd) & _PAGE_PSE; +} + +static inline int has_transparent_hugepage(void) +{ + return cpu_has_pse; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + static inline pte_t pte_set_flags(pte_t pte, pteval_t set) { pteval_t v = native_pte_val(pte); @@ -216,6 +241,55 @@ static inline pte_t pte_mkspecial(pte_t pte) return pte_set_flags(pte, _PAGE_SPECIAL); } +static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) +{ + pmdval_t v = native_pmd_val(pmd); + + return __pmd(v | set); +} + +static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) +{ + pmdval_t v = native_pmd_val(pmd); + + return __pmd(v & ~clear); +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + return pmd_clear_flags(pmd, _PAGE_ACCESSED); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + return pmd_clear_flags(pmd, _PAGE_RW); +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_DIRTY); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_PSE); +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + return 
pmd_set_flags(pmd, _PAGE_ACCESSED); +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_RW); +} + +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + return pmd_clear_flags(pmd, _PAGE_PRESENT); +} + /* * Mask out unsupported bits in a present pgprot. Non-present pgprots * can use those bits for other purposes, so leave them be. @@ -256,6 +330,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) return __pte(val); } +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmdval_t val = pmd_val(pmd); + + val &= _HPAGE_CHG_MASK; + val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; + + return __pmd(val); +} + /* mprotect needs to preserve PAT bits when updating vm_page_prot */ #define pgprot_modify pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) @@ -350,7 +434,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) * Currently stuck as a macro due to indirect forward reference to * linux/mmzone.h's __section_mem_map_addr() definition: */ -#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) +#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] @@ -524,12 +608,26 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) return res; } +static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) +{ + pmd_t res = *pmdp; + + native_pmd_clear(pmdp); + return res; +} + static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep , pte_t pte) { native_set_pte(ptep, pte); } +static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp , pmd_t pmd) +{ + native_set_pmd(pmdp, pmd); +} + #ifndef CONFIG_PARAVIRT /* * Rules for using pte_update - it must be called after any PTE update which @@ -607,6 +705,49 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, #define flush_tlb_fix_spurious_fault(vma, address) +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMD_WRITE +static inline int pmd_write(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_RW; +} + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + pmd_t pmd = native_pmdp_get_and_clear(pmdp); + pmd_update(mm, addr, pmdp); + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); + pmd_update(mm, addr, pmdp); +} + /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index f86da20347f2..975f709e09ae 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -59,6 
+59,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) native_set_pte(ptep, pte); } +static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} + +static inline void native_pmd_clear(pmd_t *pmd) +{ + native_set_pmd(pmd, native_make_pmd(0)); +} + static inline pte_t native_ptep_get_and_clear(pte_t *xp) { #ifdef CONFIG_SMP @@ -72,14 +82,17 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #endif } -static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) +static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) { - *pmdp = pmd; -} - -static inline void native_pmd_clear(pmd_t *pmd) -{ - native_set_pmd(pmd, native_make_pmd(0)); +#ifdef CONFIG_SMP + return native_make_pmd(xchg(&xp->pmd, 0)); +#else + /* native_local_pmdp_get_and_clear, + but duplicated because of cyclic dependency */ + pmd_t ret = *xp; + native_pmd_clear(xp); + return ret; +#endif } static inline void native_set_pud(pud_t *pudp, pud_t pud) @@ -168,6 +181,7 @@ extern void cleanup_highmap(void); #define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK) #define __HAVE_ARCH_PTE_SAME + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_64_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index d1f4a760be23..7db7723d1f32 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -22,6 +22,7 @@ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ /* If _PAGE_BIT_PRESENT is clear, we use these: */ @@ -45,6 +46,7 @@ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) +#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING) #define __HAVE_ARCH_PTE_SPECIAL #ifdef CONFIG_KMEMCHECK @@ -70,6 +72,7 @@ /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) #define _PAGE_CACHE_WB (0) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c6efecf85a6a..45636cefa186 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -761,10 +761,11 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c); extern void init_c1e_mask(void); extern unsigned long boot_option_idle_override; -extern unsigned long idle_halt; -extern unsigned long idle_nomwait; extern bool c1e_detected; +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, + IDLE_POLL, IDLE_FORCE_MWAIT}; + extern void enable_sep_cpu(void); extern int sysenter_setup(void); @@ -901,7 +902,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. * This is necessary to guarantee that the entire "struct pt_regs" - * is accessable even if the CPU haven't stored the SS/ESP registers + * is accessible even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). 
* Therefore beware: accessing the ss/esp fields of the diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h new file mode 100644 index 000000000000..b4ec95f07518 --- /dev/null +++ b/arch/x86/include/asm/prom.h @@ -0,0 +1 @@ +/* dummy prom.h; here to make linux/of.h's #includes happy */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 0e831059ac5a..f2b83bc7d784 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -47,14 +47,13 @@ enum { INTERCEPT_MONITOR, INTERCEPT_MWAIT, INTERCEPT_MWAIT_COND, + INTERCEPT_XSETBV, }; struct __attribute__ ((__packed__)) vmcb_control_area { - u16 intercept_cr_read; - u16 intercept_cr_write; - u16 intercept_dr_read; - u16 intercept_dr_write; + u32 intercept_cr; + u32 intercept_dr; u32 intercept_exceptions; u64 intercept; u8 reserved_1[42]; @@ -81,14 +80,19 @@ struct __attribute__ ((__packed__)) vmcb_control_area { u32 event_inj_err; u64 nested_cr3; u64 lbr_ctl; - u64 reserved_5; + u32 clean; + u32 reserved_5; u64 next_rip; - u8 reserved_6[816]; + u8 insn_len; + u8 insn_bytes[15]; + u8 reserved_6[800]; }; #define TLB_CONTROL_DO_NOTHING 0 #define TLB_CONTROL_FLUSH_ALL_ASID 1 +#define TLB_CONTROL_FLUSH_ASID 3 +#define TLB_CONTROL_FLUSH_ASID_LOCAL 7 #define V_TPR_MASK 0x0f @@ -204,19 +208,31 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK #define SVM_SELECTOR_CODE_MASK (1 << 3) -#define INTERCEPT_CR0_MASK 1 -#define INTERCEPT_CR3_MASK (1 << 3) -#define INTERCEPT_CR4_MASK (1 << 4) -#define INTERCEPT_CR8_MASK (1 << 8) - -#define INTERCEPT_DR0_MASK 1 -#define INTERCEPT_DR1_MASK (1 << 1) -#define INTERCEPT_DR2_MASK (1 << 2) -#define INTERCEPT_DR3_MASK (1 << 3) -#define INTERCEPT_DR4_MASK (1 << 4) -#define INTERCEPT_DR5_MASK (1 << 5) -#define INTERCEPT_DR6_MASK (1 << 6) -#define INTERCEPT_DR7_MASK (1 << 7) +#define INTERCEPT_CR0_READ 0 +#define INTERCEPT_CR3_READ 3 +#define INTERCEPT_CR4_READ 4 +#define INTERCEPT_CR8_READ 8 +#define INTERCEPT_CR0_WRITE (16 + 0) +#define INTERCEPT_CR3_WRITE (16 + 3) +#define INTERCEPT_CR4_WRITE (16 + 4) +#define INTERCEPT_CR8_WRITE (16 + 8) + +#define INTERCEPT_DR0_READ 0 +#define INTERCEPT_DR1_READ 1 +#define INTERCEPT_DR2_READ 2 +#define INTERCEPT_DR3_READ 3 +#define INTERCEPT_DR4_READ 4 +#define INTERCEPT_DR5_READ 5 +#define INTERCEPT_DR6_READ 6 +#define INTERCEPT_DR7_READ 7 +#define INTERCEPT_DR0_WRITE (16 + 0) +#define INTERCEPT_DR1_WRITE (16 + 1) +#define INTERCEPT_DR2_WRITE (16 + 2) +#define INTERCEPT_DR3_WRITE (16 + 3) +#define INTERCEPT_DR4_WRITE (16 + 4) +#define INTERCEPT_DR5_WRITE (16 + 5) +#define INTERCEPT_DR6_WRITE (16 + 6) +#define INTERCEPT_DR7_WRITE (16 + 7) #define SVM_EVTINJ_VEC_MASK 0xff @@ -246,6 +262,8 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44 +#define SVM_EXITINFO_REG_MASK 0x0F + #define SVM_EXIT_READ_CR0 0x000 #define SVM_EXIT_READ_CR3 0x003 #define SVM_EXIT_READ_CR4 0x004 @@ -316,6 +334,7 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_EXIT_MONITOR 0x08a #define SVM_EXIT_MWAIT 0x08b #define SVM_EXIT_MWAIT_COND 0x08c +#define SVM_EXIT_XSETBV 0x08d #define SVM_EXIT_NPF 0x400 #define SVM_EXIT_ERR -1 diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index f66cda56781d..0310da67307f 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -30,6 +30,7 @@ asmlinkage void segment_not_present(void); asmlinkage void stack_segment(void); asmlinkage void 
general_protection(void); asmlinkage void page_fault(void); +asmlinkage void async_page_fault(void); asmlinkage void spurious_interrupt_bug(void); asmlinkage void coprocessor_error(void); asmlinkage void alignment_check(void); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 9f0cbd987d50..84471b810460 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -66,15 +66,23 @@ #define PIN_BASED_NMI_EXITING 0x00000008 #define PIN_BASED_VIRTUAL_NMIS 0x00000020 +#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002 #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 +#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 #define VM_EXIT_SAVE_IA32_PAT 0x00040000 #define VM_EXIT_LOAD_IA32_PAT 0x00080000 +#define VM_EXIT_SAVE_IA32_EFER 0x00100000 +#define VM_EXIT_LOAD_IA32_EFER 0x00200000 +#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 +#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002 #define VM_ENTRY_IA32E_MODE 0x00000200 #define VM_ENTRY_SMM 0x00000400 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 +#define VM_ENTRY_LOAD_IA32_EFER 0x00008000 /* VMCS Encodings */ enum vmcs_field { @@ -239,6 +247,7 @@ enum vmcs_field { #define EXIT_REASON_TASK_SWITCH 9 #define EXIT_REASON_CPUID 10 #define EXIT_REASON_HLT 12 +#define EXIT_REASON_INVD 13 #define EXIT_REASON_INVLPG 14 #define EXIT_REASON_RDPMC 15 #define EXIT_REASON_RDTSC 16 @@ -296,6 +305,12 @@ enum vmcs_field { #define GUEST_INTR_STATE_SMI 0x00000004 #define GUEST_INTR_STATE_NMI 0x00000008 +/* GUEST_ACTIVITY_STATE flags */ +#define GUEST_ACTIVITY_ACTIVE 0 +#define GUEST_ACTIVITY_HLT 1 +#define GUEST_ACTIVITY_SHUTDOWN 2 +#define GUEST_ACTIVITY_WAIT_SIPI 3 + /* * Exit Qualifications for MOV for Control Register Access */ diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 8760cc60a21c..f25bdf238a33 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -42,6 +42,11 @@ extern unsigned int machine_to_phys_order; extern unsigned long get_phys_to_machine(unsigned long pfn); extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); +extern int m2p_add_override(unsigned long mfn, struct page *page); +extern int m2p_remove_override(struct page *page); +extern struct page *m2p_find_override(unsigned long mfn); +extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); + static inline unsigned long pfn_to_mfn(unsigned long pfn) { unsigned long mfn; @@ -72,9 +77,6 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; - if (unlikely((mfn >> machine_to_phys_order) != 0)) - return ~0; - pfn = 0; /* * The array access can fail (e.g., device space beyond end of RAM). @@ -83,6 +85,14 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) */ __get_user(pfn, &machine_to_phys_mapping[mfn]); + /* + * If this appears to be a foreign mfn (because the pfn + * doesn't map back to the mfn), then check the local override + * table to see if there's a better pfn to use. 
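[Editorial sketch, not part of the patch: the m2p_add_override()/m2p_remove_override() declarations added to asm/xen/page.h above are the registration side of the override table consulted below. A minimal illustration of the intended usage by code that has mapped a foreign mfn into a page it owns locally follows; the example_* names are made up and error handling is omitted.]

#include <linux/mm.h>
#include <asm/xen/page.h>

/* Illustrative only: after mapping a grant from another domain into
 * local_page, record the relationship so that a later mfn_to_pfn() on
 * that foreign mfn resolves to the local pfn. */
int example_map_foreign(unsigned long foreign_mfn, struct page *local_page)
{
	return m2p_add_override(foreign_mfn, local_page);
}

/* Illustrative only: drop the override when the mapping is torn down. */
void example_unmap_foreign(struct page *local_page)
{
	m2p_remove_override(local_page);
}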
+ */ + if (get_phys_to_machine(pfn) != mfn) + pfn = m2p_find_override_pfn(mfn, pfn); + return pfn; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ec881c6bfee0..b3a71137983a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -509,6 +509,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) return 0; } +EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index d2fdb0826df2..57ca77787220 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1086,7 +1086,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, dma_dom->aperture_size += APERTURE_RANGE_SIZE; - /* Intialize the exclusion range if necessary */ + /* Initialize the exclusion range if necessary */ for_each_iommu(iommu) { if (iommu->exclusion_start && iommu->exclusion_start >= dma_dom->aperture[index]->offset @@ -1353,7 +1353,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) /* * Allocates a new protection domain usable for the dma_ops functions. - * It also intializes the page table and the address allocator data + * It also initializes the page table and the address allocator data * structures required for the dma_ops interface */ static struct dma_ops_domain *dma_ops_domain_alloc(void) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index d6fb146c0d8b..df20723a6a1b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -234,6 +234,7 @@ unsigned __kprobes long oops_begin(void) bust_spinlocks(1); return flags; } +EXPORT_SYMBOL_GPL(oops_begin); void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) { diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0c2b7ef7a34d..294f26da0c0c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 591e60104278..c8b4efad7ebb 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1406,6 +1406,16 @@ ENTRY(general_protection) CFI_ENDPROC END(general_protection) +#ifdef CONFIG_KVM_GUEST +ENTRY(async_page_fault) + RING0_EC_FRAME + pushl $do_async_page_fault + CFI_ADJUST_CFA_OFFSET 4 + jmp error_code + CFI_ENDPROC +END(async_page_fault) +#endif + /* * End of kprobes section */ diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index d3b895f375d3..aed1ffbeb0c9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1329,6 +1329,9 @@ errorentry xen_stack_segment do_stack_segment #endif errorentry general_protection do_general_protection errorentry page_fault do_page_fault +#ifdef CONFIG_KVM_GUEST +errorentry async_page_fault do_async_page_fault +#endif #ifdef CONFIG_X86_MCE paranoidzeroentry machine_check *machine_check_vector(%rip) #endif diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 9f54b209c378..fc293dc8dc35 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -126,7 +126,7 @@ ENTRY(startup_32) movsl movl pa(boot_params) + NEW_CL_POINTER,%esi andl %esi,%esi - jz 1f # No comand line + jz 1f # No command line movl $pa(boot_command_line),%edi movl $(COMMAND_LINE_SIZE/4),%ecx rep diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 58bb239a2fd7..e60c38cc0eed 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -169,6 +169,7 @@
int init_fpu(struct task_struct *tsk) set_stopped_child_used_math(tsk); return 0; } +EXPORT_SYMBOL_GPL(init_fpu); /* * The xstateregs_active() routine is the same as the fpregs_active() routine, diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3a43caa3beb7..52945da52a94 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -275,6 +276,15 @@ void smp_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); +#ifdef CONFIG_OF +unsigned int irq_create_of_mapping(struct device_node *controller, + const u32 *intspec, unsigned int intsize) +{ + return intspec[0]; +} +EXPORT_SYMBOL_GPL(irq_create_of_mapping); +#endif + #ifdef CONFIG_HOTPLUG_CPU /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 63b0ec8d3d4a..8dc44662394b 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -27,16 +27,37 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #define MMU_QUEUE_SIZE 1024 +static int kvmapf = 1; + +static int parse_no_kvmapf(char *arg) +{ + kvmapf = 0; + return 0; +} + +early_param("no-kvmapf", parse_no_kvmapf); + struct kvm_para_state { u8 mmu_queue[MMU_QUEUE_SIZE]; int mmu_queue_len; }; static DEFINE_PER_CPU(struct kvm_para_state, para_state); +static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); static struct kvm_para_state *kvm_para_state(void) { @@ -50,6 +71,195 @@ static void kvm_io_delay(void) { } +#define KVM_TASK_SLEEP_HASHBITS 8 +#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS) + +struct kvm_task_sleep_node { + struct hlist_node link; + wait_queue_head_t wq; + u32 token; + int cpu; + bool halted; + struct mm_struct *mm; +}; + +static struct kvm_task_sleep_head { + spinlock_t lock; + struct hlist_head list; +} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; + +static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, + u32 token) +{ + struct hlist_node *p; + + hlist_for_each(p, &b->list) { + struct kvm_task_sleep_node *n = + hlist_entry(p, typeof(*n), link); + if (n->token == token) + return n; + } + + return NULL; +} + +void kvm_async_pf_task_wait(u32 token) +{ + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); + struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; + struct kvm_task_sleep_node n, *e; + DEFINE_WAIT(wait); + int cpu, idle; + + cpu = get_cpu(); + idle = idle_cpu(cpu); + put_cpu(); + + spin_lock(&b->lock); + e = _find_apf_task(b, token); + if (e) { + /* dummy entry exist -> wake up was delivered ahead of PF */ + hlist_del(&e->link); + kfree(e); + spin_unlock(&b->lock); + return; + } + + n.token = token; + n.cpu = smp_processor_id(); + n.mm = current->active_mm; + n.halted = idle || preempt_count() > 1; + atomic_inc(&n.mm->mm_count); + init_waitqueue_head(&n.wq); + hlist_add_head(&n.link, &b->list); + spin_unlock(&b->lock); + + for (;;) { + if (!n.halted) + prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); + if (hlist_unhashed(&n.link)) + break; + + if (!n.halted) { + local_irq_enable(); + schedule(); + local_irq_disable(); + } else { + /* + * We cannot reschedule. So halt.
+ */ + native_safe_halt(); + local_irq_disable(); + } + } + if (!n.halted) + finish_wait(&n.wq, &wait); + + return; +} +EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); + +static void apf_task_wake_one(struct kvm_task_sleep_node *n) +{ + hlist_del_init(&n->link); + if (!n->mm) + return; + mmdrop(n->mm); + if (n->halted) + smp_send_reschedule(n->cpu); + else if (waitqueue_active(&n->wq)) + wake_up(&n->wq); +} + +static void apf_task_wake_all(void) +{ + int i; + + for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { + struct hlist_node *p, *next; + struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; + spin_lock(&b->lock); + hlist_for_each_safe(p, next, &b->list) { + struct kvm_task_sleep_node *n = + hlist_entry(p, typeof(*n), link); + if (n->cpu == smp_processor_id()) + apf_task_wake_one(n); + } + spin_unlock(&b->lock); + } +} + +void kvm_async_pf_task_wake(u32 token) +{ + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); + struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; + struct kvm_task_sleep_node *n; + + if (token == ~0) { + apf_task_wake_all(); + return; + } + +again: + spin_lock(&b->lock); + n = _find_apf_task(b, token); + if (!n) { + /* + * async PF was not yet handled. + * Add dummy entry for the token. + */ + n = kmalloc(sizeof(*n), GFP_ATOMIC); + if (!n) { + /* + * Allocation failed! Busy wait while other cpu + * handles async PF. + */ + spin_unlock(&b->lock); + cpu_relax(); + goto again; + } + n->token = token; + n->cpu = smp_processor_id(); + n->mm = NULL; + init_waitqueue_head(&n->wq); + hlist_add_head(&n->link, &b->list); + } else + apf_task_wake_one(n); + spin_unlock(&b->lock); + return; +} +EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); + +u32 kvm_read_and_reset_pf_reason(void) +{ + u32 reason = 0; + + if (__get_cpu_var(apf_reason).enabled) { + reason = __get_cpu_var(apf_reason).reason; + __get_cpu_var(apf_reason).reason = 0; + } + + return reason; +} +EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); + +dotraplinkage void __kprobes +do_async_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + switch (kvm_read_and_reset_pf_reason()) { + default: + do_page_fault(regs, error_code); + break; + case KVM_PV_REASON_PAGE_NOT_PRESENT: + /* page is swapped out by the host. 
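/*
 * Editorial recap, not part of the patch: the handshake implemented by
 * do_async_page_fault() here, together with the MSR and flag definitions
 * added to asm/kvm_para.h above, works as follows.
 *
 *  1. At CPU bring-up the guest publishes the physical address of its
 *     per-cpu struct kvm_vcpu_pv_apf_data via
 *     wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED).
 *  2. When a guest access touches a page the host has swapped out, the
 *     host stores KVM_PV_REASON_PAGE_NOT_PRESENT in apf_reason.reason and
 *     injects a #PF whose CR2 carries a token; the guest parks the task
 *     in kvm_async_pf_task_wait(token), or halts if it cannot sleep.
 *  3. Once the page is resident again, the host stores
 *     KVM_PV_REASON_PAGE_READY and injects another #PF with the same
 *     token; kvm_async_pf_task_wake(token) looks the sleeper up in the
 *     async_pf_sleepers hash and resumes it.
 *
 * A wake-up that arrives before the matching wait leaves a dummy node in
 * the hash bucket, which is why kvm_async_pf_task_wait() checks the
 * bucket before going to sleep.
 */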
*/ + kvm_async_pf_task_wait((u32)read_cr2()); + break; + case KVM_PV_REASON_PAGE_READY: + kvm_async_pf_task_wake((u32)read_cr2()); + break; + } +} + static void kvm_mmu_op(void *buffer, unsigned len) { int r; @@ -231,10 +441,117 @@ static void __init paravirt_ops_setup(void) #endif } +void __cpuinit kvm_guest_cpu_init(void) +{ + if (!kvm_para_available()) + return; + + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { + u64 pa = __pa(&__get_cpu_var(apf_reason)); + +#ifdef CONFIG_PREEMPT + pa |= KVM_ASYNC_PF_SEND_ALWAYS; +#endif + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); + __get_cpu_var(apf_reason).enabled = 1; + printk(KERN_INFO"KVM setup async PF for cpu %d\n", + smp_processor_id()); + } +} + +static void kvm_pv_disable_apf(void *unused) +{ + if (!__get_cpu_var(apf_reason).enabled) + return; + + wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); + __get_cpu_var(apf_reason).enabled = 0; + + printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", + smp_processor_id()); +} + +static int kvm_pv_reboot_notify(struct notifier_block *nb, + unsigned long code, void *unused) +{ + if (code == SYS_RESTART) + on_each_cpu(kvm_pv_disable_apf, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block kvm_pv_reboot_nb = { + .notifier_call = kvm_pv_reboot_notify, +}; + +#ifdef CONFIG_SMP +static void __init kvm_smp_prepare_boot_cpu(void) +{ +#ifdef CONFIG_KVM_CLOCK + WARN_ON(kvm_register_clock("primary cpu clock")); +#endif + kvm_guest_cpu_init(); + native_smp_prepare_boot_cpu(); +} + +static void kvm_guest_cpu_online(void *dummy) +{ + kvm_guest_cpu_init(); +} + +static void kvm_guest_cpu_offline(void *dummy) +{ + kvm_pv_disable_apf(NULL); + apf_task_wake_all(); +} + +static int __cpuinit kvm_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int cpu = (unsigned long)hcpu; + switch (action) { + case CPU_ONLINE: + case CPU_DOWN_FAILED: + case CPU_ONLINE_FROZEN: + smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata kvm_cpu_notifier = { + .notifier_call = kvm_cpu_notify, +}; +#endif + +static void __init kvm_apf_trap_init(void) +{ + set_intr_gate(14, &async_page_fault); +} + void __init kvm_guest_init(void) { + int i; + if (!kvm_para_available()) return; paravirt_ops_setup(); + register_reboot_notifier(&kvm_pv_reboot_nb); + for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) + spin_lock_init(&async_pf_sleepers[i].lock); + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) + x86_init.irqs.trap_init = kvm_apf_trap_init; + +#ifdef CONFIG_SMP + smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; + register_cpu_notifier(&kvm_cpu_notifier); +#else + kvm_guest_cpu_init(); +#endif } diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index ca43ce31a19c..f98d3eafe07a 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -125,7 +125,7 @@ static struct clocksource kvm_clock = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static int kvm_register_clock(char *txt) +int kvm_register_clock(char *txt) { int cpu = smp_processor_id(); int low, high, ret; @@ -152,14 +152,6 @@ static void __cpuinit kvm_setup_secondary_clock(void) } #endif -#ifdef CONFIG_SMP -static void __init kvm_smp_prepare_boot_cpu(void) -{ - WARN_ON(kvm_register_clock("primary cpu clock")); - native_smp_prepare_boot_cpu(); -} -#endif - /* * 
After the clock is registered, the host will keep writing to the * registered memory location. If the guest happens to shutdown, this memory @@ -205,9 +197,6 @@ void __init kvmclock_init(void) #ifdef CONFIG_X86_LOCAL_APIC x86_cpuinit.setup_percpu_clockev = kvm_setup_secondary_clock; -#endif -#ifdef CONFIG_SMP - smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; #endif machine_ops.shutdown = kvm_shutdown; #ifdef CONFIG_KEXEC diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 8f2956091735..ab23f1ad4bf1 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -37,20 +37,11 @@ void *module_alloc(unsigned long size) { - struct vm_struct *area; - - if (!size) - return NULL; - size = PAGE_ALIGN(size); - if (size > MODULES_LEN) + if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; - - area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); - if (!area) - return NULL; - - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_EXEC); + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, + -1, __builtin_return_address(0)); } /* Free memory returned from module_alloc */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index c5b250011fd4..869e1aeeb71b 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -421,8 +421,11 @@ struct pv_mmu_ops pv_mmu_ops = { .set_pte = native_set_pte, .set_pte_at = native_set_pte_at, .set_pmd = native_set_pmd, + .set_pmd_at = native_set_pmd_at, .pte_update = paravirt_nop, .pte_update_defer = paravirt_nop, + .pmd_update = paravirt_nop, + .pmd_update_defer = paravirt_nop, .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 09c08a1c706f..d8286ed54ffa 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -22,11 +22,6 @@ #include #include -unsigned long idle_halt; -EXPORT_SYMBOL(idle_halt); -unsigned long idle_nomwait; -EXPORT_SYMBOL(idle_nomwait); - struct kmem_cache *task_xstate_cachep; EXPORT_SYMBOL_GPL(task_xstate_cachep); @@ -327,7 +322,7 @@ long sys_execve(const char __user *name, /* * Idle related variables and functions */ -unsigned long boot_option_idle_override = 0; +unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); /* @@ -386,6 +381,8 @@ void default_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; + trace_power_end(smp_processor_id()); + trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); } else { local_irq_enable(); /* loop is done by the caller */ @@ -443,8 +440,6 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { - trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); - trace_cpu_idle((ax>>4)+1, smp_processor_id()); if (!need_resched()) { if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)&current_thread_info()->flags); @@ -471,6 +466,8 @@ static void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); + trace_power_end(smp_processor_id()); + trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); } else local_irq_enable(); } @@ -503,7 +500,6 @@ static void poll_idle(void) * * idle=mwait overrides this decision and forces the usage of mwait.
*/ -static int __cpuinitdata force_mwait; #define MWAIT_INFO 0x05 #define MWAIT_ECX_EXTENDED_INFO 0x01 @@ -513,7 +509,7 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; - if (force_mwait) + if (boot_option_idle_override == IDLE_FORCE_MWAIT) return 1; if (c->cpuid_level < MWAIT_INFO) @@ -633,9 +629,10 @@ static int __init idle_setup(char *str) if (!strcmp(str, "poll")) { printk("using polling idle threads.\n"); pm_idle = poll_idle; - } else if (!strcmp(str, "mwait")) - force_mwait = 1; - else if (!strcmp(str, "halt")) { + boot_option_idle_override = IDLE_POLL; + } else if (!strcmp(str, "mwait")) { + boot_option_idle_override = IDLE_FORCE_MWAIT; + } else if (!strcmp(str, "halt")) { /* * When the boot option of idle=halt is added, halt is * forced to be used for CPU idle. In such case CPU C2/C3 @@ -644,8 +641,7 @@ static int __init idle_setup(char *str) * the boot_option_idle_override. */ pm_idle = default_idle; - idle_halt = 1; - return 0; + boot_option_idle_override = IDLE_HALT; } else if (!strcmp(str, "nomwait")) { /* * If the boot option of "idle=nomwait" is added, @@ -653,12 +649,10 @@ static int __init idle_setup(char *str) * states. In such case it won't touch the variable * of boot_option_idle_override. */ - idle_nomwait = 1; - return 0; + boot_option_idle_override = IDLE_NOMWAIT; } else return -1; - boot_option_idle_override = 1; return 0; } early_param("idle", idle_setup); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4b9befa0e347..8d128783af47 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -57,8 +57,6 @@ #include #include -#include - asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); /* @@ -113,8 +111,6 @@ void cpu_idle(void) stop_critical_timings(); pm_idle(); start_critical_timings(); - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); } tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4c818a738396..bd387e8f73b4 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -51,8 +51,6 @@ #include #include -#include - asmlinkage extern void ret_from_fork(void); DEFINE_PER_CPU(unsigned long, old_rsp); @@ -141,10 +139,6 @@ void cpu_idle(void) pm_idle(); start_critical_timings(); - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, - smp_processor_id()); - /* In many cases the interrupt that ended idle has already called exit_idle. But some idle loops can be woken up without interrupt. 
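[Usage note, editorial and not part of the patch: with the conversion above, code that used to test the removed idle_halt/idle_nomwait flags, or the file-local force_mwait, is expected to compare boot_option_idle_override against the enum values now declared in asm/processor.h. A minimal sketch; the helper name example_mwait_allowed() is made up.]

#include <linux/types.h>
#include <asm/processor.h>

/* Illustrative only: the old "if (idle_nomwait) ..." style check becomes
 * an explicit comparison against the idle_boot_override enum. */
static bool example_mwait_allowed(void)
{
	return boot_option_idle_override != IDLE_NOMWAIT &&
	       boot_option_idle_override != IDLE_HALT;
}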
*/ diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 1cfbbfc3ae26..6f39cab052d5 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -76,7 +76,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(real_seconds, RTC_SECONDS); CMOS_WRITE(real_minutes, RTC_MINUTES); } else { - printk(KERN_WARNING + printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index c2f1b26141e2..998e972f3b1a 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -133,7 +133,7 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, pmd = pmd_alloc(&tboot_mm, pud, vaddr); if (!pmd) return -1; - pte = pte_alloc_map(&tboot_mm, pmd, vaddr); + pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr); if (!pte) return -1; set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 61fb98519622..863f8753ab0a 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -179,6 +179,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) if (pud_none_or_clear_bad(pud)) goto out; pmd = pmd_offset(pud, 0xA0000); + split_huge_page_pmd(mm, pmd); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ddc131ff438f..50f63648ce1b 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -28,6 +28,7 @@ config KVM select HAVE_KVM_IRQCHIP select HAVE_KVM_EVENTFD select KVM_APIC_ARCHITECTURE + select KVM_ASYNC_PF select USER_RETURN_NOTIFIER select KVM_MMIO ---help--- diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 31a7035c4bd9..f15501f431c8 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -1,5 +1,5 @@ -EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm +ccflags-y += -Ivirt/kvm -Iarch/x86/kvm CFLAGS_x86.o := -I. CFLAGS_svm.o := -I. @@ -9,6 +9,7 @@ kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ coalesced_mmio.o irq_comm.o eventfd.o \ assigned-dev.o) kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o) +kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o) kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ i8254.o timer.o diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 38b6e8dafaff..caf966781d25 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -20,16 +20,8 @@ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ -#ifndef __KERNEL__ -#include -#include -#include -#define DPRINTF(_f, _a ...) printf(_f , ## _a) -#else #include #include "kvm_cache_regs.h" -#define DPRINTF(x...) 
do {} while (0) -#endif #include #include @@ -418,9 +410,9 @@ address_mask(struct decode_cache *c, unsigned long reg) } static inline unsigned long -register_address(struct decode_cache *c, unsigned long base, unsigned long reg) +register_address(struct decode_cache *c, unsigned long reg) { - return base + address_mask(c, reg); + return address_mask(c, reg); } static inline void @@ -452,60 +444,55 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, return ops->get_cached_segment_base(seg, ctxt->vcpu); } -static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, - struct decode_cache *c) +static unsigned seg_override(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops, + struct decode_cache *c) { if (!c->has_seg_override) return 0; - return seg_base(ctxt, ops, c->seg_override); + return c->seg_override; } -static unsigned long es_base(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) +static ulong linear(struct x86_emulate_ctxt *ctxt, + struct segmented_address addr) { - return seg_base(ctxt, ops, VCPU_SREG_ES); -} - -static unsigned long ss_base(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) -{ - return seg_base(ctxt, ops, VCPU_SREG_SS); -} + struct decode_cache *c = &ctxt->decode; + ulong la; -static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, - u32 error, bool valid) -{ - ctxt->exception = vec; - ctxt->error_code = error; - ctxt->error_code_valid = valid; + la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea; + if (c->ad_bytes != 8) + la &= (u32)-1; + return la; } -static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err) +static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, + u32 error, bool valid) { - emulate_exception(ctxt, GP_VECTOR, err, true); + ctxt->exception.vector = vec; + ctxt->exception.error_code = error; + ctxt->exception.error_code_valid = valid; + return X86EMUL_PROPAGATE_FAULT; } -static void emulate_pf(struct x86_emulate_ctxt *ctxt) +static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { - emulate_exception(ctxt, PF_VECTOR, 0, true); + return emulate_exception(ctxt, GP_VECTOR, err, true); } -static void emulate_ud(struct x86_emulate_ctxt *ctxt) +static int emulate_ud(struct x86_emulate_ctxt *ctxt) { - emulate_exception(ctxt, UD_VECTOR, 0, false); + return emulate_exception(ctxt, UD_VECTOR, 0, false); } -static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err) +static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { - emulate_exception(ctxt, TS_VECTOR, err, true); + return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { - emulate_exception(ctxt, DE_VECTOR, 0, false); - return X86EMUL_PROPAGATE_FAULT; + return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, @@ -520,7 +507,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, cur_size = fc->end - fc->start; size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip)); rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size, - size, ctxt->vcpu, NULL); + size, ctxt->vcpu, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; fc->end += size; @@ -564,7 +551,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs, static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops, - ulong addr, + struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; @@ -572,10 +559,13 @@ static 
int read_descriptor(struct x86_emulate_ctxt *ctxt, if (op_bytes == 2) op_bytes = 3; *address = 0; - rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL); + rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2, + ctxt->vcpu, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL); + addr.ea += 2; + rc = ops->read_std(linear(ctxt, addr), address, op_bytes, + ctxt->vcpu, &ctxt->exception); return rc; } @@ -768,7 +758,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, break; } } - op->addr.mem = modrm_ea; + op->addr.mem.ea = modrm_ea; done: return rc; } @@ -783,13 +773,13 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt, op->type = OP_MEM; switch (c->ad_bytes) { case 2: - op->addr.mem = insn_fetch(u16, 2, c->eip); + op->addr.mem.ea = insn_fetch(u16, 2, c->eip); break; case 4: - op->addr.mem = insn_fetch(u32, 4, c->eip); + op->addr.mem.ea = insn_fetch(u32, 4, c->eip); break; case 8: - op->addr.mem = insn_fetch(u64, 8, c->eip); + op->addr.mem.ea = insn_fetch(u64, 8, c->eip); break; } done: @@ -808,7 +798,7 @@ static void fetch_bit_operand(struct decode_cache *c) else if (c->src.bytes == 4) sv = (s32)c->src.val & (s32)mask; - c->dst.addr.mem += (sv >> 3); + c->dst.addr.mem.ea += (sv >> 3); } /* only subword offset */ @@ -821,7 +811,6 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, { int rc; struct read_cache *mc = &ctxt->decode.mem_read; - u32 err; while (size) { int n = min(size, 8u); @@ -829,10 +818,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, if (mc->pos < mc->end) goto read_cached; - rc = ops->read_emulated(addr, mc->data + mc->end, n, &err, - ctxt->vcpu); - if (rc == X86EMUL_PROPAGATE_FAULT) - emulate_pf(ctxt); + rc = ops->read_emulated(addr, mc->data + mc->end, n, + &ctxt->exception, ctxt->vcpu); if (rc != X86EMUL_CONTINUE) return rc; mc->end += n; @@ -907,19 +894,15 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, struct desc_ptr dt; u16 index = selector >> 3; int ret; - u32 err; ulong addr; get_descriptor_table_ptr(ctxt, ops, selector, &dt); - if (dt.size < index * 8 + 7) { - emulate_gp(ctxt, selector & 0xfffc); - return X86EMUL_PROPAGATE_FAULT; - } + if (dt.size < index * 8 + 7) + return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; - ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); - if (ret == X86EMUL_PROPAGATE_FAULT) - emulate_pf(ctxt); + ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, + &ctxt->exception); return ret; } @@ -931,21 +914,17 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, { struct desc_ptr dt; u16 index = selector >> 3; - u32 err; ulong addr; int ret; get_descriptor_table_ptr(ctxt, ops, selector, &dt); - if (dt.size < index * 8 + 7) { - emulate_gp(ctxt, selector & 0xfffc); - return X86EMUL_PROPAGATE_FAULT; - } + if (dt.size < index * 8 + 7) + return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; - ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); - if (ret == X86EMUL_PROPAGATE_FAULT) - emulate_pf(ctxt); + ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, + &ctxt->exception); return ret; } @@ -1092,7 +1071,6 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, { int rc; struct decode_cache *c = &ctxt->decode; - u32 err; switch (c->dst.type) { case OP_REG: @@ -1101,21 +1079,19 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, case OP_MEM: if (c->lock_prefix) rc = 
ops->cmpxchg_emulated( - c->dst.addr.mem, + linear(ctxt, c->dst.addr.mem), &c->dst.orig_val, &c->dst.val, c->dst.bytes, - &err, + &ctxt->exception, ctxt->vcpu); else rc = ops->write_emulated( - c->dst.addr.mem, + linear(ctxt, c->dst.addr.mem), &c->dst.val, c->dst.bytes, - &err, + &ctxt->exception, ctxt->vcpu); - if (rc == X86EMUL_PROPAGATE_FAULT) - emulate_pf(ctxt); if (rc != X86EMUL_CONTINUE) return rc; break; @@ -1137,8 +1113,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt, c->dst.bytes = c->op_bytes; c->dst.val = c->src.val; register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); - c->dst.addr.mem = register_address(c, ss_base(ctxt, ops), - c->regs[VCPU_REGS_RSP]); + c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]); + c->dst.addr.mem.seg = VCPU_SREG_SS; } static int emulate_pop(struct x86_emulate_ctxt *ctxt, @@ -1147,10 +1123,11 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, { struct decode_cache *c = &ctxt->decode; int rc; + struct segmented_address addr; - rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops), - c->regs[VCPU_REGS_RSP]), - dest, len); + addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]); + addr.seg = VCPU_SREG_SS; + rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len); if (rc != X86EMUL_CONTINUE) return rc; @@ -1184,10 +1161,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, change_mask |= EFLG_IF; break; case X86EMUL_MODE_VM86: - if (iopl < 3) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (iopl < 3) + return emulate_gp(ctxt, 0); change_mask |= EFLG_IF; break; default: /* real mode */ @@ -1198,9 +1173,6 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); - if (rc == X86EMUL_PROPAGATE_FAULT) - emulate_pf(ctxt); - return rc; } @@ -1287,7 +1259,6 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, gva_t cs_addr; gva_t eip_addr; u16 cs, eip; - u32 err; /* TODO: Add limit checks */ c->src.val = ctxt->eflags; @@ -1317,11 +1288,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; - rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err); + rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err); + rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; @@ -1370,10 +1341,8 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - if (temp_eip & ~0xffff) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (temp_eip & ~0xffff) + return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); @@ -1624,10 +1593,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || - ctxt->mode == X86EMUL_MODE_VM86) { - emulate_ud(ctxt); - return X86EMUL_PROPAGATE_FAULT; - } + ctxt->mode == X86EMUL_MODE_VM86) + return emulate_ud(ctxt); setup_syscalls_segments(ctxt, ops, &cs, &ss); @@ -1678,34 +1645,26 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) u16 cs_sel, ss_sel; /* inject #GP if in real mode */ - if (ctxt->mode == X86EMUL_MODE_REAL) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (ctxt->mode == X86EMUL_MODE_REAL) + return 
emulate_gp(ctxt, 0); /* XXX sysenter/sysexit have not been tested in 64bit mode. * Therefore, we inject an #UD. */ - if (ctxt->mode == X86EMUL_MODE_PROT64) { - emulate_ud(ctxt); - return X86EMUL_PROPAGATE_FAULT; - } + if (ctxt->mode == X86EMUL_MODE_PROT64) + return emulate_ud(ctxt); setup_syscalls_segments(ctxt, ops, &cs, &ss); ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: - if ((msr_data & 0xfffc) == 0x0) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if ((msr_data & 0xfffc) == 0x0) + return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: - if (msr_data == 0x0) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (msr_data == 0x0) + return emulate_gp(ctxt, 0); break; } @@ -1745,10 +1704,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || - ctxt->mode == X86EMUL_MODE_VM86) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + ctxt->mode == X86EMUL_MODE_VM86) + return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, ops, &cs, &ss); @@ -1763,18 +1720,14 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); - if ((msr_data & 0xfffc) == 0x0) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if ((msr_data & 0xfffc) == 0x0) + return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); - if (msr_data == 0x0) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (msr_data == 0x0) + return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; @@ -1934,33 +1887,27 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, { struct tss_segment_16 tss_seg; int ret; - u32 err, new_tss_base = get_desc_base(new_desc); + u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } save_state_to_tss16(ctxt, ops, &tss_seg); ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; @@ -1968,12 +1915,10 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, ret = ops->write_std(new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, - ctxt->vcpu, &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + ctxt->vcpu, &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } } return load_state_from_tss16(ctxt, ops, &tss_seg); @@ -2013,10 +1958,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct decode_cache *c = &ctxt->decode; int ret; - if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) { - emulate_gp(ctxt, 0); - return 
X86EMUL_PROPAGATE_FAULT; - } + if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) + return emulate_gp(ctxt, 0); c->eip = tss->eip; ctxt->eflags = tss->eflags | 2; c->regs[VCPU_REGS_RAX] = tss->eax; @@ -2076,33 +2019,27 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, { struct tss_segment_32 tss_seg; int ret; - u32 err, new_tss_base = get_desc_base(new_desc); + u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } save_state_to_tss32(ctxt, ops, &tss_seg); ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, - &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; @@ -2110,12 +2047,10 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, ret = ops->write_std(new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, - ctxt->vcpu, &err); - if (ret == X86EMUL_PROPAGATE_FAULT) { + ctxt->vcpu, &ctxt->exception); + if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ - emulate_pf(ctxt); return ret; - } } return load_state_from_tss32(ctxt, ops, &tss_seg); @@ -2146,10 +2081,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, if (reason != TASK_SWITCH_IRET) { if ((tss_selector & 3) > next_tss_desc.dpl || - ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) + return emulate_gp(ctxt, 0); } desc_limit = desc_limit_scaled(&next_tss_desc); @@ -2231,14 +2164,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; } -static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base, +static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, int reg, struct operand *op) { struct decode_cache *c = &ctxt->decode; int df = (ctxt->eflags & EFLG_DF) ? -1 : 1; register_address_increment(c, &c->regs[reg], df * op->bytes); - op->addr.mem = register_address(c, base, c->regs[reg]); + op->addr.mem.ea = register_address(c, c->regs[reg]); + op->addr.mem.seg = seg; } static int em_push(struct x86_emulate_ctxt *ctxt) @@ -2369,10 +2303,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt) struct decode_cache *c = &ctxt->decode; u64 tsc = 0; - if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) { - emulate_gp(ctxt, 0); - return X86EMUL_PROPAGATE_FAULT; - } + if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) + return emulate_gp(ctxt, 0); ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc); c->regs[VCPU_REGS_RAX] = (u32)tsc; c->regs[VCPU_REGS_RDX] = tsc >> 32; @@ -2647,7 +2579,7 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, op->type = OP_IMM; op->bytes = size; - op->addr.mem = c->eip; + op->addr.mem.ea = c->eip; /* NB. Immediates are sign-extended as necessary. 
*/ switch (op->bytes) { case 1: @@ -2678,7 +2610,7 @@ done: } int -x86_decode_insn(struct x86_emulate_ctxt *ctxt) +x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { struct x86_emulate_ops *ops = ctxt->ops; struct decode_cache *c = &ctxt->decode; @@ -2689,7 +2621,10 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt) struct operand memop = { .type = OP_NONE }; c->eip = ctxt->eip; - c->fetch.start = c->fetch.end = c->eip; + c->fetch.start = c->eip; + c->fetch.end = c->fetch.start + insn_len; + if (insn_len > 0) + memcpy(c->fetch.data, insn, insn_len); ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS); switch (mode) { @@ -2803,10 +2738,8 @@ done_prefixes: c->execute = opcode.u.execute; /* Unrecognised? */ - if (c->d == 0 || (c->d & Undefined)) { - DPRINTF("Cannot emulate %02x\n", c->b); + if (c->d == 0 || (c->d & Undefined)) return -1; - } if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack)) c->op_bytes = 8; @@ -2831,14 +2764,13 @@ done_prefixes: if (!c->has_seg_override) set_seg_override(c, VCPU_SREG_DS); - if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d)) - memop.addr.mem += seg_override_base(ctxt, ops, c); + memop.addr.mem.seg = seg_override(ctxt, ops, c); if (memop.type == OP_MEM && c->ad_bytes != 8) - memop.addr.mem = (u32)memop.addr.mem; + memop.addr.mem.ea = (u32)memop.addr.mem.ea; if (memop.type == OP_MEM && c->rip_relative) - memop.addr.mem += c->eip; + memop.addr.mem.ea += c->eip; /* * Decode and fetch the source operand: register, memory @@ -2890,14 +2822,14 @@ done_prefixes: case SrcSI: c->src.type = OP_MEM; c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - c->src.addr.mem = - register_address(c, seg_override_base(ctxt, ops, c), - c->regs[VCPU_REGS_RSI]); + c->src.addr.mem.ea = + register_address(c, c->regs[VCPU_REGS_RSI]); + c->src.addr.mem.seg = seg_override(ctxt, ops, c), c->src.val = 0; break; case SrcImmFAddr: c->src.type = OP_IMM; - c->src.addr.mem = c->eip; + c->src.addr.mem.ea = c->eip; c->src.bytes = c->op_bytes + 2; insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip); break; @@ -2944,7 +2876,7 @@ done_prefixes: break; case DstImmUByte: c->dst.type = OP_IMM; - c->dst.addr.mem = c->eip; + c->dst.addr.mem.ea = c->eip; c->dst.bytes = 1; c->dst.val = insn_fetch(u8, 1, c->eip); break; @@ -2969,9 +2901,9 @@ done_prefixes: case DstDI: c->dst.type = OP_MEM; c->dst.bytes = (c->d & ByteOp) ? 
1 : c->op_bytes; - c->dst.addr.mem = - register_address(c, es_base(ctxt, ops), - c->regs[VCPU_REGS_RDI]); + c->dst.addr.mem.ea = + register_address(c, c->regs[VCPU_REGS_RDI]); + c->dst.addr.mem.seg = VCPU_SREG_ES; c->dst.val = 0; break; case ImplicitOps: @@ -3020,24 +2952,24 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ctxt->decode.mem_read.pos = 0; if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { - emulate_ud(ctxt); + rc = emulate_ud(ctxt); goto done; } /* LOCK prefix is allowed only with some instructions */ if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { - emulate_ud(ctxt); + rc = emulate_ud(ctxt); goto done; } if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) { - emulate_ud(ctxt); + rc = emulate_ud(ctxt); goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { - emulate_gp(ctxt, 0); + rc = emulate_gp(ctxt, 0); goto done; } @@ -3050,7 +2982,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) } if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) { - rc = read_emulated(ctxt, ops, c->src.addr.mem, + rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem), c->src.valptr, c->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; @@ -3058,7 +2990,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) } if (c->src2.type == OP_MEM) { - rc = read_emulated(ctxt, ops, c->src2.addr.mem, + rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem), &c->src2.val, c->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; @@ -3070,7 +3002,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ - rc = read_emulated(ctxt, ops, c->dst.addr.mem, + rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem), &c->dst.val, c->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; @@ -3215,13 +3147,13 @@ special_insn: break; case 0x8c: /* mov r/m, sreg */ if (c->modrm_reg > VCPU_SREG_GS) { - emulate_ud(ctxt); + rc = emulate_ud(ctxt); goto done; } c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); break; case 0x8d: /* lea r16/r32, m */ - c->dst.val = c->src.addr.mem; + c->dst.val = c->src.addr.mem.ea; break; case 0x8e: { /* mov seg, r/m16 */ uint16_t sel; @@ -3230,7 +3162,7 @@ special_insn: if (c->modrm_reg == VCPU_SREG_CS || c->modrm_reg > VCPU_SREG_GS) { - emulate_ud(ctxt); + rc = emulate_ud(ctxt); goto done; } @@ -3268,7 +3200,6 @@ special_insn: break; case 0xa6 ... 0xa7: /* cmps */ c->dst.type = OP_NONE; /* Disable writeback. */ - DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem); goto cmp; case 0xa8 ... 
0xa9: /* test ax, imm */ goto test; @@ -3363,7 +3294,7 @@ special_insn: do_io_in: c->dst.bytes = min(c->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { - emulate_gp(ctxt, 0); + rc = emulate_gp(ctxt, 0); goto done; } if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, @@ -3377,7 +3308,7 @@ special_insn: c->src.bytes = min(c->src.bytes, 4u); if (!emulator_io_permited(ctxt, ops, c->dst.val, c->src.bytes)) { - emulate_gp(ctxt, 0); + rc = emulate_gp(ctxt, 0); goto done; } ops->pio_out_emulated(c->src.bytes, c->dst.val, @@ -3402,14 +3333,14 @@ special_insn: break; case 0xfa: /* cli */ if (emulator_bad_iopl(ctxt, ops)) { - emulate_gp(ctxt, 0); + rc = emulate_gp(ctxt, 0); goto done; } else ctxt->eflags &= ~X86_EFLAGS_IF; break; case 0xfb: /* sti */ if (emulator_bad_iopl(ctxt, ops)) { - emulate_gp(ctxt, 0); + rc = emulate_gp(ctxt, 0); goto done; } else { ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; @@ -3449,11 +3380,11 @@ writeback: c->dst.type = saved_dst_type; if ((c->d & SrcMask) == SrcSI) - string_addr_inc(ctxt, seg_override_base(ctxt, ops, c), + string_addr_inc(ctxt, seg_override(ctxt, ops, c), VCPU_REGS_RSI, &c->src); if ((c->d & DstMask) == DstDI) - string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI, + string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI, &c->dst); if (c->rep_prefix && (c->d & String)) { @@ -3482,6 +3413,8 @@ writeback: ctxt->eip = c->eip; done: + if (rc == X86EMUL_PROPAGATE_FAULT) + ctxt->have_exception = true; return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: @@ -3544,9 +3477,11 @@ twobyte_insn: break; case 5: /* not defined */ emulate_ud(ctxt); + rc = X86EMUL_PROPAGATE_FAULT; goto done; case 7: /* invlpg*/ - emulate_invlpg(ctxt->vcpu, c->src.addr.mem); + emulate_invlpg(ctxt->vcpu, + linear(ctxt, c->src.addr.mem)); /* Disable writeback. */ c->dst.type = OP_NONE; break; @@ -3573,6 +3508,7 @@ twobyte_insn: case 5 ... 7: case 9 ... 
15: emulate_ud(ctxt); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu); @@ -3581,6 +3517,7 @@ twobyte_insn: if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && (c->modrm_reg == 4 || c->modrm_reg == 5)) { emulate_ud(ctxt); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu); @@ -3588,6 +3525,7 @@ twobyte_insn: case 0x22: /* mov reg, cr */ if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) { emulate_gp(ctxt, 0); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } c->dst.type = OP_NONE; @@ -3596,6 +3534,7 @@ twobyte_insn: if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && (c->modrm_reg == 4 || c->modrm_reg == 5)) { emulate_ud(ctxt); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } @@ -3604,6 +3543,7 @@ twobyte_insn: ~0ULL : ~0U), ctxt->vcpu) < 0) { /* #UD condition is already handled by the code above */ emulate_gp(ctxt, 0); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } @@ -3615,6 +3555,7 @@ twobyte_insn: | ((u64)c->regs[VCPU_REGS_RDX] << 32); if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { emulate_gp(ctxt, 0); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } rc = X86EMUL_CONTINUE; @@ -3623,6 +3564,7 @@ twobyte_insn: /* rdmsr */ if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { emulate_gp(ctxt, 0); + rc = X86EMUL_PROPAGATE_FAULT; goto done; } else { c->regs[VCPU_REGS_RAX] = (u32)msr_data; @@ -3785,6 +3727,5 @@ twobyte_insn: goto writeback; cannot_emulate: - DPRINTF("Cannot emulate %02x\n", c->b); return -1; } diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 975bb45329a1..3377d53fcd36 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -73,6 +73,13 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) return vcpu->arch.cr4 & mask; } +static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) +{ + if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + kvm_x86_ops->decache_cr3(vcpu); + return vcpu->arch.cr3; +} + static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, ~0UL); @@ -84,4 +91,19 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); } +static inline void enter_guest_mode(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hflags |= HF_GUEST_MASK; +} + +static inline void leave_guest_mode(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hflags &= ~HF_GUEST_MASK; +} + +static inline bool is_guest_mode(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hflags & HF_GUEST_MASK; +} + #endif diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 413f8973a855..93cf9d0d3653 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -277,7 +277,8 @@ static void apic_update_ppr(struct kvm_lapic *apic) if (old_ppr != ppr) { apic_set_reg(apic, APIC_PROCPRI, ppr); - kvm_make_request(KVM_REQ_EVENT, apic->vcpu); + if (ppr < old_ppr) + kvm_make_request(KVM_REQ_EVENT, apic->vcpu); } } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index fbb04aee8301..f02b8edc3d44 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -18,9 +18,11 @@ * */ +#include "irq.h" #include "mmu.h" #include "x86.h" #include "kvm_cache_regs.h" +#include "x86.h" #include #include @@ -194,7 +196,6 @@ static struct percpu_counter kvm_total_used_mmu_pages; static u64 __read_mostly shadow_trap_nonpresent_pte; static u64 __read_mostly shadow_notrap_nonpresent_pte; -static u64 __read_mostly shadow_base_present_pte; static u64 __read_mostly 
shadow_nx_mask; static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ static u64 __read_mostly shadow_user_mask; @@ -213,12 +214,6 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) } EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes); -void kvm_mmu_set_base_ptes(u64 base_pte) -{ - shadow_base_present_pte = base_pte; -} -EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes); - void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask) { @@ -482,46 +477,46 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) } /* - * Return the pointer to the largepage write count for a given - * gfn, handling slots that are not large page aligned. + * Return the pointer to the large page information for a given gfn, + * handling slots that are not large page aligned. */ -static int *slot_largepage_idx(gfn_t gfn, - struct kvm_memory_slot *slot, - int level) +static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, + struct kvm_memory_slot *slot, + int level) { unsigned long idx; idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); - return &slot->lpage_info[level - 2][idx].write_count; + return &slot->lpage_info[level - 2][idx]; } static void account_shadowed(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; - int *write_count; + struct kvm_lpage_info *linfo; int i; slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { - write_count = slot_largepage_idx(gfn, slot, i); - *write_count += 1; + linfo = lpage_info_slot(gfn, slot, i); + linfo->write_count += 1; } } static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; - int *write_count; + struct kvm_lpage_info *linfo; int i; slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { - write_count = slot_largepage_idx(gfn, slot, i); - *write_count -= 1; - WARN_ON(*write_count < 0); + linfo = lpage_info_slot(gfn, slot, i); + linfo->write_count -= 1; + WARN_ON(linfo->write_count < 0); } } @@ -530,12 +525,12 @@ static int has_wrprotected_page(struct kvm *kvm, int level) { struct kvm_memory_slot *slot; - int *largepage_idx; + struct kvm_lpage_info *linfo; slot = gfn_to_memslot(kvm, gfn); if (slot) { - largepage_idx = slot_largepage_idx(gfn, slot, level); - return *largepage_idx; + linfo = lpage_info_slot(gfn, slot, level); + return linfo->write_count; } return 1; @@ -559,14 +554,18 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn) return ret; } -static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) +static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) { struct kvm_memory_slot *slot; - int host_level, level, max_level; - slot = gfn_to_memslot(vcpu->kvm, large_gfn); if (slot && slot->dirty_bitmap) - return PT_PAGE_TABLE_LEVEL; + return true; + return false; +} + +static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) +{ + int host_level, level, max_level; host_level = host_mapping_level(vcpu->kvm, large_gfn); @@ -590,16 +589,15 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) { struct kvm_memory_slot *slot; - unsigned long idx; + struct kvm_lpage_info *linfo; slot = gfn_to_memslot(kvm, gfn); if (likely(level == PT_PAGE_TABLE_LEVEL)) return &slot->rmap[gfn - slot->base_gfn]; - idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - - 
(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); + linfo = lpage_info_slot(gfn, slot, level); - return &slot->lpage_info[level - 2][idx].rmap_pde; + return &linfo->rmap_pde; } /* @@ -887,19 +885,16 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, end = start + (memslot->npages << PAGE_SHIFT); if (hva >= start && hva < end) { gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; + gfn_t gfn = memslot->base_gfn + gfn_offset; ret = handler(kvm, &memslot->rmap[gfn_offset], data); for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { - unsigned long idx; - int sh; - - sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j); - idx = ((memslot->base_gfn+gfn_offset) >> sh) - - (memslot->base_gfn >> sh); - ret |= handler(kvm, - &memslot->lpage_info[j][idx].rmap_pde, - data); + struct kvm_lpage_info *linfo; + + linfo = lpage_info_slot(gfn, memslot, + PT_DIRECTORY_LEVEL + j); + ret |= handler(kvm, &linfo->rmap_pde, data); } trace_kvm_age_page(hva, memslot, ret); retval |= ret; @@ -950,6 +945,35 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, return young; } +static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, + unsigned long data) +{ + u64 *spte; + int young = 0; + + /* + * If there's no access bit in the secondary pte set by the + * hardware it's up to gup-fast/gup to set the access bit in + * the primary pte or in the page structure. + */ + if (!shadow_accessed_mask) + goto out; + + spte = rmap_next(kvm, rmapp, NULL); + while (spte) { + u64 _spte = *spte; + BUG_ON(!(_spte & PT_PRESENT_MASK)); + young = _spte & PT_ACCESSED_MASK; + if (young) { + young = 1; + break; + } + spte = rmap_next(kvm, rmapp, spte); + } +out: + return young; +} + #define RMAP_RECYCLE_THRESHOLD 1000 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) @@ -970,6 +994,11 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva) return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); } +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); +} + #ifdef MMU_DEBUG static int is_empty_shadow_page(u64 *spt) { @@ -1161,7 +1190,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, } static int nonpaging_sync_page(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, bool clear_unsync) + struct kvm_mmu_page *sp) { return 1; } @@ -1291,7 +1320,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (clear_unsync) kvm_unlink_unsync_page(vcpu->kvm, sp); - if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) { + if (vcpu->arch.mmu.sync_page(vcpu, sp)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return 1; } @@ -1332,12 +1361,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); + kvm_unlink_unsync_page(vcpu->kvm, s); if ((s->role.cr4_pae != !!is_pae(vcpu)) || - (vcpu->arch.mmu.sync_page(vcpu, s, true))) { + (vcpu->arch.mmu.sync_page(vcpu, s))) { kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); continue; } - kvm_unlink_unsync_page(vcpu->kvm, s); flush = true; } @@ -1963,9 +1992,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, int write_fault, int dirty, int level, gfn_t gfn, pfn_t pfn, bool speculative, - bool can_unsync, bool reset_host_protection) + bool can_unsync, bool host_writable) { - u64 spte; + u64 spte, entry = *sptep; int ret = 0; /* @@ -1973,7 +2002,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, * whether the guest actually used the pte (in order to 
detect * demand paging). */ - spte = shadow_base_present_pte; + spte = PT_PRESENT_MASK; if (!speculative) spte |= shadow_accessed_mask; if (!dirty) @@ -1990,8 +2019,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, kvm_is_mmio_pfn(pfn)); - if (reset_host_protection) + if (host_writable) spte |= SPTE_HOST_WRITEABLE; + else + pte_access &= ~ACC_WRITE_MASK; spte |= (u64)pfn << PAGE_SHIFT; @@ -2036,6 +2067,14 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, set_pte: update_spte(sptep, spte); + /* + * If we overwrite a writable spte with a read-only one we + * should flush remote TLBs. Otherwise rmap_write_protect + * will find a read-only spte, even though the writable spte + * might be cached on a CPU's TLB. + */ + if (is_writable_pte(entry) && !is_writable_pte(*sptep)) + kvm_flush_remote_tlbs(vcpu->kvm); done: return ret; } @@ -2045,7 +2084,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, int user_fault, int write_fault, int dirty, int *ptwrite, int level, gfn_t gfn, pfn_t pfn, bool speculative, - bool reset_host_protection) + bool host_writable) { int was_rmapped = 0; int rmap_count; @@ -2080,7 +2119,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, dirty, level, gfn, pfn, speculative, true, - reset_host_protection)) { + host_writable)) { if (write_fault) *ptwrite = 1; kvm_mmu_flush_tlb(vcpu); @@ -2211,7 +2250,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) } static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, - int level, gfn_t gfn, pfn_t pfn) + int map_writable, int level, gfn_t gfn, pfn_t pfn, + bool prefault) { struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; @@ -2220,9 +2260,11 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { if (iterator.level == level) { - mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, + unsigned pte_access = ACC_ALL; + + mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, 0, write, 1, &pt_write, - level, gfn, pfn, false, true); + level, gfn, pfn, prefault, map_writable); direct_pte_prefetch(vcpu, iterator.sptep); ++vcpu->stat.pf_fixed; break; @@ -2277,27 +2319,81 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) return 1; } -static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) +static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, + gfn_t *gfnp, pfn_t *pfnp, int *levelp) +{ + pfn_t pfn = *pfnp; + gfn_t gfn = *gfnp; + int level = *levelp; + + /* + * Check if it's a transparent hugepage. If this would be an + * hugetlbfs page, level wouldn't be set to + * PT_PAGE_TABLE_LEVEL and there would be no adjustment done + * here. + */ + if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && + level == PT_PAGE_TABLE_LEVEL && + PageTransCompound(pfn_to_page(pfn)) && + !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { + unsigned long mask; + /* + * mmu_notifier_retry was successful and we hold the + * mmu_lock here, so the pmd can't become splitting + * from under us, and in turn + * __split_huge_page_refcount() can't run from under + * us and we can safely transfer the refcount from + * PG_tail to PG_head as we switch the pfn to tail to + * head. 
+ */ + *levelp = level = PT_DIRECTORY_LEVEL; + mask = KVM_PAGES_PER_HPAGE(level) - 1; + VM_BUG_ON((gfn & mask) != (pfn & mask)); + if (pfn & mask) { + gfn &= ~mask; + *gfnp = gfn; + kvm_release_pfn_clean(pfn); + pfn &= ~mask; + if (!get_page_unless_zero(pfn_to_page(pfn))) + BUG(); + *pfnp = pfn; + } + } +} + +static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + gva_t gva, pfn_t *pfn, bool write, bool *writable); + +static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, + bool prefault) { int r; int level; + int force_pt_level; pfn_t pfn; unsigned long mmu_seq; + bool map_writable; - level = mapping_level(vcpu, gfn); - - /* - * This path builds a PAE pagetable - so we can map 2mb pages at - * maximum. Therefore check if the level is larger than that. - */ - if (level > PT_DIRECTORY_LEVEL) - level = PT_DIRECTORY_LEVEL; + force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); + if (likely(!force_pt_level)) { + level = mapping_level(vcpu, gfn); + /* + * This path builds a PAE pagetable - so we can map + * 2mb pages at maximum. Therefore check if the level + * is larger than that. + */ + if (level > PT_DIRECTORY_LEVEL) + level = PT_DIRECTORY_LEVEL; - gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + } else + level = PT_PAGE_TABLE_LEVEL; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - pfn = gfn_to_pfn(vcpu->kvm, gfn); + + if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) + return 0; /* mmio */ if (is_error_pfn(pfn)) @@ -2307,7 +2403,10 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; kvm_mmu_free_some_pages(vcpu); - r = __direct_map(vcpu, v, write, level, gfn, pfn); + if (likely(!force_pt_level)) + transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); + r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, + prefault); spin_unlock(&vcpu->kvm->mmu_lock); @@ -2530,6 +2629,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) hpa_t root = vcpu->arch.mmu.root_hpa; sp = page_header(root); mmu_sync_children(vcpu, sp); + trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); return; } for (i = 0; i < 4; ++i) { @@ -2552,23 +2652,24 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) } static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, u32 *error) + u32 access, struct x86_exception *exception) { - if (error) - *error = 0; + if (exception) + exception->error_code = 0; return vaddr; } static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, u32 *error) + u32 access, + struct x86_exception *exception) { - if (error) - *error = 0; + if (exception) + exception->error_code = 0; return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); } static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, - u32 error_code) + u32 error_code, bool prefault) { gfn_t gfn; int r; @@ -2584,17 +2685,68 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, gfn = gva >> PAGE_SHIFT; return nonpaging_map(vcpu, gva & PAGE_MASK, - error_code & PFERR_WRITE_MASK, gfn); + error_code & PFERR_WRITE_MASK, gfn, prefault); +} + +static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) +{ + struct kvm_arch_async_pf arch; + + arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.gfn = gfn; + arch.direct_map = vcpu->arch.mmu.direct_map; + arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); + + return kvm_setup_async_pf(vcpu, gva, 
gfn, &arch); +} + +static bool can_do_async_pf(struct kvm_vcpu *vcpu) +{ + if (unlikely(!irqchip_in_kernel(vcpu->kvm) || + kvm_event_needs_reinjection(vcpu))) + return false; + + return kvm_x86_ops->interrupt_allowed(vcpu); } -static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, - u32 error_code) +static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + gva_t gva, pfn_t *pfn, bool write, bool *writable) +{ + bool async; + + *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); + + if (!async) + return false; /* *pfn has correct page already */ + + put_page(pfn_to_page(*pfn)); + + if (!prefault && can_do_async_pf(vcpu)) { + trace_kvm_try_async_get_page(gva, gfn); + if (kvm_find_async_pf_gfn(vcpu, gfn)) { + trace_kvm_async_pf_doublefault(gva, gfn); + kvm_make_request(KVM_REQ_APF_HALT, vcpu); + return true; + } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) + return true; + } + + *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); + + return false; +} + +static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, + bool prefault) { pfn_t pfn; int r; int level; + int force_pt_level; gfn_t gfn = gpa >> PAGE_SHIFT; unsigned long mmu_seq; + int write = error_code & PFERR_WRITE_MASK; + bool map_writable; ASSERT(vcpu); ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); @@ -2603,21 +2755,30 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, if (r) return r; - level = mapping_level(vcpu, gfn); - - gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); + if (likely(!force_pt_level)) { + level = mapping_level(vcpu, gfn); + gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + } else + level = PT_PAGE_TABLE_LEVEL; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - pfn = gfn_to_pfn(vcpu->kvm, gfn); + + if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) + return 0; + + /* mmio */ if (is_error_pfn(pfn)) return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; kvm_mmu_free_some_pages(vcpu); - r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, - level, gfn, pfn); + if (likely(!force_pt_level)) + transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); + r = __direct_map(vcpu, gpa, write, map_writable, + level, gfn, pfn, prefault); spin_unlock(&vcpu->kvm->mmu_lock); return r; @@ -2659,18 +2820,19 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) static void paging_new_cr3(struct kvm_vcpu *vcpu) { - pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3); + pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); mmu_free_roots(vcpu); } static unsigned long get_cr3(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr3; + return kvm_read_cr3(vcpu); } -static void inject_page_fault(struct kvm_vcpu *vcpu) +static void inject_page_fault(struct kvm_vcpu *vcpu, + struct x86_exception *fault) { - vcpu->arch.mmu.inject_page_fault(vcpu); + vcpu->arch.mmu.inject_page_fault(vcpu, fault); } static void paging_free(struct kvm_vcpu *vcpu) @@ -2816,6 +2978,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.walk_mmu; + context->base_role.word = 0; context->new_cr3 = nonpaging_new_cr3; context->page_fault = tdp_page_fault; context->free = nonpaging_free; @@ -3008,9 +3171,6 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, return; } - if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL)) - return; - ++vcpu->kvm->stat.mmu_pte_updated; if (!sp->role.cr4_pae) 
paging32_update_pte(vcpu, sp, spte, new); @@ -3264,12 +3424,13 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) } } -int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code) +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, + void *insn, int insn_len) { int r; enum emulation_result er; - r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code); + r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); if (r < 0) goto out; @@ -3282,7 +3443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code) if (r) goto out; - er = emulate_instruction(vcpu, cr2, error_code, 0); + er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len); switch (er) { case EMULATE_DONE: @@ -3377,11 +3538,14 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) if (!test_bit(slot, sp->slot_bitmap)) continue; + if (sp->role.level != PT_PAGE_TABLE_LEVEL) + continue; + pt = sp->spt; for (i = 0; i < PT64_ENT_PER_PAGE; ++i) /* avoid RMW */ if (is_writable_pte(pt[i])) - pt[i] &= ~PT_WRITABLE_MASK; + update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK); } kvm_flush_remote_tlbs(kvm); } @@ -3463,13 +3627,6 @@ static void mmu_destroy_caches(void) kmem_cache_destroy(mmu_page_header_cache); } -void kvm_mmu_module_exit(void) -{ - mmu_destroy_caches(); - percpu_counter_destroy(&kvm_total_used_mmu_pages); - unregister_shrinker(&mmu_shrinker); -} - int kvm_mmu_module_init(void) { pte_chain_cache = kmem_cache_create("kvm_pte_chain", @@ -3566,7 +3723,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) { - (void)kvm_set_cr3(vcpu, vcpu->arch.cr3); + (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu)); return 1; } @@ -3662,12 +3819,6 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) } EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); -#ifdef CONFIG_KVM_MMU_AUDIT -#include "mmu_audit.c" -#else -static void mmu_audit_disable(void) { } -#endif - void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { ASSERT(vcpu); @@ -3675,5 +3826,18 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) destroy_kvm_mmu(vcpu); free_mmu_pages(vcpu); mmu_free_memory_caches(vcpu); +} + +#ifdef CONFIG_KVM_MMU_AUDIT +#include "mmu_audit.c" +#else +static void mmu_audit_disable(void) { } +#endif + +void kvm_mmu_module_exit(void) +{ + mmu_destroy_caches(); + percpu_counter_destroy(&kvm_total_used_mmu_pages); + unregister_shrinker(&mmu_shrinker); mmu_audit_disable(); } diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index ba2bcdde6221..5f6223b8bcf7 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -19,11 +19,9 @@ #include -static int audit_point; - -#define audit_printk(fmt, args...) \ +#define audit_printk(kvm, fmt, args...) 
\ printk(KERN_ERR "audit: (%s) error: " \ - fmt, audit_point_name[audit_point], ##args) + fmt, audit_point_name[kvm->arch.audit_point], ##args) typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level); @@ -97,18 +95,21 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) if (sp->unsync) { if (level != PT_PAGE_TABLE_LEVEL) { - audit_printk("unsync sp: %p level = %d\n", sp, level); + audit_printk(vcpu->kvm, "unsync sp: %p " + "level = %d\n", sp, level); return; } if (*sptep == shadow_notrap_nonpresent_pte) { - audit_printk("notrap spte in unsync sp: %p\n", sp); + audit_printk(vcpu->kvm, "notrap spte in unsync " + "sp: %p\n", sp); return; } } if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) { - audit_printk("notrap spte in direct sp: %p\n", sp); + audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n", + sp); return; } @@ -125,8 +126,9 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) hpa = pfn << PAGE_SHIFT; if ((*sptep & PT64_BASE_ADDR_MASK) != hpa) - audit_printk("levels %d pfn %llx hpa %llx ent %llxn", - vcpu->arch.mmu.root_level, pfn, hpa, *sptep); + audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx " + "ent %llxn", vcpu->arch.mmu.root_level, pfn, + hpa, *sptep); } static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) @@ -142,8 +144,8 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) if (!gfn_to_memslot(kvm, gfn)) { if (!printk_ratelimit()) return; - audit_printk("no memslot for gfn %llx\n", gfn); - audit_printk("index %ld of sp (gfn=%llx)\n", + audit_printk(kvm, "no memslot for gfn %llx\n", gfn); + audit_printk(kvm, "index %ld of sp (gfn=%llx)\n", (long int)(sptep - rev_sp->spt), rev_sp->gfn); dump_stack(); return; @@ -153,7 +155,8 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) if (!*rmapp) { if (!printk_ratelimit()) return; - audit_printk("no rmap for writable spte %llx\n", *sptep); + audit_printk(kvm, "no rmap for writable spte %llx\n", + *sptep); dump_stack(); } } @@ -168,8 +171,9 @@ static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level) { struct kvm_mmu_page *sp = page_header(__pa(sptep)); - if (audit_point == AUDIT_POST_SYNC && sp->unsync) - audit_printk("meet unsync sp(%p) after sync root.\n", sp); + if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync) + audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync " + "root.\n", sp); } static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp) @@ -202,8 +206,9 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp) spte = rmap_next(kvm, rmapp, NULL); while (spte) { if (is_writable_pte(*spte)) - audit_printk("shadow page has writable mappings: gfn " - "%llx role %x\n", sp->gfn, sp->role.word); + audit_printk(kvm, "shadow page has writable " + "mappings: gfn %llx role %x\n", + sp->gfn, sp->role.word); spte = rmap_next(kvm, rmapp, spte); } } @@ -238,7 +243,7 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point) if (!__ratelimit(&ratelimit_state)) return; - audit_point = point; + vcpu->kvm->arch.audit_point = point; audit_all_active_sps(vcpu->kvm); audit_vcpu_spte(vcpu); } diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index cd7a833a3b52..6bccc24c4181 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -72,7 +72,7 @@ struct guest_walker { unsigned pt_access; unsigned pte_access; gfn_t gfn; - u32 error_code; + struct x86_exception fault; }; static gfn_t 
gpte_to_gfn_lvl(pt_element_t gpte, int lvl) @@ -266,21 +266,23 @@ walk: return 1; error: - walker->error_code = 0; + walker->fault.vector = PF_VECTOR; + walker->fault.error_code_valid = true; + walker->fault.error_code = 0; if (present) - walker->error_code |= PFERR_PRESENT_MASK; + walker->fault.error_code |= PFERR_PRESENT_MASK; - walker->error_code |= write_fault | user_fault; + walker->fault.error_code |= write_fault | user_fault; if (fetch_fault && mmu->nx) - walker->error_code |= PFERR_FETCH_MASK; + walker->fault.error_code |= PFERR_FETCH_MASK; if (rsvd_fault) - walker->error_code |= PFERR_RSVD_MASK; + walker->fault.error_code |= PFERR_RSVD_MASK; - vcpu->arch.fault.address = addr; - vcpu->arch.fault.error_code = walker->error_code; + walker->fault.address = addr; + walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; - trace_kvm_mmu_walker_error(walker->error_code); + trace_kvm_mmu_walker_error(walker->fault.error_code); return 0; } @@ -299,25 +301,42 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker, addr, access); } +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *spte, + pt_element_t gpte) +{ + u64 nonpresent = shadow_trap_nonpresent_pte; + + if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + goto no_present; + + if (!is_present_gpte(gpte)) { + if (!sp->unsync) + nonpresent = shadow_notrap_nonpresent_pte; + goto no_present; + } + + if (!(gpte & PT_ACCESSED_MASK)) + goto no_present; + + return false; + +no_present: + drop_spte(vcpu->kvm, spte, nonpresent); + return true; +} + static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte) { pt_element_t gpte; unsigned pte_access; pfn_t pfn; - u64 new_spte; gpte = *(const pt_element_t *)pte; - if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) { - if (!is_present_gpte(gpte)) { - if (sp->unsync) - new_spte = shadow_trap_nonpresent_pte; - else - new_spte = shadow_notrap_nonpresent_pte; - __set_spte(spte, new_spte); - } + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) return; - } + pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn) @@ -329,7 +348,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return; kvm_get_pfn(pfn); /* - * we call mmu_set_spte() with reset_host_protection = true beacuse that + * we call mmu_set_spte() with host_writable = true beacuse that * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). 
*/ mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, @@ -364,7 +383,6 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, u64 *sptep) { struct kvm_mmu_page *sp; - struct kvm_mmu *mmu = &vcpu->arch.mmu; pt_element_t *gptep = gw->prefetch_ptes; u64 *spte; int i; @@ -395,14 +413,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, gpte = gptep[i]; - if (!is_present_gpte(gpte) || - is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) { - if (!sp->unsync) - __set_spte(spte, shadow_notrap_nonpresent_pte); - continue; - } - - if (!(gpte & PT_ACCESSED_MASK)) + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) continue; pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); @@ -427,7 +438,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct guest_walker *gw, int user_fault, int write_fault, int hlevel, - int *ptwrite, pfn_t pfn) + int *ptwrite, pfn_t pfn, bool map_writable, + bool prefault) { unsigned access = gw->pt_access; struct kvm_mmu_page *sp = NULL; @@ -501,7 +513,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access, user_fault, write_fault, dirty, ptwrite, it.level, - gw->gfn, pfn, false, true); + gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); return it.sptep; @@ -527,8 +539,8 @@ out_gpte_changed: * Returns: 1 if we need to emulate the instruction, 0 otherwise, or * a negative value on error. */ -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, - u32 error_code) +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, + bool prefault) { int write_fault = error_code & PFERR_WRITE_MASK; int user_fault = error_code & PFERR_USER_MASK; @@ -538,7 +550,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, int r; pfn_t pfn; int level = PT_PAGE_TABLE_LEVEL; + int force_pt_level; unsigned long mmu_seq; + bool map_writable; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); @@ -556,19 +570,29 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, */ if (!r) { pgprintk("%s: guest page fault\n", __func__); - inject_page_fault(vcpu); - vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ + if (!prefault) { + inject_page_fault(vcpu, &walker.fault); + /* reset fork detector */ + vcpu->arch.last_pt_write_count = 0; + } return 0; } - if (walker.level >= PT_DIRECTORY_LEVEL) { + if (walker.level >= PT_DIRECTORY_LEVEL) + force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn); + else + force_pt_level = 1; + if (!force_pt_level) { level = min(walker.level, mapping_level(vcpu, walker.gfn)); walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); } mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); + + if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, + &map_writable)) + return 0; /* mmio */ if (is_error_pfn(pfn)) @@ -580,8 +604,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); kvm_mmu_free_some_pages(vcpu); + if (!force_pt_level) + transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, - level, &write_pt, pfn); + level, &write_pt, pfn, map_writable, prefault); (void)sptep; pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__, 
sptep, *sptep, write_pt); @@ -661,7 +687,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) } static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, - u32 *error) + struct x86_exception *exception) { struct guest_walker walker; gpa_t gpa = UNMAPPED_GVA; @@ -672,14 +698,15 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, if (r) { gpa = gfn_to_gpa(walker.gfn); gpa |= vaddr & ~PAGE_MASK; - } else if (error) - *error = walker.error_code; + } else if (exception) + *exception = walker.fault; return gpa; } static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, u32 *error) + u32 access, + struct x86_exception *exception) { struct guest_walker walker; gpa_t gpa = UNMAPPED_GVA; @@ -690,8 +717,8 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, if (r) { gpa = gfn_to_gpa(walker.gfn); gpa |= vaddr & ~PAGE_MASK; - } else if (error) - *error = walker.error_code; + } else if (exception) + *exception = walker.fault; return gpa; } @@ -730,12 +757,19 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, * Using the cached information from sp->gfns is safe because: * - The spte has a reference to the struct page, so the pfn for a given gfn * can't change unless all sptes pointing to it are nuked first. + * + * Note: + * We should flush all tlbs if spte is dropped even though guest is + * responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page + * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't + * used by guest then tlbs are not flushed, so guest is allowed to access the + * freed pages. + * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. */ -static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - bool clear_unsync) +static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { int i, offset, nr_present; - bool reset_host_protection; + bool host_writable; gpa_t first_pte_gpa; offset = nr_present = 0; @@ -764,31 +798,27 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return -EINVAL; gfn = gpte_to_gfn(gpte); - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL) - || gfn != sp->gfns[i] || !is_present_gpte(gpte) - || !(gpte & PT_ACCESSED_MASK)) { - u64 nonpresent; - if (is_present_gpte(gpte) || !clear_unsync) - nonpresent = shadow_trap_nonpresent_pte; - else - nonpresent = shadow_notrap_nonpresent_pte; - drop_spte(vcpu->kvm, &sp->spt[i], nonpresent); + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { + vcpu->kvm->tlbs_dirty++; + continue; + } + + if (gfn != sp->gfns[i]) { + drop_spte(vcpu->kvm, &sp->spt[i], + shadow_trap_nonpresent_pte); + vcpu->kvm->tlbs_dirty++; continue; } nr_present++; pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); - if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { - pte_access &= ~ACC_WRITE_MASK; - reset_host_protection = 0; - } else { - reset_host_protection = 1; - } + host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; + set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, spte_to_pfn(sp->spt[i]), true, false, - reset_host_protection); + host_writable); } return !nr_present; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b81a9b7c2ca4..25bd1bc5aad2 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -31,6 +31,7 @@ #include #include +#include #include #include "trace.h" @@ -50,6 +51,10 @@ MODULE_LICENSE("GPL"); #define SVM_FEATURE_LBRV 
(1 << 1) #define SVM_FEATURE_SVML (1 << 2) #define SVM_FEATURE_NRIP (1 << 3) +#define SVM_FEATURE_TSC_RATE (1 << 4) +#define SVM_FEATURE_VMCB_CLEAN (1 << 5) +#define SVM_FEATURE_FLUSH_ASID (1 << 6) +#define SVM_FEATURE_DECODE_ASSIST (1 << 7) #define SVM_FEATURE_PAUSE_FILTER (1 << 10) #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ @@ -97,10 +102,8 @@ struct nested_state { unsigned long vmexit_rax; /* cache for intercepts of the guest */ - u16 intercept_cr_read; - u16 intercept_cr_write; - u16 intercept_dr_read; - u16 intercept_dr_write; + u32 intercept_cr; + u32 intercept_dr; u32 intercept_exceptions; u64 intercept; @@ -123,7 +126,12 @@ struct vcpu_svm { u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; - u64 host_gs_base; + struct { + u16 fs; + u16 gs; + u16 ldt; + u64 gs_base; + } host; u32 *msrpm; @@ -133,6 +141,7 @@ struct vcpu_svm { unsigned int3_injected; unsigned long int3_rip; + u32 apf_reason; }; #define MSR_INVALID 0xffffffffU @@ -180,14 +189,151 @@ static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); +enum { + VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, + pause filter count */ + VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */ + VMCB_ASID, /* ASID */ + VMCB_INTR, /* int_ctl, int_vector */ + VMCB_NPT, /* npt_en, nCR3, gPAT */ + VMCB_CR, /* CR0, CR3, CR4, EFER */ + VMCB_DR, /* DR6, DR7 */ + VMCB_DT, /* GDT, IDT */ + VMCB_SEG, /* CS, DS, SS, ES, CPL */ + VMCB_CR2, /* CR2 only */ + VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */ + VMCB_DIRTY_MAX, +}; + +/* TPR and CR2 are always written before VMRUN */ +#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2)) + +static inline void mark_all_dirty(struct vmcb *vmcb) +{ + vmcb->control.clean = 0; +} + +static inline void mark_all_clean(struct vmcb *vmcb) +{ + vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1) + & ~VMCB_ALWAYS_DIRTY_MASK; +} + +static inline void mark_dirty(struct vmcb *vmcb, int bit) +{ + vmcb->control.clean &= ~(1 << bit); +} + static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_svm, vcpu); } -static inline bool is_nested(struct vcpu_svm *svm) +static void recalc_intercepts(struct vcpu_svm *svm) +{ + struct vmcb_control_area *c, *h; + struct nested_state *g; + + mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + + if (!is_guest_mode(&svm->vcpu)) + return; + + c = &svm->vmcb->control; + h = &svm->nested.hsave->control; + g = &svm->nested; + + c->intercept_cr = h->intercept_cr | g->intercept_cr; + c->intercept_dr = h->intercept_dr | g->intercept_dr; + c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; + c->intercept = h->intercept | g->intercept; +} + +static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) +{ + if (is_guest_mode(&svm->vcpu)) + return svm->nested.hsave; + else + return svm->vmcb; +} + +static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_cr |= (1U << bit); + + recalc_intercepts(svm); +} + +static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_cr &= ~(1U << bit); + + recalc_intercepts(svm); +} + +static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + return vmcb->control.intercept_cr & (1U << bit); +} + +static inline void 
set_dr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_dr |= (1U << bit); + + recalc_intercepts(svm); +} + +static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_dr &= ~(1U << bit); + + recalc_intercepts(svm); +} + +static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_exceptions |= (1U << bit); + + recalc_intercepts(svm); +} + +static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) { - return svm->nested.vmcb; + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept_exceptions &= ~(1U << bit); + + recalc_intercepts(svm); +} + +static inline void set_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept |= (1ULL << bit); + + recalc_intercepts(svm); +} + +static inline void clr_intercept(struct vcpu_svm *svm, int bit) +{ + struct vmcb *vmcb = get_host_vmcb(svm); + + vmcb->control.intercept &= ~(1ULL << bit); + + recalc_intercepts(svm); } static inline void enable_gif(struct vcpu_svm *svm) @@ -264,11 +410,6 @@ static u32 svm_msrpm_offset(u32 msr) #define MAX_INST_SIZE 15 -static inline u32 svm_has(u32 feat) -{ - return svm_features & feat; -} - static inline void clgi(void) { asm volatile (__ex(SVM_CLGI)); @@ -284,16 +425,6 @@ static inline void invlpga(unsigned long addr, u32 asid) asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid)); } -static inline void force_new_asid(struct kvm_vcpu *vcpu) -{ - to_svm(vcpu)->asid_generation--; -} - -static inline void flush_guest_tlb(struct kvm_vcpu *vcpu) -{ - force_new_asid(vcpu); -} - static int get_npt_level(void) { #ifdef CONFIG_X86_64 @@ -310,6 +441,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) efer &= ~EFER_LME; to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; + mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); } static int is_external_interrupt(u32 info) @@ -347,7 +479,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) svm->next_rip = svm->vmcb->control.next_rip; if (!svm->next_rip) { - if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) != + if (emulate_instruction(vcpu, EMULTYPE_SKIP) != EMULATE_DONE) printk(KERN_DEBUG "%s: NOP\n", __func__); return; @@ -374,7 +506,7 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, nested_svm_check_exception(svm, nr, has_error_code, error_code)) return; - if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) { + if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); /* @@ -670,7 +802,7 @@ static __init int svm_hardware_setup(void) svm_features = cpuid_edx(SVM_CPUID_FUNC); - if (!svm_has(SVM_FEATURE_NPT)) + if (!boot_cpu_has(X86_FEATURE_NPT)) npt_enabled = false; if (npt_enabled && !npt) { @@ -725,13 +857,15 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; - if (is_nested(svm)) { + if (is_guest_mode(vcpu)) { g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; } svm->vmcb->control.tsc_offset = offset + g_tsc_offset; + + mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) @@ -739,8 +873,9 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 
adjustment) struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.tsc_offset += adjustment; - if (is_nested(svm)) + if (is_guest_mode(vcpu)) svm->nested.hsave->control.tsc_offset += adjustment; + mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } static void init_vmcb(struct vcpu_svm *svm) @@ -749,62 +884,62 @@ static void init_vmcb(struct vcpu_svm *svm) struct vmcb_save_area *save = &svm->vmcb->save; svm->vcpu.fpu_active = 1; + svm->vcpu.arch.hflags = 0; - control->intercept_cr_read = INTERCEPT_CR0_MASK | - INTERCEPT_CR3_MASK | - INTERCEPT_CR4_MASK; - - control->intercept_cr_write = INTERCEPT_CR0_MASK | - INTERCEPT_CR3_MASK | - INTERCEPT_CR4_MASK | - INTERCEPT_CR8_MASK; - - control->intercept_dr_read = INTERCEPT_DR0_MASK | - INTERCEPT_DR1_MASK | - INTERCEPT_DR2_MASK | - INTERCEPT_DR3_MASK | - INTERCEPT_DR4_MASK | - INTERCEPT_DR5_MASK | - INTERCEPT_DR6_MASK | - INTERCEPT_DR7_MASK; - - control->intercept_dr_write = INTERCEPT_DR0_MASK | - INTERCEPT_DR1_MASK | - INTERCEPT_DR2_MASK | - INTERCEPT_DR3_MASK | - INTERCEPT_DR4_MASK | - INTERCEPT_DR5_MASK | - INTERCEPT_DR6_MASK | - INTERCEPT_DR7_MASK; - - control->intercept_exceptions = (1 << PF_VECTOR) | - (1 << UD_VECTOR) | - (1 << MC_VECTOR); - - - control->intercept = (1ULL << INTERCEPT_INTR) | - (1ULL << INTERCEPT_NMI) | - (1ULL << INTERCEPT_SMI) | - (1ULL << INTERCEPT_SELECTIVE_CR0) | - (1ULL << INTERCEPT_CPUID) | - (1ULL << INTERCEPT_INVD) | - (1ULL << INTERCEPT_HLT) | - (1ULL << INTERCEPT_INVLPG) | - (1ULL << INTERCEPT_INVLPGA) | - (1ULL << INTERCEPT_IOIO_PROT) | - (1ULL << INTERCEPT_MSR_PROT) | - (1ULL << INTERCEPT_TASK_SWITCH) | - (1ULL << INTERCEPT_SHUTDOWN) | - (1ULL << INTERCEPT_VMRUN) | - (1ULL << INTERCEPT_VMMCALL) | - (1ULL << INTERCEPT_VMLOAD) | - (1ULL << INTERCEPT_VMSAVE) | - (1ULL << INTERCEPT_STGI) | - (1ULL << INTERCEPT_CLGI) | - (1ULL << INTERCEPT_SKINIT) | - (1ULL << INTERCEPT_WBINVD) | - (1ULL << INTERCEPT_MONITOR) | - (1ULL << INTERCEPT_MWAIT); + set_cr_intercept(svm, INTERCEPT_CR0_READ); + set_cr_intercept(svm, INTERCEPT_CR3_READ); + set_cr_intercept(svm, INTERCEPT_CR4_READ); + set_cr_intercept(svm, INTERCEPT_CR0_WRITE); + set_cr_intercept(svm, INTERCEPT_CR3_WRITE); + set_cr_intercept(svm, INTERCEPT_CR4_WRITE); + set_cr_intercept(svm, INTERCEPT_CR8_WRITE); + + set_dr_intercept(svm, INTERCEPT_DR0_READ); + set_dr_intercept(svm, INTERCEPT_DR1_READ); + set_dr_intercept(svm, INTERCEPT_DR2_READ); + set_dr_intercept(svm, INTERCEPT_DR3_READ); + set_dr_intercept(svm, INTERCEPT_DR4_READ); + set_dr_intercept(svm, INTERCEPT_DR5_READ); + set_dr_intercept(svm, INTERCEPT_DR6_READ); + set_dr_intercept(svm, INTERCEPT_DR7_READ); + + set_dr_intercept(svm, INTERCEPT_DR0_WRITE); + set_dr_intercept(svm, INTERCEPT_DR1_WRITE); + set_dr_intercept(svm, INTERCEPT_DR2_WRITE); + set_dr_intercept(svm, INTERCEPT_DR3_WRITE); + set_dr_intercept(svm, INTERCEPT_DR4_WRITE); + set_dr_intercept(svm, INTERCEPT_DR5_WRITE); + set_dr_intercept(svm, INTERCEPT_DR6_WRITE); + set_dr_intercept(svm, INTERCEPT_DR7_WRITE); + + set_exception_intercept(svm, PF_VECTOR); + set_exception_intercept(svm, UD_VECTOR); + set_exception_intercept(svm, MC_VECTOR); + + set_intercept(svm, INTERCEPT_INTR); + set_intercept(svm, INTERCEPT_NMI); + set_intercept(svm, INTERCEPT_SMI); + set_intercept(svm, INTERCEPT_SELECTIVE_CR0); + set_intercept(svm, INTERCEPT_CPUID); + set_intercept(svm, INTERCEPT_INVD); + set_intercept(svm, INTERCEPT_HLT); + set_intercept(svm, INTERCEPT_INVLPG); + set_intercept(svm, INTERCEPT_INVLPGA); + set_intercept(svm, INTERCEPT_IOIO_PROT); + set_intercept(svm, 
INTERCEPT_MSR_PROT); + set_intercept(svm, INTERCEPT_TASK_SWITCH); + set_intercept(svm, INTERCEPT_SHUTDOWN); + set_intercept(svm, INTERCEPT_VMRUN); + set_intercept(svm, INTERCEPT_VMMCALL); + set_intercept(svm, INTERCEPT_VMLOAD); + set_intercept(svm, INTERCEPT_VMSAVE); + set_intercept(svm, INTERCEPT_STGI); + set_intercept(svm, INTERCEPT_CLGI); + set_intercept(svm, INTERCEPT_SKINIT); + set_intercept(svm, INTERCEPT_WBINVD); + set_intercept(svm, INTERCEPT_MONITOR); + set_intercept(svm, INTERCEPT_MWAIT); + set_intercept(svm, INTERCEPT_XSETBV); control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __pa(svm->msrpm); @@ -855,25 +990,27 @@ static void init_vmcb(struct vcpu_svm *svm) if (npt_enabled) { /* Setup VMCB for Nested Paging */ control->nested_ctl = 1; - control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) | - (1ULL << INTERCEPT_INVLPG)); - control->intercept_exceptions &= ~(1 << PF_VECTOR); - control->intercept_cr_read &= ~INTERCEPT_CR3_MASK; - control->intercept_cr_write &= ~INTERCEPT_CR3_MASK; + clr_intercept(svm, INTERCEPT_TASK_SWITCH); + clr_intercept(svm, INTERCEPT_INVLPG); + clr_exception_intercept(svm, PF_VECTOR); + clr_cr_intercept(svm, INTERCEPT_CR3_READ); + clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = 0x0007040600070406ULL; save->cr3 = 0; save->cr4 = 0; } - force_new_asid(&svm->vcpu); + svm->asid_generation = 0; svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; - if (svm_has(SVM_FEATURE_PAUSE_FILTER)) { + if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { control->pause_filter_count = 3000; - control->intercept |= (1ULL << INTERCEPT_PAUSE); + set_intercept(svm, INTERCEPT_PAUSE); } + mark_all_dirty(svm->vmcb); + enable_gif(svm); } @@ -990,8 +1127,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (unlikely(cpu != vcpu->cpu)) { svm->asid_generation = 0; + mark_all_dirty(svm->vmcb); } +#ifdef CONFIG_X86_64 + rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); +#endif + savesegment(fs, svm->host.fs); + savesegment(gs, svm->host.gs); + svm->host.ldt = kvm_read_ldt(); + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } @@ -1002,6 +1147,14 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) int i; ++vcpu->stat.host_state_reload; + kvm_load_ldt(svm->host.ldt); +#ifdef CONFIG_X86_64 + loadsegment(fs, svm->host.fs); + load_gs_index(svm->host.gs); + wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); +#else + loadsegment(gs, svm->host.gs); +#endif for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } @@ -1021,7 +1174,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) switch (reg) { case VCPU_EXREG_PDPTR: BUG_ON(!npt_enabled); - load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); + load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); break; default: BUG(); @@ -1030,12 +1183,12 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) static void svm_set_vintr(struct vcpu_svm *svm) { - svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR; + set_intercept(svm, INTERCEPT_VINTR); } static void svm_clear_vintr(struct vcpu_svm *svm) { - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); + clr_intercept(svm, INTERCEPT_VINTR); } static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) @@ -1150,6 +1303,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) svm->vmcb->save.idtr.limit = dt->size; svm->vmcb->save.idtr.base = dt->address ; + mark_dirty(svm->vmcb, VMCB_DT); } static void 
svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) @@ -1166,19 +1320,23 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) svm->vmcb->save.gdtr.limit = dt->size; svm->vmcb->save.gdtr.base = dt->address ; + mark_dirty(svm->vmcb, VMCB_DT); } static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { } +static void svm_decache_cr3(struct kvm_vcpu *vcpu) +{ +} + static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } static void update_cr0_intercept(struct vcpu_svm *svm) { - struct vmcb *vmcb = svm->vmcb; ulong gcr0 = svm->vcpu.arch.cr0; u64 *hcr0 = &svm->vmcb->save.cr0; @@ -1188,27 +1346,14 @@ static void update_cr0_intercept(struct vcpu_svm *svm) *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) | (gcr0 & SVM_CR0_SELECTIVE_MASK); + mark_dirty(svm->vmcb, VMCB_CR); if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { - vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK; - vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK; - if (is_nested(svm)) { - struct vmcb *hsave = svm->nested.hsave; - - hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK; - hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK; - vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read; - vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write; - } + clr_cr_intercept(svm, INTERCEPT_CR0_READ); + clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); } else { - svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK; - svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK; - if (is_nested(svm)) { - struct vmcb *hsave = svm->nested.hsave; - - hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK; - hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK; - } + set_cr_intercept(svm, INTERCEPT_CR0_READ); + set_cr_intercept(svm, INTERCEPT_CR0_WRITE); } } @@ -1216,7 +1361,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); - if (is_nested(svm)) { + if (is_guest_mode(vcpu)) { /* * We are here because we run in nested mode, the host kvm * intercepts cr0 writes but the l1 hypervisor does not. 
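The svm.c hunks above route every intercept update through small helpers (set_cr_intercept(), clr_cr_intercept(), set_intercept(), recalc_intercepts()) and tag each VMCB field group they touch with mark_dirty(), so hardware that implements the VMCB clean-bits feature (SVM_FEATURE_VMCB_CLEAN) can skip reloading unchanged state on the next VMRUN. A minimal sketch of that bookkeeping pattern follows; the structures and names are simplified stand-ins for illustration, not the kernel's real struct vmcb layout.

	#include <stdint.h>

	enum { DEMO_INTERCEPTS, DEMO_CR, DEMO_DR, DEMO_DIRTY_MAX };

	struct demo_vmcb {
		uint32_t clean;		/* one clean bit per field group; set means unchanged */
		uint32_t intercept_cr;	/* CR read/write intercept bits */
	};

	/* Software modified a field group: clear its clean bit so hardware reloads it. */
	static inline void demo_mark_dirty(struct demo_vmcb *vmcb, int bit)
	{
		vmcb->clean &= ~(1U << bit);
	}

	/* After a successful VMRUN the in-memory VMCB and the hardware copy agree again. */
	static inline void demo_mark_all_clean(struct demo_vmcb *vmcb)
	{
		vmcb->clean = (1U << DEMO_DIRTY_MAX) - 1;
	}

	/* Every intercept change goes through a helper that also marks its group dirty. */
	static inline void demo_set_cr_intercept(struct demo_vmcb *vmcb, int cr_bit)
	{
		vmcb->intercept_cr |= (1U << cr_bit);
		demo_mark_dirty(vmcb, DEMO_INTERCEPTS);
	}

Funnelling the writes through such helpers is what lets the patch drop the scattered intercept_cr_read/intercept_cr_write updates while keeping the dirty tracking consistent in one place.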
@@ -1268,6 +1413,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) */ cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; + mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); } @@ -1277,13 +1423,14 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) - force_new_asid(vcpu); + svm_flush_tlb(vcpu); vcpu->arch.cr4 = cr4; if (!npt_enabled) cr4 |= X86_CR4_PAE; cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; + mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); } static void svm_set_segment(struct kvm_vcpu *vcpu, @@ -1312,26 +1459,25 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, = (svm->vmcb->save.cs.attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; + mark_dirty(svm->vmcb, VMCB_SEG); } static void update_db_intercept(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - svm->vmcb->control.intercept_exceptions &= - ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); + clr_exception_intercept(svm, DB_VECTOR); + clr_exception_intercept(svm, BP_VECTOR); if (svm->nmi_singlestep) - svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR); + set_exception_intercept(svm, DB_VECTOR); if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) - svm->vmcb->control.intercept_exceptions |= - 1 << DB_VECTOR; + set_exception_intercept(svm, DB_VECTOR); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) - svm->vmcb->control.intercept_exceptions |= - 1 << BP_VECTOR; + set_exception_intercept(svm, BP_VECTOR); } else vcpu->guest_debug = 0; } @@ -1345,21 +1491,9 @@ static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) else svm->vmcb->save.dr7 = vcpu->arch.dr7; - update_db_intercept(vcpu); -} - -static void load_host_msrs(struct kvm_vcpu *vcpu) -{ -#ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); -#endif -} + mark_dirty(svm->vmcb, VMCB_DR); -static void save_host_msrs(struct kvm_vcpu *vcpu) -{ -#ifdef CONFIG_X86_64 - rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); -#endif + update_db_intercept(vcpu); } static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) @@ -1372,6 +1506,8 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) svm->asid_generation = sd->asid_generation; svm->vmcb->control.asid = sd->next_asid++; + + mark_dirty(svm->vmcb, VMCB_ASID); } static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) @@ -1379,20 +1515,40 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.dr7 = value; + mark_dirty(svm->vmcb, VMCB_DR); } static int pf_interception(struct vcpu_svm *svm) { - u64 fault_address; + u64 fault_address = svm->vmcb->control.exit_info_2; u32 error_code; + int r = 1; - fault_address = svm->vmcb->control.exit_info_2; - error_code = svm->vmcb->control.exit_info_1; - - trace_kvm_page_fault(fault_address, error_code); - if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) - kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); - return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); + switch (svm->apf_reason) { + default: + error_code = svm->vmcb->control.exit_info_1; + + trace_kvm_page_fault(fault_address, error_code); + if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) + kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); + r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, + 
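
Annotation: the mark_dirty()/mark_all_dirty() calls sprinkled through these hunks implement VMCB clean-bit bookkeeping: each group of guest state gets one bit, software clears the bit whenever it touches that group, and hardware may keep using its cached copy of any group whose bit is still set at VMRUN. A minimal model, with placeholder bit numbers rather than the architectural encoding:

    #include <stdint.h>

    /* Illustrative only: VMCB clean-bit bookkeeping.  The bit positions
     * and the all-clean mask below are placeholders. */
    enum vmcb_clean_bits {
        VMCB_CR = 0, VMCB_DR, VMCB_DT, VMCB_SEG,
        VMCB_ASID, VMCB_INTR, VMCB_NPT, VMCB_NR_BITS
    };

    #define VMCB_ALL_CLEAN_MASK ((1U << VMCB_NR_BITS) - 1)

    struct vmcb { uint32_t clean; /* ... state groups ... */ };

    static void mark_dirty(struct vmcb *v, int bit) { v->clean &= ~(1U << bit); }
    static void mark_all_dirty(struct vmcb *v)      { v->clean = 0; }
    static void mark_all_clean(struct vmcb *v)      { v->clean = VMCB_ALL_CLEAN_MASK; }

mark_all_dirty() is used whenever the cached state can no longer be trusted (a VMCB switch or CPU migration), and mark_all_clean() runs once per completed VMRUN.
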
svm->vmcb->control.insn_bytes, + svm->vmcb->control.insn_len); + break; + case KVM_PV_REASON_PAGE_NOT_PRESENT: + svm->apf_reason = 0; + local_irq_disable(); + kvm_async_pf_task_wait(fault_address); + local_irq_enable(); + break; + case KVM_PV_REASON_PAGE_READY: + svm->apf_reason = 0; + local_irq_disable(); + kvm_async_pf_task_wake(fault_address); + local_irq_enable(); + break; + } + return r; } static int db_interception(struct vcpu_svm *svm) @@ -1440,7 +1596,7 @@ static int ud_interception(struct vcpu_svm *svm) { int er; - er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD); + er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; @@ -1449,21 +1605,8 @@ static int ud_interception(struct vcpu_svm *svm) static void svm_fpu_activate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - u32 excp; - - if (is_nested(svm)) { - u32 h_excp, n_excp; - - h_excp = svm->nested.hsave->control.intercept_exceptions; - n_excp = svm->nested.intercept_exceptions; - h_excp &= ~(1 << NM_VECTOR); - excp = h_excp | n_excp; - } else { - excp = svm->vmcb->control.intercept_exceptions; - excp &= ~(1 << NM_VECTOR); - } - svm->vmcb->control.intercept_exceptions = excp; + clr_exception_intercept(svm, NM_VECTOR); svm->vcpu.fpu_active = 1; update_cr0_intercept(svm); @@ -1570,7 +1713,7 @@ static int io_interception(struct vcpu_svm *svm) string = (io_info & SVM_IOIO_STR_MASK) != 0; in = (io_info & SVM_IOIO_TYPE_MASK) != 0; if (string || in) - return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; + return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; @@ -1624,17 +1767,19 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; - force_new_asid(vcpu); + mark_dirty(svm->vmcb, VMCB_NPT); + svm_flush_tlb(vcpu); } -static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu) +static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, + struct x86_exception *fault) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.exit_code = SVM_EXIT_NPF; svm->vmcb->control.exit_code_hi = 0; - svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code; - svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address; + svm->vmcb->control.exit_info_1 = fault->error_code; + svm->vmcb->control.exit_info_2 = fault->address; nested_svm_vmexit(svm); } @@ -1680,7 +1825,7 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, { int vmexit; - if (!is_nested(svm)) + if (!is_guest_mode(&svm->vcpu)) return 0; svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; @@ -1698,7 +1843,7 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, /* This function returns true if it is save to enable the irq window */ static inline bool nested_svm_intr(struct vcpu_svm *svm) { - if (!is_nested(svm)) + if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) @@ -1737,7 +1882,7 @@ static inline bool nested_svm_intr(struct vcpu_svm *svm) /* This function returns true if it is save to enable the nmi window */ static inline bool nested_svm_nmi(struct vcpu_svm *svm) { - if (!is_nested(svm)) + if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) @@ -1836,8 +1981,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + 
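
Annotation: pf_interception() now dispatches on svm->apf_reason: zero means an ordinary guest #PF handed to the MMU, while the two paravirtual values are async page fault notifications that park or wake the corresponding task (kvm_async_pf_task_wait()/kvm_async_pf_task_wake() in the hunk, called with interrupts disabled). A compilable sketch of the dispatch, with printf placeholders standing in for the real actions:

    #include <stdint.h>
    #include <stdio.h>

    /* Values mirror the names used in the hunk; handler bodies are
     * placeholders for the real MMU / async-PF calls. */
    enum { APF_NONE = 0, APF_PAGE_NOT_PRESENT = 1, APF_PAGE_READY = 2 };

    static void handle_pf_exit(uint32_t apf_reason, uint64_t fault_address)
    {
        switch (apf_reason) {
        case APF_PAGE_NOT_PRESENT:
            /* real code: local_irq_disable(); kvm_async_pf_task_wait(); */
            printf("park task waiting on token %#llx\n",
                   (unsigned long long)fault_address);
            break;
        case APF_PAGE_READY:
            /* real code: kvm_async_pf_task_wake(), interrupts disabled */
            printf("wake task waiting on token %#llx\n",
                   (unsigned long long)fault_address);
            break;
        default:
            /* ordinary #PF: hand address + error code to the MMU */
            printf("walk MMU for %#llx\n", (unsigned long long)fault_address);
            break;
        }
    }
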
PF_VECTOR: - /* When we're shadowing, trap PFs */ - if (!npt_enabled) + /* When we're shadowing, trap PFs, but not async PF */ + if (!npt_enabled && svm->apf_reason == 0) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + NM_VECTOR: @@ -1865,27 +2010,15 @@ static int nested_svm_intercept(struct vcpu_svm *svm) case SVM_EXIT_IOIO: vmexit = nested_svm_intercept_ioio(svm); break; - case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: { - u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0); - if (svm->nested.intercept_cr_read & cr_bits) + case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: { + u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0); + if (svm->nested.intercept_cr & bit) vmexit = NESTED_EXIT_DONE; break; } - case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: { - u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0); - if (svm->nested.intercept_cr_write & cr_bits) - vmexit = NESTED_EXIT_DONE; - break; - } - case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: { - u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0); - if (svm->nested.intercept_dr_read & dr_bits) - vmexit = NESTED_EXIT_DONE; - break; - } - case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: { - u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0); - if (svm->nested.intercept_dr_write & dr_bits) + case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: { + u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0); + if (svm->nested.intercept_dr & bit) vmexit = NESTED_EXIT_DONE; break; } @@ -1893,6 +2026,10 @@ static int nested_svm_intercept(struct vcpu_svm *svm) u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); if (svm->nested.intercept_exceptions & excp_bits) vmexit = NESTED_EXIT_DONE; + /* async page fault always cause vmexit */ + else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) && + svm->apf_reason != 0) + vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_ERR: { @@ -1926,10 +2063,8 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr struct vmcb_control_area *dst = &dst_vmcb->control; struct vmcb_control_area *from = &from_vmcb->control; - dst->intercept_cr_read = from->intercept_cr_read; - dst->intercept_cr_write = from->intercept_cr_write; - dst->intercept_dr_read = from->intercept_dr_read; - dst->intercept_dr_write = from->intercept_dr_write; + dst->intercept_cr = from->intercept_cr; + dst->intercept_dr = from->intercept_dr; dst->intercept_exceptions = from->intercept_exceptions; dst->intercept = from->intercept; dst->iopm_base_pa = from->iopm_base_pa; @@ -1970,7 +2105,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) if (!nested_vmcb) return 1; - /* Exit nested SVM mode */ + /* Exit Guest-Mode */ + leave_guest_mode(&svm->vcpu); svm->nested.vmcb = 0; /* Give the current vmcb to the guest */ @@ -1984,7 +2120,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) nested_vmcb->save.idtr = vmcb->save.idtr; nested_vmcb->save.efer = svm->vcpu.arch.efer; nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); - nested_vmcb->save.cr3 = svm->vcpu.arch.cr3; + nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); nested_vmcb->save.cr2 = vmcb->save.cr2; nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; nested_vmcb->save.rflags = vmcb->save.rflags; @@ -2061,6 +2197,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) svm->vmcb->save.cpl = 0; svm->vmcb->control.exit_int_info = 0; + mark_all_dirty(svm->vmcb); + nested_svm_unmap(page); nested_svm_uninit_mmu_context(&svm->vcpu); @@ -2148,8 +2286,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) nested_vmcb->control.event_inj, nested_vmcb->control.nested_ctl); - 
trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read, - nested_vmcb->control.intercept_cr_write, + trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff, + nested_vmcb->control.intercept_cr >> 16, nested_vmcb->control.intercept_exceptions, nested_vmcb->control.intercept); @@ -2177,7 +2315,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) if (npt_enabled) hsave->save.cr3 = vmcb->save.cr3; else - hsave->save.cr3 = svm->vcpu.arch.cr3; + hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); copy_vmcb_control_area(hsave, vmcb); @@ -2229,14 +2367,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; /* cache intercepts */ - svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read; - svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write; - svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read; - svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write; + svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; + svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; svm->nested.intercept = nested_vmcb->control.intercept; - force_new_asid(&svm->vcpu); + svm_flush_tlb(&svm->vcpu); svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; @@ -2245,29 +2381,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { /* We only want the cr8 intercept bits of the guest */ - svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK; - svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; + clr_cr_intercept(svm, INTERCEPT_CR8_READ); + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); } /* We don't want to see VMMCALLs from a nested guest */ - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL); - - /* - * We don't want a nested guest to be more powerful than the guest, so - * all intercepts are ORed - */ - svm->vmcb->control.intercept_cr_read |= - nested_vmcb->control.intercept_cr_read; - svm->vmcb->control.intercept_cr_write |= - nested_vmcb->control.intercept_cr_write; - svm->vmcb->control.intercept_dr_read |= - nested_vmcb->control.intercept_dr_read; - svm->vmcb->control.intercept_dr_write |= - nested_vmcb->control.intercept_dr_write; - svm->vmcb->control.intercept_exceptions |= - nested_vmcb->control.intercept_exceptions; - - svm->vmcb->control.intercept |= nested_vmcb->control.intercept; + clr_intercept(svm, INTERCEPT_VMMCALL); svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; @@ -2278,11 +2397,21 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) nested_svm_unmap(page); - /* nested_vmcb is our indicator if nested SVM is activated */ + /* Enter Guest-Mode */ + enter_guest_mode(&svm->vcpu); + + /* + * Merge guest and host intercepts - must be called with vcpu in + * guest-mode to take affect here + */ + recalc_intercepts(svm); + svm->nested.vmcb = vmcb_gpa; enable_gif(svm); + mark_all_dirty(svm->vmcb); + return true; } @@ -2400,6 +2529,8 @@ static int clgi_interception(struct vcpu_svm *svm) svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; + mark_dirty(svm->vmcb, VMCB_INTR); + return 1; } @@ -2426,6 +2557,19 @@ static int skinit_interception(struct vcpu_svm *svm) return 1; } +static int 
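
Annotation: several hunks here rely on the new packed intercept layout: the old intercept_cr_read/intercept_cr_write pair becomes a single 32-bit intercept_cr with reads in bits 0-15 and writes in bits 16-31 (hence the "& 0xffff" / ">> 16" in the tracepoint), matching the SVM exit-code spacing where the write exit codes sit 16 above the read ones; DR intercepts are packed the same way. A small sketch of the helpers this enables, including the OR-merge that keeps a nested guest no more privileged than its host:

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical helpers over the packed 32-bit CR intercept word. */
    #define CR_READ_BIT(cr)   (1U << (cr))
    #define CR_WRITE_BIT(cr)  (1U << ((cr) + 16))

    static bool cr_read_intercepted(uint32_t intercept_cr, int cr)
    {
        return (intercept_cr & CR_READ_BIT(cr)) != 0;
    }

    static bool cr_write_intercepted(uint32_t intercept_cr, int cr)
    {
        return (intercept_cr & CR_WRITE_BIT(cr)) != 0;
    }

    /* Guest-mode merge: the effective intercept set is the OR of the
     * host's and the nested guest's, so L2 can never see less than L1
     * intended to intercept. */
    static uint32_t merge_cr_intercepts(uint32_t host, uint32_t nested)
    {
        return host | nested;
    }
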
xsetbv_interception(struct vcpu_svm *svm) +{ + u64 new_bv = kvm_read_edx_eax(&svm->vcpu); + u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); + + if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + } + + return 1; +} + static int invalid_op_interception(struct vcpu_svm *svm) { kvm_queue_exception(&svm->vcpu, UD_VECTOR); @@ -2507,19 +2651,92 @@ static int cpuid_interception(struct vcpu_svm *svm) static int iret_interception(struct vcpu_svm *svm) { ++svm->vcpu.stat.nmi_window_exits; - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET); + clr_intercept(svm, INTERCEPT_IRET); svm->vcpu.arch.hflags |= HF_IRET_MASK; return 1; } static int invlpg_interception(struct vcpu_svm *svm) { - return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE; + if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) + return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; + + kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); + skip_emulated_instruction(&svm->vcpu); + return 1; } static int emulate_on_interception(struct vcpu_svm *svm) { - return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE; + return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; +} + +#define CR_VALID (1ULL << 63) + +static int cr_interception(struct vcpu_svm *svm) +{ + int reg, cr; + unsigned long val; + int err; + + if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) + return emulate_on_interception(svm); + + if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) + return emulate_on_interception(svm); + + reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; + cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; + + err = 0; + if (cr >= 16) { /* mov to cr */ + cr -= 16; + val = kvm_register_read(&svm->vcpu, reg); + switch (cr) { + case 0: + err = kvm_set_cr0(&svm->vcpu, val); + break; + case 3: + err = kvm_set_cr3(&svm->vcpu, val); + break; + case 4: + err = kvm_set_cr4(&svm->vcpu, val); + break; + case 8: + err = kvm_set_cr8(&svm->vcpu, val); + break; + default: + WARN(1, "unhandled write to CR%d", cr); + kvm_queue_exception(&svm->vcpu, UD_VECTOR); + return 1; + } + } else { /* mov from cr */ + switch (cr) { + case 0: + val = kvm_read_cr0(&svm->vcpu); + break; + case 2: + val = svm->vcpu.arch.cr2; + break; + case 3: + val = kvm_read_cr3(&svm->vcpu); + break; + case 4: + val = kvm_read_cr4(&svm->vcpu); + break; + case 8: + val = kvm_get_cr8(&svm->vcpu); + break; + default: + WARN(1, "unhandled read from CR%d", cr); + kvm_queue_exception(&svm->vcpu, UD_VECTOR); + return 1; + } + kvm_register_write(&svm->vcpu, reg, val); + } + kvm_complete_insn_gp(&svm->vcpu, err); + + return 1; } static int cr0_write_interception(struct vcpu_svm *svm) @@ -2527,7 +2744,7 @@ static int cr0_write_interception(struct vcpu_svm *svm) struct kvm_vcpu *vcpu = &svm->vcpu; int r; - r = emulate_instruction(&svm->vcpu, 0, 0, 0); + r = cr_interception(svm); if (svm->nested.vmexit_rip) { kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip); @@ -2536,22 +2753,47 @@ static int cr0_write_interception(struct vcpu_svm *svm) svm->nested.vmexit_rip = 0; } - return r == EMULATE_DONE; + return r; +} + +static int dr_interception(struct vcpu_svm *svm) +{ + int reg, dr; + unsigned long val; + int err; + + if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) + return emulate_on_interception(svm); + + reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; + dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; + + if (dr >= 16) { 
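
Annotation: cr_interception() leans on decode assists: bit 63 of exit_info_1 (CR_VALID) says the assist data is usable, the low bits carry the GPR number, and the CR index falls out of the exit code, with writes offset by 16 from reads; without a valid assist the code falls back to the instruction emulator. A self-contained sketch of that decode (the exit-code base value below is a placeholder):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CR_VALID               (1ULL << 63)
    #define SVM_EXITINFO_REG_MASK  0x0FULL
    #define EXIT_READ_CR0_BASE     0x000   /* placeholder base */

    struct cr_decode { bool valid; bool is_write; int cr; int gpr; };

    static struct cr_decode decode_cr_exit(uint32_t exit_code, uint64_t exit_info_1)
    {
        struct cr_decode d = { 0 };
        int idx = (int)(exit_code - EXIT_READ_CR0_BASE);

        d.valid    = (exit_info_1 & CR_VALID) != 0;  /* else: emulate */
        d.gpr      = (int)(exit_info_1 & SVM_EXITINFO_REG_MASK);
        d.is_write = idx >= 16;
        d.cr       = d.is_write ? idx - 16 : idx;
        return d;
    }

    int main(void)
    {
        /* "mov %r1, %cr3": write exit for CR3, GPR 1, assist valid */
        struct cr_decode d = decode_cr_exit(EXIT_READ_CR0_BASE + 16 + 3,
                                            CR_VALID | 0x1);
        printf("valid=%d write=%d cr=%d gpr=%d\n",
               d.valid, d.is_write, d.cr, d.gpr);
        return 0;
    }
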
/* mov to DRn */ + val = kvm_register_read(&svm->vcpu, reg); + kvm_set_dr(&svm->vcpu, dr - 16, val); + } else { + err = kvm_get_dr(&svm->vcpu, dr, &val); + if (!err) + kvm_register_write(&svm->vcpu, reg, val); + } + + return 1; } static int cr8_write_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; + int r; u8 cr8_prev = kvm_get_cr8(&svm->vcpu); /* instruction emulation calls kvm_set_cr8() */ - emulate_instruction(&svm->vcpu, 0, 0, 0); + r = cr_interception(svm); if (irqchip_in_kernel(svm->vcpu.kvm)) { - svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; - return 1; + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); + return r; } if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) - return 1; + return r; kvm_run->exit_reason = KVM_EXIT_SET_TPR; return 0; } @@ -2562,14 +2804,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) switch (ecx) { case MSR_IA32_TSC: { - u64 tsc_offset; + struct vmcb *vmcb = get_host_vmcb(svm); - if (is_nested(svm)) - tsc_offset = svm->nested.hsave->control.tsc_offset; - else - tsc_offset = svm->vmcb->control.tsc_offset; - - *data = tsc_offset + native_read_tsc(); + *data = vmcb->control.tsc_offset + native_read_tsc(); break; } case MSR_STAR: @@ -2714,7 +2951,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) svm->vmcb->save.sysenter_esp = data; break; case MSR_IA32_DEBUGCTLMSR: - if (!svm_has(SVM_FEATURE_LBRV)) { + if (!boot_cpu_has(X86_FEATURE_LBRV)) { pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", __func__, data); break; @@ -2723,6 +2960,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) return 1; svm->vmcb->save.dbgctl = data; + mark_dirty(svm->vmcb, VMCB_LBR); if (data & (1ULL<<0)) svm_enable_lbrv(svm); else @@ -2775,6 +3013,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; + mark_dirty(svm->vmcb, VMCB_INTR); /* * If the user space waits to inject interrupts, exit as soon as * possible @@ -2797,31 +3036,31 @@ static int pause_interception(struct vcpu_svm *svm) } static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { - [SVM_EXIT_READ_CR0] = emulate_on_interception, - [SVM_EXIT_READ_CR3] = emulate_on_interception, - [SVM_EXIT_READ_CR4] = emulate_on_interception, - [SVM_EXIT_READ_CR8] = emulate_on_interception, + [SVM_EXIT_READ_CR0] = cr_interception, + [SVM_EXIT_READ_CR3] = cr_interception, + [SVM_EXIT_READ_CR4] = cr_interception, + [SVM_EXIT_READ_CR8] = cr_interception, [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, [SVM_EXIT_WRITE_CR0] = cr0_write_interception, - [SVM_EXIT_WRITE_CR3] = emulate_on_interception, - [SVM_EXIT_WRITE_CR4] = emulate_on_interception, + [SVM_EXIT_WRITE_CR3] = cr_interception, + [SVM_EXIT_WRITE_CR4] = cr_interception, [SVM_EXIT_WRITE_CR8] = cr8_write_interception, - [SVM_EXIT_READ_DR0] = emulate_on_interception, - [SVM_EXIT_READ_DR1] = emulate_on_interception, - [SVM_EXIT_READ_DR2] = emulate_on_interception, - [SVM_EXIT_READ_DR3] = emulate_on_interception, - [SVM_EXIT_READ_DR4] = emulate_on_interception, - [SVM_EXIT_READ_DR5] = emulate_on_interception, - [SVM_EXIT_READ_DR6] = emulate_on_interception, - [SVM_EXIT_READ_DR7] = emulate_on_interception, - [SVM_EXIT_WRITE_DR0] = emulate_on_interception, - [SVM_EXIT_WRITE_DR1] = emulate_on_interception, - [SVM_EXIT_WRITE_DR2] = emulate_on_interception, - [SVM_EXIT_WRITE_DR3] = emulate_on_interception, - [SVM_EXIT_WRITE_DR4] = emulate_on_interception, - 
[SVM_EXIT_WRITE_DR5] = emulate_on_interception, - [SVM_EXIT_WRITE_DR6] = emulate_on_interception, - [SVM_EXIT_WRITE_DR7] = emulate_on_interception, + [SVM_EXIT_READ_DR0] = dr_interception, + [SVM_EXIT_READ_DR1] = dr_interception, + [SVM_EXIT_READ_DR2] = dr_interception, + [SVM_EXIT_READ_DR3] = dr_interception, + [SVM_EXIT_READ_DR4] = dr_interception, + [SVM_EXIT_READ_DR5] = dr_interception, + [SVM_EXIT_READ_DR6] = dr_interception, + [SVM_EXIT_READ_DR7] = dr_interception, + [SVM_EXIT_WRITE_DR0] = dr_interception, + [SVM_EXIT_WRITE_DR1] = dr_interception, + [SVM_EXIT_WRITE_DR2] = dr_interception, + [SVM_EXIT_WRITE_DR3] = dr_interception, + [SVM_EXIT_WRITE_DR4] = dr_interception, + [SVM_EXIT_WRITE_DR5] = dr_interception, + [SVM_EXIT_WRITE_DR6] = dr_interception, + [SVM_EXIT_WRITE_DR7] = dr_interception, [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, @@ -2854,6 +3093,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_WBINVD] = emulate_on_interception, [SVM_EXIT_MONITOR] = invalid_op_interception, [SVM_EXIT_MWAIT] = invalid_op_interception, + [SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_NPF] = pf_interception, }; @@ -2864,10 +3104,10 @@ void dump_vmcb(struct kvm_vcpu *vcpu) struct vmcb_save_area *save = &svm->vmcb->save; pr_err("VMCB Control Area:\n"); - pr_err("cr_read: %04x\n", control->intercept_cr_read); - pr_err("cr_write: %04x\n", control->intercept_cr_write); - pr_err("dr_read: %04x\n", control->intercept_dr_read); - pr_err("dr_write: %04x\n", control->intercept_dr_write); + pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff); + pr_err("cr_write: %04x\n", control->intercept_cr >> 16); + pr_err("dr_read: %04x\n", control->intercept_dr & 0xffff); + pr_err("dr_write: %04x\n", control->intercept_dr >> 16); pr_err("exceptions: %08x\n", control->intercept_exceptions); pr_err("intercepts: %016llx\n", control->intercept); pr_err("pause filter count: %d\n", control->pause_filter_count); @@ -2950,15 +3190,23 @@ void dump_vmcb(struct kvm_vcpu *vcpu) } +static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) +{ + struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; + + *info1 = control->exit_info_1; + *info2 = control->exit_info_2; +} + static int handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 exit_code = svm->vmcb->control.exit_code; - trace_kvm_exit(exit_code, vcpu); + trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); - if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK)) + if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) vcpu->arch.cr3 = svm->vmcb->save.cr3; @@ -2970,7 +3218,7 @@ static int handle_exit(struct kvm_vcpu *vcpu) return 1; } - if (is_nested(svm)) { + if (is_guest_mode(vcpu)) { int vmexit; trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, @@ -3033,7 +3281,6 @@ static void pre_svm_run(struct vcpu_svm *svm) struct svm_cpu_data *sd = per_cpu(svm_data, cpu); - svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; /* FIXME: handle wraparound of asid_generation */ if (svm->asid_generation != sd->asid_generation) new_asid(svm, sd); @@ -3045,7 +3292,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; vcpu->arch.hflags |= HF_NMI_MASK; - svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET); + 
set_intercept(svm, INTERCEPT_IRET); ++vcpu->stat.nmi_injections; } @@ -3058,6 +3305,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) control->int_ctl &= ~V_INTR_PRIO_MASK; control->int_ctl |= V_IRQ_MASK | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); + mark_dirty(svm->vmcb, VMCB_INTR); } static void svm_set_irq(struct kvm_vcpu *vcpu) @@ -3077,14 +3325,14 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); - if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; if (irr == -1) return; if (tpr >= irr) - svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK; + set_cr_intercept(svm, INTERCEPT_CR8_WRITE); } static int svm_nmi_allowed(struct kvm_vcpu *vcpu) @@ -3112,10 +3360,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) if (masked) { svm->vcpu.arch.hflags |= HF_NMI_MASK; - svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET); + set_intercept(svm, INTERCEPT_IRET); } else { svm->vcpu.arch.hflags &= ~HF_NMI_MASK; - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET); + clr_intercept(svm, INTERCEPT_IRET); } } @@ -3131,7 +3379,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) ret = !!(vmcb->save.rflags & X86_EFLAGS_IF); - if (is_nested(svm)) + if (is_guest_mode(vcpu)) return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); return ret; @@ -3177,7 +3425,12 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) static void svm_flush_tlb(struct kvm_vcpu *vcpu) { - force_new_asid(vcpu); + struct vcpu_svm *svm = to_svm(vcpu); + + if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; + else + svm->asid_generation--; } static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) @@ -3188,10 +3441,10 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; - if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) { + if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; kvm_set_cr8(vcpu, cr8); } @@ -3202,7 +3455,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; - if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; cr8 = kvm_get_cr8(vcpu); @@ -3289,9 +3542,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) static void svm_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - u16 fs_selector; - u16 gs_selector; - u16 ldt_selector; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; @@ -3308,10 +3558,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) sync_lapic_to_cr8(vcpu); - save_host_msrs(vcpu); - savesegment(fs, fs_selector); - savesegment(gs, gs_selector); - ldt_selector = kvm_read_ldt(); svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); @@ -3389,19 +3635,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); - vcpu->arch.cr2 = svm->vmcb->save.cr2; - vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; - vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; - vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - - load_host_msrs(vcpu); - 
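
Annotation: svm_flush_tlb() now picks between two strategies: on hardware with flush-by-ASID it asks the next VMRUN to flush only this guest's ASID, otherwise it invalidates the cached ASID generation so new_asid() hands out a fresh one. Modelled roughly below (the FLUSH_ASID encoding shown is a placeholder, not the architectural value):

    #include <stdint.h>
    #include <stdbool.h>

    #define TLB_CONTROL_DO_NOTHING  0
    #define TLB_CONTROL_FLUSH_ASID  3   /* placeholder encoding */

    struct vcpu_tlb_state {
        uint8_t  tlb_ctl;           /* consumed by the next VMRUN        */
        uint32_t asid_generation;   /* compared against the per-CPU one  */
    };

    static void flush_guest_tlb(struct vcpu_tlb_state *v, bool has_flushbyasid)
    {
        if (has_flushbyasid)
            v->tlb_ctl = TLB_CONTROL_FLUSH_ASID;  /* flush just this ASID */
        else
            v->asid_generation--;                 /* force a brand-new ASID */
    }

The tlb_ctl field is reset to DO_NOTHING after each run (as the vcpu_run hunk does), so a flush request only ever applies to one VMRUN.
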
kvm_load_ldt(ldt_selector); - loadsegment(fs, fs_selector); #ifdef CONFIG_X86_64 - load_gs_index(gs_selector); - wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); + wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else - loadsegment(gs, gs_selector); + loadsegment(fs, svm->host.fs); #endif reload_tss(vcpu); @@ -3410,10 +3647,21 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) stgi(); + vcpu->arch.cr2 = svm->vmcb->save.cr2; + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; + vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; + sync_cr8_to_lapic(vcpu); svm->next_rip = 0; + svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; + + /* if exit due to PF check for async PF */ + if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) + svm->apf_reason = kvm_read_and_reset_pf_reason(); + if (npt_enabled) { vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); @@ -3426,6 +3674,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + MC_VECTOR)) svm_handle_mce(svm); + + mark_all_clean(svm->vmcb); } #undef R @@ -3435,7 +3685,8 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.cr3 = root; - force_new_asid(vcpu); + mark_dirty(svm->vmcb, VMCB_CR); + svm_flush_tlb(vcpu); } static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) @@ -3443,11 +3694,13 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; + mark_dirty(svm->vmcb, VMCB_NPT); /* Also sync guest cr3 here in case we live migrate */ - svm->vmcb->save.cr3 = vcpu->arch.cr3; + svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); + mark_dirty(svm->vmcb, VMCB_CR); - force_new_asid(vcpu); + svm_flush_tlb(vcpu); } static int is_disabled(void) @@ -3494,10 +3747,6 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu) static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { switch (func) { - case 0x00000001: - /* Mask out xsave bit as long as it is not supported by SVM */ - entry->ecx &= ~(bit(X86_FEATURE_XSAVE)); - break; case 0x80000001: if (nested) entry->ecx |= (1 << 2); /* Set SVM bit */ @@ -3511,7 +3760,7 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) additional features */ /* Support next_rip if host supports it */ - if (svm_has(SVM_FEATURE_NRIP)) + if (boot_cpu_has(X86_FEATURE_NRIPS)) entry->edx |= SVM_FEATURE_NRIP; /* Support NPT for the guest if enabled */ @@ -3571,6 +3820,7 @@ static const struct trace_print_flags svm_exit_reasons_str[] = { { SVM_EXIT_WBINVD, "wbinvd" }, { SVM_EXIT_MONITOR, "monitor" }, { SVM_EXIT_MWAIT, "mwait" }, + { SVM_EXIT_XSETBV, "xsetbv" }, { SVM_EXIT_NPF, "npf" }, { -1, NULL } }; @@ -3594,9 +3844,7 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR; - if (is_nested(svm)) - svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR; + set_exception_intercept(svm, NM_VECTOR); update_cr0_intercept(svm); } @@ -3627,6 +3875,7 @@ static struct kvm_x86_ops svm_x86_ops = { .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, + .decache_cr3 = svm_decache_cr3, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, @@ 
-3667,7 +3916,9 @@ static struct kvm_x86_ops svm_x86_ops = { .get_tdp_level = get_npt_level, .get_mt_mask = svm_get_mt_mask, + .get_exit_info = svm_get_exit_info, .exit_reasons_str = svm_exit_reasons_str, + .get_lpage_level = svm_get_lpage_level, .cpuid_update = svm_cpuid_update, diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index a6544b8e7c0f..1357d7cf4ec8 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -178,27 +178,36 @@ TRACE_EVENT(kvm_apic, #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) +#define KVM_ISA_VMX 1 +#define KVM_ISA_SVM 2 + /* * Tracepoint for kvm guest exit: */ TRACE_EVENT(kvm_exit, - TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu), - TP_ARGS(exit_reason, vcpu), + TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), + TP_ARGS(exit_reason, vcpu, isa), TP_STRUCT__entry( __field( unsigned int, exit_reason ) __field( unsigned long, guest_rip ) + __field( u32, isa ) + __field( u64, info1 ) + __field( u64, info2 ) ), TP_fast_assign( __entry->exit_reason = exit_reason; __entry->guest_rip = kvm_rip_read(vcpu); + __entry->isa = isa; + kvm_x86_ops->get_exit_info(vcpu, &__entry->info1, + &__entry->info2); ), - TP_printk("reason %s rip 0x%lx", + TP_printk("reason %s rip 0x%lx info %llx %llx", ftrace_print_symbols_seq(p, __entry->exit_reason, kvm_x86_ops->exit_reasons_str), - __entry->guest_rip) + __entry->guest_rip, __entry->info1, __entry->info2) ); /* diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 81fcbe9515c5..bf89ec2cfb82 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -69,6 +69,9 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); static int __read_mostly vmm_exclusive = 1; module_param(vmm_exclusive, bool, S_IRUGO); +static int __read_mostly yield_on_hlt = 1; +module_param(yield_on_hlt, bool, S_IRUGO); + #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) #define KVM_GUEST_CR0_MASK \ @@ -177,6 +180,7 @@ static int init_rmode(struct kvm *kvm); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); +static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -188,6 +192,8 @@ static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; +static bool cpu_has_load_ia32_efer; + static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); @@ -472,7 +478,7 @@ static void vmcs_clear(struct vmcs *vmcs) u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" - : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) + : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", @@ -485,7 +491,7 @@ static void vmcs_load(struct vmcs *vmcs) u8 error; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" - : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) + : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", @@ -565,10 +571,10 @@ static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa) static unsigned long vmcs_readl(unsigned long field) { - unsigned long value; + unsigned long value = 0; asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) - : "=a"(value) : "d"(field) : "cc"); 
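
Annotation: the kvm_exit tracepoint change records which ISA produced the exit plus two ISA-specific qualifier words, filled in through the new per-backend get_exit_info() callback (EXIT_QUALIFICATION and VM_EXIT_INTR_INFO on VMX, exit_info_1/2 on SVM). A plain-C model of that shape, without the TRACE_EVENT machinery:

    #include <stdint.h>

    #define ISA_VMX 1
    #define ISA_SVM 2

    struct exit_trace_rec {
        unsigned int  exit_reason;
        unsigned long guest_rip;
        uint32_t      isa;
        uint64_t      info1, info2;
    };

    /* Stand-in for the kvm_x86_ops->get_exit_info callback. */
    typedef void (*get_exit_info_fn)(uint64_t *info1, uint64_t *info2);

    static struct exit_trace_rec record_exit(unsigned int reason,
                                             unsigned long rip, uint32_t isa,
                                             get_exit_info_fn get_exit_info)
    {
        struct exit_trace_rec r = { reason, rip, isa, 0, 0 };
        get_exit_info(&r.info1, &r.info2);
        return r;
    }
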
+ : "+a"(value) : "d"(field) : "cc"); return value; } @@ -661,6 +667,12 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) unsigned i; struct msr_autoload *m = &vmx->msr_autoload; + if (msr == MSR_EFER && cpu_has_load_ia32_efer) { + vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); + vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); + return; + } + for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; @@ -680,6 +692,14 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, unsigned i; struct msr_autoload *m = &vmx->msr_autoload; + if (msr == MSR_EFER && cpu_has_load_ia32_efer) { + vmcs_write64(GUEST_IA32_EFER, guest_val); + vmcs_write64(HOST_IA32_EFER, host_val); + vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); + vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); + return; + } + for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; @@ -1009,6 +1029,17 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) vmx_set_interrupt_shadow(vcpu, 0); } +static void vmx_clear_hlt(struct kvm_vcpu *vcpu) +{ + /* Ensure that we clear the HLT state in the VMCS. We don't need to + * explicitly skip the instruction because if the HLT state is set, then + * the instruction is already executing and RIP has already been + * advanced. */ + if (!yield_on_hlt && + vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) + vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); +} + static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) @@ -1035,6 +1066,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); + vmx_clear_hlt(vcpu); } static bool vmx_rdtscp_supported(void) @@ -1305,8 +1337,11 @@ static __init int vmx_disabled_by_bios(void) && tboot_enabled()) return 1; if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) - && !tboot_enabled()) + && !tboot_enabled()) { + printk(KERN_WARNING "kvm: disable TXT in the BIOS or " + " activate TXT before enabling KVM\n"); return 1; + } } return 0; @@ -1400,6 +1435,14 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, return 0; } +static __init bool allow_1_setting(u32 msr, u32 ctl) +{ + u32 vmx_msr_low, vmx_msr_high; + + rdmsr(msr, vmx_msr_low, vmx_msr_high); + return vmx_msr_high & ctl; +} + static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; @@ -1416,7 +1459,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) &_pin_based_exec_control) < 0) return -EIO; - min = CPU_BASED_HLT_EXITING | + min = #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | @@ -1429,6 +1472,10 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING | CPU_BASED_INVLPG_EXITING; + + if (yield_on_hlt) + min |= CPU_BASED_HLT_EXITING; + opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; @@ -1510,6 +1557,12 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; + cpu_has_load_ia32_efer = + allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, + VM_ENTRY_LOAD_IA32_EFER) + && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, + VM_EXIT_LOAD_IA32_EFER); + return 0; } @@ -1683,9 +1736,13 @@ static void fix_rmode_seg(int seg, struct 
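
Annotation: the new allow_1_setting() decides whether a VM-entry/exit control may be set to 1. For the VMX capability MSRs the low 32 bits report the allowed 0-settings and the high 32 bits the allowed 1-settings, which is exactly what the rdmsr in the hunk splits apart; cpu_has_load_ia32_efer is then the AND of the entry and exit capabilities for the LOAD_IA32_EFER bits. A user-space sketch, taking the already-read MSR value as a parameter instead of calling rdmsr():

    #include <stdint.h>
    #include <stdbool.h>

    /* capability_msr_value is the raw 64-bit content of an
     * IA32_VMX_*_CTLS capability MSR. */
    static bool allow_1_setting(uint64_t capability_msr_value, uint32_t ctl)
    {
        uint32_t allowed_1 = (uint32_t)(capability_msr_value >> 32);
        return (allowed_1 & ctl) != 0;
    }

    static bool has_load_ia32_efer(uint64_t entry_caps, uint64_t exit_caps,
                                   uint32_t entry_bit, uint32_t exit_bit)
    {
        return allow_1_setting(entry_caps, entry_bit) &&
               allow_1_setting(exit_caps, exit_bit);
    }

When the control is available, EFER can be switched directly by the CPU at VM entry/exit instead of going through the MSR autoload list, which is what the add_atomic_switch_msr()/clear_atomic_switch_msr() short-circuits above exploit.
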
kvm_save_segment *save) save->limit = vmcs_read32(sf->limit); save->ar = vmcs_read32(sf->ar_bytes); vmcs_write16(sf->selector, save->base >> 4); - vmcs_write32(sf->base, save->base & 0xfffff); + vmcs_write32(sf->base, save->base & 0xffff0); vmcs_write32(sf->limit, 0xffff); vmcs_write32(sf->ar_bytes, 0xf3); + if (save->base & 0xf) + printk_once(KERN_WARNING "kvm: segment base is not paragraph" + " aligned when entering protected mode (seg=%d)", + seg); } static void enter_rmode(struct kvm_vcpu *vcpu) @@ -1814,6 +1871,13 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } +static void vmx_decache_cr3(struct kvm_vcpu *vcpu) +{ + if (enable_ept && is_paging(vcpu)) + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); +} + static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; @@ -1857,6 +1921,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { + vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, @@ -1937,7 +2002,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (enable_ept) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); - guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : + guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) : vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } @@ -2725,7 +2790,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); - vmcs_write32(GUEST_ACTIVITY_STATE, 0); + vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); @@ -2787,6 +2852,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu) return; } + if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { + enable_irq_window(vcpu); + return; + } cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); @@ -2814,6 +2883,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu) } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); + vmx_clear_hlt(vcpu); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) @@ -2841,6 +2911,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); + vmx_clear_hlt(vcpu); } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) @@ -2849,7 +2920,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) return 0; return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & - (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI)); + (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI + | GUEST_INTR_STATE_NMI)); } static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) @@ -2910,7 +2982,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) - if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE) + if (emulate_instruction(vcpu, 0) == EMULATE_DONE) return 1; /* * Forward all other exceptions that are valid in real mode. 
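
Annotation: the fix_rmode_seg() change from "save->base & 0xfffff" to "save->base & 0xffff0" fixes a real inconsistency: a real-mode segment base is always selector * 16, so only 16-byte-aligned bases can be reproduced through the selector that was just written as base >> 4; keeping bits 3:0 set made the written base disagree with the selector, hence the new printk_once warning. A tiny demonstration of the lost low bits:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t base = 0x1234f;               /* not paragraph aligned   */
        uint16_t selector = (uint16_t)(base >> 4);   /* 0x1234            */
        uint32_t reachable = base & 0xffff0;   /* what the selector implies */

        printf("selector=%#x base=%#x reachable=%#x lost=%#x\n",
               selector, base, reachable, base & 0xf);
        return 0;
    }
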
@@ -3007,7 +3079,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) } if (is_invalid_opcode(intr_info)) { - er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD); + er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -3026,7 +3098,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) if (kvm_event_needs_reinjection(vcpu)) kvm_mmu_unprotect_page_virt(vcpu, cr2); - return kvm_mmu_page_fault(vcpu, cr2, error_code); + return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); } if (vmx->rmode.vm86_active && @@ -3098,7 +3170,7 @@ static int handle_io(struct kvm_vcpu *vcpu) ++vcpu->stat.io_exits; if (string || in) - return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; + return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; @@ -3118,14 +3190,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) hypercall[2] = 0xc1; } -static void complete_insn_gp(struct kvm_vcpu *vcpu, int err) -{ - if (err) - kvm_inject_gp(vcpu, 0); - else - skip_emulated_instruction(vcpu); -} - static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; @@ -3143,21 +3207,21 @@ static int handle_cr(struct kvm_vcpu *vcpu) switch (cr) { case 0: err = kvm_set_cr0(vcpu, val); - complete_insn_gp(vcpu, err); + kvm_complete_insn_gp(vcpu, err); return 1; case 3: err = kvm_set_cr3(vcpu, val); - complete_insn_gp(vcpu, err); + kvm_complete_insn_gp(vcpu, err); return 1; case 4: err = kvm_set_cr4(vcpu, val); - complete_insn_gp(vcpu, err); + kvm_complete_insn_gp(vcpu, err); return 1; case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = kvm_register_read(vcpu, reg); - kvm_set_cr8(vcpu, cr8); - skip_emulated_instruction(vcpu); + err = kvm_set_cr8(vcpu, cr8); + kvm_complete_insn_gp(vcpu, err); if (irqchip_in_kernel(vcpu->kvm)) return 1; if (cr8_prev <= cr8) @@ -3176,8 +3240,9 @@ static int handle_cr(struct kvm_vcpu *vcpu) case 1: /*mov from cr*/ switch (cr) { case 3: - kvm_register_write(vcpu, reg, vcpu->arch.cr3); - trace_kvm_cr_read(cr, vcpu->arch.cr3); + val = kvm_read_cr3(vcpu); + kvm_register_write(vcpu, reg, val); + trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; case 8: @@ -3349,6 +3414,11 @@ static int handle_vmx_insn(struct kvm_vcpu *vcpu) return 1; } +static int handle_invd(struct kvm_vcpu *vcpu) +{ + return emulate_instruction(vcpu, 0) == EMULATE_DONE; +} + static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -3377,7 +3447,7 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu) static int handle_apic_access(struct kvm_vcpu *vcpu) { - return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; + return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_task_switch(struct kvm_vcpu *vcpu) @@ -3476,7 +3546,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); - return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); + return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0); } static u64 ept_rsvd_mask(u64 spte, int level) @@ -3592,7 +3662,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF)) return handle_interrupt_window(&vmx->vcpu); - err = emulate_instruction(vcpu, 0, 0, 0); + err = emulate_instruction(vcpu, 0); if (err == EMULATE_DO_MMIO) { ret = 0; @@ -3649,6 +3719,7 @@ 
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, + [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmx_insn, @@ -3676,6 +3747,12 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); +static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) +{ + *info1 = vmcs_readl(EXIT_QUALIFICATION); + *info2 = vmcs_read32(VM_EXIT_INTR_INFO); +} + /* * The guest has exited. See if we can fix it or if we need userspace * assistance. @@ -3686,17 +3763,12 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) u32 exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; - trace_kvm_exit(exit_reason, vcpu); + trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); /* If guest state is invalid, start emulating */ if (vmx->emulation_required && emulate_invalid_guest_state) return handle_invalid_guest_state(vcpu); - /* Access CR3 don't cause VMExit in paging mode, so we need - * to sync with guest real CR3. */ - if (enable_ept && is_paging(vcpu)) - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason @@ -4013,7 +4085,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) ); vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) - | (1 << VCPU_EXREG_PDPTR)); + | (1 << VCPU_EXREG_PDPTR) + | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); @@ -4280,6 +4353,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, + .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, @@ -4320,7 +4394,9 @@ static struct kvm_x86_ops vmx_x86_ops = { .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, + .get_exit_info = vmx_get_exit_info, .exit_reasons_str = vmx_exit_reasons_str, + .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, @@ -4396,8 +4472,6 @@ static int __init vmx_init(void) if (enable_ept) { bypass_guest_pf = 0; - kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | - VMX_EPT_WRITABLE_MASK); kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); kvm_enable_tdp(); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 46a368cb651e..bcc0efce85bf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -155,6 +156,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { u64 __read_mostly host_xcr0; +static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) +{ + int i; + for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) + vcpu->arch.apf.gfns[i] = ~0; +} + static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; @@ -326,23 +334,28 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) } EXPORT_SYMBOL_GPL(kvm_requeue_exception); -void kvm_inject_page_fault(struct kvm_vcpu *vcpu) +void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { - unsigned error_code = 
vcpu->arch.fault.error_code; + if (err) + kvm_inject_gp(vcpu, 0); + else + kvm_x86_ops->skip_emulated_instruction(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); +void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) +{ ++vcpu->stat.pf_guest; - vcpu->arch.cr2 = vcpu->arch.fault.address; - kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); + vcpu->arch.cr2 = fault->address; + kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } -void kvm_propagate_fault(struct kvm_vcpu *vcpu) +void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { - if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested) - vcpu->arch.nested_mmu.inject_page_fault(vcpu); + if (mmu_is_nested(vcpu) && !fault->nested_page_fault) + vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else - vcpu->arch.mmu.inject_page_fault(vcpu); - - vcpu->arch.fault.nested = false; + vcpu->arch.mmu.inject_page_fault(vcpu, fault); } void kvm_inject_nmi(struct kvm_vcpu *vcpu) @@ -460,8 +473,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu) (unsigned long *)&vcpu->arch.regs_avail)) return true; - gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT; - offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1); + gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; + offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) @@ -506,12 +519,15 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, - vcpu->arch.cr3)) + kvm_read_cr3(vcpu))) return 1; } kvm_x86_ops->set_cr0(vcpu, cr0); + if ((cr0 ^ old_cr0) & X86_CR0_PG) + kvm_clear_async_pf_completion_queue(vcpu); + if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; @@ -595,7 +611,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) - && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) + && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, + kvm_read_cr3(vcpu))) return 1; if (cr4 & X86_CR4_VMXE) @@ -615,7 +632,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { - if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) { + if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; @@ -650,12 +667,13 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) return 1; vcpu->arch.cr3 = cr3; + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); vcpu->arch.mmu.new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); -int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) +int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; @@ -665,12 +683,6 @@ int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) vcpu->arch.cr8 = cr8; return 0; } - -void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) -{ - if (__kvm_set_cr8(vcpu, cr8)) - kvm_inject_gp(vcpu, 0); -} EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) @@ -775,12 +787,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr); * kvm-specific. Those are put in the beginning of the list. 
*/ -#define KVM_SAVE_MSRS_BEGIN 7 +#define KVM_SAVE_MSRS_BEGIN 8 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, - HV_X64_MSR_APIC_ASSIST_PAGE, + HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 @@ -830,7 +842,6 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer) kvm_x86_ops->set_efer(vcpu, efer); vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; - kvm_mmu_reset_context(vcpu); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) @@ -1418,6 +1429,30 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) return 0; } +static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) +{ + gpa_t gpa = data & ~0x3f; + + /* Bits 2:5 are resrved, Should be zero */ + if (data & 0x3c) + return 1; + + vcpu->arch.apf.msr_val = data; + + if (!(data & KVM_ASYNC_PF_ENABLED)) { + kvm_clear_async_pf_completion_queue(vcpu); + kvm_async_pf_hash_reset(vcpu); + return 0; + } + + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + return 1; + + vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); + kvm_async_pf_wakeup_all(vcpu); + return 0; +} + int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { @@ -1499,6 +1534,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) } break; } + case MSR_KVM_ASYNC_PF_EN: + if (kvm_pv_enable_async_pf(vcpu, data)) + return 1; + break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: @@ -1775,6 +1814,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; + case MSR_KVM_ASYNC_PF_EN: + data = vcpu->arch.apf.msr_val; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: @@ -1904,6 +1946,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: + case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: @@ -1922,6 +1965,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: + case KVM_CAP_ASYNC_PF: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -2185,6 +2229,11 @@ out: return r; } +static void cpuid_mask(u32 *word, int wordnum) +{ + *word &= boot_cpu_data.x86_capability[wordnum]; +} + static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { @@ -2259,7 +2308,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, break; case 1: entry->edx &= kvm_supported_word0_x86_features; + cpuid_mask(&entry->edx, 0); entry->ecx &= kvm_supported_word4_x86_features; + cpuid_mask(&entry->ecx, 4); /* we support x2apic emulation even if host does not support * it since we emulate x2apic in software */ entry->ecx |= F(X2APIC); @@ -2350,7 +2401,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, break; case 0x80000001: entry->edx &= kvm_supported_word1_x86_features; + cpuid_mask(&entry->edx, 1); entry->ecx &= kvm_supported_word6_x86_features; + cpuid_mask(&entry->ecx, 6); break; } @@ -3169,20 +3222,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memslots *slots, *old_slots; unsigned long *dirty_bitmap; - r = 
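
Annotation: kvm_pv_enable_async_pf() defines the MSR_KVM_ASYNC_PF_EN layout: the 64-byte-aligned GPA of the per-vCPU shared area in the upper bits, bit 0 as the enable flag (KVM_ASYNC_PF_ENABLED), bit 1 as "deliver even in kernel mode" (KVM_ASYNC_PF_SEND_ALWAYS), and bits 5:2 reserved, which is what the "data & 0x3c" check rejects. A sketch of the same decode with local macro names:

    #include <stdint.h>
    #include <stdbool.h>

    #define APF_ENABLED      (1ULL << 0)
    #define APF_SEND_ALWAYS  (1ULL << 1)
    #define APF_RESERVED     0x3cULL          /* bits 5:2 must be zero */

    struct async_pf_cfg { bool valid, enabled, send_user_only; uint64_t gpa; };

    static struct async_pf_cfg decode_async_pf_msr(uint64_t data)
    {
        struct async_pf_cfg c = { 0 };

        c.valid          = (data & APF_RESERVED) == 0;
        c.enabled        = (data & APF_ENABLED) != 0;
        c.send_user_only = !(data & APF_SEND_ALWAYS);
        c.gpa            = data & ~0x3fULL;   /* 64-byte aligned area */
        return c;
    }

Writing the MSR with the enable bit clear tears the setup down again (queue cleared, hash reset), matching the early-return path in the hunk.
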
-ENOMEM; - dirty_bitmap = vmalloc(n); - if (!dirty_bitmap) - goto out; + dirty_bitmap = memslot->dirty_bitmap_head; + if (memslot->dirty_bitmap == dirty_bitmap) + dirty_bitmap += n / sizeof(long); memset(dirty_bitmap, 0, n); r = -ENOMEM; slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); - if (!slots) { - vfree(dirty_bitmap); + if (!slots) goto out; - } memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; + slots->generation++; old_slots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); @@ -3195,11 +3246,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, spin_unlock(&kvm->mmu_lock); r = -EFAULT; - if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { - vfree(dirty_bitmap); + if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) goto out; - } - vfree(dirty_bitmap); } else { r = -EFAULT; if (clear_user(log->dirty_bitmap, n)) @@ -3266,8 +3314,10 @@ long kvm_arch_vm_ioctl(struct file *filp, if (vpic) { r = kvm_ioapic_init(kvm); if (r) { + mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); + mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } @@ -3278,10 +3328,12 @@ long kvm_arch_vm_ioctl(struct file *filp, smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { + mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); + mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); @@ -3557,63 +3609,63 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { gpa_t t_gpa; - u32 error; + struct x86_exception exception; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; - t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error); - if (t_gpa == UNMAPPED_GVA) - vcpu->arch.fault.nested = true; + t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); return t_gpa; } -gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); + return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } - gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) + gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); + return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } -gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
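
Annotation: the dirty-log hunk stops vmalloc'ing a scratch bitmap on every KVM_GET_DIRTY_LOG and instead flips between the two halves of a preallocated double buffer (dirty_bitmap_head), publishing the freshly cleared half through a new memslots copy and a bumped generation. A rough model of the swap itself (the RCU publication and copy_to_user are left out):

    #include <stddef.h>
    #include <string.h>

    struct memslot_sketch {
        unsigned long *dirty_bitmap;       /* half currently written by the MMU */
        unsigned long *dirty_bitmap_head;  /* start of the double buffer        */
        size_t         bytes;              /* size of one half                  */
    };

    /* Returns the half that was in use (to hand to userspace) and installs
     * the other, cleared half as the active bitmap. */
    static unsigned long *swap_dirty_bitmap(struct memslot_sketch *slot)
    {
        unsigned long *fresh = slot->dirty_bitmap_head;
        unsigned long *old;

        if (slot->dirty_bitmap == fresh)          /* pick the other half */
            fresh += slot->bytes / sizeof(unsigned long);

        memset(fresh, 0, slot->bytes);
        old = slot->dirty_bitmap;
        slot->dirty_bitmap = fresh;               /* publish the new half */
        return old;
    }

This removes an allocation (and a possible failure path) from a hot ioctl; the trade-off is keeping both halves resident for the life of the memslot.
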
PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); + return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ -gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, + struct x86_exception *exception) { - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error); + return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, - u32 *error) + struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, - error); + exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; - if (gpa == UNMAPPED_GVA) { - r = X86EMUL_PROPAGATE_FAULT; - goto out; - } + if (gpa == UNMAPPED_GVA) + return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; @@ -3630,31 +3682,35 @@ out: /* used for instruction fetching */ static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu, u32 *error) + struct kvm_vcpu *vcpu, + struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, - access | PFERR_FETCH_MASK, error); + access | PFERR_FETCH_MASK, + exception); } static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu, u32 *error) + struct kvm_vcpu *vcpu, + struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, - error); + exception); } static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu, u32 *error) + struct kvm_vcpu *vcpu, + struct x86_exception *exception) { - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error); + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } static int kvm_write_guest_virt_system(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, - u32 *error) + struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; @@ -3662,15 +3718,13 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val, while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, - error); + exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; - if (gpa == UNMAPPED_GVA) { - r = X86EMUL_PROPAGATE_FAULT; - goto out; - } + if (gpa == UNMAPPED_GVA) + return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; @@ -3688,7 +3742,7 @@ out: static int emulator_read_emulated(unsigned long addr, void *val, unsigned int bytes, - unsigned int *error_code, + struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; @@ -3701,7 +3755,7 @@ static int emulator_read_emulated(unsigned long addr, return X86EMUL_CONTINUE; } - gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code); + gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -3710,8 +3764,8 @@ static int emulator_read_emulated(unsigned long addr, if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; - if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL) - == X86EMUL_CONTINUE) + if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception) + == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; mmio: @@ -3735,7 +3789,7 @@ mmio: } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, - const void *val, int bytes) + const void *val, int bytes) { int ret; @@ -3749,12 +3803,12 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, static int emulator_write_emulated_onepage(unsigned long addr, const void *val, unsigned int bytes, - unsigned int *error_code, + struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; - gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code); + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -3787,7 +3841,7 @@ mmio: int emulator_write_emulated(unsigned long addr, const void *val, unsigned int bytes, - unsigned int *error_code, + struct x86_exception *exception, struct kvm_vcpu *vcpu) { /* Crossing a page boundary? 
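If so, emulate it in two pieces: first the bytes up to the page edge, then the remainder on the following page.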
*/ @@ -3795,7 +3849,7 @@ int emulator_write_emulated(unsigned long addr, int rc, now; now = -addr & ~PAGE_MASK; - rc = emulator_write_emulated_onepage(addr, val, now, error_code, + rc = emulator_write_emulated_onepage(addr, val, now, exception, vcpu); if (rc != X86EMUL_CONTINUE) return rc; @@ -3803,7 +3857,7 @@ int emulator_write_emulated(unsigned long addr, val += now; bytes -= now; } - return emulator_write_emulated_onepage(addr, val, bytes, error_code, + return emulator_write_emulated_onepage(addr, val, bytes, exception, vcpu); } @@ -3821,7 +3875,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, const void *old, const void *new, unsigned int bytes, - unsigned int *error_code, + struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; @@ -3879,7 +3933,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); - return emulator_write_emulated(addr, new, bytes, error_code, vcpu); + return emulator_write_emulated(addr, new, bytes, exception, vcpu); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) @@ -3904,7 +3958,7 @@ static int emulator_pio_in_emulated(int size, unsigned short port, void *val, if (vcpu->arch.pio.count) goto data_avail; - trace_kvm_pio(0, port, size, 1); + trace_kvm_pio(0, port, size, count); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 1; @@ -3932,7 +3986,7 @@ static int emulator_pio_out_emulated(int size, unsigned short port, const void *val, unsigned int count, struct kvm_vcpu *vcpu) { - trace_kvm_pio(1, port, size, 1); + trace_kvm_pio(1, port, size, count); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 0; @@ -3973,13 +4027,15 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { - preempt_disable(); + int cpu = get_cpu(); + + cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); - preempt_enable(); + put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); - } - wbinvd(); + } else + wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); @@ -4019,7 +4075,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu) value = vcpu->arch.cr2; break; case 3: - value = vcpu->arch.cr3; + value = kvm_read_cr3(vcpu); break; case 4: value = kvm_read_cr4(vcpu); @@ -4053,7 +4109,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu) res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: - res = __kvm_set_cr8(vcpu, val & 0xfUL); + res = kvm_set_cr8(vcpu, val); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); @@ -4206,12 +4262,13 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) static void inject_emulated_exception(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; - if (ctxt->exception == PF_VECTOR) - kvm_propagate_fault(vcpu); - else if (ctxt->error_code_valid) - kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code); + if (ctxt->exception.vector == PF_VECTOR) + kvm_propagate_fault(vcpu, &ctxt->exception); + else if (ctxt->exception.error_code_valid) + kvm_queue_exception_e(vcpu, ctxt->exception.vector, + ctxt->exception.error_code); else - kvm_queue_exception(vcpu, ctxt->exception); + kvm_queue_exception(vcpu, ctxt->exception.vector); } static void init_emulate_ctxt(struct kvm_vcpu *vcpu) @@ -4267,13 +4324,19 @@ EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int 
handle_emulation_failure(struct kvm_vcpu *vcpu) { + int r = EMULATE_DONE; + ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + if (!is_guest_mode(vcpu)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + r = EMULATE_FAIL; + } kvm_queue_exception(vcpu, UD_VECTOR); - return EMULATE_FAIL; + + return r; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) @@ -4302,10 +4365,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) return false; } -int emulate_instruction(struct kvm_vcpu *vcpu, - unsigned long cr2, - u16 error_code, - int emulation_type) +int x86_emulate_instruction(struct kvm_vcpu *vcpu, + unsigned long cr2, + int emulation_type, + void *insn, + int insn_len) { int r; struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; @@ -4323,10 +4387,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu, if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); vcpu->arch.emulate_ctxt.interruptibility = 0; - vcpu->arch.emulate_ctxt.exception = -1; + vcpu->arch.emulate_ctxt.have_exception = false; vcpu->arch.emulate_ctxt.perm_ok = false; - r = x86_decode_insn(&vcpu->arch.emulate_ctxt); + r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len); if (r == X86EMUL_PROPAGATE_FAULT) goto done; @@ -4389,7 +4453,7 @@ restart: } done: - if (vcpu->arch.emulate_ctxt.exception >= 0) { + if (vcpu->arch.emulate_ctxt.have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { @@ -4413,7 +4477,7 @@ done: return r; } -EXPORT_SYMBOL_GPL(emulate_instruction); +EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { @@ -4653,7 +4717,6 @@ int kvm_arch_init(void *opaque) kvm_x86_ops = ops; kvm_mmu_set_nonpresent_ptes(0ull, 0ull); - kvm_mmu_set_base_ptes(PT_PRESENT_MASK); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); @@ -5116,6 +5179,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } + if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { + /* Page is swapped out. 
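The async page fault could not be delivered to the guest, so there is nothing useful for this vcpu to run until the page is resident again.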
Do synthetic halt */ + vcpu->arch.apf.halted = true; + r = 1; + goto out; + } } r = kvm_mmu_reload(vcpu); @@ -5244,7 +5313,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) r = 1; while (r > 0) { - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) + if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); @@ -5257,6 +5327,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: + vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_SIPI_RECEIVED: default: @@ -5278,6 +5349,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } + + kvm_check_async_pf_completion(vcpu); + if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; @@ -5302,6 +5376,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int r; sigset_t sigsaved; + if (!tsk_used_math(current) && init_fpu(current)) + return -ENOMEM; + if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -5313,8 +5390,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } /* re-sync apic's tpr */ - if (!irqchip_in_kernel(vcpu->kvm)) - kvm_set_cr8(vcpu, kvm_run->cr8); + if (!irqchip_in_kernel(vcpu->kvm)) { + if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { + r = -EINVAL; + goto out; + } + } if (vcpu->arch.pio.count || vcpu->mmio_needed) { if (vcpu->mmio_needed) { @@ -5323,7 +5404,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->mmio_needed = 0; } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE); + r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) { r = 0; @@ -5436,7 +5517,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; - sregs->cr3 = vcpu->arch.cr3; + sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; @@ -5504,8 +5585,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; - mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; + mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); @@ -5522,7 +5604,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, if (sregs->cr4 & X86_CR4_OSXSAVE) update_cpuid(vcpu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { - load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); + load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } @@ -5773,6 +5855,8 @@ free_vcpu: void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { + vcpu->arch.apf.msr_val = 0; + vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); @@ -5792,6 +5876,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) vcpu->arch.dr7 = DR7_FIXED_1; kvm_make_request(KVM_REQ_EVENT, vcpu); + vcpu->arch.apf.msr_val = 0; + + kvm_clear_async_pf_completion_queue(vcpu); + kvm_async_pf_hash_reset(vcpu); + vcpu->arch.apf.halted = false; return kvm_x86_ops->vcpu_reset(vcpu); } @@ -5881,6 +5970,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 
GFP_KERNEL)) goto fail_free_mce_banks; + kvm_async_pf_hash_reset(vcpu); + return 0; fail_free_mce_banks: kfree(vcpu->arch.mce_banks); @@ -5906,13 +5997,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); } -struct kvm *kvm_arch_create_vm(void) +int kvm_arch_init_vm(struct kvm *kvm) { - struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); - - if (!kvm) - return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); @@ -5921,7 +6007,7 @@ struct kvm *kvm_arch_create_vm(void) spin_lock_init(&kvm->arch.tsc_write_lock); - return kvm; + return 0; } static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) @@ -5939,8 +6025,10 @@ static void kvm_free_vcpus(struct kvm *kvm) /* * Unpin any mmu pages first. */ - kvm_for_each_vcpu(i, vcpu, kvm) + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); + } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); @@ -5964,13 +6052,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); - kvm_free_physmem(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); - cleanup_srcu_struct(&kvm->srcu); - kfree(kvm); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -6051,7 +6136,9 @@ void kvm_arch_flush_shadow(struct kvm *kvm) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { - return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted) + || !list_empty_careful(&vcpu->async_pf.done) || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED || vcpu->arch.nmi_pending || (kvm_arch_interrupt_allowed(vcpu) && @@ -6110,6 +6197,147 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) } EXPORT_SYMBOL_GPL(kvm_set_rflags); +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) +{ + int r; + + if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || + is_error_page(work->page)) + return; + + r = kvm_mmu_reload(vcpu); + if (unlikely(r)) + return; + + if (!vcpu->arch.mmu.direct_map && + work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) + return; + + vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); +} + +static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) +{ + return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); +} + +static inline u32 kvm_async_pf_next_probe(u32 key) +{ + return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); +} + +static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + u32 key = kvm_async_pf_hash_fn(gfn); + + while (vcpu->arch.apf.gfns[key] != ~0) + key = kvm_async_pf_next_probe(key); + + vcpu->arch.apf.gfns[key] = gfn; +} + +static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + int i; + u32 key = kvm_async_pf_hash_fn(gfn); + + for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && + (vcpu->arch.apf.gfns[key] != gfn && + vcpu->arch.apf.gfns[key] != ~0); i++) + key = kvm_async_pf_next_probe(key); + + return key; +} + +bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; +} + +static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + u32 i, j, k; + + i = j = kvm_async_pf_gfn_slot(vcpu, gfn); + while (true) { + vcpu->arch.apf.gfns[i] = ~0; + do { + j = 
kvm_async_pf_next_probe(j); + if (vcpu->arch.apf.gfns[j] == ~0) + return; + k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); + /* + * k lies cyclically in ]i,j] + * | i.k.j | + * |....j i.k.| or |.k..j i...| + */ + } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); + vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; + i = j; + } +} + +static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) +{ + + return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, + sizeof(val)); +} + +void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work) +{ + struct x86_exception fault; + + trace_kvm_async_pf_not_present(work->arch.token, work->gva); + kvm_add_async_pf_gfn(vcpu, work->arch.gfn); + + if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || + (vcpu->arch.apf.send_user_only && + kvm_x86_ops->get_cpl(vcpu) == 0)) + kvm_make_request(KVM_REQ_APF_HALT, vcpu); + else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { + fault.vector = PF_VECTOR; + fault.error_code_valid = true; + fault.error_code = 0; + fault.nested_page_fault = false; + fault.address = work->arch.token; + kvm_inject_page_fault(vcpu, &fault); + } +} + +void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work) +{ + struct x86_exception fault; + + trace_kvm_async_pf_ready(work->arch.token, work->gva); + if (is_error_page(work->page)) + work->arch.token = ~0; /* broadcast wakeup */ + else + kvm_del_async_pf_gfn(vcpu, work->arch.gfn); + + if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && + !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { + fault.vector = PF_VECTOR; + fault.error_code_valid = true; + fault.error_code = 0; + fault.nested_page_fault = false; + fault.address = work->arch.token; + kvm_inject_page_fault(vcpu, &fault); + } + vcpu->arch.apf.halted = false; +} + +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) + return true; + else + return !kvm_event_needs_reinjection(vcpu) && + kvm_x86_ops->interrupt_allowed(vcpu); +} + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 738e6593799d..dbe34b931374 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -89,6 +90,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); get_page(page); + SetPageReferenced(page); pages[*nr] = page; (*nr)++; @@ -103,6 +105,17 @@ static inline void get_head_page_multiple(struct page *page, int nr) VM_BUG_ON(page != compound_head(page)); VM_BUG_ON(page_count(page) == 0); atomic_add(nr, &page->_count); + SetPageReferenced(page); +} + +static inline void get_huge_page_tail(struct page *page) +{ + /* + * __split_huge_page_refcount() cannot run + * from under us. 
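+ * gup-fast runs with interrupts disabled, so the TLB flush IPI issued
+ * by pmdp_splitting_flush() cannot finish while this walker is active;
+ * the splitting side is held off before it can reach
+ * __split_huge_page_refcount() (see the pmd_trans_splitting() check
+ * in gup_pmd_range() below).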
+ */ + VM_BUG_ON(atomic_read(&page->_count) < 0); + atomic_inc(&page->_count); } static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, @@ -128,6 +141,8 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; + if (PageTail(page)) + get_huge_page_tail(page); (*nr)++; page++; refs++; @@ -148,7 +163,18 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, pmd_t pmd = *pmdp; next = pmd_addr_end(addr, end); - if (pmd_none(pmd)) + /* + * The pmd_trans_splitting() check below explains why + * pmdp_splitting_flush has to flush the tlb, to stop + * this gup-fast code from running while we set the + * splitting bit in the pmd. Returning zero will take + * the slow path that will call wait_split_huge_page() + * if the pmd is still in splitting state. gup-fast + * can't because it has irq disabled and + * wait_split_huge_page() would never return as the + * tlb flush IPI wouldn't run. + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f89b5bb4e93f..c821074b7f0b 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -715,6 +716,7 @@ void __init paging_init(void) /* * NOTE: at this point the bootmem allocator is fully available. */ + olpc_dt_build_devicetree(); sparse_init(); zone_sizes_init(); } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 8be8c7d7bc89..500242d3c96d 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -320,6 +320,25 @@ int ptep_set_access_flags(struct vm_area_struct *vma, return changed; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + *pmdp = entry; + pmd_update_defer(vma->vm_mm, address, pmdp); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +#endif + int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { @@ -335,6 +354,23 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma, return ret; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + int ret = 0; + + if (pmd_young(*pmdp)) + ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, + (unsigned long *)pmdp); + + if (ret) + pmd_update(vma->vm_mm, addr, pmdp); + + return ret; +} +#endif + int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { @@ -347,6 +383,36 @@ int ptep_clear_flush_young(struct vm_area_struct *vma, return young; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + return young; +} + +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int set; + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set = !test_and_set_bit(_PAGE_BIT_SPLITTING, + (unsigned long *)pmdp); + if (set) { + pmd_update(vma->vm_mm, address, 
pmdp); + /* need tlb flush only to serialize against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } +} +#endif + /** * reserve_top_address - reserves a hole in the top of kernel address space * @reserve - size of hole to reserve diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index 0846a5bbbfbd..ab8269b0da29 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -9,6 +9,7 @@ * option) any later version. */ +#include #include #include #include @@ -25,12 +26,14 @@ static void __devinit cnb20le_res(struct pci_dev *dev) u8 fbus, lbus; int i; +#ifdef CONFIG_ACPI /* - * The x86_pci_root_bus_res_quirks() function already refuses to use - * this information if ACPI _CRS was used. Therefore, we don't bother - * checking if ACPI is enabled, and just generate the information - * for both the ACPI _CRS and no ACPI cases. + * We should get host bridge information from ACPI unless the BIOS + * doesn't support it. */ + if (acpi_os_get_root_pointer()) + return; +#endif info = &pci_root_info[pci_root_num]; pci_root_num++; diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index f7c8a399978c..5fe75026ecc2 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -22,6 +22,7 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | unsigned int pci_early_dump_regs; static int pci_bf_sort; +static int smbios_type_b1_flag; int pci_routeirq; int noioapicquirk; #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS @@ -185,6 +186,39 @@ static int __devinit set_bf_sort(const struct dmi_system_id *d) return 0; } +static void __devinit read_dmi_type_b1(const struct dmi_header *dm, + void *private_data) +{ + u8 *d = (u8 *)dm + 4; + + if (dm->type != 0xB1) + return; + switch (((*(u32 *)d) >> 9) & 0x03) { + case 0x00: + printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n"); + break; + case 0x01: /* set pci=bfsort */ + smbios_type_b1_flag = 1; + break; + case 0x02: /* do not set pci=bfsort */ + smbios_type_b1_flag = 2; + break; + default: + break; + } +} + +static int __devinit find_sort_method(const struct dmi_system_id *d) +{ + dmi_walk(read_dmi_type_b1, NULL); + + if (smbios_type_b1_flag == 1) { + set_bf_sort(d); + return 0; + } + return -1; +} + /* * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) */ @@ -212,6 +246,13 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = { }, }, #endif /* __i386__ */ + { + .callback = find_sort_method, + .ident = "Dell System", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + }, + }, { .callback = set_bf_sort, .ident = "Dell PowerEdge 1950", diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9f9bfb705cf9..87e6c8323117 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -589,7 +589,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route case PCI_DEVICE_ID_INTEL_ICH10_1: case PCI_DEVICE_ID_INTEL_ICH10_2: case PCI_DEVICE_ID_INTEL_ICH10_3: - case PCI_DEVICE_ID_INTEL_PATSBURG_LPC: + case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: + case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c index 65df603622b2..25bfdbb5b130 100644 --- a/arch/x86/platform/mrst/early_printk_mrst.c +++ b/arch/x86/platform/mrst/early_printk_mrst.c @@ -103,7 +103,7 @@ struct dw_spi_reg { static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0; static u32 
*pclk_spi0; -/* Always contains an accessable address, start with 0 */ +/* Always contains an accessible address, start with 0 */ static struct dw_spi_reg *pspi; static struct kmsg_dumper dw_dumper; diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile index c31b8fcb5a86..e797428b163b 100644 --- a/arch/x86/platform/olpc/Makefile +++ b/arch/x86/platform/olpc/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_OLPC) += olpc.o obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o +obj-$(CONFIG_OLPC_OPENFIRMWARE_DT) += olpc_dt.o diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c index f5442c03abc3..127775696d6c 100644 --- a/arch/x86/platform/olpc/olpc-xo1.c +++ b/arch/x86/platform/olpc/olpc-xo1.c @@ -1,6 +1,7 @@ /* * Support for features of the OLPC XO-1 laptop * + * Copyright (C) 2010 Andres Salomon * Copyright (C) 2010 One Laptop per Child * Copyright (C) 2006 Red Hat, Inc. * Copyright (C) 2006 Advanced Micro Devices, Inc. @@ -12,8 +13,6 @@ */ #include -#include -#include #include #include @@ -22,9 +21,6 @@ #define DRV_NAME "olpc-xo1" -#define PMS_BAR 4 -#define ACPI_BAR 5 - /* PMC registers (PMS block) */ #define PM_SCLK 0x10 #define PM_IN_SLPCTL 0x20 @@ -57,65 +53,67 @@ static void xo1_power_off(void) outl(0x00002000, acpi_base + PM1_CNT); } -/* Read the base addresses from the PCI BAR info */ -static int __devinit setup_bases(struct pci_dev *pdev) +static int __devinit olpc_xo1_probe(struct platform_device *pdev) { - int r; + struct resource *res; - r = pci_enable_device_io(pdev); - if (r) { - dev_err(&pdev->dev, "can't enable device IO\n"); - return r; - } + /* don't run on non-XOs */ + if (!machine_is_olpc()) + return -ENODEV; - r = pci_request_region(pdev, ACPI_BAR, DRV_NAME); - if (r) { - dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", ACPI_BAR); - return r; + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); + return -EIO; } - r = pci_request_region(pdev, PMS_BAR, DRV_NAME); - if (r) { - dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", PMS_BAR); - pci_release_region(pdev, ACPI_BAR); - return r; + if (!request_region(res->start, resource_size(res), DRV_NAME)) { + dev_err(&pdev->dev, "can't request region\n"); + return -EIO; } - acpi_base = pci_resource_start(pdev, ACPI_BAR); - pms_base = pci_resource_start(pdev, PMS_BAR); + if (strcmp(pdev->name, "cs5535-pms") == 0) + pms_base = res->start; + else if (strcmp(pdev->name, "cs5535-acpi") == 0) + acpi_base = res->start; + + /* If we have both addresses, we can override the poweroff hook */ + if (pms_base && acpi_base) { + pm_power_off = xo1_power_off; + printk(KERN_INFO "OLPC XO-1 support registered\n"); + } return 0; } -static int __devinit olpc_xo1_probe(struct platform_device *pdev) +static int __devexit olpc_xo1_remove(struct platform_device *pdev) { - struct pci_dev *pcidev; - int r; - - pcidev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, - NULL); - if (!pdev) - return -ENODEV; - - r = setup_bases(pcidev); - if (r) - return r; + struct resource *r; - pm_power_off = xo1_power_off; + r = platform_get_resource(pdev, IORESOURCE_IO, 0); + release_region(r->start, resource_size(r)); - printk(KERN_INFO "OLPC XO-1 support registered\n"); - return 0; -} + if (strcmp(pdev->name, "cs5535-pms") == 0) + pms_base = 0; + else if (strcmp(pdev->name, "cs5535-acpi") == 0) + acpi_base = 0; -static int __devexit olpc_xo1_remove(struct platform_device *pdev) -{ pm_power_off = NULL; 
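/* xo1_power_off() needs both the PMS and ACPI bases, so the poweroff hook has to be dropped as soon as either backing device goes away. */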
return 0; } -static struct platform_driver olpc_xo1_driver = { +static struct platform_driver cs5535_pms_drv = { + .driver = { + .name = "cs5535-pms", + .owner = THIS_MODULE, + }, + .probe = olpc_xo1_probe, + .remove = __devexit_p(olpc_xo1_remove), +}; + +static struct platform_driver cs5535_acpi_drv = { .driver = { - .name = DRV_NAME, + .name = "cs5535-acpi", .owner = THIS_MODULE, }, .probe = olpc_xo1_probe, @@ -124,12 +122,23 @@ static struct platform_driver olpc_xo1_driver = { static int __init olpc_xo1_init(void) { - return platform_driver_register(&olpc_xo1_driver); + int r; + + r = platform_driver_register(&cs5535_pms_drv); + if (r) + return r; + + r = platform_driver_register(&cs5535_acpi_drv); + if (r) + platform_driver_unregister(&cs5535_pms_drv); + + return r; } static void __exit olpc_xo1_exit(void) { - platform_driver_unregister(&olpc_xo1_driver); + platform_driver_unregister(&cs5535_acpi_drv); + platform_driver_unregister(&cs5535_pms_drv); } MODULE_AUTHOR("Daniel Drake "); diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c new file mode 100644 index 000000000000..dab874647530 --- /dev/null +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -0,0 +1,183 @@ +/* + * OLPC-specific OFW device tree support code. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc by David S. Miller davem@davemloft.net + * Adapted for x86/OLPC by Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include + +static phandle __init olpc_dt_getsibling(phandle node) +{ + const void *args[] = { (void *)node }; + void *res[] = { &node }; + + if ((s32)node == -1) + return 0; + + if (olpc_ofw("peer", args, res) || (s32)node == -1) + return 0; + + return node; +} + +static phandle __init olpc_dt_getchild(phandle node) +{ + const void *args[] = { (void *)node }; + void *res[] = { &node }; + + if ((s32)node == -1) + return 0; + + if (olpc_ofw("child", args, res) || (s32)node == -1) { + pr_err("PROM: %s: fetching child failed!\n", __func__); + return 0; + } + + return node; +} + +static int __init olpc_dt_getproplen(phandle node, const char *prop) +{ + const void *args[] = { (void *)node, prop }; + int len; + void *res[] = { &len }; + + if ((s32)node == -1) + return -1; + + if (olpc_ofw("getproplen", args, res)) { + pr_err("PROM: %s: getproplen failed!\n", __func__); + return -1; + } + + return len; +} + +static int __init olpc_dt_getproperty(phandle node, const char *prop, + char *buf, int bufsize) +{ + int plen; + + plen = olpc_dt_getproplen(node, prop); + if (plen > bufsize || plen < 1) { + return -1; + } else { + const void *args[] = { (void *)node, prop, buf, (void *)plen }; + void *res[] = { &plen }; + + if (olpc_ofw("getprop", args, res)) { + pr_err("PROM: %s: getprop failed!\n", __func__); + return -1; + } + } + + return plen; +} + +static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf) +{ + const void *args[] = { (void *)node, prev, buf }; + int success; + void *res[] = { &success }; + + buf[0] = '\0'; + + if ((s32)node == -1) + return -1; + + if (olpc_ofw("nextprop", args, res) || success != 1) + return -1; + + return 0; +} + +static int __init olpc_dt_pkg2path(phandle node, char *buf, + const int buflen, int *len) +{ + const void *args[] = { (void *)node, buf, (void *)buflen }; + void *res[] = { len }; + + if ((s32)node == -1) + return -1; + + if (olpc_ofw("package-to-path", args, res) || *len < 1) + return -1; + + return 0; +} + +static unsigned int prom_early_allocated __initdata; + +void * __init prom_early_alloc(unsigned long size) +{ + static u8 *mem; + static size_t free_mem; + void *res; + + if (free_mem < size) { + const size_t chunk_size = max(PAGE_SIZE, size); + + /* + * To mimimize the number of allocations, grab at least + * PAGE_SIZE of memory (that's an arbitrary choice that's + * fast enough on the platforms we care about while minimizing + * wasted bootmem) and hand off chunks of it to callers. 
+ */ + res = alloc_bootmem(chunk_size); + if (!res) + return NULL; + prom_early_allocated += chunk_size; + memset(res, 0, chunk_size); + free_mem = chunk_size; + mem = res; + } + + /* allocate from the local cache */ + free_mem -= size; + res = mem; + mem += size; + return res; +} + +static struct of_pdt_ops prom_olpc_ops __initdata = { + .nextprop = olpc_dt_nextprop, + .getproplen = olpc_dt_getproplen, + .getproperty = olpc_dt_getproperty, + .getchild = olpc_dt_getchild, + .getsibling = olpc_dt_getsibling, + .pkg2path = olpc_dt_pkg2path, +}; + +void __init olpc_dt_build_devicetree(void) +{ + phandle root; + + if (!olpc_ofw_is_installed()) + return; + + root = olpc_dt_getsibling(0); + if (!root) { + pr_err("PROM: unable to get root node from OFW!\n"); + return; + } + of_pdt_build_devicetree(root, &prom_olpc_ops); + + pr_info("PROM DT: Built device tree with %u bytes of memory.\n", + prom_early_allocated); +} diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c index 787320464379..e7604f62870d 100644 --- a/arch/x86/platform/olpc/olpc_ofw.c +++ b/arch/x86/platform/olpc/olpc_ofw.c @@ -110,3 +110,8 @@ void __init olpc_ofw_detect(void) (unsigned long)olpc_ofw_cif, (-start) >> 20); reserve_top_address(-start); } + +bool __init olpc_ofw_is_installed(void) +{ + return olpc_ofw_cif != NULL; +} diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 779385158915..17c565de3d64 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -12,7 +12,8 @@ CFLAGS_mmu.o := $(nostackp) obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ - grant-table.o suspend.o platform-pci-unplug.o + grant-table.o suspend.o platform-pci-unplug.o \ + p2m.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 44924e551fde..5e92b61ad574 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -173,371 +173,6 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ */ #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) -/* - * Xen leaves the responsibility for maintaining p2m mappings to the - * guests themselves, but it must also access and update the p2m array - * during suspend/resume when all the pages are reallocated. - * - * The p2m table is logically a flat array, but we implement it as a - * three-level tree to allow the address space to be sparse. - * - * Xen - * | - * p2m_top p2m_top_mfn - * / \ / \ - * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn - * / \ / \ / / - * p2m p2m p2m p2m p2m p2m p2m ... - * - * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. - * - * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the - * maximum representable pseudo-physical address space is: - * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages - * - * P2M_PER_PAGE depends on the architecture, as a mfn is always - * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to - * 512 and 1024 entries respectively. 
- */ - -unsigned long xen_max_p2m_pfn __read_mostly; - -#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) -#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) -#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) - -#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) - -/* Placeholders for holes in the address space */ -static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); - -static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); - -RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); -RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); - -static inline unsigned p2m_top_index(unsigned long pfn) -{ - BUG_ON(pfn >= MAX_P2M_PFN); - return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); -} - -static inline unsigned p2m_mid_index(unsigned long pfn) -{ - return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; -} - -static inline unsigned p2m_index(unsigned long pfn) -{ - return pfn % P2M_PER_PAGE; -} - -static void p2m_top_init(unsigned long ***top) -{ - unsigned i; - - for (i = 0; i < P2M_TOP_PER_PAGE; i++) - top[i] = p2m_mid_missing; -} - -static void p2m_top_mfn_init(unsigned long *top) -{ - unsigned i; - - for (i = 0; i < P2M_TOP_PER_PAGE; i++) - top[i] = virt_to_mfn(p2m_mid_missing_mfn); -} - -static void p2m_top_mfn_p_init(unsigned long **top) -{ - unsigned i; - - for (i = 0; i < P2M_TOP_PER_PAGE; i++) - top[i] = p2m_mid_missing_mfn; -} - -static void p2m_mid_init(unsigned long **mid) -{ - unsigned i; - - for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = p2m_missing; -} - -static void p2m_mid_mfn_init(unsigned long *mid) -{ - unsigned i; - - for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = virt_to_mfn(p2m_missing); -} - -static void p2m_init(unsigned long *p2m) -{ - unsigned i; - - for (i = 0; i < P2M_MID_PER_PAGE; i++) - p2m[i] = INVALID_P2M_ENTRY; -} - -/* - * Build the parallel p2m_top_mfn and p2m_mid_mfn structures - * - * This is called both at boot time, and after resuming from suspend: - * - At boot time we're called very early, and must use extend_brk() - * to allocate memory. - * - * - After resume we're called from within stop_machine, but the mfn - * tree should alreay be completely allocated. - */ -void xen_build_mfn_list_list(void) -{ - unsigned long pfn; - - /* Pre-initialize p2m_top_mfn to be completely missing */ - if (p2m_top_mfn == NULL) { - p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(p2m_mid_missing_mfn); - - p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_top_mfn_p_init(p2m_top_mfn_p); - - p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_top_mfn_init(p2m_top_mfn); - } else { - /* Reinitialise, mfn's all change after migration */ - p2m_mid_mfn_init(p2m_mid_missing_mfn); - } - - for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); - unsigned long **mid; - unsigned long *mid_mfn_p; - - mid = p2m_top[topidx]; - mid_mfn_p = p2m_top_mfn_p[topidx]; - - /* Don't bother allocating any mfn mid levels if - * they're just missing, just update the stored mfn, - * since all could have changed over a migrate. 
- */ - if (mid == p2m_mid_missing) { - BUG_ON(mididx); - BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); - p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); - pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; - continue; - } - - if (mid_mfn_p == p2m_mid_missing_mfn) { - /* - * XXX boot-time only! We should never find - * missing parts of the mfn tree after - * runtime. extend_brk() will BUG if we call - * it too late. - */ - mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(mid_mfn_p); - - p2m_top_mfn_p[topidx] = mid_mfn_p; - } - - p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); - mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); - } -} - -void xen_setup_mfn_list_list(void) -{ - BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); - - HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = - virt_to_mfn(p2m_top_mfn); - HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; -} - -/* Set up p2m_top to point to the domain-builder provided p2m pages */ -void __init xen_build_dynamic_phys_to_machine(void) -{ - unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; - unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); - unsigned long pfn; - - xen_max_p2m_pfn = max_pfn; - - p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_init(p2m_missing); - - p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(p2m_mid_missing); - - p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_top_init(p2m_top); - - /* - * The domain builder gives us a pre-constructed p2m array in - * mfn_list for all the pages initially given to us, so we just - * need to graft that into our tree structure. - */ - for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); - - if (p2m_top[topidx] == p2m_mid_missing) { - unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(mid); - - p2m_top[topidx] = mid; - } - - p2m_top[topidx][mididx] = &mfn_list[pfn]; - } -} - -unsigned long get_phys_to_machine(unsigned long pfn) -{ - unsigned topidx, mididx, idx; - - if (unlikely(pfn >= MAX_P2M_PFN)) - return INVALID_P2M_ENTRY; - - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); - - return p2m_top[topidx][mididx][idx]; -} -EXPORT_SYMBOL_GPL(get_phys_to_machine); - -static void *alloc_p2m_page(void) -{ - return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); -} - -static void free_p2m_page(void *p) -{ - free_page((unsigned long)p); -} - -/* - * Fully allocate the p2m structure for a given pfn. We need to check - * that both the top and mid levels are allocated, and make sure the - * parallel mfn tree is kept in sync. We may race with other cpus, so - * the new pages are installed with cmpxchg; if we lose the race then - * simply free the page we allocated and use the one that's there. 
- */ -static bool alloc_p2m(unsigned long pfn) -{ - unsigned topidx, mididx; - unsigned long ***top_p, **mid; - unsigned long *top_mfn_p, *mid_mfn; - - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - - top_p = &p2m_top[topidx]; - mid = *top_p; - - if (mid == p2m_mid_missing) { - /* Mid level is missing, allocate a new one */ - mid = alloc_p2m_page(); - if (!mid) - return false; - - p2m_mid_init(mid); - - if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) - free_p2m_page(mid); - } - - top_mfn_p = &p2m_top_mfn[topidx]; - mid_mfn = p2m_top_mfn_p[topidx]; - - BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); - - if (mid_mfn == p2m_mid_missing_mfn) { - /* Separately check the mid mfn level */ - unsigned long missing_mfn; - unsigned long mid_mfn_mfn; - - mid_mfn = alloc_p2m_page(); - if (!mid_mfn) - return false; - - p2m_mid_mfn_init(mid_mfn); - - missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); - mid_mfn_mfn = virt_to_mfn(mid_mfn); - if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) - free_p2m_page(mid_mfn); - else - p2m_top_mfn_p[topidx] = mid_mfn; - } - - if (p2m_top[topidx][mididx] == p2m_missing) { - /* p2m leaf page is missing */ - unsigned long *p2m; - - p2m = alloc_p2m_page(); - if (!p2m) - return false; - - p2m_init(p2m); - - if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) - free_p2m_page(p2m); - else - mid_mfn[mididx] = virt_to_mfn(p2m); - } - - return true; -} - -/* Try to install p2m mapping; fail if intermediate bits missing */ -bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - unsigned topidx, mididx, idx; - - if (unlikely(pfn >= MAX_P2M_PFN)) { - BUG_ON(mfn != INVALID_P2M_ENTRY); - return true; - } - - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); - - if (p2m_top[topidx][mididx] == p2m_missing) - return mfn == INVALID_P2M_ENTRY; - - p2m_top[topidx][mididx][idx] = mfn; - - return true; -} - -bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return true; - } - - if (unlikely(!__set_phys_to_machine(pfn, mfn))) { - if (!alloc_p2m(pfn)) - return false; - - if (!__set_phys_to_machine(pfn, mfn)) - return false; - } - - return true; -} - unsigned long arbitrary_virt_to_mfn(void *vaddr) { xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); @@ -566,6 +201,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) offset = address & ~PAGE_MASK; return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); } +EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); void make_lowmem_page_readonly(void *vaddr) { diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c new file mode 100644 index 000000000000..8f2251d2a3f8 --- /dev/null +++ b/arch/x86/xen/p2m.c @@ -0,0 +1,510 @@ +/* + * Xen leaves the responsibility for maintaining p2m mappings to the + * guests themselves, but it must also access and update the p2m array + * during suspend/resume when all the pages are reallocated. + * + * The p2m table is logically a flat array, but we implement it as a + * three-level tree to allow the address space to be sparse. + * + * Xen + * | + * p2m_top p2m_top_mfn + * / \ / \ + * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn + * / \ / \ / / + * p2m p2m p2m p2m p2m p2m p2m ... + * + * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. 
+ * + * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the + * maximum representable pseudo-physical address space is: + * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages + * + * P2M_PER_PAGE depends on the architecture, as a mfn is always + * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to + * 512 and 1024 entries respectively. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include "xen-ops.h" + +static void __init m2p_override_init(void); + +unsigned long xen_max_p2m_pfn __read_mostly; + +#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) +#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) +#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) + +#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) + +/* Placeholders for holes in the address space */ +static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); + +static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); + +RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); +RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); + +static inline unsigned p2m_top_index(unsigned long pfn) +{ + BUG_ON(pfn >= MAX_P2M_PFN); + return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); +} + +static inline unsigned p2m_mid_index(unsigned long pfn) +{ + return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; +} + +static inline unsigned p2m_index(unsigned long pfn) +{ + return pfn % P2M_PER_PAGE; +} + +static void p2m_top_init(unsigned long ***top) +{ + unsigned i; + + for (i = 0; i < P2M_TOP_PER_PAGE; i++) + top[i] = p2m_mid_missing; +} + +static void p2m_top_mfn_init(unsigned long *top) +{ + unsigned i; + + for (i = 0; i < P2M_TOP_PER_PAGE; i++) + top[i] = virt_to_mfn(p2m_mid_missing_mfn); +} + +static void p2m_top_mfn_p_init(unsigned long **top) +{ + unsigned i; + + for (i = 0; i < P2M_TOP_PER_PAGE; i++) + top[i] = p2m_mid_missing_mfn; +} + +static void p2m_mid_init(unsigned long **mid) +{ + unsigned i; + + for (i = 0; i < P2M_MID_PER_PAGE; i++) + mid[i] = p2m_missing; +} + +static void p2m_mid_mfn_init(unsigned long *mid) +{ + unsigned i; + + for (i = 0; i < P2M_MID_PER_PAGE; i++) + mid[i] = virt_to_mfn(p2m_missing); +} + +static void p2m_init(unsigned long *p2m) +{ + unsigned i; + + for (i = 0; i < P2M_MID_PER_PAGE; i++) + p2m[i] = INVALID_P2M_ENTRY; +} + +/* + * Build the parallel p2m_top_mfn and p2m_mid_mfn structures + * + * This is called both at boot time, and after resuming from suspend: + * - At boot time we're called very early, and must use extend_brk() + * to allocate memory. + * + * - After resume we're called from within stop_machine, but the mfn + * tree should alreay be completely allocated. 
+ */ +void xen_build_mfn_list_list(void) +{ + unsigned long pfn; + + /* Pre-initialize p2m_top_mfn to be completely missing */ + if (p2m_top_mfn == NULL) { + p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_mfn_init(p2m_mid_missing_mfn); + + p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn_p_init(p2m_top_mfn_p); + + p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn_init(p2m_top_mfn); + } else { + /* Reinitialise, mfn's all change after migration */ + p2m_mid_mfn_init(p2m_mid_missing_mfn); + } + + for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { + unsigned topidx = p2m_top_index(pfn); + unsigned mididx = p2m_mid_index(pfn); + unsigned long **mid; + unsigned long *mid_mfn_p; + + mid = p2m_top[topidx]; + mid_mfn_p = p2m_top_mfn_p[topidx]; + + /* Don't bother allocating any mfn mid levels if + * they're just missing, just update the stored mfn, + * since all could have changed over a migrate. + */ + if (mid == p2m_mid_missing) { + BUG_ON(mididx); + BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); + p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); + pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; + continue; + } + + if (mid_mfn_p == p2m_mid_missing_mfn) { + /* + * XXX boot-time only! We should never find + * missing parts of the mfn tree after + * runtime. extend_brk() will BUG if we call + * it too late. + */ + mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_mfn_init(mid_mfn_p); + + p2m_top_mfn_p[topidx] = mid_mfn_p; + } + + p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); + mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); + } +} + +void xen_setup_mfn_list_list(void) +{ + BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + + HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = + virt_to_mfn(p2m_top_mfn); + HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; +} + +/* Set up p2m_top to point to the domain-builder provided p2m pages */ +void __init xen_build_dynamic_phys_to_machine(void) +{ + unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; + unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); + unsigned long pfn; + + xen_max_p2m_pfn = max_pfn; + + p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_init(p2m_missing); + + p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_init(p2m_mid_missing); + + p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_top_init(p2m_top); + + /* + * The domain builder gives us a pre-constructed p2m array in + * mfn_list for all the pages initially given to us, so we just + * need to graft that into our tree structure. + */ + for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { + unsigned topidx = p2m_top_index(pfn); + unsigned mididx = p2m_mid_index(pfn); + + if (p2m_top[topidx] == p2m_mid_missing) { + unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_init(mid); + + p2m_top[topidx] = mid; + } + + p2m_top[topidx][mididx] = &mfn_list[pfn]; + } + + m2p_override_init(); +} + +unsigned long get_phys_to_machine(unsigned long pfn) +{ + unsigned topidx, mididx, idx; + + if (unlikely(pfn >= MAX_P2M_PFN)) + return INVALID_P2M_ENTRY; + + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + idx = p2m_index(pfn); + + return p2m_top[topidx][mididx][idx]; +} +EXPORT_SYMBOL_GPL(get_phys_to_machine); + +static void *alloc_p2m_page(void) +{ + return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); +} + +static void free_p2m_page(void *p) +{ + free_page((unsigned long)p); +} + +/* + * Fully allocate the p2m structure for a given pfn. 
We need to check + * that both the top and mid levels are allocated, and make sure the + * parallel mfn tree is kept in sync. We may race with other cpus, so + * the new pages are installed with cmpxchg; if we lose the race then + * simply free the page we allocated and use the one that's there. + */ +static bool alloc_p2m(unsigned long pfn) +{ + unsigned topidx, mididx; + unsigned long ***top_p, **mid; + unsigned long *top_mfn_p, *mid_mfn; + + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + + top_p = &p2m_top[topidx]; + mid = *top_p; + + if (mid == p2m_mid_missing) { + /* Mid level is missing, allocate a new one */ + mid = alloc_p2m_page(); + if (!mid) + return false; + + p2m_mid_init(mid); + + if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) + free_p2m_page(mid); + } + + top_mfn_p = &p2m_top_mfn[topidx]; + mid_mfn = p2m_top_mfn_p[topidx]; + + BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); + + if (mid_mfn == p2m_mid_missing_mfn) { + /* Separately check the mid mfn level */ + unsigned long missing_mfn; + unsigned long mid_mfn_mfn; + + mid_mfn = alloc_p2m_page(); + if (!mid_mfn) + return false; + + p2m_mid_mfn_init(mid_mfn); + + missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); + mid_mfn_mfn = virt_to_mfn(mid_mfn); + if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) + free_p2m_page(mid_mfn); + else + p2m_top_mfn_p[topidx] = mid_mfn; + } + + if (p2m_top[topidx][mididx] == p2m_missing) { + /* p2m leaf page is missing */ + unsigned long *p2m; + + p2m = alloc_p2m_page(); + if (!p2m) + return false; + + p2m_init(p2m); + + if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) + free_p2m_page(p2m); + else + mid_mfn[mididx] = virt_to_mfn(p2m); + } + + return true; +} + +/* Try to install p2m mapping; fail if intermediate bits missing */ +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + unsigned topidx, mididx, idx; + + if (unlikely(pfn >= MAX_P2M_PFN)) { + BUG_ON(mfn != INVALID_P2M_ENTRY); + return true; + } + + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + idx = p2m_index(pfn); + + if (p2m_top[topidx][mididx] == p2m_missing) + return mfn == INVALID_P2M_ENTRY; + + p2m_top[topidx][mididx][idx] = mfn; + + return true; +} + +bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { + BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + return true; + } + + if (unlikely(!__set_phys_to_machine(pfn, mfn))) { + if (!alloc_p2m(pfn)) + return false; + + if (!__set_phys_to_machine(pfn, mfn)) + return false; + } + + return true; +} + +#define M2P_OVERRIDE_HASH_SHIFT 10 +#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT) + +static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH); +static DEFINE_SPINLOCK(m2p_override_lock); + +static void __init m2p_override_init(void) +{ + unsigned i; + + m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH, + sizeof(unsigned long)); + + for (i = 0; i < M2P_OVERRIDE_HASH; i++) + INIT_LIST_HEAD(&m2p_overrides[i]); +} + +static unsigned long mfn_hash(unsigned long mfn) +{ + return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT); +} + +/* Add an MFN override for a particular page */ +int m2p_add_override(unsigned long mfn, struct page *page) +{ + unsigned long flags; + unsigned long pfn; + unsigned long address; + unsigned level; + pte_t *ptep = NULL; + + pfn = page_to_pfn(page); + if (!PageHighMem(page)) { + address = (unsigned long)__va(pfn << PAGE_SHIFT); + ptep = lookup_address(address, &level); 
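+ /* The lowmem linear mapping is zapped further down, so it must
+ * exist here and be a plain 4K pte. */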
+ + if (WARN(ptep == NULL || level != PG_LEVEL_4K, + "m2p_add_override: pfn %lx not mapped", pfn)) + return -EINVAL; + } + + page->private = mfn; + page->index = pfn_to_mfn(pfn); + + __set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); + if (!PageHighMem(page)) + /* Just zap old mapping for now */ + pte_clear(&init_mm, address, ptep); + + spin_lock_irqsave(&m2p_override_lock, flags); + list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); + spin_unlock_irqrestore(&m2p_override_lock, flags); + + return 0; +} + +int m2p_remove_override(struct page *page) +{ + unsigned long flags; + unsigned long mfn; + unsigned long pfn; + unsigned long address; + unsigned level; + pte_t *ptep = NULL; + + pfn = page_to_pfn(page); + mfn = get_phys_to_machine(pfn); + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) + return -EINVAL; + + if (!PageHighMem(page)) { + address = (unsigned long)__va(pfn << PAGE_SHIFT); + ptep = lookup_address(address, &level); + + if (WARN(ptep == NULL || level != PG_LEVEL_4K, + "m2p_remove_override: pfn %lx not mapped", pfn)) + return -EINVAL; + } + + spin_lock_irqsave(&m2p_override_lock, flags); + list_del(&page->lru); + spin_unlock_irqrestore(&m2p_override_lock, flags); + __set_phys_to_machine(pfn, page->index); + + if (!PageHighMem(page)) + set_pte_at(&init_mm, address, ptep, + pfn_pte(pfn, PAGE_KERNEL)); + /* No tlb flush necessary because the caller already + * left the pte unmapped. */ + + return 0; +} + +struct page *m2p_find_override(unsigned long mfn) +{ + unsigned long flags; + struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)]; + struct page *p, *ret; + + ret = NULL; + + spin_lock_irqsave(&m2p_override_lock, flags); + + list_for_each_entry(p, bucket, lru) { + if (p->private == mfn) { + ret = p; + break; + } + } + + spin_unlock_irqrestore(&m2p_override_lock, flags); + + return ret; +} + +unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) +{ + struct page *p = m2p_find_override(mfn); + unsigned long ret = pfn; + + if (p) + ret = page_to_pfn(p); + + return ret; +} +EXPORT_SYMBOL_GPL(m2p_find_override_pfn); diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h index fca4db425f6e..30789010733d 100644 --- a/arch/xtensa/include/asm/mman.h +++ b/arch/xtensa/include/asm/mman.h @@ -83,6 +83,9 @@ #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b1febd0f6d2a..455768a3eb9e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1452,10 +1452,6 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) goto done; } - /* Currently we do not support hierarchy deeper than two level (0,1) */ - if (parent != cgroup->top_cgroup) - return ERR_PTR(-EPERM); - blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); if (!blkcg) return ERR_PTR(-ENOMEM); diff --git a/block/blk-core.c b/block/blk-core.c index 4ce953f1b390..2f4002f79a24 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -33,7 +33,7 @@ #include "blk.h" -EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); @@ -64,13 +64,27 @@ static void drive_stat_acct(struct request *rq, int new_io) return; cpu = part_stat_lock(); - part = 
disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); - if (!new_io) + if (!new_io) { + part = rq->part; part_stat_inc(cpu, part, merges[rw]); - else { + } else { + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); + if (!hd_struct_try_get(part)) { + /* + * The partition is already being removed, + * the request will be accounted on the disk only + * + * We take a reference on disk->part0 although that + * partition will never be deleted, so we can treat + * it as any other partition. + */ + part = &rq->rq_disk->part0; + hd_struct_get(part); + } part_round_stats(cpu, part); part_inc_in_flight(part, rw); + rq->part = part; } part_stat_unlock(); @@ -128,6 +142,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->ref_count = 1; rq->start_time = jiffies; set_start_time_ns(rq); + rq->part = NULL; } EXPORT_SYMBOL(blk_rq_init); @@ -1329,9 +1344,9 @@ static inline void blk_partition_remap(struct bio *bio) bio->bi_sector += p->start_sect; bio->bi_bdev = bdev->bd_contains; - trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, - bdev->bd_dev, - bio->bi_sector - p->start_sect); + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, + bdev->bd_dev, + bio->bi_sector - p->start_sect); } } @@ -1500,7 +1515,7 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; if (old_sector != -1) - trace_block_remap(q, bio, old_dev, old_sector); + trace_block_bio_remap(q, bio, old_dev, old_sector); old_sector = bio->bi_sector; old_dev = bio->bi_bdev->bd_dev; @@ -1776,7 +1791,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + part = req->part; part_stat_add(cpu, part, sectors[rw], bytes >> 9); part_stat_unlock(); } @@ -1796,13 +1811,14 @@ static void blk_account_io_done(struct request *req) int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + part = req->part; part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); part_round_stats(cpu, part); part_dec_in_flight(part, rw); + hd_struct_put(part); part_stat_unlock(); } } @@ -2606,7 +2622,9 @@ int __init blk_dev_init(void) BUILD_BUG_ON(__REQ_NR_BITS > 8 * sizeof(((struct request *)0)->cmd_flags)); - kblockd_workqueue = create_workqueue("kblockd"); + /* used for unplugging and affects IO latency/throughput - HIGHPRI */ + kblockd_workqueue = alloc_workqueue("kblockd", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!kblockd_workqueue) panic("Failed to create kblockd\n"); diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 3c7a339fe381..b791022beef3 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -64,7 +64,7 @@ static void cfq_exit(struct io_context *ioc) rcu_read_unlock(); } -/* Called by the exitting task */ +/* Called by the exiting task */ void exit_io_context(struct task_struct *task) { struct io_context *ioc; @@ -74,10 +74,9 @@ void exit_io_context(struct task_struct *task) task->io_context = NULL; task_unlock(task); - if (atomic_dec_and_test(&ioc->nr_tasks)) { + if (atomic_dec_and_test(&ioc->nr_tasks)) cfq_exit(ioc); - } put_io_context(ioc); } diff --git a/block/blk-merge.c b/block/blk-merge.c index 74bc4a768f32..ea85e20d5e94 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -351,11 +351,12 @@ static void blk_account_io_merge(struct request *req) int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + part = req->part; part_round_stats(cpu, part); part_dec_in_flight(part, 
rq_data_dir(req)); + hd_struct_put(part); part_stat_unlock(); } } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 4cd59b0d7c15..501ffdf0399c 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -87,7 +87,6 @@ struct cfq_rb_root { unsigned count; unsigned total_weight; u64 min_vdisktime; - struct rb_node *active; }; #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \ .count = 0, .min_vdisktime = 0, } @@ -97,7 +96,7 @@ struct cfq_rb_root { */ struct cfq_queue { /* reference count */ - atomic_t ref; + int ref; /* various state flags, see below */ unsigned int flags; /* parent cfq_data */ @@ -180,7 +179,6 @@ struct cfq_group { /* group service_tree key */ u64 vdisktime; unsigned int weight; - bool on_st; /* number of cfqq currently on this group */ int nr_cfqq; @@ -209,7 +207,7 @@ struct cfq_group { struct blkio_group blkg; #ifdef CONFIG_CFQ_GROUP_IOSCHED struct hlist_node cfqd_node; - atomic_t ref; + int ref; #endif /* number of requests that are on the dispatch list or inside driver */ int dispatched; @@ -563,11 +561,6 @@ static void update_min_vdisktime(struct cfq_rb_root *st) u64 vdisktime = st->min_vdisktime; struct cfq_group *cfqg; - if (st->active) { - cfqg = rb_entry_cfqg(st->active); - vdisktime = cfqg->vdisktime; - } - if (st->left) { cfqg = rb_entry_cfqg(st->left); vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime); @@ -605,8 +598,8 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) return cfq_target_latency * cfqg->weight / st->total_weight; } -static inline void -cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +static inline unsigned +cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned slice = cfq_prio_to_slice(cfqd, cfqq); if (cfqd->cfq_latency) { @@ -632,6 +625,14 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) low_slice); } } + return slice; +} + +static inline void +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned slice = cfq_scaled_group_slice(cfqd, cfqq); + cfqq->slice_start = jiffies; cfqq->slice_end = jiffies + slice; cfqq->allocated_slice = slice; @@ -646,11 +647,11 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) static inline bool cfq_slice_used(struct cfq_queue *cfqq) { if (cfq_cfqq_slice_new(cfqq)) - return 0; + return false; if (time_before(jiffies, cfqq->slice_end)) - return 0; + return false; - return 1; + return true; } /* @@ -869,7 +870,7 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg) struct rb_node *n; cfqg->nr_cfqq++; - if (cfqg->on_st) + if (!RB_EMPTY_NODE(&cfqg->rb_node)) return; /* @@ -885,7 +886,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg) cfqg->vdisktime = st->min_vdisktime; __cfq_group_service_tree_add(st, cfqg); - cfqg->on_st = true; st->total_weight += cfqg->weight; } @@ -894,9 +894,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg) { struct cfq_rb_root *st = &cfqd->grp_service_tree; - if (st->active == &cfqg->rb_node) - st->active = NULL; - BUG_ON(cfqg->nr_cfqq < 1); cfqg->nr_cfqq--; @@ -905,7 +902,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg) return; cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); - cfqg->on_st = false; st->total_weight -= cfqg->weight; if (!RB_EMPTY_NODE(&cfqg->rb_node)) cfq_rb_erase(&cfqg->rb_node, st); @@ -1026,11 +1022,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) * elevator which will be 
dropped by either elevator exit * or cgroup deletion path depending on who is exiting first. */ - atomic_set(&cfqg->ref, 1); + cfqg->ref = 1; /* * Add group onto cgroup list. It might happen that bdi->dev is - * not initiliazed yet. Initialize this new group without major + * not initialized yet. Initialize this new group without major * and minor info and this info will be filled in once a new thread * comes for IO. See code above. */ @@ -1071,7 +1067,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) { - atomic_inc(&cfqg->ref); + cfqg->ref++; return cfqg; } @@ -1083,7 +1079,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) cfqq->cfqg = cfqg; /* cfqq reference on cfqg */ - atomic_inc(&cfqq->cfqg->ref); + cfqq->cfqg->ref++; } static void cfq_put_cfqg(struct cfq_group *cfqg) @@ -1091,11 +1087,12 @@ static void cfq_put_cfqg(struct cfq_group *cfqg) struct cfq_rb_root *st; int i, j; - BUG_ON(atomic_read(&cfqg->ref) <= 0); - if (!atomic_dec_and_test(&cfqg->ref)) + BUG_ON(cfqg->ref <= 0); + cfqg->ref--; + if (cfqg->ref) return; for_each_cfqg_st(cfqg, i, j, st) - BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL); + BUG_ON(!RB_EMPTY_ROOT(&st->rb)); kfree(cfqg); } @@ -1200,7 +1197,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_group_service_tree_del(cfqd, cfqq->cfqg); cfqq->orig_cfqg = cfqq->cfqg; cfqq->cfqg = &cfqd->root_group; - atomic_inc(&cfqd->root_group.ref); + cfqd->root_group.ref++; group_changed = 1; } else if (!cfqd->cfq_group_isolation && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) { @@ -1672,8 +1669,11 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, /* * store what was left of this slice, if the queue idled/timed out */ - if (timed_out && !cfq_cfqq_slice_new(cfqq)) { - cfqq->slice_resid = cfqq->slice_end - jiffies; + if (timed_out) { + if (cfq_cfqq_slice_new(cfqq)) + cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq); + else + cfqq->slice_resid = cfqq->slice_end - jiffies; cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); } @@ -1687,9 +1687,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (cfqq == cfqd->active_queue) cfqd->active_queue = NULL; - if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active) - cfqd->grp_service_tree.active = NULL; - if (cfqd->active_cic) { put_io_context(cfqd->active_cic->ioc); cfqd->active_cic = NULL; @@ -1901,10 +1898,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) * in their service tree. */ if (service_tree->count == 1 && cfq_cfqq_sync(cfqq)) - return 1; + return true; cfq_log_cfqq(cfqd, cfqq, "Not idling. 
st->count:%d", service_tree->count); - return 0; + return false; } static void cfq_arm_slice_timer(struct cfq_data *cfqd) @@ -2040,7 +2037,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq) int process_refs, io_refs; io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; - process_refs = atomic_read(&cfqq->ref) - io_refs; + process_refs = cfqq->ref - io_refs; BUG_ON(process_refs < 0); return process_refs; } @@ -2080,10 +2077,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) */ if (new_process_refs >= process_refs) { cfqq->new_cfqq = new_cfqq; - atomic_add(process_refs, &new_cfqq->ref); + new_cfqq->ref += process_refs; } else { new_cfqq->new_cfqq = cfqq; - atomic_add(new_process_refs, &cfqq->ref); + cfqq->ref += new_process_refs; } } @@ -2116,12 +2113,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) unsigned count; struct cfq_rb_root *st; unsigned group_slice; - - if (!cfqg) { - cfqd->serving_prio = IDLE_WORKLOAD; - cfqd->workload_expires = jiffies + 1; - return; - } + enum wl_prio_t original_prio = cfqd->serving_prio; /* Choose next priority. RT > BE > IDLE */ if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) @@ -2134,6 +2126,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) return; } + if (original_prio != cfqd->serving_prio) + goto new_workload; + /* * For RT and BE, we have to choose also the type * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload @@ -2148,6 +2143,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) if (count && !time_after(jiffies, cfqd->workload_expires)) return; +new_workload: /* otherwise select new workload type */ cfqd->serving_type = cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio); @@ -2199,7 +2195,6 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) if (RB_EMPTY_ROOT(&st->rb)) return NULL; cfqg = cfq_rb_first_group(st); - st->active = &cfqg->rb_node; update_min_vdisktime(st); return cfqg; } @@ -2293,6 +2288,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) goto keep_queue; } + /* + * This is a deep seek queue, but the device is much faster than + * the queue can deliver, don't idle + **/ + if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && + (cfq_cfqq_slice_new(cfqq) || + (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { + cfq_clear_cfqq_deep(cfqq); + cfq_clear_cfqq_idle_window(cfqq); + } + if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { cfqq = NULL; goto keep_queue; @@ -2367,12 +2373,12 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, { /* the queue hasn't finished any request, can't estimate */ if (cfq_cfqq_slice_new(cfqq)) - return 1; + return true; if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, cfqq->slice_end)) - return 1; + return true; - return 0; + return false; } static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) @@ -2538,9 +2544,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq) struct cfq_data *cfqd = cfqq->cfqd; struct cfq_group *cfqg, *orig_cfqg; - BUG_ON(atomic_read(&cfqq->ref) <= 0); + BUG_ON(cfqq->ref <= 0); - if (!atomic_dec_and_test(&cfqq->ref)) + cfqq->ref--; + if (cfqq->ref) return; cfq_log_cfqq(cfqd, cfqq, "put_queue"); @@ -2843,7 +2850,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, RB_CLEAR_NODE(&cfqq->p_node); INIT_LIST_HEAD(&cfqq->fifo); - atomic_set(&cfqq->ref, 0); + cfqq->ref = 0; cfqq->cfqd = cfqd; 
cfq_mark_cfqq_prio_changed(cfqq); @@ -2979,11 +2986,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, * pin the queue now that it's allocated, scheduler exit will prune it */ if (!is_sync && !(*async_cfqq)) { - atomic_inc(&cfqq->ref); + cfqq->ref++; *async_cfqq = cfqq; } - atomic_inc(&cfqq->ref); + cfqq->ref++; return cfqq; } @@ -3265,6 +3272,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) return true; + /* An idle queue should not be idle now for some reason */ + if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) + return true; + if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) return false; @@ -3284,9 +3295,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, */ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) { + struct cfq_queue *old_cfqq = cfqd->active_queue; + cfq_log_cfqq(cfqd, cfqq, "preempt"); cfq_slice_expired(cfqd, 1); + /* + * workload type is changed, don't save slice, otherwise preempt + * doesn't happen + */ + if (cfqq_type(old_cfqq) != cfqq_type(cfqq)) + cfqq->cfqg->saved_workload_slice = 0; + /* * Put the new queue at the front of the of the current list, * so we know that it will be selected next. @@ -3681,13 +3701,13 @@ new_queue: } cfqq->allocated[rw]++; - atomic_inc(&cfqq->ref); - - spin_unlock_irqrestore(q->queue_lock, flags); - + cfqq->ref++; rq->elevator_private = cic; rq->elevator_private2 = cfqq; rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg); + + spin_unlock_irqrestore(q->queue_lock, flags); + return 0; queue_fail: @@ -3862,6 +3882,10 @@ static void *cfq_init_queue(struct request_queue *q) if (!cfqd) return NULL; + /* + * Don't need take queue_lock in the routine, since we are + * initializing the ioscheduler, and nobody is using cfqd + */ cfqd->cic_index = i; /* Init root service tree */ @@ -3881,7 +3905,7 @@ static void *cfq_init_queue(struct request_queue *q) * Take a reference to root group which we never drop. This is just * to make sure that cfq_put_cfqg() does not try to kfree root group */ - atomic_set(&cfqg->ref, 1); + cfqg->ref = 1; rcu_read_lock(); cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, 0); @@ -3901,7 +3925,7 @@ static void *cfq_init_queue(struct request_queue *q) * will not attempt to free it. 
*/ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); - atomic_inc(&cfqd->oom_cfqq.ref); + cfqd->oom_cfqq.ref++; cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); INIT_LIST_HEAD(&cfqd->cic_list); diff --git a/block/genhd.c b/block/genhd.c index 5fa2b44a72ff..6a5b772aa201 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "blk.h" @@ -35,6 +36,10 @@ static DEFINE_IDR(ext_devt_idr); static struct device_type disk_type; +static void disk_add_events(struct gendisk *disk); +static void disk_del_events(struct gendisk *disk); +static void disk_release_events(struct gendisk *disk); + /** * disk_get_part - get partition * @disk: disk to look partition from @@ -239,7 +244,7 @@ static struct blk_major_name { } *major_names[BLKDEV_MAJOR_HASH_SIZE]; /* index in the above - for now: assume no multimajor ranges */ -static inline int major_to_index(int major) +static inline int major_to_index(unsigned major) { return major % BLKDEV_MAJOR_HASH_SIZE; } @@ -502,6 +507,64 @@ static int exact_lock(dev_t devt, void *data) return 0; } +void register_disk(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + struct block_device *bdev; + struct disk_part_iter piter; + struct hd_struct *part; + int err; + + ddev->parent = disk->driverfs_dev; + + dev_set_name(ddev, disk->disk_name); + + /* delay uevents, until we scanned partition table */ + dev_set_uevent_suppress(ddev, 1); + + if (device_add(ddev)) + return; + if (!sysfs_deprecated) { + err = sysfs_create_link(block_depr, &ddev->kobj, + kobject_name(&ddev->kobj)); + if (err) { + device_del(ddev); + return; + } + } + disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); + disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); + + /* No minors to use for partitions */ + if (!disk_partitionable(disk)) + goto exit; + + /* No such device (e.g., media were just removed) */ + if (!get_capacity(disk)) + goto exit; + + bdev = bdget_disk(disk, 0); + if (!bdev) + goto exit; + + bdev->bd_invalidated = 1; + err = blkdev_get(bdev, FMODE_READ, NULL); + if (err < 0) + goto exit; + blkdev_put(bdev, FMODE_READ); + +exit: + /* announce disk after possible partitions are created */ + dev_set_uevent_suppress(ddev, 0); + kobject_uevent(&ddev->kobj, KOBJ_ADD); + + /* announce possible partitions */ + disk_part_iter_init(&piter, disk, 0); + while ((part = disk_part_iter_next(&piter))) + kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); + disk_part_iter_exit(&piter); +} + /** * add_disk - add partitioning information to kernel list * @disk: per-device partitioning information @@ -551,18 +614,48 @@ void add_disk(struct gendisk *disk) retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); WARN_ON(retval); -} + disk_add_events(disk); +} EXPORT_SYMBOL(add_disk); -EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */ -void unlink_gendisk(struct gendisk *disk) +void del_gendisk(struct gendisk *disk) { + struct disk_part_iter piter; + struct hd_struct *part; + + disk_del_events(disk); + + /* invalidate stuff */ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); + while ((part = disk_part_iter_next(&piter))) { + invalidate_partition(disk, part->partno); + delete_partition(disk, part->partno); + } + disk_part_iter_exit(&piter); + + invalidate_partition(disk, 0); + blk_free_devt(disk_to_dev(disk)->devt); + set_capacity(disk, 0); + disk->flags &= ~GENHD_FL_UP; + sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); 
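With register_disk() and del_gendisk() now both living in genhd.c, the add/remove lifecycle a block driver sees is unchanged: add_disk() registers the device (and, with this series, its event infrastructure), del_gendisk() tears it down, put_disk() drops the allocation reference. A condensed sketch of that pairing for a hypothetical driver; mydrv_* names, the major number handling, and the fops are placeholders, and queue setup plus error handling are omitted:

#include <linux/blkdev.h>
#include <linux/genhd.h>

static int mydrv_major;		/* obtained earlier from register_blkdev(0, "mydrv") */

static const struct block_device_operations mydrv_fops = {
	.owner = THIS_MODULE,
};

static struct gendisk *mydrv_add(struct request_queue *q, sector_t nsectors)
{
	struct gendisk *disk;

	disk = alloc_disk(16);			/* room for 16 minors/partitions */
	if (!disk)
		return NULL;

	disk->major = mydrv_major;
	disk->first_minor = 0;
	disk->fops = &mydrv_fops;
	disk->queue = q;
	snprintf(disk->disk_name, sizeof(disk->disk_name), "mydrv0");
	set_capacity(disk, nsectors);

	add_disk(disk);				/* sysfs, partitions, events */
	return disk;
}

static void mydrv_remove(struct gendisk *disk)
{
	del_gendisk(disk);			/* undoes add_disk(), including events */
	put_disk(disk);				/* drops the alloc_disk() reference */
}
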
bdi_unregister(&disk->queue->backing_dev_info); blk_unregister_queue(disk); blk_unregister_region(disk_devt(disk), disk->minors); + + part_stat_set_all(&disk->part0, 0); + disk->part0.stamp = 0; + + kobject_put(disk->part0.holder_dir); + kobject_put(disk->slave_dir); + disk->driverfs_dev = NULL; + if (!sysfs_deprecated) + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); + device_del(disk_to_dev(disk)); } +EXPORT_SYMBOL(del_gendisk); /** * get_gendisk - get partitioning information for a given device @@ -735,7 +828,7 @@ static void *show_partition_start(struct seq_file *seqf, loff_t *pos) static void *p; p = disk_seqf_start(seqf, pos); - if (!IS_ERR(p) && p && !*pos) + if (!IS_ERR_OR_NULL(p) && !*pos) seq_puts(seqf, "major minor #blocks name\n\n"); return p; } @@ -1005,6 +1098,7 @@ static void disk_release(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); + disk_release_events(disk); kfree(disk->random); disk_replace_part_tbl(disk, NULL); free_part_stats(&disk->part0); @@ -1110,29 +1204,6 @@ static int __init proc_genhd_init(void) module_init(proc_genhd_init); #endif /* CONFIG_PROC_FS */ -static void media_change_notify_thread(struct work_struct *work) -{ - struct gendisk *gd = container_of(work, struct gendisk, async_notify); - char event[] = "MEDIA_CHANGE=1"; - char *envp[] = { event, NULL }; - - /* - * set enviroment vars to indicate which event this is for - * so that user space will know to go check the media status. - */ - kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp); - put_device(gd->driverfs_dev); -} - -#if 0 -void genhd_media_change_notify(struct gendisk *disk) -{ - get_device(disk->driverfs_dev); - schedule_work(&disk->async_notify); -} -EXPORT_SYMBOL_GPL(genhd_media_change_notify); -#endif /* 0 */ - dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); @@ -1193,13 +1264,13 @@ struct gendisk *alloc_disk_node(int minors, int node_id) } disk->part_tbl->part[0] = &disk->part0; + hd_ref_init(&disk->part0); + disk->minors = minors; rand_initialize_disk(disk); disk_to_dev(disk)->class = &block_class; disk_to_dev(disk)->type = &disk_type; device_initialize(disk_to_dev(disk)); - INIT_WORK(&disk->async_notify, - media_change_notify_thread); } return disk; } @@ -1291,3 +1362,422 @@ int invalidate_partition(struct gendisk *disk, int partno) } EXPORT_SYMBOL(invalidate_partition); + +/* + * Disk events - monitor disk events like media change and eject request. 
+ */ +struct disk_events { + struct list_head node; /* all disk_event's */ + struct gendisk *disk; /* the associated disk */ + spinlock_t lock; + + int block; /* event blocking depth */ + unsigned int pending; /* events already sent out */ + unsigned int clearing; /* events being cleared */ + + long poll_msecs; /* interval, -1 for default */ + struct delayed_work dwork; +}; + +static const char *disk_events_strs[] = { + [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change", + [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request", +}; + +static char *disk_uevents[] = { + [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1", + [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1", +}; + +/* list of all disk_events */ +static DEFINE_MUTEX(disk_events_mutex); +static LIST_HEAD(disk_events); + +/* disable in-kernel polling by default */ +static unsigned long disk_events_dfl_poll_msecs = 0; + +static unsigned long disk_events_poll_jiffies(struct gendisk *disk) +{ + struct disk_events *ev = disk->ev; + long intv_msecs = 0; + + /* + * If device-specific poll interval is set, always use it. If + * the default is being used, poll iff there are events which + * can't be monitored asynchronously. + */ + if (ev->poll_msecs >= 0) + intv_msecs = ev->poll_msecs; + else if (disk->events & ~disk->async_events) + intv_msecs = disk_events_dfl_poll_msecs; + + return msecs_to_jiffies(intv_msecs); +} + +static void __disk_block_events(struct gendisk *disk, bool sync) +{ + struct disk_events *ev = disk->ev; + unsigned long flags; + bool cancel; + + spin_lock_irqsave(&ev->lock, flags); + cancel = !ev->block++; + spin_unlock_irqrestore(&ev->lock, flags); + + if (cancel) { + if (sync) + cancel_delayed_work_sync(&disk->ev->dwork); + else + cancel_delayed_work(&disk->ev->dwork); + } +} + +static void __disk_unblock_events(struct gendisk *disk, bool check_now) +{ + struct disk_events *ev = disk->ev; + unsigned long intv; + unsigned long flags; + + spin_lock_irqsave(&ev->lock, flags); + + if (WARN_ON_ONCE(ev->block <= 0)) + goto out_unlock; + + if (--ev->block) + goto out_unlock; + + /* + * Not exactly a latency critical operation, set poll timer + * slack to 25% and kick event check. + */ + intv = disk_events_poll_jiffies(disk); + set_timer_slack(&ev->dwork.timer, intv / 4); + if (check_now) + queue_delayed_work(system_nrt_wq, &ev->dwork, 0); + else if (intv) + queue_delayed_work(system_nrt_wq, &ev->dwork, intv); +out_unlock: + spin_unlock_irqrestore(&ev->lock, flags); +} + +/** + * disk_block_events - block and flush disk event checking + * @disk: disk to block events for + * + * On return from this function, it is guaranteed that event checking + * isn't in progress and won't happen until unblocked by + * disk_unblock_events(). Events blocking is counted and the actual + * unblocking happens after the matching number of unblocks are done. + * + * Note that this intentionally does not block event checking from + * disk_clear_events(). + * + * CONTEXT: + * Might sleep. + */ +void disk_block_events(struct gendisk *disk) +{ + if (disk->ev) + __disk_block_events(disk, true); +} + +/** + * disk_unblock_events - unblock disk event checking + * @disk: disk to unblock events for + * + * Undo disk_block_events(). When the block count reaches zero, it + * starts events polling if configured. + * + * CONTEXT: + * Don't care. Safe to call from irq context. 
+ */ +void disk_unblock_events(struct gendisk *disk) +{ + if (disk->ev) + __disk_unblock_events(disk, true); +} + +/** + * disk_check_events - schedule immediate event checking + * @disk: disk to check events for + * + * Schedule immediate event checking on @disk if not blocked. + * + * CONTEXT: + * Don't care. Safe to call from irq context. + */ +void disk_check_events(struct gendisk *disk) +{ + if (disk->ev) { + __disk_block_events(disk, false); + __disk_unblock_events(disk, true); + } +} +EXPORT_SYMBOL_GPL(disk_check_events); + +/** + * disk_clear_events - synchronously check, clear and return pending events + * @disk: disk to fetch and clear events from + * @mask: mask of events to be fetched and clearted + * + * Disk events are synchronously checked and pending events in @mask + * are cleared and returned. This ignores the block count. + * + * CONTEXT: + * Might sleep. + */ +unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) +{ + const struct block_device_operations *bdops = disk->fops; + struct disk_events *ev = disk->ev; + unsigned int pending; + + if (!ev) { + /* for drivers still using the old ->media_changed method */ + if ((mask & DISK_EVENT_MEDIA_CHANGE) && + bdops->media_changed && bdops->media_changed(disk)) + return DISK_EVENT_MEDIA_CHANGE; + return 0; + } + + /* tell the workfn about the events being cleared */ + spin_lock_irq(&ev->lock); + ev->clearing |= mask; + spin_unlock_irq(&ev->lock); + + /* uncondtionally schedule event check and wait for it to finish */ + __disk_block_events(disk, true); + queue_delayed_work(system_nrt_wq, &ev->dwork, 0); + flush_delayed_work(&ev->dwork); + __disk_unblock_events(disk, false); + + /* then, fetch and clear pending events */ + spin_lock_irq(&ev->lock); + WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ + pending = ev->pending & mask; + ev->pending &= ~mask; + spin_unlock_irq(&ev->lock); + + return pending; +} + +static void disk_events_workfn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct disk_events *ev = container_of(dwork, struct disk_events, dwork); + struct gendisk *disk = ev->disk; + char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; + unsigned int clearing = ev->clearing; + unsigned int events; + unsigned long intv; + int nr_events = 0, i; + + /* check events */ + events = disk->fops->check_events(disk, clearing); + + /* accumulate pending events and schedule next poll if necessary */ + spin_lock_irq(&ev->lock); + + events &= ~ev->pending; + ev->pending |= events; + ev->clearing &= ~clearing; + + intv = disk_events_poll_jiffies(disk); + if (!ev->block && intv) + queue_delayed_work(system_nrt_wq, &ev->dwork, intv); + + spin_unlock_irq(&ev->lock); + + /* tell userland about new events */ + for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) + if (events & (1 << i)) + envp[nr_events++] = disk_uevents[i]; + + if (nr_events) + kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp); +} + +/* + * A disk events enabled device has the following sysfs nodes under + * its /sys/block/X/ directory. 
+ * + * events : list of all supported events + * events_async : list of events which can be detected w/o polling + * events_poll_msecs : polling interval, 0: disable, -1: system default + */ +static ssize_t __disk_events_show(unsigned int events, char *buf) +{ + const char *delim = ""; + ssize_t pos = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++) + if (events & (1 << i)) { + pos += sprintf(buf + pos, "%s%s", + delim, disk_events_strs[i]); + delim = " "; + } + if (pos) + pos += sprintf(buf + pos, "\n"); + return pos; +} + +static ssize_t disk_events_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return __disk_events_show(disk->events, buf); +} + +static ssize_t disk_events_async_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return __disk_events_show(disk->async_events, buf); +} + +static ssize_t disk_events_poll_msecs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%ld\n", disk->ev->poll_msecs); +} + +static ssize_t disk_events_poll_msecs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + long intv; + + if (!count || !sscanf(buf, "%ld", &intv)) + return -EINVAL; + + if (intv < 0 && intv != -1) + return -EINVAL; + + __disk_block_events(disk, true); + disk->ev->poll_msecs = intv; + __disk_unblock_events(disk, true); + + return count; +} + +static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL); +static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL); +static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR, + disk_events_poll_msecs_show, + disk_events_poll_msecs_store); + +static const struct attribute *disk_events_attrs[] = { + &dev_attr_events.attr, + &dev_attr_events_async.attr, + &dev_attr_events_poll_msecs.attr, + NULL, +}; + +/* + * The default polling interval can be specified by the kernel + * parameter block.events_dfl_poll_msecs which defaults to 0 + * (disable). This can also be modified runtime by writing to + * /sys/module/block/events_dfl_poll_msecs. + */ +static int disk_events_set_dfl_poll_msecs(const char *val, + const struct kernel_param *kp) +{ + struct disk_events *ev; + int ret; + + ret = param_set_ulong(val, kp); + if (ret < 0) + return ret; + + mutex_lock(&disk_events_mutex); + + list_for_each_entry(ev, &disk_events, node) + disk_check_events(ev->disk); + + mutex_unlock(&disk_events_mutex); + + return 0; +} + +static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = { + .set = disk_events_set_dfl_poll_msecs, + .get = param_get_ulong, +}; + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "block." + +module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops, + &disk_events_dfl_poll_msecs, 0644); + +/* + * disk_{add|del|release}_events - initialize and destroy disk_events. 
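Drivers opt into this mechanism by filling in the new ->check_events() hook and advertising which events they can report; polling, sysfs and uevent delivery are then handled by the code above. A hedged sketch of the driver side, with hypothetical mydrv_* names and a stubbed hardware query:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Placeholder for whatever hardware query the driver actually performs. */
static bool mydrv_media_changed(void *priv)
{
	return false;
}

static unsigned int mydrv_check_events(struct gendisk *disk,
				       unsigned int clearing)
{
	unsigned int events = 0;

	if (mydrv_media_changed(disk->private_data))
		events |= DISK_EVENT_MEDIA_CHANGE;

	return events;
}

static const struct block_device_operations mydrv_event_fops = {
	.owner		= THIS_MODULE,
	.check_events	= mydrv_check_events,
};

/* At probe time, before add_disk(): */
static void mydrv_setup_events(struct gendisk *disk)
{
	disk->fops   = &mydrv_event_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;	/* consumed by disk_add_events() */
}
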
+ */ +static void disk_add_events(struct gendisk *disk) +{ + struct disk_events *ev; + + if (!disk->fops->check_events || !(disk->events | disk->async_events)) + return; + + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) { + pr_warn("%s: failed to initialize events\n", disk->disk_name); + return; + } + + if (sysfs_create_files(&disk_to_dev(disk)->kobj, + disk_events_attrs) < 0) { + pr_warn("%s: failed to create sysfs files for events\n", + disk->disk_name); + kfree(ev); + return; + } + + disk->ev = ev; + + INIT_LIST_HEAD(&ev->node); + ev->disk = disk; + spin_lock_init(&ev->lock); + ev->block = 1; + ev->poll_msecs = -1; + INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); + + mutex_lock(&disk_events_mutex); + list_add_tail(&ev->node, &disk_events); + mutex_unlock(&disk_events_mutex); + + /* + * Block count is initialized to 1 and the following initial + * unblock kicks it into action. + */ + __disk_unblock_events(disk, true); +} + +static void disk_del_events(struct gendisk *disk) +{ + if (!disk->ev) + return; + + __disk_block_events(disk, true); + + mutex_lock(&disk_events_mutex); + list_del_init(&disk->ev->node); + mutex_unlock(&disk_events_mutex); + + sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs); +} + +static void disk_release_events(struct gendisk *disk) +{ + /* the block count should be 1 from disk_del_events() */ + WARN_ON_ONCE(disk->ev && disk->ev->block != 1); + kfree(disk->ev); +} diff --git a/block/ioctl.c b/block/ioctl.c index a9a302eba01e..9049d460fa89 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -294,11 +294,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -EINVAL; if (get_user(n, (int __user *) arg)) return -EFAULT; - if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0) + if (!(mode & FMODE_EXCL) && + blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) return -EBUSY; ret = set_blocksize(bdev, n); if (!(mode & FMODE_EXCL)) - bd_release(bdev); + blkdev_put(bdev, mode | FMODE_EXCL); return ret; case BLKPG: ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); diff --git a/crypto/Kconfig b/crypto/Kconfig index e4bac29a32e7..4b7cb0e691cd 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -110,7 +110,6 @@ config CRYPTO_MANAGER_DISABLE_TESTS config CRYPTO_GF128MUL tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" - depends on EXPERIMENTAL help Efficient table driven implementation of multiplications in the field GF(2^128). This is needed by some cypher modes. This @@ -539,8 +538,9 @@ config CRYPTO_AES_X86_64 config CRYPTO_AES_NI_INTEL tristate "AES cipher algorithms (AES-NI)" - depends on (X86 || UML_X86) && 64BIT - select CRYPTO_AES_X86_64 + depends on (X86 || UML_X86) + select CRYPTO_AES_X86_64 if 64BIT + select CRYPTO_AES_586 if !64BIT select CRYPTO_CRYPTD select CRYPTO_ALGAPI select CRYPTO_FPU @@ -563,9 +563,10 @@ config CRYPTO_AES_NI_INTEL See for more information. - In addition to AES cipher algorithm support, the - acceleration for some popular block cipher mode is supported - too, including ECB, CBC, CTR, LRW, PCBC, XTS. + In addition to AES cipher algorithm support, the acceleration + for some popular block cipher mode is supported too, including + ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional + acceleration for CTR. config CRYPTO_ANUBIS tristate "Anubis cipher algorithm" @@ -841,6 +842,27 @@ config CRYPTO_ANSI_CPRNG ANSI X9.31 A.2.4. 
Note that this option must be enabled if CRYPTO_FIPS is selected +config CRYPTO_USER_API + tristate + +config CRYPTO_USER_API_HASH + tristate "User-space interface for hash algorithms" + depends on NET + select CRYPTO_HASH + select CRYPTO_USER_API + help + This option enables the user-spaces interface for hash + algorithms. + +config CRYPTO_USER_API_SKCIPHER + tristate "User-space interface for symmetric key cipher algorithms" + depends on NET + select CRYPTO_BLKCIPHER + select CRYPTO_USER_API + help + This option enables the user-spaces interface for symmetric + key cipher algorithms. + source "drivers/crypto/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index 423b7de61f93..e9a399ca69db 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -3,32 +3,32 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-objs := api.o cipher.o compress.o +crypto-y := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o crypto_algapi-$(CONFIG_PROC_FS) += proc.o -crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y) +crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o -crypto_blkcipher-objs := ablkcipher.o -crypto_blkcipher-objs += blkcipher.o +crypto_blkcipher-y := ablkcipher.o +crypto_blkcipher-y += blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o -crypto_hash-objs += ahash.o -crypto_hash-objs += shash.o +crypto_hash-y += ahash.o +crypto_hash-y += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o -cryptomgr-objs := algboss.o testmgr.o +cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o @@ -85,6 +85,9 @@ obj-$(CONFIG_CRYPTO_RNG2) += krng.o obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o +obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o +obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o +obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o # # generic algorithms and the async_tx api diff --git a/crypto/af_alg.c b/crypto/af_alg.c new file mode 100644 index 000000000000..940d70cb5c25 --- /dev/null +++ b/crypto/af_alg.c @@ -0,0 +1,483 @@ +/* + * af_alg: User-space algorithm interface + * + * This file provides the user-space API for algorithms. + * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct alg_type_list { + const struct af_alg_type *type; + struct list_head list; +}; + +static atomic_long_t alg_memory_allocated; + +static struct proto alg_proto = { + .name = "ALG", + .owner = THIS_MODULE, + .memory_allocated = &alg_memory_allocated, + .obj_size = sizeof(struct alg_sock), +}; + +static LIST_HEAD(alg_types); +static DECLARE_RWSEM(alg_types_sem); + +static const struct af_alg_type *alg_get_type(const char *name) +{ + const struct af_alg_type *type = ERR_PTR(-ENOENT); + struct alg_type_list *node; + + down_read(&alg_types_sem); + list_for_each_entry(node, &alg_types, list) { + if (strcmp(node->type->name, name)) + continue; + + if (try_module_get(node->type->owner)) + type = node->type; + break; + } + up_read(&alg_types_sem); + + return type; +} + +int af_alg_register_type(const struct af_alg_type *type) +{ + struct alg_type_list *node; + int err = -EEXIST; + + down_write(&alg_types_sem); + list_for_each_entry(node, &alg_types, list) { + if (!strcmp(node->type->name, type->name)) + goto unlock; + } + + node = kmalloc(sizeof(*node), GFP_KERNEL); + err = -ENOMEM; + if (!node) + goto unlock; + + type->ops->owner = THIS_MODULE; + node->type = type; + list_add(&node->list, &alg_types); + err = 0; + +unlock: + up_write(&alg_types_sem); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_register_type); + +int af_alg_unregister_type(const struct af_alg_type *type) +{ + struct alg_type_list *node; + int err = -ENOENT; + + down_write(&alg_types_sem); + list_for_each_entry(node, &alg_types, list) { + if (strcmp(node->type->name, type->name)) + continue; + + list_del(&node->list); + kfree(node); + err = 0; + break; + } + up_write(&alg_types_sem); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_unregister_type); + +static void alg_do_release(const struct af_alg_type *type, void *private) +{ + if (!type) + return; + + type->release(private); + module_put(type->owner); +} + +int af_alg_release(struct socket *sock) +{ + if (sock->sk) + sock_put(sock->sk); + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_release); + +static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; + + if (sock->state == SS_CONNECTED) + return -EINVAL; + + if (addr_len != sizeof(*sa)) + return -EINVAL; + + sa->salg_type[sizeof(sa->salg_type) - 1] = 0; + sa->salg_name[sizeof(sa->salg_name) - 1] = 0; + + type = alg_get_type(sa->salg_type); + if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) { + request_module("algif-%s", sa->salg_type); + type = alg_get_type(sa->salg_type); + } + + if (IS_ERR(type)) + return PTR_ERR(type); + + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); + if (IS_ERR(private)) { + module_put(type->owner); + return PTR_ERR(private); + } + + lock_sock(sk); + + swap(ask->type, type); + swap(ask->private, private); + + release_sock(sk); + + alg_do_release(type, private); + + return 0; +} + +static int alg_setkey(struct sock *sk, char __user *ukey, + unsigned int keylen) +{ + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type = ask->type; + u8 *key; + int err; + + key = sock_kmalloc(sk, keylen, GFP_KERNEL); + if (!key) + return -ENOMEM; + + err = -EFAULT; + if (copy_from_user(key, ukey, keylen)) + goto out; + + err = type->setkey(ask->private, key, keylen); + +out: + sock_kfree_s(sk, key, keylen); + + 
return err; +} + +static int alg_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; + int err = -ENOPROTOOPT; + + lock_sock(sk); + type = ask->type; + + if (level != SOL_ALG || !type) + goto unlock; + + switch (optname) { + case ALG_SET_KEY: + if (sock->state == SS_CONNECTED) + goto unlock; + if (!type->setkey) + goto unlock; + + err = alg_setkey(sk, optval, optlen); + } + +unlock: + release_sock(sk); + + return err; +} + +int af_alg_accept(struct sock *sk, struct socket *newsock) +{ + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; + struct sock *sk2; + int err; + + lock_sock(sk); + type = ask->type; + + err = -EINVAL; + if (!type) + goto unlock; + + sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto); + err = -ENOMEM; + if (!sk2) + goto unlock; + + sock_init_data(newsock, sk2); + sock_graft(sk2, newsock); + + err = type->accept(ask->private, sk2); + if (err) { + sk_free(sk2); + goto unlock; + } + + sk2->sk_family = PF_ALG; + + sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + + newsock->ops = type->ops; + newsock->state = SS_CONNECTED; + + err = 0; + +unlock: + release_sock(sk); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_accept); + +static int alg_accept(struct socket *sock, struct socket *newsock, int flags) +{ + return af_alg_accept(sock->sk, newsock); +} + +static const struct proto_ops alg_proto_ops = { + .family = PF_ALG, + .owner = THIS_MODULE, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .getsockopt = sock_no_getsockopt, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .poll = sock_no_poll, + + .bind = alg_bind, + .release = af_alg_release, + .setsockopt = alg_setsockopt, + .accept = alg_accept, +}; + +static void alg_sock_destruct(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + + alg_do_release(ask->type, ask->private); +} + +static int alg_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + int err; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + if (protocol != 0) + return -EPROTONOSUPPORT; + + err = -ENOMEM; + sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto); + if (!sk) + goto out; + + sock->ops = &alg_proto_ops; + sock_init_data(sock, sk); + + sk->sk_family = PF_ALG; + sk->sk_destruct = alg_sock_destruct; + + return 0; +out: + return err; +} + +static const struct net_proto_family alg_family = { + .family = PF_ALG, + .create = alg_create, + .owner = THIS_MODULE, +}; + +int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, + int write) +{ + unsigned long from = (unsigned long)addr; + unsigned long npages; + unsigned off; + int err; + int i; + + err = -EFAULT; + if (!access_ok(write ? 
VERIFY_READ : VERIFY_WRITE, addr, len)) + goto out; + + off = from & ~PAGE_MASK; + npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (npages > ALG_MAX_PAGES) + npages = ALG_MAX_PAGES; + + err = get_user_pages_fast(from, npages, write, sgl->pages); + if (err < 0) + goto out; + + npages = err; + err = -EINVAL; + if (WARN_ON(npages == 0)) + goto out; + + err = 0; + + sg_init_table(sgl->sg, npages); + + for (i = 0; i < npages; i++) { + int plen = min_t(int, len, PAGE_SIZE - off); + + sg_set_page(sgl->sg + i, sgl->pages[i], plen, off); + + off = 0; + len -= plen; + err += plen; + } + +out: + return err; +} +EXPORT_SYMBOL_GPL(af_alg_make_sg); + +void af_alg_free_sg(struct af_alg_sgl *sgl) +{ + int i; + + i = 0; + do { + put_page(sgl->pages[i]); + } while (!sg_is_last(sgl->sg + (i++))); +} +EXPORT_SYMBOL_GPL(af_alg_free_sg); + +int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) +{ + struct cmsghdr *cmsg; + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + if (!CMSG_OK(msg, cmsg)) + return -EINVAL; + if (cmsg->cmsg_level != SOL_ALG) + continue; + + switch(cmsg->cmsg_type) { + case ALG_SET_IV: + if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) + return -EINVAL; + con->iv = (void *)CMSG_DATA(cmsg); + if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen + + sizeof(*con->iv))) + return -EINVAL; + break; + + case ALG_SET_OP: + if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) + return -EINVAL; + con->op = *(u32 *)CMSG_DATA(cmsg); + break; + + default: + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_cmsg_send); + +int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) +{ + switch (err) { + case -EINPROGRESS: + case -EBUSY: + wait_for_completion(&completion->completion); + INIT_COMPLETION(completion->completion); + err = completion->err; + break; + }; + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); + +void af_alg_complete(struct crypto_async_request *req, int err) +{ + struct af_alg_completion *completion = req->data; + + completion->err = err; + complete(&completion->completion); +} +EXPORT_SYMBOL_GPL(af_alg_complete); + +static int __init af_alg_init(void) +{ + int err = proto_register(&alg_proto, 0); + + if (err) + goto out; + + err = sock_register(&alg_family); + if (err != 0) + goto out_unregister_proto; + +out: + return err; + +out_unregister_proto: + proto_unregister(&alg_proto); + goto out; +} + +static void __exit af_alg_exit(void) +{ + sock_unregister(PF_ALG); + proto_unregister(&alg_proto); +} + +module_init(af_alg_init); +module_exit(af_alg_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(AF_ALG); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c new file mode 100644 index 000000000000..62122a1a2f7a --- /dev/null +++ b/crypto/algif_hash.c @@ -0,0 +1,319 @@ +/* + * algif_hash: User-space interface for hash algorithms + * + * This file provides the user-space API for hash algorithms. + * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
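From user space, the hash interface is reached through an AF_ALG socket: bind() selects the algorithm, accept() yields an operation descriptor, data is written with send() and the digest read back. A minimal sketch, assuming the new <linux/if_alg.h> header from this series is installed and a "sha1" provider is available; error handling is omitted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	send(opfd, "abc", 3, 0);		/* pass MSG_MORE while streaming data */
	read(opfd, digest, sizeof(digest));

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
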
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct hash_ctx { + struct af_alg_sgl sgl; + + u8 *result; + + struct af_alg_completion completion; + + unsigned int len; + bool more; + + struct ahash_request req; +}; + +static int hash_sendmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t ignored) +{ + int limit = ALG_MAX_PAGES * PAGE_SIZE; + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + unsigned long iovlen; + struct iovec *iov; + long copied = 0; + int err; + + if (limit > sk->sk_sndbuf) + limit = sk->sk_sndbuf; + + lock_sock(sk); + if (!ctx->more) { + err = crypto_ahash_init(&ctx->req); + if (err) + goto unlock; + } + + ctx->more = 0; + + for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; + iovlen--, iov++) { + unsigned long seglen = iov->iov_len; + char __user *from = iov->iov_base; + + while (seglen) { + int len = min_t(unsigned long, seglen, limit); + int newlen; + + newlen = af_alg_make_sg(&ctx->sgl, from, len, 0); + if (newlen < 0) + goto unlock; + + ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, + newlen); + + err = af_alg_wait_for_completion( + crypto_ahash_update(&ctx->req), + &ctx->completion); + + af_alg_free_sg(&ctx->sgl); + + if (err) + goto unlock; + + seglen -= newlen; + from += newlen; + copied += newlen; + } + } + + err = 0; + + ctx->more = msg->msg_flags & MSG_MORE; + if (!ctx->more) { + ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); + err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), + &ctx->completion); + } + +unlock: + release_sock(sk); + + return err ?: copied; +} + +static ssize_t hash_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + int err; + + lock_sock(sk); + sg_init_table(ctx->sgl.sg, 1); + sg_set_page(ctx->sgl.sg, page, size, offset); + + ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size); + + if (!(flags & MSG_MORE)) { + if (ctx->more) + err = crypto_ahash_finup(&ctx->req); + else + err = crypto_ahash_digest(&ctx->req); + } else { + if (!ctx->more) { + err = crypto_ahash_init(&ctx->req); + if (err) + goto unlock; + } + + err = crypto_ahash_update(&ctx->req); + } + + err = af_alg_wait_for_completion(err, &ctx->completion); + if (err) + goto unlock; + + ctx->more = flags & MSG_MORE; + +unlock: + release_sock(sk); + + return err ?: size; +} + +static int hash_recvmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); + int err; + + if (len > ds) + len = ds; + else if (len < ds) + msg->msg_flags |= MSG_TRUNC; + + lock_sock(sk); + if (ctx->more) { + ctx->more = 0; + ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); + err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), + &ctx->completion); + if (err) + goto unlock; + } + + err = memcpy_toiovec(msg->msg_iov, ctx->result, len); + +unlock: + release_sock(sk); + + return err ?: len; +} + +static int hash_accept(struct socket *sock, struct socket *newsock, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + struct ahash_request *req = &ctx->req; + char 
state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; + struct sock *sk2; + struct alg_sock *ask2; + struct hash_ctx *ctx2; + int err; + + err = crypto_ahash_export(req, state); + if (err) + return err; + + err = af_alg_accept(ask->parent, newsock); + if (err) + return err; + + sk2 = newsock->sk; + ask2 = alg_sk(sk2); + ctx2 = ask2->private; + ctx2->more = 1; + + err = crypto_ahash_import(&ctx2->req, state); + if (err) { + sock_orphan(sk2); + sock_put(sk2); + } + + return err; +} + +static struct proto_ops algif_hash_ops = { + .family = PF_ALG, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .getsockopt = sock_no_getsockopt, + .mmap = sock_no_mmap, + .bind = sock_no_bind, + .setsockopt = sock_no_setsockopt, + .poll = sock_no_poll, + + .release = af_alg_release, + .sendmsg = hash_sendmsg, + .sendpage = hash_sendpage, + .recvmsg = hash_recvmsg, + .accept = hash_accept, +}; + +static void *hash_bind(const char *name, u32 type, u32 mask) +{ + return crypto_alloc_ahash(name, type, mask); +} + +static void hash_release(void *private) +{ + crypto_free_ahash(private); +} + +static int hash_setkey(void *private, const u8 *key, unsigned int keylen) +{ + return crypto_ahash_setkey(private, key, keylen); +} + +static void hash_sock_destruct(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + + sock_kfree_s(sk, ctx->result, + crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); + sock_kfree_s(sk, ctx, ctx->len); + af_alg_release_parent(sk); +} + +static int hash_accept_parent(void *private, struct sock *sk) +{ + struct hash_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); + unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private); + unsigned ds = crypto_ahash_digestsize(private); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); + if (!ctx->result) { + sock_kfree_s(sk, ctx, len); + return -ENOMEM; + } + + memset(ctx->result, 0, ds); + + ctx->len = len; + ctx->more = 0; + af_alg_init_completion(&ctx->completion); + + ask->private = ctx; + + ahash_request_set_tfm(&ctx->req, private); + ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + + sk->sk_destruct = hash_sock_destruct; + + return 0; +} + +static const struct af_alg_type algif_type_hash = { + .bind = hash_bind, + .release = hash_release, + .setkey = hash_setkey, + .accept = hash_accept_parent, + .ops = &algif_hash_ops, + .name = "hash", + .owner = THIS_MODULE +}; + +static int __init algif_hash_init(void) +{ + return af_alg_register_type(&algif_type_hash); +} + +static void __exit algif_hash_exit(void) +{ + int err = af_alg_unregister_type(&algif_type_hash); + BUG_ON(err); +} + +module_init(algif_hash_init); +module_exit(algif_hash_exit); +MODULE_LICENSE("GPL"); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c new file mode 100644 index 000000000000..6a6dfc062d2a --- /dev/null +++ b/crypto/algif_skcipher.c @@ -0,0 +1,632 @@ +/* + * algif_skcipher: User-space interface for skcipher algorithms + * + * This file provides the user-space API for symmetric key ciphers. 
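The skcipher interface follows the same socket model as the hash one, but the key is installed with setsockopt(ALG_SET_KEY) and the operation and IV travel as ancillary data on sendmsg(). A rough single-block AES-CBC encryption sketch, assuming the "cbc(aes)" algorithm is present; the all-zero key/IV and the buffer names are placeholders and error handling is omitted:

#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 }, ivdata[16] = { 0 };
	unsigned char pt[16] = "single block...", ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *iv;
	struct iovec iov;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);			/* operation: encrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);			/* 16-byte IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	iv->ivlen = 16;
	memcpy(iv->iv, ivdata, sizeof(ivdata));

	iov.iov_base = pt;
	iov.iov_len = sizeof(pt);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));			/* ciphertext comes back on read */

	close(opfd);
	close(tfmfd);
	return 0;
}
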
+ * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct skcipher_sg_list { + struct list_head list; + + int cur; + + struct scatterlist sg[0]; +}; + +struct skcipher_ctx { + struct list_head tsgl; + struct af_alg_sgl rsgl; + + void *iv; + + struct af_alg_completion completion; + + unsigned used; + + unsigned int len; + bool more; + bool merge; + bool enc; + + struct ablkcipher_request req; +}; + +#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \ + sizeof(struct scatterlist) - 1) + +static inline int skcipher_sndbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - + ctx->used, 0); +} + +static inline bool skcipher_writable(struct sock *sk) +{ + return PAGE_SIZE <= skcipher_sndbuf(sk); +} + +static int skcipher_alloc_sgl(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct skcipher_sg_list *sgl; + struct scatterlist *sg = NULL; + + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + if (!list_empty(&ctx->tsgl)) + sg = sgl->sg; + + if (!sg || sgl->cur >= MAX_SGL_ENTS) { + sgl = sock_kmalloc(sk, sizeof(*sgl) + + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), + GFP_KERNEL); + if (!sgl) + return -ENOMEM; + + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); + sgl->cur = 0; + + if (sg) + scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); + + list_add_tail(&sgl->list, &ctx->tsgl); + } + + return 0; +} + +static void skcipher_pull_sgl(struct sock *sk, int used) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct skcipher_sg_list *sgl; + struct scatterlist *sg; + int i; + + while (!list_empty(&ctx->tsgl)) { + sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, + list); + sg = sgl->sg; + + for (i = 0; i < sgl->cur; i++) { + int plen = min_t(int, used, sg[i].length); + + if (!sg_page(sg + i)) + continue; + + sg[i].length -= plen; + sg[i].offset += plen; + + used -= plen; + ctx->used -= plen; + + if (sg[i].length) + return; + + put_page(sg_page(sg + i)); + sg_assign_page(sg + i, NULL); + } + + list_del(&sgl->list); + sock_kfree_s(sk, sgl, + sizeof(*sgl) + sizeof(sgl->sg[0]) * + (MAX_SGL_ENTS + 1)); + } + + if (!ctx->used) + ctx->merge = 0; +} + +static void skcipher_free_sgl(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + + skcipher_pull_sgl(sk, ctx->used); +} + +static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) +{ + long timeout; + DEFINE_WAIT(wait); + int err = -ERESTARTSYS; + + if (flags & MSG_DONTWAIT) + return -EAGAIN; + + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + + for (;;) { + if (signal_pending(current)) + break; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { + err = 0; + break; + } + } + finish_wait(sk_sleep(sk), &wait); + + return err; +} + +static void skcipher_wmem_wakeup(struct sock *sk) +{ + struct socket_wq *wq; + + if (!skcipher_writable(sk)) + return; + + rcu_read_lock(); + wq = 
rcu_dereference(sk->sk_wq); + if (wq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | + POLLRDNORM | + POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + rcu_read_unlock(); +} + +static int skcipher_wait_for_data(struct sock *sk, unsigned flags) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + long timeout; + DEFINE_WAIT(wait); + int err = -ERESTARTSYS; + + if (flags & MSG_DONTWAIT) { + return -EAGAIN; + } + + set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + + for (;;) { + if (signal_pending(current)) + break; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, ctx->used)) { + err = 0; + break; + } + } + finish_wait(sk_sleep(sk), &wait); + + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + + return err; +} + +static void skcipher_data_wakeup(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct socket_wq *wq; + + if (!ctx->used) + return; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (wq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | + POLLRDNORM | + POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + rcu_read_unlock(); +} + +static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t size) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); + unsigned ivsize = crypto_ablkcipher_ivsize(tfm); + struct skcipher_sg_list *sgl; + struct af_alg_control con = {}; + long copied = 0; + bool enc = 0; + int err; + int i; + + if (msg->msg_controllen) { + err = af_alg_cmsg_send(msg, &con); + if (err) + return err; + + switch (con.op) { + case ALG_OP_ENCRYPT: + enc = 1; + break; + case ALG_OP_DECRYPT: + enc = 0; + break; + default: + return -EINVAL; + } + + if (con.iv && con.iv->ivlen != ivsize) + return -EINVAL; + } + + err = -EINVAL; + + lock_sock(sk); + if (!ctx->more && ctx->used) + goto unlock; + + if (!ctx->used) { + ctx->enc = enc; + if (con.iv) + memcpy(ctx->iv, con.iv->iv, ivsize); + } + + while (size) { + struct scatterlist *sg; + unsigned long len = size; + int plen; + + if (ctx->merge) { + sgl = list_entry(ctx->tsgl.prev, + struct skcipher_sg_list, list); + sg = sgl->sg + sgl->cur - 1; + len = min_t(unsigned long, len, + PAGE_SIZE - sg->offset - sg->length); + + err = memcpy_fromiovec(page_address(sg_page(sg)) + + sg->offset + sg->length, + msg->msg_iov, len); + if (err) + goto unlock; + + sg->length += len; + ctx->merge = (sg->offset + sg->length) & + (PAGE_SIZE - 1); + + ctx->used += len; + copied += len; + size -= len; + continue; + } + + if (!skcipher_writable(sk)) { + err = skcipher_wait_for_wmem(sk, msg->msg_flags); + if (err) + goto unlock; + } + + len = min_t(unsigned long, len, skcipher_sndbuf(sk)); + + err = skcipher_alloc_sgl(sk); + if (err) + goto unlock; + + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + sg = sgl->sg; + do { + i = sgl->cur; + plen = min_t(int, len, PAGE_SIZE); + + sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); + err = -ENOMEM; + if (!sg_page(sg + i)) + goto unlock; + + err = memcpy_fromiovec(page_address(sg_page(sg + i)), + msg->msg_iov, plen); + if (err) { + __free_page(sg_page(sg + i)); + sg_assign_page(sg + i, NULL); + goto unlock; + } + + sg[i].length = plen; + len -= plen; + ctx->used += plen; + copied += 
plen; + size -= plen; + sgl->cur++; + } while (len && sgl->cur < MAX_SGL_ENTS); + + ctx->merge = plen & (PAGE_SIZE - 1); + } + + err = 0; + + ctx->more = msg->msg_flags & MSG_MORE; + if (!ctx->more && !list_empty(&ctx->tsgl)) + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + +unlock: + skcipher_data_wakeup(sk); + release_sock(sk); + + return copied ?: err; +} + +static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct skcipher_sg_list *sgl; + int err = -EINVAL; + + lock_sock(sk); + if (!ctx->more && ctx->used) + goto unlock; + + if (!size) + goto done; + + if (!skcipher_writable(sk)) { + err = skcipher_wait_for_wmem(sk, flags); + if (err) + goto unlock; + } + + err = skcipher_alloc_sgl(sk); + if (err) + goto unlock; + + ctx->merge = 0; + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + + get_page(page); + sg_set_page(sgl->sg + sgl->cur, page, size, offset); + sgl->cur++; + ctx->used += size; + +done: + ctx->more = flags & MSG_MORE; + if (!ctx->more && !list_empty(&ctx->tsgl)) + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + +unlock: + skcipher_data_wakeup(sk); + release_sock(sk); + + return err ?: size; +} + +static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t ignored, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( + &ctx->req)); + struct skcipher_sg_list *sgl; + struct scatterlist *sg; + unsigned long iovlen; + struct iovec *iov; + int err = -EAGAIN; + int used; + long copied = 0; + + lock_sock(sk); + for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; + iovlen--, iov++) { + unsigned long seglen = iov->iov_len; + char __user *from = iov->iov_base; + + while (seglen) { + sgl = list_first_entry(&ctx->tsgl, + struct skcipher_sg_list, list); + sg = sgl->sg; + + while (!sg->length) + sg++; + + used = ctx->used; + if (!used) { + err = skcipher_wait_for_data(sk, flags); + if (err) + goto unlock; + } + + used = min_t(unsigned long, used, seglen); + + used = af_alg_make_sg(&ctx->rsgl, from, used, 1); + err = used; + if (err < 0) + goto unlock; + + if (ctx->more || used < ctx->used) + used -= used % bs; + + err = -EINVAL; + if (!used) + goto free; + + ablkcipher_request_set_crypt(&ctx->req, sg, + ctx->rsgl.sg, used, + ctx->iv); + + err = af_alg_wait_for_completion( + ctx->enc ? 
+ crypto_ablkcipher_encrypt(&ctx->req) : + crypto_ablkcipher_decrypt(&ctx->req), + &ctx->completion); + +free: + af_alg_free_sg(&ctx->rsgl); + + if (err) + goto unlock; + + copied += used; + from += used; + seglen -= used; + skcipher_pull_sgl(sk, used); + } + } + + err = 0; + +unlock: + skcipher_wmem_wakeup(sk); + release_sock(sk); + + return copied ?: err; +} + + +static unsigned int skcipher_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + unsigned int mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; + + if (ctx->used) + mask |= POLLIN | POLLRDNORM; + + if (skcipher_writable(sk)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + + return mask; +} + +static struct proto_ops algif_skcipher_ops = { + .family = PF_ALG, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .getsockopt = sock_no_getsockopt, + .mmap = sock_no_mmap, + .bind = sock_no_bind, + .accept = sock_no_accept, + .setsockopt = sock_no_setsockopt, + + .release = af_alg_release, + .sendmsg = skcipher_sendmsg, + .sendpage = skcipher_sendpage, + .recvmsg = skcipher_recvmsg, + .poll = skcipher_poll, +}; + +static void *skcipher_bind(const char *name, u32 type, u32 mask) +{ + return crypto_alloc_ablkcipher(name, type, mask); +} + +static void skcipher_release(void *private) +{ + crypto_free_ablkcipher(private); +} + +static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) +{ + return crypto_ablkcipher_setkey(private, key, keylen); +} + +static void skcipher_sock_destruct(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); + + skcipher_free_sgl(sk); + sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); + sock_kfree_s(sk, ctx, ctx->len); + af_alg_release_parent(sk); +} + +static int skcipher_accept_parent(void *private, struct sock *sk) +{ + struct skcipher_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); + unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), + GFP_KERNEL); + if (!ctx->iv) { + sock_kfree_s(sk, ctx, len); + return -ENOMEM; + } + + memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); + + INIT_LIST_HEAD(&ctx->tsgl); + ctx->len = len; + ctx->used = 0; + ctx->more = 0; + ctx->merge = 0; + ctx->enc = 0; + af_alg_init_completion(&ctx->completion); + + ask->private = ctx; + + ablkcipher_request_set_tfm(&ctx->req, private); + ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + + sk->sk_destruct = skcipher_sock_destruct; + + return 0; +} + +static const struct af_alg_type algif_type_skcipher = { + .bind = skcipher_bind, + .release = skcipher_release, + .setkey = skcipher_setkey, + .accept = skcipher_accept_parent, + .ops = &algif_skcipher_ops, + .name = "skcipher", + .owner = THIS_MODULE +}; + +static int __init algif_skcipher_init(void) +{ + return af_alg_register_type(&algif_type_skcipher); +} + +static void __exit algif_skcipher_exit(void) +{ + int err = af_alg_unregister_type(&algif_type_skcipher); + BUG_ON(err); +} + +module_init(algif_skcipher_init); +module_exit(algif_skcipher_exit); 
+MODULE_LICENSE("GPL"); diff --git a/crypto/authenc.c b/crypto/authenc.c index a5a22cfcd07b..5ef7ba6b6a76 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -107,20 +107,6 @@ badkey: goto out; } -static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, - int chain) -{ - if (chain) { - head->length += sg->length; - sg = scatterwalk_sg_next(sg); - } - - if (sg) - scatterwalk_sg_chain(head, 2, sg); - else - sg_mark_end(head); -} - static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, int err) { @@ -345,7 +331,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, if (ivsize) { sg_init_table(cipher, 2); sg_set_buf(cipher, iv, ivsize); - authenc_chain(cipher, dst, vdst == iv + ivsize); + scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); dst = cipher; cryptlen += ivsize; } @@ -354,7 +340,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); - authenc_chain(asg, dst, 0); + scatterwalk_crypto_chain(asg, dst, 0, 2); dst = asg; cryptlen += req->assoclen; } @@ -499,7 +485,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, if (ivsize) { sg_init_table(cipher, 2); sg_set_buf(cipher, iv, ivsize); - authenc_chain(cipher, src, vsrc == iv + ivsize); + scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); src = cipher; cryptlen += ivsize; } @@ -508,7 +494,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); - authenc_chain(asg, src, 0); + scatterwalk_crypto_chain(asg, src, 0, 2); src = asg; cryptlen += req->assoclen; } diff --git a/crypto/cast5.c b/crypto/cast5.c index a1d2294b50ad..4a230ddec877 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c @@ -604,36 +604,23 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) * Rounds 3, 6, 9, 12, and 15 use f function Type 3. 
*/ + t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); + t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); + t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); + t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); + t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); + t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); + t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); + t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); + t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); + t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); + t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); + t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); if (!(c->rr)) { - t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); - t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); - t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); - t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); - t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); - t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); - t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); - t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); - t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); - t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); - t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); - t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); - } else { - t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); - t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); - t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); - t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); - t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); - t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); - t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); - t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); - t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); - t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); - t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); - t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); } /* c1...c64 <-- (R16,L16). 
(Exchange final blocks L16, R16 and @@ -663,32 +650,19 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); - t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); - t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); - t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); - t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); - t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); - t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); - t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); - t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); - t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); - t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); - t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); - t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); - } else { - t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); - t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); - t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); - t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); - t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); - t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); - t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); - t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); - t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); - t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); - t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); - t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); } + t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); + t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); + t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); + t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); + t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); + t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); + t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); + t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); + t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); + t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); + t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); + t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); dst[0] = cpu_to_be32(r); dst[1] = cpu_to_be32(l); diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c index fdcf6248f152..b980ee1af459 100644 --- a/crypto/crypto_wq.c +++ b/crypto/crypto_wq.c @@ -20,7 +20,8 @@ EXPORT_SYMBOL_GPL(kcrypto_wq); static int __init crypto_wq_init(void) { - kcrypto_wq = create_workqueue("crypto"); + kcrypto_wq = alloc_workqueue("crypto", + WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); if (unlikely(!kcrypto_wq)) return -ENOMEM; return 0; diff --git a/crypto/deflate.c b/crypto/deflate.c index 463dc859aa05..cbc7a33a9600 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -48,12 +48,11 @@ static int deflate_comp_init(struct deflate_ctx *ctx) int ret = 0; struct z_stream_s *stream = &ctx->comp_stream; - stream->workspace = vmalloc(zlib_deflate_workspacesize()); + stream->workspace = vzalloc(zlib_deflate_workspacesize()); if (!stream->workspace) { ret = -ENOMEM; goto out; } - memset(stream->workspace, 0, zlib_deflate_workspacesize()); ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, Z_DEFAULT_STRATEGY); diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index 3ca3b669d5d5..42ce9f570aec 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c @@ -62,20 +62,6 @@ out: skcipher_givcrypt_complete(req, err); } -static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg, - int chain) -{ - if (chain) { - head->length += sg->length; - sg = scatterwalk_sg_next(sg); - } - - if (sg) - scatterwalk_sg_chain(head, 2, sg); - else - sg_mark_end(head); -} - static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) { struct 
crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -124,13 +110,13 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) sg_init_table(reqctx->src, 2); sg_set_buf(reqctx->src, giv, ivsize); - eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize); + scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2); dst = reqctx->src; if (osrc != odst) { sg_init_table(reqctx->dst, 2); sg_set_buf(reqctx->dst, giv, ivsize); - eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize); + scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2); dst = reqctx->dst; } diff --git a/crypto/gcm.c b/crypto/gcm.c index 2f5fbba6576c..1a252639ef91 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -1102,21 +1102,6 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, return crypto_aead_setauthsize(ctx->child, authsize); } -/* this is the same as crypto_authenc_chain */ -static void crypto_rfc4543_chain(struct scatterlist *head, - struct scatterlist *sg, int chain) -{ - if (chain) { - head->length += sg->length; - sg = scatterwalk_sg_next(sg); - } - - if (sg) - scatterwalk_sg_chain(head, 2, sg); - else - sg_mark_end(head); -} - static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, int enc) { @@ -1154,13 +1139,13 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, sg_init_table(payload, 2); sg_set_buf(payload, req->iv, 8); - crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8); + scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); sg_init_table(assoc, 2); sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, req->assoc->offset); - crypto_rfc4543_chain(assoc, payload, 0); + scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 75586f1f86e7..29a89dad68b6 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -455,7 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, get_online_cpus(); - pcrypt->wq = create_workqueue(name); + pcrypt->wq = alloc_workqueue(name, + WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); if (!pcrypt->wq) goto err; diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 1ceb6735aa53..8a0f68b7f257 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -5,7 +5,7 @@ * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * - * Copyright (c) 2008 Adrian-Ken Rueegsegger + * Copyright (c) 2008 Adrian-Ken Rueegsegger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -325,4 +325,5 @@ module_init(rmd128_mod_init); module_exit(rmd128_mod_fini); MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); diff --git a/crypto/rmd160.c b/crypto/rmd160.c index 472261fc913f..525d7bb752cf 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -5,7 +5,7 @@ * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * - * Copyright (c) 2008 Adrian-Ken Rueegsegger + * Copyright (c) 2008 Adrian-Ken Rueegsegger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -369,4 +369,5 @@ module_init(rmd160_mod_init); module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Adrian-Ken 
Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 72eafa8d2e7b..69293d9b56e0 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c @@ -5,7 +5,7 @@ * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * - * Copyright (c) 2008 Adrian-Ken Rueegsegger + * Copyright (c) 2008 Adrian-Ken Rueegsegger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -344,4 +344,5 @@ module_init(rmd256_mod_init); module_exit(rmd256_mod_fini); MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); diff --git a/crypto/rmd320.c b/crypto/rmd320.c index 86becaba2f05..09f97dfdfbba 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c @@ -5,7 +5,7 @@ * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * - * Copyright (c) 2008 Adrian-Ken Rueegsegger + * Copyright (c) 2008 Adrian-Ken Rueegsegger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -393,4 +393,5 @@ module_init(rmd320_mod_init); module_exit(rmd320_mod_fini); MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); diff --git a/crypto/shash.c b/crypto/shash.c index 22fd9433141f..76f74b963151 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -310,7 +310,13 @@ static int shash_async_export(struct ahash_request *req, void *out) static int shash_async_import(struct ahash_request *req, const void *in) { - return crypto_shash_import(ahash_request_ctx(req), in); + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + desc->flags = req->base.flags; + + return crypto_shash_import(desc, in); } static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 3ca68f9fc14d..9aac5e58be94 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -8,6 +8,13 @@ * Copyright (c) 2002 Jean-Francois Dive * Copyright (c) 2007 Nokia Siemens Networks * + * Updated RFC4106 AES-GCM testing. + * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) + * Adrian Hoban + * Gabriele Paoloni + * Tadeusz Struk (tadeusz.struk@intel.com) + * Copyright (c) 2010, Intel Corporation. + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) @@ -980,6 +987,10 @@ static int do_test(int m) ret += tcrypt_test("ansi_cprng"); break; + case 151: + ret += tcrypt_test("rfc4106(gcm(aes))"); + break; + case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index fa8c8f78c8d4..27ea9fe9476f 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -6,6 +6,13 @@ * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu * + * Updated RFC4106 AES-GCM testing. + * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) + * Adrian Hoban + * Gabriele Paoloni + * Tadeusz Struk (tadeusz.struk@intel.com) + * Copyright (c) 2010, Intel Corporation. 
+ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) @@ -2242,6 +2249,23 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "rfc4106(gcm(aes))", + .test = alg_test_aead, + .suite = { + .aead = { + .enc = { + .vecs = aes_gcm_rfc4106_enc_tv_template, + .count = AES_GCM_4106_ENC_TEST_VECTORS + }, + .dec = { + .vecs = aes_gcm_rfc4106_dec_tv_template, + .count = AES_GCM_4106_DEC_TEST_VECTORS + } + } + } + }, { + + .alg = "rfc4309(ccm(aes))", .test = alg_test_aead, .fips_allowed = 1, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 74e35377fd30..834af7f2adee 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -6,6 +6,15 @@ * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu * + * Updated RFC4106 AES-GCM testing. Some test vectors were taken from + * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/ + * gcm/gcm-test-vectors.tar.gz + * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) + * Adrian Hoban + * Gabriele Paoloni + * Tadeusz Struk (tadeusz.struk@intel.com) + * Copyright (c) 2010, Intel Corporation. + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) @@ -2947,6 +2956,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_CTR_3686_DEC_TEST_VECTORS 6 #define AES_GCM_ENC_TEST_VECTORS 9 #define AES_GCM_DEC_TEST_VECTORS 8 +#define AES_GCM_4106_ENC_TEST_VECTORS 7 +#define AES_GCM_4106_DEC_TEST_VECTORS 7 #define AES_CCM_ENC_TEST_VECTORS 7 #define AES_CCM_DEC_TEST_VECTORS 7 #define AES_CCM_4309_ENC_TEST_VECTORS 7 @@ -5829,6 +5840,356 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { } }; +static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 20, + .iv = zeroed_string, + .input = zeroed_string, + .ilen = 16, + .assoc = zeroed_string, + .alen = 8, + .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" + "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" + "\x97\xFE\x4C\x23\x37\x42\x01\xE0" + "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", + .rlen = 32, + },{ + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = zeroed_string, + .ilen = 16, + .assoc = zeroed_string, + .alen = 8, + .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" + "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" + "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" + "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", + .rlen = 32, + + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = zeroed_string, + .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .ilen = 16, + .assoc = zeroed_string, + .alen = 8, + .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" + "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" + "\xB1\x68\xFD\x14\x52\x64\x61\xB2", + .rlen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = zeroed_string, + .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .ilen = 16, + .assoc = 
"\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" + "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" + "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", + .rlen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .ilen = 16, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" + "\x64\x50\xF9\x32\x13\xFB\x74\x61" + "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", + .rlen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .ilen = 64, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" + "\x98\x14\xA1\x42\x37\x80\xFD\x90" + "\x68\x12\x01\xA8\x91\x89\xB9\x83" + "\x5B\x11\x77\x12\x9B\xFF\x24\x89" + "\x94\x5F\x18\x12\xBA\x27\x09\x39" + "\x99\x96\x76\x42\x15\x1C\xCD\xCB" + "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" + "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" + "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", + .rlen = 80, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" + "\x00\x00\x00\x00", + .input = "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .ilen = 192, + .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa", + .alen = 12, + .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" + "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" + "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" + "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" + "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" + "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" + "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" + "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" + "\xCF\x07\x57\x41\x67\xD0\xC4\x42" + "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" + "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" + "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" + "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" + "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" + "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" + "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" + "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" + 
"\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" + "\x7E\x13\x06\x82\x08\x17\xA4\x35" + "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" + "\xA3\x05\x38\x95\x20\x1A\x47\x04" + "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" + "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" + "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" + "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" + "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", + .rlen = 208, + } +}; + +static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 20, + .iv = zeroed_string, + .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" + "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" + "\x97\xFE\x4C\x23\x37\x42\x01\xE0" + "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", + .ilen = 32, + .assoc = zeroed_string, + .alen = 8, + .result = zeroed_string, + .rlen = 16, + + },{ + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" + "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" + "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" + "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", + .ilen = 32, + .assoc = zeroed_string, + .alen = 8, + .result = zeroed_string, + .rlen = 16, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = zeroed_string, + .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" + "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" + "\xB1\x68\xFD\x14\x52\x64\x61\xB2", + .ilen = 32, + .assoc = zeroed_string, + .alen = 8, + .result = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .rlen = 16, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = zeroed_string, + .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" + "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" + "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", + .ilen = 32, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .rlen = 16, + + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" + "\x64\x50\xF9\x32\x13\xFB\x74\x61" + "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", + .ilen = 32, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .rlen = 16, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00", + .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" + "\x98\x14\xA1\x42\x37\x80\xFD\x90" + "\x68\x12\x01\xA8\x91\x89\xB9\x83" + "\x5B\x11\x77\x12\x9B\xFF\x24\x89" + "\x94\x5F\x18\x12\xBA\x27\x09\x39" + "\x99\x96\x76\x42\x15\x1C\xCD\xCB" + "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" + "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" + "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", + .ilen = 80, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", + .alen = 8, + .result = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + 
"\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .rlen = 64, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x00\x00\x00\x00", + .klen = 20, + .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" + "\x00\x00\x00\x00", + .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" + "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" + "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" + "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" + "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" + "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" + "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" + "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" + "\xCF\x07\x57\x41\x67\xD0\xC4\x42" + "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" + "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" + "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" + "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" + "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" + "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" + "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" + "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" + "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" + "\x7E\x13\x06\x82\x08\x17\xA4\x35" + "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" + "\xA3\x05\x38\x95\x20\x1A\x47\x04" + "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" + "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" + "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" + "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" + "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", + .ilen = 208, + .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa", + .alen = 12, + .result = "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .rlen = 192, + + } +}; + static struct aead_testvec aes_ccm_enc_tv_template[] = { { /* From RFC 3610 */ .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" diff --git a/crypto/zlib.c b/crypto/zlib.c index c3015733c990..739b8fca4cea 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c @@ -95,11 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, zlib_comp_exit(ctx); workspacesize = zlib_deflate_workspacesize(); - stream->workspace = vmalloc(workspacesize); + stream->workspace = vzalloc(workspacesize); if (!stream->workspace) return -ENOMEM; - memset(stream->workspace, 0, workspacesize); ret = zlib_deflateInit2(stream, tb[ZLIB_COMP_LEVEL] ? 
nla_get_u32(tb[ZLIB_COMP_LEVEL]) diff --git a/drivers/Kconfig b/drivers/Kconfig index 3d93b3a3d630..9bfb71ff3a6a 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -26,6 +26,8 @@ source "drivers/ata/Kconfig" source "drivers/md/Kconfig" +source "drivers/target/Kconfig" + source "drivers/message/fusion/Kconfig" source "drivers/firewire/Kconfig" @@ -88,6 +90,8 @@ source "drivers/memstick/Kconfig" source "drivers/leds/Kconfig" +source "drivers/nfc/Kconfig" + source "drivers/accessibility/Kconfig" source "drivers/infiniband/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index bf15ce7493d2..7eb35f479461 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -40,12 +40,13 @@ obj-$(CONFIG_FB_INTEL) += video/intelfb/ obj-y += serial/ obj-$(CONFIG_PARPORT) += parport/ -obj-y += base/ block/ misc/ mfd/ +obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ obj-$(CONFIG_ATA) += ata/ +obj-$(CONFIG_TARGET_CORE) += target/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ obj-y += net/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 3f3489c5ca8c..10c7ad59c0e1 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -51,12 +51,7 @@ config ACPI_PROCFS For backwards compatibility, this option allows deprecated /proc/acpi/ files to exist, even when they have been replaced by functions in /sys. - The deprecated files (and their replacements) include: - /proc/acpi/processor/*/throttling (/sys/class/thermal/ - cooling_device*/*) - /proc/acpi/video/*/brightness (/sys/class/backlight/) - /proc/acpi/thermal_zone/*/* (/sys/class/thermal/) This option has no effect on /proc/acpi/ files and functions which do not yet exist in /sys. @@ -74,6 +69,8 @@ config ACPI_PROCFS_POWER /proc/acpi/ac_adapter/* (sys/class/power_supply/*) This option has no effect on /proc/acpi/ directories and functions, which do not yet exist in /sys + This option, together with the proc directories, will be + deleted in 2.6.39. Say N to delete power /proc/acpi/ directories that have moved to /sys/ @@ -209,6 +206,17 @@ config ACPI_PROCESSOR To compile this driver as a module, choose M here: the module will be called processor. +config ACPI_IPMI + tristate "IPMI" + depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER + default n + help + This driver enables ACPI to access the BMC controller. It + uses IPMI request/response messages to communicate with the BMC + controller, which can be found on the server. + + To compile this driver as a module, choose M here: + the module will be called acpi_ipmi. 
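As a rough illustration of what the ACPI_IPMI help text above describes, the sketch below shows how a kernel-side consumer might ask the IPMI layer whether a given interface is described by ACPI. It mirrors the ipmi_register_bmc() callback in the acpi_ipmi.c driver added below; the example_probe_ipmi_iface() wrapper and the exact header locations are illustrative assumptions, while ipmi_get_smi_info(), struct ipmi_smi_info, SI_ACPI and the required put_device() call are taken from this series.

/* Hedged sketch, not part of the patch; header locations are assumed. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>

/* Hypothetical helper: report whether IPMI interface @iface is ACPI-backed. */
static int example_probe_ipmi_iface(int iface)
{
	struct ipmi_smi_info smi_data;
	int err;

	err = ipmi_get_smi_info(iface, &smi_data);
	if (err)
		return err;

	if (smi_data.addr_src != SI_ACPI) {
		/* Not described by an ACPI object; drop the reference and bail. */
		put_device(smi_data.dev);
		return -ENODEV;
	}

	/*
	 * smi_data.addr_info.acpi_info.acpi_handle now identifies the ACPI
	 * node for the BMC; acpi_ipmi.c uses it to install the IPMI
	 * opregion handler.
	 */
	put_device(smi_data.dev);
	return 0;
}
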
config ACPI_HOTPLUG_CPU bool diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 3d031d02e54b..d113fa5100b2 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -24,7 +24,7 @@ acpi-y += atomicio.o # sleep related files acpi-y += wakeup.o acpi-y += sleep.o -acpi-$(CONFIG_ACPI_SLEEP) += proc.o +acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o # @@ -69,5 +69,6 @@ processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o +obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o obj-$(CONFIG_ACPI_APEI) += apei/ diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 25d3aaebc10d..58c3f74bd84c 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -197,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device) { struct proc_dir_entry *entry = NULL; - + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); if (!acpi_device_dir(device)) { acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_ac_dir); diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c new file mode 100644 index 000000000000..f40acef80269 --- /dev/null +++ b/drivers/acpi/acpi_ipmi.c @@ -0,0 +1,525 @@ +/* + * acpi_ipmi.c - ACPI IPMI opregion + * + * Copyright (C) 2010 Intel Corporation + * Copyright (C) 2010 Zhao Yakui + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Zhao Yakui"); +MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); +MODULE_LICENSE("GPL"); + +#define IPMI_FLAGS_HANDLER_INSTALL 0 + +#define ACPI_IPMI_OK 0 +#define ACPI_IPMI_TIMEOUT 0x10 +#define ACPI_IPMI_UNKNOWN 0x07 +/* the IPMI timeout is 5s */ +#define IPMI_TIMEOUT (5 * HZ) + +struct acpi_ipmi_device { + /* the device list attached to driver_data.ipmi_devices */ + struct list_head head; + /* the IPMI request message list */ + struct list_head tx_msg_list; + struct mutex tx_msg_lock; + acpi_handle handle; + struct pnp_dev *pnp_dev; + ipmi_user_t user_interface; + int ipmi_ifnum; /* IPMI interface number */ + long curr_msgid; + unsigned long flags; + struct ipmi_smi_info smi_data; +}; + +struct ipmi_driver_data { + struct list_head ipmi_devices; + struct ipmi_smi_watcher bmc_events; + struct ipmi_user_hndl ipmi_hndlrs; + struct mutex ipmi_lock; +}; + +struct acpi_ipmi_msg { + struct list_head head; + /* + * General speaking the addr type should be SI_ADDR_TYPE. And + * the addr channel should be BMC. + * In fact it can also be IPMB type. 
But we will have to + * parse it from the Netfn command buffer. It is so complex + * that it is skipped. + */ + struct ipmi_addr addr; + long tx_msgid; + /* it is used to track whether the IPMI message is finished */ + struct completion tx_complete; + struct kernel_ipmi_msg tx_message; + int msg_done; + /* tx data . And copy it from ACPI object buffer */ + u8 tx_data[64]; + int tx_len; + u8 rx_data[64]; + int rx_len; + struct acpi_ipmi_device *device; +}; + +/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ +struct acpi_ipmi_buffer { + u8 status; + u8 length; + u8 data[64]; +}; + +static void ipmi_register_bmc(int iface, struct device *dev); +static void ipmi_bmc_gone(int iface); +static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); +static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); +static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); + +static struct ipmi_driver_data driver_data = { + .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), + .bmc_events = { + .owner = THIS_MODULE, + .new_smi = ipmi_register_bmc, + .smi_gone = ipmi_bmc_gone, + }, + .ipmi_hndlrs = { + .ipmi_recv_hndl = ipmi_msg_handler, + }, +}; + +static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) +{ + struct acpi_ipmi_msg *ipmi_msg; + struct pnp_dev *pnp_dev = ipmi->pnp_dev; + + ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); + if (!ipmi_msg) { + dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n"); + return NULL; + } + init_completion(&ipmi_msg->tx_complete); + INIT_LIST_HEAD(&ipmi_msg->head); + ipmi_msg->device = ipmi; + return ipmi_msg; +} + +#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) +#define IPMI_OP_RGN_CMD(offset) (offset & 0xff) +static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, + acpi_physical_address address, + acpi_integer *value) +{ + struct kernel_ipmi_msg *msg; + struct acpi_ipmi_buffer *buffer; + struct acpi_ipmi_device *device; + + msg = &tx_msg->tx_message; + /* + * IPMI network function and command are encoded in the address + * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. + */ + msg->netfn = IPMI_OP_RGN_NETFN(address); + msg->cmd = IPMI_OP_RGN_CMD(address); + msg->data = tx_msg->tx_data; + /* + * value is the parameter passed by the IPMI opregion space handler. + * It points to the IPMI request message buffer + */ + buffer = (struct acpi_ipmi_buffer *)value; + /* copy the tx message data */ + msg->data_len = buffer->length; + memcpy(tx_msg->tx_data, buffer->data, msg->data_len); + /* + * now the default type is SYSTEM_INTERFACE and channel type is BMC. + * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, + * the addr type should be changed to IPMB. Then we will have to parse + * the IPMI request message buffer to get the IPMB address. + * If so, please fix me. + */ + tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + tx_msg->addr.channel = IPMI_BMC_CHANNEL; + tx_msg->addr.data[0] = 0; + + /* Get the msgid */ + device = tx_msg->device; + mutex_lock(&device->tx_msg_lock); + device->curr_msgid++; + tx_msg->tx_msgid = device->curr_msgid; + mutex_unlock(&device->tx_msg_lock); +} + +static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, + acpi_integer *value, int rem_time) +{ + struct acpi_ipmi_buffer *buffer; + + /* + * value is also used as output parameter. It represents the response + * IPMI message returned by IPMI command. 
+ */ + buffer = (struct acpi_ipmi_buffer *)value; + if (!rem_time && !msg->msg_done) { + buffer->status = ACPI_IPMI_TIMEOUT; + return; + } + /* + * If the flag of msg_done is not set or the recv length is zero, it + * means that the IPMI command is not executed correctly. + * The status code will be ACPI_IPMI_UNKNOWN. + */ + if (!msg->msg_done || !msg->rx_len) { + buffer->status = ACPI_IPMI_UNKNOWN; + return; + } + /* + * If the IPMI response message is obtained correctly, the status code + * will be ACPI_IPMI_OK + */ + buffer->status = ACPI_IPMI_OK; + buffer->length = msg->rx_len; + memcpy(buffer->data, msg->rx_data, msg->rx_len); +} + +static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) +{ + struct acpi_ipmi_msg *tx_msg, *temp; + int count = HZ / 10; + struct pnp_dev *pnp_dev = ipmi->pnp_dev; + + list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { + /* wake up the sleep thread on the Tx msg */ + complete(&tx_msg->tx_complete); + } + + /* wait for about 100ms to flush the tx message list */ + while (count--) { + if (list_empty(&ipmi->tx_msg_list)) + break; + schedule_timeout(1); + } + if (!list_empty(&ipmi->tx_msg_list)) + dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n"); +} + +static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) +{ + struct acpi_ipmi_device *ipmi_device = user_msg_data; + int msg_found = 0; + struct acpi_ipmi_msg *tx_msg; + struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; + + if (msg->user != ipmi_device->user_interface) { + dev_warn(&pnp_dev->dev, "Unexpected response is returned. " + "returned user %p, expected user %p\n", + msg->user, ipmi_device->user_interface); + ipmi_free_recv_msg(msg); + return; + } + mutex_lock(&ipmi_device->tx_msg_lock); + list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { + if (msg->msgid == tx_msg->tx_msgid) { + msg_found = 1; + break; + } + } + + mutex_unlock(&ipmi_device->tx_msg_lock); + if (!msg_found) { + dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " + "returned.\n", msg->msgid); + ipmi_free_recv_msg(msg); + return; + } + + if (msg->msg.data_len) { + /* copy the response data to Rx_data buffer */ + memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len); + tx_msg->rx_len = msg->msg.data_len; + tx_msg->msg_done = 1; + } + complete(&tx_msg->tx_complete); + ipmi_free_recv_msg(msg); +}; + +static void ipmi_register_bmc(int iface, struct device *dev) +{ + struct acpi_ipmi_device *ipmi_device, *temp; + struct pnp_dev *pnp_dev; + ipmi_user_t user; + int err; + struct ipmi_smi_info smi_data; + acpi_handle handle; + + err = ipmi_get_smi_info(iface, &smi_data); + + if (err) + return; + + if (smi_data.addr_src != SI_ACPI) { + put_device(smi_data.dev); + return; + } + + handle = smi_data.addr_info.acpi_info.acpi_handle; + + mutex_lock(&driver_data.ipmi_lock); + list_for_each_entry(temp, &driver_data.ipmi_devices, head) { + /* + * if the corresponding ACPI handle is already added + * to the device list, don't add it again. 
+ */ + if (temp->handle == handle) + goto out; + } + + ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); + + if (!ipmi_device) + goto out; + + pnp_dev = to_pnp_dev(smi_data.dev); + ipmi_device->handle = handle; + ipmi_device->pnp_dev = pnp_dev; + + err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, + ipmi_device, &user); + if (err) { + dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n"); + kfree(ipmi_device); + goto out; + } + acpi_add_ipmi_device(ipmi_device); + ipmi_device->user_interface = user; + ipmi_device->ipmi_ifnum = iface; + mutex_unlock(&driver_data.ipmi_lock); + memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); + return; + +out: + mutex_unlock(&driver_data.ipmi_lock); + put_device(smi_data.dev); + return; +} + +static void ipmi_bmc_gone(int iface) +{ + struct acpi_ipmi_device *ipmi_device, *temp; + + mutex_lock(&driver_data.ipmi_lock); + list_for_each_entry_safe(ipmi_device, temp, + &driver_data.ipmi_devices, head) { + if (ipmi_device->ipmi_ifnum != iface) + continue; + + acpi_remove_ipmi_device(ipmi_device); + put_device(ipmi_device->smi_data.dev); + kfree(ipmi_device); + break; + } + mutex_unlock(&driver_data.ipmi_lock); +} +/* -------------------------------------------------------------------------- + * Address Space Management + * -------------------------------------------------------------------------- */ +/* + * This is the IPMI opregion space handler. + * @function: indicates the read/write. In fact as the IPMI message is driven + * by command, only write is meaningful. + * @address: This contains the netfn/command of IPMI request message. + * @bits : not used. + * @value : it is an in/out parameter. It points to the IPMI message buffer. + * Before the IPMI message is sent, it represents the actual request + * IPMI message. After the IPMI message is finished, it represents + * the response IPMI message returned by IPMI command. + * @handler_context: IPMI device context. + */ + +static acpi_status +acpi_ipmi_space_handler(u32 function, acpi_physical_address address, + u32 bits, acpi_integer *value, + void *handler_context, void *region_context) +{ + struct acpi_ipmi_msg *tx_msg; + struct acpi_ipmi_device *ipmi_device = handler_context; + int err, rem_time; + acpi_status status; + /* + * IPMI opregion message. + * IPMI message is firstly written to the BMC and system software + * can get the respsonse. So it is unmeaningful for the read access + * of IPMI opregion. 
+ */ + if ((function & ACPI_IO_MASK) == ACPI_READ) + return AE_TYPE; + + if (!ipmi_device->user_interface) + return AE_NOT_EXIST; + + tx_msg = acpi_alloc_ipmi_msg(ipmi_device); + if (!tx_msg) + return AE_NO_MEMORY; + + acpi_format_ipmi_msg(tx_msg, address, value); + mutex_lock(&ipmi_device->tx_msg_lock); + list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); + mutex_unlock(&ipmi_device->tx_msg_lock); + err = ipmi_request_settime(ipmi_device->user_interface, + &tx_msg->addr, + tx_msg->tx_msgid, + &tx_msg->tx_message, + NULL, 0, 0, 0); + if (err) { + status = AE_ERROR; + goto end_label; + } + rem_time = wait_for_completion_timeout(&tx_msg->tx_complete, + IPMI_TIMEOUT); + acpi_format_ipmi_response(tx_msg, value, rem_time); + status = AE_OK; + +end_label: + mutex_lock(&ipmi_device->tx_msg_lock); + list_del(&tx_msg->head); + mutex_unlock(&ipmi_device->tx_msg_lock); + kfree(tx_msg); + return status; +} + +static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi) +{ + if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) + return; + + acpi_remove_address_space_handler(ipmi->handle, + ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler); + + clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); +} + +static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi) +{ + acpi_status status; + + if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) + return 0; + + status = acpi_install_address_space_handler(ipmi->handle, + ACPI_ADR_SPACE_IPMI, + &acpi_ipmi_space_handler, + NULL, ipmi); + if (ACPI_FAILURE(status)) { + struct pnp_dev *pnp_dev = ipmi->pnp_dev; + dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " + "handle\n"); + return -EINVAL; + } + set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); + return 0; +} + +static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) +{ + + INIT_LIST_HEAD(&ipmi_device->head); + + mutex_init(&ipmi_device->tx_msg_lock); + INIT_LIST_HEAD(&ipmi_device->tx_msg_list); + ipmi_install_space_handler(ipmi_device); + + list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); +} + +static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device) +{ + /* + * If the IPMI user interface is created, it should be + * destroyed. + */ + if (ipmi_device->user_interface) { + ipmi_destroy_user(ipmi_device->user_interface); + ipmi_device->user_interface = NULL; + } + /* flush the Tx_msg list */ + if (!list_empty(&ipmi_device->tx_msg_list)) + ipmi_flush_tx_msg(ipmi_device); + + list_del(&ipmi_device->head); + ipmi_remove_space_handler(ipmi_device); +} + +static int __init acpi_ipmi_init(void) +{ + int result = 0; + + if (acpi_disabled) + return result; + + mutex_init(&driver_data.ipmi_lock); + + result = ipmi_smi_watcher_register(&driver_data.bmc_events); + + return result; +} + +static void __exit acpi_ipmi_exit(void) +{ + struct acpi_ipmi_device *ipmi_device, *temp; + + if (acpi_disabled) + return; + + ipmi_smi_watcher_unregister(&driver_data.bmc_events); + + /* + * When one smi_watcher is unregistered, it is only deleted + * from the smi_watcher list. But the smi_gone callback function + * is not called. So explicitly uninstall the ACPI IPMI oregion + * handler and free it. 
+ */ + mutex_lock(&driver_data.ipmi_lock); + list_for_each_entry_safe(ipmi_device, temp, + &driver_data.ipmi_devices, head) { + acpi_remove_ipmi_device(ipmi_device); + put_device(ipmi_device->smi_data.dev); + kfree(ipmi_device); + } + mutex_unlock(&driver_data.ipmi_lock); +} + +module_init(acpi_ipmi_init); +module_exit(acpi_ipmi_exit); diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index a7e1d1aa4107..eec2eadd2431 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ evmisc.o evrgnini.o evxface.o evxfregn.o \ - evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o + evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index a6f99cc37a19..70e0b28801aa 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void); acpi_status acpi_ev_install_xrupt_handlers(void); -acpi_status acpi_ev_install_fadt_gpes(void); - u32 acpi_ev_fixed_event_detect(void); /* @@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); -acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); +acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); -acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); +acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number); @@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, struct acpi_gpe_block_info *gpe_block); +acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info); + /* * evgpeblk - Upper-level GPE block support */ @@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, acpi_status acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, - void *ignored); + void *context); acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); u32 -acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, +acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, + struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number); /* @@ -126,10 +127,6 @@ acpi_status acpi_ev_match_gpe_method(acpi_handle obj_handle, u32 level, void *context, void **return_value); -acpi_status -acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, - u32 level, void *context, void **return_value); - /* * evgpeutil - GPE utilities */ @@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); +acpi_status +acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, void *context); + struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ad88fcae4eb9..0e4dba0d0325 
100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running; extern u32 acpi_gbl_nesting_level; +ACPI_EXTERN u32 acpi_gpe_count; +ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; + /* Support for dynamic control method tracing mechanism */ ACPI_EXTERN u32 acpi_gbl_original_dbg_level; @@ -225,8 +228,10 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_present; */ ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ +ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */ #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock +#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock /***************************************************************************** * @@ -370,7 +375,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; ACPI_EXTERN struct acpi_gpe_block_info *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; -ACPI_EXTERN u8 acpi_all_gpes_initialized; +ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; +ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; +ACPI_EXTERN void *acpi_gbl_global_event_handler_context; /***************************************************************************** * diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 167470ad2d21..258d628793ea 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, struct acpi_gpe_register_info *gpe_register_info); acpi_status -acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action); acpi_status acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 2ceb0c05b2d7..74000f5b7dab 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -408,17 +408,18 @@ struct acpi_predefined_data { /* Dispatch info for each GPE -- either a method or handler, cannot be both */ -struct acpi_handler_info { - acpi_event_handler address; /* Address of handler, if any */ +struct acpi_gpe_handler_info { + acpi_gpe_handler address; /* Address of handler, if any */ void *context; /* Context to be passed to handler */ struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ - u8 orig_flags; /* Original misc info about this GPE */ - u8 orig_enabled; /* Set if the GPE was originally enabled */ + u8 original_flags; /* Original (pre-handler) GPE info */ + u8 originally_enabled; /* True if GPE was originally enabled */ }; union acpi_gpe_dispatch_info { struct acpi_namespace_node *method_node; /* Method node for this GPE level */ - struct acpi_handler_info *handler; + struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ + struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ }; /* @@ -458,7 +459,7 @@ struct acpi_gpe_block_info { u32 register_count; /* Number of register pairs in block */ u16 gpe_count; /* Number of individual GPEs in block */ u8 block_base_number; /* Base GPE number for this block */ - u8 initialized; /* If set, the GPE block has been initialized */ + u8 
initialized; /* TRUE if this block is initialized */ }; /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index bdbfaf22bd14..962a3ccff6fd 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -93,7 +93,7 @@ #define AOPOBJ_AML_CONSTANT 0x01 /* Integer is an AML constant */ #define AOPOBJ_STATIC_POINTER 0x02 /* Data is part of an ACPI table, don't delete */ -#define AOPOBJ_DATA_VALID 0x04 /* Object is intialized and data is valid */ +#define AOPOBJ_DATA_VALID 0x04 /* Object is initialized and data is valid */ #define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */ #define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */ #define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */ diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index c61c3039c31a..e5e313c663a5 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void) status_bit_mask) && (fixed_enable & acpi_gbl_fixed_event_info[i]. enable_bit_mask)) { + /* + * Found an active (signalled) event. Invoke global event + * handler if present. + */ + acpi_fixed_event_count[i]++; + if (acpi_gbl_global_event_handler) { + acpi_gbl_global_event_handler + (ACPI_EVENT_TYPE_FIXED, NULL, i, + acpi_gbl_global_event_handler_context); + } - /* Found an active (signalled) event */ - acpi_os_fixed_event_count(i); int_status |= acpi_ev_fixed_event_dispatch(i); } } diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index f226eac314db..7c339d34ab42 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe") /* Local prototypes */ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); +static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); + /******************************************************************************* * * FUNCTION: acpi_ev_update_gpe_enable_mask @@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) * * RETURN: Status * - * DESCRIPTION: Clear the given GPE from stale events and enable it. + * DESCRIPTION: Clear a GPE of stale events and enable it. * ******************************************************************************/ acpi_status @@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) ACPI_FUNCTION_TRACE(ev_enable_gpe); /* - * We will only allow a GPE to be enabled if it has either an - * associated method (_Lxx/_Exx) or a handler. Otherwise, the - * GPE will be immediately disabled by acpi_ev_gpe_dispatch the - * first time it fires. + * We will only allow a GPE to be enabled if it has either an associated + * method (_Lxx/_Exx) or a handler, or is using the implicit notify + * feature. Otherwise, the GPE will be immediately disabled by + * acpi_ev_gpe_dispatch the first time it fires. 
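The evevent.c hunk above drops the acpi_os_fixed_event_count() callout in favour of a generic acpi_fixed_event_count[] array plus an optional global event handler invoked for every signalled fixed event. The gating itself is unchanged: an event is dispatched only when both its status bit and its enable bit are set. A small standalone sketch of that gating-and-hook pattern (plain C, names illustrative, not the ACPICA code):

#include <stdio.h>

#define NUM_FIXED_EVENTS 5

/* Optional global hook, mirroring acpi_gbl_global_event_handler. */
static void (*global_event_handler)(unsigned event, void *context);
static void *global_event_context;
static unsigned event_count[NUM_FIXED_EVENTS];

static void count_events(unsigned event, void *context)
{
	(void)context;
	printf("fixed event %u signalled\n", event);
}

/* Dispatch every event whose status AND enable bits are both set. */
static void fixed_event_detect(unsigned status_reg, unsigned enable_reg)
{
	unsigned i;

	for (i = 0; i < NUM_FIXED_EVENTS; i++) {
		unsigned bit = 1u << i;

		if ((status_reg & bit) && (enable_reg & bit)) {
			event_count[i]++;
			if (global_event_handler)
				global_event_handler(i, global_event_context);
		}
	}
}

int main(void)
{
	global_event_handler = count_events;
	fixed_event_detect(0x05, 0x04); /* only event 2 is set AND enabled */
	printf("event 2 count = %u\n", event_count[2]);
	return 0;
}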
*/ - if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == + ACPI_GPE_DISPATCH_NONE) { return_ACPI_STATUS(AE_NO_HANDLER); } @@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) /******************************************************************************* * - * FUNCTION: acpi_raw_enable_gpe + * FUNCTION: acpi_ev_add_gpe_reference * - * PARAMETERS: gpe_event_info - GPE to enable + * PARAMETERS: gpe_event_info - Add a reference to this GPE * * RETURN: Status * @@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) * ******************************************************************************/ -acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) +acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; + ACPI_FUNCTION_TRACE(ev_add_gpe_reference); + if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { + + /* Enable on first reference */ + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); @@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) /******************************************************************************* * - * FUNCTION: acpi_raw_disable_gpe + * FUNCTION: acpi_ev_remove_gpe_reference * - * PARAMETERS: gpe_event_info - GPE to disable + * PARAMETERS: gpe_event_info - Remove a reference to this GPE * * RETURN: Status * @@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) * ******************************************************************************/ -acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; + ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); + if (!gpe_event_info->runtime_count) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { + + /* Disable on last reference */ + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, @@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, - "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", + "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", gpe_register_info->base_gpe_number, status_reg, enable_reg)); @@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) * or method. */ int_status |= - acpi_ev_gpe_dispatch(&gpe_block-> + acpi_ev_gpe_dispatch(gpe_block-> + node, + &gpe_block-> event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } @@ -435,17 +450,25 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) * an interrupt handler. 
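acpi_ev_add_gpe_reference() and acpi_ev_remove_gpe_reference() above (the renamed acpi_raw_enable_gpe()/acpi_raw_disable_gpe()) make the runtime enable state reference counted: the hardware is touched only on the 0 -> 1 and 1 -> 0 transitions, and an unbalanced or overflowing call returns AE_LIMIT. A standalone userspace sketch of that counting discipline, not the ACPICA code itself:

#include <stdio.h>
#include <limits.h>

/* Illustrative stand-ins for the hardware enable/disable writes. */
static void hw_enable(void)  { puts("GPE enabled in hardware"); }
static void hw_disable(void) { puts("GPE disabled in hardware"); }

static unsigned char runtime_count; /* mirrors gpe_event_info->runtime_count */

static int add_reference(void)
{
	if (runtime_count == UCHAR_MAX)
		return -1;              /* AE_LIMIT in ACPICA */
	if (++runtime_count == 1)
		hw_enable();            /* enable on first reference */
	return 0;
}

static int remove_reference(void)
{
	if (runtime_count == 0)
		return -1;              /* AE_LIMIT: unbalanced call */
	if (--runtime_count == 0)
		hw_disable();           /* disable on last reference */
	return 0;
}

int main(void)
{
	add_reference();    /* enables the GPE */
	add_reference();    /* no hardware access */
	remove_reference(); /* no hardware access */
	remove_reference(); /* disables the GPE */
	return 0;
}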
* ******************************************************************************/ -static void acpi_ev_asynch_enable_gpe(void *context); static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) { - struct acpi_gpe_event_info *gpe_event_info = (void *)context; + struct acpi_gpe_event_info *gpe_event_info = context; acpi_status status; - struct acpi_gpe_event_info local_gpe_event_info; + struct acpi_gpe_event_info *local_gpe_event_info; struct acpi_evaluate_info *info; ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); + /* Allocate a local GPE block */ + + local_gpe_event_info = + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); + if (!local_gpe_event_info) { + ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); + return_VOID; + } + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_VOID; @@ -462,7 +485,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) * Take a snapshot of the GPE info for this level - we copy the info to * prevent a race condition with remove_handler/remove_block. */ - ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, + ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, sizeof(struct acpi_gpe_event_info)); status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); @@ -470,12 +493,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) return_VOID; } - /* - * Must check for control method type dispatch one more time to avoid a - * race with ev_gpe_install_handler - */ - if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == - ACPI_GPE_DISPATCH_METHOD) { + /* Do the correct dispatch - normal method or implicit notify */ + + switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { + case ACPI_GPE_DISPATCH_NOTIFY: + + /* + * Implicit notify. + * Dispatch a DEVICE_WAKE notify to the appropriate handler. + * NOTE: the request is queued for execution after this method + * completes. The notify handlers are NOT invoked synchronously + * from this thread -- because handlers may in turn run other + * control methods. + */ + status = + acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. + device_node, + ACPI_NOTIFY_DEVICE_WAKE); + break; + + case ACPI_GPE_DISPATCH_METHOD: /* Allocate the evaluation information block */ @@ -488,7 +525,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) * control method that corresponds to this GPE */ info->prefix_node = - local_gpe_event_info.dispatch.method_node; + local_gpe_event_info->dispatch.method_node; info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info); @@ -499,46 +536,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) ACPI_EXCEPTION((AE_INFO, status, "while evaluating GPE method [%4.4s]", acpi_ut_get_node_name - (local_gpe_event_info.dispatch. + (local_gpe_event_info->dispatch. 
method_node))); } + + break; + + default: + return_VOID; /* Should never happen */ } + /* Defer enabling of GPE until all notify handlers are done */ - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, - gpe_event_info); + + status = acpi_os_execute(OSL_NOTIFY_HANDLER, + acpi_ev_asynch_enable_gpe, + local_gpe_event_info); + if (ACPI_FAILURE(status)) { + ACPI_FREE(local_gpe_event_info); + } return_VOID; } -static void acpi_ev_asynch_enable_gpe(void *context) + +/******************************************************************************* + * + * FUNCTION: acpi_ev_asynch_enable_gpe + * + * PARAMETERS: Context (gpe_event_info) - Info for this GPE + * Callback from acpi_os_execute + * + * RETURN: None + * + * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to + * complete (i.e., finish execution of Notify) + * + ******************************************************************************/ + +static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) { struct acpi_gpe_event_info *gpe_event_info = context; + + (void)acpi_ev_finish_gpe(gpe_event_info); + + ACPI_FREE(gpe_event_info); + return; +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_finish_gpe + * + * PARAMETERS: gpe_event_info - Info for this GPE + * + * RETURN: Status + * + * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution + * of a GPE method or a synchronous or asynchronous GPE handler. + * + ******************************************************************************/ + +acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ acpi_status status; + if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) { /* - * GPE is level-triggered, we clear the GPE status bit after handling - * the event. + * GPE is level-triggered, we clear the GPE status bit after + * handling the event. */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { - return_VOID; + return (status); } } /* - * Enable this GPE, conditionally. This means that the GPE will only be - * physically enabled if the enable_for_run bit is set in the event_info + * Enable this GPE, conditionally. This means that the GPE will + * only be physically enabled if the enable_for_run bit is set + * in the event_info. */ - (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); - - return_VOID; + (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); + return (AE_OK); } + /******************************************************************************* * * FUNCTION: acpi_ev_gpe_dispatch * - * PARAMETERS: gpe_event_info - Info for this GPE + * PARAMETERS: gpe_device - Device node. 
NULL for GPE0/GPE1 + * gpe_event_info - Info for this GPE * gpe_number - Number relative to the parent GPE block * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED @@ -551,13 +640,22 @@ static void acpi_ev_asynch_enable_gpe(void *context) ******************************************************************************/ u32 -acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) +acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, + struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) { acpi_status status; + u32 return_value; ACPI_FUNCTION_TRACE(ev_gpe_dispatch); - acpi_os_gpe_count(gpe_number); + /* Invoke global event handler if present */ + + acpi_gpe_count++; + if (acpi_gbl_global_event_handler) { + acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, + gpe_number, + acpi_gbl_global_event_handler_context); + } /* * If edge-triggered, clear the GPE status bit now. Note that @@ -568,59 +666,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, - "Unable to clear GPE[0x%2X]", - gpe_number)); + "Unable to clear GPE%02X", gpe_number)); return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); } } /* - * Dispatch the GPE to either an installed handler, or the control method - * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke - * it and do not attempt to run the method. If there is neither a handler - * nor a method, we disable this GPE to prevent further such pointless - * events from firing. + * Always disable the GPE so that it does not keep firing before + * any asynchronous activity completes (either from the execution + * of a GPE method or an asynchronous GPE handler.) + * + * If there is no handler or method to run, just disable the + * GPE and leave it disabled permanently to prevent further such + * pointless events from firing. + */ + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "Unable to disable GPE%02X", gpe_number)); + return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); + } + + /* + * Dispatch the GPE to either an installed handler or the control + * method associated with this GPE (_Lxx or _Exx). If a handler + * exists, we invoke it and do not attempt to run the method. + * If there is neither a handler nor a method, leave the GPE + * disabled. */ switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { case ACPI_GPE_DISPATCH_HANDLER: - /* - * Invoke the installed handler (at interrupt level) - * Ignore return status for now. - * TBD: leave GPE disabled on error? - */ - (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> - dispatch. - handler-> - context); + /* Invoke the installed handler (at interrupt level) */ - /* It is now safe to clear level-triggered events. 
*/ + return_value = + gpe_event_info->dispatch.handler->address(gpe_device, + gpe_number, + gpe_event_info-> + dispatch.handler-> + context); - if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == - ACPI_GPE_LEVEL_TRIGGERED) { - status = acpi_hw_clear_gpe(gpe_event_info); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Unable to clear GPE[0x%2X]", - gpe_number)); - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); - } + /* If requested, clear (if level-triggered) and reenable the GPE */ + + if (return_value & ACPI_REENABLE_GPE) { + (void)acpi_ev_finish_gpe(gpe_event_info); } break; case ACPI_GPE_DISPATCH_METHOD: - - /* - * Disable the GPE, so it doesn't keep firing before the method has a - * chance to run (it runs asynchronously with interrupts enabled). - */ - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Unable to disable GPE[0x%2X]", - gpe_number)); - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); - } + case ACPI_GPE_DISPATCH_NOTIFY: /* * Execute the method associated with the GPE @@ -631,7 +725,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, - "Unable to queue handler for GPE[0x%2X] - event disabled", + "Unable to queue handler for GPE%2X - event disabled", gpe_number)); } break; @@ -644,20 +738,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) * a GPE to be enabled if it has no handler or method. */ ACPI_ERROR((AE_INFO, - "No handler or method for GPE[0x%2X], disabling event", + "No handler or method for GPE%02X, disabling event", gpe_number)); - /* - * Disable the GPE. The GPE will remain disabled a handler - * is installed or ACPICA is restarted. - */ - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Unable to disable GPE[0x%2X]", - gpe_number)); - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); - } break; } diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 020add3eee1c..9acb86958c09 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, gpe_block->node = gpe_device; gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); + gpe_block->initialized = FALSE; gpe_block->register_count = register_count; gpe_block->block_base_number = gpe_block_base_number; - gpe_block->initialized = FALSE; ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, sizeof(struct acpi_generic_address)); @@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, return_ACPI_STATUS(status); } - acpi_all_gpes_initialized = FALSE; + acpi_gbl_all_gpes_initialized = FALSE; /* Find all GPE methods (_Lxx or_Exx) for this block */ @@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, * * FUNCTION: acpi_ev_initialize_gpe_block * - * PARAMETERS: gpe_device - Handle to the parent GPE block - * gpe_block - Gpe Block info + * PARAMETERS: acpi_gpe_callback * * RETURN: Status * - * DESCRIPTION: Initialize and enable a GPE block. First find and run any - * _PRT methods associated with the block, then enable the - * appropriate GPEs. + * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have + * associated methods. * Note: Assumes namespace is locked. 
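Taken together with the acpi_install_gpe_handler() changes further down in evxface.c, the dispatch code above defines the new driver-facing contract: the GPE is disabled before the handler runs, the handler now receives the GPE device and number, and it asks ACPICA to clear and re-enable the GPE by setting ACPI_REENABLE_GPE in its return value. A hedged sketch of a handler written to that contract; the trigger type to pass and the exact flag values should be checked against the ACPICA headers for the device in question:

#include <linux/acpi.h>

/*
 * Sketch of a driver GPE handler under the reworked dispatch: the GPE
 * arrives disabled; returning ACPI_REENABLE_GPE tells acpi_ev_gpe_dispatch()
 * to run acpi_ev_finish_gpe() (clear if level-triggered, then conditionally
 * re-enable) on the handler's behalf.
 */
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	/* ... acknowledge the device event here ... */

	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}

static acpi_status example_install(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	status = acpi_install_gpe_handler(gpe_device, gpe_number,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  example_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Take a runtime reference so the GPE is actually enabled. */
	return acpi_enable_gpe(gpe_device, gpe_number);
}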
* ******************************************************************************/ @@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); /* - * Ignore a null GPE block (e.g., if no GPE block 1 exists) and - * GPE blocks that have been initialized already. + * Ignore a null GPE block (e.g., if no GPE block 1 exists), and + * any GPE blocks that have been initialized already. */ if (!gpe_block || gpe_block->initialized) { return_ACPI_STATUS(AE_OK); @@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* * Enable all GPEs that have a corresponding method and have the - * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must - * be enabled via the acpi_enable_gpe() interface. + * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block + * must be enabled via the acpi_enable_gpe() interface. */ gpe_enabled_count = 0; @@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; gpe_event_info = &gpe_block->event_info[gpe_index]; - /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ - - if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) + /* + * Ignore GPEs that have no corresponding _Lxx/_Exx method + * and GPEs that are used to wake the system + */ + if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == + ACPI_GPE_DISPATCH_NONE) + || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) + == ACPI_GPE_DISPATCH_HANDLER) || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { continue; } - status = acpi_raw_enable_gpe(gpe_event_info); + status = acpi_ev_add_gpe_reference(gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 4c8dea513b66..c59dc2340593 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -45,11 +45,27 @@ #include "accommon.h" #include "acevents.h" #include "acnamesp.h" -#include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evgpeinit") +/* + * Note: History of _PRW support in ACPICA + * + * Originally (2000 - 2010), the GPE initialization code performed a walk of + * the entire namespace to execute the _PRW methods and detect all GPEs + * capable of waking the system. + * + * As of 10/2010, the _PRW method execution has been removed since it is + * actually unnecessary. The host OS must in fact execute all _PRW methods + * in order to identify the device/power-resource dependencies. We now put + * the onus on the host OS to identify the wake GPEs as part of this process + * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This + * not only reduces the complexity of the ACPICA initialization code, but in + * some cases (on systems with very large namespaces) it should reduce the + * kernel boot time as well. + */ + /******************************************************************************* * * FUNCTION: acpi_ev_gpe_initialize @@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) acpi_status status = AE_OK; /* - * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. + * Find any _Lxx/_Exx GPE methods that have just been loaded. * * Any GPEs that correspond to new _Lxx/_Exx methods are immediately * enabled. 
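The matching of newly loaded methods to GPEs relies on the standard _Lxx/_Exx naming convention described in the following hunks: L means level-triggered, E means edge-triggered, and xx is the GPE number in hex. A small standalone sketch of that name parsing (illustrative only; the real work is done by acpi_ev_match_gpe_method()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Parse a GPE method name of the form _Lxx/_Exx into its trigger
 * type and GPE number.
 */
static int parse_gpe_method(const char *name, int *level_triggered,
			    unsigned long *gpe_number)
{
	char *end;

	if (strlen(name) != 4 || name[0] != '_')
		return -1;

	if (name[1] == 'L')
		*level_triggered = 1;
	else if (name[1] == 'E')
		*level_triggered = 0;
	else
		return -1;

	*gpe_number = strtoul(name + 2, &end, 16);
	return (*end == '\0') ? 0 : -1;
}

int main(void)
{
	int level;
	unsigned long number;

	if (parse_gpe_method("_L1A", &level, &number) == 0)
		printf("%s-triggered GPE 0x%02lX\n",
		       level ? "level" : "edge", number);
	return 0;
}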
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) return; } + walk_info.count = 0; walk_info.owner_id = table_owner_id; walk_info.execute_by_owner_id = TRUE; - walk_info.count = 0; /* Walk the interrupt level descriptor list */ @@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) * xx - is the GPE number [in HEX] * * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods - * with that owner. + * with that owner. * ******************************************************************************/ @@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, * Add the GPE information from above to the gpe_event_info block for * use during dispatch of this GPE. */ + gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); gpe_event_info->dispatch.method_node = method_node; diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 19a0e513ea48..10e477494dcf 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -152,6 +152,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) return (FALSE); } +/******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_device + * + * PARAMETERS: GPE_WALK_CALLBACK + * + * RETURN: Status + * + * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE + * block device. NULL if the GPE is one of the FADT-defined GPEs. + * + ******************************************************************************/ + +acpi_status +acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, void *context) +{ + struct acpi_gpe_device_info *info = context; + + /* Increment Index by the number of GPEs in this block */ + + info->next_block_base_index += gpe_block->gpe_count; + + if (info->index < info->next_block_base_index) { + /* + * The GPE index is within this block, get the node. Leave the node + * NULL for the FADT-defined GPEs + */ + if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { + info->gpe_device = gpe_block->node; + } + + info->status = AE_OK; + return (AE_CTRL_END); + } + + return (AE_OK); +} + /******************************************************************************* * * FUNCTION: acpi_ev_get_gpe_xrupt_block diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index fcaed9fb44ff..38bba66fcce5 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -284,41 +284,39 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) * RETURN: ACPI_INTERRUPT_HANDLED * * DESCRIPTION: Invoked directly from the SCI handler when a global lock - * release interrupt occurs. Attempt to acquire the global lock, - * if successful, signal the thread waiting for the lock. + * release interrupt occurs. If there's a thread waiting for + * the global lock, signal it. * * NOTE: Assumes that the semaphore can be signaled from interrupt level. If * this is not possible for some reason, a separate thread will have to be * scheduled to do this. * ******************************************************************************/ +static u8 acpi_ev_global_lock_pending; static u32 acpi_ev_global_lock_handler(void *context) { - u8 acquired = FALSE; + acpi_status status; + acpi_cpu_flags flags; - /* - * Attempt to get the lock. 
- * - * If we don't get it now, it will be marked pending and we will - * take another interrupt when it becomes free. - */ - ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); - if (acquired) { + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); - /* Got the lock, now wake all threads waiting for it */ + if (!acpi_ev_global_lock_pending) { + goto out; + } - acpi_gbl_global_lock_acquired = TRUE; - /* Send a unit to the semaphore */ + /* Send a unit to the semaphore */ - if (ACPI_FAILURE - (acpi_os_signal_semaphore - (acpi_gbl_global_lock_semaphore, 1))) { - ACPI_ERROR((AE_INFO, - "Could not signal Global Lock semaphore")); - } + status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); + if (ACPI_FAILURE(status)) { + ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); } + acpi_ev_global_lock_pending = FALSE; + + out: + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); + return (ACPI_INTERRUPT_HANDLED); } @@ -415,6 +413,7 @@ static int acpi_ev_global_lock_acquired; acpi_status acpi_ev_acquire_global_lock(u16 timeout) { + acpi_cpu_flags flags; acpi_status status = AE_OK; u8 acquired = FALSE; @@ -467,32 +466,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout) return_ACPI_STATUS(AE_OK); } - /* Attempt to acquire the actual hardware lock */ + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); + + do { + + /* Attempt to acquire the actual hardware lock */ + + ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); + if (acquired) { + acpi_gbl_global_lock_acquired = TRUE; + + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Acquired hardware Global Lock\n")); + break; + } - ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); - if (acquired) { + acpi_ev_global_lock_pending = TRUE; - /* We got the lock */ + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); + /* + * Did not get the lock. The pending bit was set above, and we + * must wait until we get the global lock released interrupt. + */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "Acquired hardware Global Lock\n")); + "Waiting for hardware Global Lock\n")); - acpi_gbl_global_lock_acquired = TRUE; - return_ACPI_STATUS(AE_OK); - } + /* + * Wait for handshake with the global lock interrupt handler. + * This interface releases the interpreter if we must wait. + */ + status = acpi_ex_system_wait_semaphore( + acpi_gbl_global_lock_semaphore, + ACPI_WAIT_FOREVER); - /* - * Did not get the lock. The pending bit was set above, and we must now - * wait until we get the global lock released interrupt. - */ - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); - /* - * Wait for handshake with the global lock interrupt handler. - * This interface releases the interpreter if we must wait. 
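The rework above replaces "try to take the hardware lock from the interrupt handler" with a simpler handshake: the acquiring thread sets a pending flag under a dedicated spinlock, sleeps on the global-lock semaphore, and retries the hardware acquisition in a loop, while the interrupt handler merely posts the semaphore when the pending flag is set. A standalone pthread sketch of that handshake; fake_hw_try_lock() stands in for ACPI_ACQUIRE_GLOBAL_LOCK on the FACS, and all names are illustrative:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_pending;               /* models acpi_ev_global_lock_pending */
static sem_t release_sem;              /* models the global lock semaphore */
static int hw_owned_by_firmware = 1;   /* stand-in for the FACS lock bits */

static int fake_hw_try_lock(void)      /* stands in for ACPI_ACQUIRE_GLOBAL_LOCK */
{
	return !hw_owned_by_firmware;
}

/* Models acpi_ev_global_lock_handler(): signal only if someone is waiting. */
static void global_lock_release_interrupt(void)
{
	pthread_mutex_lock(&pending_lock);
	if (lock_pending) {
		sem_post(&release_sem);
		lock_pending = 0;
	}
	pthread_mutex_unlock(&pending_lock);
}

/* Models the retry loop in acpi_ev_acquire_global_lock(). */
static void acquire_global_lock(void)
{
	pthread_mutex_lock(&pending_lock);
	while (!fake_hw_try_lock()) {
		lock_pending = 1;
		pthread_mutex_unlock(&pending_lock);
		sem_wait(&release_sem); /* wait for the release interrupt */
		pthread_mutex_lock(&pending_lock);
	}
	pthread_mutex_unlock(&pending_lock);
	puts("acquired hardware global lock");
}

static void *firmware_thread(void *arg)
{
	(void)arg;
	sleep(1);
	hw_owned_by_firmware = 0;        /* firmware releases the lock... */
	global_lock_release_interrupt(); /* ...and the SCI fires */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&release_sem, 0, 0);
	pthread_create(&t, NULL, firmware_thread, NULL);
	acquire_global_lock();
	pthread_join(t, NULL);
	return 0;
}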
- */ - status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, - ACPI_WAIT_FOREVER); + } while (ACPI_SUCCESS(status)); + + acpi_ev_global_lock_pending = FALSE; + + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 36af222cac65..1226689bdb1b 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler) ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) #endif /* ACPI_FUTURE_USAGE */ + +/******************************************************************************* + * + * FUNCTION: acpi_install_global_event_handler + * + * PARAMETERS: Handler - Pointer to the global event handler function + * Context - Value passed to the handler on each event + * + * RETURN: Status + * + * DESCRIPTION: Saves the pointer to the handler function. The global handler + * is invoked upon each incoming GPE and Fixed Event. It is + * invoked at interrupt level at the time of the event dispatch. + * Can be used to update event counters, etc. + * + ******************************************************************************/ +acpi_status +acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_install_global_event_handler); + + /* Parameter validation */ + + if (!handler) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Don't allow two handlers. */ + + if (acpi_gbl_global_event_handler) { + status = AE_ALREADY_EXISTS; + goto cleanup; + } + + acpi_gbl_global_event_handler = handler; + acpi_gbl_global_event_handler_context = context; + + cleanup: + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler) + /******************************************************************************* * * FUNCTION: acpi_install_fixed_event_handler @@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler) acpi_status acpi_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, - u32 type, acpi_event_handler address, void *context) + u32 type, acpi_gpe_handler address, void *context) { struct acpi_gpe_event_info *gpe_event_info; - struct acpi_handler_info *handler; + struct acpi_gpe_handler_info *handler; acpi_status status; acpi_cpu_flags flags; @@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, /* Allocate memory for the handler object */ - handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); + handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); if (!handler) { status = AE_NO_MEMORY; goto unlock_and_exit; @@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, handler->address = address; handler->context = context; handler->method_node = gpe_event_info->dispatch.method_node; - handler->orig_flags = gpe_event_info->flags & + handler->original_flags = gpe_event_info->flags & (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* @@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, * disabled now to avoid spurious execution of the handler. 
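acpi_install_global_event_handler(), added above, is the replacement for the old acpi_os_gpe_count()/acpi_os_fixed_event_count() callouts: a single handler, installed once, is called at interrupt time for every GPE and every fixed event. A hedged sketch of a host-side counter built on it; the handler argument order follows the call sites in evevent.c and evgpe.c above, and the exact typedef (ACPI_GBL_EVENT_HANDLER in this patch) should be taken from the headers:

#include <linux/acpi.h>

static u32 example_gpe_events;
static u32 example_fixed_events;

/* Runs at interrupt level, so keep it short, as the description says. */
static void example_global_event_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
{
	(void)device;
	(void)context;

	if (event_type == ACPI_EVENT_TYPE_GPE)
		example_gpe_events++;
	else if (event_type == ACPI_EVENT_TYPE_FIXED)
		example_fixed_events++;
}

static acpi_status example_register_counter(void)
{
	/* Only one global handler may be installed (AE_ALREADY_EXISTS). */
	return acpi_install_global_event_handler(example_global_event_handler,
						 NULL);
}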
*/ - if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) + if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && gpe_event_info->runtime_count) { - handler->orig_enabled = 1; - (void)acpi_raw_disable_gpe(gpe_event_info); + handler->originally_enabled = 1; + (void)acpi_ev_remove_gpe_reference(gpe_event_info); } /* Install the handler */ @@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) ******************************************************************************/ acpi_status acpi_remove_gpe_handler(acpi_handle gpe_device, - u32 gpe_number, acpi_event_handler address) + u32 gpe_number, acpi_gpe_handler address) { struct acpi_gpe_event_info *gpe_event_info; - struct acpi_handler_info *handler; + struct acpi_gpe_handler_info *handler; acpi_status status; acpi_cpu_flags flags; @@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, gpe_event_info->dispatch.method_node = handler->method_node; gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); - gpe_event_info->flags |= handler->orig_flags; + gpe_event_info->flags |= handler->original_flags; /* * If the GPE was previously associated with a method and it was @@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, * post-initialization configuration. */ - if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) - && handler->orig_enabled) - (void)acpi_raw_enable_gpe(gpe_event_info); + if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) + && handler->originally_enabled) + (void)acpi_ev_add_gpe_reference(gpe_event_info); /* Now we can free the handler object */ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index a1dabe3fd8ae..90488c1e0f3d 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -43,18 +43,11 @@ #include #include "accommon.h" -#include "acevents.h" -#include "acnamesp.h" #include "actables.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evxfevnt") -/* Local prototypes */ -static acpi_status -acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, - struct acpi_gpe_block_info *gpe_block, void *context); - /******************************************************************************* * * FUNCTION: acpi_enable @@ -211,185 +204,6 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL(acpi_enable_event) -/******************************************************************************* - * - * FUNCTION: acpi_gpe_wakeup - * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * Action - Enable or Disable - * - * RETURN: Status - * - * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. 
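The removed acpi_gpe_wakeup() below did nothing more than set or clear one bit in the register's enable_for_wake mask, which is what gets written to the hardware when the enable registers are reprogrammed for sleep. A tiny standalone model of that bookkeeping (not the ACPICA struct, just the shape of it):

#include <stdio.h>

/*
 * Each GPE register tracks 8 GPEs; enable_for_wake records which of
 * them may wake the system, enable_for_run which are enabled at runtime.
 */
struct gpe_register_model {
	unsigned char enable_for_run;
	unsigned char enable_for_wake;
};

static void set_wake_bit(struct gpe_register_model *reg, unsigned bit,
			 int enable)
{
	if (enable)
		reg->enable_for_wake |= (unsigned char)(1u << bit);
	else
		reg->enable_for_wake &= (unsigned char)~(1u << bit);
}

int main(void)
{
	struct gpe_register_model reg = { 0, 0 };

	set_wake_bit(&reg, 3, 1);
	printf("enable_for_wake = 0x%02X\n", reg.enable_for_wake); /* 0x08 */
	set_wake_bit(&reg, 3, 0);
	printf("enable_for_wake = 0x%02X\n", reg.enable_for_wake); /* 0x00 */
	return 0;
}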
- * - ******************************************************************************/ -acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action) -{ - acpi_status status = AE_OK; - struct acpi_gpe_event_info *gpe_event_info; - struct acpi_gpe_register_info *gpe_register_info; - acpi_cpu_flags flags; - u32 register_bit; - - ACPI_FUNCTION_TRACE(acpi_gpe_wakeup); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - gpe_register_info = gpe_event_info->register_info; - if (!gpe_register_info) { - status = AE_NOT_EXIST; - goto unlock_and_exit; - } - - register_bit = - acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); - - /* Perform the action */ - - switch (action) { - case ACPI_GPE_ENABLE: - ACPI_SET_BIT(gpe_register_info->enable_for_wake, - (u8)register_bit); - break; - - case ACPI_GPE_DISABLE: - ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, - (u8)register_bit); - break; - - default: - ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); - status = AE_BAD_PARAMETER; - break; - } - -unlock_and_exit: - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup) - -/******************************************************************************* - * - * FUNCTION: acpi_enable_gpe - * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * - * RETURN: Status - * - * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is - * hardware-enabled. - * - ******************************************************************************/ -acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) -{ - acpi_status status = AE_BAD_PARAMETER; - struct acpi_gpe_event_info *gpe_event_info; - acpi_cpu_flags flags; - - ACPI_FUNCTION_TRACE(acpi_enable_gpe); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (gpe_event_info) { - status = acpi_raw_enable_gpe(gpe_event_info); - } - - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} -ACPI_EXPORT_SYMBOL(acpi_enable_gpe) - -/******************************************************************************* - * - * FUNCTION: acpi_disable_gpe - * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * - * RETURN: Status - * - * DESCRIPTION: Remove a reference to a GPE. 
When the last reference is - * removed, only then is the GPE disabled (for runtime GPEs), or - * the GPE mask bit disabled (for wake GPEs) - * - ******************************************************************************/ -acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) -{ - acpi_status status = AE_BAD_PARAMETER; - struct acpi_gpe_event_info *gpe_event_info; - acpi_cpu_flags flags; - - ACPI_FUNCTION_TRACE(acpi_disable_gpe); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (gpe_event_info) { - status = acpi_raw_disable_gpe(gpe_event_info) ; - } - - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} -ACPI_EXPORT_SYMBOL(acpi_disable_gpe) - -/******************************************************************************* - * - * FUNCTION: acpi_gpe_can_wake - * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * - * RETURN: Status - * - * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE - * has a corresponding method and is currently enabled, disable it - * (GPEs with corresponding methods are enabled unconditionally - * during initialization, but GPEs that can wake up are expected - * to be initially disabled). - * - ******************************************************************************/ -acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) -{ - acpi_status status = AE_OK; - struct acpi_gpe_event_info *gpe_event_info; - acpi_cpu_flags flags; - - ACPI_FUNCTION_TRACE(acpi_gpe_can_wake); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (gpe_event_info) { - gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; - } else { - status = AE_BAD_PARAMETER; - } - - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} -ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake) - /******************************************************************************* * * FUNCTION: acpi_disable_event @@ -481,44 +295,6 @@ acpi_status acpi_clear_event(u32 event) ACPI_EXPORT_SYMBOL(acpi_clear_event) -/******************************************************************************* - * - * FUNCTION: acpi_clear_gpe - * - * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * - * RETURN: Status - * - * DESCRIPTION: Clear an ACPI event (general purpose) - * - ******************************************************************************/ -acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) -{ - acpi_status status = AE_OK; - struct acpi_gpe_event_info *gpe_event_info; - acpi_cpu_flags flags; - - ACPI_FUNCTION_TRACE(acpi_clear_gpe); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (!gpe_event_info) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - status = acpi_hw_clear_gpe(gpe_event_info); - - unlock_and_exit: - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_clear_gpe) /******************************************************************************* * * FUNCTION: acpi_get_event_status @@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) } ACPI_EXPORT_SYMBOL(acpi_get_event_status) - -/******************************************************************************* - * - * FUNCTION: acpi_get_gpe_status - * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block - * event_status - Where the current status of the event will - * be returned - * - * RETURN: Status - * - * DESCRIPTION: Get status of an event (general purpose) - * - ******************************************************************************/ -acpi_status -acpi_get_gpe_status(acpi_handle gpe_device, - u32 gpe_number, acpi_event_status *event_status) -{ - acpi_status status = AE_OK; - struct acpi_gpe_event_info *gpe_event_info; - acpi_cpu_flags flags; - - ACPI_FUNCTION_TRACE(acpi_get_gpe_status); - - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - - /* Ensure that we have a valid GPE number */ - - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (!gpe_event_info) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - /* Obtain status on the requested GPE number */ - - status = acpi_hw_get_gpe_status(gpe_event_info, event_status); - - if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) - *event_status |= ACPI_EVENT_FLAG_HANDLE; - - unlock_and_exit: - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) -/******************************************************************************* - * - * FUNCTION: acpi_install_gpe_block - * - * PARAMETERS: gpe_device - Handle to the parent GPE Block Device - * gpe_block_address - Address and space_iD - * register_count - Number of GPE register pairs in the block - * interrupt_number - H/W interrupt for the block - * - * RETURN: Status - * - * DESCRIPTION: Create and Install a block of GPE registers - * - ******************************************************************************/ -acpi_status -acpi_install_gpe_block(acpi_handle gpe_device, - struct acpi_generic_address *gpe_block_address, - u32 register_count, u32 interrupt_number) -{ - acpi_status status = AE_OK; - union acpi_operand_object *obj_desc; - struct acpi_namespace_node *node; - struct acpi_gpe_block_info *gpe_block; - - ACPI_FUNCTION_TRACE(acpi_install_gpe_block); - - if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = 
acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return (status); - } - - node = acpi_ns_validate_handle(gpe_device); - if (!node) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - /* - * For user-installed GPE Block Devices, the gpe_block_base_number - * is always zero - */ - status = - acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, - interrupt_number, &gpe_block); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } - - /* Install block in the device_object attached to the node */ - - obj_desc = acpi_ns_get_attached_object(node); - if (!obj_desc) { - - /* - * No object, create a new one (Device nodes do not always have - * an attached object) - */ - obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); - if (!obj_desc) { - status = AE_NO_MEMORY; - goto unlock_and_exit; - } - - status = - acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); - - /* Remove local reference to the object */ - - acpi_ut_remove_reference(obj_desc); - - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } - } - - /* Now install the GPE block in the device_object */ - - obj_desc->device.gpe_block = gpe_block; - - unlock_and_exit: - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) - -/******************************************************************************* - * - * FUNCTION: acpi_remove_gpe_block - * - * PARAMETERS: gpe_device - Handle to the parent GPE Block Device - * - * RETURN: Status - * - * DESCRIPTION: Remove a previously installed block of GPE registers - * - ******************************************************************************/ -acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) -{ - union acpi_operand_object *obj_desc; - acpi_status status; - struct acpi_namespace_node *node; - - ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); - - if (!gpe_device) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return (status); - } - - node = acpi_ns_validate_handle(gpe_device); - if (!node) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - /* Get the device_object attached to the node */ - - obj_desc = acpi_ns_get_attached_object(node); - if (!obj_desc || !obj_desc->device.gpe_block) { - return_ACPI_STATUS(AE_NULL_OBJECT); - } - - /* Delete the GPE block (but not the device_object) */ - - status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); - if (ACPI_SUCCESS(status)) { - obj_desc->device.gpe_block = NULL; - } - - unlock_and_exit: - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) - -/******************************************************************************* - * - * FUNCTION: acpi_get_gpe_device - * - * PARAMETERS: Index - System GPE index (0-current_gpe_count) - * gpe_device - Where the parent GPE Device is returned - * - * RETURN: Status - * - * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL - * gpe device indicates that the gpe number is contained in one of - * the FADT-defined gpe blocks. Otherwise, the GPE block device. 
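acpi_get_gpe_device() (moved, not dropped, by this patch) maps a system-wide GPE index to its parent block device, returning NULL for GPEs that live in the FADT-defined GPE0/GPE1 blocks. A hedged sketch of how a host might enumerate every GPE's parent, using acpi_current_gpe_count as the bound just as the implementation does:

#include <linux/kernel.h>
#include <linux/acpi.h>

static void example_enumerate_gpe_parents(void)
{
	u32 index;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		acpi_handle gpe_device = NULL;
		acpi_status status;

		status = acpi_get_gpe_device(index, &gpe_device);
		if (ACPI_FAILURE(status))
			continue;

		if (gpe_device)
			pr_info("GPE index %u: GPE Block Device\n", index);
		else
			pr_info("GPE index %u: FADT GPE block\n", index);
	}
}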
- * - ******************************************************************************/ -acpi_status -acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) -{ - struct acpi_gpe_device_info info; - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_get_gpe_device); - - if (!gpe_device) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (index >= acpi_current_gpe_count) { - return_ACPI_STATUS(AE_NOT_EXIST); - } - - /* Setup and walk the GPE list */ - - info.index = index; - info.status = AE_NOT_EXIST; - info.gpe_device = NULL; - info.next_block_base_index = 0; - - status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - *gpe_device = info.gpe_device; - return_ACPI_STATUS(info.status); -} - -ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) - -/******************************************************************************* - * - * FUNCTION: acpi_ev_get_gpe_device - * - * PARAMETERS: GPE_WALK_CALLBACK - * - * RETURN: Status - * - * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE - * block device. NULL if the GPE is one of the FADT-defined GPEs. - * - ******************************************************************************/ -static acpi_status -acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, - struct acpi_gpe_block_info *gpe_block, void *context) -{ - struct acpi_gpe_device_info *info = context; - - /* Increment Index by the number of GPEs in this block */ - - info->next_block_base_index += gpe_block->gpe_count; - - if (info->index < info->next_block_base_index) { - /* - * The GPE index is within this block, get the node. Leave the node - * NULL for the FADT-defined GPEs - */ - if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { - info->gpe_device = gpe_block->node; - } - - info->status = AE_OK; - return (AE_CTRL_END); - } - - return (AE_OK); -} - -/****************************************************************************** - * - * FUNCTION: acpi_disable_all_gpes - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Disable and clear all GPEs in all GPE blocks - * - ******************************************************************************/ - -acpi_status acpi_disable_all_gpes(void) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); - - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - status = acpi_hw_disable_all_gpes(); - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); - - return_ACPI_STATUS(status); -} - -/****************************************************************************** - * - * FUNCTION: acpi_enable_all_runtime_gpes - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks - * - ******************************************************************************/ - -acpi_status acpi_enable_all_runtime_gpes(void) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); - - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - status = acpi_hw_enable_all_runtime_gpes(); - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); - - return_ACPI_STATUS(status); -} - -/****************************************************************************** - * - * FUNCTION: acpi_update_gpes - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and - * are not pointed to by any 
device _PRW methods indicating that - * these GPEs are generally intended for system or device wakeup - * (such GPEs have to be enabled directly when the devices whose - * _PRW methods point to them are set up for wakeup signaling). - * - ******************************************************************************/ - -acpi_status acpi_update_gpes(void) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_update_gpes); - - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } else if (acpi_all_gpes_initialized) { - goto unlock; - } - - status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); - if (ACPI_SUCCESS(status)) { - acpi_all_gpes_initialized = TRUE; - } - -unlock: - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); - - return_ACPI_STATUS(status); -} diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c new file mode 100644 index 000000000000..416845bc9c1f --- /dev/null +++ b/drivers/acpi/acpica/evxfgpe.c @@ -0,0 +1,669 @@ +/****************************************************************************** + * + * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2010, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#include +#include "accommon.h" +#include "acevents.h" +#include "acnamesp.h" + +#define _COMPONENT ACPI_EVENTS +ACPI_MODULE_NAME("evxfgpe") + +/****************************************************************************** + * + * FUNCTION: acpi_update_all_gpes + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Complete GPE initialization and enable all GPEs that have + * associated _Lxx or _Exx methods and are not pointed to by any + * device _PRW methods (this indicates that these GPEs are + * generally intended for system or device wakeup. Such GPEs + * have to be enabled directly when the devices whose _PRW + * methods point to them are set up for wakeup signaling.) + * + * NOTE: Should be called after any GPEs are added to the system. Primarily, + * after the system _PRW methods have been run, but also after a GPE Block + * Device has been added or if any new GPE methods have been added via a + * dynamic table load. + * + ******************************************************************************/ + +acpi_status acpi_update_all_gpes(void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_update_all_gpes); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + if (acpi_gbl_all_gpes_initialized) { + goto unlock_and_exit; + } + + status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); + if (ACPI_SUCCESS(status)) { + acpi_gbl_all_gpes_initialized = TRUE; + } + +unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) + +/******************************************************************************* + * + * FUNCTION: acpi_enable_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * + * RETURN: Status + * + * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is + * hardware-enabled. + * + ******************************************************************************/ + +acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) +{ + acpi_status status = AE_BAD_PARAMETER; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_enable_gpe); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (gpe_event_info) { + status = acpi_ev_add_gpe_reference(gpe_event_info); + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_enable_gpe) + +/******************************************************************************* + * + * FUNCTION: acpi_disable_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * + * RETURN: Status + * + * DESCRIPTION: Remove a reference to a GPE. 
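A minimal usage sketch of the two interfaces above, as a host OS might call them once GPE blocks exist; the GPE number 0x16 and the surrounding wrapper function are illustrative assumptions, not part of this patch:

/* Sketch: finish GPE initialization, then take a reference on one GPE. */
static acpi_status example_enable_runtime_gpe(void)
{
	acpi_status status;

	/* Complete GPE init; also safe to call again after a table load. */
	status = acpi_update_all_gpes();
	if (ACPI_FAILURE(status))
		return status;

	/* Reference-counted: hardware-enabled on the first reference. */
	return acpi_enable_gpe(NULL, 0x16);	/* NULL => FADT GPE block */
}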
When the last reference is + * removed, only then is the GPE disabled (for runtime GPEs), or + * the GPE mask bit disabled (for wake GPEs) + * + ******************************************************************************/ + +acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) +{ + acpi_status status = AE_BAD_PARAMETER; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_disable_gpe); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (gpe_event_info) { + status = acpi_ev_remove_gpe_reference(gpe_event_info) ; + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_disable_gpe) + + +/******************************************************************************* + * + * FUNCTION: acpi_setup_gpe_for_wake + * + * PARAMETERS: wake_device - Device associated with the GPE (via _PRW) + * gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * + * RETURN: Status + * + * DESCRIPTION: Mark a GPE as having the ability to wake the system. This + * interface is intended to be used as the host executes the + * _PRW methods (Power Resources for Wake) in the system tables. + * Each _PRW appears under a Device Object (The wake_device), and + * contains the info for the wake GPE associated with the + * wake_device. + * + ******************************************************************************/ +acpi_status +acpi_setup_gpe_for_wake(acpi_handle wake_device, + acpi_handle gpe_device, u32 gpe_number) +{ + acpi_status status = AE_BAD_PARAMETER; + struct acpi_gpe_event_info *gpe_event_info; + struct acpi_namespace_node *device_node; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); + + /* Parameter Validation */ + + if (!wake_device) { + /* + * By forcing wake_device to be valid, we automatically enable the + * implicit notify feature on all hosts. + */ + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + /* Validate wake_device is of type Device */ + + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); + if (device_node->type != ACPI_TYPE_DEVICE) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (gpe_event_info) { + /* + * If there is no method or handler for this GPE, then the + * wake_device will be notified whenever this GPE fires (aka + * "implicit notify") Note: The GPE is assumed to be + * level-triggered (for windows compatibility). + */ + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == + ACPI_GPE_DISPATCH_NONE) { + gpe_event_info->flags = + (ACPI_GPE_DISPATCH_NOTIFY | + ACPI_GPE_LEVEL_TRIGGERED); + gpe_event_info->dispatch.device_node = device_node; + } + + gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; + status = AE_OK; + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) + +/******************************************************************************* + * + * FUNCTION: acpi_set_gpe_wake_mask + * + * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * Action - Enable or Disable + * + * RETURN: Status + * + * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must + * already be marked as a WAKE GPE. + * + ******************************************************************************/ + +acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + struct acpi_gpe_register_info *gpe_register_info; + acpi_cpu_flags flags; + u32 register_bit; + + ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* + * Ensure that we have a valid GPE number and that this GPE is in + * fact a wake GPE + */ + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { + status = AE_TYPE; + goto unlock_and_exit; + } + + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + register_bit = + acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); + + /* Perform the action */ + + switch (action) { + case ACPI_GPE_ENABLE: + ACPI_SET_BIT(gpe_register_info->enable_for_wake, + (u8)register_bit); + break; + + case ACPI_GPE_DISABLE: + ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, + (u8)register_bit); + break; + + default: + ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); + status = AE_BAD_PARAMETER; + break; + } + +unlock_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask) + +/******************************************************************************* + * + * FUNCTION: acpi_clear_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (general purpose) + * + ******************************************************************************/ +acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_clear_gpe); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_hw_clear_gpe(gpe_event_info); + + unlock_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_clear_gpe) + +/******************************************************************************* + * + * FUNCTION: acpi_get_gpe_status + * + * PARAMETERS: gpe_device - Parent GPE Device. 
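A hedged sketch of how the wake-related interfaces above fit together while the host processes _PRW objects; wake_handle, gpe_dev, gpe_num and the wrapper itself are placeholders for values taken from a parsed _PRW package:

/* Sketch: mark a _PRW GPE as wake-capable, then arm its wake mask. */
static acpi_status example_wake_gpe(acpi_handle wake_handle,
				    acpi_handle gpe_dev, u32 gpe_num)
{
	acpi_status status;

	/* With no handler or _Lxx/_Exx method this also sets up the
	 * "implicit notify" to wake_handle described above. */
	status = acpi_setup_gpe_for_wake(wake_handle, gpe_dev, gpe_num);
	if (ACPI_FAILURE(status))
		return status;

	/* Later, once the device is set up for wakeup signaling: */
	return acpi_set_gpe_wake_mask(gpe_dev, gpe_num, ACPI_GPE_ENABLE);
}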
NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * event_status - Where the current status of the event will + * be returned + * + * RETURN: Status + * + * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled) + * + ******************************************************************************/ +acpi_status +acpi_get_gpe_status(acpi_handle gpe_device, + u32 gpe_number, acpi_event_status *event_status) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_get_gpe_status); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Obtain status on the requested GPE number */ + + status = acpi_hw_get_gpe_status(gpe_event_info, event_status); + + if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) + *event_status |= ACPI_EVENT_FLAG_HANDLE; + + unlock_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) + +/****************************************************************************** + * + * FUNCTION: acpi_disable_all_gpes + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Disable and clear all GPEs in all GPE blocks + * + ******************************************************************************/ + +acpi_status acpi_disable_all_gpes(void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_hw_disable_all_gpes(); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes) + +/****************************************************************************** + * + * FUNCTION: acpi_enable_all_runtime_gpes + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks + * + ******************************************************************************/ + +acpi_status acpi_enable_all_runtime_gpes(void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_hw_enable_all_runtime_gpes(); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) + +/******************************************************************************* + * + * FUNCTION: acpi_install_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * gpe_block_address - Address and space_iD + * register_count - Number of GPE register pairs in the block + * interrupt_number - H/W interrupt for the block + * + * RETURN: Status + * + * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not + * enabled here. 
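The two system-wide calls above are typically paired around a sleep transition; a simplified sketch of that ordering (real suspend code also arms the wake GPEs and programs the sleep registers in between):

/* Sketch: GPE handling around a system sleep transition. */
static void example_suspend_resume_gpes(void)
{
	/* Entering sleep: disable and clear all GPE delivery. */
	acpi_disable_all_gpes();

	/* ... arm wake GPEs, enter the sleep state, wake up ... */

	/* Back in the working state: re-enable runtime GPEs only. */
	acpi_enable_all_runtime_gpes();
}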
+ * + ******************************************************************************/ +acpi_status +acpi_install_gpe_block(acpi_handle gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, u32 interrupt_number) +{ + acpi_status status; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *gpe_block; + + ACPI_FUNCTION_TRACE(acpi_install_gpe_block); + + if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return (status); + } + + node = acpi_ns_validate_handle(gpe_device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* + * For user-installed GPE Block Devices, the gpe_block_base_number + * is always zero + */ + status = + acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, + interrupt_number, &gpe_block); + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; + } + + /* Install block in the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object(node); + if (!obj_desc) { + + /* + * No object, create a new one (Device nodes do not always have + * an attached object) + */ + obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); + if (!obj_desc) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + status = + acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference(obj_desc); + + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; + } + } + + /* Now install the GPE block in the device_object */ + + obj_desc->device.gpe_block = gpe_block; + + unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) + +/******************************************************************************* + * + * FUNCTION: acpi_remove_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * + * RETURN: Status + * + * DESCRIPTION: Remove a previously installed block of GPE registers + * + ******************************************************************************/ +acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + struct acpi_namespace_node *node; + + ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); + + if (!gpe_device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return (status); + } + + node = acpi_ns_validate_handle(gpe_device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Get the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object(node); + if (!obj_desc || !obj_desc->device.gpe_block) { + return_ACPI_STATUS(AE_NULL_OBJECT); + } + + /* Delete the GPE block (but not the device_object) */ + + status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); + if (ACPI_SUCCESS(status)) { + obj_desc->device.gpe_block = NULL; + } + + unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) + +/******************************************************************************* + * + * FUNCTION: acpi_get_gpe_device + * + * PARAMETERS: Index - System GPE index (0-current_gpe_count) + * gpe_device - Where the parent GPE Device is 
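A sketch of how a GPE Block Device driver might use the pair of interfaces above; the register block address, register count, interrupt number and the wrapper are purely illustrative assumptions:

/* Sketch: install a user-defined GPE register block, then remove it. */
static acpi_status example_gpe_block(acpi_handle gpe_device)
{
	struct acpi_generic_address block_addr = {
		.space_id  = ACPI_ADR_SPACE_SYSTEM_IO,
		.bit_width = 8,
		.address   = 0x1000,		/* assumed I/O base */
	};
	acpi_status status;

	/* Two GPE register pairs, routed to interrupt 9 (assumptions). */
	status = acpi_install_gpe_block(gpe_device, &block_addr, 2, 9);
	if (ACPI_FAILURE(status))
		return status;

	/* ... GPEs in the block can now be enabled and dispatched ... */

	return acpi_remove_gpe_block(gpe_device);
}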
returned + * + * RETURN: Status + * + * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL + * gpe device indicates that the gpe number is contained in one of + * the FADT-defined gpe blocks. Otherwise, the GPE block device. + * + ******************************************************************************/ +acpi_status +acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) +{ + struct acpi_gpe_device_info info; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_get_gpe_device); + + if (!gpe_device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + if (index >= acpi_current_gpe_count) { + return_ACPI_STATUS(AE_NOT_EXIST); + } + + /* Setup and walk the GPE list */ + + info.index = index; + info.status = AE_NOT_EXIST; + info.gpe_device = NULL; + info.next_block_base_index = 0; + + status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device); + return_ACPI_STATUS(info.status); +} + +ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 14750db2a1b8..85c3cbd4304d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, * PARAMETERS: gpe_event_info - Info block for the GPE * gpe_register_info - Info block for the GPE register * - * RETURN: Status + * RETURN: Register mask with a one in the GPE bit position * - * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given - * GPE set. + * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the + * correct position for the input GPE. * ******************************************************************************/ @@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, * * RETURN: Status * - * DESCRIPTION: Enable or disable a single GPE in its enable register. + * DESCRIPTION: Enable or disable a single GPE in the parent enable register. 
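acpi_get_gpe_device(), shown a little earlier, gives the index-to-device mapping; a small sketch of walking every system GPE index with it (the loop body is deliberately left empty and is only illustrative):

/* Sketch: map each system GPE index to its parent GPE device. */
static void example_walk_gpe_devices(void)
{
	acpi_handle gpe_device;
	u32 i;

	for (i = 0; i < acpi_current_gpe_count; i++) {
		if (ACPI_FAILURE(acpi_get_gpe_device(i, &gpe_device)))
			continue;
		/* NULL handle: a FADT-defined block; else: a Block Device. */
	}
}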
* ******************************************************************************/ acpi_status -acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; @@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) return (status); } - /* Set ot clear just the bit that corresponds to this GPE */ + /* Set or clear just the bit that corresponds to this GPE */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); switch (action) { - case ACPI_GPE_COND_ENABLE: - if (!(register_bit & gpe_register_info->enable_for_run)) + case ACPI_GPE_CONDITIONAL_ENABLE: + + /* Only enable if the enable_for_run bit is set */ + + if (!(register_bit & gpe_register_info->enable_for_run)) { return (AE_BAD_PARAMETER); + } + + /*lint -fallthrough */ case ACPI_GPE_ENABLE: ACPI_SET_BIT(enable_mask, register_bit); @@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) break; default: - ACPI_ERROR((AE_INFO, "Invalid action\n")); + ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); return (AE_BAD_PARAMETER); } @@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) return (AE_NOT_EXIST); } - register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, - gpe_register_info); - /* * Write a one to the appropriate bit in the status register to * clear this GPE. */ + register_bit = + acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); + status = acpi_hw_write(register_bit, &gpe_register_info->status_address); @@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, u32 in_byte; u32 register_bit; struct acpi_gpe_register_info *gpe_register_info; - acpi_status status; acpi_event_status local_event_status = 0; + acpi_status status; ACPI_FUNCTION_ENTRY(); diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index e87bc6760be6..508537f884ac 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void) acpi_gbl_gpe_fadt_blocks[0] = NULL; acpi_gbl_gpe_fadt_blocks[1] = NULL; acpi_current_gpe_count = 0; - acpi_all_gpes_initialized = FALSE; + acpi_gbl_all_gpes_initialized = FALSE; /* Global handlers */ @@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void) acpi_gbl_init_handler = NULL; acpi_gbl_table_handler = NULL; acpi_gbl_interface_handler = NULL; + acpi_gbl_global_event_handler = NULL; /* Global Lock support */ diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index d9efa495b433..199528ff7f1d 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -85,6 +85,7 @@ acpi_status acpi_ut_mutex_initialize(void) spin_lock_init(acpi_gbl_gpe_lock); spin_lock_init(acpi_gbl_hardware_lock); + spin_lock_init(acpi_ev_global_lock_pending_lock); /* Mutex for _OSI support */ status = acpi_os_create_mutex(&acpi_gbl_osi_mutex); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 18df1e940276..ef0581f2094d 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -109,6 +109,8 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus) return sizeof(*estatus) + estatus->data_length; } +void apei_estatus_print(const char *pfx, + const struct 
acpi_hest_generic_status *estatus); int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); int apei_estatus_check(const struct acpi_hest_generic_status *estatus); #endif diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index f4cf2fc4c8c1..31464a006d76 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c @@ -46,6 +46,317 @@ u64 cper_next_record_id(void) } EXPORT_SYMBOL_GPL(cper_next_record_id); +static const char *cper_severity_strs[] = { + "recoverable", + "fatal", + "corrected", + "info", +}; + +static const char *cper_severity_str(unsigned int severity) +{ + return severity < ARRAY_SIZE(cper_severity_strs) ? + cper_severity_strs[severity] : "unknown"; +} + +/* + * cper_print_bits - print strings for set bits + * @pfx: prefix for each line, including log level and prefix string + * @bits: bit mask + * @strs: string array, indexed by bit position + * @strs_size: size of the string array: @strs + * + * For each set bit in @bits, print the corresponding string in @strs. + * If the output length is longer than 80, multiple line will be + * printed, with @pfx is printed at the beginning of each line. + */ +static void cper_print_bits(const char *pfx, unsigned int bits, + const char *strs[], unsigned int strs_size) +{ + int i, len = 0; + const char *str; + char buf[84]; + + for (i = 0; i < strs_size; i++) { + if (!(bits & (1U << i))) + continue; + str = strs[i]; + if (len && len + strlen(str) + 2 > 80) { + printk("%s\n", buf); + len = 0; + } + if (!len) + len = snprintf(buf, sizeof(buf), "%s%s", pfx, str); + else + len += snprintf(buf+len, sizeof(buf)-len, ", %s", str); + } + if (len) + printk("%s\n", buf); +} + +static const char *cper_proc_type_strs[] = { + "IA32/X64", + "IA64", +}; + +static const char *cper_proc_isa_strs[] = { + "IA32", + "IA64", + "X64", +}; + +static const char *cper_proc_error_type_strs[] = { + "cache error", + "TLB error", + "bus error", + "micro-architectural error", +}; + +static const char *cper_proc_op_strs[] = { + "unknown or generic", + "data read", + "data write", + "instruction execution", +}; + +static const char *cper_proc_flag_strs[] = { + "restartable", + "precise IP", + "overflow", + "corrected", +}; + +static void cper_print_proc_generic(const char *pfx, + const struct cper_sec_proc_generic *proc) +{ + if (proc->validation_bits & CPER_PROC_VALID_TYPE) + printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type, + proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ? + cper_proc_type_strs[proc->proc_type] : "unknown"); + if (proc->validation_bits & CPER_PROC_VALID_ISA) + printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa, + proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ? + cper_proc_isa_strs[proc->proc_isa] : "unknown"); + if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) { + printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type); + cper_print_bits(pfx, proc->proc_error_type, + cper_proc_error_type_strs, + ARRAY_SIZE(cper_proc_error_type_strs)); + } + if (proc->validation_bits & CPER_PROC_VALID_OPERATION) + printk("%s""operation: %d, %s\n", pfx, proc->operation, + proc->operation < ARRAY_SIZE(cper_proc_op_strs) ? 
+ cper_proc_op_strs[proc->operation] : "unknown"); + if (proc->validation_bits & CPER_PROC_VALID_FLAGS) { + printk("%s""flags: 0x%02x\n", pfx, proc->flags); + cper_print_bits(pfx, proc->flags, cper_proc_flag_strs, + ARRAY_SIZE(cper_proc_flag_strs)); + } + if (proc->validation_bits & CPER_PROC_VALID_LEVEL) + printk("%s""level: %d\n", pfx, proc->level); + if (proc->validation_bits & CPER_PROC_VALID_VERSION) + printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version); + if (proc->validation_bits & CPER_PROC_VALID_ID) + printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id); + if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS) + printk("%s""target_address: 0x%016llx\n", + pfx, proc->target_addr); + if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) + printk("%s""requestor_id: 0x%016llx\n", + pfx, proc->requestor_id); + if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID) + printk("%s""responder_id: 0x%016llx\n", + pfx, proc->responder_id); + if (proc->validation_bits & CPER_PROC_VALID_IP) + printk("%s""IP: 0x%016llx\n", pfx, proc->ip); +} + +static const char *cper_mem_err_type_strs[] = { + "unknown", + "no error", + "single-bit ECC", + "multi-bit ECC", + "single-symbol chipkill ECC", + "multi-symbol chipkill ECC", + "master abort", + "target abort", + "parity error", + "watchdog timeout", + "invalid address", + "mirror Broken", + "memory sparing", + "scrub corrected error", + "scrub uncorrected error", +}; + +static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) +{ + if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) + printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); + if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) + printk("%s""physical_address: 0x%016llx\n", + pfx, mem->physical_addr); + if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) + printk("%s""physical_address_mask: 0x%016llx\n", + pfx, mem->physical_addr_mask); + if (mem->validation_bits & CPER_MEM_VALID_NODE) + printk("%s""node: %d\n", pfx, mem->node); + if (mem->validation_bits & CPER_MEM_VALID_CARD) + printk("%s""card: %d\n", pfx, mem->card); + if (mem->validation_bits & CPER_MEM_VALID_MODULE) + printk("%s""module: %d\n", pfx, mem->module); + if (mem->validation_bits & CPER_MEM_VALID_BANK) + printk("%s""bank: %d\n", pfx, mem->bank); + if (mem->validation_bits & CPER_MEM_VALID_DEVICE) + printk("%s""device: %d\n", pfx, mem->device); + if (mem->validation_bits & CPER_MEM_VALID_ROW) + printk("%s""row: %d\n", pfx, mem->row); + if (mem->validation_bits & CPER_MEM_VALID_COLUMN) + printk("%s""column: %d\n", pfx, mem->column); + if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION) + printk("%s""bit_position: %d\n", pfx, mem->bit_pos); + if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID) + printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id); + if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID) + printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id); + if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) + printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id); + if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) { + u8 etype = mem->error_type; + printk("%s""error_type: %d, %s\n", pfx, etype, + etype < ARRAY_SIZE(cper_mem_err_type_strs) ? 
+ cper_mem_err_type_strs[etype] : "unknown"); + } +} + +static const char *cper_pcie_port_type_strs[] = { + "PCIe end point", + "legacy PCI end point", + "unknown", + "unknown", + "root port", + "upstream switch port", + "downstream switch port", + "PCIe to PCI/PCI-X bridge", + "PCI/PCI-X to PCIe bridge", + "root complex integrated endpoint device", + "root complex event collector", +}; + +static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie) +{ + if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) + printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, + pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? + cper_pcie_port_type_strs[pcie->port_type] : "unknown"); + if (pcie->validation_bits & CPER_PCIE_VALID_VERSION) + printk("%s""version: %d.%d\n", pfx, + pcie->version.major, pcie->version.minor); + if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS) + printk("%s""command: 0x%04x, status: 0x%04x\n", pfx, + pcie->command, pcie->status); + if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) { + const __u8 *p; + printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx, + pcie->device_id.segment, pcie->device_id.bus, + pcie->device_id.device, pcie->device_id.function); + printk("%s""slot: %d\n", pfx, + pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT); + printk("%s""secondary_bus: 0x%02x\n", pfx, + pcie->device_id.secondary_bus); + printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, + pcie->device_id.vendor_id, pcie->device_id.device_id); + p = pcie->device_id.class_code; + printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); + } + if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) + printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, + pcie->serial_number.lower, pcie->serial_number.upper); + if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS) + printk( + "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", + pfx, pcie->bridge.secondary_status, pcie->bridge.control); +} + +static const char *apei_estatus_section_flag_strs[] = { + "primary", + "containment warning", + "reset", + "threshold exceeded", + "resource not accessible", + "latent error", +}; + +static void apei_estatus_print_section( + const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no) +{ + uuid_le *sec_type = (uuid_le *)gdata->section_type; + __u16 severity; + + severity = gdata->error_severity; + printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity, + cper_severity_str(severity)); + printk("%s""flags: 0x%02x\n", pfx, gdata->flags); + cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs, + ARRAY_SIZE(apei_estatus_section_flag_strs)); + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) + printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id); + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) + printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); + + if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1); + printk("%s""section_type: general processor error\n", pfx); + if (gdata->error_data_length >= sizeof(*proc_err)) + cper_print_proc_generic(pfx, proc_err); + else + goto err_section_too_small; + } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); + printk("%s""section_type: memory error\n", pfx); + if (gdata->error_data_length >= sizeof(*mem_err)) + cper_print_mem(pfx, mem_err); + else + goto err_section_too_small; + } else if (!uuid_le_cmp(*sec_type, 
CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie = (void *)(gdata + 1); + printk("%s""section_type: PCIe error\n", pfx); + if (gdata->error_data_length >= sizeof(*pcie)) + cper_print_pcie(pfx, pcie); + else + goto err_section_too_small; + } else + printk("%s""section type: unknown, %pUl\n", pfx, sec_type); + + return; + +err_section_too_small: + pr_err(FW_WARN "error section length is too small\n"); +} + +void apei_estatus_print(const char *pfx, + const struct acpi_hest_generic_status *estatus) +{ + struct acpi_hest_generic_data *gdata; + unsigned int data_len, gedata_len; + int sec_no = 0; + __u16 severity; + + printk("%s""APEI generic hardware error status\n", pfx); + severity = estatus->error_severity; + printk("%s""severity: %d, %s\n", pfx, severity, + cper_severity_str(severity)); + data_len = estatus->data_length; + gdata = (struct acpi_hest_generic_data *)(estatus + 1); + while (data_len > sizeof(*gdata)) { + gedata_len = gdata->error_data_length; + apei_estatus_print_section(pfx, gdata, sec_no); + data_len -= gedata_len + sizeof(*gdata); + sec_no++; + } +} +EXPORT_SYMBOL_GPL(apei_estatus_print); + int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) { if (estatus->data_length && diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index cf29df69380b..096aebfe7f32 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -39,7 +39,7 @@ #define EINJ_PFX "EINJ: " #define SPIN_UNIT 100 /* 100ns */ -/* Firmware should respond within 1 miliseconds */ +/* Firmware should respond within 1 milliseconds */ #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) /* diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 5850d320404c..cf6db6b7662a 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -53,7 +53,7 @@ sizeof(struct acpi_table_erst))) #define SPIN_UNIT 100 /* 100ns */ -/* Firmware should respond within 1 miliseconds */ +/* Firmware should respond within 1 milliseconds */ #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) #define FIRMWARE_MAX_STALL 50 /* 50us */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0d505e59214d..d1d484d4a06a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -12,10 +12,6 @@ * For more information about Generic Hardware Error Source, please * refer to ACPI Specification version 4.0, section 17.3.2.6 * - * Now, only SCI notification type and memory errors are - * supported. More notification type and hardware error type will be - * added later. - * * Copyright 2010 Intel Corp. * Author: Huang Ying * @@ -39,14 +35,18 @@ #include #include #include +#include #include #include #include #include +#include +#include #include #include #include #include +#include #include "apei-internal.h" @@ -55,42 +55,131 @@ #define GHES_ESTATUS_MAX_SIZE 65536 /* - * One struct ghes is created for each generic hardware error - * source. - * + * One struct ghes is created for each generic hardware error source. * It provides the context for APEI hardware error timer/IRQ/SCI/NMI - * handler. Handler for one generic hardware error source is only - * triggered after the previous one is done. So handler can uses - * struct ghes without locking. + * handler. * * estatus: memory buffer for error status block, allocated during * HEST parsing. 
*/ #define GHES_TO_CLEAR 0x0001 +#define GHES_EXITING 0x0002 struct ghes { struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; - struct list_head list; u64 buffer_paddr; unsigned long flags; + union { + struct list_head list; + struct timer_list timer; + unsigned int irq; + }; }; +static int ghes_panic_timeout __read_mostly = 30; + /* - * Error source lists, one list for each notification method. The - * members in lists are struct ghes. + * All error sources notified with SCI shares one notifier function, + * so they need to be linked and checked one by one. This is applied + * to NMI too. * - * The list members are only added in HEST parsing and deleted during - * module_exit, that is, single-threaded. So no lock is needed for - * that. - * - * But the mutual exclusion is needed between members adding/deleting - * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is - * used for that. + * RCU is used for these lists, so ghes_list_mutex is only used for + * list changing, not for traversing. */ static LIST_HEAD(ghes_sci); +static LIST_HEAD(ghes_nmi); static DEFINE_MUTEX(ghes_list_mutex); +/* + * NMI may be triggered on any CPU, so ghes_nmi_lock is used for + * mutual exclusion. + */ +static DEFINE_RAW_SPINLOCK(ghes_nmi_lock); + +/* + * Because the memory area used to transfer hardware error information + * from BIOS to Linux can be determined only in NMI, IRQ or timer + * handler, but general ioremap can not be used in atomic context, so + * a special version of atomic ioremap is implemented for that. + */ + +/* + * Two virtual pages are used, one for NMI context, the other for + * IRQ/PROCESS context + */ +#define GHES_IOREMAP_PAGES 2 +#define GHES_IOREMAP_NMI_PAGE(base) (base) +#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE) + +/* virtual memory area for atomic ioremap */ +static struct vm_struct *ghes_ioremap_area; +/* + * These 2 spinlock is used to prevent atomic ioremap virtual memory + * area from being mapped simultaneously. 
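The comments above describe the atomic-mapping pattern: pick the page reserved for the current context, map a single PFN, copy, then unmap. For the IRQ/process case that discipline looks roughly like the sketch below; the lock and helpers it uses are the ones introduced just after this point, and the real ghes_copy_tofrom_phys() further down additionally handles NMI context and buffers that cross page boundaries:

/* Sketch: copy at most one page of error data in IRQ/process context. */
static void example_read_estatus_page(void *buf, u64 paddr, u32 len)
{
	unsigned long flags;
	void __iomem *vaddr;
	u32 offset = paddr & (PAGE_SIZE - 1);

	spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
	vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
	memcpy_fromio(buf, vaddr + offset,
		      min_t(u32, len, PAGE_SIZE - offset));
	ghes_iounmap_irq(vaddr);
	spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}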
+ */ +static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); +static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); + +static int ghes_ioremap_init(void) +{ + ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, + VM_IOREMAP, VMALLOC_START, VMALLOC_END); + if (!ghes_ioremap_area) { + pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); + return -ENOMEM; + } + + return 0; +} + +static void ghes_ioremap_exit(void) +{ + free_vm_area(ghes_ioremap_area); +} + +static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) +{ + unsigned long vaddr; + + vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); + ioremap_page_range(vaddr, vaddr + PAGE_SIZE, + pfn << PAGE_SHIFT, PAGE_KERNEL); + + return (void __iomem *)vaddr; +} + +static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) +{ + unsigned long vaddr; + + vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); + ioremap_page_range(vaddr, vaddr + PAGE_SIZE, + pfn << PAGE_SHIFT, PAGE_KERNEL); + + return (void __iomem *)vaddr; +} + +static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) +{ + unsigned long vaddr = (unsigned long __force)vaddr_ptr; + void *base = ghes_ioremap_area->addr; + + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); + unmap_kernel_range_noflush(vaddr, PAGE_SIZE); + __flush_tlb_one(vaddr); +} + +static void ghes_iounmap_irq(void __iomem *vaddr_ptr) +{ + unsigned long vaddr = (unsigned long __force)vaddr_ptr; + void *base = ghes_ioremap_area->addr; + + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); + unmap_kernel_range_noflush(vaddr, PAGE_SIZE); + __flush_tlb_one(vaddr); +} + static struct ghes *ghes_new(struct acpi_hest_generic *generic) { struct ghes *ghes; @@ -101,7 +190,6 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) if (!ghes) return ERR_PTR(-ENOMEM); ghes->generic = generic; - INIT_LIST_HEAD(&ghes->list); rc = acpi_pre_map_gar(&generic->error_status_address); if (rc) goto err_free; @@ -158,22 +246,41 @@ static inline int ghes_severity(int severity) } } -/* SCI handler run in work queue, so ioremap can be used here */ -static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, - int from_phys) +static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, + int from_phys) { - void *vaddr; - - vaddr = ioremap_cache(paddr, len); - if (!vaddr) - return -ENOMEM; - if (from_phys) - memcpy(buffer, vaddr, len); - else - memcpy(vaddr, buffer, len); - iounmap(vaddr); - - return 0; + void __iomem *vaddr; + unsigned long flags = 0; + int in_nmi = in_nmi(); + u64 offset; + u32 trunk; + + while (len > 0) { + offset = paddr - (paddr & PAGE_MASK); + if (in_nmi) { + raw_spin_lock(&ghes_ioremap_lock_nmi); + vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); + } else { + spin_lock_irqsave(&ghes_ioremap_lock_irq, flags); + vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); + } + trunk = PAGE_SIZE - offset; + trunk = min(trunk, len); + if (from_phys) + memcpy_fromio(buffer, vaddr + offset, trunk); + else + memcpy_toio(vaddr + offset, buffer, trunk); + len -= trunk; + paddr += trunk; + buffer += trunk; + if (in_nmi) { + ghes_iounmap_nmi(vaddr); + raw_spin_unlock(&ghes_ioremap_lock_nmi); + } else { + ghes_iounmap_irq(vaddr); + spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); + } + } } static int ghes_read_estatus(struct ghes *ghes, int silent) @@ -194,10 +301,8 @@ static int ghes_read_estatus(struct ghes *ghes, int silent) if (!buf_paddr) return -ENOENT; - rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, - 
sizeof(*ghes->estatus), 1); - if (rc) - return rc; + ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, + sizeof(*ghes->estatus), 1); if (!ghes->estatus->block_status) return -ENOENT; @@ -212,17 +317,15 @@ static int ghes_read_estatus(struct ghes *ghes, int silent) goto err_read_block; if (apei_estatus_check_header(ghes->estatus)) goto err_read_block; - rc = ghes_copy_tofrom_phys(ghes->estatus + 1, - buf_paddr + sizeof(*ghes->estatus), - len - sizeof(*ghes->estatus), 1); - if (rc) - return rc; + ghes_copy_tofrom_phys(ghes->estatus + 1, + buf_paddr + sizeof(*ghes->estatus), + len - sizeof(*ghes->estatus), 1); if (apei_estatus_check(ghes->estatus)) goto err_read_block; rc = 0; err_read_block: - if (rc && !silent) + if (rc && !silent && printk_ratelimit()) pr_warning(FW_WARN GHES_PFX "Failed to read error status block!\n"); return rc; @@ -255,11 +358,26 @@ static void ghes_do_proc(struct ghes *ghes) } #endif } +} - if (!processed && printk_ratelimit()) - pr_warning(GHES_PFX - "Unknown error record from generic hardware error source: %d\n", - ghes->generic->header.source_id); +static void ghes_print_estatus(const char *pfx, struct ghes *ghes) +{ + /* Not more than 2 messages every 5 seconds */ + static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2); + + if (pfx == NULL) { + if (ghes_severity(ghes->estatus->error_severity) <= + GHES_SEV_CORRECTED) + pfx = KERN_WARNING HW_ERR; + else + pfx = KERN_ERR HW_ERR; + } + if (__ratelimit(&ratelimit)) { + printk( + "%s""Hardware error from APEI Generic Hardware Error Source: %d\n", + pfx, ghes->generic->header.source_id); + apei_estatus_print(pfx, ghes->estatus); + } } static int ghes_proc(struct ghes *ghes) @@ -269,6 +387,7 @@ static int ghes_proc(struct ghes *ghes) rc = ghes_read_estatus(ghes, 0); if (rc) goto out; + ghes_print_estatus(NULL, ghes); ghes_do_proc(ghes); out: @@ -276,6 +395,42 @@ out: return 0; } +static void ghes_add_timer(struct ghes *ghes) +{ + struct acpi_hest_generic *g = ghes->generic; + unsigned long expire; + + if (!g->notify.poll_interval) { + pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", + g->header.source_id); + return; + } + expire = jiffies + msecs_to_jiffies(g->notify.poll_interval); + ghes->timer.expires = round_jiffies_relative(expire); + add_timer(&ghes->timer); +} + +static void ghes_poll_func(unsigned long data) +{ + struct ghes *ghes = (void *)data; + + ghes_proc(ghes); + if (!(ghes->flags & GHES_EXITING)) + ghes_add_timer(ghes); +} + +static irqreturn_t ghes_irq_func(int irq, void *data) +{ + struct ghes *ghes = data; + int rc; + + rc = ghes_proc(ghes); + if (rc) + return IRQ_NONE; + + return IRQ_HANDLED; +} + static int ghes_notify_sci(struct notifier_block *this, unsigned long event, void *data) { @@ -292,10 +447,63 @@ static int ghes_notify_sci(struct notifier_block *this, return ret; } +static int ghes_notify_nmi(struct notifier_block *this, + unsigned long cmd, void *data) +{ + struct ghes *ghes, *ghes_global = NULL; + int sev, sev_global = -1; + int ret = NOTIFY_DONE; + + if (cmd != DIE_NMI) + return ret; + + raw_spin_lock(&ghes_nmi_lock); + list_for_each_entry_rcu(ghes, &ghes_nmi, list) { + if (ghes_read_estatus(ghes, 1)) { + ghes_clear_estatus(ghes); + continue; + } + sev = ghes_severity(ghes->estatus->error_severity); + if (sev > sev_global) { + sev_global = sev; + ghes_global = ghes; + } + ret = NOTIFY_STOP; + } + + if (ret == NOTIFY_DONE) + goto out; + + if (sev_global >= GHES_SEV_PANIC) { + oops_begin(); + ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global); + 
/* reboot to log the error! */ + if (panic_timeout == 0) + panic_timeout = ghes_panic_timeout; + panic("Fatal hardware error!"); + } + + list_for_each_entry_rcu(ghes, &ghes_nmi, list) { + if (!(ghes->flags & GHES_TO_CLEAR)) + continue; + /* Do not print estatus because printk is not NMI safe */ + ghes_do_proc(ghes); + ghes_clear_estatus(ghes); + } + +out: + raw_spin_unlock(&ghes_nmi_lock); + return ret; +} + static struct notifier_block ghes_notifier_sci = { .notifier_call = ghes_notify_sci, }; +static struct notifier_block ghes_notifier_nmi = { + .notifier_call = ghes_notify_nmi, +}; + static int __devinit ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; @@ -306,18 +514,27 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) if (!generic->enabled) return -ENODEV; - if (generic->error_block_length < - sizeof(struct acpi_hest_generic_status)) { - pr_warning(FW_BUG GHES_PFX -"Invalid error block length: %u for generic hardware error source: %d\n", - generic->error_block_length, + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + case ACPI_HEST_NOTIFY_EXTERNAL: + case ACPI_HEST_NOTIFY_SCI: + case ACPI_HEST_NOTIFY_NMI: + break; + case ACPI_HEST_NOTIFY_LOCAL: + pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", generic->header.source_id); goto err; + default: + pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); + goto err; } - if (generic->records_to_preallocate == 0) { - pr_warning(FW_BUG GHES_PFX -"Invalid records to preallocate: %u for generic hardware error source: %d\n", - generic->records_to_preallocate, + + rc = -EIO; + if (generic->error_block_length < + sizeof(struct acpi_hest_generic_status)) { + pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", + generic->error_block_length, generic->header.source_id); goto err; } @@ -327,38 +544,43 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) ghes = NULL; goto err; } - if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) { + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + ghes->timer.function = ghes_poll_func; + ghes->timer.data = (unsigned long)ghes; + init_timer_deferrable(&ghes->timer); + ghes_add_timer(ghes); + break; + case ACPI_HEST_NOTIFY_EXTERNAL: + /* External interrupt vector is GSI */ + if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { + pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", + generic->header.source_id); + goto err; + } + if (request_irq(ghes->irq, ghes_irq_func, + 0, "GHES IRQ", ghes)) { + pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", + generic->header.source_id); + goto err; + } + break; + case ACPI_HEST_NOTIFY_SCI: mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_sci)) register_acpi_hed_notifier(&ghes_notifier_sci); list_add_rcu(&ghes->list, &ghes_sci); mutex_unlock(&ghes_list_mutex); - } else { - unsigned char *notify = NULL; - - switch (generic->notify.type) { - case ACPI_HEST_NOTIFY_POLLED: - notify = "POLL"; - break; - case ACPI_HEST_NOTIFY_EXTERNAL: - case ACPI_HEST_NOTIFY_LOCAL: - notify = "IRQ"; - break; - case ACPI_HEST_NOTIFY_NMI: - notify = "NMI"; - break; - } - if (notify) { - pr_warning(GHES_PFX -"Generic hardware error source: %d notified via %s is not supported!\n", - generic->header.source_id, notify); - } else { - 
pr_warning(FW_WARN GHES_PFX -"Unknown notification type: %u for generic hardware error source: %d\n", - generic->notify.type, generic->header.source_id); - } - rc = -ENODEV; - goto err; + break; + case ACPI_HEST_NOTIFY_NMI: + mutex_lock(&ghes_list_mutex); + if (list_empty(&ghes_nmi)) + register_die_notifier(&ghes_notifier_nmi); + list_add_rcu(&ghes->list, &ghes_nmi); + mutex_unlock(&ghes_list_mutex); + break; + default: + BUG(); } platform_set_drvdata(ghes_dev, ghes); @@ -379,7 +601,14 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) ghes = platform_get_drvdata(ghes_dev); generic = ghes->generic; + ghes->flags |= GHES_EXITING; switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + del_timer_sync(&ghes->timer); + break; + case ACPI_HEST_NOTIFY_EXTERNAL: + free_irq(ghes->irq, ghes); + break; case ACPI_HEST_NOTIFY_SCI: mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); @@ -387,12 +616,23 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) unregister_acpi_hed_notifier(&ghes_notifier_sci); mutex_unlock(&ghes_list_mutex); break; + case ACPI_HEST_NOTIFY_NMI: + mutex_lock(&ghes_list_mutex); + list_del_rcu(&ghes->list); + if (list_empty(&ghes_nmi)) + unregister_die_notifier(&ghes_notifier_nmi); + mutex_unlock(&ghes_list_mutex); + /* + * To synchronize with NMI handler, ghes can only be + * freed after NMI handler finishes. + */ + synchronize_rcu(); + break; default: BUG(); break; } - synchronize_rcu(); ghes_fini(ghes); kfree(ghes); @@ -412,6 +652,8 @@ static struct platform_driver ghes_platform_driver = { static int __init ghes_init(void) { + int rc; + if (acpi_disabled) return -ENODEV; @@ -420,12 +662,25 @@ static int __init ghes_init(void) return -EINVAL; } - return platform_driver_register(&ghes_platform_driver); + rc = ghes_ioremap_init(); + if (rc) + goto err; + + rc = platform_driver_register(&ghes_platform_driver); + if (rc) + goto err_ioremap_exit; + + return 0; +err_ioremap_exit: + ghes_ioremap_exit(); +err: + return rc; } static void __exit ghes_exit(void) { platform_driver_unregister(&ghes_platform_driver); + ghes_ioremap_exit(); } module_init(ghes_init); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index daa7bc63f1d4..4ee58e72b730 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -195,24 +195,24 @@ static int __init setup_hest_disable(char *str) __setup("hest_disable", setup_hest_disable); -static int __init hest_init(void) +void __init acpi_hest_init(void) { acpi_status status; int rc = -ENODEV; unsigned int ghes_count = 0; if (acpi_disabled) - goto err; + return; if (hest_disable) { - pr_info(HEST_PFX "HEST tabling parsing is disabled.\n"); - goto err; + pr_info(HEST_PFX "Table parsing disabled.\n"); + return; } status = acpi_get_table(ACPI_SIG_HEST, 0, (struct acpi_table_header **)&hest_tab); if (status == AE_NOT_FOUND) { - pr_info(HEST_PFX "Table is not found!\n"); + pr_info(HEST_PFX "Table not found.\n"); goto err; } else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); @@ -226,15 +226,11 @@ static int __init hest_init(void) goto err; rc = hest_ghes_dev_register(ghes_count); - if (rc) - goto err; - - pr_info(HEST_PFX "HEST table parsing is initialized.\n"); + if (!rc) { + pr_info(HEST_PFX "Table parsing has been initialized.\n"); + return; + } - return 0; err: hest_disable = 1; - return rc; } - -subsys_initcall(hest_init); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 95649d373071..68bc227e7c4c 100644 --- a/drivers/acpi/battery.c +++ 
b/drivers/acpi/battery.c @@ -631,6 +631,17 @@ static int acpi_battery_update(struct acpi_battery *battery) return result; } +static void acpi_battery_refresh(struct acpi_battery *battery) +{ + if (!battery->bat.dev) + return; + + acpi_battery_get_info(battery); + /* The battery may have changed its reporting units. */ + sysfs_remove_battery(battery); + sysfs_add_battery(battery); +} + /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ @@ -868,6 +879,8 @@ static int acpi_battery_add_fs(struct acpi_device *device) struct proc_dir_entry *entry = NULL; int i; + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); if (!acpi_device_dir(device)) { acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_battery_dir); @@ -914,6 +927,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) if (!battery) return; old = battery->bat.dev; + if (event == ACPI_BATTERY_NOTIFY_INFO) + acpi_battery_refresh(battery); acpi_battery_update(battery); acpi_bus_generate_proc_event(device, event, acpi_battery_present(battery)); @@ -983,6 +998,7 @@ static int acpi_battery_resume(struct acpi_device *device) if (!device) return -EINVAL; battery = acpi_driver_data(device); + acpi_battery_refresh(battery); battery->update_time = 0; acpi_battery_update(battery); return 0; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d68bd61072bb..7ced61f39492 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir); #define STRUCT_TO_INT(s) (*((int*)&s)) -static int set_power_nocheck(const struct dmi_system_id *id) -{ - printk(KERN_NOTICE PREFIX "%s detected - " - "disable power check in power transition\n", id->ident); - acpi_power_nocheck = 1; - return 0; -} -static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = { - { - set_power_nocheck, "HP Pavilion 05", { - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), - DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"), - DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL}, - {}, -}; - #ifdef CONFIG_X86 static int set_copy_dsdt(const struct dmi_system_id *id) @@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); Power Management -------------------------------------------------------------------------- */ -int acpi_bus_get_power(acpi_handle handle, int *state) +static int __acpi_bus_get_power(struct acpi_device *device, int *state) { int result = 0; acpi_status status = 0; - struct acpi_device *device = NULL; unsigned long long psc = 0; - - result = acpi_bus_get_device(handle, &device); - if (result) - return result; + if (!device || !state) + return -EINVAL; *state = ACPI_STATE_UNKNOWN; - if (!device->flags.power_manageable) { - /* TBD: Non-recursive algorithm for walking up hierarchy */ - if (device->parent) - *state = device->parent->power.state; - else - *state = ACPI_STATE_D0; - } else { + if (device->flags.power_manageable) { /* * Get the device's power state either directly (via _PSC) or * indirectly (via power resources). 
*/ if (device->power.flags.power_resources) { - result = acpi_power_get_inferred_state(device); + result = acpi_power_get_inferred_state(device, state); if (result) return result; } else if (device->power.flags.explicit_get) { @@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state) NULL, &psc); if (ACPI_FAILURE(status)) return -ENODEV; - device->power.state = (int)psc; + *state = (int)psc; } - - *state = device->power.state; + } else { + /* TBD: Non-recursive algorithm for walking up hierarchy. */ + *state = device->parent ? + device->parent->power.state : ACPI_STATE_D0; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", - device->pnp.bus_id, device->power.state)); + device->pnp.bus_id, *state)); return 0; } -EXPORT_SYMBOL(acpi_bus_get_power); -int acpi_bus_set_power(acpi_handle handle, int state) +static int __acpi_bus_set_power(struct acpi_device *device, int state) { int result = 0; acpi_status status = AE_OK; - struct acpi_device *device = NULL; char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; - - result = acpi_bus_get_device(handle, &device); - if (result) - return result; - - if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) + if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) return -EINVAL; /* Make sure this is a valid target state */ - if (!device->flags.power_manageable) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", - kobject_name(&device->dev.kobj))); - return -ENODEV; - } - /* - * Get device's current power state - */ - if (!acpi_power_nocheck) { - /* - * Maybe the incorrect power state is returned on the bogus - * bios, which is different with the real power state. - * For example: the bios returns D0 state and the real power - * state is D3. OS expects to set the device to D0 state. In - * such case if OS uses the power state returned by the BIOS, - * the device can't be transisted to the correct power state. - * So if the acpi_power_nocheck is set, it is unnecessary to - * get the power state by calling acpi_bus_get_power. 
- */ - acpi_bus_get_power(device->handle, &device->power.state); - } - if ((state == device->power.state) && !device->flags.force_power_state) { + if (state == device->power.state) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", state)); return 0; @@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state) return result; } + +int acpi_bus_set_power(acpi_handle handle, int state) +{ + struct acpi_device *device; + int result; + + result = acpi_bus_get_device(handle, &device); + if (result) + return result; + + if (!device->flags.power_manageable) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device [%s] is not power manageable\n", + dev_name(&device->dev))); + return -ENODEV; + } + + return __acpi_bus_set_power(device, state); +} EXPORT_SYMBOL(acpi_bus_set_power); + +int acpi_bus_init_power(struct acpi_device *device) +{ + int state; + int result; + + if (!device) + return -EINVAL; + + device->power.state = ACPI_STATE_UNKNOWN; + + result = __acpi_bus_get_power(device, &state); + if (result) + return result; + + if (device->power.flags.power_resources) + result = acpi_power_on_resources(device, state); + + if (!result) + device->power.state = state; + + return result; +} + + +int acpi_bus_update_power(acpi_handle handle, int *state_p) +{ + struct acpi_device *device; + int state; + int result; + + result = acpi_bus_get_device(handle, &device); + if (result) + return result; + + result = __acpi_bus_get_power(device, &state); + if (result) + return result; + + result = __acpi_bus_set_power(device, state); + if (!result && state_p) + *state_p = state; + + return result; +} +EXPORT_SYMBOL_GPL(acpi_bus_update_power); + + bool acpi_bus_power_manageable(acpi_handle handle) { struct acpi_device *device; @@ -1023,15 +1039,8 @@ static int __init acpi_init(void) if (acpi_disabled) return result; - /* - * If the laptop falls into the DMI check table, the power state check - * will be disabled in the course of device power transition. 
- */ - dmi_check_system(power_nocheck_dmi_table); - acpi_scan_init(); acpi_ec_init(); - acpi_power_init(); acpi_debugfs_init(); acpi_sleep_proc_init(); acpi_wakeup_device_init(); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 71ef9cd0735f..76bbb78a5ad9 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device) input_report_switch(button->input, SW_LID, !state); input_sync(button->input); + if (state) + pm_wakeup_event(&device->dev, 0); + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, @@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) input_sync(input); input_report_key(input, keycode, 0); input_sync(input); + + pm_wakeup_event(&device->dev, 0); } acpi_bus_generate_proc_event(device, event, ++button->pushed); @@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device) acpi_enable_gpe(device->wakeup.gpe_device, device->wakeup.gpe_number); device->wakeup.run_wake_count++; - device->wakeup.state.enabled = 1; + device_set_wakeup_enable(&device->dev, true); } printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); @@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type) acpi_disable_gpe(device->wakeup.gpe_device, device->wakeup.gpe_number); device->wakeup.run_wake_count--; - device->wakeup.state.enabled = 0; + device_set_wakeup_enable(&device->dev, false); } acpi_button_remove_fs(device); diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 81514a4918cc..1864ad3cf895 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) complete_dock(ds); dock_event(ds, event, DOCK_EVENT); dock_lock(ds, 1); - acpi_update_gpes(); + acpi_update_all_gpes(); break; } if (dock_present(ds) || dock_in_progress(ds)) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 302b31ed31f1..fa848c4116a8 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) return 0; } -static u32 acpi_ec_gpe_handler(void *data) +static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, void *data) { struct acpi_ec *ec = data; @@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data) wake_up(&ec->wait); ec_check_sci(ec, acpi_ec_read_status(ec)); } - return ACPI_INTERRUPT_HANDLED; + return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } /* -------------------------------------------------------------------------- diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 60049080c869..467479f07c1f 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long if (!device) return -EINVAL; - result = acpi_bus_get_power(device->handle, &acpi_state); + result = acpi_bus_update_power(device->handle, &acpi_state); if (result) return result; @@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = { static int acpi_fan_add(struct acpi_device *device) { int result = 0; - int state = 0; struct thermal_cooling_device *cdev; if (!device) @@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device) strcpy(acpi_device_name(device), "Fan"); strcpy(acpi_device_class(device), ACPI_FAN_CLASS); - result = acpi_bus_get_power(device->handle, 
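acpi_bus_update_power(), introduced above, reads the state the platform currently reports and then programs that same state back, so the cached power state ends up consistent with reality; callers such as the fan driver may pass NULL when they do not need the resulting value. A small userspace sketch of that "query, reprogram, optionally report" shape, with get_state()/set_state() as hypothetical stand-ins for the __acpi_bus_get_power()/__acpi_bus_set_power() pair:

    #include <stdio.h>

    static int get_state(int *state) { *state = 3; return 0; }  /* stub */
    static int set_state(int state)
    {
            printf("programming D%d\n", state);
            return 0;
    }

    /* Read the current state, program it back, optionally report it. */
    static int update_power(int *state_p)
    {
            int state, result;

            result = get_state(&state);
            if (result)
                    return result;

            result = set_state(state);
            if (!result && state_p)         /* NULL: caller does not care */
                    *state_p = state;

            return result;
    }

    int main(void)
    {
            int state;

            if (!update_power(&state))
                    printf("device left in D%d\n", state);
            return 0;
    }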
&state); + result = acpi_bus_update_power(device->handle, NULL); if (result) { - printk(KERN_ERR PREFIX "Reading power state\n"); + printk(KERN_ERR PREFIX "Setting initial power state\n"); goto end; } - device->flags.force_power_state = 1; - acpi_bus_set_power(device->handle, state); - device->flags.force_power_state = 0; - cdev = thermal_cooling_device_register("Fan", device, &fan_cooling_ops); if (IS_ERR(cdev)) { @@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) static int acpi_fan_resume(struct acpi_device *device) { - int result = 0; - int power_state = 0; + int result; if (!device) return -EINVAL; - result = acpi_bus_get_power(device->handle, &power_state); - if (result) { - printk(KERN_ERR PREFIX - "Error reading fan power state\n"); - return result; - } - - device->flags.force_power_state = 1; - acpi_bus_set_power(device->handle, power_state); - device->flags.force_power_state = 0; + result = acpi_bus_update_power(device->handle, NULL); + if (result) + printk(KERN_ERR PREFIX "Error updating fan power state\n"); return result; } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 78b0164c35b2..7c47ed55e528 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) "firmware_node"); ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, "physical_node"); - if (acpi_dev->wakeup.flags.valid) { + if (acpi_dev->wakeup.flags.valid) device_set_wakeup_capable(dev, true); - device_set_wakeup_enable(dev, - acpi_dev->wakeup.state.enabled); - } } return 0; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a212bfeddf8c..b1cc81a0431b 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; } int acpi_power_init(void); int acpi_device_sleep_wake(struct acpi_device *dev, int enable, int sleep_state, int dev_state); -int acpi_power_get_inferred_state(struct acpi_device *device); +int acpi_power_get_inferred_state(struct acpi_device *device, int *state); +int acpi_power_on_resources(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state); -extern int acpi_power_nocheck; +int acpi_bus_init_power(struct acpi_device *device); int acpi_wakeup_device_init(void); void acpi_early_processor_set_pdc(void); @@ -82,8 +83,16 @@ extern int acpi_sleep_init(void); #ifdef CONFIG_ACPI_SLEEP int acpi_sleep_proc_init(void); +int suspend_nvs_alloc(void); +void suspend_nvs_free(void); +int suspend_nvs_save(void); +void suspend_nvs_restore(void); #else static inline int acpi_sleep_proc_init(void) { return 0; } +static inline int suspend_nvs_alloc(void) { return 0; } +static inline void suspend_nvs_free(void) {} +static inline int suspend_nvs_save(void) { return 0; } +static inline void suspend_nvs_restore(void) {} #endif #endif /* _ACPI_INTERNAL_H_ */ diff --git a/kernel/power/nvs.c b/drivers/acpi/nvs.c similarity index 86% rename from kernel/power/nvs.c rename to drivers/acpi/nvs.c index 1836db60bbb6..54b6ab8040a6 100644 --- a/kernel/power/nvs.c +++ b/drivers/acpi/nvs.c @@ -1,7 +1,7 @@ /* - * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory + * nvs.c - Routines for saving and restoring ACPI NVS memory region * - * Copyright (C) 2008,2009 Rafael J. Wysocki , Novell Inc. + * Copyright (C) 2008-2011 Rafael J. Wysocki , Novell Inc. * * This file is released under the GPLv2. 
*/ @@ -11,7 +11,8 @@ #include #include #include -#include +#include +#include /* * Platforms, like ACPI, may want us to save some memory used by them during @@ -79,7 +80,7 @@ void suspend_nvs_free(void) free_page((unsigned long)entry->data); entry->data = NULL; if (entry->kaddr) { - iounmap(entry->kaddr); + acpi_os_unmap_memory(entry->kaddr, entry->size); entry->kaddr = NULL; } } @@ -105,7 +106,7 @@ int suspend_nvs_alloc(void) /** * suspend_nvs_save - save NVS memory regions */ -void suspend_nvs_save(void) +int suspend_nvs_save(void) { struct nvs_page *entry; @@ -113,9 +114,16 @@ void suspend_nvs_save(void) list_for_each_entry(entry, &nvs_list, node) if (entry->data) { - entry->kaddr = ioremap(entry->phys_start, entry->size); + entry->kaddr = acpi_os_map_memory(entry->phys_start, + entry->size); + if (!entry->kaddr) { + suspend_nvs_free(); + return -ENOMEM; + } memcpy(entry->data, entry->kaddr, entry->size); } + + return 0; } /** diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 055d7b701fff..e2dd6de5d50c 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -320,7 +320,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) pg_off = round_down(phys, PAGE_SIZE); pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; - virt = ioremap(pg_off, pg_sz); + virt = ioremap_cache(pg_off, pg_sz); if (!virt) { kfree(map); return NULL; @@ -642,7 +642,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) virt_addr = acpi_map_vaddr_lookup(phys_addr, size); rcu_read_unlock(); if (!virt_addr) { - virt_addr = ioremap(phys_addr, size); + virt_addr = ioremap_cache(phys_addr, size); unmap = 1; } if (!value) @@ -678,7 +678,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) virt_addr = acpi_map_vaddr_lookup(phys_addr, size); rcu_read_unlock(); if (!virt_addr) { - virt_addr = ioremap(phys_addr, size); + virt_addr = ioremap_cache(phys_addr, size); unmap = 1; } @@ -1233,8 +1233,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); int acpi_check_resource_conflict(const struct resource *res) { struct acpi_res_list *res_list_elem; - int ioport; - int clash = 0; + int ioport = 0, clash = 0; if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) return 0; @@ -1264,9 +1263,13 @@ int acpi_check_resource_conflict(const struct resource *res) if (clash) { if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { printk(KERN_WARNING "ACPI: resource %s %pR" - " conflicts with ACPI region %s %pR\n", + " conflicts with ACPI region %s " + "[%s 0x%zx-0x%zx]\n", res->name, res, res_list_elem->name, - res_list_elem); + (res_list_elem->resource_type == + ACPI_ADR_SPACE_SYSTEM_IO) ? 
"io" : "mem", + (size_t) res_list_elem->start, + (size_t) res_list_elem->end); if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) printk(KERN_NOTICE "ACPI: This conflict may" " cause random problems and system" diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 96668ad09622..d9766797cd98 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -36,6 +36,7 @@ #include #include #include +#include #define PREFIX "ACPI: " @@ -47,6 +48,11 @@ static int acpi_pci_root_add(struct acpi_device *device); static int acpi_pci_root_remove(struct acpi_device *device, int type); static int acpi_pci_root_start(struct acpi_device *device); +#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \ + | OSC_ACTIVE_STATE_PWR_SUPPORT \ + | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \ + | OSC_MSI_SUPPORT) + static const struct acpi_device_id root_device_ids[] = { {"PNP0A03", 0}, {"", 0}, @@ -566,6 +572,33 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) if (flags != base_flags) acpi_pci_osc_support(root, flags); + if (!pcie_ports_disabled + && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { + flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL + | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL + | OSC_PCI_EXPRESS_PME_CONTROL; + + if (pci_aer_available()) { + if (aer_acpi_firmware_first()) + dev_dbg(root->bus->bridge, + "PCIe errors handled by BIOS.\n"); + else + flags |= OSC_PCI_EXPRESS_AER_CONTROL; + } + + dev_info(root->bus->bridge, + "Requesting ACPI _OSC control (0x%02x)\n", flags); + + status = acpi_pci_osc_control_set(device->handle, &flags, + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + if (ACPI_SUCCESS(status)) + dev_info(root->bus->bridge, + "ACPI _OSC control (0x%02x) granted\n", flags); + else + dev_dbg(root->bus->bridge, + "ACPI _OSC request failed (code %d)\n", status); + } + pci_acpi_add_bus_pm_notifier(device, root->bus); if (device->wakeup.flags.run_wake) device_set_run_wake(root->bus->bridge, true); @@ -603,6 +636,8 @@ static int __init acpi_pci_root_init(void) if (acpi_pci_disabled) return 0; + acpi_hest_init(); + pci_acpi_crs_quirks(); if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) return -ENODEV; diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 4c9c2fb5d98f..9ac2a9fa90ff 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power"); #define ACPI_POWER_RESOURCE_STATE_ON 0x01 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF -int acpi_power_nocheck; -module_param_named(power_nocheck, acpi_power_nocheck, bool, 000); - static int acpi_power_add(struct acpi_device *device); static int acpi_power_remove(struct acpi_device *device, int type); static int acpi_power_resume(struct acpi_device *device); @@ -148,9 +145,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state) static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) { - int result = 0, state1; - u32 i = 0; - + int cur_state; + int i = 0; if (!list || !state) return -EINVAL; @@ -158,25 +154,33 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) /* The state of the list is 'on' IFF all resources are 'on'. */ for (i = 0; i < list->count; i++) { - /* - * The state of the power resource can be obtained by - * using the ACPI handle. In such case it is unnecessary to - * get the Power resource first and then get its state again. 
- */ - result = acpi_power_get_state(list->handles[i], &state1); + struct acpi_power_resource *resource; + acpi_handle handle = list->handles[i]; + int result; + + result = acpi_power_get_context(handle, &resource); if (result) return result; - *state = state1; + mutex_lock(&resource->resource_lock); - if (*state != ACPI_POWER_RESOURCE_STATE_ON) + result = acpi_power_get_state(handle, &cur_state); + + mutex_unlock(&resource->resource_lock); + + if (result) + return result; + + if (cur_state != ACPI_POWER_RESOURCE_STATE_ON) break; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", - *state ? "on" : "off")); + cur_state ? "on" : "off")); - return result; + *state = cur_state; + + return 0; } static int __acpi_power_on(struct acpi_power_resource *resource) @@ -222,7 +226,7 @@ static int acpi_power_on(acpi_handle handle) return result; } -static int acpi_power_off_device(acpi_handle handle) +static int acpi_power_off(acpi_handle handle) { int result = 0; acpi_status status = AE_OK; @@ -266,6 +270,35 @@ static int acpi_power_off_device(acpi_handle handle) return result; } +static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) +{ + int i; + + for (i = num_res - 1; i >= 0 ; i--) + acpi_power_off(list->handles[i]); +} + +static void acpi_power_off_list(struct acpi_handle_list *list) +{ + __acpi_power_off_list(list, list->count); +} + +static int acpi_power_on_list(struct acpi_handle_list *list) +{ + int result = 0; + int i; + + for (i = 0; i < list->count; i++) { + result = acpi_power_on(list->handles[i]); + if (result) { + __acpi_power_off_list(list, i); + break; + } + } + + return result; +} + /** * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in * ACPI 3.0) _PSW (Power State Wake) @@ -404,8 +437,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) /* Close power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { - int ret = acpi_power_off_device( - dev->wakeup.resources.handles[i]); + int ret = acpi_power_off(dev->wakeup.resources.handles[i]); if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; @@ -423,19 +455,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) Device Power Management -------------------------------------------------------------------------- */ -int acpi_power_get_inferred_state(struct acpi_device *device) +int acpi_power_get_inferred_state(struct acpi_device *device, int *state) { int result = 0; struct acpi_handle_list *list = NULL; int list_state = 0; int i = 0; - - if (!device) + if (!device || !state) return -EINVAL; - device->power.state = ACPI_STATE_UNKNOWN; - /* * We know a device's inferred power state when all the resources * required for a given D-state are 'on'. 
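acpi_power_on_list() above turns power resources on in list order and, if any of them fails, turns the already-enabled ones back off in reverse order before returning the error. The same partial-rollback idiom in a self-contained userspace sketch (power_on()/power_off() are hypothetical stand-ins for the ACPI helpers):

    #include <stdio.h>

    #define NRES 4

    static int power_on(int i)
    {
            if (i == 2)                     /* pretend the third one fails */
                    return -1;
            printf("resource %d on\n", i);
            return 0;
    }

    static void power_off(int i)
    {
            printf("resource %d off\n", i);
    }

    /* Enable resources 0..NRES-1; on failure, undo 0..i-1 in reverse. */
    static int power_on_list(void)
    {
            int i, result = 0;

            for (i = 0; i < NRES; i++) {
                    result = power_on(i);
                    if (result) {
                            while (--i >= 0)
                                    power_off(i);
                            break;
                    }
            }
            return result;
    }

    int main(void)
    {
            printf("power_on_list() = %d\n", power_on_list());
            return 0;
    }

Turning the new resources on before releasing the old ones, as acpi_power_transition() now does, keeps the device powered throughout the transition.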
@@ -450,22 +479,26 @@ int acpi_power_get_inferred_state(struct acpi_device *device) return result; if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { - device->power.state = i; + *state = i; return 0; } } - device->power.state = ACPI_STATE_D3; - + *state = ACPI_STATE_D3; return 0; } +int acpi_power_on_resources(struct acpi_device *device, int state) +{ + if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) + return -EINVAL; + + return acpi_power_on_list(&device->power.states[state].resources); +} + int acpi_power_transition(struct acpi_device *device, int state) { - int result = 0; - struct acpi_handle_list *cl = NULL; /* Current Resources */ - struct acpi_handle_list *tl = NULL; /* Target Resources */ - int i = 0; + int result; if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) return -EINVAL; @@ -477,37 +510,20 @@ int acpi_power_transition(struct acpi_device *device, int state) || (device->power.state > ACPI_STATE_D3)) return -ENODEV; - cl = &device->power.states[device->power.state].resources; - tl = &device->power.states[state].resources; - /* TBD: Resources must be ordered. */ /* * First we reference all power resources required in the target list - * (e.g. so the device doesn't lose power while transitioning). + * (e.g. so the device doesn't lose power while transitioning). Then, + * we dereference all power resources used in the current list. */ - for (i = 0; i < tl->count; i++) { - result = acpi_power_on(tl->handles[i]); - if (result) - goto end; - } + result = acpi_power_on_list(&device->power.states[state].resources); + if (!result) + acpi_power_off_list( + &device->power.states[device->power.state].resources); - /* - * Then we dereference all power resources used in the current list. - */ - for (i = 0; i < cl->count; i++) { - result = acpi_power_off_device(cl->handles[i]); - if (result) - goto end; - } - - end: - if (result) - device->power.state = ACPI_STATE_UNKNOWN; - else { - /* We shouldn't change the state till all above operations succeed */ - device->power.state = state; - } + /* We shouldn't change the state unless the above operations succeed. */ + device->power.state = result ? ACPI_STATE_UNKNOWN : state; return result; } diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index afad67769db6..f5f986991b52 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) dev->pnp.bus_id, (u32) dev->wakeup.sleep_state, dev->wakeup.flags.run_wake ? '*' : ' ', - dev->wakeup.state.enabled ? "enabled" : "disabled"); + (device_may_wakeup(&dev->dev) + || (ldev && device_may_wakeup(ldev))) ? + "enabled" : "disabled"); if (ldev) seq_printf(seq, "%s:%s", ldev->bus ? ldev->bus->name : "no-bus", @@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) { struct device *dev = acpi_get_physical_device(adev->handle); - if (dev && device_can_wakeup(dev)) - device_set_wakeup_enable(dev, adev->wakeup.state.enabled); + if (dev && device_can_wakeup(dev)) { + bool enable = !device_may_wakeup(dev); + device_set_wakeup_enable(dev, enable); + } } static ssize_t @@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file, char strbuf[5]; char str[5] = ""; unsigned int len = count; - struct acpi_device *found_dev = NULL; if (len > 4) len = 4; @@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file, continue; if (!strncmp(dev->pnp.bus_id, str, 4)) { - dev->wakeup.state.enabled = - dev->wakeup.state.enabled ? 
0 : 1; - found_dev = dev; - break; - } - } - if (found_dev) { - physical_device_enable_wakeup(found_dev); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = container_of(node, - struct - acpi_device, - wakeup_list); - - if ((dev != found_dev) && - (dev->wakeup.gpe_number == - found_dev->wakeup.gpe_number) - && (dev->wakeup.gpe_device == - found_dev->wakeup.gpe_device)) { - printk(KERN_WARNING - "ACPI: '%s' and '%s' have the same GPE, " - "can't disable/enable one separately\n", - dev->pnp.bus_id, found_dev->pnp.bus_id); - dev->wakeup.state.enabled = - found_dev->wakeup.state.enabled; + if (device_can_wakeup(&dev->dev)) { + bool enable = !device_may_wakeup(&dev->dev); + device_set_wakeup_enable(&dev->dev, enable); + } else { physical_device_enable_wakeup(dev); } + break; } } mutex_unlock(&acpi_device_lock); diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index bec561c14beb..3c1a2fec8cda 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -23,7 +23,7 @@ static int set_no_mwait(const struct dmi_system_id *id) { printk(KERN_NOTICE PREFIX "%s detected - " "disabling mwait for CPU C-states\n", id->ident); - idle_nomwait = 1; + boot_option_idle_override = IDLE_NOMWAIT; return 0; } @@ -283,7 +283,7 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) { acpi_status status = AE_OK; - if (idle_nomwait) { + if (boot_option_idle_override == IDLE_NOMWAIT) { /* * If mwait is disabled for CPU C-states, the C2C3_FFH access * mode will be disabled in the parameter of _PDC object. diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 85e48047d7b0..360a74e6add0 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -40,10 +40,6 @@ #include #include #include -#ifdef CONFIG_ACPI_PROCFS -#include -#include -#endif #include #include #include @@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr) return result; } -#ifdef CONFIG_ACPI_PROCFS -static struct proc_dir_entry *acpi_processor_dir = NULL; - -static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) -{ - struct proc_dir_entry *entry = NULL; - - - if (!acpi_device_dir(device)) { - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), - acpi_processor_dir); - if (!acpi_device_dir(device)) - return -ENODEV; - } - - /* 'throttling' [R/W] */ - entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, - S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device), - &acpi_processor_throttling_fops, - acpi_driver_data(device)); - if (!entry) - return -EIO; - return 0; -} -static int acpi_processor_remove_fs(struct acpi_device *device) -{ - - if (acpi_device_dir(device)) { - remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, - acpi_device_dir(device)); - remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); - acpi_device_dir(device) = NULL; - } - - return 0; -} -#else -static inline int acpi_processor_add_fs(struct acpi_device *device) -{ - return 0; -} -static inline int acpi_processor_remove_fs(struct acpi_device *device) -{ - return 0; -} -#endif /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, if (action == CPU_ONLINE && pr) { acpi_processor_ppc_has_changed(pr, 0); acpi_processor_cst_has_changed(pr); + 
acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } + if (action == CPU_DEAD && pr) { + /* invalidate the flag.throttling after one CPU is offline */ + acpi_processor_reevaluate_tstate(pr, action); + } return NOTIFY_OK; } @@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) per_cpu(processors, pr->id) = pr; - result = acpi_processor_add_fs(device); - if (result) - goto err_free_cpumask; - sysdev = get_cpu_sysdev(pr->id); if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { result = -EFAULT; - goto err_remove_fs; + goto err_free_cpumask; } #ifdef CONFIG_CPU_FREQ @@ -590,8 +540,6 @@ err_thermal_unregister: thermal_cooling_device_unregister(pr->cdev); err_power_exit: acpi_processor_power_exit(pr, device); -err_remove_fs: - acpi_processor_remove_fs(device); err_free_cpumask: free_cpumask_var(pr->throttling.shared_cpu_map); @@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type) sysfs_remove_link(&device->dev.kobj, "sysdev"); - acpi_processor_remove_fs(device); - if (pr->cdev) { sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); sysfs_remove_link(&pr->cdev->device.kobj, "device"); @@ -854,12 +800,6 @@ static int __init acpi_processor_init(void) memset(&errata, 0, sizeof(errata)); -#ifdef CONFIG_ACPI_PROCFS - acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); - if (!acpi_processor_dir) - return -ENOMEM; -#endif - if (!cpuidle_register_driver(&acpi_idle_driver)) { printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", acpi_idle_driver.name); @@ -885,10 +825,6 @@ static int __init acpi_processor_init(void) out_cpuidle: cpuidle_unregister_driver(&acpi_idle_driver); -#ifdef CONFIG_ACPI_PROCFS - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); -#endif - return result; } @@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void) cpuidle_unregister_driver(&acpi_idle_driver); -#ifdef CONFIG_ACPI_PROCFS - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); -#endif - return; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index a765b823aa9e..d615b7d69bca 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -79,6 +79,13 @@ module_param(bm_check_disable, uint, 0000); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); +static int disabled_by_idle_boot_param(void) +{ + return boot_option_idle_override == IDLE_POLL || + boot_option_idle_override == IDLE_FORCE_MWAIT || + boot_option_idle_override == IDLE_HALT; +} + /* * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. * For now disable this. Probably a bug somewhere else. @@ -455,7 +462,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) continue; } if (cx.type == ACPI_STATE_C1 && - (idle_halt || idle_nomwait)) { + (boot_option_idle_override == IDLE_NOMWAIT)) { /* * In most cases the C1 space_id obtained from * _CST object is FIXED_HARDWARE access mode. 
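The hotplug notifier change above re-evaluates throttling state both when a CPU comes online and when one goes away. A kernel-style sketch of how such a callback is typically wired up, written against the 2.6.38-era hotplug notifier API and not meant as a complete module; reevaluate_state() is a hypothetical stand-in for acpi_processor_reevaluate_tstate():

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static void reevaluate_state(unsigned int cpu, unsigned long action)
    {
            /* hypothetical per-CPU revalidation, e.g. of throttling limits */
    }

    static int cpu_soft_notify(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            /* React both to a CPU arriving and to one going away. */
            if (action == CPU_ONLINE || action == CPU_DEAD)
                    reevaluate_state(cpu, action);

            return NOTIFY_OK;
    }

    static struct notifier_block cpu_notifier = {
            .notifier_call = cpu_soft_notify,
    };

    /* registered from the driver's init path with
     * register_hotcpu_notifier(&cpu_notifier); */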
@@ -1016,7 +1023,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) state->flags = 0; switch (cx->type) { case ACPI_STATE_C1: - state->flags |= CPUIDLE_FLAG_SHALLOW; if (cx->entry_method == ACPI_CSTATE_FFH) state->flags |= CPUIDLE_FLAG_TIME_VALID; @@ -1025,16 +1031,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) break; case ACPI_STATE_C2: - state->flags |= CPUIDLE_FLAG_BALANCED; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_simple; dev->safe_state = state; break; case ACPI_STATE_C3: - state->flags |= CPUIDLE_FLAG_DEEP; state->flags |= CPUIDLE_FLAG_TIME_VALID; - state->flags |= CPUIDLE_FLAG_CHECK_BM; state->enter = pr->flags.bm_check ? acpi_idle_enter_bm : acpi_idle_enter_simple; @@ -1058,7 +1061,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) { int ret = 0; - if (boot_option_idle_override) + if (disabled_by_idle_boot_param()) return 0; if (!pr) @@ -1089,19 +1092,10 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, acpi_status status = 0; static int first_run; - if (boot_option_idle_override) + if (disabled_by_idle_boot_param()) return 0; if (!first_run) { - if (idle_halt) { - /* - * When the boot option of "idle=halt" is added, halt - * is used for CPU IDLE. - * In such case C2/C3 is meaningless. So the max_cstate - * is set to one. - */ - max_cstate = 1; - } dmi_check_system(processor_power_dmi_table); max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) @@ -1142,7 +1136,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device) { - if (boot_option_idle_override) + if (disabled_by_idle_boot_param()) return 0; cpuidle_unregister_device(&pr->power.dev); diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ff3632717c51..fa84e9744330 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -32,10 +32,6 @@ #include #include #include -#ifdef CONFIG_ACPI_PROCFS -#include -#include -#endif #include #include @@ -369,6 +365,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) return acpi_processor_set_throttling(pr, target_state, false); } +/* + * This function is used to reevaluate whether the T-state is valid + * after one CPU is onlined/offlined. + * It is noted that it won't reevaluate the following properties for + * the T-state. + * 1. Control method. + * 2. the number of supported T-state + * 3. TSD domain + */ +void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + unsigned long action) +{ + int result = 0; + + if (action == CPU_DEAD) { + /* When one CPU is offline, the T-state throttling + * will be invalidated. + */ + pr->flags.throttling = 0; + return; + } + /* the following is to recheck whether the T-state is valid for + * the online CPU + */ + if (!pr->throttling.state_count) { + /* If the number of T-state is invalid, it is + * invalidated. + */ + pr->flags.throttling = 0; + return; + } + pr->flags.throttling = 1; + + /* Disable throttling (if enabled). We'll let subsequent + * policy (e.g.thermal) decide to lower performance if it + * so chooses, but for now we'll crank up the speed. 
+ */ + + result = acpi_processor_get_throttling(pr); + if (result) + goto end; + + if (pr->throttling.state) { + result = acpi_processor_set_throttling(pr, 0, false); + if (result) + goto end; + } + +end: + if (result) + pr->flags.throttling = 0; +} /* * _PTC - Processor Throttling Control (and status) register location */ @@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) */ cpumask_copy(saved_mask, ¤t->cpus_allowed); /* FIXME: use work_on_cpu() */ - set_cpus_allowed_ptr(current, cpumask_of(pr->id)); + if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { + /* Can't migrate to the target pr->id CPU. Exit */ + free_cpumask_var(saved_mask); + return -ENODEV; + } ret = pr->throttling.acpi_processor_get_throttling(pr); /* restore the previous state */ set_cpus_allowed_ptr(current, saved_mask); @@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, return -ENOMEM; } + if (cpu_is_offline(pr->id)) { + /* + * the cpu pointed by pr->id is offline. Unnecessary to change + * the throttling state any more. + */ + return -ENODEV; + } + cpumask_copy(saved_mask, ¤t->cpus_allowed); t_state.target_state = state; p_throttling = &(pr->throttling); @@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { /* FIXME: use work_on_cpu() */ - set_cpus_allowed_ptr(current, cpumask_of(pr->id)); + if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { + /* Can't migrate to the pr->id CPU. Exit */ + ret = -ENODEV; + goto exit; + } ret = p_throttling->acpi_processor_set_throttling(pr, t_state.target_state, force); } else { @@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, } t_state.cpu = i; /* FIXME: use work_on_cpu() */ - set_cpus_allowed_ptr(current, cpumask_of(i)); + if (set_cpus_allowed_ptr(current, cpumask_of(i))) + continue; ret = match_pr->throttling. acpi_processor_set_throttling( match_pr, t_state.target_state, force); @@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, /* restore the previous state */ /* FIXME: use work_on_cpu() */ set_cpus_allowed_ptr(current, saved_mask); +exit: free_cpumask_var(online_throttling_cpus); free_cpumask_var(saved_mask); return ret; @@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) return result; } -#ifdef CONFIG_ACPI_PROCFS -/* proc interface */ -static int acpi_processor_throttling_seq_show(struct seq_file *seq, - void *offset) -{ - struct acpi_processor *pr = seq->private; - int i = 0; - int result = 0; - - if (!pr) - goto end; - - if (!(pr->throttling.state_count > 0)) { - seq_puts(seq, "\n"); - goto end; - } - - result = acpi_processor_get_throttling(pr); - - if (result) { - seq_puts(seq, - "Could not determine current throttling state.\n"); - goto end; - } - - seq_printf(seq, "state count: %d\n" - "active state: T%d\n" - "state available: T%d to T%d\n", - pr->throttling.state_count, pr->throttling.state, - pr->throttling_platform_limit, - pr->throttling.state_count - 1); - - seq_puts(seq, "states:\n"); - if (pr->throttling.acpi_processor_get_throttling == - acpi_processor_get_throttling_fadt) { - for (i = 0; i < pr->throttling.state_count; i++) - seq_printf(seq, " %cT%d: %02d%%\n", - (i == pr->throttling.state ? '*' : ' '), i, - (pr->throttling.states[i].performance ? 
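Several throttling paths above now check the return value of set_cpus_allowed_ptr() instead of assuming that migrating onto the target CPU succeeded; the migration can fail once that CPU has gone offline. A kernel-style sketch of the checked "migrate, touch the CPU-local state, migrate back" sequence, with read_local_throttle_state() as a hypothetical stand-in for the per-CPU access:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/sched.h>

    static int read_local_throttle_state(void)
    {
            /* hypothetical access to a CPU-local (e.g. MSR-backed) register */
            return 0;
    }

    static int read_throttle_state_on(unsigned int cpu)
    {
            cpumask_var_t saved_mask;
            int ret;

            if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(saved_mask, &current->cpus_allowed);

            /* The migration can fail, e.g. if @cpu was taken offline. */
            if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) {
                    free_cpumask_var(saved_mask);
                    return -ENODEV;
            }

            ret = read_local_throttle_state();

            /* Restore the caller's affinity before returning. */
            set_cpus_allowed_ptr(current, saved_mask);
            free_cpumask_var(saved_mask);
            return ret;
    }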
pr-> - throttling.states[i].performance / 10 : 0)); - } else { - for (i = 0; i < pr->throttling.state_count; i++) - seq_printf(seq, " %cT%d: %02d%%\n", - (i == pr->throttling.state ? '*' : ' '), i, - (int)pr->throttling.states_tss[i]. - freqpercentage); - } - - end: - return 0; -} - -static int acpi_processor_throttling_open_fs(struct inode *inode, - struct file *file) -{ - return single_open(file, acpi_processor_throttling_seq_show, - PDE(inode)->data); -} - -static ssize_t acpi_processor_write_throttling(struct file *file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = file->private_data; - struct acpi_processor *pr = m->private; - char state_string[5] = ""; - char *charp = NULL; - size_t state_val = 0; - char tmpbuf[5] = ""; - - if (!pr || (count > sizeof(state_string) - 1)) - return -EINVAL; - - if (copy_from_user(state_string, buffer, count)) - return -EFAULT; - - state_string[count] = '\0'; - if ((count > 0) && (state_string[count-1] == '\n')) - state_string[count-1] = '\0'; - - charp = state_string; - if ((state_string[0] == 't') || (state_string[0] == 'T')) - charp++; - - state_val = simple_strtoul(charp, NULL, 0); - if (state_val >= pr->throttling.state_count) - return -EINVAL; - - snprintf(tmpbuf, 5, "%zu", state_val); - - if (strcmp(tmpbuf, charp) != 0) - return -EINVAL; - - result = acpi_processor_set_throttling(pr, state_val, false); - if (result) - return result; - - return count; -} - -const struct file_operations acpi_processor_throttling_fops = { - .owner = THIS_MODULE, - .open = acpi_processor_throttling_open_fs, - .read = seq_read, - .write = acpi_processor_write_throttling, - .llseek = seq_lseek, - .release = single_release, -}; -#endif diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index e5dbedb16bbf..51ae3794ec7f 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir, const struct file_operations *state_fops, const struct file_operations *alarm_fops, void *data) { + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); if (!*dir) { *dir = proc_mkdir(dir_name, parent_dir); if (!*dir) { diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 29ef505c487b..b99e62494607 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -778,7 +778,7 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, wakeup->resources.handles[i] = element->reference.handle; } - acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number); + acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number); out: kfree(buffer.pointer); @@ -803,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) { device->wakeup.flags.run_wake = 1; - device->wakeup.flags.always_enabled = 1; + device_set_wakeup_capable(&device->dev, true); return; } @@ -815,16 +815,22 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) !!(event_status & ACPI_EVENT_FLAG_HANDLE); } -static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) +static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) { + acpi_handle temp; acpi_status status = 0; int psw_error; + /* Presence of _PRW indicates wake capable */ + status = acpi_get_handle(device->handle, "_PRW", &temp); + if (ACPI_FAILURE(status)) + return; + status = 
acpi_bus_extract_wakeup_device_power_package(device->handle, &device->wakeup); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); - goto end; + return; } device->wakeup.flags.valid = 1; @@ -840,13 +846,10 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) if (psw_error) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in _DSW or _PSW evaluation\n")); - -end: - if (ACPI_FAILURE(status)) - device->flags.wake_capable = 0; - return 0; } +static void acpi_bus_add_power_resource(acpi_handle handle); + static int acpi_bus_get_power_flags(struct acpi_device *device) { acpi_status status = 0; @@ -875,8 +878,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) acpi_evaluate_reference(device->handle, object_name, NULL, &ps->resources); if (ps->resources.count) { + int j; + device->power.flags.power_resources = 1; ps->flags.valid = 1; + for (j = 0; j < ps->resources.count; j++) + acpi_bus_add_power_resource(ps->resources.handles[j]); } /* Evaluate "_PSx" to see if we can do explicit sets */ @@ -901,10 +908,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) device->power.states[ACPI_STATE_D3].flags.valid = 1; device->power.states[ACPI_STATE_D3].power = 0; - /* TBD: System wake support and resource requirements. */ - - device->power.state = ACPI_STATE_UNKNOWN; - acpi_bus_get_power(device->handle, &(device->power.state)); + acpi_bus_init_power(device); return 0; } @@ -947,11 +951,6 @@ static int acpi_bus_get_flags(struct acpi_device *device) if (ACPI_SUCCESS(status)) device->flags.power_manageable = 1; - /* Presence of _PRW indicates wake capable */ - status = acpi_get_handle(device->handle, "_PRW", &temp); - if (ACPI_SUCCESS(status)) - device->flags.wake_capable = 1; - /* TBD: Performance management */ return 0; @@ -1278,11 +1277,7 @@ static int acpi_add_single_object(struct acpi_device **child, * Wakeup device management *----------------------- */ - if (device->flags.wake_capable) { - result = acpi_bus_get_wakeup_device_flags(device); - if (result) - goto end; - } + acpi_bus_get_wakeup_device_flags(device); /* * Performance Management @@ -1326,6 +1321,20 @@ end: #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) +static void acpi_bus_add_power_resource(acpi_handle handle) +{ + struct acpi_bus_ops ops = { + .acpi_op_add = 1, + .acpi_op_start = 1, + }; + struct acpi_device *device = NULL; + + acpi_bus_get_device(handle, &device); + if (!device) + acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER, + ACPI_STA_DEFAULT, &ops); +} + static int acpi_bus_type_and_status(acpi_handle handle, int *type, unsigned long long *sta) { @@ -1371,7 +1380,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, struct acpi_bus_ops *ops = context; int type; unsigned long long sta; - struct acpi_device_wakeup wakeup; struct acpi_device *device; acpi_status status; int result; @@ -1382,7 +1390,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, if (!(sta & ACPI_STA_DEVICE_PRESENT) && !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { - acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); + struct acpi_device_wakeup wakeup; + acpi_handle temp; + + status = acpi_get_handle(handle, "_PRW", &temp); + if (ACPI_SUCCESS(status)) + acpi_bus_extract_wakeup_device_power_package(handle, + &wakeup); return AE_CTRL_DEPTH; } @@ -1467,7 +1481,7 @@ int acpi_bus_start(struct acpi_device *device) result = acpi_bus_scan(device->handle, 
&ops, NULL); - acpi_update_gpes(); + acpi_update_all_gpes(); return result; } @@ -1573,6 +1587,8 @@ int __init acpi_scan_init(void) printk(KERN_ERR PREFIX "Could not register bus type\n"); } + acpi_power_init(); + /* * Enumerate devices in the ACPI namespace. */ @@ -1584,7 +1600,7 @@ int __init acpi_scan_init(void) if (result) acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); else - acpi_update_gpes(); + acpi_update_all_gpes(); return result; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index febb153b5a68..fdd3aeeb6def 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -124,8 +124,7 @@ static int acpi_pm_freeze(void) static int acpi_pm_pre_suspend(void) { acpi_pm_freeze(); - suspend_nvs_save(); - return 0; + return suspend_nvs_save(); } /** @@ -151,7 +150,7 @@ static int acpi_pm_prepare(void) { int error = __acpi_pm_prepare(); if (!error) - acpi_pm_pre_suspend(); + error = acpi_pm_pre_suspend(); return error; } @@ -319,7 +318,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) } } -static struct platform_suspend_ops acpi_suspend_ops = { +static const struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, @@ -347,7 +346,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state) * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ -static struct platform_suspend_ops acpi_suspend_ops_old = { +static const struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, .prepare_late = acpi_pm_pre_suspend, @@ -435,6 +434,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), }, }, + { + .callback = init_nvs_nosave, + .ident = "Averatec AV1020-ED2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ @@ -506,7 +513,7 @@ static void acpi_pm_thaw(void) acpi_enable_all_runtime_gpes(); } -static struct platform_hibernation_ops acpi_hibernation_ops = { +static const struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, .pre_snapshot = acpi_pm_prepare, @@ -549,7 +556,7 @@ static int acpi_hibernation_begin_old(void) * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. 
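The platform_suspend_ops and platform_hibernation_ops tables above are also being constified: they are plain structs of function pointers, filled in once at build time and never written afterwards, so marking them const lets them live in read-only data. A minimal userspace illustration of the pattern (the struct and function names here are made up for the sketch):

    #include <stdio.h>

    struct suspend_ops {
            int  (*valid)(int state);
            int  (*prepare)(void);
            void (*finish)(void);
    };

    static int  ex_valid(int state) { return state == 3; }
    static int  ex_prepare(void)    { puts("prepare"); return 0; }
    static void ex_finish(void)     { puts("finish"); }

    /* const: the table is never modified after initialization. */
    static const struct suspend_ops example_ops = {
            .valid   = ex_valid,
            .prepare = ex_prepare,
            .finish  = ex_finish,
    };

    int main(void)
    {
            if (example_ops.valid(3) && !example_ops.prepare())
                    example_ops.finish();
            return 0;
    }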
*/ -static struct platform_hibernation_ops acpi_hibernation_ops_old = { +static const struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, .pre_snapshot = acpi_pm_pre_suspend, diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index f8588f81048a..61891e75583d 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void) return; } -void acpi_os_gpe_count(u32 gpe_number) +static void gpe_count(u32 gpe_number) { acpi_gpe_count++; @@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number) return; } -void acpi_os_fixed_event_count(u32 event_number) +static void fixed_event_count(u32 event_number) { if (!all_counters) return; @@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number) return; } +static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, + u32 event_number, void *context) +{ + if (event_type == ACPI_EVENT_TYPE_GPE) + gpe_count(event_number); + + if (event_type == ACPI_EVENT_TYPE_FIXED) + fixed_event_count(event_number); +} + static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) { @@ -601,6 +611,7 @@ end: void acpi_irq_stats_init(void) { + acpi_status status; int i; if (all_counters) @@ -619,6 +630,10 @@ void acpi_irq_stats_init(void) if (all_counters == NULL) goto fail; + status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); + if (ACPI_FAILURE(status)) + goto fail; + counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), GFP_KERNEL); if (counter_attrs == NULL) diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 5a27b0a31315..2607e17b520f 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device) break; tz->trips.active[i].flags.enabled = 1; for (j = 0; j < tz->trips.active[i].devices.count; j++) { - result = acpi_bus_get_power(tz->trips.active[i].devices. 
- handles[j], &power_state); + result = acpi_bus_update_power( + tz->trips.active[i].devices.handles[j], + &power_state); if (result || (power_state != ACPI_STATE_D0)) { tz->trips.active[i].flags.enabled = 0; break; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 5cd0228d2daa..90f8f7676d1f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -81,6 +80,13 @@ module_param(brightness_switch_enabled, bool, 0644); static int allow_duplicates; module_param(allow_duplicates, bool, 0644); +/* + * Some BIOSes claim they use minimum backlight at boot, + * and this may bring dimming screen after boot + */ +static int use_bios_initial_backlight = 1; +module_param(use_bios_initial_backlight, bool, 0644); + static int register_count = 0; static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device, int type); @@ -172,9 +178,6 @@ struct acpi_video_device_cap { u8 _BQC:1; /* Get current brightness level */ u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ u8 _DDC:1; /*Return the EDID for this device */ - u8 _DCS:1; /*Return status of output device */ - u8 _DGS:1; /*Query graphics state */ - u8 _DSS:1; /*Device state set */ }; struct acpi_video_brightness_flags { @@ -202,7 +205,6 @@ struct acpi_video_device { struct acpi_video_device_brightness *brightness; struct backlight_device *backlight; struct thermal_cooling_device *cooling_dev; - struct output_device *output_dev; }; static const char device_decode[][30] = { @@ -226,10 +228,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, u32 level_current, u32 event); static int acpi_video_switch_brightness(struct acpi_video_device *device, int event); -static int acpi_video_device_get_state(struct acpi_video_device *device, - unsigned long long *state); -static int acpi_video_output_get(struct output_device *od); -static int acpi_video_device_set_state(struct acpi_video_device *device, int state); /*backlight device sysfs support*/ static int acpi_video_get_brightness(struct backlight_device *bd) @@ -260,35 +258,11 @@ static int acpi_video_set_brightness(struct backlight_device *bd) vd->brightness->levels[request_level]); } -static struct backlight_ops acpi_backlight_ops = { +static const struct backlight_ops acpi_backlight_ops = { .get_brightness = acpi_video_get_brightness, .update_status = acpi_video_set_brightness, }; -/*video output device sysfs support*/ -static int acpi_video_output_get(struct output_device *od) -{ - unsigned long long state; - struct acpi_video_device *vd = - (struct acpi_video_device *)dev_get_drvdata(&od->dev); - acpi_video_device_get_state(vd, &state); - return (int)state; -} - -static int acpi_video_output_set(struct output_device *od) -{ - unsigned long state = od->request_state; - struct acpi_video_device *vd= - (struct acpi_video_device *)dev_get_drvdata(&od->dev); - return acpi_video_device_set_state(vd, state); -} - -static struct output_properties acpi_output_properties = { - .set_state = acpi_video_output_set, - .get_status = acpi_video_output_get, -}; - - /* thermal cooling device callbacks */ static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) @@ -344,34 +318,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = { Video Management -------------------------------------------------------------------------- */ -/* device */ - -static int -acpi_video_device_get_state(struct 
acpi_video_device *device, - unsigned long long *state) -{ - int status; - - status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state); - - return status; -} - -static int -acpi_video_device_set_state(struct acpi_video_device *device, int state) -{ - int status; - union acpi_object arg0 = { ACPI_TYPE_INTEGER }; - struct acpi_object_list args = { 1, &arg0 }; - unsigned long long ret; - - - arg0.integer.value = state; - status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret); - - return status; -} - static int acpi_video_device_lcd_query_levels(struct acpi_video_device *device, union acpi_object **levels) @@ -766,9 +712,11 @@ acpi_video_init_brightness(struct acpi_video_device *device) * when invoked for the first time, i.e. level_old is invalid. * set the backlight to max_level in this case */ - for (i = 2; i < br->count; i++) - if (level_old == br->levels[i]) - level = level_old; + if (use_bios_initial_backlight) { + for (i = 2; i < br->count; i++) + if (level_old == br->levels[i]) + level = level_old; + } goto set_level; } @@ -831,15 +779,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { device->cap._DDC = 1; } - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) { - device->cap._DCS = 1; - } - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) { - device->cap._DGS = 1; - } - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) { - device->cap._DSS = 1; - } if (acpi_video_backlight_support()) { struct backlight_properties props; @@ -904,21 +843,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) printk(KERN_ERR PREFIX "Create sysfs link\n"); } - - if (acpi_video_display_switch_support()) { - - if (device->cap._DCS && device->cap._DSS) { - static int count; - char *name; - name = kasprintf(GFP_KERNEL, "acpi_video%d", count); - if (!name) - return; - count++; - device->output_dev = video_output_register(name, - NULL, device, &acpi_output_properties); - kfree(name); - } - } } /* @@ -1360,6 +1284,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, if (!video_device) continue; + if (!video_device->cap._DDC) + continue; + if (type) { switch (type) { case ACPI_VIDEO_DISPLAY_CRT: @@ -1452,7 +1379,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) thermal_cooling_device_unregister(device->cooling_dev); device->cooling_dev = NULL; } - video_output_unregister(device->output_dev); return 0; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b83676126598..42d3d72dae85 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -17,15 +17,14 @@ * capabilities the graphics cards plugged in support. The check for general * video capabilities will be triggered by the first caller of * acpi_video_get_capabilities(NULL); which will happen when the first - * backlight (or display output) switching supporting driver calls: + * backlight switching supporting driver calls: * acpi_video_backlight_support(); * * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) * are available, video.ko should be used to handle the device. * * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, - * sony_acpi,... can take care about backlight brightness and display output - * switching. + * sony_acpi,... can take care about backlight brightness. 
* * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) * this file will not be compiled, acpi_video_get_capabilities() and @@ -83,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device) if (!device) return 0; - /* Is this device able to support video switching ? */ - if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || - ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) - video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; - /* Is this device able to retrieve a video ROM ? */ if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) video_caps |= ACPI_VIDEO_ROM_AVAILABLE; @@ -161,8 +155,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle) * * if (dmi_name_in_vendors("XY")) { * acpi_video_support |= - * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR; - * acpi_video_support |= * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; *} */ @@ -212,33 +204,8 @@ int acpi_video_backlight_support(void) EXPORT_SYMBOL(acpi_video_backlight_support); /* - * Returns true if video.ko can do display output switching. - * This does not work well/at all with binary graphics drivers - * which disable system io ranges and do it on their own. - */ -int acpi_video_display_switch_support(void) -{ - if (!acpi_video_caps_checked) - acpi_video_get_capabilities(NULL); - - if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO) - return 1; - - if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO) - return 1; - - return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING; -} -EXPORT_SYMBOL(acpi_video_display_switch_support); - -/* - * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video - * To force that backlight or display output switching is processed by vendor - * specific acpi drivers or video.ko driver. + * Use acpi_backlight=vendor/video to force that backlight switching + * is processed by vendor specific acpi drivers or video.ko driver. */ static int __init acpi_backlight(char *str) { @@ -255,19 +222,3 @@ static int __init acpi_backlight(char *str) return 1; } __setup("acpi_backlight=", acpi_backlight); - -static int __init acpi_display_output(char *str) -{ - if (str == NULL || *str == '\0') - return 1; - else { - if (!strcmp("vendor", str)) - acpi_video_support |= - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR; - if (!strcmp("video", str)) - acpi_video_support |= - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; - } - return 1; -} -__setup("acpi_display_output=", acpi_display_output); diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index f62a50c3ed34..ed6501452507 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state) container_of(node, struct acpi_device, wakeup_list); if (!dev->wakeup.flags.valid - || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) - || sleep_state > (u32) dev->wakeup.sleep_state) + || sleep_state > (u32) dev->wakeup.sleep_state + || !(device_may_wakeup(&dev->dev) + || dev->wakeup.prepare_count)) continue; - if (dev->wakeup.state.enabled) + if (device_may_wakeup(&dev->dev)) acpi_enable_wakeup_device_power(dev, sleep_state); /* The wake-up power should have been enabled already. 
*/ - acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, + acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_ENABLE); } } @@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state) container_of(node, struct acpi_device, wakeup_list); if (!dev->wakeup.flags.valid - || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) - || (sleep_state > (u32) dev->wakeup.sleep_state)) + || sleep_state > (u32) dev->wakeup.sleep_state + || !(device_may_wakeup(&dev->dev) + || dev->wakeup.prepare_count)) continue; - acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, + acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_DISABLE); - if (dev->wakeup.state.enabled) + if (device_may_wakeup(&dev->dev)) acpi_disable_wakeup_device_power(dev); } } @@ -84,8 +86,8 @@ int __init acpi_wakeup_device_init(void) struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); - if (dev->wakeup.flags.always_enabled) - dev->wakeup.state.enabled = 1; + if (device_can_wakeup(&dev->dev)) + device_set_wakeup_enable(&dev->dev, true); } mutex_unlock(&acpi_device_lock); return 0; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 0a6a943b3779..a31fe96f7de6 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2240,7 +2240,7 @@ int ata_dev_configure(struct ata_device *dev) if (id[ATA_ID_CFA_KEY_MGMT] & 1) ata_dev_printk(dev, KERN_WARNING, "supports DRM functions and may " - "not be fully accessable.\n"); + "not be fully accessible.\n"); snprintf(revbuf, 7, "CFA"); } else { snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); @@ -2248,7 +2248,7 @@ int ata_dev_configure(struct ata_device *dev) if (ata_id_has_tpm(id)) ata_dev_printk(dev, KERN_WARNING, "supports DRM functions and may " - "not be fully accessable.\n"); + "not be fully accessible.\n"); } dev->n_sectors = ata_id_n_sectors(id); diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index b777176ff494..e079cf29ed5d 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -370,7 +370,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev, if (pci_resource_len(pdev, 0) == 0) return -ENODEV; - /* map IO regions and intialize host accordingly */ + /* map IO regions and initialize host accordingly */ rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h index 5042bb2dab15..f53a43ae2bbe 100644 --- a/drivers/atm/idt77252.h +++ b/drivers/atm/idt77252.h @@ -572,7 +572,7 @@ struct idt77252_dev #define SAR_STAT_TSQF 0x00001000 /* Transmit Status Queue full */ #define SAR_STAT_TMROF 0x00000800 /* Timer overflow */ #define SAR_STAT_PHYI 0x00000400 /* PHY device Interrupt flag */ -#define SAR_STAT_CMDBZ 0x00000200 /* ABR SAR Comand Busy Flag */ +#define SAR_STAT_CMDBZ 0x00000200 /* ABR SAR Command Busy Flag */ #define SAR_STAT_FBQ3A 0x00000100 /* Free Buffer Queue 3 Attention */ #define SAR_STAT_FBQ2A 0x00000080 /* Free Buffer Queue 2 Attention */ #define SAR_STAT_RSQF 0x00000040 /* Receive Status Queue full */ diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 729254053758..d80d51b62a1a 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2063,7 +2063,7 @@ static int tx_init(struct atm_dev *dev) - UBR Table size is 4K - UBR wait queue is 4K since the table and wait queues are contiguous, all the bytes - can be initialized by one memeset. + can be initialized by one memeset. 
*/ vcsize_sel = 0; @@ -2089,7 +2089,7 @@ static int tx_init(struct atm_dev *dev) - ABR Table size is 2K - ABR wait queue is 2K since the table and wait queues are contiguous, all the bytes - can be intialized by one memeset. + can be initialized by one memeset. */ i = ABR_SCHED_TABLE * iadev->memSize; writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index e243bd49764b..000e7b2006f8 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -975,7 +975,7 @@ struct klist *bus_get_device_klist(struct bus_type *bus) EXPORT_SYMBOL_GPL(bus_get_device_klist); /* - * Yes, this forcably breaks the klist abstraction temporarily. It + * Yes, this forcibly breaks the klist abstraction temporarily. It * just wants to sort the klist, not change reference counts and * take/drop locks rapidly in the process. It does all this while * holding the lock for the list, so objects can't otherwise be diff --git a/drivers/base/node.c b/drivers/base/node.c index ce012a9c6201..36b43052001d 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -117,12 +117,21 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d WritebackTmp: %8lu kB\n" "Node %d Slab: %8lu kB\n" "Node %d SReclaimable: %8lu kB\n" - "Node %d SUnreclaim: %8lu kB\n", + "Node %d SUnreclaim: %8lu kB\n" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "Node %d AnonHugePages: %8lu kB\n" +#endif + , nid, K(node_page_state(nid, NR_FILE_DIRTY)), nid, K(node_page_state(nid, NR_WRITEBACK)), nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), - nid, K(node_page_state(nid, NR_ANON_PAGES)), + nid, K(node_page_state(nid, NR_ANON_PAGES) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR +#endif + ), nid, K(node_page_state(nid, NR_SHMEM)), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, @@ -133,7 +142,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + , nid, + K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR) +#endif + ); n += hugetlb_report_node_meminfo(nid, buf + n); return n; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 2a52270aeb30..83404973f97a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -8,7 +8,7 @@ * * * The driver model core calls device_pm_add() when a device is registered. - * This will intialize the embedded device_pm_info object in the device + * This will initialize the embedded device_pm_info object in the device * and add it to the list of power-controlled devices. sysfs entries for * controlling device power management will also be added. * diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 8e0f9256eb58..516d5bbec2b6 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -238,9 +238,9 @@ static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) /* * Enqueuing and dequeuing functions for cmdlists. 
*/ -static inline void addQ(struct hlist_head *list, CommandList_struct *c) +static inline void addQ(struct list_head *list, CommandList_struct *c) { - hlist_add_head(&c->list, list); + list_add_tail(&c->list, list); } static inline void removeQ(CommandList_struct *c) @@ -253,12 +253,12 @@ static inline void removeQ(CommandList_struct *c) * them off as 'stale' to prevent the driver from * falling over. */ - if (WARN_ON(hlist_unhashed(&c->list))) { + if (WARN_ON(list_empty(&c->list))) { c->cmd_type = CMD_MSG_STALE; return; } - hlist_del_init(&c->list); + list_del_init(&c->list); } static void enqueue_cmd_and_start_io(ctlr_info_t *h, @@ -905,7 +905,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h) c->cmdindex = i; - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (__u32) cmd_dma_handle; temp64.val = (__u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -944,7 +944,7 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h) } memset(c->err_info, 0, sizeof(ErrorInfo_struct)); - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (__u32) cmd_dma_handle; temp64.val = (__u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -2888,8 +2888,8 @@ static void start_io(ctlr_info_t *h) { CommandList_struct *c; - while (!hlist_empty(&h->reqQ)) { - c = hlist_entry(h->reqQ.first, CommandList_struct, list); + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, CommandList_struct, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); @@ -3402,11 +3402,10 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) { u32 tag; CommandList_struct *c = NULL; - struct hlist_node *tmp; __u32 busaddr_masked, tag_masked; tag = cciss_tag_discard_error_bits(raw_tag); - hlist_for_each_entry(c, tmp, &h->cmpQ, list) { + list_for_each_entry(c, &h->cmpQ, list) { busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); tag_masked = cciss_tag_discard_error_bits(tag); if (busaddr_masked == tag_masked) { @@ -4572,8 +4571,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, h = hba[i]; h->pdev = pdev; h->busy_initializing = 1; - INIT_HLIST_HEAD(&h->cmpQ); - INIT_HLIST_HEAD(&h->reqQ); + INIT_LIST_HEAD(&h->cmpQ); + INIT_LIST_HEAD(&h->reqQ); mutex_init(&h->busy_shutting_down); if (cciss_pci_init(h) != 0) diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 4b8933d778f1..579f74918493 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -103,8 +103,8 @@ struct ctlr_info struct access_method access; /* queue and queue Info */ - struct hlist_head reqQ; - struct hlist_head cmpQ; + struct list_head reqQ; + struct list_head cmpQ; unsigned int Qdepth; unsigned int maxQsinceinit; unsigned int maxSG; diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index eb060f1b00b6..35463d2f0ee7 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h @@ -195,7 +195,7 @@ typedef struct _CommandList_struct { int ctlr; int cmd_type; long cmdindex; - struct hlist_node list; + struct list_head list; struct request * rq; struct completion *waiting; int retry_count; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 1ea1a34e78b2..3803a0348937 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -911,8 +911,6 @@ struct drbd_md { struct drbd_backing_dev { struct block_device *backing_bdev; struct block_device *md_bdev; - struct file *lo_file; - struct file *md_file; 
struct drbd_md md; struct disk_conf dc; /* The user provided config... */ sector_t known_size; /* last known size of that backing device */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6be5401d0e88..29cd0dc9fe4f 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3372,11 +3372,8 @@ void drbd_free_bc(struct drbd_backing_dev *ldev) if (ldev == NULL) return; - bd_release(ldev->backing_bdev); - bd_release(ldev->md_bdev); - - fput(ldev->lo_file); - fput(ldev->md_file); + blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); kfree(ldev); } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 29e5c70e4e26..8cbfaa687d72 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -855,7 +855,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp sector_t max_possible_sectors; sector_t min_md_device_sectors; struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ - struct inode *inode, *inode2; + struct block_device *bdev; struct lru_cache *resync_lru = NULL; union drbd_state ns, os; unsigned int max_seg_s; @@ -907,46 +907,40 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp } } - nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); - if (IS_ERR(nbc->lo_file)) { + bdev = blkdev_get_by_path(nbc->dc.backing_dev, + FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev); + if (IS_ERR(bdev)) { dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, - PTR_ERR(nbc->lo_file)); - nbc->lo_file = NULL; + PTR_ERR(bdev)); retcode = ERR_OPEN_DISK; goto fail; } + nbc->backing_bdev = bdev; - inode = nbc->lo_file->f_dentry->d_inode; - - if (!S_ISBLK(inode->i_mode)) { - retcode = ERR_DISK_NOT_BDEV; - goto fail; - } - - nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0); - if (IS_ERR(nbc->md_file)) { + /* + * meta_dev_idx >= 0: external fixed size, possibly multiple + * drbd sharing one meta device. TODO in that case, paranoia + * check that [md_bdev, meta_dev_idx] is not yet used by some + * other drbd minor! (if you use drbd.conf + drbdadm, that + * should check it for you already; but if you don't, or + * someone fooled it, we need to double check here) + */ + bdev = blkdev_get_by_path(nbc->dc.meta_dev, + FMODE_READ | FMODE_WRITE | FMODE_EXCL, + (nbc->dc.meta_dev_idx < 0) ? 
+ (void *)mdev : (void *)drbd_m_holder); + if (IS_ERR(bdev)) { dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, - PTR_ERR(nbc->md_file)); - nbc->md_file = NULL; + PTR_ERR(bdev)); retcode = ERR_OPEN_MD_DISK; goto fail; } + nbc->md_bdev = bdev; - inode2 = nbc->md_file->f_dentry->d_inode; - - if (!S_ISBLK(inode2->i_mode)) { - retcode = ERR_MD_NOT_BDEV; - goto fail; - } - - nbc->backing_bdev = inode->i_bdev; - if (bd_claim(nbc->backing_bdev, mdev)) { - printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n", - nbc->backing_bdev, mdev, - nbc->backing_bdev->bd_holder, - nbc->backing_bdev->bd_contains->bd_holder, - nbc->backing_bdev->bd_holders); - retcode = ERR_BDCLAIM_DISK; + if ((nbc->backing_bdev == nbc->md_bdev) != + (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || + nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { + retcode = ERR_MD_IDX_INVALID; goto fail; } @@ -955,28 +949,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp offsetof(struct bm_extent, lce)); if (!resync_lru) { retcode = ERR_NOMEM; - goto release_bdev_fail; - } - - /* meta_dev_idx >= 0: external fixed size, - * possibly multiple drbd sharing one meta device. - * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is - * not yet used by some other drbd minor! - * (if you use drbd.conf + drbdadm, - * that should check it for you already; but if you don't, or someone - * fooled it, we need to double check here) */ - nbc->md_bdev = inode2->i_bdev; - if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev - : (void *) drbd_m_holder)) { - retcode = ERR_BDCLAIM_MD_DISK; - goto release_bdev_fail; - } - - if ((nbc->backing_bdev == nbc->md_bdev) != - (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || - nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { - retcode = ERR_MD_IDX_INVALID; - goto release_bdev2_fail; + goto fail; } /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ @@ -987,7 +960,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp (unsigned long long) drbd_get_max_capacity(nbc), (unsigned long long) nbc->dc.disk_size); retcode = ERR_DISK_TO_SMALL; - goto release_bdev2_fail; + goto fail; } if (nbc->dc.meta_dev_idx < 0) { @@ -1004,7 +977,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp dev_warn(DEV, "refusing attach: md-device too small, " "at least %llu sectors needed for this meta-disk type\n", (unsigned long long) min_md_device_sectors); - goto release_bdev2_fail; + goto fail; } /* Make sure the new disk is big enough @@ -1012,7 +985,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp if (drbd_get_max_capacity(nbc) < drbd_get_capacity(mdev->this_bdev)) { retcode = ERR_DISK_TO_SMALL; - goto release_bdev2_fail; + goto fail; } nbc->known_size = drbd_get_capacity(nbc->backing_bdev); @@ -1035,7 +1008,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); drbd_resume_io(mdev); if (retcode < SS_SUCCESS) - goto release_bdev2_fail; + goto fail; if (!get_ldev_if_state(mdev, D_ATTACHING)) goto force_diskless; @@ -1269,18 +1242,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp force_diskless: drbd_force_state(mdev, NS(disk, D_FAILED)); drbd_md_sync(mdev); - release_bdev2_fail: - if (nbc) - bd_release(nbc->md_bdev); - release_bdev_fail: - if (nbc) - bd_release(nbc->backing_bdev); fail: if (nbc) { - if 
(nbc->lo_file) - fput(nbc->lo_file); - if (nbc->md_file) - fput(nbc->md_file); + if (nbc->backing_bdev) + blkdev_put(nbc->backing_bdev, + FMODE_READ | FMODE_WRITE | FMODE_EXCL); + if (nbc->md_bdev) + blkdev_put(nbc->md_bdev, + FMODE_READ | FMODE_WRITE | FMODE_EXCL); kfree(nbc); } lc_destroy(resync_lru); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 25e4dffa0aad..b9ba04fc2b34 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -597,6 +597,11 @@ static unsigned char fsector_t; /* sector in track */ static unsigned char in_sector_offset; /* offset within physical sector, * expressed in units of 512 bytes */ +static inline bool drive_no_geom(int drive) +{ + return !current_type[drive] && !ITYPE(UDRS->fd_device); +} + #ifndef fd_eject static inline int fd_eject(int drive) { @@ -3782,7 +3787,7 @@ static int check_floppy_change(struct gendisk *disk) if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags) || test_bit(drive, &fake_change) || - (!ITYPE(UDRS->fd_device) && !current_type[drive])) + drive_no_geom(drive)) return 1; return 0; } @@ -3848,13 +3853,13 @@ static int __floppy_read_block_0(struct block_device *bdev) static int floppy_revalidate(struct gendisk *disk) { int drive = (long)disk->private_data; -#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device)) int cf; int res = 0; if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags) || - test_bit(drive, &fake_change) || NO_GEOM) { + test_bit(drive, &fake_change) || + drive_no_geom(drive)) { if (WARN(atomic_read(&usage_count) == 0, "VFS: revalidate called on non-open device.\n")) return -EFAULT; @@ -3862,7 +3867,7 @@ static int floppy_revalidate(struct gendisk *disk) lock_fdc(drive, false); cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags)); - if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) { + if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { process_fd_request(); /*already done by another thread */ return 0; } @@ -3874,7 +3879,7 @@ static int floppy_revalidate(struct gendisk *disk) clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); if (cf) UDRS->generation++; - if (NO_GEOM) { + if (drive_no_geom(drive)) { /* auto-sensing */ res = __floppy_read_block_0(opened_bdev[drive]); } else { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7ea0bea2f7e3..44e18c073c44 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -395,11 +395,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct loop_device *lo = p->lo; struct page *page = buf->page; sector_t IV; - int size, ret; - - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; + int size; IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + (buf->offset >> 9); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 19b3568e9326..77d70eebb6b2 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2296,15 +2296,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * so bdget() can't fail. 
*/ bdget(pd->bdev->bd_dev); - if ((ret = blkdev_get(pd->bdev, FMODE_READ))) + if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) goto out; - if ((ret = bd_claim(pd->bdev, pd))) - goto out_putdev; - if ((ret = pkt_get_last_written(pd, &lba))) { printk(DRIVER_NAME": pkt_get_last_written failed\n"); - goto out_unclaim; + goto out_putdev; } set_capacity(pd->disk, lba << 2); @@ -2314,7 +2311,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) q = bdev_get_queue(pd->bdev); if (write) { if ((ret = pkt_open_write(pd))) - goto out_unclaim; + goto out_putdev; /* * Some CDRW drives can not handle writes larger than one packet, * even if the size is a multiple of the packet size. @@ -2329,23 +2326,21 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) } if ((ret = pkt_set_segment_merging(pd, q))) - goto out_unclaim; + goto out_putdev; if (write) { if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { printk(DRIVER_NAME": not enough memory for buffers\n"); ret = -ENOMEM; - goto out_unclaim; + goto out_putdev; } printk(DRIVER_NAME": %lukB available on disc\n", lba << 1); } return 0; -out_unclaim: - bd_release(pd->bdev); out_putdev: - blkdev_put(pd->bdev, FMODE_READ); + blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); out: return ret; } @@ -2362,8 +2357,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_lock_door(pd, 0); pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - bd_release(pd->bdev); - blkdev_put(pd->bdev, FMODE_READ); + blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); pkt_shrink_pktlist(pd); } @@ -2733,7 +2727,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) bdev = bdget(dev); if (!bdev) return -ENOMEM; - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY); + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); if (ret) return ret; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 008d4a00b50d..e1e38b11f48a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1790,18 +1790,29 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) rc = rbd_bus_add_dev(rbd_dev); if (rc) - goto err_out_disk; + goto err_out_blkdev; + /* set up and announce blkdev mapping */ rc = rbd_init_disk(rbd_dev); if (rc) - goto err_out_blkdev; + goto err_out_bus; return count; +err_out_bus: + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + list_del_init(&rbd_dev->node); + mutex_unlock(&ctl_mutex); + + /* this will also clean up rest of rbd_dev stuff */ + + rbd_bus_del_dev(rbd_dev); + kfree(options); + kfree(mon_dev_name); + return rc; + err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); -err_out_disk: - rbd_free_disk(rbd_dev); err_out_client: rbd_put_client(rbd_dev); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index af13c62dc473..14033a36bcd0 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1348,7 +1348,10 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) if (!CDROM_CAN(CDC_SELECT_DISC)) return -EDRIVE_CANT_DO_THIS; - (void) cdi->ops->media_changed(cdi, slot); + if (cdi->ops->check_events) + cdi->ops->check_events(cdi, 0, slot); + else + cdi->ops->media_changed(cdi, slot); if (slot == CDSL_NONE) { /* set media changed bits, on both queues */ @@ -1392,6 +1395,42 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) return slot; } +/* + * As cdrom implements an extra ioctl consumer for media changed + * event, it needs to buffer ->check_events() output, 
such that event + * is not lost for both the usual VFS and ioctl paths. + * cdi->{vfs|ioctl}_events are used to buffer pending events for each + * path. + * + * XXX: Locking is non-existent. cdi->ops->check_events() can be + * called in parallel and buffering fields are accessed without any + * exclusion. The original media_changed code had the same problem. + * It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl + * and remove this cruft altogether. It doesn't have much usefulness + * at this point. + */ +static void cdrom_update_events(struct cdrom_device_info *cdi, + unsigned int clearing) +{ + unsigned int events; + + events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT); + cdi->vfs_events |= events; + cdi->ioctl_events |= events; +} + +unsigned int cdrom_check_events(struct cdrom_device_info *cdi, + unsigned int clearing) +{ + unsigned int events; + + cdrom_update_events(cdi, clearing); + events = cdi->vfs_events; + cdi->vfs_events = 0; + return events; +} +EXPORT_SYMBOL(cdrom_check_events); + /* We want to make media_changed accessible to the user through an * ioctl. The main problem now is that we must double-buffer the * low-level implementation, to assure that the VFS and the user both @@ -1403,15 +1442,26 @@ int media_changed(struct cdrom_device_info *cdi, int queue) { unsigned int mask = (1 << (queue & 1)); int ret = !!(cdi->mc_flags & mask); + bool changed; if (!CDROM_CAN(CDC_MEDIA_CHANGED)) - return ret; + return ret; + /* changed since last call? */ - if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) { + if (cdi->ops->check_events) { + BUG_ON(!queue); /* shouldn't be called from VFS path */ + cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE); + changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE; + cdi->ioctl_events = 0; + } else + changed = cdi->ops->media_changed(cdi, CDSL_CURRENT); + + if (changed) { cdi->mc_flags = 0x3; /* set bit on both queues */ ret |= 1; cdi->media_written = 0; } + cdi->mc_flags &= ~mask; /* clear bit */ return ret; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d4a7776f4b77..0f175a866ef0 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -1047,15 +1047,6 @@ config NSC_GPIO pc8736x_gpio drivers. If those drivers are built as modules, this one will be too, named nsc_gpio -config CS5535_GPIO - tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" - depends on X86_32 - help - Give userspace access to the GPIO pins on the AMD CS5535 and - CS5536 Geode companion devices. - - If compiled as a module, it will be called cs5535_gpio. 
- config RAW_DRIVER tristate "RAW driver (/dev/raw/rawN)" depends on BLOCK diff --git a/drivers/char/Makefile b/drivers/char/Makefile index fa0b824b7a65..1e9dffb33778 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -82,7 +82,6 @@ obj-$(CONFIG_NWFLASH) += nwflash.o obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o -obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o obj-$(CONFIG_GPIO_TB0219) += tb0219.o obj-$(CONFIG_TELCLOCK) += tlclk.o diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 07e9796fead7..857df10c0428 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -717,8 +717,8 @@ static const struct intel_agp_driver_description { { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver }, { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver }, { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver }, - { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver }, - { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver }, { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver }, diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index e921b693412b..826ab0939a12 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1361,7 +1361,7 @@ static const struct intel_gtt_driver_description { &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &i8xx_gtt_driver}, - { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", + { PCI_DEVICE_ID_INTEL_82845G_IG, "845G", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82854_IG, "854", &i8xx_gtt_driver}, diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 794aacb715c1..d0387a84eec1 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -24,6 +24,7 @@ * warranty of any kind, whether express or implied. */ +#include #include #include #include @@ -34,7 +35,6 @@ #include -#define PFX KBUILD_MODNAME ": " enum { @@ -81,8 +81,7 @@ static inline u32 xstore(u32 *addr, u32 edx_in) ts_state = irq_ts_save(); asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" - :"=m"(*addr), "=a"(eax_out) - :"D"(addr), "d"(edx_in)); + : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr)); irq_ts_restore(ts_state); return eax_out; @@ -90,8 +89,10 @@ static inline u32 xstore(u32 *addr, u32 edx_in) static int via_rng_data_present(struct hwrng *rng, int wait) { + char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); u32 bytes_out; - u32 *via_rng_datum = (u32 *)(&rng->priv); int i; /* We choose the recommended 1-byte-per-instruction RNG rate, @@ -115,6 +116,7 @@ static int via_rng_data_present(struct hwrng *rng, int wait) break; udelay(10); } + rng->priv = *via_rng_datum; return bytes_out ? 
1 : 0; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 2fe72f8edf44..38223e93aa98 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -970,6 +970,33 @@ out_kfree: } EXPORT_SYMBOL(ipmi_create_user); +int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) +{ + int rv = 0; + ipmi_smi_t intf; + struct ipmi_smi_handlers *handlers; + + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + if (intf->intf_num == if_num) + goto found; + } + /* Not found, return an error */ + rv = -EINVAL; + mutex_unlock(&ipmi_interfaces_mutex); + return rv; + +found: + handlers = intf->handlers; + rv = -ENOSYS; + if (handlers->get_smi_info) + rv = handlers->get_smi_info(intf->send_info, data); + mutex_unlock(&ipmi_interfaces_mutex); + + return rv; +} +EXPORT_SYMBOL(ipmi_get_smi_info); + static void free_user(struct kref *ref) { ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index f27c04e18aaa..b6ae6e9a9c5f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include "ipmi_si_sm.h" @@ -109,10 +110,6 @@ enum si_type { }; static char *si_to_str[] = { "kcs", "smic", "bt" }; -enum ipmi_addr_src { - SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, - SI_PCI, SI_DEVICETREE, SI_DEFAULT -}; static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", "device-tree", "default" }; @@ -293,6 +290,7 @@ struct smi_info { struct task_struct *thread; struct list_head link; + union ipmi_smi_info_union addr_info; }; #define smi_inc_stat(smi, stat) \ @@ -1188,6 +1186,18 @@ static int smi_start_processing(void *send_info, return 0; } +static int get_smi_info(void *send_info, struct ipmi_smi_info *data) +{ + struct smi_info *smi = send_info; + + data->addr_src = smi->addr_source; + data->dev = smi->dev; + data->addr_info = smi->addr_info; + get_device(smi->dev); + + return 0; +} + static void set_maintenance_mode(void *send_info, int enable) { struct smi_info *smi_info = send_info; @@ -1199,6 +1209,7 @@ static void set_maintenance_mode(void *send_info, int enable) static struct ipmi_smi_handlers handlers = { .owner = THIS_MODULE, .start_processing = smi_start_processing, + .get_smi_info = get_smi_info, .sender = sender, .request_events = request_events, .set_maintenance_mode = set_maintenance_mode, @@ -1930,7 +1941,8 @@ static void __devinit hardcode_find_bmc(void) static int acpi_failure; /* For GPE-type interrupts. 
*/ -static u32 ipmi_acpi_gpe(void *context) +static u32 ipmi_acpi_gpe(acpi_handle gpe_device, + u32 gpe_number, void *context) { struct smi_info *smi_info = context; unsigned long flags; @@ -2158,6 +2170,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, printk(KERN_INFO PFX "probing via ACPI\n"); handle = acpi_dev->handle; + info->addr_info.acpi_info.acpi_handle = handle; /* _IFT tells us the interface type: KCS, BT, etc */ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c index d3d63be2cd37..1a9f5f6d6ac5 100644 --- a/drivers/char/ramoops.c +++ b/drivers/char/ramoops.c @@ -30,7 +30,7 @@ #define RAMOOPS_KERNMSG_HDR "====" -#define RECORD_SIZE 4096 +#define RECORD_SIZE 4096UL static ulong mem_address; module_param(mem_address, ulong, 0400); @@ -68,11 +68,16 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, char *buf, *buf_orig; struct timeval timestamp; + if (reason != KMSG_DUMP_OOPS && + reason != KMSG_DUMP_PANIC && + reason != KMSG_DUMP_KEXEC) + return; + /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; - buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); + buf = cxt->virt_addr + (cxt->count * RECORD_SIZE); buf_orig = buf; memset(buf, '\0', RECORD_SIZE); @@ -83,8 +88,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, buf += res; hdr_size = buf - buf_orig; - l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size)); - l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy); + l2_cpy = min(l2, RECORD_SIZE - hdr_size); + l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy); s2_start = l2 - l2_cpy; s1_start = l1 - l1_cpy; diff --git a/drivers/char/raw.c b/drivers/char/raw.c index bfe25ea9766b..b4b9d5a47885 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -65,15 +65,12 @@ static int raw_open(struct inode *inode, struct file *filp) if (!bdev) goto out; igrab(bdev->bd_inode); - err = blkdev_get(bdev, filp->f_mode); + err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open); if (err) goto out; - err = bd_claim(bdev, raw_open); - if (err) - goto out1; err = set_blocksize(bdev, bdev_logical_block_size(bdev)); if (err) - goto out2; + goto out1; filp->f_flags |= O_DIRECT; filp->f_mapping = bdev->bd_inode->i_mapping; if (++raw_devices[minor].inuse == 1) @@ -83,10 +80,8 @@ static int raw_open(struct inode *inode, struct file *filp) mutex_unlock(&raw_mutex); return 0; -out2: - bd_release(bdev); out1: - blkdev_put(bdev, filp->f_mode); + blkdev_put(bdev, filp->f_mode | FMODE_EXCL); out: mutex_unlock(&raw_mutex); return err; @@ -110,8 +105,7 @@ static int raw_release(struct inode *inode, struct file *filp) } mutex_unlock(&raw_mutex); - bd_release(bdev); - blkdev_put(bdev, filp->f_mode); + blkdev_put(bdev, filp->f_mode | FMODE_EXCL); return 0; } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 386888f10df0..bf5092455a8f 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -96,7 +96,15 @@ static void cpuidle_idle_call(void) /* enter the state and update stats */ dev->last_state = target_state; + + trace_power_start(POWER_CSTATE, next_state, dev->cpu); + trace_cpu_idle(next_state, dev->cpu); + dev->last_residency = target_state->enter(dev, target_state); + + trace_power_end(dev->cpu); + trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); + if (dev->last_state) target_state = dev->last_state; @@ -106,8 +114,6 @@ static void cpuidle_idle_call(void) /* give the governor an opportunity to reflect on the 
outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev); - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); } /** @@ -155,6 +161,45 @@ void cpuidle_resume_and_unlock(void) EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); +#ifdef CONFIG_ARCH_HAS_CPU_RELAX +static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) +{ + ktime_t t1, t2; + s64 diff; + int ret; + + t1 = ktime_get(); + local_irq_enable(); + while (!need_resched()) + cpu_relax(); + + t2 = ktime_get(); + diff = ktime_to_us(ktime_sub(t2, t1)); + if (diff > INT_MAX) + diff = INT_MAX; + + ret = (int) diff; + return ret; +} + +static void poll_idle_init(struct cpuidle_device *dev) +{ + struct cpuidle_state *state = &dev->states[0]; + + cpuidle_set_statedata(state, NULL); + + snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); + snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); + state->exit_latency = 0; + state->target_residency = 0; + state->power_usage = -1; + state->flags = 0; + state->enter = poll_idle; +} +#else +static void poll_idle_init(struct cpuidle_device *dev) {} +#endif /* CONFIG_ARCH_HAS_CPU_RELAX */ + /** * cpuidle_enable_device - enables idle PM for a CPU * @dev: the CPU @@ -179,6 +224,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev) return ret; } + poll_idle_init(dev); + if ((ret = cpuidle_add_state_sysfs(dev))) return ret; @@ -233,45 +280,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev) EXPORT_SYMBOL_GPL(cpuidle_disable_device); -#ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) -{ - ktime_t t1, t2; - s64 diff; - int ret; - - t1 = ktime_get(); - local_irq_enable(); - while (!need_resched()) - cpu_relax(); - - t2 = ktime_get(); - diff = ktime_to_us(ktime_sub(t2, t1)); - if (diff > INT_MAX) - diff = INT_MAX; - - ret = (int) diff; - return ret; -} - -static void poll_idle_init(struct cpuidle_device *dev) -{ - struct cpuidle_state *state = &dev->states[0]; - - cpuidle_set_statedata(state, NULL); - - snprintf(state->name, CPUIDLE_NAME_LEN, "C0"); - snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); - state->exit_latency = 0; - state->target_residency = 0; - state->power_usage = -1; - state->flags = CPUIDLE_FLAG_POLL; - state->enter = poll_idle; -} -#else -static void poll_idle_init(struct cpuidle_device *dev) {} -#endif /* CONFIG_ARCH_HAS_CPU_RELAX */ - /** * __cpuidle_register_device - internal register function called before register * and enable routines @@ -292,8 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) init_completion(&dev->kobj_unregister); - poll_idle_init(dev); - /* * cpuidle driver should set the dev->power_specified bit * before registering the device if the driver provides diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 7d279e578df5..c99305afa58a 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -857,7 +857,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, printk(KERN_WARNING MV_CESA "Base driver '%s' could not be loaded!\n", base_hash_name); - err = PTR_ERR(fallback_tfm); + err = PTR_ERR(base_hash); goto err_bad_base; } } diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 76141262ea1d..80dc094e78c6 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1542,7 +1542,7 @@ out: return err; } -static void __exit n2_unregister_algs(void) +static void __devexit 
n2_unregister_algs(void) { mutex_lock(&spu_lock); if (!--algs_registered) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 799ca517c121..add2a1a72ba4 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -74,11 +74,9 @@ #define FLAGS_CBC BIT(1) #define FLAGS_GIV BIT(2) -#define FLAGS_NEW_KEY BIT(4) -#define FLAGS_NEW_IV BIT(5) -#define FLAGS_INIT BIT(6) -#define FLAGS_FAST BIT(7) -#define FLAGS_BUSY 8 +#define FLAGS_INIT BIT(4) +#define FLAGS_FAST BIT(5) +#define FLAGS_BUSY BIT(6) struct omap_aes_ctx { struct omap_aes_dev *dd; @@ -98,19 +96,18 @@ struct omap_aes_reqctx { struct omap_aes_dev { struct list_head list; unsigned long phys_base; - void __iomem *io_base; + void __iomem *io_base; struct clk *iclk; struct omap_aes_ctx *ctx; struct device *dev; unsigned long flags; + int err; - u32 *iv; - u32 ctrl; + spinlock_t lock; + struct crypto_queue queue; - spinlock_t lock; - struct crypto_queue queue; - - struct tasklet_struct task; + struct tasklet_struct done_task; + struct tasklet_struct queue_task; struct ablkcipher_request *req; size_t total; @@ -179,9 +176,13 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) static int omap_aes_hw_init(struct omap_aes_dev *dd) { - int err = 0; - + /* + * clocks are enabled when request starts and disabled when finished. + * It may be long delays between requests. + * Device might go to off mode to save power. + */ clk_enable(dd->iclk); + if (!(dd->flags & FLAGS_INIT)) { /* is it necessary to reset before every operation? */ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, @@ -193,39 +194,26 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) __asm__ __volatile__("nop"); __asm__ __volatile__("nop"); - err = omap_aes_wait(dd, AES_REG_SYSSTATUS, - AES_REG_SYSSTATUS_RESETDONE); - if (!err) - dd->flags |= FLAGS_INIT; - } + if (omap_aes_wait(dd, AES_REG_SYSSTATUS, + AES_REG_SYSSTATUS_RESETDONE)) + return -ETIMEDOUT; - return err; -} + dd->flags |= FLAGS_INIT; + dd->err = 0; + } -static void omap_aes_hw_cleanup(struct omap_aes_dev *dd) -{ - clk_disable(dd->iclk); + return 0; } -static void omap_aes_write_ctrl(struct omap_aes_dev *dd) +static int omap_aes_write_ctrl(struct omap_aes_dev *dd) { unsigned int key32; - int i; + int i, err; u32 val, mask; - val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); - if (dd->flags & FLAGS_CBC) - val |= AES_REG_CTRL_CBC; - if (dd->flags & FLAGS_ENCRYPT) - val |= AES_REG_CTRL_DIRECTION; - - if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) && - !(dd->ctx->flags & FLAGS_NEW_KEY)) - goto out; - - /* only need to write control registers for new settings */ - - dd->ctrl = val; + err = omap_aes_hw_init(dd); + if (err) + return err; val = 0; if (dd->dma_lch_out >= 0) @@ -237,30 +225,43 @@ static void omap_aes_write_ctrl(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_MASK, val, mask); - pr_debug("Set key\n"); key32 = dd->ctx->keylen / sizeof(u32); - /* set a key */ + + /* it seems a key should always be set even if it has not changed */ for (i = 0; i < key32; i++) { omap_aes_write(dd, AES_REG_KEY(i), __le32_to_cpu(dd->ctx->key[i])); } - dd->ctx->flags &= ~FLAGS_NEW_KEY; - if (dd->flags & FLAGS_NEW_IV) { - pr_debug("Set IV\n"); - omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4); - dd->flags &= ~FLAGS_NEW_IV; - } + if ((dd->flags & FLAGS_CBC) && dd->req->info) + omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4); + + val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); + if (dd->flags & FLAGS_CBC) + val |= AES_REG_CTRL_CBC; + if (dd->flags & 
FLAGS_ENCRYPT) + val |= AES_REG_CTRL_DIRECTION; mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | AES_REG_CTRL_KEY_SIZE; - omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask); + omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); -out: - /* start DMA or disable idle mode */ - omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, - AES_REG_MASK_START); + /* IN */ + omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + + /* OUT */ + omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + + return 0; } static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) @@ -288,8 +289,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) { struct omap_aes_dev *dd = data; - if (lch == dd->dma_lch_out) - tasklet_schedule(&dd->task); + if (ch_status != OMAP_DMA_BLOCK_IRQ) { + pr_err("omap-aes DMA error status: 0x%hx\n", ch_status); + dd->err = -EIO; + dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ + } else if (lch == dd->dma_lch_in) { + return; + } + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); } static int omap_aes_dma_init(struct omap_aes_dev *dd) @@ -339,18 +348,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) goto err_dma_out; } - omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - - omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - return 0; err_dma_out: @@ -406,6 +403,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, if (!count) return off; + /* + * buflen and total are AES_BLOCK_SIZE size aligned, + * so count should be also aligned + */ + sg_copy_buf(buf + off, *sg, *offset, count, out); off += count; @@ -461,7 +463,9 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, omap_start_dma(dd->dma_lch_in); omap_start_dma(dd->dma_lch_out); - omap_aes_write_ctrl(dd); + /* start DMA or disable idle mode */ + omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, + AES_REG_MASK_START); return 0; } @@ -488,8 +492,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) count = min(dd->total, sg_dma_len(dd->in_sg)); count = min(count, sg_dma_len(dd->out_sg)); - if (count != dd->total) + if (count != dd->total) { + pr_err("request length != buffer length\n"); return -EINVAL; + } pr_debug("fast\n"); @@ -525,23 +531,25 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) dd->total -= count; - err = omap_aes_hw_init(dd); - err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); + if (err) { + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + } return err; } static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) { - struct omap_aes_ctx *ctx; + struct ablkcipher_request *req = dd->req; 
pr_debug("err: %d\n", err); - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req)); + clk_disable(dd->iclk); + dd->flags &= ~FLAGS_BUSY; - if (!dd->total) - dd->req->base.complete(&dd->req->base, err); + req->base.complete(&req->base, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -553,8 +561,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); - omap_aes_hw_cleanup(dd); - omap_stop_dma(dd->dma_lch_in); omap_stop_dma(dd->dma_lch_out); @@ -574,40 +580,39 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) } } - if (err || !dd->total) - omap_aes_finish_req(dd, err); - return err; } -static int omap_aes_handle_req(struct omap_aes_dev *dd) +static int omap_aes_handle_queue(struct omap_aes_dev *dd, + struct ablkcipher_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_aes_ctx *ctx; struct omap_aes_reqctx *rctx; - struct ablkcipher_request *req; unsigned long flags; - - if (dd->total) - goto start; + int err, ret = 0; spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); - if (!async_req) - clear_bit(FLAGS_BUSY, &dd->flags); + if (async_req) + dd->flags |= FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) - return 0; + return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ablkcipher_request_cast(async_req); - pr_debug("get new req\n"); - /* assign new request to device */ dd->req = req; dd->total = req->nbytes; @@ -621,27 +626,22 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd) rctx->mode &= FLAGS_MODE_MASK; dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; - dd->iv = req->info; - if ((dd->flags & FLAGS_CBC) && dd->iv) - dd->flags |= FLAGS_NEW_IV; - else - dd->flags &= ~FLAGS_NEW_IV; - + dd->ctx = ctx; ctx->dd = dd; - if (dd->ctx != ctx) { - /* assign new context to device */ - dd->ctx = ctx; - ctx->flags |= FLAGS_NEW_KEY; - } - if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) - pr_err("request size is not exact amount of AES blocks\n"); + err = omap_aes_write_ctrl(dd); + if (!err) + err = omap_aes_crypt_dma_start(dd); + if (err) { + /* aes_task will not finish it, so do it here */ + omap_aes_finish_req(dd, err); + tasklet_schedule(&dd->queue_task); + } -start: - return omap_aes_crypt_dma_start(dd); + return ret; /* return ret, which is enqueue return value */ } -static void omap_aes_task(unsigned long data) +static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; int err; @@ -650,40 +650,50 @@ static void omap_aes_task(unsigned long data) err = omap_aes_crypt_dma_stop(dd); - err = omap_aes_handle_req(dd); + err = dd->err ? : err; + + if (dd->total && !err) { + err = omap_aes_crypt_dma_start(dd); + if (!err) + return; /* DMA started. Not fininishing. 
*/ + } + + omap_aes_finish_req(dd, err); + omap_aes_handle_queue(dd, NULL); pr_debug("exit\n"); } +static void omap_aes_queue_task(unsigned long data) +{ + struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + + omap_aes_handle_queue(dd, NULL); +} + static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct omap_aes_dev *dd; - unsigned long flags; - int err; pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + dd = omap_aes_find_dev(ctx); if (!dd) return -ENODEV; rctx->mode = mode; - spin_lock_irqsave(&dd->lock, flags); - err = ablkcipher_enqueue_request(&dd->queue, req); - spin_unlock_irqrestore(&dd->lock, flags); - - if (!test_and_set_bit(FLAGS_BUSY, &dd->flags)) - omap_aes_handle_req(dd); - - pr_debug("exit\n"); - - return err; + return omap_aes_handle_queue(dd, req); } /* ********************** ALG API ************************************ */ @@ -701,7 +711,6 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - ctx->flags |= FLAGS_NEW_KEY; return 0; } @@ -750,7 +759,7 @@ static struct crypto_alg algs[] = { .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), - .cra_alignmask = 0, + .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, @@ -770,7 +779,7 @@ static struct crypto_alg algs[] = { .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), - .cra_alignmask = 0, + .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, @@ -849,7 +858,8 @@ static int omap_aes_probe(struct platform_device *pdev) (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); clk_disable(dd->iclk); - tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd); + tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); + tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); err = omap_aes_dma_init(dd); if (err) @@ -876,7 +886,8 @@ err_algs: crypto_unregister_alg(&algs[j]); omap_aes_dma_cleanup(dd); err_dma: - tasklet_kill(&dd->task); + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); iounmap(dd->io_base); err_io: clk_put(dd->iclk); @@ -903,7 +914,8 @@ static int omap_aes_remove(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_alg(&algs[i]); - tasklet_kill(&dd->task); + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); omap_aes_dma_cleanup(dd); iounmap(dd->io_base); clk_put(dd->iclk); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index a081c7c7d03f..2e71123516e0 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -72,10 +72,9 @@ #define DEFAULT_TIMEOUT_INTERVAL HZ -#define FLAGS_FIRST 0x0001 #define FLAGS_FINUP 0x0002 #define FLAGS_FINAL 0x0004 -#define FLAGS_FAST 0x0008 +#define FLAGS_SG 0x0008 #define FLAGS_SHA1 0x0010 #define FLAGS_DMA_ACTIVE 0x0020 #define FLAGS_OUTPUT_READY 0x0040 @@ -83,13 +82,17 @@ #define FLAGS_INIT 0x0100 #define FLAGS_CPU 0x0200 #define 
FLAGS_HMAC 0x0400 - -/* 3rd byte */ -#define FLAGS_BUSY 16 +#define FLAGS_ERROR 0x0800 +#define FLAGS_BUSY 0x1000 #define OP_UPDATE 1 #define OP_FINAL 2 +#define OMAP_ALIGN_MASK (sizeof(u32)-1) +#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) + +#define BUFLEN PAGE_SIZE + struct omap_sham_dev; struct omap_sham_reqctx { @@ -97,8 +100,8 @@ struct omap_sham_reqctx { unsigned long flags; unsigned long op; + u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; - u8 *buffer; size_t bufcnt; size_t buflen; dma_addr_t dma_addr; @@ -107,6 +110,8 @@ struct omap_sham_reqctx { struct scatterlist *sg; unsigned int offset; /* offset in current sg */ unsigned int total; /* total request */ + + u8 buffer[0] OMAP_ALIGNED; }; struct omap_sham_hmac_ctx { @@ -136,6 +141,7 @@ struct omap_sham_dev { int irq; struct clk *iclk; spinlock_t lock; + int err; int dma; int dma_lch; struct tasklet_struct done_task; @@ -194,53 +200,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) static void omap_sham_copy_hash(struct ahash_request *req, int out) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + u32 *hash = (u32 *)ctx->digest; + int i; + + /* MD5 is almost unused. So copy sha1 size to reduce code */ + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { + if (out) + hash[i] = omap_sham_read(ctx->dd, + SHA_REG_DIGEST(i)); + else + omap_sham_write(ctx->dd, + SHA_REG_DIGEST(i), hash[i]); + } +} + +static void omap_sham_copy_ready_hash(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + u32 *in = (u32 *)ctx->digest; u32 *hash = (u32 *)req->result; int i; + if (!hash) + return; + if (likely(ctx->flags & FLAGS_SHA1)) { /* SHA1 results are in big endian */ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) - if (out) - hash[i] = be32_to_cpu(omap_sham_read(ctx->dd, - SHA_REG_DIGEST(i))); - else - omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), - cpu_to_be32(hash[i])); + hash[i] = be32_to_cpu(in[i]); } else { /* MD5 results are in little endian */ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) - if (out) - hash[i] = le32_to_cpu(omap_sham_read(ctx->dd, - SHA_REG_DIGEST(i))); - else - omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), - cpu_to_le32(hash[i])); + hash[i] = le32_to_cpu(in[i]); } } -static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, - int final, int dma) +static int omap_sham_hw_init(struct omap_sham_dev *dd) { - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - u32 val = length << 5, mask; + clk_enable(dd->iclk); - if (unlikely(!ctx->digcnt)) { + if (!(dd->flags & FLAGS_INIT)) { + omap_sham_write_mask(dd, SHA_REG_MASK, + SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); - clk_enable(dd->iclk); + if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, + SHA_REG_SYSSTATUS_RESETDONE)) + return -ETIMEDOUT; - if (!(dd->flags & FLAGS_INIT)) { - omap_sham_write_mask(dd, SHA_REG_MASK, - SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); + dd->flags |= FLAGS_INIT; + dd->err = 0; + } - if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, - SHA_REG_SYSSTATUS_RESETDONE)) - return -ETIMEDOUT; + return 0; +} - dd->flags |= FLAGS_INIT; - } - } else { +static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, + int final, int dma) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val = length << 5, mask; + + if (likely(ctx->digcnt)) omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); - } omap_sham_write_mask(dd, SHA_REG_MASK, SHA_REG_MASK_IT_EN | (dma ? 
SHA_REG_MASK_DMA_EN : 0), @@ -260,29 +281,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); - - return 0; } static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int err, count, len32; + int count, len32; const u32 *buffer = (const u32 *)buf; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); - err = omap_sham_write_ctrl(dd, length, final, 0); - if (err) - return err; + omap_sham_write_ctrl(dd, length, final, 0); + + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt += length; if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) return -ETIMEDOUT; - ctx->digcnt += length; - if (final) ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ @@ -298,16 +316,11 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int err, len32; + int len32; dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); - /* flush cache entries related to our page */ - if (dma_addr == ctx->dma_addr) - dma_sync_single_for_device(dd->dev, dma_addr, length, - DMA_TO_DEVICE); - len32 = DIV_ROUND_UP(length, sizeof(u32)); omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, @@ -317,9 +330,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0); - err = omap_sham_write_ctrl(dd, length, final, 1); - if (err) - return err; + omap_sham_write_ctrl(dd, length, final, 1); ctx->digcnt += length; @@ -371,15 +382,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) return 0; } +static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, + struct omap_sham_reqctx *ctx, + size_t length, int final) +{ + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, ctx->dma_addr)) { + dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); + return -EINVAL; + } + + ctx->flags &= ~FLAGS_SG; + + /* next call does not fail... 
so no unmap in the case of error */ + return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); +} + static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int final; size_t count; - if (!ctx->total) - return 0; - omap_sham_append_sg(ctx); final = (ctx->flags & FLAGS_FINUP) && !ctx->total; @@ -390,30 +415,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { count = ctx->bufcnt; ctx->bufcnt = 0; - return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); + return omap_sham_xmit_dma_map(dd, ctx, count, final); } return 0; } -static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) +/* Start address alignment */ +#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) +/* SHA1 block size alignment */ +#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) + +static int omap_sham_update_dma_start(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - unsigned int length; + unsigned int length, final, tail; + struct scatterlist *sg; - ctx->flags |= FLAGS_FAST; + if (!ctx->total) + return 0; + + if (ctx->bufcnt || ctx->offset) + return omap_sham_update_dma_slow(dd); + + dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", + ctx->digcnt, ctx->bufcnt, ctx->total); + + sg = ctx->sg; - length = min(ctx->total, sg_dma_len(ctx->sg)); - ctx->total = length; + if (!SG_AA(sg)) + return omap_sham_update_dma_slow(dd); + + if (!sg_is_last(sg) && !SG_SA(sg)) + /* size is not SHA1_BLOCK_SIZE aligned */ + return omap_sham_update_dma_slow(dd); + + length = min(ctx->total, sg->length); + + if (sg_is_last(sg)) { + if (!(ctx->flags & FLAGS_FINUP)) { + /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ + tail = length & (SHA1_MD5_BLOCK_SIZE - 1); + /* without finup() we need one block to close hash */ + if (!tail) + tail = SHA1_MD5_BLOCK_SIZE; + length -= tail; + } + } if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { dev_err(dd->dev, "dma_map_sg error\n"); return -EINVAL; } + ctx->flags |= FLAGS_SG; + ctx->total -= length; + ctx->offset = length; /* offset where to start slow */ - return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); + final = (ctx->flags & FLAGS_FINUP) && !ctx->total; + + /* next call does not fail... 
so no unmap in the case of error */ + return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); } static int omap_sham_update_cpu(struct omap_sham_dev *dd) @@ -433,8 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_stop_dma(dd->dma_lch); - if (ctx->flags & FLAGS_FAST) + if (ctx->flags & FLAGS_SG) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + if (ctx->sg->length == ctx->offset) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + } + } else { + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, + DMA_TO_DEVICE); + } return 0; } @@ -454,14 +526,7 @@ static void omap_sham_cleanup(struct ahash_request *req) spin_unlock_irqrestore(&dd->lock, flags); if (ctx->digcnt) - clk_disable(dd->iclk); - - if (ctx->dma_addr) - dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, - DMA_TO_DEVICE); - - if (ctx->buffer) - free_page((unsigned long)ctx->buffer); + omap_sham_copy_ready_hash(req); dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); } @@ -489,8 +554,6 @@ static int omap_sham_init(struct ahash_request *req) ctx->flags = 0; - ctx->flags |= FLAGS_FIRST; - dev_dbg(dd->dev, "init: digest size: %d\n", crypto_ahash_digestsize(tfm)); @@ -499,21 +562,7 @@ static int omap_sham_init(struct ahash_request *req) ctx->bufcnt = 0; ctx->digcnt = 0; - - ctx->buflen = PAGE_SIZE; - ctx->buffer = (void *)__get_free_page( - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); - if (!ctx->buffer) - return -ENOMEM; - - ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, - DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, ctx->dma_addr)) { - dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); - free_page((unsigned long)ctx->buffer); - return -EINVAL; - } + ctx->buflen = BUFLEN; if (tctx->flags & FLAGS_HMAC) { struct omap_sham_hmac_ctx *bctx = tctx->base; @@ -538,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) if (ctx->flags & FLAGS_CPU) err = omap_sham_update_cpu(dd); - else if (ctx->flags & FLAGS_FAST) - err = omap_sham_update_dma_fast(dd); else - err = omap_sham_update_dma_slow(dd); + err = omap_sham_update_dma_start(dd); /* wait for dma completion before can take more data */ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); @@ -560,15 +607,12 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) use_dma = 0; if (use_dma) - err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); + err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); else err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); ctx->bufcnt = 0; - if (err != -EINPROGRESS) - omap_sham_cleanup(req); - dev_dbg(dd->dev, "final_req: err: %d\n", err); return err; @@ -576,6 +620,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) static int omap_sham_finish_req_hmac(struct ahash_request *req) { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); @@ -590,48 +635,56 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req) return crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, bctx->opad, bs) ?: - crypto_shash_finup(&desc.shash, req->result, ds, req->result); + crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest); } static void omap_sham_finish_req(struct ahash_request *req, int err) { struct omap_sham_reqctx *ctx = 
ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; if (!err) { omap_sham_copy_hash(ctx->dd->req, 1); if (ctx->flags & FLAGS_HMAC) err = omap_sham_finish_req_hmac(req); + } else { + ctx->flags |= FLAGS_ERROR; } - if (ctx->flags & FLAGS_FINAL) + if ((ctx->flags & FLAGS_FINAL) || err) omap_sham_cleanup(req); - clear_bit(FLAGS_BUSY, &ctx->dd->flags); + clk_disable(dd->iclk); + dd->flags &= ~FLAGS_BUSY; if (req->base.complete) req->base.complete(&req->base, err); } -static int omap_sham_handle_queue(struct omap_sham_dev *dd) +static int omap_sham_handle_queue(struct omap_sham_dev *dd, + struct ahash_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; - struct ahash_request *req, *prev_req; + struct ahash_request *prev_req; unsigned long flags; - int err = 0; - - if (test_and_set_bit(FLAGS_BUSY, &dd->flags)) - return 0; + int err = 0, ret = 0; spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ahash_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); - if (!async_req) - clear_bit(FLAGS_BUSY, &dd->flags); + if (async_req) + dd->flags |= FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) - return 0; + return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -646,7 +699,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); - if (req != prev_req && ctx->digcnt) + + err = omap_sham_hw_init(dd); + if (err) + goto err1; + + omap_set_dma_dest_params(dd->dma_lch, 0, + OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + SHA_REG_DIN(0), 0, 16); + + omap_set_dma_dest_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_16); + + omap_set_dma_src_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_4); + + if (ctx->digcnt) /* request has changed - restore hash */ omap_sham_copy_hash(req, 0); @@ -658,7 +726,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) } else if (ctx->op == OP_FINAL) { err = omap_sham_final_req(dd); } - +err1: if (err != -EINPROGRESS) { /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); @@ -667,7 +735,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) dev_dbg(dd->dev, "exit, err: %d\n", err); - return err; + return ret; } static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) @@ -675,18 +743,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_dev *dd = tctx->dd; - unsigned long flags; - int err; ctx->op = op; - spin_lock_irqsave(&dd->lock, flags); - err = ahash_enqueue_request(&dd->queue, req); - spin_unlock_irqrestore(&dd->lock, flags); - - omap_sham_handle_queue(dd); - - return err; + return omap_sham_handle_queue(dd, req); } static int omap_sham_update(struct ahash_request *req) @@ -709,21 +769,13 @@ static int omap_sham_update(struct ahash_request *req) */ omap_sham_append_sg(ctx); return 0; - } else if (ctx->bufcnt + ctx->total <= 64) { + } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { + /* + * faster to use CPU for short transfers + */ ctx->flags |= FLAGS_CPU; - } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) { - /* may be can use faster functions */ - int aligned = IS_ALIGNED((u32)ctx->sg->offset, - sizeof(u32)); 
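/*
 * [Editorial sketch -- not part of the patch above.]  The omap-sham hunks
 * replace the separately allocated page buffer with a word-aligned buffer
 * carried at the tail of the request context (the buffer[0] member marked
 * OMAP_ALIGNED, with the request size later grown by BUFLEN), and that
 * buffer is now dma_map_single()'d only for the duration of each transfer.
 * The fragment below is a minimal userspace illustration of the tail-buffer
 * layout; the demo_reqctx/DEMO_* names are invented and do not appear in
 * the driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_BUFLEN 4096
#define DEMO_ALIGNED __attribute__((aligned(sizeof(uint32_t))))

struct demo_reqctx {
	size_t bufcnt;				/* bytes currently buffered */
	size_t buflen;				/* capacity (DEMO_BUFLEN) */
	uint8_t digest[20] DEMO_ALIGNED;	/* SHA-1 sized, word aligned */
	uint8_t buffer[0] DEMO_ALIGNED;		/* tail buffer (GNU zero-length array) */
};

int main(void)
{
	/* one allocation covers context plus buffer, mirroring
	 * crypto_ahash_set_reqsize(..., sizeof(ctx) + BUFLEN) */
	struct demo_reqctx *ctx = malloc(sizeof(*ctx) + DEMO_BUFLEN);

	if (!ctx)
		return 1;
	ctx->buflen = DEMO_BUFLEN;
	memcpy(ctx->buffer, "abc", 3);
	ctx->bufcnt = 3;
	printf("buffered %zu of %zu bytes at %p\n",
	       ctx->bufcnt, ctx->buflen, (void *)ctx->buffer);
	free(ctx);
	return 0;
}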
- - if (aligned && (ctx->flags & FLAGS_FIRST)) - /* digest: first and final */ - ctx->flags |= FLAGS_FAST; - - ctx->flags &= ~FLAGS_FIRST; } - } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { - /* if not finaup -> not fast */ + } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_append_sg(ctx); return 0; } @@ -761,12 +813,14 @@ static int omap_sham_final(struct ahash_request *req) ctx->flags |= FLAGS_FINUP; - /* OMAP HW accel works only with buffers >= 9 */ - /* HMAC is always >= 9 because of ipad */ - if ((ctx->digcnt + ctx->bufcnt) < 9) - err = omap_sham_final_shash(req); - else if (ctx->bufcnt) - return omap_sham_enqueue(req, OP_FINAL); + if (!(ctx->flags & FLAGS_ERROR)) { + /* OMAP HW accel works only with buffers >= 9 */ + /* HMAC is always >= 9 because of ipad */ + if ((ctx->digcnt + ctx->bufcnt) < 9) + err = omap_sham_final_shash(req); + else if (ctx->bufcnt) + return omap_sham_enqueue(req, OP_FINAL); + } omap_sham_cleanup(req); @@ -836,6 +890,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); const char *alg_name = crypto_tfm_alg_name(tfm); + pr_info("enter\n"); + /* Allocate a fallback and abort if it failed. */ tctx->fallback = crypto_alloc_shash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -846,7 +902,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct omap_sham_reqctx)); + sizeof(struct omap_sham_reqctx) + BUFLEN); if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; @@ -932,7 +988,7 @@ static struct ahash_alg algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -956,7 +1012,7 @@ static struct ahash_alg algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha1_init, .cra_exit = omap_sham_cra_exit, @@ -980,7 +1036,7 @@ static struct ahash_alg algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_md5_init, .cra_exit = omap_sham_cra_exit, @@ -993,7 +1049,7 @@ static void omap_sham_done_task(unsigned long data) struct omap_sham_dev *dd = (struct omap_sham_dev *)data; struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - int ready = 1; + int ready = 0, err = 0; if (ctx->flags & FLAGS_OUTPUT_READY) { ctx->flags &= ~FLAGS_OUTPUT_READY; @@ -1003,15 +1059,18 @@ static void omap_sham_done_task(unsigned long data) if (dd->flags & FLAGS_DMA_ACTIVE) { dd->flags &= ~FLAGS_DMA_ACTIVE; omap_sham_update_dma_stop(dd); - omap_sham_update_dma_slow(dd); + if (!dd->err) + err = omap_sham_update_dma_start(dd); } - if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { - dev_dbg(dd->dev, "update done\n"); + err = dd->err ? 
: err; + + if (err != -EINPROGRESS && (ready || err)) { + dev_dbg(dd->dev, "update done: err: %d\n", err); /* finish curent request */ - omap_sham_finish_req(req, 0); + omap_sham_finish_req(req, err); /* start new request */ - omap_sham_handle_queue(dd); + omap_sham_handle_queue(dd, NULL); } } @@ -1019,7 +1078,7 @@ static void omap_sham_queue_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; - omap_sham_handle_queue(dd); + omap_sham_handle_queue(dd, NULL); } static irqreturn_t omap_sham_irq(int irq, void *dev_id) @@ -1041,6 +1100,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) omap_sham_read(dd, SHA_REG_CTRL); ctx->flags |= FLAGS_OUTPUT_READY; + dd->err = 0; tasklet_schedule(&dd->done_task); return IRQ_HANDLED; @@ -1050,8 +1110,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) { struct omap_sham_dev *dd = data; - if (likely(lch == dd->dma_lch)) - tasklet_schedule(&dd->done_task); + if (ch_status != OMAP_DMA_BLOCK_IRQ) { + pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); + dd->err = -EIO; + dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ + } + + tasklet_schedule(&dd->done_task); } static int omap_sham_dma_init(struct omap_sham_dev *dd) @@ -1066,15 +1131,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd) dev_err(dd->dev, "Unable to request DMA channel\n"); return err; } - omap_set_dma_dest_params(dd->dma_lch, 0, - OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + SHA_REG_DIN(0), 0, 16); - - omap_set_dma_dest_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_16); - - omap_set_dma_src_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_4); return 0; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 8a515baa38f7..db33d300aa23 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -21,7 +22,6 @@ #include #include #include -#include "padlock.h" /* * Number of data blocks actually fetched for each xcrypt insn. diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index d3a27e0119bc..adf075b6b9a8 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -22,13 +23,6 @@ #include #include #include -#include "padlock.h" - -#ifdef CONFIG_64BIT -#define STACK_ALIGN 16 -#else -#define STACK_ALIGN 4 -#endif struct padlock_sha_desc { struct shash_desc fallback; diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index b98c67664ae7..c461eda62411 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -110,8 +110,6 @@ static void unregister_dca_providers(void) /* at this point only one domain in the list is expected */ domain = list_first_entry(&dca_domains, struct dca_domain, node); - if (!domain) - return; list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { list_del(&dca->node); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 3109bd94bc4f..78266382797e 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -1060,8 +1060,8 @@ static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) * mid_setup_dma - Setup the DMA controller * @pdev: Controller PCI device structure * - * Initilize the DMA controller, channels, registers with DMA engine, - * ISR. Initilize DMA controller channels. + * Initialize the DMA controller, channels, registers with DMA engine, + * ISR. Initialize DMA controller channels. 
*/ static int mid_setup_dma(struct pci_dev *pdev) { @@ -1217,7 +1217,7 @@ static void middma_shutdown(struct pci_dev *pdev) * @pdev: Controller PCI device structure * @id: pci device id structure * - * Initilize the PCI device, map BARs, query driver data. + * Initialize the PCI device, map BARs, query driver data. * Call setup_dma to complete contoller and chan initilzation */ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h index 60e0d1c72dee..6f8b07131ec4 100644 --- a/drivers/edac/amd8131_edac.h +++ b/drivers/edac/amd8131_edac.h @@ -99,7 +99,7 @@ struct amd8131_dev_info { /* * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC - * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are + * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are * four PCIX Bridges on ATCA-6101 altogether. * * These PCIX Bridges share the same PCI Device ID and are all of diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index c973004c002c..db1df59ae2b6 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -47,7 +47,7 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) offset = address & ~PAGE_MASK; syndrome = (ar & 0x000000001fe00000ul) >> 21; - /* TODO: Decoding of the error addresss */ + /* TODO: Decoding of the error address */ edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, syndrome, 0, chan, ""); } @@ -68,7 +68,7 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) pfn = address >> PAGE_SHIFT; offset = address & ~PAGE_MASK; - /* TODO: Decoding of the error addresss */ + /* TODO: Decoding of the error address */ edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); } diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index ff1eb7bb26c6..3d965347a673 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -259,7 +259,7 @@ enum scrub_type { * for single channel are 64 bits, for dual channel 128 * bits. * - * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memmory. + * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. * Motherboards commonly drive two chip-select pins to * a memory stick. A single-ranked stick, will occupy * only one of those rows. The other will be unused. diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 362861c15779..81154ab296b6 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1,6 +1,6 @@ /* Intel i7 core/Nehalem Memory Controller kernel module * - * This driver supports yhe memory controllers found on the Intel + * This driver supports the memory controllers found on the Intel * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield * and Westmere-EP. @@ -1271,7 +1271,7 @@ static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) int i; /* - * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses + * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses * aren't announced by acpi. 
So, we need to use a legacy scan probing * to detect them */ @@ -1864,7 +1864,7 @@ static int i7core_mce_check_error(void *priv, struct mce *mce) if (mce->mcgstatus & 1) i7core_check_error(mci); - /* Advice mcelog that the error were handled */ + /* Advise mcelog that the errors were handled */ return 1; } diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 070cea41b661..b9f0c20df1aa 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -873,7 +873,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1) } /** - * ppc4xx_edac_init_csrows - intialize driver instance rows + * ppc4xx_edac_init_csrows - initialize driver instance rows * @mci: A pointer to the EDAC memory controller instance * associated with the ibm,sdram-4xx-ddr2 controller for which * the csrows (i.e. banks/ranks) are being initialized. @@ -881,7 +881,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1) * currently set for the controller, from which bank width * and memory typ information is derived. * - * This routine intializes the virtual "chip select rows" associated + * This routine initializes the virtual "chip select rows" associated * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2 * controller bank/rank is mapped to a row. * @@ -992,7 +992,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) } /** - * ppc4xx_edac_mc_init - intialize driver instance + * ppc4xx_edac_mc_init - initialize driver instance * @mci: A pointer to the EDAC memory controller instance being * initialized. * @op: A pointer to the OpenFirmware device tree node associated diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index d77d120ddc25..bd3c61b6dd8d 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -961,7 +961,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, for (i = 0; i < AR_WRAPAROUND_PAGES; i++) pages[AR_BUFFERS + i] = ctx->pages[i]; ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES, - -1, PAGE_KERNEL_RO); + -1, PAGE_KERNEL); if (!ctx->buffer) goto out_of_memory; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 082495bb08a7..664660e56335 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -118,7 +118,7 @@ config GPIO_SCH config GPIO_VX855 tristate "VIA VX855/VX875 GPIO" - depends on GPIOLIB + depends on GPIOLIB && MFD_SUPPORT && PCI select MFD_CORE select MFD_VX855 help @@ -295,7 +295,7 @@ comment "PCI GPIO expanders:" config GPIO_CS5535 tristate "AMD CS5535/CS5536 GPIO support" - depends on PCI && !CS5535_GPIO + depends on PCI && X86 && !CS5535_GPIO help The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that can be used for quite a number of things. The CS5535/6 is found on @@ -333,6 +333,15 @@ config GPIO_PCH which is an IOH(Input/Output Hub) for x86 embedded processor. This driver can access PCH GPIO device. +config GPIO_ML_IOH + tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support" + depends on PCI + help + ML7213 is companion chip for Intel Atom E6xx series. + This driver can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/Output + Hub) which is for IVI(In-Vehicle Infotainment) use. + This driver can access the IOH's GPIO device. 
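[Editorial note, not part of the patch: the GPIO_ML_IOH entry above covers drivers/gpio/ml_ioh_gpio.c, which is added in full further down in this diff. As background only, the fragment below sketches the minimal gpiolib registration such a driver performs with the gpiochip_add() API of this kernel series; all demo_* names are invented, and real hardware access is replaced by a soft state bitmap.]

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/gpio.h>

/* toy state: remember the level last driven on each of 8 lines */
static unsigned long demo_out_state;

static int demo_gpio_get(struct gpio_chip *gc, unsigned nr)
{
	return test_bit(nr, &demo_out_state);
}

static void demo_gpio_set(struct gpio_chip *gc, unsigned nr, int val)
{
	if (val)
		set_bit(nr, &demo_out_state);
	else
		clear_bit(nr, &demo_out_state);
}

static int demo_gpio_dir_in(struct gpio_chip *gc, unsigned nr)
{
	return 0;	/* nothing to program in this sketch */
}

static int demo_gpio_dir_out(struct gpio_chip *gc, unsigned nr, int val)
{
	demo_gpio_set(gc, nr, val);
	return 0;
}

static struct gpio_chip demo_chip = {
	.label			= "demo-gpio",
	.owner			= THIS_MODULE,
	.direction_input	= demo_gpio_dir_in,
	.get			= demo_gpio_get,
	.direction_output	= demo_gpio_dir_out,
	.set			= demo_gpio_set,
	.base			= -1,	/* let gpiolib pick the number range */
	.ngpio			= 8,
};

static int __init demo_gpio_init(void)
{
	return gpiochip_add(&demo_chip);
}
module_init(demo_gpio_init);

static void __exit demo_gpio_exit(void)
{
	if (gpiochip_remove(&demo_chip))
		pr_err("demo-gpio: chip still in use\n");
}
module_exit(demo_gpio_exit);

MODULE_LICENSE("GPL");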
+ config GPIO_TIMBERDALE bool "Support for timberdale GPIO IP" depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM @@ -342,6 +351,7 @@ config GPIO_TIMBERDALE config GPIO_RDC321X tristate "RDC R-321x GPIO support" depends on PCI && GPIOLIB + select MFD_SUPPORT select MFD_CORE select MFD_RDC321X help diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 39bfd7a37650..3351cf87b0ed 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -41,3 +41,4 @@ obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o obj-$(CONFIG_GPIO_SX150X) += sx150x.o obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o +obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c index 0871f78af593..33fc685cb385 100644 --- a/drivers/gpio/adp5588-gpio.c +++ b/drivers/gpio/adp5588-gpio.c @@ -146,9 +146,10 @@ static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off) return dev->irq_base + off; } -static void adp5588_irq_bus_lock(unsigned int irq) +static void adp5588_irq_bus_lock(struct irq_data *d) { - struct adp5588_gpio *dev = get_irq_chip_data(irq); + struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + mutex_lock(&dev->irq_lock); } @@ -160,9 +161,9 @@ static void adp5588_irq_bus_lock(unsigned int irq) * and unlocks the bus. */ -static void adp5588_irq_bus_sync_unlock(unsigned int irq) +static void adp5588_irq_bus_sync_unlock(struct irq_data *d) { - struct adp5588_gpio *dev = get_irq_chip_data(irq); + struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); int i; for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) @@ -175,31 +176,31 @@ static void adp5588_irq_bus_sync_unlock(unsigned int irq) mutex_unlock(&dev->irq_lock); } -static void adp5588_irq_mask(unsigned int irq) +static void adp5588_irq_mask(struct irq_data *d) { - struct adp5588_gpio *dev = get_irq_chip_data(irq); - unsigned gpio = irq - dev->irq_base; + struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + unsigned gpio = d->irq - dev->irq_base; dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio); } -static void adp5588_irq_unmask(unsigned int irq) +static void adp5588_irq_unmask(struct irq_data *d) { - struct adp5588_gpio *dev = get_irq_chip_data(irq); - unsigned gpio = irq - dev->irq_base; + struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + unsigned gpio = d->irq - dev->irq_base; dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio); } -static int adp5588_irq_set_type(unsigned int irq, unsigned int type) +static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) { - struct adp5588_gpio *dev = get_irq_chip_data(irq); - uint16_t gpio = irq - dev->irq_base; + struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + uint16_t gpio = d->irq - dev->irq_base; unsigned bank, bit; if ((type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&dev->client->dev, "irq %d: unsupported type %d\n", - irq, type); + d->irq, type); return -EINVAL; } @@ -222,11 +223,11 @@ static int adp5588_irq_set_type(unsigned int irq, unsigned int type) static struct irq_chip adp5588_irq_chip = { .name = "adp5588", - .mask = adp5588_irq_mask, - .unmask = adp5588_irq_unmask, - .bus_lock = adp5588_irq_bus_lock, - .bus_sync_unlock = adp5588_irq_bus_sync_unlock, - .set_type = adp5588_irq_set_type, + .irq_mask = adp5588_irq_mask, + .irq_unmask = adp5588_irq_unmask, + .irq_bus_lock = adp5588_irq_bus_lock, + .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock, + .irq_set_type = adp5588_irq_set_type, }; static int adp5588_gpio_read_intstat(struct i2c_client *client, 
u8 *buf) diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index d3e55a0ae92b..0d05ea7d499b 100644 --- a/drivers/gpio/cs5535-gpio.c +++ b/drivers/gpio/cs5535-gpio.c @@ -11,13 +11,13 @@ #include #include #include -#include +#include #include #include #include +#include #define DRV_NAME "cs5535-gpio" -#define GPIO_BAR 1 /* * Some GPIO pins @@ -46,7 +46,7 @@ static struct cs5535_gpio_chip { struct gpio_chip chip; resource_size_t base; - struct pci_dev *pdev; + struct platform_device *pdev; spinlock_t lock; } cs5535_gpio_chip; @@ -144,6 +144,57 @@ int cs5535_gpio_isset(unsigned offset, unsigned int reg) } EXPORT_SYMBOL_GPL(cs5535_gpio_isset); +int cs5535_gpio_set_irq(unsigned group, unsigned irq) +{ + uint32_t lo, hi; + + if (group > 7 || irq > 15) + return -EINVAL; + + rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + + lo &= ~(0xF << (group * 4)); + lo |= (irq & 0xF) << (group * 4); + + wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq); + +void cs5535_gpio_setup_event(unsigned offset, int pair, int pme) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + uint32_t shift = (offset % 8) * 4; + unsigned long flags; + uint32_t val; + + if (offset >= 24) + offset = GPIO_MAP_W; + else if (offset >= 16) + offset = GPIO_MAP_Z; + else if (offset >= 8) + offset = GPIO_MAP_Y; + else + offset = GPIO_MAP_X; + + spin_lock_irqsave(&chip->lock, flags); + val = inl(chip->base + offset); + + /* Clear whatever was there before */ + val &= ~(0xF << shift); + + /* Set the new value */ + val |= ((pair & 7) << shift); + + /* Set the PME bit if this is a PME event */ + if (pme) + val |= (1 << (shift + 3)); + + outl(val, chip->base + offset); + spin_unlock_irqrestore(&chip->lock, flags); +} +EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event); + /* * Generic gpio_chip API support. */ @@ -249,10 +300,10 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = { }, }; -static int __init cs5535_gpio_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_id) +static int __devinit cs5535_gpio_probe(struct platform_device *pdev) { - int err; + struct resource *res; + int err = -EIO; ulong mask_orig = mask; /* There are two ways to get the GPIO base address; one is by @@ -262,25 +313,23 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev, * it turns out to be unreliable in the face of crappy BIOSes, we * can always go back to using MSRs.. 
*/ - err = pci_enable_device_io(pdev); - if (err) { - dev_err(&pdev->dev, "can't enable device IO\n"); + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); goto done; } - err = pci_request_region(pdev, GPIO_BAR, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR); + if (!request_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "can't request region\n"); goto done; } /* set up the driver-specific struct */ - cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR); + cs5535_gpio_chip.base = res->start; cs5535_gpio_chip.pdev = pdev; spin_lock_init(&cs5535_gpio_chip.lock); - dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR, - (unsigned long long) cs5535_gpio_chip.base); + dev_info(&pdev->dev, "reserved resource region %pR\n", res); /* mask out reserved pins */ mask &= 0x1F7FFFFF; @@ -298,78 +347,49 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev, if (err) goto release_region; - dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n"); + dev_info(&pdev->dev, "GPIO support successfully loaded.\n"); return 0; release_region: - pci_release_region(pdev, GPIO_BAR); + release_region(res->start, resource_size(res)); done: return err; } -static void __exit cs5535_gpio_remove(struct pci_dev *pdev) +static int __devexit cs5535_gpio_remove(struct platform_device *pdev) { + struct resource *r; int err; err = gpiochip_remove(&cs5535_gpio_chip.chip); if (err) { /* uhh? */ dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); - } - pci_release_region(pdev, GPIO_BAR); -} - -static struct pci_device_id cs5535_gpio_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, - { 0, }, -}; -MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl); - -/* - * We can't use the standard PCI driver registration stuff here, since - * that allows only one driver to bind to each PCI device (and we want - * multiple drivers to be able to bind to the device). Instead, manually - * scan for the PCI device, request a single region, and keep track of the - * devices that we're using. 
- */ - -static int __init cs5535_gpio_scan_pci(void) -{ - struct pci_dev *pdev; - int err = -ENODEV; - int i; - - for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) { - pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor, - cs5535_gpio_pci_tbl[i].device, NULL); - if (pdev) { - err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]); - if (err) - pci_dev_put(pdev); - - /* we only support a single CS5535/6 southbridge */ - break; - } + return err; } - return err; + r = platform_get_resource(pdev, IORESOURCE_IO, 0); + release_region(r->start, resource_size(r)); + return 0; } -static void __exit cs5535_gpio_free_pci(void) -{ - cs5535_gpio_remove(cs5535_gpio_chip.pdev); - pci_dev_put(cs5535_gpio_chip.pdev); -} +static struct platform_driver cs5535_gpio_drv = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = cs5535_gpio_probe, + .remove = __devexit_p(cs5535_gpio_remove), +}; static int __init cs5535_gpio_init(void) { - return cs5535_gpio_scan_pci(); + return platform_driver_register(&cs5535_gpio_drv); } static void __exit cs5535_gpio_exit(void) { - cs5535_gpio_free_pci(); + platform_driver_unregister(&cs5535_gpio_drv); } module_init(cs5535_gpio_init); @@ -378,3 +398,4 @@ module_exit(cs5535_gpio_exit); MODULE_AUTHOR("Andres Salomon "); MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c index 64db9dc3a275..d81cc748e77f 100644 --- a/drivers/gpio/langwell_gpio.c +++ b/drivers/gpio/langwell_gpio.c @@ -134,10 +134,10 @@ static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset) return lnw->irq_base + offset; } -static int lnw_irq_type(unsigned irq, unsigned type) +static int lnw_irq_type(struct irq_data *d, unsigned type) { - struct lnw_gpio *lnw = get_irq_chip_data(irq); - u32 gpio = irq - lnw->irq_base; + struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d); + u32 gpio = d->irq - lnw->irq_base; unsigned long flags; u32 value; void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER); @@ -162,19 +162,19 @@ static int lnw_irq_type(unsigned irq, unsigned type) return 0; } -static void lnw_irq_unmask(unsigned irq) +static void lnw_irq_unmask(struct irq_data *d) { } -static void lnw_irq_mask(unsigned irq) +static void lnw_irq_mask(struct irq_data *d) { } static struct irq_chip lnw_irqchip = { .name = "LNW-GPIO", - .mask = lnw_irq_mask, - .unmask = lnw_irq_unmask, - .set_type = lnw_irq_type, + .irq_mask = lnw_irq_mask, + .irq_unmask = lnw_irq_unmask, + .irq_set_type = lnw_irq_type, }; static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */ diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c index 9cad60f9e962..9e1d01f0071a 100644 --- a/drivers/gpio/max732x.c +++ b/drivers/gpio/max732x.c @@ -327,40 +327,40 @@ static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off) return chip->irq_base + off; } -static void max732x_irq_mask(unsigned int irq) +static void max732x_irq_mask(struct irq_data *d) { - struct max732x_chip *chip = get_irq_chip_data(irq); + struct max732x_chip *chip = irq_data_get_irq_chip_data(d); - chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base)); + chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base)); } -static void max732x_irq_unmask(unsigned int irq) +static void max732x_irq_unmask(struct irq_data *d) { - struct max732x_chip *chip = get_irq_chip_data(irq); + struct max732x_chip *chip = irq_data_get_irq_chip_data(d); - chip->irq_mask_cur |= 1 << (irq - chip->irq_base); + chip->irq_mask_cur |= 1 
<< (d->irq - chip->irq_base); } -static void max732x_irq_bus_lock(unsigned int irq) +static void max732x_irq_bus_lock(struct irq_data *d) { - struct max732x_chip *chip = get_irq_chip_data(irq); + struct max732x_chip *chip = irq_data_get_irq_chip_data(d); mutex_lock(&chip->irq_lock); chip->irq_mask_cur = chip->irq_mask; } -static void max732x_irq_bus_sync_unlock(unsigned int irq) +static void max732x_irq_bus_sync_unlock(struct irq_data *d) { - struct max732x_chip *chip = get_irq_chip_data(irq); + struct max732x_chip *chip = irq_data_get_irq_chip_data(d); max732x_irq_update_mask(chip); mutex_unlock(&chip->irq_lock); } -static int max732x_irq_set_type(unsigned int irq, unsigned int type) +static int max732x_irq_set_type(struct irq_data *d, unsigned int type) { - struct max732x_chip *chip = get_irq_chip_data(irq); - uint16_t off = irq - chip->irq_base; + struct max732x_chip *chip = irq_data_get_irq_chip_data(d); + uint16_t off = d->irq - chip->irq_base; uint16_t mask = 1 << off; if (!(mask & chip->dir_input)) { @@ -371,7 +371,7 @@ static int max732x_irq_set_type(unsigned int irq, unsigned int type) if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", - irq, type); + d->irq, type); return -EINVAL; } @@ -390,11 +390,11 @@ static int max732x_irq_set_type(unsigned int irq, unsigned int type) static struct irq_chip max732x_irq_chip = { .name = "max732x", - .mask = max732x_irq_mask, - .unmask = max732x_irq_unmask, - .bus_lock = max732x_irq_bus_lock, - .bus_sync_unlock = max732x_irq_bus_sync_unlock, - .set_type = max732x_irq_set_type, + .irq_mask = max732x_irq_mask, + .irq_unmask = max732x_irq_unmask, + .irq_bus_lock = max732x_irq_bus_lock, + .irq_bus_sync_unlock = max732x_irq_bus_sync_unlock, + .irq_set_type = max732x_irq_set_type, }; static uint8_t max732x_irq_pending(struct max732x_chip *chip) diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c new file mode 100644 index 000000000000..cead8e6ff345 --- /dev/null +++ b/drivers/gpio/ml_ioh_gpio.c @@ -0,0 +1,352 @@ +/* + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include + +#define PCI_VENDOR_ID_ROHM 0x10DB + +struct ioh_reg_comn { + u32 ien; + u32 istatus; + u32 idisp; + u32 iclr; + u32 imask; + u32 imaskclr; + u32 po; + u32 pi; + u32 pm; + u32 im_0; + u32 im_1; + u32 reserved; +}; + +struct ioh_regs { + struct ioh_reg_comn regs[8]; + u32 reserve1[16]; + u32 ioh_sel_reg[4]; + u32 reserve2[11]; + u32 srst; +}; + +/** + * struct ioh_gpio_reg_data - The register store data. + * @po_reg: To store contents of PO register. + * @pm_reg: To store contents of PM register. + */ +struct ioh_gpio_reg_data { + u32 po_reg; + u32 pm_reg; +}; + +/** + * struct ioh_gpio - GPIO private data structure. + * @base: PCI base address of Memory mapped I/O register. + * @reg: Memory mapped IOH GPIO register list. 
+ * @dev: Pointer to device structure. + * @gpio: Data for GPIO infrastructure. + * @ioh_gpio_reg: Memory mapped Register data is saved here + * when suspend. + * @ch: Indicate GPIO channel + */ +struct ioh_gpio { + void __iomem *base; + struct ioh_regs __iomem *reg; + struct device *dev; + struct gpio_chip gpio; + struct ioh_gpio_reg_data ioh_gpio_reg; + struct mutex lock; + int ch; +}; + +static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12}; + +static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) +{ + u32 reg_val; + struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); + + mutex_lock(&chip->lock); + reg_val = ioread32(&chip->reg->regs[chip->ch].po); + if (val) + reg_val |= (1 << nr); + else + reg_val &= ~(1 << nr); + + iowrite32(reg_val, &chip->reg->regs[chip->ch].po); + mutex_unlock(&chip->lock); +} + +static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr) +{ + struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); + + return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr); +} + +static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, + int val) +{ + struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); + u32 pm; + u32 reg_val; + + mutex_lock(&chip->lock); + pm = ioread32(&chip->reg->regs[chip->ch].pm) & + ((1 << num_ports[chip->ch]) - 1); + pm |= (1 << nr); + iowrite32(pm, &chip->reg->regs[chip->ch].pm); + + reg_val = ioread32(&chip->reg->regs[chip->ch].po); + if (val) + reg_val |= (1 << nr); + else + reg_val &= ~(1 << nr); + + mutex_unlock(&chip->lock); + + return 0; +} + +static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) +{ + struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); + u32 pm; + + mutex_lock(&chip->lock); + pm = ioread32(&chip->reg->regs[chip->ch].pm) & + ((1 << num_ports[chip->ch]) - 1); + pm &= ~(1 << nr); + iowrite32(pm, &chip->reg->regs[chip->ch].pm); + mutex_unlock(&chip->lock); + + return 0; +} + +/* + * Save register configuration and disable interrupts. + */ +static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip) +{ + chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po); + chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm); +} + +/* + * This function restores the register configuration of the GPIO device. 
+ */ +static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip) +{ + /* to store contents of PO register */ + iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po); + /* to store contents of PM register */ + iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm); +} + +static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) +{ + struct gpio_chip *gpio = &chip->gpio; + + gpio->label = dev_name(chip->dev); + gpio->owner = THIS_MODULE; + gpio->direction_input = ioh_gpio_direction_input; + gpio->get = ioh_gpio_get; + gpio->direction_output = ioh_gpio_direction_output; + gpio->set = ioh_gpio_set; + gpio->dbg_show = NULL; + gpio->base = -1; + gpio->ngpio = num_port; + gpio->can_sleep = 0; +} + +static int __devinit ioh_gpio_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret; + int i; + struct ioh_gpio *chip; + void __iomem *base; + void __iomem *chip_save; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__); + goto err_pci_enable; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + dev_err(&pdev->dev, "pci_request_regions failed-%d", ret); + goto err_request_regions; + } + + base = pci_iomap(pdev, 1, 0); + if (base == 0) { + dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); + ret = -ENOMEM; + goto err_iomap; + } + + chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL); + if (chip_save == NULL) { + dev_err(&pdev->dev, "%s : kzalloc failed", __func__); + ret = -ENOMEM; + goto err_kzalloc; + } + + chip = chip_save; + for (i = 0; i < 8; i++, chip++) { + chip->dev = &pdev->dev; + chip->base = base; + chip->reg = chip->base; + chip->ch = i; + mutex_init(&chip->lock); + ioh_gpio_setup(chip, num_ports[i]); + ret = gpiochip_add(&chip->gpio); + if (ret) { + dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n"); + goto err_gpiochip_add; + } + } + + chip = chip_save; + pci_set_drvdata(pdev, chip); + + return 0; + +err_gpiochip_add: + for (; i != 0; i--) { + chip--; + ret = gpiochip_remove(&chip->gpio); + if (ret) + dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); + } + kfree(chip_save); + +err_kzalloc: + pci_iounmap(pdev, base); + +err_iomap: + pci_release_regions(pdev); + +err_request_regions: + pci_disable_device(pdev); + +err_pci_enable: + + dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret); + return ret; +} + +static void __devexit ioh_gpio_remove(struct pci_dev *pdev) +{ + int err; + int i; + struct ioh_gpio *chip = pci_get_drvdata(pdev); + void __iomem *chip_save; + + chip_save = chip; + for (i = 0; i < 8; i++, chip++) { + err = gpiochip_remove(&chip->gpio); + if (err) + dev_err(&pdev->dev, "Failed gpiochip_remove\n"); + } + + chip = chip_save; + pci_iounmap(pdev, chip->base); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(chip); +} + +#ifdef CONFIG_PM +static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state) +{ + s32 ret; + struct ioh_gpio *chip = pci_get_drvdata(pdev); + + ioh_gpio_save_reg_conf(chip); + ioh_gpio_restore_reg_conf(chip); + + ret = pci_save_state(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret); + return ret; + } + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D0); + ret = pci_enable_wake(pdev, PCI_D0, 1); + if (ret) + dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret); + + return 0; +} + +static int ioh_gpio_resume(struct pci_dev *pdev) +{ + s32 ret; + struct ioh_gpio *chip = pci_get_drvdata(pdev); + + ret = pci_enable_wake(pdev, PCI_D0, 0); 
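/*
 * [Editorial sketch -- not part of the patch.]  ioh_gpio_suspend() and
 * ioh_gpio_resume() here use the legacy pci_driver .suspend/.resume hooks.
 * The fragment below is a stripped-down illustration of that calling
 * sequence with invented demo_* names; error handling and device-specific
 * register save/restore are reduced to comments.
 */
#include <linux/pci.h>

static int demo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int ret;

	/* save device-specific registers here, then the PCI config space */
	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_enable_wake(pdev, PCI_D0, 1);	/* arm wakeup if supported */
	return 0;
}

static int demo_pci_resume(struct pci_dev *pdev)
{
	int ret;

	pci_enable_wake(pdev, PCI_D0, 0);	/* disarm wakeup */
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_restore_state(pdev);
	/* re-program device-specific registers here */
	return 0;
}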
+ + pci_set_power_state(pdev, PCI_D0); + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret); + return ret; + } + pci_restore_state(pdev); + + iowrite32(0x01, &chip->reg->srst); + iowrite32(0x00, &chip->reg->srst); + ioh_gpio_restore_reg_conf(chip); + + return 0; +} +#else +#define ioh_gpio_suspend NULL +#define ioh_gpio_resume NULL +#endif + +static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = { + { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, + { 0, } +}; + +static struct pci_driver ioh_gpio_driver = { + .name = "ml_ioh_gpio", + .id_table = ioh_gpio_pcidev_id, + .probe = ioh_gpio_probe, + .remove = __devexit_p(ioh_gpio_remove), + .suspend = ioh_gpio_suspend, + .resume = ioh_gpio_resume +}; + +static int __init ioh_gpio_pci_init(void) +{ + return pci_register_driver(&ioh_gpio_driver); +} +module_init(ioh_gpio_pci_init); + +static void __exit ioh_gpio_pci_exit(void) +{ + pci_unregister_driver(&ioh_gpio_driver); +} +module_exit(ioh_gpio_pci_exit); + +MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 501866662e05..a261972f603d 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -228,30 +228,30 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off) return chip->irq_base + off; } -static void pca953x_irq_mask(unsigned int irq) +static void pca953x_irq_mask(struct irq_data *d) { - struct pca953x_chip *chip = get_irq_chip_data(irq); + struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); - chip->irq_mask &= ~(1 << (irq - chip->irq_base)); + chip->irq_mask &= ~(1 << (d->irq - chip->irq_base)); } -static void pca953x_irq_unmask(unsigned int irq) +static void pca953x_irq_unmask(struct irq_data *d) { - struct pca953x_chip *chip = get_irq_chip_data(irq); + struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); - chip->irq_mask |= 1 << (irq - chip->irq_base); + chip->irq_mask |= 1 << (d->irq - chip->irq_base); } -static void pca953x_irq_bus_lock(unsigned int irq) +static void pca953x_irq_bus_lock(struct irq_data *d) { - struct pca953x_chip *chip = get_irq_chip_data(irq); + struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); mutex_lock(&chip->irq_lock); } -static void pca953x_irq_bus_sync_unlock(unsigned int irq) +static void pca953x_irq_bus_sync_unlock(struct irq_data *d) { - struct pca953x_chip *chip = get_irq_chip_data(irq); + struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); uint16_t new_irqs; uint16_t level; @@ -268,15 +268,15 @@ static void pca953x_irq_bus_sync_unlock(unsigned int irq) mutex_unlock(&chip->irq_lock); } -static int pca953x_irq_set_type(unsigned int irq, unsigned int type) +static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { - struct pca953x_chip *chip = get_irq_chip_data(irq); - uint16_t level = irq - chip->irq_base; + struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + uint16_t level = d->irq - chip->irq_base; uint16_t mask = 1 << level; if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", - irq, type); + d->irq, type); return -EINVAL; } @@ -295,11 +295,11 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type) static struct irq_chip pca953x_irq_chip = { .name = "pca953x", - .mask = pca953x_irq_mask, - .unmask = pca953x_irq_unmask, - .bus_lock = pca953x_irq_bus_lock, - .bus_sync_unlock = pca953x_irq_bus_sync_unlock, - .set_type = pca953x_irq_set_type, + .irq_mask = pca953x_irq_mask, 
+ .irq_unmask = pca953x_irq_unmask, + .irq_bus_lock = pca953x_irq_bus_lock, + .irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock, + .irq_set_type = pca953x_irq_set_type, }; static uint16_t pca953x_irq_pending(struct pca953x_chip *chip) diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 5005990f751f..2975d22daffe 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -129,10 +129,10 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset) /* * PL061 GPIO IRQ */ -static void pl061_irq_disable(unsigned irq) +static void pl061_irq_disable(struct irq_data *d) { - struct pl061_gpio *chip = get_irq_chip_data(irq); - int offset = irq - chip->irq_base; + struct pl061_gpio *chip = irq_data_get_irq_chip_data(d); + int offset = d->irq - chip->irq_base; unsigned long flags; u8 gpioie; @@ -143,10 +143,10 @@ static void pl061_irq_disable(unsigned irq) spin_unlock_irqrestore(&chip->irq_lock, flags); } -static void pl061_irq_enable(unsigned irq) +static void pl061_irq_enable(struct irq_data *d) { - struct pl061_gpio *chip = get_irq_chip_data(irq); - int offset = irq - chip->irq_base; + struct pl061_gpio *chip = irq_data_get_irq_chip_data(d); + int offset = d->irq - chip->irq_base; unsigned long flags; u8 gpioie; @@ -157,10 +157,10 @@ static void pl061_irq_enable(unsigned irq) spin_unlock_irqrestore(&chip->irq_lock, flags); } -static int pl061_irq_type(unsigned irq, unsigned trigger) +static int pl061_irq_type(struct irq_data *d, unsigned trigger) { - struct pl061_gpio *chip = get_irq_chip_data(irq); - int offset = irq - chip->irq_base; + struct pl061_gpio *chip = irq_data_get_irq_chip_data(d); + int offset = d->irq - chip->irq_base; unsigned long flags; u8 gpiois, gpioibe, gpioiev; @@ -203,9 +203,9 @@ static int pl061_irq_type(unsigned irq, unsigned trigger) static struct irq_chip pl061_irqchip = { .name = "GPIO", - .enable = pl061_irq_enable, - .disable = pl061_irq_disable, - .set_type = pl061_irq_type, + .irq_enable = pl061_irq_enable, + .irq_disable = pl061_irq_disable, + .irq_set_type = pl061_irq_type, }; static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) @@ -214,7 +214,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) struct list_head *ptr; struct pl061_gpio *chip; - desc->chip->ack(irq); + desc->irq_data.chip->irq_ack(&desc->irq_data); list_for_each(ptr, chip_list) { unsigned long pending; int offset; @@ -229,7 +229,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) for_each_set_bit(offset, &pending, PL061_GPIO_NR) generic_handle_irq(pl061_to_irq(&chip->gc, offset)); } - desc->chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); } static int pl061_probe(struct amba_device *dev, struct amba_id *id) diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c index 7c9e6a052c45..eb2901f8ab5e 100644 --- a/drivers/gpio/stmpe-gpio.c +++ b/drivers/gpio/stmpe-gpio.c @@ -122,10 +122,10 @@ static struct gpio_chip template_chip = { .can_sleep = 1, }; -static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type) +static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type) { - struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq); - int offset = irq - stmpe_gpio->irq_base; + struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - stmpe_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -145,16 +145,16 @@ static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type) return 0; } -static void 
stmpe_gpio_irq_lock(unsigned int irq) +static void stmpe_gpio_irq_lock(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq); + struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); mutex_lock(&stmpe_gpio->irq_lock); } -static void stmpe_gpio_irq_sync_unlock(unsigned int irq) +static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq); + struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); struct stmpe *stmpe = stmpe_gpio->stmpe; int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8); static const u8 regmap[] = { @@ -180,20 +180,20 @@ static void stmpe_gpio_irq_sync_unlock(unsigned int irq) mutex_unlock(&stmpe_gpio->irq_lock); } -static void stmpe_gpio_irq_mask(unsigned int irq) +static void stmpe_gpio_irq_mask(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq); - int offset = irq - stmpe_gpio->irq_base; + struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - stmpe_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); stmpe_gpio->regs[REG_IE][regoffset] &= ~mask; } -static void stmpe_gpio_irq_unmask(unsigned int irq) +static void stmpe_gpio_irq_unmask(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq); - int offset = irq - stmpe_gpio->irq_base; + struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - stmpe_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -202,11 +202,11 @@ static void stmpe_gpio_irq_unmask(unsigned int irq) static struct irq_chip stmpe_gpio_irq_chip = { .name = "stmpe-gpio", - .bus_lock = stmpe_gpio_irq_lock, - .bus_sync_unlock = stmpe_gpio_irq_sync_unlock, - .mask = stmpe_gpio_irq_mask, - .unmask = stmpe_gpio_irq_unmask, - .set_type = stmpe_gpio_irq_set_type, + .irq_bus_lock = stmpe_gpio_irq_lock, + .irq_bus_sync_unlock = stmpe_gpio_irq_sync_unlock, + .irq_mask = stmpe_gpio_irq_mask, + .irq_unmask = stmpe_gpio_irq_unmask, + .irq_set_type = stmpe_gpio_irq_set_type, }; static irqreturn_t stmpe_gpio_irq(int irq, void *dev) diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c index 823559ab0e24..e60be0015c9b 100644 --- a/drivers/gpio/sx150x.c +++ b/drivers/gpio/sx150x.c @@ -304,36 +304,36 @@ static int sx150x_gpio_to_irq(struct gpio_chip *gc, unsigned offset) return chip->irq_base + offset; } -static void sx150x_irq_mask(unsigned int irq) +static void sx150x_irq_mask(struct irq_data *d) { - struct irq_chip *ic = get_irq_chip(irq); + struct irq_chip *ic = irq_data_get_irq_chip(d); struct sx150x_chip *chip; unsigned n; chip = container_of(ic, struct sx150x_chip, irq_chip); - n = irq - chip->irq_base; + n = d->irq - chip->irq_base; sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1); sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0); } -static void sx150x_irq_unmask(unsigned int irq) +static void sx150x_irq_unmask(struct irq_data *d) { - struct irq_chip *ic = get_irq_chip(irq); + struct irq_chip *ic = irq_data_get_irq_chip(d); struct sx150x_chip *chip; unsigned n; chip = container_of(ic, struct sx150x_chip, irq_chip); - n = irq - chip->irq_base; + n = d->irq - chip->irq_base; sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0); sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, chip->irq_sense >> (n * 2)); } -static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type) +static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type) { - struct irq_chip *ic = 
get_irq_chip(irq); + struct irq_chip *ic = irq_data_get_irq_chip(d); struct sx150x_chip *chip; unsigned n, val = 0; @@ -341,7 +341,7 @@ static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type) return -EINVAL; chip = container_of(ic, struct sx150x_chip, irq_chip); - n = irq - chip->irq_base; + n = d->irq - chip->irq_base; if (flow_type & IRQ_TYPE_EDGE_RISING) val |= 0x1; @@ -386,9 +386,9 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id) return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); } -static void sx150x_irq_bus_lock(unsigned int irq) +static void sx150x_irq_bus_lock(struct irq_data *d) { - struct irq_chip *ic = get_irq_chip(irq); + struct irq_chip *ic = irq_data_get_irq_chip(d); struct sx150x_chip *chip; chip = container_of(ic, struct sx150x_chip, irq_chip); @@ -396,9 +396,9 @@ static void sx150x_irq_bus_lock(unsigned int irq) mutex_lock(&chip->lock); } -static void sx150x_irq_bus_sync_unlock(unsigned int irq) +static void sx150x_irq_bus_sync_unlock(struct irq_data *d) { - struct irq_chip *ic = get_irq_chip(irq); + struct irq_chip *ic = irq_data_get_irq_chip(d); struct sx150x_chip *chip; unsigned n; @@ -437,16 +437,16 @@ static void sx150x_init_chip(struct sx150x_chip *chip, if (pdata->oscio_is_gpo) ++chip->gpio_chip.ngpio; - chip->irq_chip.name = client->name; - chip->irq_chip.mask = sx150x_irq_mask; - chip->irq_chip.unmask = sx150x_irq_unmask; - chip->irq_chip.set_type = sx150x_irq_set_type; - chip->irq_chip.bus_lock = sx150x_irq_bus_lock; - chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock; - chip->irq_summary = -1; - chip->irq_base = -1; - chip->irq_sense = 0; - chip->irq_set_type_pending = 0; + chip->irq_chip.name = client->name; + chip->irq_chip.irq_mask = sx150x_irq_mask; + chip->irq_chip.irq_unmask = sx150x_irq_unmask; + chip->irq_chip.irq_set_type = sx150x_irq_set_type; + chip->irq_chip.irq_bus_lock = sx150x_irq_bus_lock; + chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock; + chip->irq_summary = -1; + chip->irq_base = -1; + chip->irq_sense = 0; + chip->irq_set_type_pending = 0; } static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg) diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c index 180d584454fb..27200af1a595 100644 --- a/drivers/gpio/tc3589x-gpio.c +++ b/drivers/gpio/tc3589x-gpio.c @@ -110,10 +110,10 @@ static struct gpio_chip template_chip = { .can_sleep = 1, }; -static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type) +static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) { - struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq); - int offset = irq - tc3589x_gpio->irq_base; + struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tc3589x_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -137,16 +137,16 @@ static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type) return 0; } -static void tc3589x_gpio_irq_lock(unsigned int irq) +static void tc3589x_gpio_irq_lock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq); + struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); mutex_lock(&tc3589x_gpio->irq_lock); } -static void tc3589x_gpio_irq_sync_unlock(unsigned int irq) +static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq); + struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; static const 
u8 regmap[] = { [REG_IBE] = TC3589x_GPIOIBE0, @@ -172,20 +172,20 @@ static void tc3589x_gpio_irq_sync_unlock(unsigned int irq) mutex_unlock(&tc3589x_gpio->irq_lock); } -static void tc3589x_gpio_irq_mask(unsigned int irq) +static void tc3589x_gpio_irq_mask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq); - int offset = irq - tc3589x_gpio->irq_base; + struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tc3589x_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask; } -static void tc3589x_gpio_irq_unmask(unsigned int irq) +static void tc3589x_gpio_irq_unmask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq); - int offset = irq - tc3589x_gpio->irq_base; + struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tc3589x_gpio->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -194,11 +194,11 @@ static void tc3589x_gpio_irq_unmask(unsigned int irq) static struct irq_chip tc3589x_gpio_irq_chip = { .name = "tc3589x-gpio", - .bus_lock = tc3589x_gpio_irq_lock, - .bus_sync_unlock = tc3589x_gpio_irq_sync_unlock, - .mask = tc3589x_gpio_irq_mask, - .unmask = tc3589x_gpio_irq_unmask, - .set_type = tc3589x_gpio_irq_set_type, + .irq_bus_lock = tc3589x_gpio_irq_lock, + .irq_bus_sync_unlock = tc3589x_gpio_irq_sync_unlock, + .irq_mask = tc3589x_gpio_irq_mask, + .irq_unmask = tc3589x_gpio_irq_unmask, + .irq_set_type = tc3589x_gpio_irq_set_type, }; static irqreturn_t tc3589x_gpio_irq(int irq, void *dev) diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index 45293662e950..58c8f30352dd 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c @@ -109,10 +109,10 @@ static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset) /* * GPIO IRQ */ -static void timbgpio_irq_disable(unsigned irq) +static void timbgpio_irq_disable(struct irq_data *d) { - struct timbgpio *tgpio = get_irq_chip_data(irq); - int offset = irq - tgpio->irq_base; + struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tgpio->irq_base; unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); @@ -121,10 +121,10 @@ static void timbgpio_irq_disable(unsigned irq) spin_unlock_irqrestore(&tgpio->lock, flags); } -static void timbgpio_irq_enable(unsigned irq) +static void timbgpio_irq_enable(struct irq_data *d) { - struct timbgpio *tgpio = get_irq_chip_data(irq); - int offset = irq - tgpio->irq_base; + struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tgpio->irq_base; unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); @@ -133,10 +133,10 @@ static void timbgpio_irq_enable(unsigned irq) spin_unlock_irqrestore(&tgpio->lock, flags); } -static int timbgpio_irq_type(unsigned irq, unsigned trigger) +static int timbgpio_irq_type(struct irq_data *d, unsigned trigger) { - struct timbgpio *tgpio = get_irq_chip_data(irq); - int offset = irq - tgpio->irq_base; + struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); + int offset = d->irq - tgpio->irq_base; unsigned long flags; u32 lvr, flr, bflr = 0; u32 ver; @@ -199,7 +199,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) unsigned long ipr; int offset; - desc->chip->ack(irq); + desc->irq_data.chip->irq_ack(irq_get_irq_data(irq)); ipr = ioread32(tgpio->membase + TGPIO_IPR); iowrite32(ipr, tgpio->membase + TGPIO_ICR); @@ -217,9 +217,9 @@ static void timbgpio_irq(unsigned int 
irq, struct irq_desc *desc) static struct irq_chip timbgpio_irqchip = { .name = "GPIO", - .enable = timbgpio_irq_enable, - .disable = timbgpio_irq_disable, - .set_type = timbgpio_irq_type, + .irq_enable = timbgpio_irq_enable, + .irq_disable = timbgpio_irq_disable, + .irq_set_type = timbgpio_irq_type, }; static int __devinit timbgpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c index b16c9a8c03f5..cffa3bd7ad3b 100644 --- a/drivers/gpio/vr41xx_giu.c +++ b/drivers/gpio/vr41xx_giu.c @@ -111,69 +111,69 @@ static inline u16 giu_clear(u16 offset, u16 clear) return data; } -static void ack_giuint_low(unsigned int irq) +static void ack_giuint_low(struct irq_data *d) { - giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq)); + giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq)); } -static void mask_giuint_low(unsigned int irq) +static void mask_giuint_low(struct irq_data *d) { - giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq)); + giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq)); } -static void mask_ack_giuint_low(unsigned int irq) +static void mask_ack_giuint_low(struct irq_data *d) { unsigned int pin; - pin = GPIO_PIN_OF_IRQ(irq); + pin = GPIO_PIN_OF_IRQ(d->irq); giu_clear(GIUINTENL, 1 << pin); giu_write(GIUINTSTATL, 1 << pin); } -static void unmask_giuint_low(unsigned int irq) +static void unmask_giuint_low(struct irq_data *d) { - giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq)); + giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq)); } static struct irq_chip giuint_low_irq_chip = { .name = "GIUINTL", - .ack = ack_giuint_low, - .mask = mask_giuint_low, - .mask_ack = mask_ack_giuint_low, - .unmask = unmask_giuint_low, + .irq_ack = ack_giuint_low, + .irq_mask = mask_giuint_low, + .irq_mask_ack = mask_ack_giuint_low, + .irq_unmask = unmask_giuint_low, }; -static void ack_giuint_high(unsigned int irq) +static void ack_giuint_high(struct irq_data *d) { giu_write(GIUINTSTATH, - 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); + 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET)); } -static void mask_giuint_high(unsigned int irq) +static void mask_giuint_high(struct irq_data *d) { - giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); + giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET)); } -static void mask_ack_giuint_high(unsigned int irq) +static void mask_ack_giuint_high(struct irq_data *d) { unsigned int pin; - pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET; + pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET; giu_clear(GIUINTENH, 1 << pin); giu_write(GIUINTSTATH, 1 << pin); } -static void unmask_giuint_high(unsigned int irq) +static void unmask_giuint_high(struct irq_data *d) { - giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); + giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET)); } static struct irq_chip giuint_high_irq_chip = { .name = "GIUINTH", - .ack = ack_giuint_high, - .mask = mask_giuint_high, - .mask_ack = mask_ack_giuint_high, - .unmask = unmask_giuint_high, + .irq_ack = ack_giuint_high, + .irq_mask = mask_giuint_high, + .irq_mask_ack = mask_ack_giuint_high, + .irq_unmask = unmask_giuint_high, }; static int giu_get_irq(unsigned int irq) diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c index 618398e4ed8e..c822baacd8fc 100644 --- a/drivers/gpio/wm8994-gpio.c +++ b/drivers/gpio/wm8994-gpio.c @@ -35,6 +35,29 @@ static inline struct wm8994_gpio *to_wm8994_gpio(struct gpio_chip *chip) return container_of(chip, struct wm8994_gpio, 
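The GPIO expander hunks above (sx150x, tc3589x, timbgpio, vr41xx) all apply the same genirq conversion: irq_chip callbacks now take a struct irq_data * instead of a bare interrupt number and are registered through the irq_-prefixed fields (.irq_mask, .irq_unmask, .irq_set_type, .irq_bus_lock, .irq_bus_sync_unlock). Driver state is reached via irq_data_get_irq_chip_data() or irq_data_get_irq_chip(), and where the raw number is still needed for offset arithmetic it is read from d->irq. A minimal sketch of the converted shape; the expander_chip type and its shadow register are hypothetical stand-ins for driver-specific state, not taken from any of the patches above:

#include <linux/bitops.h>
#include <linux/irq.h>

struct expander_chip {				/* hypothetical driver state */
	unsigned int irq_base;
	u32 irq_enable_shadow;			/* cached interrupt-enable bits */
};

static void expander_irq_mask(struct irq_data *d)
{
	struct expander_chip *chip = irq_data_get_irq_chip_data(d);

	chip->irq_enable_shadow &= ~BIT(d->irq - chip->irq_base);
}

static void expander_irq_unmask(struct irq_data *d)
{
	struct expander_chip *chip = irq_data_get_irq_chip_data(d);

	chip->irq_enable_shadow |= BIT(d->irq - chip->irq_base);
}

static struct irq_chip expander_irq_chip = {
	.name		= "expander",
	.irq_mask	= expander_irq_mask,
	.irq_unmask	= expander_irq_unmask,
};

Where the irq_chip is embedded in the driver structure, container_of(irq_data_get_irq_chip(d), ...) works just as well, which is the route the sx150x conversion above takes.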
gpio_chip); } +static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip); + struct wm8994 *wm8994 = wm8994_gpio->wm8994; + + switch (wm8994->type) { + case WM8958: + switch (offset) { + case 1: + case 2: + case 3: + case 4: + case 6: + return -EINVAL; + } + break; + default: + break; + } + + return 0; +} + static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip); @@ -136,6 +159,7 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) static struct gpio_chip template_chip = { .label = "wm8994", .owner = THIS_MODULE, + .request = wm8994_gpio_request, .direction_input = wm8994_gpio_direction_in, .get = wm8994_gpio_get, .direction_output = wm8994_gpio_direction_out, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7af443672626..64828a7db77b 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -107,7 +107,6 @@ config DRM_I915 select FB_CFB_IMAGEBLIT # i915 depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick - select VIDEO_OUTPUT_CONTROL if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0307d601f5e5..5c4f9b9ecdc0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -607,25 +607,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_fini); -void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb) -{ - info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR : - FB_VISUAL_TRUECOLOR; - info->fix.mmio_start = 0; - info->fix.mmio_len = 0; - info->fix.type_aux = 0; - info->fix.xpanstep = 1; /* doing it in hw */ - info->fix.ypanstep = 1; /* doing it in hw */ - info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_NONE; - info->fix.type_aux = 0; - - info->fix.line_length = fb->pitch; - return; -} -EXPORT_SYMBOL(drm_fb_helper_fill_fix); - static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { @@ -835,7 +816,6 @@ int drm_fb_helper_set_par(struct fb_info *info) mutex_unlock(&dev->mode_config.mutex); return ret; } - drm_fb_helper_fill_fix(info, fb_helper->fb); } mutex_unlock(&dev->mode_config.mutex); @@ -973,7 +953,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, if (new_fb) { info->var.pixclock = 0; - drm_fb_helper_fill_fix(info, fb_helper->fb); if (register_framebuffer(info) < 0) { return -EINVAL; } @@ -1000,6 +979,26 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, } EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) +{ + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : + FB_VISUAL_TRUECOLOR; + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + info->fix.type_aux = 0; + + info->fix.line_length = pitch; + return; +} +EXPORT_SYMBOL(drm_fb_helper_fill_fix); + void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 19a3d58044dd..3601466c5502 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -703,7 +703,7 @@ static void print_error_buffers(struct seq_file *m, seq_printf(m, "%s [%d]:\n", name, count); while (count--) { - seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s", + seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", err->gtt_offset, err->size, err->read_domains, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0de75a23f8e7..72fea2bcfc4f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600); unsigned int i915_lvds_downclock = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); +unsigned int i915_panel_use_ssc = 1; +module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); + bool i915_try_reset = true; module_param_named(reset, i915_try_reset, bool, 0600); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 385fc7ec39d3..5969f46ac2d6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -954,6 +954,7 @@ extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; extern unsigned int i915_powersave; extern unsigned int i915_lvds_downclock; +extern unsigned int i915_panel_use_ssc; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e69834341ef0..dcfdf4151b6d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -464,8 +464,6 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, int ret; list_for_each_entry(obj, objects, exec_list) { - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object(obj, eb); if (ret) return ret; @@ -505,6 +503,9 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, list_move(&obj->exec_list, &ordered_objects); else list_move_tail(&obj->exec_list, &ordered_objects); + + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; } list_splice(&ordered_objects, objects); @@ -636,6 +637,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, { struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_object *obj; + int *reloc_offset; int i, total, ret; /* We may process another execbuffer during the unlock... 
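A note on the drm_fb_helper change above: drm_fb_helper_fill_fix() is dropped from the core set_par/probe paths and now takes the pitch and depth explicitly, so each driver calls it from its own fbdev setup code, which is exactly what the intel, nouveau and radeon hunks further down add. A sketch of the resulting call order in a driver's fbdev-create path; the function name and parameter list here are illustrative, not part of the patch:

#include <linux/fb.h>
#include "drmP.h"
#include "drm_fb_helper.h"

/* illustrative fragment only, assuming the framebuffer is already created */
static int mydrv_fbdev_create(struct drm_fb_helper *helper,
			      struct drm_fb_helper_surface_size *sizes,
			      struct fb_info *info,
			      struct drm_framebuffer *fb)
{
	/* fb->pitch and fb->depth are known once the framebuffer exists */
	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

	return register_framebuffer(info);
}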
*/ @@ -653,8 +655,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, for (i = 0; i < count; i++) total += exec[i].relocation_count; + reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset)); reloc = drm_malloc_ab(total, sizeof(*reloc)); - if (reloc == NULL) { + if (reloc == NULL || reloc_offset == NULL) { + drm_free_large(reloc); + drm_free_large(reloc_offset); mutex_lock(&dev->struct_mutex); return -ENOMEM; } @@ -672,6 +677,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } + reloc_offset[i] = total; total += exec[i].relocation_count; } @@ -705,17 +711,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, if (ret) goto err; - total = 0; list_for_each_entry(obj, objects, exec_list) { - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; + int offset = obj->exec_entry - exec; ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, - reloc + total); + reloc + reloc_offset[offset]); if (ret) goto err; - - total += exec->relocation_count; - exec++; } /* Leave the user relocations as are, this is the painfully slow path, @@ -726,6 +727,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, err: drm_free_large(reloc); + drm_free_large(reloc_offset); return ret; } @@ -770,7 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, if (from == NULL || to == from) return 0; - if (INTEL_INFO(obj->base.dev)->gen < 6) + /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ + if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) return i915_gem_object_wait_rendering(obj, true); idx = intel_ring_sync_index(from, to); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e418e8bb61e6..b8e509ae065e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -720,7 +720,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, if (obj->ring != ring) continue; - if (!i915_seqno_passed(obj->last_rendering_seqno, seqno)) + if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) continue; if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b0b1200ed650..0b44956c336b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -264,17 +264,12 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->int_crt_support = general->int_crt_support; dev_priv->lvds_use_ssc = general->enable_ssc; - if (dev_priv->lvds_use_ssc) { - if (IS_I85X(dev)) - dev_priv->lvds_ssc_freq = - general->ssc_freq ? 66 : 48; - else if (IS_GEN5(dev) || IS_GEN6(dev)) - dev_priv->lvds_ssc_freq = - general->ssc_freq ? 100 : 120; - else - dev_priv->lvds_ssc_freq = - general->ssc_freq ? 100 : 96; - } + if (IS_I85X(dev)) + dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; + else if (IS_GEN5(dev) || IS_GEN6(dev)) + dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; + else + dev_priv->lvds_ssc_freq = general->ssc_freq ? 
100 : 96; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 25d96889d7d2..98967f3b7724 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3822,6 +3822,11 @@ static void intel_update_watermarks(struct drm_device *dev) sr_hdisplay, sr_htotal, pixel_size); } +static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) +{ + return dev_priv->lvds_use_ssc && i915_panel_use_ssc; +} + static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -3884,7 +3889,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, num_connectors++; } - if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { + if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", refclk / 1000); @@ -4059,7 +4064,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(200); if (has_edp_encoder) { - if (dev_priv->lvds_use_ssc) { + if (intel_panel_use_ssc(dev_priv)) { temp |= DREF_SSC1_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); @@ -4070,13 +4075,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Enable CPU source on CPU attached eDP */ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { - if (dev_priv->lvds_use_ssc) + if (intel_panel_use_ssc(dev_priv)) temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; else temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else { /* Enable SSC on PCH eDP if needed */ - if (dev_priv->lvds_use_ssc) { + if (intel_panel_use_ssc(dev_priv)) { DRM_ERROR("enabling SSC on PCH\n"); temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; } @@ -4104,7 +4109,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int factor = 21; if (is_lvds) { - if ((dev_priv->lvds_use_ssc && + if ((intel_panel_use_ssc(dev_priv) && dev_priv->lvds_ssc_freq == 100) || (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) factor = 25; @@ -4183,7 +4188,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* XXX: just matching BIOS for now */ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; - else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) + else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index ee145a257287..512782728e51 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -148,6 +148,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, // memset(info->screen_base, 0, size); + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); info->pixmap.size = 64*1024; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8f4f6bd33ee9..ace8d5d30dd2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -702,6 +702,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i915GMm-HFS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), + }, + }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", diff --git a/drivers/gpu/drm/i915/intel_panel.c 
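Taken together, the i915 hunks above add a single override point for panel SSC: the VBT-derived dev_priv->lvds_use_ssc is always combined with the new lvds_use_ssc module parameter through intel_panel_use_ssc(), and the clock-selection sites in intel_crtc_mode_set() now ask the helper instead of reading the field directly. The two pieces, quoted from the hunks above and shown side by side for clarity (they live in i915_drv.c and intel_display.c respectively):

unsigned int i915_panel_use_ssc = 1;	/* default: follow the VBT setting */
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);

static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
}

Because the parameter is writable (mode 0600), SSC can be vetoed at load time with i915.lvds_use_ssc=0 or changed later through /sys/module/i915/parameters/lvds_use_ssc; the value is consulted again on the next mode set.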
b/drivers/gpu/drm/i915/intel_panel.c index e00d200df3db..c65992df458d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -278,6 +278,6 @@ void intel_panel_setup_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->backlight_level = intel_panel_get_max_backlight(dev); + dev_priv->backlight_level = intel_panel_get_backlight(dev); dev_priv->backlight_enabled = dev_priv->backlight_level != 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index b14c81110575..d3a9c6e02477 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -59,7 +59,7 @@ static int nv40_set_intensity(struct backlight_device *bd) return 0; } -static struct backlight_ops nv40_bl_ops = { +static const struct backlight_ops nv40_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = nv40_get_intensity, .update_status = nv40_set_intensity, @@ -82,7 +82,7 @@ static int nv50_set_intensity(struct backlight_device *bd) return 0; } -static struct backlight_ops nv50_bl_ops = { +static const struct backlight_ops nv50_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = nv50_get_intensity, .update_status = nv50_set_intensity, diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index d3046559bf05..2aef5cd3acf5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -1927,7 +1927,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * offset (8 bit): opcode * offset + 1 (16 bit): time * - * Sleep for "time" miliseconds. + * Sleep for "time" milliseconds. */ unsigned time = ROM16(bios->data[offset + 1]); @@ -1935,7 +1935,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!iexec->execute) return 3; - BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n", + BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n", offset, time); msleep(time); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index a26d04740c88..6d56a54b6e2e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -359,6 +359,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); info->screen_size = size; + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); /* Set aperture base/size for vesafb takeover */ diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 58a0cd02c0a2..04b269d14a59 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -1629,7 +1629,7 @@ typedef struct _GET_ENGINE_CLOCK_PARAMETERS typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS { USHORT usPrescale; //Ratio between Engine clock and I2C clock - USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID + USHORT usVRAMAddress; //Address in Frame Buffer where to pace raw EDID USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte UCHAR ucSlaveAddr; //Read from which slave diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index ca32e9c1e91d..66324b5bb5ba 100644 --- 
a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -225,6 +225,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, strcpy(info->fix.id, "radeondrmfb"); + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); + info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 0e1edd7311ff..09aea5f1556d 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig @@ -3,7 +3,6 @@ config STUB_POULSBO depends on PCI # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick - select VIDEO_OUTPUT_CONTROL if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index ffbc278647bf..24cca2f69dfc 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -295,6 +295,23 @@ config HID_MONTEREY ---help--- Support for Monterey Genius KB29E. +config HID_MULTITOUCH + tristate "HID Multitouch panels" + depends on USB_HID + ---help--- + Generic support for HID multitouch panels. + + Say Y here if you have one of the following devices: + - Cypress TrueTouch panels + - Hanvon dual touch panels + - Pixcir dual touch panels + - 'Sensing Win7-TwoFinger' panel by GeneralTouch + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called hid-multitouch. + config HID_NTRIG tristate "N-Trig touch screen" depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 6eae9a90b8dd..6efc2a0370ad 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o obj-$(CONFIG_HID_MOSART) += hid-mosart.o +obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o obj-$(CONFIG_HID_ORTEK) += hid-ortek.o obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c index 375b50929a50..1ea066c55201 100644 --- a/drivers/hid/hid-cando.c +++ b/drivers/hid/hid-cando.c @@ -235,6 +235,8 @@ static void cando_remove(struct hid_device *hdev) static const struct hid_device_id cando_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, + USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 261168607c91..d678cf3d33d5 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1312,7 +1312,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, 
USB_DEVICE_ID_CHERRY_CYMOTION) }, @@ -1324,6 +1326,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, @@ -1335,11 +1338,13 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) }, { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, @@ -1422,6 +1427,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, @@ -1640,7 +1646,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f65cace77729..92a0d61a7379 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -140,7 +140,9 @@ #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 #define USB_VENDOR_ID_CANDO 0x2087 +#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01 @@ -186,6 +188,7 @@ #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61 #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64 #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1 +#define 
USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a @@ -236,6 +239,7 @@ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 @@ -318,6 +322,9 @@ #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff +#define USB_VENDOR_ID_HANVON 0x20b3 +#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18 + #define USB_VENDOR_ID_HAPP 0x078b #define USB_DEVICE_ID_UGCI_DRIVING 0x0010 #define USB_DEVICE_ID_UGCI_FLYING 0x0020 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index e60fdb88101f..7f552bfad32c 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -290,6 +290,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel goto ignore; } + if (field->report_type == HID_FEATURE_REPORT) { + if (device->driver->feature_mapping) { + device->driver->feature_mapping(device, hidinput, field, + usage); + } + goto ignore; + } + if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); @@ -839,7 +847,6 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) struct hid_input *hidinput = NULL; struct input_dev *input_dev; int i, j, k; - int max_report_type = HID_OUTPUT_REPORT; INIT_LIST_HEAD(&hid->inputs); @@ -856,10 +863,11 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) return -1; } - if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) - max_report_type = HID_INPUT_REPORT; + for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) { + if (k == HID_OUTPUT_REPORT && + hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) + continue; - for (k = HID_INPUT_REPORT; k <= max_report_type; k++) list_for_each_entry(report, &hid->report_enum[k].report_list, list) { if (!report->maxfield) @@ -912,6 +920,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) hidinput = NULL; } } + } if (hidinput && input_register_device(hidinput->input)) goto out_cleanup; diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c index 9fb050ce6f04..aed7ffe36283 100644 --- a/drivers/hid/hid-mosart.c +++ b/drivers/hid/hid-mosart.c @@ -257,6 +257,7 @@ static void mosart_remove(struct hid_device *hdev) static const struct hid_device_id mosart_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) }, { } }; MODULE_DEVICE_TABLE(hid, mosart_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c new file mode 100644 index 000000000000..07d3183fdde5 --- /dev/null +++ b/drivers/hid/hid-multitouch.c @@ -0,0 +1,516 @@ +/* + * HID driver for multitouch panels + * + * Copyright (c) 2010-2011 Stephane Chatty + * Copyright (c) 2010-2011 Benjamin Tissoires + * Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France + * + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "usbhid/usbhid.h" + + +MODULE_AUTHOR("Stephane Chatty "); +MODULE_DESCRIPTION("HID multitouch panels"); +MODULE_LICENSE("GPL"); + +#include "hid-ids.h" + +/* quirks to control the device */ +#define MT_QUIRK_NOT_SEEN_MEANS_UP (1 << 0) +#define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1) +#define MT_QUIRK_CYPRESS (1 << 2) +#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3) +#define MT_QUIRK_VALID_IS_INRANGE (1 << 4) +#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5) + +struct mt_slot { + __s32 x, y, p, w, h; + __s32 contactid; /* the device ContactID assigned to this slot */ + bool touch_state; /* is the touch valid? */ + bool seen_in_this_frame;/* has this slot been updated */ +}; + +struct mt_device { + struct mt_slot curdata; /* placeholder of incoming data */ + struct mt_class *mtclass; /* our mt device class */ + unsigned last_field_index; /* last field index of the report */ + unsigned last_slot_field; /* the last field of a slot */ + __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ + __u8 num_received; /* how many contacts we received */ + __u8 num_expected; /* expected last contact index */ + bool curvalid; /* is the current contact valid? */ + struct mt_slot slots[0]; /* first slot */ +}; + +struct mt_class { + __s32 name; /* MT_CLS */ + __s32 quirks; + __s32 sn_move; /* Signal/noise ratio for move events */ + __s32 sn_pressure; /* Signal/noise ratio for pressure events */ + __u8 maxcontacts; +}; + +/* classes of device behavior */ +#define MT_CLS_DEFAULT 1 +#define MT_CLS_DUAL1 2 +#define MT_CLS_DUAL2 3 +#define MT_CLS_CYPRESS 4 + +/* + * these device-dependent functions determine what slot corresponds + * to a valid contact that was just read. + */ + +static int cypress_compute_slot(struct mt_device *td) +{ + if (td->curdata.contactid != 0 || td->num_received == 0) + return td->curdata.contactid; + else + return -1; +} + +static int find_slot_from_contactid(struct mt_device *td) +{ + int i; + for (i = 0; i < td->mtclass->maxcontacts; ++i) { + if (td->slots[i].contactid == td->curdata.contactid && + td->slots[i].touch_state) + return i; + } + for (i = 0; i < td->mtclass->maxcontacts; ++i) { + if (!td->slots[i].seen_in_this_frame && + !td->slots[i].touch_state) + return i; + } + /* should not occurs. If this happens that means + * that the device sent more touches that it says + * in the report descriptor. It is ignored then. */ + return -1; +} + +struct mt_class mt_classes[] = { + { .name = MT_CLS_DEFAULT, + .quirks = MT_QUIRK_VALID_IS_INRANGE, + .maxcontacts = 10 }, + { .name = MT_CLS_DUAL1, + .quirks = MT_QUIRK_VALID_IS_INRANGE | + MT_QUIRK_SLOT_IS_CONTACTID, + .maxcontacts = 2 }, + { .name = MT_CLS_DUAL2, + .quirks = MT_QUIRK_VALID_IS_INRANGE | + MT_QUIRK_SLOT_IS_CONTACTNUMBER, + .maxcontacts = 2 }, + { .name = MT_CLS_CYPRESS, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | + MT_QUIRK_CYPRESS, + .maxcontacts = 10 }, + + { } +}; + +static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage) +{ + if (usage->hid == HID_DG_INPUTMODE) { + struct mt_device *td = hid_get_drvdata(hdev); + td->inputmode = field->report->id; + } +} + +static void set_abs(struct input_dev *input, unsigned int code, + struct hid_field *field, int snratio) +{ + int fmin = field->logical_minimum; + int fmax = field->logical_maximum; + int fuzz = snratio ? 
(fmax - fmin) / snratio : 0; + input_set_abs_params(input, code, fmin, fmax, fuzz, 0); +} + +static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + struct mt_device *td = hid_get_drvdata(hdev); + struct mt_class *cls = td->mtclass; + switch (usage->hid & HID_USAGE_PAGE) { + + case HID_UP_GENDESK: + switch (usage->hid) { + case HID_GD_X: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_POSITION_X); + set_abs(hi->input, ABS_MT_POSITION_X, field, + cls->sn_move); + /* touchscreen emulation */ + set_abs(hi->input, ABS_X, field, cls->sn_move); + td->last_slot_field = usage->hid; + return 1; + case HID_GD_Y: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_POSITION_Y); + set_abs(hi->input, ABS_MT_POSITION_Y, field, + cls->sn_move); + /* touchscreen emulation */ + set_abs(hi->input, ABS_Y, field, cls->sn_move); + td->last_slot_field = usage->hid; + return 1; + } + return 0; + + case HID_UP_DIGITIZER: + switch (usage->hid) { + case HID_DG_INRANGE: + td->last_slot_field = usage->hid; + return 1; + case HID_DG_CONFIDENCE: + td->last_slot_field = usage->hid; + return 1; + case HID_DG_TIPSWITCH: + hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); + input_set_capability(hi->input, EV_KEY, BTN_TOUCH); + td->last_slot_field = usage->hid; + return 1; + case HID_DG_CONTACTID: + input_mt_init_slots(hi->input, + td->mtclass->maxcontacts); + td->last_slot_field = usage->hid; + return 1; + case HID_DG_WIDTH: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_TOUCH_MAJOR); + td->last_slot_field = usage->hid; + return 1; + case HID_DG_HEIGHT: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_TOUCH_MINOR); + field->logical_maximum = 1; + field->logical_minimum = 0; + set_abs(hi->input, ABS_MT_ORIENTATION, field, 0); + td->last_slot_field = usage->hid; + return 1; + case HID_DG_TIPPRESSURE: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_PRESSURE); + set_abs(hi->input, ABS_MT_PRESSURE, field, + cls->sn_pressure); + /* touchscreen emulation */ + set_abs(hi->input, ABS_PRESSURE, field, + cls->sn_pressure); + td->last_slot_field = usage->hid; + return 1; + case HID_DG_CONTACTCOUNT: + td->last_field_index = field->report->maxfield - 1; + return 1; + case HID_DG_CONTACTMAX: + /* we don't set td->last_slot_field as contactcount and + * contact max are global to the report */ + return -1; + } + /* let hid-input decide for the others */ + return 0; + + case 0xff000000: + /* we do not want to map these: no input-oriented meaning */ + return -1; + } + + return 0; +} + +static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + if (usage->type == EV_KEY || usage->type == EV_ABS) + set_bit(usage->type, hi->input->evbit); + + return -1; +} + +static int mt_compute_slot(struct mt_device *td) +{ + __s32 quirks = td->mtclass->quirks; + + if (quirks & MT_QUIRK_SLOT_IS_CONTACTID) + return td->curdata.contactid; + + if (quirks & MT_QUIRK_CYPRESS) + return cypress_compute_slot(td); + + if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER) + return td->num_received; + + return find_slot_from_contactid(td); +} + +/* + * this function is called when a whole contact has been processed, + * so that it can assign it to a slot and store the data there + */ +static void mt_complete_slot(struct mt_device *td) +{ + td->curdata.seen_in_this_frame = true; + if (td->curvalid) { + int slotnum = mt_compute_slot(td); + 
+ if (slotnum >= 0 && slotnum < td->mtclass->maxcontacts) + td->slots[slotnum] = td->curdata; + } + td->num_received++; +} + + +/* + * this function is called when a whole packet has been received and processed, + * so that it can decide what to send to the input layer. + */ +static void mt_emit_event(struct mt_device *td, struct input_dev *input) +{ + int i; + + for (i = 0; i < td->mtclass->maxcontacts; ++i) { + struct mt_slot *s = &(td->slots[i]); + if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) && + !s->seen_in_this_frame) { + s->touch_state = false; + } + + input_mt_slot(input, i); + input_mt_report_slot_state(input, MT_TOOL_FINGER, + s->touch_state); + if (s->touch_state) { + input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x); + input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y); + input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p); + input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, s->w); + input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, s->h); + } + s->seen_in_this_frame = false; + + } + + input_mt_report_pointer_emulation(input, true); + input_sync(input); + td->num_received = 0; +} + + + +static int mt_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value) +{ + struct mt_device *td = hid_get_drvdata(hid); + __s32 quirks = td->mtclass->quirks; + + if (hid->claimed & HID_CLAIMED_INPUT) { + switch (usage->hid) { + case HID_DG_INRANGE: + if (quirks & MT_QUIRK_VALID_IS_INRANGE) + td->curvalid = value; + break; + case HID_DG_TIPSWITCH: + if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) + td->curvalid = value; + td->curdata.touch_state = value; + break; + case HID_DG_CONFIDENCE: + if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE) + td->curvalid = value; + break; + case HID_DG_CONTACTID: + td->curdata.contactid = value; + break; + case HID_DG_TIPPRESSURE: + td->curdata.p = value; + break; + case HID_GD_X: + td->curdata.x = value; + break; + case HID_GD_Y: + td->curdata.y = value; + break; + case HID_DG_WIDTH: + td->curdata.w = value; + break; + case HID_DG_HEIGHT: + td->curdata.h = value; + break; + case HID_DG_CONTACTCOUNT: + /* + * Includes multi-packet support where subsequent + * packets are sent with zero contactcount. + */ + if (value) + td->num_expected = value; + break; + + default: + /* fallback to the generic hidinput handling */ + return 0; + } + + if (usage->hid == td->last_slot_field) + mt_complete_slot(td); + + if (field->index == td->last_field_index + && td->num_received >= td->num_expected) + mt_emit_event(td, field->hidinput->input); + + } + + /* we have handled the hidinput part, now remains hiddev */ + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) + hid->hiddev_hid_event(hid, field, usage, value); + + return 1; +} + +static void mt_set_input_mode(struct hid_device *hdev) +{ + struct mt_device *td = hid_get_drvdata(hdev); + struct hid_report *r; + struct hid_report_enum *re; + + if (td->inputmode < 0) + return; + + re = &(hdev->report_enum[HID_FEATURE_REPORT]); + r = re->report_id_hash[td->inputmode]; + if (r) { + r->field[0]->value[0] = 0x02; + usbhid_submit_report(hdev, r, USB_DIR_OUT); + } +} + +static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + int ret, i; + struct mt_device *td; + struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */ + + for (i = 0; mt_classes[i].name ; i++) { + if (id->driver_data == mt_classes[i].name) { + mtclass = &(mt_classes[i]); + break; + } + } + + /* This allows the driver to correctly support devices + * that emit events over several HID messages. 
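mt_emit_event() above is a straightforward use of the slotted multi-touch protocol from include/linux/input/mt.h: select each slot, report its tool state, send coordinates only while the contact is down, then close the frame with pointer emulation and a sync. A stripped-down sketch of that sequence, assuming the input device was prepared earlier with input_mt_init_slots(); the report_frame() wrapper and the contact array are illustrative, not taken from the driver above:

#include <linux/input/mt.h>

struct contact {
	bool down;
	int x, y;
};

static void report_frame(struct input_dev *input,
			 const struct contact *contacts, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER,
					   contacts[i].down);
		if (contacts[i].down) {
			input_event(input, EV_ABS, ABS_MT_POSITION_X,
				    contacts[i].x);
			input_event(input, EV_ABS, ABS_MT_POSITION_Y,
				    contacts[i].y);
		}
	}

	/* BTN_TOUCH / ABS_X / ABS_Y single-touch emulation, then end of frame */
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);
}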
+ */ + hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC; + + td = kzalloc(sizeof(struct mt_device) + + mtclass->maxcontacts * sizeof(struct mt_slot), + GFP_KERNEL); + if (!td) { + dev_err(&hdev->dev, "cannot allocate multitouch data\n"); + return -ENOMEM; + } + td->mtclass = mtclass; + td->inputmode = -1; + hid_set_drvdata(hdev, td); + + ret = hid_parse(hdev); + if (ret != 0) + goto fail; + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) + goto fail; + + mt_set_input_mode(hdev); + + return 0; + +fail: + kfree(td); + return ret; +} + +#ifdef CONFIG_PM +static int mt_reset_resume(struct hid_device *hdev) +{ + mt_set_input_mode(hdev); + return 0; +} +#endif + +static void mt_remove(struct hid_device *hdev) +{ + struct mt_device *td = hid_get_drvdata(hdev); + hid_hw_stop(hdev); + kfree(td); + hid_set_drvdata(hdev, NULL); +} + +static const struct hid_device_id mt_devices[] = { + + /* Cypress panel */ + { .driver_data = MT_CLS_CYPRESS, + HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, + USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, + + /* GeneralTouch panel */ + { .driver_data = MT_CLS_DUAL2, + HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) }, + + /* PixCir-based panels */ + { .driver_data = MT_CLS_DUAL1, + HID_USB_DEVICE(USB_VENDOR_ID_HANVON, + USB_DEVICE_ID_HANVON_MULTITOUCH) }, + { .driver_data = MT_CLS_DUAL1, + HID_USB_DEVICE(USB_VENDOR_ID_CANDO, + USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) }, + + { } +}; +MODULE_DEVICE_TABLE(hid, mt_devices); + +static const struct hid_usage_id mt_grabbed_usages[] = { + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} +}; + +static struct hid_driver mt_driver = { + .name = "hid-multitouch", + .id_table = mt_devices, + .probe = mt_probe, + .remove = mt_remove, + .input_mapping = mt_input_mapping, + .input_mapped = mt_input_mapped, + .feature_mapping = mt_feature_mapping, + .usage_table = mt_grabbed_usages, + .event = mt_event, +#ifdef CONFIG_PM + .reset_resume = mt_reset_resume, +#endif +}; + +static int __init mt_init(void) +{ + return hid_register_driver(&mt_driver); +} + +static void __exit mt_exit(void) +{ + hid_unregister_driver(&mt_driver); +} + +module_init(mt_init); +module_exit(mt_exit); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 76b9a149c7df..9a94b643ccde 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -35,7 +35,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, - { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bdc13d28b1ea..35f00dae3676 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -809,10 +809,10 @@ config SENSORS_DME1737 will be called dme1737. 
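Before the hwmon changes start, one practical note on the hid-multitouch driver just added: hooking up a new panel takes three small steps, all visible in this patch. Define the vendor/product IDs in hid-ids.h, list the device in hid_have_special_driver[] in hid-core.c so hid-core defers to a specialised driver, and append an entry with the appropriate MT_CLS_* behaviour class to mt_devices[]. As a purely hypothetical illustration (these IDs do not exist in the patch), a dual-touch panel that reports a reliable ContactID would get an mt_devices[] entry shaped like this:

	/* hypothetical device, shown only to illustrate the table entry */
	{ .driver_data = MT_CLS_DUAL1,
		HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE,
			USB_DEVICE_ID_EXAMPLE_DUALTOUCH) },

MT_CLS_DUAL1 selects the "valid when in range, slot is the ContactID" behaviour, which is the class the patch uses for the PixCir-based Hanvon and Cando panels.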
config SENSORS_EMC1403 - tristate "SMSC EMC1403 thermal sensor" + tristate "SMSC EMC1403/23 thermal sensor" depends on I2C help - If you say yes here you get support for the SMSC EMC1403 + If you say yes here you get support for the SMSC EMC1403/23 temperature monitoring chip. Threshold values can be configured using sysfs. diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 0727ad250793..9e234b981b83 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -20,7 +20,7 @@ * Alarms 16-bit map of active alarms * Analog Out 0..1250 mV output * - * Chassis Intrusion: clear CI latch with 'echo 1 > chassis_clear' + * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm' * * Test hardware: Intel SE440BX-2 desktop motherboard --Grant * @@ -476,13 +476,16 @@ static ssize_t set_aout(struct device *dev, static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout); /* chassis_clear */ -static ssize_t chassis_clear(struct device *dev, +static ssize_t chassis_clear_legacy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); unsigned long val = simple_strtol(buf, NULL, 10); + dev_warn(dev, "Attribute chassis_clear is deprecated, " + "use intrusion0_alarm instead\n"); + if (val == 1) { i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80); @@ -490,7 +493,29 @@ static ssize_t chassis_clear(struct device *dev, } return count; } -static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear); +static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear_legacy); + +static ssize_t chassis_clear(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm9240_data *data = i2c_get_clientdata(client); + unsigned long val; + + if (strict_strtoul(buf, 10, &val) || val != 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80); + data->valid = 0; /* Force cache refresh */ + mutex_unlock(&data->update_lock); + dev_dbg(&client->dev, "chassis intrusion latch cleared\n"); + + return count; +} +static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm, + chassis_clear, 12); static struct attribute *adm9240_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, @@ -532,6 +557,7 @@ static struct attribute *adm9240_attributes[] = { &dev_attr_alarms.attr, &dev_attr_aout_output.attr, &dev_attr_chassis_clear.attr, + &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, &dev_attr_cpu0_vid.attr, NULL }; diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index aac85f3aed50..c42c5a69a664 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -4,7 +4,7 @@ This driver is based on the lm75 and other lm_sensors/hwmon drivers - Written by Steve Hardy + Written by Steve Hardy Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf @@ -271,7 +271,7 @@ static void __exit sensors_ads7828_exit(void) i2c_del_driver(&ads7828_driver); } -MODULE_AUTHOR("Steve Hardy "); +MODULE_AUTHOR("Steve Hardy "); MODULE_DESCRIPTION("ADS7828 driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index e9a610bfd0cc..d9c592713919 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -77,12 +77,14 @@ enum chips { dme1737, sch5027, sch311x, sch5127 }; * in4 +12V * in5 VTR (+3.3V stby) * in6 Vbat + * in7 Vtrip (sch5127 only) * * 
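The adm9240 change above moves the driver to the standard hwmon intrusion interface: the old chassis_clear attribute stays but warns when used, while the new intrusion0_alarm attribute follows the usual convention that writing 0 (and only 0) clears the latch. A minimal sketch of that convention in a store callback; the actual register write is left as a placeholder comment:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t intrusion_clear(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 10, &val) || val != 0)
		return -EINVAL;		/* only "0" is a valid clear request */

	/* clear the chip's chassis-intrusion latch here */

	return count;
}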
--------------------------------------------------------------------- */ -/* Voltages (in) numbered 0-6 (ix) */ -#define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) \ - : 0x94 + (ix)) +/* Voltages (in) numbered 0-7 (ix) */ +#define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) : \ + (ix) < 7 ? 0x94 + (ix) : \ + 0x1f) #define DME1737_REG_IN_MIN(ix) ((ix) < 5 ? 0x44 + (ix) * 2 \ : 0x91 + (ix) * 2) #define DME1737_REG_IN_MAX(ix) ((ix) < 5 ? 0x45 + (ix) * 2 \ @@ -101,10 +103,11 @@ enum chips { dme1737, sch5027, sch311x, sch5127 }; * IN_TEMP_LSB(1) = [temp3, temp1] * IN_TEMP_LSB(2) = [in4, temp2] * IN_TEMP_LSB(3) = [in3, in0] - * IN_TEMP_LSB(4) = [in2, in1] */ + * IN_TEMP_LSB(4) = [in2, in1] + * IN_TEMP_LSB(5) = [res, in7] */ #define DME1737_REG_IN_TEMP_LSB(ix) (0x84 + (ix)) -static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0}; -static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4}; +static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0, 5}; +static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4, 4}; static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1}; static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0}; @@ -145,7 +148,7 @@ static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0}; #define DME1737_REG_ALARM1 0x41 #define DME1737_REG_ALARM2 0x42 #define DME1737_REG_ALARM3 0x83 -static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17}; +static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17, 18}; static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6}; static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23}; @@ -190,6 +193,7 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23}; #define HAS_PWM_MIN (1 << 4) /* bit 4 */ #define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */ #define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */ +#define HAS_IN7 (1 << 17) /* bit 17 */ /* --------------------------------------------------------------------- * Data structures and manipulation thereof @@ -213,9 +217,9 @@ struct dme1737_data { u32 has_features; /* Register values */ - u16 in[7]; - u8 in_min[7]; - u8 in_max[7]; + u16 in[8]; + u8 in_min[8]; + u8 in_max[8]; s16 temp[3]; s8 temp_min[3]; s8 temp_max[3]; @@ -247,7 +251,7 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300, static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300, 3300}; static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300, - 3300}; + 3300, 1500}; #define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \ (type) == sch5027 ? IN_NOMINAL_SCH5027 : \ (type) == sch5127 ? IN_NOMINAL_SCH5127 : \ @@ -580,7 +584,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) { struct dme1737_data *data = dev_get_drvdata(dev); int ix; - u8 lsb[5]; + u8 lsb[6]; mutex_lock(&data->update_lock); @@ -603,6 +607,9 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Voltage inputs are stored as 16 bit values even * though they have only 12 bits resolution. This is * to make it consistent with the temp inputs. */ + if (ix == 7 && !(data->has_features & HAS_IN7)) { + continue; + } data->in[ix] = dme1737_read(data, DME1737_REG_IN(ix)) << 8; data->in_min[ix] = dme1737_read(data, @@ -635,10 +642,16 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) * which the registers are read (MSB first, then LSB) is * important! 
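For in7 the dme1737 voltage register macro above gains a second branch. Expanded, the new mapping is easier to see at a glance (derived directly from the macro and the IN_TEMP_LSB comment above, with in7 being the SCH5127-only Vtrip rail gated by HAS_IN7):

/* DME1737_REG_IN(ix), as defined above, expands to:
 *   ix 0..4  ->  0x20 + ix   (0x20..0x24)
 *   ix 5..6  ->  0x94 + ix   (0x99, 0x9a)
 *   ix 7     ->  0x1f        (SCH5127 Vtrip, only when HAS_IN7 is set)
 * and in7's low nibble arrives via the new IN_TEMP_LSB(5) register.
 */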
*/ for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) { + if (ix == 5 && !(data->has_features & HAS_IN7)) { + continue; + } lsb[ix] = dme1737_read(data, DME1737_REG_IN_TEMP_LSB(ix)); } for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) { + if (ix == 7 && !(data->has_features & HAS_IN7)) { + continue; + } data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] << DME1737_REG_IN_LSB_SHL[ix]) & 0xf0; } @@ -762,7 +775,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* --------------------------------------------------------------------- * Voltage sysfs attributes - * ix = [0-5] + * ix = [0-7] * --------------------------------------------------------------------- */ #define SYS_IN_INPUT 0 @@ -1439,7 +1452,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, * Sysfs device attribute defines and structs * --------------------------------------------------------------------- */ -/* Voltages 0-6 */ +/* Voltages 0-7 */ #define SENSOR_DEVICE_ATTR_IN(ix) \ static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \ @@ -1458,6 +1471,7 @@ SENSOR_DEVICE_ATTR_IN(3); SENSOR_DEVICE_ATTR_IN(4); SENSOR_DEVICE_ATTR_IN(5); SENSOR_DEVICE_ATTR_IN(6); +SENSOR_DEVICE_ATTR_IN(7); /* Temperatures 1-3 */ @@ -1576,7 +1590,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */ * created unconditionally. The attributes that need modification of their * permissions are created read-only and write permissions are added or removed * on the fly when required */ -static struct attribute *dme1737_attr[] ={ +static struct attribute *dme1737_attr[] = { /* Voltages */ &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, @@ -1681,7 +1695,7 @@ static const struct attribute_group dme1737_zone3_group = { }; -/* The following struct holds temp zone hysteresis related attributes, which +/* The following struct holds temp zone hysteresis related attributes, which * are not available in all chips. The following chips support them: * DME1737, SCH311x */ static struct attribute *dme1737_zone_hyst_attr[] = { @@ -1695,6 +1709,21 @@ static const struct attribute_group dme1737_zone_hyst_group = { .attrs = dme1737_zone_hyst_attr, }; +/* The following struct holds voltage in7 related attributes, which + * are not available in all chips. The following chips support them: + * SCH5127 */ +static struct attribute *dme1737_in7_attr[] = { + &sensor_dev_attr_in7_input.dev_attr.attr, + &sensor_dev_attr_in7_min.dev_attr.attr, + &sensor_dev_attr_in7_max.dev_attr.attr, + &sensor_dev_attr_in7_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_in7_group = { + .attrs = dme1737_in7_attr, +}; + /* The following structs hold the PWM attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. 
*/ @@ -1986,6 +2015,9 @@ static void dme1737_remove_files(struct device *dev) if (data->has_features & HAS_ZONE_HYST) { sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group); } + if (data->has_features & HAS_IN7) { + sysfs_remove_group(&dev->kobj, &dme1737_in7_group); + } sysfs_remove_group(&dev->kobj, &dme1737_group); if (!data->client) { @@ -1999,43 +2031,58 @@ static int dme1737_create_files(struct device *dev) int err, ix; /* Create a name attribute for ISA devices */ - if (!data->client && - (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) { - goto exit; + if (!data->client) { + err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr); + if (err) { + goto exit; + } } /* Create standard sysfs attributes */ - if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) { + err = sysfs_create_group(&dev->kobj, &dme1737_group); + if (err) { goto exit_remove; } /* Create chip-dependent sysfs attributes */ - if ((data->has_features & HAS_TEMP_OFFSET) && - (err = sysfs_create_group(&dev->kobj, - &dme1737_temp_offset_group))) { - goto exit_remove; + if (data->has_features & HAS_TEMP_OFFSET) { + err = sysfs_create_group(&dev->kobj, + &dme1737_temp_offset_group); + if (err) { + goto exit_remove; + } } - if ((data->has_features & HAS_VID) && - (err = sysfs_create_group(&dev->kobj, - &dme1737_vid_group))) { - goto exit_remove; + if (data->has_features & HAS_VID) { + err = sysfs_create_group(&dev->kobj, &dme1737_vid_group); + if (err) { + goto exit_remove; + } } - if ((data->has_features & HAS_ZONE3) && - (err = sysfs_create_group(&dev->kobj, - &dme1737_zone3_group))) { - goto exit_remove; + if (data->has_features & HAS_ZONE3) { + err = sysfs_create_group(&dev->kobj, &dme1737_zone3_group); + if (err) { + goto exit_remove; + } } - if ((data->has_features & HAS_ZONE_HYST) && - (err = sysfs_create_group(&dev->kobj, - &dme1737_zone_hyst_group))) { - goto exit_remove; + if (data->has_features & HAS_ZONE_HYST) { + err = sysfs_create_group(&dev->kobj, &dme1737_zone_hyst_group); + if (err) { + goto exit_remove; + } + } + if (data->has_features & HAS_IN7) { + err = sysfs_create_group(&dev->kobj, &dme1737_in7_group); + if (err) { + goto exit_remove; + } } /* Create fan sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { if (data->has_features & HAS_FAN(ix)) { - if ((err = sysfs_create_group(&dev->kobj, - &dme1737_fan_group[ix]))) { + err = sysfs_create_group(&dev->kobj, + &dme1737_fan_group[ix]); + if (err) { goto exit_remove; } } @@ -2044,14 +2091,17 @@ static int dme1737_create_files(struct device *dev) /* Create PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { if (data->has_features & HAS_PWM(ix)) { - if ((err = sysfs_create_group(&dev->kobj, - &dme1737_pwm_group[ix]))) { + err = sysfs_create_group(&dev->kobj, + &dme1737_pwm_group[ix]); + if (err) { goto exit_remove; } - if ((data->has_features & HAS_PWM_MIN) && ix < 3 && - (err = sysfs_create_file(&dev->kobj, - dme1737_auto_pwm_min_attr[ix]))) { - goto exit_remove; + if ((data->has_features & HAS_PWM_MIN) && (ix < 3)) { + err = sysfs_create_file(&dev->kobj, + dme1737_auto_pwm_min_attr[ix]); + if (err) { + goto exit_remove; + } } } } @@ -2188,7 +2238,7 @@ static int dme1737_init_device(struct device *dev) data->has_features |= HAS_ZONE3; break; case sch5127: - data->has_features |= HAS_FAN(2) | HAS_PWM(2); + data->has_features |= HAS_FAN(2) | HAS_PWM(2) | HAS_IN7; break; default: break; @@ -2281,8 +2331,9 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) 
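
Alongside the new in7 group, the hunk above converts every "if ((err = ...))" construct into the two-step "err = ...; if (err)" form while keeping the single exit_remove unwind path. A reduced model of that shape, with stub functions standing in for sysfs_create_group() and friends, shows why adding another optional group (such as the in7 one) stays a local change:

#include <stdio.h>

/* Stand-ins for sysfs_create_group()/sysfs_remove_group() */
static int create_base_group(void)     { return 0; }
static int create_optional_group(void) { return 0; }
static void remove_all_groups(void)    { puts("removing groups"); }

#define HAS_OPTIONAL (1 << 0)

static int create_files(unsigned int has_features)
{
        int err;

        err = create_base_group();
        if (err)
                goto exit_remove;

        if (has_features & HAS_OPTIONAL) {
                err = create_optional_group();
                if (err)
                        goto exit_remove;
        }
        return 0;

exit_remove:
        /* Removing groups that were never created is harmless, so one
         * cleanup label covers every failure point. */
        remove_all_groups();
        return err;
}

int main(void)
{
        return create_files(HAS_OPTIONAL) ? 1 : 0;
}
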
dme1737_sio_outb(sio_cip, 0x07, 0x0a); /* Get the base address of the runtime registers */ - if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | - dme1737_sio_inb(sio_cip, 0x61))) { + addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | + dme1737_sio_inb(sio_cip, 0x61); + if (!addr) { err = -ENODEV; goto exit; } @@ -2363,13 +2414,15 @@ static int dme1737_i2c_probe(struct i2c_client *client, mutex_init(&data->update_lock); /* Initialize the DME1737 chip */ - if ((err = dme1737_init_device(dev))) { + err = dme1737_init_device(dev); + if (err) { dev_err(dev, "Failed to initialize device.\n"); goto exit_kfree; } /* Create sysfs files */ - if ((err = dme1737_create_files(dev))) { + err = dme1737_create_files(dev); + if (err) { dev_err(dev, "Failed to create sysfs files.\n"); goto exit_kfree; } @@ -2446,8 +2499,9 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr) dme1737_sio_outb(sio_cip, 0x07, 0x0a); /* Get the base address of the runtime registers */ - if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | - dme1737_sio_inb(sio_cip, 0x61))) { + base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | + dme1737_sio_inb(sio_cip, 0x61); + if (!base_addr) { pr_err("Base address not set\n"); err = -ENODEV; goto exit; @@ -2476,18 +2530,21 @@ static int __init dme1737_isa_device_add(unsigned short addr) if (err) goto exit; - if (!(pdev = platform_device_alloc("dme1737", addr))) { + pdev = platform_device_alloc("dme1737", addr); + if (!pdev) { pr_err("Failed to allocate device\n"); err = -ENOMEM; goto exit; } - if ((err = platform_device_add_resources(pdev, &res, 1))) { + err = platform_device_add_resources(pdev, &res, 1); + if (err) { pr_err("Failed to add device resource (err = %d)\n", err); goto exit_device_put; } - if ((err = platform_device_add(pdev))) { + err = platform_device_add(pdev); + if (err) { pr_err("Failed to add device (err = %d)\n", err); goto exit_device_put; } @@ -2514,11 +2571,12 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev) dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n", (unsigned short)res->start, (unsigned short)res->start + DME1737_EXTENT - 1); - err = -EBUSY; - goto exit; - } + err = -EBUSY; + goto exit; + } - if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) { + data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL); + if (!data) { err = -ENOMEM; goto exit_release_region; } @@ -2565,13 +2623,15 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev) data->type == sch5127 ? 
"SCH5127" : "SCH311x", data->addr); /* Initialize the chip */ - if ((err = dme1737_init_device(dev))) { + err = dme1737_init_device(dev); + if (err) { dev_err(dev, "Failed to initialize device.\n"); goto exit_kfree; } /* Create sysfs files */ - if ((err = dme1737_create_files(dev))) { + err = dme1737_create_files(dev); + if (err) { dev_err(dev, "Failed to create sysfs files.\n"); goto exit_kfree; } @@ -2628,7 +2688,8 @@ static int __init dme1737_init(void) int err; unsigned short addr; - if ((err = i2c_add_driver(&dme1737_i2c_driver))) { + err = i2c_add_driver(&dme1737_i2c_driver); + if (err) { goto exit; } @@ -2641,12 +2702,14 @@ static int __init dme1737_init(void) return 0; } - if ((err = platform_driver_register(&dme1737_isa_driver))) { + err = platform_driver_register(&dme1737_isa_driver); + if (err) { goto exit_del_i2c_driver; } /* Sets global pdev as a side effect */ - if ((err = dme1737_isa_device_add(addr))) { + err = dme1737_isa_device_add(addr); + if (err) { goto exit_del_isa_driver; } diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 8dee3f38fdfb..5dea9faa1656 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -269,23 +269,30 @@ static int emc1403_detect(struct i2c_client *client, struct i2c_board_info *info) { int id; - /* Check if thermal chip is SMSC and EMC1403 */ + /* Check if thermal chip is SMSC and EMC1403 or EMC1423 */ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); if (id != 0x5d) return -ENODEV; + id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); + switch (id) { + case 0x21: + strlcpy(info->type, "emc1403", I2C_NAME_SIZE); + break; + case 0x23: + strlcpy(info->type, "emc1423", I2C_NAME_SIZE); + break; /* Note: 0x25 is the 1404 which is very similar and this driver could be extended */ - id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); - if (id != 0x21) + default: return -ENODEV; + } id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); if (id != 0x01) return -ENODEV; - strlcpy(info->type, "emc1403", I2C_NAME_SIZE); return 0; } @@ -342,6 +349,7 @@ static const unsigned short emc1403_address_list[] = { static const struct i2c_device_id emc1403_idtable[] = { { "emc1403", 0 }, + { "emc1423", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, emc1403_idtable); diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index d4d4ca65d371..aa6d8b686f82 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -49,7 +49,6 @@ #include /* Addresses to scan */ -static DEFINE_MUTEX(watchdog_mutex); static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; /* Insmod parameters */ @@ -850,7 +849,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf, static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - static struct watchdog_info ident = { + struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_CARDRESET, .identity = "FSC watchdog" @@ -858,7 +857,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar int i, ret = 0; struct fschmd_data *data = filp->private_data; - mutex_lock(&watchdog_mutex); switch (cmd) { case WDIOC_GETSUPPORT: ident.firmware_version = data->revision; @@ -915,7 +913,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar default: ret = -ENOTTY; } - mutex_unlock(&watchdog_mutex); return ret; } diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index a428a9264195..316b64823f7b 100644 --- a/drivers/hwmon/it87.c +++ 
b/drivers/hwmon/it87.c @@ -38,6 +38,8 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -1570,26 +1572,25 @@ static int __init it87_find(unsigned short *address, case 0xffff: /* No device at all */ goto exit; default: - pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n", - chip_type); + pr_debug("Unsupported chip (DEVID=0x%x)\n", chip_type); goto exit; } superio_select(PME); if (!(superio_inb(IT87_ACT_REG) & 0x01)) { - pr_info("it87: Device not activated, skipping\n"); + pr_info("Device not activated, skipping\n"); goto exit; } *address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1); if (*address == 0) { - pr_info("it87: Base address not set, skipping\n"); + pr_info("Base address not set, skipping\n"); goto exit; } err = 0; sio_data->revision = superio_inb(DEVREV) & 0x0f; - pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n", + pr_info("Found IT%04xF chip at 0x%x, revision %d\n", chip_type, *address, sio_data->revision); /* in8 (Vbat) is always internal */ @@ -1615,7 +1616,7 @@ static int __init it87_find(unsigned short *address, } else { /* We need at least 4 VID pins */ if (reg & 0x0f) { - pr_info("it87: VID is disabled (pins used for GPIO)\n"); + pr_info("VID is disabled (pins used for GPIO)\n"); sio_data->skip_vid = 1; } } @@ -1651,7 +1652,7 @@ static int __init it87_find(unsigned short *address, if (sio_data->type == it8720 && !(reg & (1 << 1))) { reg |= (1 << 1); superio_outb(IT87_SIO_PINX2_REG, reg); - pr_notice("it87: Routing internal VCCH to in7\n"); + pr_notice("Routing internal VCCH to in7\n"); } if (reg & (1 << 0)) sio_data->internal |= (1 << 0); @@ -1661,7 +1662,7 @@ static int __init it87_find(unsigned short *address, sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f; } if (sio_data->beep_pin) - pr_info("it87: Beeping is supported\n"); + pr_info("Beeping is supported\n"); /* Disable specific features based on DMI strings */ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); @@ -1675,8 +1676,7 @@ static int __init it87_find(unsigned short *address, the PWM2 duty cycle, so we disable it. I use the board name string as the trigger in case the same board is ever used in other systems. */ - pr_info("it87: Disabling pwm2 due to " - "hardware constraints\n"); + pr_info("Disabling pwm2 due to hardware constraints\n"); sio_data->skip_pwm = (1 << 1); } } @@ -2189,28 +2189,26 @@ static int __init it87_device_add(unsigned short address, pdev = platform_device_alloc(DRVNAME, address); if (!pdev) { err = -ENOMEM; - printk(KERN_ERR DRVNAME ": Device allocation failed\n"); + pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { - printk(KERN_ERR DRVNAME ": Device resource addition failed " - "(%d)\n", err); + pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct it87_sio_data)); if (err) { - printk(KERN_ERR DRVNAME ": Platform data allocation failed\n"); + pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { - printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", - err); + pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 72ff2c4e757d..4cb24eafe318 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -19,6 +19,8 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
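
The it87 conversion above works because of the pr_fmt() hook: defining it before the kernel headers are included makes every pr_info()/pr_err()/pr_debug() in the file prepend the module name, which is why the literal "it87: " prefixes are dropped from the format strings. A user-space mock of the same preprocessor mechanism, with printf standing in for printk and KBUILD_MODNAME defined by hand:

#include <stdio.h>

/* In the kernel, KBUILD_MODNAME comes from the build system and pr_fmt()
 * must be defined before linux/kernel.h; both are mocked here. */
#define KBUILD_MODNAME "it87"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        int chip_type = 0x8712, address = 0x290, revision = 8; /* made-up */

        /* Same call shape as in it87_find(); the prefix comes from pr_fmt() */
        pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
                chip_type, address, revision);
        return 0;
}

Because the prefix is glued on by string-literal concatenation at compile time, the conversion is purely mechanical and the log output stays identical.
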
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -858,7 +860,7 @@ static int __init lm78_isa_found(unsigned short address) * individually for the probing phase. */ for (port = address; port < address + LM78_EXTENT; port++) { if (!request_region(port, 1, "lm78")) { - pr_debug("lm78: Failed to request port 0x%x\n", port); + pr_debug("Failed to request port 0x%x\n", port); goto release; } } @@ -920,7 +922,7 @@ static int __init lm78_isa_found(unsigned short address) found = 1; if (found) - pr_info("lm78: Found an %s chip at %#x\n", + pr_info("Found an %s chip at %#x\n", val & 0x80 ? "LM79" : "LM78", (int)address); release: @@ -942,21 +944,19 @@ static int __init lm78_isa_device_add(unsigned short address) pdev = platform_device_alloc("lm78", address); if (!pdev) { err = -ENOMEM; - printk(KERN_ERR "lm78: Device allocation failed\n"); + pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { - printk(KERN_ERR "lm78: Device resource addition failed " - "(%d)\n", err); + pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { - printk(KERN_ERR "lm78: Device addition failed (%d)\n", - err); + pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index 68e69a49633c..3d99b8854d7c 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -33,6 +33,8 @@ * the standard Super-I/O addresses is used (0x2E/0x2F or 0x4E/0x4F). */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -1031,16 +1033,15 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses val = superio_inb(sioaddr, ACT); if (!(val & 0x01)) { - printk(KERN_INFO "pc87360: Device 0x%02x not " - "activated\n", logdev[i]); + pr_info("Device 0x%02x not activated\n", logdev[i]); continue; } val = (superio_inb(sioaddr, BASE) << 8) | superio_inb(sioaddr, BASE + 1); if (!val) { - printk(KERN_INFO "pc87360: Base address not set for " - "device 0x%02x\n", logdev[i]); + pr_info("Base address not set for device 0x%02x\n", + logdev[i]); continue; } @@ -1050,17 +1051,15 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses confreg[0] = superio_inb(sioaddr, 0xF0); confreg[1] = superio_inb(sioaddr, 0xF1); -#ifdef DEBUG - printk(KERN_DEBUG "pc87360: Fan 1: mon=%d " - "ctrl=%d inv=%d\n", (confreg[0]>>2)&1, - (confreg[0]>>3)&1, (confreg[0]>>4)&1); - printk(KERN_DEBUG "pc87360: Fan 2: mon=%d " - "ctrl=%d inv=%d\n", (confreg[0]>>5)&1, - (confreg[0]>>6)&1, (confreg[0]>>7)&1); - printk(KERN_DEBUG "pc87360: Fan 3: mon=%d " - "ctrl=%d inv=%d\n", confreg[1]&1, - (confreg[1]>>1)&1, (confreg[1]>>2)&1); -#endif + pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1, + (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1, + (confreg[0] >> 4) & 1); + pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2, + (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1, + (confreg[0] >> 7) & 1); + pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3, + confreg[1] & 1, (confreg[1] >> 1) & 1, + (confreg[1] >> 2) & 1); } else if (i==1) { /* Voltages */ /* Are we using thermistors? 
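
The pc87360 hunk above replaces the #ifdef DEBUG printk block with three pr_debug() calls that decode the fan configuration bytes; the decoded bits are unchanged (monitor, control and invert flags for fans 1-3, packed into confreg[0] and confreg[1]). The bit layout can be checked with a short stand-alone loop over made-up register contents:

#include <stdio.h>

int main(void)
{
        /* Made-up Super-I/O configuration bytes: fans 1 and 2 live in
         * confreg[0] (bits 2..4 and 5..7), fan 3 in confreg[1] (bits 0..2) */
        unsigned char confreg[2] = {0x5c, 0x03};
        struct { int reg; int shift; } fan[3] = {
                {0, 2}, {0, 5}, {1, 0}
        };

        for (int i = 0; i < 3; i++) {
                unsigned char v = confreg[fan[i].reg] >> fan[i].shift;
                printf("Fan %d: mon=%d ctrl=%d inv=%d\n", i + 1,
                       v & 1, (v >> 1) & 1, (v >> 2) & 1);
        }
        return 0;
}

With pr_debug() the messages are compiled in only for DEBUG or dynamic-debug builds, so dropping the #ifdef does not bloat a production kernel.
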
*/ if (*devid == 0xE9) { /* PC87366 */ @@ -1071,14 +1070,12 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses confreg[3] = superio_inb(sioaddr, 0x25); if (confreg[2] & 0x40) { - printk(KERN_INFO "pc87360: Using " - "thermistors for temperature " - "monitoring\n"); + pr_info("Using thermistors for " + "temperature monitoring\n"); } if (confreg[3] & 0xE0) { - printk(KERN_INFO "pc87360: VID " - "inputs routed (mode %u)\n", - confreg[3] >> 5); + pr_info("VID inputs routed (mode %u)\n", + confreg[3] >> 5); } } } @@ -1616,7 +1613,7 @@ static int __init pc87360_device_add(unsigned short address) pdev = platform_device_alloc("pc87360", address); if (!pdev) { err = -ENOMEM; - printk(KERN_ERR "pc87360: Device allocation failed\n"); + pr_err("Device allocation failed\n"); goto exit; } @@ -1639,15 +1636,13 @@ static int __init pc87360_device_add(unsigned short address) err = platform_device_add_resources(pdev, res, res_count); if (err) { - printk(KERN_ERR "pc87360: Device resources addition failed " - "(%d)\n", err); + pr_err("Device resources addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { - printk(KERN_ERR "pc87360: Device addition failed (%d)\n", - err); + pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } @@ -1666,8 +1661,7 @@ static int __init pc87360_init(void) if (pc87360_find(0x2e, &devid, extra_isa) && pc87360_find(0x4e, &devid, extra_isa)) { - printk(KERN_WARNING "pc87360: PC8736x not detected, " - "module not inserted.\n"); + pr_warn("PC8736x not detected, module not inserted\n"); return -ENODEV; } @@ -1680,8 +1674,7 @@ static int __init pc87360_init(void) } if (address == 0x0000) { - printk(KERN_WARNING "pc87360: No active logical device, " - "module not inserted.\n"); + pr_warn("No active logical device, module not inserted\n"); return -ENODEV; } diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c index 9ec4daaf6ca6..8da2181630b1 100644 --- a/drivers/hwmon/pc87427.c +++ b/drivers/hwmon/pc87427.c @@ -22,6 +22,8 @@ * mode, and voltages aren't supported at all. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -1077,7 +1079,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev) data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL); if (!data) { err = -ENOMEM; - printk(KERN_ERR DRVNAME ": Out of memory\n"); + pr_err("Out of memory\n"); goto exit; } @@ -1196,28 +1198,26 @@ static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data) pdev = platform_device_alloc(DRVNAME, res[0].start); if (!pdev) { err = -ENOMEM; - printk(KERN_ERR DRVNAME ": Device allocation failed\n"); + pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, res, res_count); if (err) { - printk(KERN_ERR DRVNAME ": Device resource addition failed " - "(%d)\n", err); + pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct pc87427_sio_data)); if (err) { - printk(KERN_ERR DRVNAME ": Platform data allocation failed\n"); + pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { - printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", - err); + pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } @@ -1249,23 +1249,23 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data) val = superio_inb(sioaddr, SIOREG_ACT); if (!(val & 0x01)) { - printk(KERN_INFO DRVNAME ": Logical device 0x%02x " - "not activated\n", logdev[i]); + pr_info("Logical device 0x%02x not activated\n", + logdev[i]); continue; } val = superio_inb(sioaddr, SIOREG_MAP); if (val & 0x01) { - printk(KERN_WARNING DRVNAME ": Logical device 0x%02x " - "is memory-mapped, can't use\n", logdev[i]); + pr_warn("Logical device 0x%02x is memory-mapped, " + "can't use\n", logdev[i]); continue; } val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8) | superio_inb(sioaddr, SIOREG_IOBASE + 1); if (!val) { - printk(KERN_INFO DRVNAME ": I/O base address not set " - "for logical device 0x%02x\n", logdev[i]); + pr_info("I/O base address not set for logical device " + "0x%02x\n", logdev[i]); continue; } sio_data->address[i] = val; diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index 13e8d218e495..25e91665a0a2 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -689,6 +689,13 @@ static int __devexit via686a_remove(struct platform_device *pdev) return 0; } +static void via686a_update_fan_div(struct via686a_data *data) +{ + int reg = via686a_read_value(data, VIA686A_REG_FANDIV); + data->fan_div[0] = (reg >> 4) & 0x03; + data->fan_div[1] = reg >> 6; +} + static void __devinit via686a_init_device(struct via686a_data *data) { u8 reg; @@ -702,6 +709,9 @@ static void __devinit via686a_init_device(struct via686a_data *data) via686a_write_value(data, VIA686A_REG_TEMP_MODE, (reg & ~VIA686A_TEMP_MODE_MASK) | VIA686A_TEMP_MODE_CONTINUOUS); + + /* Pre-read fan clock divisor values */ + via686a_update_fan_div(data); } static struct via686a_data *via686a_update_device(struct device *dev) @@ -753,9 +763,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) & 0xc0) >> 6; - i = via686a_read_value(data, VIA686A_REG_FANDIV); - data->fan_div[0] = (i >> 4) & 0x03; - data->fan_div[1] = i >> 6; + via686a_update_fan_div(data); data->alarms = via686a_read_value(data, VIA686A_REG_ALARM1) | diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index c84b9b4e6960..eed43a008be1 100644 
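
The via686a change above factors the divisor decoding into via686a_update_fan_div() and calls it once from via686a_init_device(), so the cached fan_div values are valid before the first update cycle. The decoding itself is just two small bit fields of the FANDIV register; a quick check with an assumed register value:

#include <stdio.h>

/* Same field extraction as via686a_update_fan_div(): fan1 divisor in
 * bits 5:4, fan2 divisor in bits 7:6 */
static void update_fan_div(unsigned char reg, int fan_div[2])
{
        fan_div[0] = (reg >> 4) & 0x03;
        fan_div[1] = reg >> 6;
}

int main(void)
{
        unsigned char fandiv_reg = 0xb0;        /* made-up register contents */
        int fan_div[2];

        update_fan_div(fandiv_reg, fan_div);
        /* the field encodes the divisor as a power of two */
        printf("fan1 div=%d fan2 div=%d\n", 1 << fan_div[0], 1 << fan_div[1]);
        return 0;
}
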
--- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -33,6 +33,8 @@ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -1798,8 +1800,7 @@ w83781d_isa_found(unsigned short address) * individually for the probing phase. */ for (port = address; port < address + W83781D_EXTENT; port++) { if (!request_region(port, 1, "w83781d")) { - pr_debug("w83781d: Failed to request port 0x%x\n", - port); + pr_debug("Failed to request port 0x%x\n", port); goto release; } } @@ -1811,7 +1812,7 @@ w83781d_isa_found(unsigned short address) if (inb_p(address + 2) != val || inb_p(address + 3) != val || inb_p(address + 7) != val) { - pr_debug("w83781d: Detection failed at step 1\n"); + pr_debug("Detection failed at step %d\n", 1); goto release; } #undef REALLY_SLOW_IO @@ -1820,14 +1821,14 @@ w83781d_isa_found(unsigned short address) MSB (busy flag) should be clear initially, set after the write. */ save = inb_p(address + W83781D_ADDR_REG_OFFSET); if (save & 0x80) { - pr_debug("w83781d: Detection failed at step 2\n"); + pr_debug("Detection failed at step %d\n", 2); goto release; } val = ~save & 0x7f; outb_p(val, address + W83781D_ADDR_REG_OFFSET); if (inb_p(address + W83781D_ADDR_REG_OFFSET) != (val | 0x80)) { outb_p(save, address + W83781D_ADDR_REG_OFFSET); - pr_debug("w83781d: Detection failed at step 3\n"); + pr_debug("Detection failed at step %d\n", 3); goto release; } @@ -1835,7 +1836,7 @@ w83781d_isa_found(unsigned short address) outb_p(W83781D_REG_CONFIG, address + W83781D_ADDR_REG_OFFSET); val = inb_p(address + W83781D_DATA_REG_OFFSET); if (val & 0x80) { - pr_debug("w83781d: Detection failed at step 4\n"); + pr_debug("Detection failed at step %d\n", 4); goto release; } outb_p(W83781D_REG_BANK, address + W83781D_ADDR_REG_OFFSET); @@ -1844,19 +1845,19 @@ w83781d_isa_found(unsigned short address) val = inb_p(address + W83781D_DATA_REG_OFFSET); if ((!(save & 0x80) && (val != 0xa3)) || ((save & 0x80) && (val != 0x5c))) { - pr_debug("w83781d: Detection failed at step 5\n"); + pr_debug("Detection failed at step %d\n", 5); goto release; } outb_p(W83781D_REG_I2C_ADDR, address + W83781D_ADDR_REG_OFFSET); val = inb_p(address + W83781D_DATA_REG_OFFSET); if (val < 0x03 || val > 0x77) { /* Not a valid I2C address */ - pr_debug("w83781d: Detection failed at step 6\n"); + pr_debug("Detection failed at step %d\n", 6); goto release; } /* The busy flag should be clear again */ if (inb_p(address + W83781D_ADDR_REG_OFFSET) & 0x80) { - pr_debug("w83781d: Detection failed at step 7\n"); + pr_debug("Detection failed at step %d\n", 7); goto release; } @@ -1871,7 +1872,7 @@ w83781d_isa_found(unsigned short address) found = 1; if (found) - pr_info("w83781d: Found a %s chip at %#x\n", + pr_info("Found a %s chip at %#x\n", val == 0x30 ? 
"W83782D" : "W83781D", (int)address); release: @@ -1894,21 +1895,19 @@ w83781d_isa_device_add(unsigned short address) pdev = platform_device_alloc("w83781d", address); if (!pdev) { err = -ENOMEM; - printk(KERN_ERR "w83781d: Device allocation failed\n"); + pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { - printk(KERN_ERR "w83781d: Device resource addition failed " - "(%d)\n", err); + pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { - printk(KERN_ERR "w83781d: Device addition failed (%d)\n", - err); + pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index 679718e6b017..63841f8cec07 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -691,13 +691,23 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, } static ssize_t -show_regs_chassis(struct device *dev, struct device_attribute *attr, +show_chassis(struct device *dev, struct device_attribute *attr, char *buf) { struct w83792d_data *data = w83792d_update_device(dev); return sprintf(buf, "%d\n", data->chassis); } +static ssize_t +show_regs_chassis(struct device *dev, struct device_attribute *attr, + char *buf) +{ + dev_warn(dev, + "Attribute %s is deprecated, use intrusion0_alarm instead\n", + "chassis"); + return show_chassis(dev, attr, buf); +} + static ssize_t show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf) { @@ -706,7 +716,7 @@ show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -store_chassis_clear(struct device *dev, struct device_attribute *attr, +store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); @@ -714,6 +724,10 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr, u32 val; u8 temp1 = 0, temp2 = 0; + dev_warn(dev, + "Attribute %s is deprecated, use intrusion0_alarm instead\n", + "chassis_clear"); + val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->chassis_clear = SENSORS_LIMIT(val, 0 ,1); @@ -726,6 +740,27 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr, return count; } +static ssize_t +store_chassis_clear(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct w83792d_data *data = i2c_get_clientdata(client); + unsigned long val; + u8 reg; + + if (strict_strtoul(buf, 10, &val) || val != 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR); + w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80); + data->valid = 0; /* Force cache refresh */ + mutex_unlock(&data->update_lock); + + return count; +} + /* For Smart Fan I / Thermal Cruise */ static ssize_t show_thermal_cruise(struct device *dev, struct device_attribute *attr, @@ -1012,7 +1047,9 @@ static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22); static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23); static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL); static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR, - show_chassis_clear, store_chassis_clear); + show_chassis_clear, store_chassis_clear_legacy); +static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, + show_chassis, 
store_chassis_clear); static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2); @@ -1214,6 +1251,7 @@ static struct attribute *w83792d_attributes[] = { &dev_attr_alarms.attr, &dev_attr_chassis.attr, &dev_attr_chassis_clear.attr, + &dev_attr_intrusion0_alarm.attr, &sensor_dev_attr_tolerance1.dev_attr.attr, &sensor_dev_attr_thermal_cruise1.dev_attr.attr, &sensor_dev_attr_tolerance2.dev_attr.attr, diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 8e540ada47d2..e3bdedfb5347 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -51,7 +51,6 @@ #define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */ /* Addresses to scan */ -static DEFINE_MUTEX(watchdog_mutex); static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; @@ -421,14 +420,17 @@ store_beep_enable(struct device *dev, struct device_attribute *attr, /* Write any value to clear chassis alarm */ static ssize_t -store_chassis_clear(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) +store_chassis_clear_legacy(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83793_data *data = i2c_get_clientdata(client); u8 val; + dev_warn(dev, "Attribute chassis is deprecated, " + "use intrusion0_alarm instead\n"); + mutex_lock(&data->update_lock); val = w83793_read_value(client, W83793_REG_CLR_CHASSIS); val |= 0x80; @@ -437,6 +439,28 @@ store_chassis_clear(struct device *dev, return count; } +/* Write 0 to clear chassis alarm */ +static ssize_t +store_chassis_clear(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct w83793_data *data = i2c_get_clientdata(client); + unsigned long val; + u8 reg; + + if (strict_strtoul(buf, 10, &val) || val != 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS); + w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80); + data->valid = 0; /* Force cache refresh */ + mutex_unlock(&data->update_lock); + return count; +} + #define FAN_INPUT 0 #define FAN_MIN 1 static ssize_t @@ -1102,6 +1126,8 @@ static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm); static struct sensor_device_attribute_2 sda_single_files[] = { SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep, + store_chassis_clear_legacy, ALARM_STATUS, 30), + SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, store_chassis_clear, ALARM_STATUS, 30), SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable, store_beep_enable, NOT_USED, NOT_USED), @@ -1323,7 +1349,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf, static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - static struct watchdog_info ident = { + struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_CARDRESET, @@ -1333,7 +1359,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, int val, ret = 0; struct w83793_data *data = filp->private_data; - mutex_lock(&watchdog_mutex); switch (cmd) { case WDIOC_GETSUPPORT: if (!nowayout) @@ -1387,7 +1412,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, default: ret = -ENOTTY; } - 
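
Both the w83792d and w83793 hunks above follow the same pattern: the old chassis/chassis_clear attributes stay but now log a deprecation warning, while the new standard intrusion0_alarm attribute accepts only a write of "0" and clears the latch by setting the self-clearing bit 7 of the chassis-clear register. A user-space sketch of that validation and bit operation (the register access is simulated, and the kernel's strict_strtoul is stricter than this parse):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static unsigned char chassis_clr_reg = 0x02;    /* simulated register */

/* Mirrors the new store routines: accept only "0", then set bit 7 */
static int intrusion0_alarm_store(const char *buf)
{
        char *end;
        unsigned long val = strtoul(buf, &end, 10);

        if (end == buf || val != 0)
                return -EINVAL;

        chassis_clr_reg |= 0x80;        /* ask the chip to clear the latch */
        return 0;
}

int main(void)
{
        const char *writes[] = {"1", "junk", "0"};

        for (int i = 0; i < 3; i++)
                printf("write \"%s\" -> %d (reg now 0x%02x)\n", writes[i],
                       intrusion0_alarm_store(writes[i]), chassis_clr_reg);
        return 0;
}

Keeping the legacy attributes around with a warning gives user-space tools time to migrate to the sysfs-standard name before the old ones are removed.
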
mutex_unlock(&watchdog_mutex); return ret; } diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index cdbc7448491e..845232d7f611 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -458,6 +458,7 @@ static void w83795_update_limits(struct i2c_client *client) { struct w83795_data *data = i2c_get_clientdata(client); int i, limit; + u8 lsb; /* Read the voltage limits */ for (i = 0; i < ARRAY_SIZE(data->in); i++) { @@ -479,9 +480,8 @@ static void w83795_update_limits(struct i2c_client *client) } /* Read the fan limits */ + lsb = 0; /* Silent false gcc warning */ for (i = 0; i < ARRAY_SIZE(data->fan); i++) { - u8 lsb; - /* Each register contains LSB for 2 fans, but we want to * read it only once to save time */ if ((i & 1) == 0 && (data->has_fan & (3 << i))) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 3a6321cb8030..113505a6434e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -638,6 +638,14 @@ config I2C_XILINX This driver can also be built as a module. If so, the module will be called xilinx_i2c. +config I2C_EG20T + tristate "PCH I2C of Intel EG20T" + depends on PCI + help + This driver is for PCH(Platform controller Hub) I2C of EG20T which + is an IOH(Input/Output Hub) for x86 embedded processor. + This driver can access PCH I2C bus device. + comment "External I2C/SMBus adapter drivers" config I2C_PARPORT diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 84cb16ae6f9e..9d2d0ec7fb23 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o +obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o # External I2C/SMBus adapter drivers obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index fb26e5c67515..52b545a795f2 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -159,6 +160,27 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, if (mast_stat & BUFWRERR) dev_dbg(&iface->adap.dev, "Buffer Write Error\n"); + /* Faulty slave devices, may drive SDA low after a transfer + * finishes. To release the bus this code generates up to 9 + * extra clocks until SDA is released. + */ + + if (read_MASTER_STAT(iface) & SDASEN) { + int cnt = 9; + do { + write_MASTER_CTL(iface, SCLOVR); + udelay(6); + write_MASTER_CTL(iface, 0); + udelay(6); + } while ((read_MASTER_STAT(iface) & SDASEN) && cnt--); + + write_MASTER_CTL(iface, SDAOVR | SCLOVR); + udelay(6); + write_MASTER_CTL(iface, SDAOVR); + udelay(6); + write_MASTER_CTL(iface, 0); + } + /* If it is a quick transfer, only address without data, * not an err, return 1. */ @@ -760,7 +782,7 @@ static void __exit i2c_bfin_twi_exit(void) platform_driver_unregister(&i2c_bfin_twi_driver); } -module_init(i2c_bfin_twi_init); +subsys_initcall(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c new file mode 100644 index 000000000000..2e067dd2ee51 --- /dev/null +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -0,0 +1,900 @@ +/* + * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 
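
The i2c-bfin-twi hunk above is the classic I2C bus-recovery trick: if a confused slave keeps SDA low after a transfer, the master toggles SCL up to nine extra times until the slave releases the data line, then drives a clean SDA transition. A hardware-free sketch of the same loop, where sda_stuck_low() and pulse_scl() stand in for the SDASEN status check and the SCLOVR register writes:

#include <stdio.h>
#include <stdbool.h>

/* Pretend the stuck slave still has a few data bits to shift out and
 * releases SDA once it has seen enough extra clocks. */
static int pending_bits = 4;

static bool sda_stuck_low(void) { return pending_bits > 0; }
static void pulse_scl(void)     { if (pending_bits) pending_bits--; }

int main(void)
{
        int cnt = 9;    /* at most 9 extra clocks, as in the driver */

        if (sda_stuck_low()) {
                do {
                        pulse_scl();    /* drive SCL low, wait, release, wait */
                } while (sda_stuck_low() && cnt--);
        }
        printf(sda_stuck_low() ? "bus still stuck\n" : "SDA released\n");
        return 0;
}

Nine pulses suffice because a slave can be holding at most one data byte plus an ACK bit of a transfer it believes is still in progress.
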
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ +#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ +#define PCH_MAX_CLK 100000 /* Maximum Clock speed in MHz */ +#define PCH_BUFFER_MODE_ENABLE 0x0002 /* flag for Buffer mode enable */ +#define PCH_EEPROM_SW_RST_MODE_ENABLE 0x0008 /* EEPROM SW RST enable flag */ + +#define PCH_I2CSADR 0x00 /* I2C slave address register */ +#define PCH_I2CCTL 0x04 /* I2C control register */ +#define PCH_I2CSR 0x08 /* I2C status register */ +#define PCH_I2CDR 0x0C /* I2C data register */ +#define PCH_I2CMON 0x10 /* I2C bus monitor register */ +#define PCH_I2CBC 0x14 /* I2C bus transfer rate setup counter */ +#define PCH_I2CMOD 0x18 /* I2C mode register */ +#define PCH_I2CBUFSLV 0x1C /* I2C buffer mode slave address register */ +#define PCH_I2CBUFSUB 0x20 /* I2C buffer mode subaddress register */ +#define PCH_I2CBUFFOR 0x24 /* I2C buffer mode format register */ +#define PCH_I2CBUFCTL 0x28 /* I2C buffer mode control register */ +#define PCH_I2CBUFMSK 0x2C /* I2C buffer mode interrupt mask register */ +#define PCH_I2CBUFSTA 0x30 /* I2C buffer mode status register */ +#define PCH_I2CBUFLEV 0x34 /* I2C buffer mode level register */ +#define PCH_I2CESRFOR 0x38 /* EEPROM software reset mode format register */ +#define PCH_I2CESRCTL 0x3C /* EEPROM software reset mode ctrl register */ +#define PCH_I2CESRMSK 0x40 /* EEPROM software reset mode */ +#define PCH_I2CESRSTA 0x44 /* EEPROM software reset mode status register */ +#define PCH_I2CTMR 0x48 /* I2C timer register */ +#define PCH_I2CSRST 0xFC /* I2C reset register */ +#define PCH_I2CNF 0xF8 /* I2C noise filter register */ + +#define BUS_IDLE_TIMEOUT 20 +#define PCH_I2CCTL_I2CMEN 0x0080 +#define TEN_BIT_ADDR_DEFAULT 0xF000 +#define TEN_BIT_ADDR_MASK 0xF0 +#define PCH_START 0x0020 +#define PCH_ESR_START 0x0001 +#define PCH_BUFF_START 0x1 +#define PCH_REPSTART 0x0004 +#define PCH_ACK 0x0008 +#define PCH_GETACK 0x0001 +#define CLR_REG 0x0 +#define I2C_RD 0x1 +#define I2CMCF_BIT 0x0080 +#define I2CMIF_BIT 0x0002 +#define I2CMAL_BIT 0x0010 +#define I2CBMFI_BIT 0x0001 +#define I2CBMAL_BIT 0x0002 +#define I2CBMNA_BIT 0x0004 +#define I2CBMTO_BIT 0x0008 +#define I2CBMIS_BIT 0x0010 +#define I2CESRFI_BIT 0X0001 +#define I2CESRTO_BIT 0x0002 +#define I2CESRFIIE_BIT 0x1 +#define I2CESRTOIE_BIT 0x2 +#define I2CBMDZ_BIT 0x0040 +#define I2CBMAG_BIT 0x0020 +#define I2CMBB_BIT 0x0020 +#define BUFFER_MODE_MASK (I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \ + I2CBMTO_BIT | I2CBMIS_BIT) +#define I2C_ADDR_MSK 0xFF +#define I2C_MSB_2B_MSK 0x300 +#define FAST_MODE_CLK 400 +#define FAST_MODE_EN 0x0001 +#define SUB_ADDR_LEN_MAX 4 +#define BUF_LEN_MAX 32 +#define PCH_BUFFER_MODE 0x1 +#define 
EEPROM_SW_RST_MODE 0x0002 +#define NORMAL_INTR_ENBL 0x0300 +#define EEPROM_RST_INTR_ENBL (I2CESRFIIE_BIT | I2CESRTOIE_BIT) +#define EEPROM_RST_INTR_DISBL 0x0 +#define BUFFER_MODE_INTR_ENBL 0x001F +#define BUFFER_MODE_INTR_DISBL 0x0 +#define NORMAL_MODE 0x0 +#define BUFFER_MODE 0x1 +#define EEPROM_SR_MODE 0x2 +#define I2C_TX_MODE 0x0010 +#define PCH_BUF_TX 0xFFF7 +#define PCH_BUF_RD 0x0008 +#define I2C_ERROR_MASK (I2CESRTO_EVENT | I2CBMIS_EVENT | I2CBMTO_EVENT | \ + I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT) +#define I2CMAL_EVENT 0x0001 +#define I2CMCF_EVENT 0x0002 +#define I2CBMFI_EVENT 0x0004 +#define I2CBMAL_EVENT 0x0008 +#define I2CBMNA_EVENT 0x0010 +#define I2CBMTO_EVENT 0x0020 +#define I2CBMIS_EVENT 0x0040 +#define I2CESRFI_EVENT 0x0080 +#define I2CESRTO_EVENT 0x0100 +#define PCI_DEVICE_ID_PCH_I2C 0x8817 + +#define pch_dbg(adap, fmt, arg...) \ + dev_dbg(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg) + +#define pch_err(adap, fmt, arg...) \ + dev_err(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg) + +#define pch_pci_err(pdev, fmt, arg...) \ + dev_err(&pdev->dev, "%s :" fmt, __func__, ##arg) + +#define pch_pci_dbg(pdev, fmt, arg...) \ + dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg) + +/** + * struct i2c_algo_pch_data - for I2C driver functionalities + * @pch_adapter: stores the reference to i2c_adapter structure + * @p_adapter_info: stores the reference to adapter_info structure + * @pch_base_address: specifies the remapped base address + * @pch_buff_mode_en: specifies if buffer mode is enabled + * @pch_event_flag: specifies occurrence of interrupt events + * @pch_i2c_xfer_in_progress: specifies whether the transfer is completed + */ +struct i2c_algo_pch_data { + struct i2c_adapter pch_adapter; + struct adapter_info *p_adapter_info; + void __iomem *pch_base_address; + int pch_buff_mode_en; + u32 pch_event_flag; + bool pch_i2c_xfer_in_progress; +}; + +/** + * struct adapter_info - This structure holds the adapter information for the + PCH i2c controller + * @pch_data: stores a list of i2c_algo_pch_data + * @pch_i2c_suspended: specifies whether the system is suspended or not + * perhaps with more lines and words. + * + * pch_data has as many elements as maximum I2C channels + */ +struct adapter_info { + struct i2c_algo_pch_data pch_data; + bool pch_i2c_suspended; +}; + + +static int pch_i2c_speed = 100; /* I2C bus speed in Kbps */ +static int pch_clk = 50000; /* specifies I2C clock speed in KHz */ +static wait_queue_head_t pch_event; +static DEFINE_MUTEX(pch_mutex); + +static struct pci_device_id __devinitdata pch_pcidev_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)}, + {0,} +}; + +static irqreturn_t pch_i2c_handler(int irq, void *pData); + +static inline void pch_setbit(void __iomem *addr, u32 offset, u32 bitmask) +{ + u32 val; + val = ioread32(addr + offset); + val |= bitmask; + iowrite32(val, addr + offset); +} + +static inline void pch_clrbit(void __iomem *addr, u32 offset, u32 bitmask) +{ + u32 val; + val = ioread32(addr + offset); + val &= (~bitmask); + iowrite32(val, addr + offset); +} + +/** + * pch_i2c_init() - hardware initialization of I2C module + * @adap: Pointer to struct i2c_algo_pch_data. 
+ */ +static void pch_i2c_init(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + u32 pch_i2cbc; + u32 pch_i2ctmr; + u32 reg_value; + + /* reset I2C controller */ + iowrite32(0x01, p + PCH_I2CSRST); + msleep(20); + iowrite32(0x0, p + PCH_I2CSRST); + + /* Initialize I2C registers */ + iowrite32(0x21, p + PCH_I2CNF); + + pch_setbit(adap->pch_base_address, PCH_I2CCTL, + PCH_I2CCTL_I2CMEN); + + if (pch_i2c_speed != 400) + pch_i2c_speed = 100; + + reg_value = PCH_I2CCTL_I2CMEN; + if (pch_i2c_speed == FAST_MODE_CLK) { + reg_value |= FAST_MODE_EN; + pch_dbg(adap, "Fast mode enabled\n"); + } + + if (pch_clk > PCH_MAX_CLK) + pch_clk = 62500; + + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8; + /* Set transfer speed in I2CBC */ + iowrite32(pch_i2cbc, p + PCH_I2CBC); + + pch_i2ctmr = (pch_clk) / 8; + iowrite32(pch_i2ctmr, p + PCH_I2CTMR); + + reg_value |= NORMAL_INTR_ENBL; /* Enable interrupts in normal mode */ + iowrite32(reg_value, p + PCH_I2CCTL); + + pch_dbg(adap, + "I2CCTL=%x pch_i2cbc=%x pch_i2ctmr=%x Enable interrupts\n", + ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr); + + init_waitqueue_head(&pch_event); +} + +static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2) +{ + return cmp1.tv64 < cmp2.tv64; +} + +/** + * pch_i2c_wait_for_bus_idle() - check the status of bus. + * @adap: Pointer to struct i2c_algo_pch_data. + * @timeout: waiting time counter (us). + */ +static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap, + s32 timeout) +{ + void __iomem *p = adap->pch_base_address; + + /* MAX timeout value is timeout*1000*1000nsec */ + ktime_t ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000); + do { + if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0) + break; + msleep(20); + } while (ktime_lt(ktime_get(), ns_val)); + + pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR)); + + if (timeout == 0) { + pch_err(adap, "%s: Timeout Error.return%d\n", __func__, -ETIME); + return -ETIME; + } + + return 0; +} + +/** + * pch_i2c_start() - Generate I2C start condition in normal mode. + * @adap: Pointer to struct i2c_algo_pch_data. + * + * Generate I2C start condition in normal mode by setting I2CCTL.I2CMSTA to 1. + */ +static void pch_i2c_start(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); + pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START); +} + +/** + * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap) +{ + s32 ret; + ret = wait_event_timeout(pch_event, + (adap->pch_event_flag != 0), msecs_to_jiffies(50)); + if (ret < 0) { + pch_err(adap, "timeout: %x\n", adap->pch_event_flag); + return ret; + } + + if (ret == 0) { + pch_err(adap, "timeout: %x\n", adap->pch_event_flag); + return -ETIMEDOUT; + } + + if (adap->pch_event_flag & I2C_ERROR_MASK) { + pch_err(adap, "error bits set: %x\n", adap->pch_event_flag); + return -EIO; + } + + adap->pch_event_flag = 0; + + return 0; +} + +/** + * pch_i2c_getack() - to confirm ACK/NACK + * @adap: Pointer to struct i2c_algo_pch_data. 
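
pch_i2c_init() above derives the bus-rate counter and timer values from the pch_clk and pch_i2c_speed module parameters. With the defaults (50000 kHz clock, 100 kbps) the arithmetic is easy to check by hand; the stand-alone evaluation below reuses the exact expressions from the patch, including the fallback values, and also shows the 400 kbps fast-mode case:

#include <stdio.h>

#define PCH_MAX_CLK 100000

static void show(int pch_clk, int pch_i2c_speed)
{
        /* Same clamping and expressions as pch_i2c_init(); the division
         * happens before the final *8 due to C operator precedence. */
        if (pch_i2c_speed != 400)
                pch_i2c_speed = 100;
        if (pch_clk > PCH_MAX_CLK)
                pch_clk = 62500;

        int pch_i2cbc  = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
        int pch_i2ctmr = pch_clk / 8;

        printf("clk=%d kHz speed=%d kbps -> I2CBC=%d I2CTMR=%d\n",
               pch_clk, pch_i2c_speed, pch_i2cbc, pch_i2ctmr);
}

int main(void)
{
        show(50000, 100);       /* driver defaults */
        show(50000, 400);       /* fast mode */
        return 0;
}
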
+ */ +static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap) +{ + u32 reg_val; + void __iomem *p = adap->pch_base_address; + reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK; + + if (reg_val != 0) { + pch_err(adap, "return%d\n", -EPROTO); + return -EPROTO; + } + + return 0; +} + +/** + * pch_i2c_stop() - generate stop condition in normal mode. + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static void pch_i2c_stop(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); + /* clear the start bit */ + pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START); +} + +/** + * pch_i2c_repstart() - generate repeated start condition in normal mode + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static void pch_i2c_repstart(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); + pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_REPSTART); +} + +/** + * pch_i2c_writebytes() - write data to I2C bus in normal mode + * @i2c_adap: Pointer to the struct i2c_adapter. + * @last: specifies whether last message or not. + * In the case of compound mode it will be 1 for last message, + * otherwise 0. + * @first: specifies whether first message or not. + * 1 for first message otherwise 0. + */ +static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, u32 last, u32 first) +{ + struct i2c_algo_pch_data *adap = i2c_adap->algo_data; + u8 *buf; + u32 length; + u32 addr; + u32 addr_2_msb; + u32 addr_8_lsb; + s32 wrcount; + void __iomem *p = adap->pch_base_address; + + length = msgs->len; + buf = msgs->buf; + addr = msgs->addr; + + /* enable master tx */ + pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE); + + pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL), + length); + + if (first) { + if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME) + return -ETIME; + } + + if (msgs->flags & I2C_M_TEN) { + addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7); + iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR); + if (first) + pch_i2c_start(adap); + if (pch_i2c_wait_for_xfer_complete(adap) == 0 && + pch_i2c_getack(adap) == 0) { + addr_8_lsb = (addr & I2C_ADDR_MSK); + iowrite32(addr_8_lsb, p + PCH_I2CDR); + } else { + pch_i2c_stop(adap); + return -ETIME; + } + } else { + /* set 7 bit slave address and R/W bit as 0 */ + iowrite32(addr << 1, p + PCH_I2CDR); + if (first) + pch_i2c_start(adap); + } + + if ((pch_i2c_wait_for_xfer_complete(adap) == 0) && + (pch_i2c_getack(adap) == 0)) { + for (wrcount = 0; wrcount < length; ++wrcount) { + /* write buffer value to I2C data register */ + iowrite32(buf[wrcount], p + PCH_I2CDR); + pch_dbg(adap, "writing %x to Data register\n", + buf[wrcount]); + + if (pch_i2c_wait_for_xfer_complete(adap) != 0) + return -ETIME; + + if (pch_i2c_getack(adap)) + return -EIO; + } + + /* check if this is the last message */ + if (last) + pch_i2c_stop(adap); + else + pch_i2c_repstart(adap); + } else { + pch_i2c_stop(adap); + return -EIO; + } + + pch_dbg(adap, "return=%d\n", wrcount); + + return wrcount; +} + +/** + * pch_i2c_sendack() - send ACK + * @adap: Pointer to struct i2c_algo_pch_data. 
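
pch_i2c_writebytes() above handles 10-bit slave addressing by first sending the two address MSBs inside the reserved 11110xx0 pattern (TEN_BIT_ADDR_MASK) and then the low eight address bits as a second byte; the read path sets the R/W bit in that first byte instead. The byte construction can be reproduced in isolation, using the constants from the patch and an arbitrary sample address:

#include <stdio.h>

/* Constants copied from the patch */
#define TEN_BIT_ADDR_MASK 0xF0
#define I2C_MSB_2B_MSK    0x300
#define I2C_ADDR_MSK      0xFF
#define I2C_RD            0x1

int main(void)
{
        unsigned int addr = 0x2A5;      /* arbitrary 10-bit slave address */

        /* First byte: 11110 | addr[9:8] | R/W, as built in the driver;
         * second byte carries addr[7:0]. */
        unsigned int first_wr = ((addr & I2C_MSB_2B_MSK) >> 7) | TEN_BIT_ADDR_MASK;
        unsigned int first_rd = first_wr | I2C_RD;
        unsigned int second   = addr & I2C_ADDR_MSK;

        printf("addr 0x%03x: first byte 0x%02x (write) / 0x%02x (read), second byte 0x%02x\n",
               addr, first_wr, first_rd, second);
        return 0;
}
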
+ */ +static void pch_i2c_sendack(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); + pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK); +} + +/** + * pch_i2c_sendnack() - send NACK + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); + pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK); +} + +/** + * pch_i2c_readbytes() - read data from I2C bus in normal mode. + * @i2c_adap: Pointer to the struct i2c_adapter. + * @msgs: Pointer to i2c_msg structure. + * @last: specifies whether last message or not. + * @first: specifies whether first message or not. + */ +s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, + u32 last, u32 first) +{ + struct i2c_algo_pch_data *adap = i2c_adap->algo_data; + + u8 *buf; + u32 count; + u32 length; + u32 addr; + u32 addr_2_msb; + void __iomem *p = adap->pch_base_address; + + length = msgs->len; + buf = msgs->buf; + addr = msgs->addr; + + /* enable master reception */ + pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE); + + if (first) { + if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME) + return -ETIME; + } + + if (msgs->flags & I2C_M_TEN) { + addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD)); + iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR); + + } else { + /* 7 address bits + R/W bit */ + addr = (((addr) << 1) | (I2C_RD)); + iowrite32(addr, p + PCH_I2CDR); + } + + /* check if it is the first message */ + if (first) + pch_i2c_start(adap); + + if ((pch_i2c_wait_for_xfer_complete(adap) == 0) && + (pch_i2c_getack(adap) == 0)) { + pch_dbg(adap, "return %d\n", 0); + + if (length == 0) { + pch_i2c_stop(adap); + ioread32(p + PCH_I2CDR); /* Dummy read needs */ + + count = length; + } else { + int read_index; + int loop; + pch_i2c_sendack(adap); + + /* Dummy read */ + for (loop = 1, read_index = 0; loop < length; loop++) { + buf[read_index] = ioread32(p + PCH_I2CDR); + + if (loop != 1) + read_index++; + + if (pch_i2c_wait_for_xfer_complete(adap) != 0) { + pch_i2c_stop(adap); + return -ETIME; + } + } /* end for */ + + pch_i2c_sendnack(adap); + + buf[read_index] = ioread32(p + PCH_I2CDR); + + if (length != 1) + read_index++; + + if (pch_i2c_wait_for_xfer_complete(adap) == 0) { + if (last) + pch_i2c_stop(adap); + else + pch_i2c_repstart(adap); + + buf[read_index++] = ioread32(p + PCH_I2CDR); + count = read_index; + } else { + count = -ETIME; + } + + } + } else { + count = -ETIME; + pch_i2c_stop(adap); + } + + return count; +} + +/** + * pch_i2c_cb_ch0() - Interrupt handler Call back function + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap) +{ + u32 sts; + void __iomem *p = adap->pch_base_address; + + sts = ioread32(p + PCH_I2CSR); + sts &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT); + if (sts & I2CMAL_BIT) + adap->pch_event_flag |= I2CMAL_EVENT; + + if (sts & I2CMCF_BIT) + adap->pch_event_flag |= I2CMCF_EVENT; + + /* clear the applicable bits */ + pch_clrbit(adap->pch_base_address, PCH_I2CSR, sts); + + pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR)); + + wake_up(&pch_event); +} + +/** + * pch_i2c_handler() - interrupt handler for the PCH I2C controller + * @irq: irq number. + * @pData: cookie passed back to the handler function. 
+ */ +static irqreturn_t pch_i2c_handler(int irq, void *pData) +{ + s32 reg_val; + + struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData; + void __iomem *p = adap_data->pch_base_address; + u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE); + + if (mode != NORMAL_MODE) { + pch_err(adap_data, "I2C mode is not supported\n"); + return IRQ_NONE; + } + + reg_val = ioread32(p + PCH_I2CSR); + if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) + pch_i2c_cb_ch0(adap_data); + else + return IRQ_NONE; + + return IRQ_HANDLED; +} + +/** + * pch_i2c_xfer() - Reading adnd writing data through I2C bus + * @i2c_adap: Pointer to the struct i2c_adapter. + * @msgs: Pointer to i2c_msg structure. + * @num: number of messages. + */ +static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, s32 num) +{ + struct i2c_msg *pmsg; + u32 i = 0; + u32 status; + u32 msglen; + u32 subaddrlen; + s32 ret; + + struct i2c_algo_pch_data *adap = i2c_adap->algo_data; + + ret = mutex_lock_interruptible(&pch_mutex); + if (ret) + return -ERESTARTSYS; + + if (adap->p_adapter_info->pch_i2c_suspended) { + mutex_unlock(&pch_mutex); + return -EBUSY; + } + + pch_dbg(adap, "adap->p_adapter_info->pch_i2c_suspended is %d\n", + adap->p_adapter_info->pch_i2c_suspended); + /* transfer not completed */ + adap->pch_i2c_xfer_in_progress = true; + + pmsg = &msgs[0]; + pmsg->flags |= adap->pch_buff_mode_en; + status = pmsg->flags; + pch_dbg(adap, + "After invoking I2C_MODE_SEL :flag= 0x%x\n", status); + /* calculate sub address length and message length */ + /* these are applicable only for buffer mode */ + subaddrlen = pmsg->buf[0]; + /* calculate actual message length excluding + * the sub address fields */ + msglen = (pmsg->len) - (subaddrlen + 1); + if (status & (I2C_M_RD)) { + pch_dbg(adap, "invoking pch_i2c_readbytes\n"); + ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num), + (i == 0)); + } else { + pch_dbg(adap, "invoking pch_i2c_writebytes\n"); + ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num), + (i == 0)); + } + + adap->pch_i2c_xfer_in_progress = false; /* transfer completed */ + + mutex_unlock(&pch_mutex); + + return ret; +} + +/** + * pch_i2c_func() - return the functionality of the I2C driver + * @adap: Pointer to struct i2c_algo_pch_data. + */ +static u32 pch_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; +} + +static struct i2c_algorithm pch_algorithm = { + .master_xfer = pch_i2c_xfer, + .functionality = pch_i2c_func +}; + +/** + * pch_i2c_disbl_int() - Disable PCH I2C interrupts + * @adap: Pointer to struct i2c_algo_pch_data. 
+ */ +static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap) +{ + void __iomem *p = adap->pch_base_address; + + pch_clrbit(adap->pch_base_address, PCH_I2CCTL, NORMAL_INTR_ENBL); + + iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK); + + iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK); +} + +static int __devinit pch_i2c_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + void __iomem *base_addr; + s32 ret; + struct adapter_info *adap_info; + + pch_pci_dbg(pdev, "Entered.\n"); + + adap_info = kzalloc((sizeof(struct adapter_info)), GFP_KERNEL); + if (adap_info == NULL) { + pch_pci_err(pdev, "Memory allocation FAILED\n"); + return -ENOMEM; + } + + ret = pci_enable_device(pdev); + if (ret) { + pch_pci_err(pdev, "pci_enable_device FAILED\n"); + goto err_pci_enable; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + pch_pci_err(pdev, "pci_request_regions FAILED\n"); + goto err_pci_req; + } + + base_addr = pci_iomap(pdev, 1, 0); + + if (base_addr == NULL) { + pch_pci_err(pdev, "pci_iomap FAILED\n"); + ret = -ENOMEM; + goto err_pci_iomap; + } + + adap_info->pch_i2c_suspended = false; + + adap_info->pch_data.p_adapter_info = adap_info; + + adap_info->pch_data.pch_adapter.owner = THIS_MODULE; + adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON; + strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME); + adap_info->pch_data.pch_adapter.algo = &pch_algorithm; + adap_info->pch_data.pch_adapter.algo_data = + &adap_info->pch_data; + + /* (i * 0x80) + base_addr; */ + adap_info->pch_data.pch_base_address = base_addr; + + adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev; + + ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter)); + + if (ret) { + pch_pci_err(pdev, "i2c_add_adapter FAILED\n"); + goto err_i2c_add_adapter; + } + + pch_i2c_init(&adap_info->pch_data); + ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, + KBUILD_MODNAME, &adap_info->pch_data); + if (ret) { + pch_pci_err(pdev, "request_irq FAILED\n"); + goto err_request_irq; + } + + pci_set_drvdata(pdev, adap_info); + pch_pci_dbg(pdev, "returns %d.\n", ret); + return 0; + +err_request_irq: + i2c_del_adapter(&(adap_info->pch_data.pch_adapter)); +err_i2c_add_adapter: + pci_iounmap(pdev, base_addr); +err_pci_iomap: + pci_release_regions(pdev); +err_pci_req: + pci_disable_device(pdev); +err_pci_enable: + kfree(adap_info); + return ret; +} + +static void __devexit pch_i2c_remove(struct pci_dev *pdev) +{ + struct adapter_info *adap_info = pci_get_drvdata(pdev); + + pch_i2c_disbl_int(&adap_info->pch_data); + free_irq(pdev->irq, &adap_info->pch_data); + i2c_del_adapter(&(adap_info->pch_data.pch_adapter)); + + if (adap_info->pch_data.pch_base_address) { + pci_iounmap(pdev, adap_info->pch_data.pch_base_address); + adap_info->pch_data.pch_base_address = 0; + } + + pci_set_drvdata(pdev, NULL); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + kfree(adap_info); +} + +#ifdef CONFIG_PM +static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int ret; + struct adapter_info *adap_info = pci_get_drvdata(pdev); + void __iomem *p = adap_info->pch_data.pch_base_address; + + adap_info->pch_i2c_suspended = true; + + while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) { + /* Wait until all channel transfers are completed */ + msleep(20); + } + /* Disable the i2c interrupts */ + pch_i2c_disbl_int(&adap_info->pch_data); + + pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x " + "invoked function pch_i2c_disbl_int successfully\n", + ioread32(p + 
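
pch_i2c_probe() above acquires its resources in a fixed order and unwinds them through a ladder of labels in the reverse order, which is the usual shape for kernel probe routines. A compact user-space model of that structure, with stand-in functions instead of the real PCI and I2C calls:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("%s: %s\n", name, fail ? "FAILED" : "ok");
        return fail ? -1 : 0;
}
static void undo(const char *name) { printf("undo %s\n", name); }

static int probe(int fail_at)
{
        int ret;

        ret = step("enable device", fail_at == 1);
        if (ret)
                goto err_enable;
        ret = step("request regions", fail_at == 2);
        if (ret)
                goto err_regions;
        ret = step("map registers", fail_at == 3);
        if (ret)
                goto err_iomap;
        ret = step("request irq", fail_at == 4);
        if (ret)
                goto err_irq;
        return 0;

        /* Each label releases only what was acquired before the failure,
         * in reverse order. */
err_irq:
        undo("map registers");
err_iomap:
        undo("request regions");
err_regions:
        undo("enable device");
err_enable:
        return ret;
}

int main(void)
{
        return probe(3) ? 1 : 0;        /* simulate a failure at step 3 */
}
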
PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA), + ioread32(p + PCH_I2CESRSTA)); + + ret = pci_save_state(pdev); + + if (ret) { + pch_pci_err(pdev, "pci_save_state\n"); + return ret; + } + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int pch_i2c_resume(struct pci_dev *pdev) +{ + struct adapter_info *adap_info = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (pci_enable_device(pdev) < 0) { + pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n"); + return -EIO; + } + + pci_enable_wake(pdev, PCI_D3hot, 0); + + pch_i2c_init(&adap_info->pch_data); + + adap_info->pch_i2c_suspended = false; + + return 0; +} +#else +#define pch_i2c_suspend NULL +#define pch_i2c_resume NULL +#endif + +static struct pci_driver pch_pcidriver = { + .name = KBUILD_MODNAME, + .id_table = pch_pcidev_id, + .probe = pch_i2c_probe, + .remove = __devexit_p(pch_i2c_remove), + .suspend = pch_i2c_suspend, + .resume = pch_i2c_resume +}; + +static int __init pch_pci_init(void) +{ + return pci_register_driver(&pch_pcidriver); +} +module_init(pch_pci_init); + +static void __exit pch_pci_exit(void) +{ + pci_unregister_driver(&pch_pcidriver); +} +module_exit(pch_pci_exit); + +MODULE_DESCRIPTION("PCH I2C PCI Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tomoya MORINAGA. "); +module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); +module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 112c61f7b8cd..f09c9319a2ba 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -409,7 +409,7 @@ iop3xx_i2c_remove(struct platform_device *pdev) IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE); __raw_writel(cr, adapter_data->ioaddr + CR_OFFSET); - iounmap((void __iomem*)adapter_data->ioaddr); + iounmap(adapter_data->ioaddr); release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); kfree(adapter_data); kfree(padapter); @@ -453,7 +453,7 @@ iop3xx_i2c_probe(struct platform_device *pdev) /* set the adapter enumeration # */ adapter_data->id = i2c_id++; - adapter_data->ioaddr = (u32)ioremap(res->start, IOP3XX_I2C_IO_SIZE); + adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE); if (!adapter_data->ioaddr) { ret = -ENOMEM; goto release_region; @@ -498,7 +498,7 @@ iop3xx_i2c_probe(struct platform_device *pdev) return 0; unmap: - iounmap((void __iomem*)adapter_data->ioaddr); + iounmap(adapter_data->ioaddr); release_region: release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h index 8485861f6a36..097e270955d0 100644 --- a/drivers/i2c/busses/i2c-iop3xx.h +++ b/drivers/i2c/busses/i2c-iop3xx.h @@ -97,7 +97,7 @@ #define IOP3XX_I2C_IO_SIZE 0x18 struct i2c_algo_iop3xx_data { - u32 ioaddr; + void __iomem *ioaddr; wait_queue_head_t waitq; spinlock_t lock; u32 SR_enabled, SR_received; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 16242063144f..a9941c65f226 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -59,6 +59,7 @@ enum { MV64XXX_I2C_STATE_INVALID, MV64XXX_I2C_STATE_IDLE, MV64XXX_I2C_STATE_WAITING_FOR_START_COND, + MV64XXX_I2C_STATE_WAITING_FOR_RESTART, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK, MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK, @@ -70,6 +71,7 @@ enum { MV64XXX_I2C_ACTION_INVALID, 
MV64XXX_I2C_ACTION_CONTINUE, MV64XXX_I2C_ACTION_SEND_START, + MV64XXX_I2C_ACTION_SEND_RESTART, MV64XXX_I2C_ACTION_SEND_ADDR_1, MV64XXX_I2C_ACTION_SEND_ADDR_2, MV64XXX_I2C_ACTION_SEND_DATA, @@ -91,6 +93,7 @@ struct mv64xxx_i2c_data { u32 addr2; u32 bytes_left; u32 byte_posn; + u32 send_stop; u32 block; int rc; u32 freq_m; @@ -159,8 +162,15 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status) if ((drv_data->bytes_left == 0) || (drv_data->aborting && (drv_data->byte_posn != 0))) { - drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; - drv_data->state = MV64XXX_I2C_STATE_IDLE; + if (drv_data->send_stop) { + drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; + drv_data->state = MV64XXX_I2C_STATE_IDLE; + } else { + drv_data->action = + MV64XXX_I2C_ACTION_SEND_RESTART; + drv_data->state = + MV64XXX_I2C_STATE_WAITING_FOR_RESTART; + } } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA; drv_data->state = @@ -228,6 +238,15 @@ static void mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) { switch(drv_data->action) { + case MV64XXX_I2C_ACTION_SEND_RESTART: + drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; + drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; + writel(drv_data->cntl_bits, + drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); + drv_data->block = 0; + wake_up_interruptible(&drv_data->waitq); + break; + case MV64XXX_I2C_ACTION_CONTINUE: writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); @@ -386,7 +405,8 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) } static int -mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) +mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg, + int is_first, int is_last) { unsigned long flags; @@ -406,10 +426,18 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) drv_data->bytes_left--; } } else { - drv_data->action = MV64XXX_I2C_ACTION_SEND_START; - drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; + if (is_first) { + drv_data->action = MV64XXX_I2C_ACTION_SEND_START; + drv_data->state = + MV64XXX_I2C_STATE_WAITING_FOR_START_COND; + } else { + drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1; + drv_data->state = + MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK; + } } + drv_data->send_stop = is_last; drv_data->block = 1; mv64xxx_i2c_do_action(drv_data); spin_unlock_irqrestore(&drv_data->lock, flags); @@ -437,9 +465,12 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); int i, rc; - for (i=0; ipdev->dev, "controller timed out, re-init h/w\n"); (void) init_hw(dev); status = -ETIMEDOUT; @@ -498,7 +498,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) } if (timeout == 0) { - /* controler has timedout, re-init the h/w */ + /* controller has timedout, re-init the h/w */ dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n"); (void) init_hw(dev); status = -ETIMEDOUT; @@ -872,6 +872,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &nmk_i2c_algo; + snprintf(adap->name, sizeof(adap->name), + "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start); /* fetch the controller id */ adap->nr = pdev->id; @@ -891,8 +893,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) goto err_init_hw; } - dev_dbg(&pdev->dev, "initialize I2C%d bus on virtual " - "base %p\n", pdev->id, dev->virtbase); + 
dev_info(&pdev->dev, "initialize %s on virtual " + "base %p\n", adap->name, dev->virtbase); ret = i2c_add_numbered_adapter(adap); if (ret) { diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 0070371b29f3..ef3bcb1ce864 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -9,6 +9,41 @@ * kind, whether express or implied. */ +/* + * Device tree configuration: + * + * Required properties: + * - compatible : "opencores,i2c-ocores" + * - reg : bus address start and address range size of device + * - interrupts : interrupt number + * - regstep : size of device registers in bytes + * - clock-frequency : frequency of bus clock in Hz + * + * Example: + * + * i2c0: ocores@a0000000 { + * compatible = "opencores,i2c-ocores"; + * reg = <0xa0000000 0x8>; + * interrupts = <10>; + * + * regstep = <1>; + * clock-frequency = <20000000>; + * + * -- Devices connected on this I2C bus get + * -- defined here; address- and size-cells + * -- apply to these child devices + * + * #address-cells = <1>; + * #size-cells = <0>; + * + * dummy@60 { + * compatible = "dummy"; + * reg = <60>; + * }; + * }; + * + */ + #include #include #include @@ -210,6 +245,32 @@ static struct i2c_adapter ocores_adapter = { .algo = &ocores_algorithm, }; +#ifdef CONFIG_OF +static int ocores_i2c_of_probe(struct platform_device* pdev, + struct ocores_i2c* i2c) +{ + __be32* val; + + val = of_get_property(pdev->dev.of_node, "regstep", NULL); + if (!val) { + dev_err(&pdev->dev, "Missing required parameter 'regstep'"); + return -ENODEV; + } + i2c->regstep = be32_to_cpup(val); + + val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL); + if (!val) { + dev_err(&pdev->dev, + "Missing required parameter 'clock-frequency'"); + return -ENODEV; + } + i2c->clock_khz = be32_to_cpup(val) / 1000; + + return 0; +} +#else +#define ocores_i2c_of_probe(pdev,i2c) -ENODEV +#endif static int __devinit ocores_i2c_probe(struct platform_device *pdev) { @@ -227,37 +288,41 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) if (!res2) return -ENODEV; - pdata = (struct ocores_i2c_platform_data*) pdev->dev.platform_data; - if (!pdata) - return -ENODEV; - - i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; - if (!request_mem_region(res->start, resource_size(res), - pdev->name)) { + if (!devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name)) { dev_err(&pdev->dev, "Memory region busy\n"); - ret = -EBUSY; - goto request_mem_failed; + return -EBUSY; } - i2c->base = ioremap(res->start, resource_size(res)); + i2c->base = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); if (!i2c->base) { dev_err(&pdev->dev, "Unable to map registers\n"); - ret = -EIO; - goto map_failed; + return -EIO; + } + + pdata = pdev->dev.platform_data; + if (pdata) { + i2c->regstep = pdata->regstep; + i2c->clock_khz = pdata->clock_khz; + } else { + ret = ocores_i2c_of_probe(pdev, i2c); + if (ret) + return ret; } - i2c->regstep = pdata->regstep; - i2c->clock_khz = pdata->clock_khz; ocores_init(i2c); init_waitqueue_head(&i2c->wait); - ret = request_irq(res2->start, ocores_isr, 0, pdev->name, i2c); + ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0, + pdev->name, i2c); if (ret) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); - goto request_irq_failed; + return ret; } /* hook up driver to tree */ @@ -265,36 +330,29 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) 
i2c->adap = ocores_adapter; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; +#ifdef CONFIG_OF + i2c->adap.dev.of_node = pdev->dev.of_node; +#endif /* add i2c adapter to i2c tree */ ret = i2c_add_adapter(&i2c->adap); if (ret) { dev_err(&pdev->dev, "Failed to add adapter\n"); - goto add_adapter_failed; + return ret; } /* add in known devices to the bus */ - for (i = 0; i < pdata->num_devices; i++) - i2c_new_device(&i2c->adap, pdata->devices + i); + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) + i2c_new_device(&i2c->adap, pdata->devices + i); + } return 0; - -add_adapter_failed: - free_irq(res2->start, i2c); -request_irq_failed: - iounmap(i2c->base); -map_failed: - release_mem_region(res->start, resource_size(res)); -request_mem_failed: - kfree(i2c); - - return ret; } static int __devexit ocores_i2c_remove(struct platform_device* pdev) { struct ocores_i2c *i2c = platform_get_drvdata(pdev); - struct resource *res; /* disable i2c logic */ oc_setreg(i2c, OCI2C_CONTROL, oc_getreg(i2c, OCI2C_CONTROL) @@ -304,18 +362,6 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev) i2c_del_adapter(&i2c->adap); platform_set_drvdata(pdev, NULL); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res) - free_irq(res->start, i2c); - - iounmap(i2c->base); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) - release_mem_region(res->start, resource_size(res)); - - kfree(i2c); - return 0; } @@ -344,6 +390,16 @@ static int ocores_i2c_resume(struct platform_device *pdev) #define ocores_i2c_resume NULL #endif +#ifdef CONFIG_OF +static struct of_device_id ocores_i2c_match[] = { + { + .compatible = "opencores,i2c-ocores", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, ocores_i2c_match); +#endif + /* work with hotplug and coldplug */ MODULE_ALIAS("platform:ocores-i2c"); @@ -355,6 +411,9 @@ static struct platform_driver ocores_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "ocores-i2c", +#ifdef CONFIG_OF + .of_match_table = ocores_i2c_match, +#endif }, }; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 9d090833e245..b605ff3a1fa0 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -598,12 +598,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, * REVISIT: We should abort the transfer on signals, but the bus goes * into arbitration and we're currently unable to recover from it. 
*/ - if (dev->set_mpu_wkup_lat != NULL) - dev->set_mpu_wkup_lat(dev->dev, dev->latency); r = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); - if (dev->set_mpu_wkup_lat != NULL) - dev->set_mpu_wkup_lat(dev->dev, -1); dev->buf_len = 0; if (r < 0) return r; @@ -654,12 +650,18 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r < 0) goto out; + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, dev->latency); + for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); if (r != 0) break; } + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, -1); + if (r == 0) r = num; diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 53fab518b3da..986e5f62debe 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ MODULE_AUTHOR("Christer Weinigel "); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); +MODULE_ALIAS("platform:cs5535-smb"); MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 @@ -84,10 +86,6 @@ struct scx200_acb_iface { u8 *ptr; char needs_reset; unsigned len; - - /* PCI device info */ - struct pci_dev *pdev; - int bar; }; /* Register Definitions */ @@ -391,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = { static struct scx200_acb_iface *scx200_acb_list; static DEFINE_MUTEX(scx200_acb_list_mutex); -static __init int scx200_acb_probe(struct scx200_acb_iface *iface) +static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; @@ -427,7 +425,7 @@ static __init int scx200_acb_probe(struct scx200_acb_iface *iface) return 0; } -static __init struct scx200_acb_iface *scx200_create_iface(const char *text, +static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text, struct device *dev, int index) { struct scx200_acb_iface *iface; @@ -452,7 +450,7 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text, return iface; } -static int __init scx200_acb_create(struct scx200_acb_iface *iface) +static int __devinit scx200_acb_create(struct scx200_acb_iface *iface) { struct i2c_adapter *adapter; int rc; @@ -472,183 +470,145 @@ static int __init scx200_acb_create(struct scx200_acb_iface *iface) return -ENODEV; } - mutex_lock(&scx200_acb_list_mutex); - iface->next = scx200_acb_list; - scx200_acb_list = iface; - mutex_unlock(&scx200_acb_list_mutex); + if (!adapter->dev.parent) { + /* If there's no dev, we're tracking (ISA) ifaces manually */ + mutex_lock(&scx200_acb_list_mutex); + iface->next = scx200_acb_list; + scx200_acb_list = iface; + mutex_unlock(&scx200_acb_list_mutex); + } return 0; } -static __init int scx200_create_pci(const char *text, struct pci_dev *pdev, - int bar) +static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text, + unsigned long base, int index, struct device *dev) { struct scx200_acb_iface *iface; int rc; - iface = scx200_create_iface(text, &pdev->dev, 0); + iface = scx200_create_iface(text, dev, index); if (iface == NULL) - return -ENOMEM; - - iface->pdev = pdev; - iface->bar = bar; - - rc = pci_enable_device_io(iface->pdev); - if (rc) - goto errout_free; + return NULL; - rc = pci_request_region(iface->pdev, iface->bar, iface->adapter.name); - if (rc) { - printk(KERN_ERR NAME ": can't allocate PCI BAR %d\n", - iface->bar); + if (!request_region(base, 8, iface->adapter.name)) { + printk(KERN_ERR NAME ": can't allocate io 
0x%lx-0x%lx\n", + base, base + 8 - 1); goto errout_free; } - iface->base = pci_resource_start(iface->pdev, iface->bar); + iface->base = base; rc = scx200_acb_create(iface); if (rc == 0) - return 0; + return iface; - pci_release_region(iface->pdev, iface->bar); - pci_dev_put(iface->pdev); + release_region(base, 8); errout_free: kfree(iface); - return rc; + return NULL; } -static int __init scx200_create_isa(const char *text, unsigned long base, - int index) +static int __devinit scx200_probe(struct platform_device *pdev) { struct scx200_acb_iface *iface; - int rc; - - iface = scx200_create_iface(text, NULL, index); - - if (iface == NULL) - return -ENOMEM; + struct resource *res; - if (!request_region(base, 8, iface->adapter.name)) { - printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", - base, base + 8 - 1); - rc = -EBUSY; - goto errout_free; + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); + return -ENODEV; } - iface->base = base; - rc = scx200_acb_create(iface); + iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); + if (!iface) + return -EIO; - if (rc == 0) - return 0; + dev_info(&pdev->dev, "SCx200 device '%s' registered\n", + iface->adapter.name); + platform_set_drvdata(pdev, iface); - release_region(base, 8); - errout_free: - kfree(iface); - return rc; + return 0; } -/* Driver data is an index into the scx200_data array that indicates - * the name and the BAR where the I/O address resource is located. ISA - * devices are flagged with a bar value of -1 */ - -static const struct pci_device_id scx200_pci[] __initconst = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE), - .driver_data = 0 }, - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE), - .driver_data = 0 }, - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA), - .driver_data = 1 }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA), - .driver_data = 2 }, - { 0, } -}; - -static struct { - const char *name; - int bar; -} scx200_data[] = { - { "SCx200", -1 }, - { "CS5535", 0 }, - { "CS5536", 0 } -}; +static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface) +{ + i2c_del_adapter(&iface->adapter); + release_region(iface->base, 8); + kfree(iface); +} -static __init int scx200_scan_pci(void) +static int __devexit scx200_remove(struct platform_device *pdev) { - int data, dev; - int rc = -ENODEV; - struct pci_dev *pdev; + struct scx200_acb_iface *iface; - for(dev = 0; dev < ARRAY_SIZE(scx200_pci); dev++) { - pdev = pci_get_device(scx200_pci[dev].vendor, - scx200_pci[dev].device, NULL); + iface = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); + scx200_cleanup_iface(iface); - if (pdev == NULL) - continue; + return 0; +} - data = scx200_pci[dev].driver_data; +static struct platform_driver scx200_pci_drv = { + .driver = { + .name = "cs5535-smb", + .owner = THIS_MODULE, + }, + .probe = scx200_probe, + .remove = __devexit_p(scx200_remove), +}; - /* if .bar is greater or equal to zero, this is a - * PCI device - otherwise, we assume - that the ports are ISA based - */ +static const struct pci_device_id scx200_isa[] __initconst = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, + { 0, } +}; - if (scx200_data[data].bar >= 0) - rc = scx200_create_pci(scx200_data[data].name, pdev, - scx200_data[data].bar); - else { - int i; +static __init void scx200_scan_isa(void) +{ + int i; - 
pci_dev_put(pdev); - for (i = 0; i < MAX_DEVICES; ++i) { - if (base[i] == 0) - continue; + if (!pci_dev_present(scx200_isa)) + return; - rc = scx200_create_isa(scx200_data[data].name, - base[i], - i); - } - } + for (i = 0; i < MAX_DEVICES; ++i) { + if (base[i] == 0) + continue; - break; + /* XXX: should we care about failures? */ + scx200_create_dev("SCx200", base[i], i, NULL); } - - return rc; } static int __init scx200_acb_init(void) { - int rc; - pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n"); - rc = scx200_scan_pci(); + /* First scan for ISA-based devices */ + scx200_scan_isa(); /* XXX: should we care about errors? */ /* If at least one bus was created, init must succeed */ if (scx200_acb_list) return 0; - return rc; + + /* No ISA devices; register the platform driver for PCI-based devices */ + return platform_driver_register(&scx200_pci_drv); } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; + platform_driver_unregister(&scx200_pci_drv); + mutex_lock(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; mutex_unlock(&scx200_acb_list_mutex); - i2c_del_adapter(&iface->adapter); - - if (iface->pdev) { - pci_release_region(iface->pdev, iface->bar); - pci_dev_put(iface->pdev); - } - else - release_region(iface->base, 8); + scx200_cleanup_iface(iface); - kfree(iface); mutex_lock(&scx200_acb_list_mutex); } mutex_unlock(&scx200_acb_list_mutex); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index c7db6980e3a3..f0bd5bcdf563 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -196,88 +196,60 @@ static int i2c_device_pm_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm) { - if (pm_runtime_suspended(dev)) - return 0; - else - return pm->suspend ? pm->suspend(dev) : 0; - } - - return i2c_legacy_suspend(dev, PMSG_SUSPEND); + if (pm) + return pm_generic_suspend(dev); + else + return i2c_legacy_suspend(dev, PMSG_SUSPEND); } static int i2c_device_pm_resume(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - int ret; if (pm) - ret = pm->resume ? pm->resume(dev) : 0; + return pm_generic_resume(dev); else - ret = i2c_legacy_resume(dev); - - return ret; + return i2c_legacy_resume(dev); } static int i2c_device_pm_freeze(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm) { - if (pm_runtime_suspended(dev)) - return 0; - else - return pm->freeze ? pm->freeze(dev) : 0; - } - - return i2c_legacy_suspend(dev, PMSG_FREEZE); + if (pm) + return pm_generic_freeze(dev); + else + return i2c_legacy_suspend(dev, PMSG_FREEZE); } static int i2c_device_pm_thaw(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm) { - if (pm_runtime_suspended(dev)) - return 0; - else - return pm->thaw ? pm->thaw(dev) : 0; - } - - return i2c_legacy_resume(dev); + if (pm) + return pm_generic_thaw(dev); + else + return i2c_legacy_resume(dev); } static int i2c_device_pm_poweroff(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm) { - if (pm_runtime_suspended(dev)) - return 0; - else - return pm->poweroff ? pm->poweroff(dev) : 0; - } - - return i2c_legacy_suspend(dev, PMSG_HIBERNATE); + if (pm) + return pm_generic_poweroff(dev); + else + return i2c_legacy_suspend(dev, PMSG_HIBERNATE); } static int i2c_device_pm_restore(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; - int ret; if (pm) - ret = pm->restore ? pm->restore(dev) : 0; + return pm_generic_restore(dev); else - ret = i2c_legacy_resume(dev); - - if (!ret) { - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } - - return ret; + return i2c_legacy_resume(dev); } #else /* !CONFIG_PM_SLEEP */ #define i2c_device_pm_suspend NULL @@ -1019,6 +991,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, } static int __unregister_client(struct device *dev, void *dummy) +{ + struct i2c_client *client = i2c_verify_client(dev); + if (client && strcmp(client->name, "dummy")) + i2c_unregister_device(client); + return 0; +} + +static int __unregister_dummy(struct device *dev, void *dummy) { struct i2c_client *client = i2c_verify_client(dev); if (client) @@ -1075,8 +1055,12 @@ int i2c_del_adapter(struct i2c_adapter *adap) mutex_unlock(&adap->userspace_clients_lock); /* Detach any active clients. This can't fail, thus we do not - checking the returned value. */ + * check the returned value. This is a two-pass process, because + * we can't remove the dummy devices during the first pass: they + * could have been instantiated by real devices wishing to clean + * them up properly, so we give them a chance to do that first. */ res = device_for_each_child(&adap->dev, NULL, __unregister_client); + res = device_for_each_child(&adap->dev, NULL, __unregister_dummy); #ifdef CONFIG_I2C_COMPAT class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, @@ -1140,6 +1124,14 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) if (res) return res; + /* Drivers should switch to dev_pm_ops instead. */ + if (driver->suspend) + pr_warn("i2c-core: driver [%s] using legacy suspend method\n", + driver->driver.name); + if (driver->resume) + pr_warn("i2c-core: driver [%s] using legacy resume method\n", + driver->driver.name); + pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); INIT_LIST_HEAD(&driver->clients); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 56ac09d6c930..7acb32e7f817 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -59,6 +59,8 @@ #include /* ktime_get_real() */ #include #include +#include +#include #include #define INTEL_IDLE_VERSION "0.4" @@ -73,6 +75,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; static unsigned int mwait_substates; +#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ @@ -81,6 +84,14 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); static struct cpuidle_state *cpuidle_state_table; +/* + * Set this flag for states where the HW flushes the TLB for us + * and so we don't need cross-calls to keep it consistent. + * If this flag is set, SW flushes the TLB, so even if the + * HW doesn't do the flushing, this flag is safe to use. + */ +#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 + /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. 
@@ -122,7 +133,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { .driver_data = (void *) 0x00, .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, - .target_residency = 4, + .target_residency = 1, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "SNB-C3", @@ -130,7 +141,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { .driver_data = (void *) 0x10, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, - .target_residency = 160, + .target_residency = 211, .enter = &intel_idle }, { /* MWAIT C3 */ .name = "SNB-C6", @@ -138,7 +149,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { .driver_data = (void *) 0x20, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, - .target_residency = 208, + .target_residency = 345, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "SNB-C7", @@ -146,7 +157,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { .driver_data = (void *) 0x30, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, - .target_residency = 300, + .target_residency = 345, .enter = &intel_idle }, }; @@ -220,8 +231,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) kt_before = ktime_get_real(); stop_critical_timings(); - trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu); - trace_cpu_idle((eax >> 4) + 1, cpu); if (!need_resched()) { __monitor((void *)¤t_thread_info()->flags, 0, 0); @@ -243,6 +252,39 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) return usec_delta; } +static void __setup_broadcast_timer(void *arg) +{ + unsigned long reason = (unsigned long)arg; + int cpu = smp_processor_id(); + + reason = reason ? + CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; + + clockevents_notify(reason, &cpu); +} + +static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n, + unsigned long action, void *hcpu) +{ + int hotcpu = (unsigned long)hcpu; + + switch (action & 0xf) { + case CPU_ONLINE: + smp_call_function_single(hotcpu, __setup_broadcast_timer, + (void *)true, 1); + break; + case CPU_DOWN_PREPARE: + smp_call_function_single(hotcpu, __setup_broadcast_timer, + (void *)false, 1); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata setup_broadcast_notifier = { + .notifier_call = setup_broadcast_cpuhp_notify, +}; + /* * intel_idle_probe() */ @@ -305,7 +347,11 @@ static int intel_idle_probe(void) } if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ - lapic_timer_reliable_states = 0xFFFFFFFF; + lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; + else { + smp_call_function(__setup_broadcast_timer, (void *)true, 1); + register_cpu_notifier(&setup_broadcast_notifier); + } pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); @@ -403,6 +449,10 @@ static int __init intel_idle_init(void) { int retval; + /* Do not load intel_idle at all for now if idle= is passed */ + if (boot_option_idle_override != IDLE_NO_OVERRIDE) + return -ENODEV; + retval = intel_idle_probe(); if (retval) return retval; @@ -428,6 +478,11 @@ static void __exit intel_idle_exit(void) intel_idle_cpuidle_devices_uninit(); cpuidle_unregister_driver(&intel_idle_driver); + if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { + smp_call_function(__setup_broadcast_timer, (void *)false, 1); + unregister_cpu_notifier(&setup_broadcast_notifier); + } + return; } diff --git 
a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 4bb997aa39d0..83d2e19d31ae 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -689,7 +689,7 @@ struct t3_swrq { * A T3 WQ implements both the SQ and RQ. */ struct t3_wq { - union t3_wr *queue; /* DMA accessable memory */ + union t3_wr *queue; /* DMA accessible memory */ dma_addr_t dma_addr; /* DMA address for HW */ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */ u32 error; /* 1 once we go to ERROR */ diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index cc600c2dd0b3..2fe19ec9ba60 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -46,7 +46,6 @@ #include #include #include -#include #include diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index dbbb0e85afe4..abd409d592ef 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -332,7 +332,7 @@ MODULE_PARM_DESC(txselect, \ #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) /* - * Per-context kernel registers. Acess only with qib_read_kreg_ctxt() + * Per-context kernel registers. Access only with qib_read_kreg_ctxt() * or qib_write_kreg_ctxt() */ #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0) diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 5b596165b571..56eb471b5576 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -255,6 +255,16 @@ config JOYSTICK_AMIGA To compile this driver as a module, choose M here: the module will be called amijoy. +config JOYSTICK_AS5011 + tristate "Austria Microsystem AS5011 joystick" + depends on I2C + help + Say Y here if you have an AS5011 digital joystick connected to your + system. + + To compile this driver as a module, choose M here: the + module will be called as5011. + config JOYSTICK_JOYDUMP tristate "Gameport data dumper" select GAMEPORT diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile index f3a8cbe2abb6..92dc0de9dfed 100644 --- a/drivers/input/joystick/Makefile +++ b/drivers/input/joystick/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_JOYSTICK_A3D) += a3d.o obj-$(CONFIG_JOYSTICK_ADI) += adi.o obj-$(CONFIG_JOYSTICK_AMIGA) += amijoy.o +obj-$(CONFIG_JOYSTICK_AS5011) += as5011.o obj-$(CONFIG_JOYSTICK_ANALOG) += analog.o obj-$(CONFIG_JOYSTICK_COBRA) += cobra.o obj-$(CONFIG_JOYSTICK_DB9) += db9.o diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c new file mode 100644 index 000000000000..f6732b57ca07 --- /dev/null +++ b/drivers/input/joystick/as5011.c @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2010, 2011 Fabien Marteau + * Sponsored by ARMadeus Systems + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Driver for Austria Microsystems joysticks AS5011 + * + * TODO: + * - Power on the chip when open() and power down when close() + * - Manage power mode + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick" +#define MODULE_DEVICE_ALIAS "as5011" + +MODULE_AUTHOR("Fabien Marteau "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); + +/* registers */ +#define AS5011_CTRL1 0x76 +#define AS5011_CTRL2 0x75 +#define AS5011_XP 0x43 +#define AS5011_XN 0x44 +#define AS5011_YP 0x53 +#define AS5011_YN 0x54 +#define AS5011_X_REG 0x41 +#define AS5011_Y_REG 0x42 +#define AS5011_X_RES_INT 0x51 +#define AS5011_Y_RES_INT 0x52 + +/* CTRL1 bits */ +#define AS5011_CTRL1_LP_PULSED 0x80 +#define AS5011_CTRL1_LP_ACTIVE 0x40 +#define AS5011_CTRL1_LP_CONTINUE 0x20 +#define AS5011_CTRL1_INT_WUP_EN 0x10 +#define AS5011_CTRL1_INT_ACT_EN 0x08 +#define AS5011_CTRL1_EXT_CLK_EN 0x04 +#define AS5011_CTRL1_SOFT_RST 0x02 +#define AS5011_CTRL1_DATA_VALID 0x01 + +/* CTRL2 bits */ +#define AS5011_CTRL2_EXT_SAMPLE_EN 0x08 +#define AS5011_CTRL2_RC_BIAS_ON 0x04 +#define AS5011_CTRL2_INV_SPINNING 0x02 + +#define AS5011_MAX_AXIS 80 +#define AS5011_MIN_AXIS (-80) +#define AS5011_FUZZ 8 +#define AS5011_FLAT 40 + +struct as5011_device { + struct input_dev *input_dev; + struct i2c_client *i2c_client; + unsigned int button_gpio; + unsigned int button_irq; + unsigned int axis_irq; +}; + +static int as5011_i2c_write(struct i2c_client *client, + uint8_t aregaddr, + uint8_t avalue) +{ + uint8_t data[2] = { aregaddr, avalue }; + struct i2c_msg msg = { + client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data + }; + int error; + + error = i2c_transfer(client->adapter, &msg, 1); + return error < 0 ? error : 0; +} + +static int as5011_i2c_read(struct i2c_client *client, + uint8_t aregaddr, signed char *value) +{ + uint8_t data[2] = { aregaddr }; + struct i2c_msg msg_set[2] = { + { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data }, + { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data } + }; + int error; + + error = i2c_transfer(client->adapter, msg_set, 2); + if (error < 0) + return error; + + *value = data[0] & 0x80 ? 
-1 * (1 + ~data[0]) : data[0]; + return 0; +} + +static irqreturn_t as5011_button_interrupt(int irq, void *dev_id) +{ + struct as5011_device *as5011 = dev_id; + int val = gpio_get_value_cansleep(as5011->button_gpio); + + input_report_key(as5011->input_dev, BTN_JOYSTICK, !val); + input_sync(as5011->input_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id) +{ + struct as5011_device *as5011 = dev_id; + int error; + signed char x, y; + + error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x); + if (error < 0) + goto out; + + error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y); + if (error < 0) + goto out; + + input_report_abs(as5011->input_dev, ABS_X, x); + input_report_abs(as5011->input_dev, ABS_Y, y); + input_sync(as5011->input_dev); + +out: + return IRQ_HANDLED; +} + +static int __devinit as5011_configure_chip(struct as5011_device *as5011, + const struct as5011_platform_data *plat_dat) +{ + struct i2c_client *client = as5011->i2c_client; + int error; + signed char value; + + /* chip soft reset */ + error = as5011_i2c_write(client, AS5011_CTRL1, + AS5011_CTRL1_SOFT_RST); + if (error < 0) { + dev_err(&client->dev, "Soft reset failed\n"); + return error; + } + + mdelay(10); + + error = as5011_i2c_write(client, AS5011_CTRL1, + AS5011_CTRL1_LP_PULSED | + AS5011_CTRL1_LP_ACTIVE | + AS5011_CTRL1_INT_ACT_EN); + if (error < 0) { + dev_err(&client->dev, "Power config failed\n"); + return error; + } + + error = as5011_i2c_write(client, AS5011_CTRL2, + AS5011_CTRL2_INV_SPINNING); + if (error < 0) { + dev_err(&client->dev, "Can't invert spinning\n"); + return error; + } + + /* write threshold */ + error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp); + if (error < 0) { + dev_err(&client->dev, "Can't write threshold\n"); + return error; + } + + error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn); + if (error < 0) { + dev_err(&client->dev, "Can't write threshold\n"); + return error; + } + + error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp); + if (error < 0) { + dev_err(&client->dev, "Can't write threshold\n"); + return error; + } + + error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn); + if (error < 0) { + dev_err(&client->dev, "Can't write threshold\n"); + return error; + } + + /* to free irq gpio in chip */ + error = as5011_i2c_read(client, AS5011_X_RES_INT, &value); + if (error < 0) { + dev_err(&client->dev, "Can't read i2c X resolution value\n"); + return error; + } + + return 0; +} + +static int __devinit as5011_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct as5011_platform_data *plat_data; + struct as5011_device *as5011; + struct input_dev *input_dev; + int irq; + int error; + + plat_data = client->dev.platform_data; + if (!plat_data) + return -EINVAL; + + if (!plat_data->axis_irq) { + dev_err(&client->dev, "No axis IRQ?\n"); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_PROTOCOL_MANGLING)) { + dev_err(&client->dev, + "need i2c bus that supports protocol mangling\n"); + return -ENODEV; + } + + as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!as5011 || !input_dev) { + dev_err(&client->dev, + "Can't allocate memory for device structure\n"); + error = -ENOMEM; + goto err_free_mem; + } + + as5011->i2c_client = client; + as5011->input_dev = input_dev; + as5011->button_gpio = plat_data->button_gpio; + as5011->axis_irq = plat_data->axis_irq; + + input_dev->name = "Austria 
Microsystem as5011 joystick"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(BTN_JOYSTICK, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_X, + AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT); + input_set_abs_params(as5011->input_dev, ABS_Y, + AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT); + + error = gpio_request(as5011->button_gpio, "AS5011 button"); + if (error < 0) { + dev_err(&client->dev, "Failed to request button gpio\n"); + goto err_free_mem; + } + + irq = gpio_to_irq(as5011->button_gpio); + if (irq < 0) { + dev_err(&client->dev, + "Failed to get irq number for button gpio\n"); + goto err_free_button_gpio; + } + + as5011->button_irq = irq; + + error = request_threaded_irq(as5011->button_irq, + NULL, as5011_button_interrupt, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "as5011_button", as5011); + if (error < 0) { + dev_err(&client->dev, + "Can't allocate button irq %d\n", as5011->button_irq); + goto err_free_button_gpio; + } + + error = as5011_configure_chip(as5011, plat_data); + if (error) + goto err_free_button_irq; + + error = request_threaded_irq(as5011->axis_irq, NULL, + as5011_axis_interrupt, + plat_data->axis_irqflags, + "as5011_joystick", as5011); + if (error) { + dev_err(&client->dev, + "Can't allocate axis irq %d\n", plat_data->axis_irq); + goto err_free_button_irq; + } + + error = input_register_device(as5011->input_dev); + if (error) { + dev_err(&client->dev, "Failed to register input device\n"); + goto err_free_axis_irq; + } + + i2c_set_clientdata(client, as5011); + + return 0; + +err_free_axis_irq: + free_irq(as5011->axis_irq, as5011); +err_free_button_irq: + free_irq(as5011->button_irq, as5011); +err_free_button_gpio: + gpio_free(as5011->button_gpio); +err_free_mem: + input_free_device(input_dev); + kfree(as5011); + + return error; +} + +static int __devexit as5011_remove(struct i2c_client *client) +{ + struct as5011_device *as5011 = i2c_get_clientdata(client); + + free_irq(as5011->axis_irq, as5011); + free_irq(as5011->button_irq, as5011); + gpio_free(as5011->button_gpio); + + input_unregister_device(as5011->input_dev); + kfree(as5011); + + return 0; +} + +static const struct i2c_device_id as5011_id[] = { + { MODULE_DEVICE_ALIAS, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, as5011_id); + +static struct i2c_driver as5011_driver = { + .driver = { + .name = "as5011", + }, + .probe = as5011_probe, + .remove = __devexit_p(as5011_remove), + .id_table = as5011_id, +}; + +static int __init as5011_init(void) +{ + return i2c_add_driver(&as5011_driver); +} +module_init(as5011_init); + +static void __exit as5011_exit(void) +{ + i2c_del_driver(&as5011_driver); +} +module_exit(as5011_exit); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index f829998fabe6..7b3c0b8fa432 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -12,18 +12,6 @@ menuconfig INPUT_KEYBOARD if INPUT_KEYBOARD -config KEYBOARD_AAED2000 - tristate "AAED-2000 keyboard" - depends on MACH_AAED2000 - select INPUT_POLLDEV - default y - help - Say Y here to enable the keyboard on the Agilent AAED-2000 - development board. - - To compile this driver as a module, choose M here: the - module will be called aaed2000_kbd. 
- config KEYBOARD_ADP5520 tristate "Keypad Support for ADP5520 PMIC" depends on PMIC_ADP5520 diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 8933e9ca938d..4e5571b72cda 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -4,7 +4,6 @@ # Each configuration option enables a list of files. -obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c deleted file mode 100644 index 18222a689a03..000000000000 --- a/drivers/input/keyboard/aaed2000_kbd.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Keyboard driver for the AAED-2000 dev board - * - * Copyright (c) 2006 Nicolas Bellido Y Ortega - * - * Based on corgikbd.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define KB_ROWS 12 -#define KB_COLS 8 -#define KB_ROWMASK(r) (1 << (r)) -#define SCANCODE(r,c) (((c) * KB_ROWS) + (r)) -#define NR_SCANCODES (KB_COLS * KB_ROWS) - -#define SCAN_INTERVAL (50) /* ms */ -#define KB_ACTIVATE_DELAY (20) /* us */ - -static unsigned char aaedkbd_keycode[NR_SCANCODES] = { - KEY_9, KEY_0, KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, 0, KEY_SPACE, KEY_KP6, 0, KEY_KPDOT, 0, 0, - KEY_K, KEY_M, KEY_O, KEY_DOT, KEY_SLASH, 0, KEY_F, 0, 0, 0, KEY_LEFTSHIFT, 0, - KEY_I, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, 0, 0, 0, 0, KEY_RIGHTSHIFT, 0, - KEY_8, KEY_L, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_ENTER, 0, 0, 0, 0, 0, 0, 0, - KEY_J, KEY_H, KEY_B, KEY_KP8, KEY_KP4, 0, KEY_C, KEY_D, KEY_S, KEY_A, 0, KEY_CAPSLOCK, - KEY_Y, KEY_U, KEY_N, KEY_T, 0, 0, KEY_R, KEY_E, KEY_W, KEY_Q, 0, KEY_TAB, - KEY_7, KEY_6, KEY_G, 0, KEY_5, 0, KEY_4, KEY_3, KEY_2, KEY_1, 0, KEY_GRAVE, - 0, 0, KEY_COMMA, 0, KEY_KP2, 0, KEY_V, KEY_LEFTALT, KEY_X, KEY_Z, 0, KEY_LEFTCTRL -}; - -struct aaedkbd { - unsigned char keycode[ARRAY_SIZE(aaedkbd_keycode)]; - struct input_polled_dev *poll_dev; - int kbdscan_state[KB_COLS]; - int kbdscan_count[KB_COLS]; -}; - -#define KBDSCAN_STABLE_COUNT 2 - -static void aaedkbd_report_col(struct aaedkbd *aaedkbd, - unsigned int col, unsigned int rowd) -{ - unsigned int scancode, pressed; - unsigned int row; - - for (row = 0; row < KB_ROWS; row++) { - scancode = SCANCODE(row, col); - pressed = rowd & KB_ROWMASK(row); - - input_report_key(aaedkbd->poll_dev->input, - aaedkbd->keycode[scancode], pressed); - } -} - -/* Scan the hardware keyboard and push any changes up through the input layer */ -static void aaedkbd_poll(struct input_polled_dev *dev) -{ - struct aaedkbd *aaedkbd = dev->private; - unsigned int col, rowd; - - col = 0; - do { - AAEC_GPIO_KSCAN = col + 8; - udelay(KB_ACTIVATE_DELAY); - rowd = AAED_EXT_GPIO & AAED_EGPIO_KBD_SCAN; - - if (rowd != aaedkbd->kbdscan_state[col]) { - aaedkbd->kbdscan_count[col] = 0; - aaedkbd->kbdscan_state[col] = rowd; - } else if (++aaedkbd->kbdscan_count[col] >= KBDSCAN_STABLE_COUNT) { - aaedkbd_report_col(aaedkbd, col, rowd); - col++; - } - } while (col < KB_COLS); - - AAEC_GPIO_KSCAN = 0x07; - input_sync(dev->input); -} - -static int __devinit aaedkbd_probe(struct platform_device *pdev) -{ - struct aaedkbd *aaedkbd; - struct input_polled_dev 
*poll_dev; - struct input_dev *input_dev; - int i; - int error; - - aaedkbd = kzalloc(sizeof(struct aaedkbd), GFP_KERNEL); - poll_dev = input_allocate_polled_device(); - if (!aaedkbd || !poll_dev) { - error = -ENOMEM; - goto fail; - } - - platform_set_drvdata(pdev, aaedkbd); - - aaedkbd->poll_dev = poll_dev; - memcpy(aaedkbd->keycode, aaedkbd_keycode, sizeof(aaedkbd->keycode)); - - poll_dev->private = aaedkbd; - poll_dev->poll = aaedkbd_poll; - poll_dev->poll_interval = SCAN_INTERVAL; - - input_dev = poll_dev->input; - input_dev->name = "AAED-2000 Keyboard"; - input_dev->phys = "aaedkbd/input0"; - input_dev->id.bustype = BUS_HOST; - input_dev->id.vendor = 0x0001; - input_dev->id.product = 0x0001; - input_dev->id.version = 0x0100; - input_dev->dev.parent = &pdev->dev; - - input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); - input_dev->keycode = aaedkbd->keycode; - input_dev->keycodesize = sizeof(unsigned char); - input_dev->keycodemax = ARRAY_SIZE(aaedkbd_keycode); - - for (i = 0; i < ARRAY_SIZE(aaedkbd_keycode); i++) - set_bit(aaedkbd->keycode[i], input_dev->keybit); - clear_bit(0, input_dev->keybit); - - error = input_register_polled_device(aaedkbd->poll_dev); - if (error) - goto fail; - - return 0; - - fail: kfree(aaedkbd); - input_free_polled_device(poll_dev); - return error; -} - -static int __devexit aaedkbd_remove(struct platform_device *pdev) -{ - struct aaedkbd *aaedkbd = platform_get_drvdata(pdev); - - input_unregister_polled_device(aaedkbd->poll_dev); - input_free_polled_device(aaedkbd->poll_dev); - kfree(aaedkbd); - - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:aaed2000-keyboard"); - -static struct platform_driver aaedkbd_driver = { - .probe = aaedkbd_probe, - .remove = __devexit_p(aaedkbd_remove), - .driver = { - .name = "aaed2000-keyboard", - .owner = THIS_MODULE, - }, -}; - -static int __init aaedkbd_init(void) -{ - return platform_driver_register(&aaedkbd_driver); -} - -static void __exit aaedkbd_exit(void) -{ - platform_driver_unregister(&aaedkbd_driver); -} - -module_init(aaedkbd_init); -module_exit(aaedkbd_exit); - -MODULE_AUTHOR("Nicolas Bellido Y Ortega"); -MODULE_DESCRIPTION("AAED-2000 Keyboard Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index bcb1fdedb595..307eef77a172 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -229,7 +229,7 @@ config SERIO_PS2MULT tristate "TQC PS/2 multiplexer" help Say Y here if you have the PS/2 line multiplexer like the one - present on TQC boads. + present on TQC boards. To compile this driver as a module, choose M here: the module will be called ps2mult. 
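
The i2c-core changes above route system sleep through the pm_generic_* helpers and add a warning when a driver still registers the legacy i2c suspend/resume methods; the touchscreen driver diffs that follow (ad7879-i2c, cy8ctmg110_ts, eeti_ts, mcs5000_ts, migor_ts) all convert to that model. As a minimal illustrative sketch of the same conversion pattern — not code from this series; the "foo_ts" driver name and its callbacks are placeholders:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_ts_suspend(struct device *dev)
{
	/* dev_pm_ops callbacks take a struct device, not an i2c_client */
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "entering sleep\n");
	/* device-specific power-down would go here */
	return 0;
}

static int foo_ts_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "waking up\n");
	/* device-specific re-initialization would go here */
	return 0;
}

/* expands to a struct dev_pm_ops using the two callbacks for both
 * suspend/resume and the hibernation transitions */
static SIMPLE_DEV_PM_OPS(foo_ts_pm, foo_ts_suspend, foo_ts_resume);
#endif

static struct i2c_driver foo_ts_driver = {
	.driver = {
		.name	= "foo_ts",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &foo_ts_pm,	/* replaces legacy .suspend/.resume */
#endif
	},
	/* .probe, .remove and .id_table are unchanged by the PM conversion */
};

With dev_pm_ops wired up this way, i2c_device_pm_suspend() and its siblings dispatch to pm_generic_suspend() and friends, and the "using legacy suspend method" warning added to i2c_register_driver() no longer fires for the driver.
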
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5ae0fc4578fe..bb9f5d31f0d0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -424,6 +424,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), }, }, + { + /* Dell Vostro V13 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + }, + }, { } }; @@ -545,6 +552,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { }; #endif +static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { + { + /* Dell Vostro V13 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + }, + }, + { } +}; + /* * Some Wistron based laptops need us to explicitly enable the 'Dritek * keyboard extension' to make their extra keys start generating scancodes. @@ -896,6 +914,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_notimeout_table)) + i8042_notimeout = true; + if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; #endif /* CONFIG_X86 */ diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index c04ff00a3663..ac4c93689ab9 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -63,6 +63,10 @@ static bool i8042_noloop; module_param_named(noloop, i8042_noloop, bool, 0); MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port"); +static bool i8042_notimeout; +module_param_named(notimeout, i8042_notimeout, bool, 0); +MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); + #ifdef CONFIG_X86 static bool i8042_dritek; module_param_named(dritek, i8042_dritek, bool, 0); @@ -504,7 +508,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) } else { dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) | - ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0); + ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0); port_no = (str & I8042_STR_AUXDATA) ? 
I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 07ac77d393a4..0c9f4b158ff0 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -610,7 +610,7 @@ config TOUCHSCREEN_USB_ZYTRONIC config TOUCHSCREEN_USB_ETT_TC45USB default y - bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED + bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED depends on TOUCHSCREEN_USB_COMPOSITE config TOUCHSCREEN_USB_NEXIO diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c index d82a38ee9a3e..4e4e58cec6c8 100644 --- a/drivers/input/touchscreen/ad7879-i2c.c +++ b/drivers/input/touchscreen/ad7879-i2c.c @@ -10,14 +10,16 @@ #include #include #include +#include #include "ad7879.h" #define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */ #ifdef CONFIG_PM -static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message) +static int ad7879_i2c_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct ad7879 *ts = i2c_get_clientdata(client); ad7879_suspend(ts); @@ -25,17 +27,17 @@ static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message) return 0; } -static int ad7879_i2c_resume(struct i2c_client *client) +static int ad7879_i2c_resume(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct ad7879 *ts = i2c_get_clientdata(client); ad7879_resume(ts); return 0; } -#else -# define ad7879_i2c_suspend NULL -# define ad7879_i2c_resume NULL + +static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume); #endif /* All registers are word-sized. @@ -117,11 +119,12 @@ static struct i2c_driver ad7879_i2c_driver = { .driver = { .name = "ad7879", .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &ad7879_i2c_pm, +#endif }, .probe = ad7879_i2c_probe, .remove = __devexit_p(ad7879_i2c_remove), - .suspend = ad7879_i2c_suspend, - .resume = ad7879_i2c_resume, .id_table = ad7879_id, }; diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c index d0c3a7229adf..a93c5c26ab3f 100644 --- a/drivers/input/touchscreen/cy8ctmg110_ts.c +++ b/drivers/input/touchscreen/cy8ctmg110_ts.c @@ -280,8 +280,9 @@ err_free_mem: } #ifdef CONFIG_PM -static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg) +static int cy8ctmg110_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct cy8ctmg110 *ts = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) @@ -293,8 +294,9 @@ static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg) return 0; } -static int cy8ctmg110_resume(struct i2c_client *client) +static int cy8ctmg110_resume(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct cy8ctmg110 *ts = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) @@ -305,6 +307,8 @@ static int cy8ctmg110_resume(struct i2c_client *client) } return 0; } + +static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume); #endif static int __devexit cy8ctmg110_remove(struct i2c_client *client) @@ -335,14 +339,13 @@ static struct i2c_driver cy8ctmg110_driver = { .driver = { .owner = THIS_MODULE, .name = CY8CTMG110_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &cy8ctmg110_pm, +#endif }, .id_table = cy8ctmg110_idtable, .probe = cy8ctmg110_probe, .remove = __devexit_p(cy8ctmg110_remove), -#ifdef CONFIG_PM - .suspend = 
cy8ctmg110_suspend, - .resume = cy8ctmg110_resume, -#endif }; static int __init cy8ctmg110_init(void) diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c index 7a3a916f84a8..7f8f538a9806 100644 --- a/drivers/input/touchscreen/eeti_ts.c +++ b/drivers/input/touchscreen/eeti_ts.c @@ -261,8 +261,9 @@ static int __devexit eeti_ts_remove(struct i2c_client *client) } #ifdef CONFIG_PM -static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg) +static int eeti_ts_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct eeti_ts_priv *priv = i2c_get_clientdata(client); struct input_dev *input_dev = priv->input; @@ -279,8 +280,9 @@ static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg) return 0; } -static int eeti_ts_resume(struct i2c_client *client) +static int eeti_ts_resume(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct eeti_ts_priv *priv = i2c_get_clientdata(client); struct input_dev *input_dev = priv->input; @@ -296,9 +298,8 @@ static int eeti_ts_resume(struct i2c_client *client) return 0; } -#else -#define eeti_ts_suspend NULL -#define eeti_ts_resume NULL + +static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume); #endif static const struct i2c_device_id eeti_ts_id[] = { @@ -310,11 +311,12 @@ MODULE_DEVICE_TABLE(i2c, eeti_ts_id); static struct i2c_driver eeti_ts_driver = { .driver = { .name = "eeti_ts", +#ifdef CONFIG_PM + .pm = &eeti_ts_pm, +#endif }, .probe = eeti_ts_probe, .remove = __devexit_p(eeti_ts_remove), - .suspend = eeti_ts_suspend, - .resume = eeti_ts_resume, .id_table = eeti_ts_id, }; diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c index 6ee9940aaf5b..2d84c80ceb66 100644 --- a/drivers/input/touchscreen/mcs5000_ts.c +++ b/drivers/input/touchscreen/mcs5000_ts.c @@ -261,25 +261,27 @@ static int __devexit mcs5000_ts_remove(struct i2c_client *client) } #ifdef CONFIG_PM -static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg) +static int mcs5000_ts_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); + /* Touch sleep mode */ i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP); return 0; } -static int mcs5000_ts_resume(struct i2c_client *client) +static int mcs5000_ts_resume(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct mcs5000_ts_data *data = i2c_get_clientdata(client); mcs5000_ts_phys_init(data); return 0; } -#else -#define mcs5000_ts_suspend NULL -#define mcs5000_ts_resume NULL + +static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume); #endif static const struct i2c_device_id mcs5000_ts_id[] = { @@ -291,10 +293,11 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id); static struct i2c_driver mcs5000_ts_driver = { .probe = mcs5000_ts_probe, .remove = __devexit_p(mcs5000_ts_remove), - .suspend = mcs5000_ts_suspend, - .resume = mcs5000_ts_resume, .driver = { .name = "mcs5000_ts", +#ifdef CONFIG_PM + .pm = &mcs5000_ts_pm, +#endif }, .id_table = mcs5000_ts_id, }; diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c index defe5dd3627c..5803bd0c1cca 100644 --- a/drivers/input/touchscreen/migor_ts.c +++ b/drivers/input/touchscreen/migor_ts.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -226,8 +227,9 @@ static int migor_ts_remove(struct i2c_client *client) return 0; } -static int migor_ts_suspend(struct i2c_client *client, 
pm_message_t mesg) +static int migor_ts_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = dev_get_drvdata(&client->dev); if (device_may_wakeup(&client->dev)) @@ -236,8 +238,9 @@ static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg) return 0; } -static int migor_ts_resume(struct i2c_client *client) +static int migor_ts_resume(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = dev_get_drvdata(&client->dev); if (device_may_wakeup(&client->dev)) @@ -246,6 +249,8 @@ static int migor_ts_resume(struct i2c_client *client) return 0; } +static SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume); + static const struct i2c_device_id migor_ts_id[] = { { "migor_ts", 0 }, { } @@ -255,11 +260,10 @@ MODULE_DEVICE_TABLE(i2c, migor_ts); static struct i2c_driver migor_ts_driver = { .driver = { .name = "migor_ts", + .pm = &migor_ts_pm, }, .probe = migor_ts_probe, .remove = migor_ts_remove, - .suspend = migor_ts_suspend, - .resume = migor_ts_resume, .id_table = migor_ts_id, }; diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 8ed53aded2d3..5cb8449c909d 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c @@ -3,6 +3,7 @@ * * Copyright (c) 2008 Jaya Kumar * Copyright (c) 2010 Red Hat, Inc. + * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for @@ -64,11 +65,11 @@ struct w8001_coord { /* touch query reply packet */ struct w8001_touch_query { + u16 x; + u16 y; u8 panel_res; u8 capacity_res; u8 sensor_id; - u16 x; - u16 y; }; /* @@ -87,9 +88,14 @@ struct w8001 { char phys[32]; int type; unsigned int pktlen; + u16 max_touch_x; + u16 max_touch_y; + u16 max_pen_x; + u16 max_pen_y; + char name[64]; }; -static void parse_data(u8 *data, struct w8001_coord *coord) +static void parse_pen_data(u8 *data, struct w8001_coord *coord) { memset(coord, 0, sizeof(*coord)); @@ -113,11 +119,30 @@ static void parse_data(u8 *data, struct w8001_coord *coord) coord->tilt_y = data[8] & 0x7F; } -static void parse_touch(struct w8001 *w8001) +static void parse_single_touch(u8 *data, struct w8001_coord *coord) +{ + coord->x = (data[1] << 7) | data[2]; + coord->y = (data[3] << 7) | data[4]; + coord->tsw = data[0] & 0x01; +} + +static void scale_touch_coordinates(struct w8001 *w8001, + unsigned int *x, unsigned int *y) +{ + if (w8001->max_pen_x && w8001->max_touch_x) + *x = *x * w8001->max_pen_x / w8001->max_touch_x; + + if (w8001->max_pen_y && w8001->max_touch_y) + *y = *y * w8001->max_pen_y / w8001->max_touch_y; +} + +static void parse_multi_touch(struct w8001 *w8001) { struct input_dev *dev = w8001->dev; unsigned char *data = w8001->data; + unsigned int x, y; int i; + int count = 0; for (i = 0; i < 2; i++) { bool touch = data[0] & (1 << i); @@ -125,15 +150,29 @@ static void parse_touch(struct w8001 *w8001) input_mt_slot(dev, i); input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch); if (touch) { - int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]); - int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]); + x = (data[6 * i + 1] << 7) | data[6 * i + 2]; + y = (data[6 * i + 3] << 7) | data[6 * i + 4]; /* data[5,6] and [11,12] is finger capacity */ + /* scale to pen maximum */ + scale_touch_coordinates(w8001, &x, &y); + input_report_abs(dev, ABS_MT_POSITION_X, x); 
input_report_abs(dev, ABS_MT_POSITION_Y, y); + count++; } } + /* emulate single touch events when stylus is out of proximity. + * This is to make single touch backward support consistent + * across all Wacom single touch devices. + */ + if (w8001->type != BTN_TOOL_PEN && + w8001->type != BTN_TOOL_RUBBER) { + w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED; + input_mt_report_pointer_emulation(dev, true); + } + input_sync(dev); } @@ -152,6 +191,15 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query) query->y = data[5] << 9; query->y |= data[6] << 2; query->y |= (data[2] >> 3) & 0x3; + + /* Early days' single-finger touch models need the following defaults */ + if (!query->x && !query->y) { + query->x = 1024; + query->y = 1024; + if (query->panel_res) + query->x = query->y = (1 << query->panel_res); + query->panel_res = 10; + } } static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord) @@ -161,16 +209,15 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord) /* * We have 1 bit for proximity (rdy) and 3 bits for tip, side, * side2/eraser. If rdy && f2 are set, this can be either pen + side2, - * or eraser. assume + * or eraser. Assume: * - if dev is already in proximity and f2 is toggled → pen + side2 * - if dev comes into proximity with f2 set → eraser * If f2 disappears after assuming eraser, fake proximity out for * eraser and in for pen. */ - if (!w8001->type) { - w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; - } else if (w8001->type == BTN_TOOL_RUBBER) { + switch (w8001->type) { + case BTN_TOOL_RUBBER: if (!coord->f2) { input_report_abs(dev, ABS_PRESSURE, 0); input_report_key(dev, BTN_TOUCH, 0); @@ -180,8 +227,21 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord) input_sync(dev); w8001->type = BTN_TOOL_PEN; } - } else { + break; + + case BTN_TOOL_FINGER: + input_report_key(dev, BTN_TOUCH, 0); + input_report_key(dev, BTN_TOOL_FINGER, 0); + input_sync(dev); + /* fall through */ + + case KEY_RESERVED: + w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; + break; + + default: input_report_key(dev, BTN_STYLUS2, coord->f2); + break; } input_report_abs(dev, ABS_X, coord->x); @@ -193,7 +253,26 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord) input_sync(dev); if (!coord->rdy) - w8001->type = 0; + w8001->type = KEY_RESERVED; +} + +static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord) +{ + struct input_dev *dev = w8001->dev; + unsigned int x = coord->x; + unsigned int y = coord->y; + + /* scale to pen maximum */ + scale_touch_coordinates(w8001, &x, &y); + + input_report_abs(dev, ABS_X, x); + input_report_abs(dev, ABS_Y, y); + input_report_key(dev, BTN_TOUCH, coord->tsw); + input_report_key(dev, BTN_TOOL_FINGER, coord->tsw); + + input_sync(dev); + + w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED; } static irqreturn_t w8001_interrupt(struct serio *serio, @@ -214,9 +293,18 @@ static irqreturn_t w8001_interrupt(struct serio *serio, case W8001_PKTLEN_TOUCH93 - 1: case W8001_PKTLEN_TOUCH9A - 1: - /* ignore one-finger touch packet. 
*/ - if (w8001->pktlen == w8001->idx) + tmp = w8001->data[0] & W8001_TOUCH_BYTE; + if (tmp != W8001_TOUCH_BYTE) + break; + + if (w8001->pktlen == w8001->idx) { w8001->idx = 0; + if (w8001->type != BTN_TOOL_PEN && + w8001->type != BTN_TOOL_RUBBER) { + parse_single_touch(w8001->data, &coord); + report_single_touch(w8001, &coord); + } + } break; /* Pen coordinates packet */ @@ -225,18 +313,18 @@ static irqreturn_t w8001_interrupt(struct serio *serio, if (unlikely(tmp == W8001_TAB_BYTE)) break; - tmp = (w8001->data[0] & W8001_TOUCH_BYTE); + tmp = w8001->data[0] & W8001_TOUCH_BYTE; if (tmp == W8001_TOUCH_BYTE) break; w8001->idx = 0; - parse_data(w8001->data, &coord); + parse_pen_data(w8001->data, &coord); report_pen_events(w8001, &coord); break; /* control packet */ case W8001_PKTLEN_TPCCTL - 1: - tmp = (w8001->data[0] & W8001_TOUCH_MASK); + tmp = w8001->data[0] & W8001_TOUCH_MASK; if (tmp == W8001_TOUCH_BYTE) break; @@ -249,7 +337,7 @@ static irqreturn_t w8001_interrupt(struct serio *serio, /* 2 finger touch packet */ case W8001_PKTLEN_TOUCH2FG - 1: w8001->idx = 0; - parse_touch(w8001); + parse_multi_touch(w8001); break; } @@ -279,6 +367,7 @@ static int w8001_setup(struct w8001 *w8001) { struct input_dev *dev = w8001->dev; struct w8001_coord coord; + struct w8001_touch_query touch; int error; error = w8001_command(w8001, W8001_CMD_STOP, false); @@ -287,14 +376,21 @@ static int w8001_setup(struct w8001 *w8001) msleep(250); /* wait 250ms before querying the device */ + dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); + /* penabled? */ error = w8001_command(w8001, W8001_CMD_QUERY, true); if (!error) { + __set_bit(BTN_TOUCH, dev->keybit); __set_bit(BTN_TOOL_PEN, dev->keybit); __set_bit(BTN_TOOL_RUBBER, dev->keybit); __set_bit(BTN_STYLUS, dev->keybit); __set_bit(BTN_STYLUS2, dev->keybit); - parse_data(w8001->response, &coord); + + parse_pen_data(w8001->response, &coord); + w8001->max_pen_x = coord.x; + w8001->max_pen_y = coord.y; input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); @@ -303,6 +399,8 @@ static int w8001_setup(struct w8001 *w8001) input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0); } + w8001->id = 0x90; + strlcat(w8001->name, " Penabled", sizeof(w8001->name)); } /* Touch enabled? */ @@ -313,24 +411,38 @@ static int w8001_setup(struct w8001 *w8001) * second byte is empty, which indicates touch is not supported. 
*/ if (!error && w8001->response[1]) { - struct w8001_touch_query touch; + __set_bit(BTN_TOUCH, dev->keybit); + __set_bit(BTN_TOOL_FINGER, dev->keybit); parse_touchquery(w8001->response, &touch); + w8001->max_touch_x = touch.x; + w8001->max_touch_y = touch.y; + + /* scale to pen maximum */ + if (w8001->max_pen_x && w8001->max_pen_y) { + touch.x = w8001->max_pen_x; + touch.y = w8001->max_pen_y; + } input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); - __set_bit(BTN_TOOL_FINGER, dev->keybit); switch (touch.sensor_id) { case 0: case 2: w8001->pktlen = W8001_PKTLEN_TOUCH93; + w8001->id = 0x93; + strlcat(w8001->name, " 1FG", sizeof(w8001->name)); break; + case 1: case 3: case 4: w8001->pktlen = W8001_PKTLEN_TOUCH9A; + strlcat(w8001->name, " 1FG", sizeof(w8001->name)); + w8001->id = 0x9a; break; + case 5: w8001->pktlen = W8001_PKTLEN_TOUCH2FG; @@ -341,10 +453,18 @@ static int w8001_setup(struct w8001 *w8001) 0, touch.y, 0, 0); input_set_abs_params(dev, ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX, 0, 0); + + strlcat(w8001->name, " 2FG", sizeof(w8001->name)); + if (w8001->max_pen_x && w8001->max_pen_y) + w8001->id = 0xE3; + else + w8001->id = 0xE2; break; } } + strlcat(w8001->name, " Touchscreen", sizeof(w8001->name)); + return w8001_command(w8001, W8001_CMD_START, false); } @@ -384,22 +504,10 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv) } w8001->serio = serio; - w8001->id = serio->id.id; w8001->dev = input_dev; init_completion(&w8001->cmd_done); snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys); - input_dev->name = "Wacom W8001 Penabled Serial TouchScreen"; - input_dev->phys = w8001->phys; - input_dev->id.bustype = BUS_RS232; - input_dev->id.vendor = SERIO_W8001; - input_dev->id.product = w8001->id; - input_dev->id.version = 0x0100; - input_dev->dev.parent = &serio->dev; - - input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - __set_bit(BTN_TOUCH, input_dev->keybit); - serio_set_drvdata(serio, w8001); err = serio_open(serio, drv); if (err) @@ -409,6 +517,14 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv) if (err) goto fail3; + input_dev->name = w8001->name; + input_dev->phys = w8001->phys; + input_dev->id.product = w8001->id; + input_dev->id.bustype = BUS_RS232; + input_dev->id.vendor = 0x056a; + input_dev->id.version = 0x0100; + input_dev->dev.parent = &serio->dev; + err = input_register_device(w8001->dev); if (err) goto fail3; diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 178942a2ee61..8a3c5cfc4fea 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2318,7 +2318,7 @@ static int gigaset_probe(struct usb_interface *interface, __func__, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode, GIGASET_MODULENAME); if (!cs) @@ -2576,7 +2576,7 @@ static int __init bas_gigaset_init(void) { int result; - /* allocate memory for our driver state and intialize it */ + /* allocate memory for our driver state and initialize it */ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &gigops, THIS_MODULE); diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index d151dcbf770d..0ef09d0eb96b 100644 --- 
a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -513,7 +513,7 @@ gigaset_tty_open(struct tty_struct *tty) return -ENODEV; } - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); if (!cs) goto error; @@ -771,7 +771,7 @@ static int __init ser_gigaset_init(void) return rc; } - /* allocate memory for our driver state and intialize it */ + /* allocate memory for our driver state and initialize it */ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 4a66338f4e7d..5e3300d8a2a5 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -695,7 +695,7 @@ static int gigaset_probe(struct usb_interface *interface, dev_info(&udev->dev, "%s: Device matched ... !\n", __func__); - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); if (!cs) return -ENODEV; @@ -894,7 +894,7 @@ static int __init usb_gigaset_init(void) { int result; - /* allocate memory for our driver state and intialize it */ + /* allocate memory for our driver state and initialize it */ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h index 74a6ccf9065c..8121e046b739 100644 --- a/drivers/isdn/hardware/mISDN/ipac.h +++ b/drivers/isdn/hardware/mISDN/ipac.h @@ -29,7 +29,7 @@ struct isac_hw { u32 type; u32 off; /* offset to isac regs */ char *name; - spinlock_t *hwlock; /* lock HW acccess */ + spinlock_t *hwlock; /* lock HW access */ read_reg_func *read_reg; write_reg_func *write_reg; fifo_func *read_fifo; @@ -70,7 +70,7 @@ struct ipac_hw { struct hscx_hw hscx[2]; char *name; void *hw; - spinlock_t *hwlock; /* lock HW acccess */ + spinlock_t *hwlock; /* lock HW access */ struct module *owner; u32 type; read_reg_func *read_reg; diff --git a/drivers/isdn/hardware/mISDN/isar.h b/drivers/isdn/hardware/mISDN/isar.h index 4a134acd44d0..9962bdf699c7 100644 --- a/drivers/isdn/hardware/mISDN/isar.h +++ b/drivers/isdn/hardware/mISDN/isar.h @@ -44,7 +44,7 @@ struct isar_ch { struct isar_hw { struct isar_ch ch[2]; void *hw; - spinlock_t *hwlock; /* lock HW acccess */ + spinlock_t *hwlock; /* lock HW access */ char *name; struct module *owner; read_reg_func *read_reg; diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 76d9e673b4e1..309bacf1fadc 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -112,7 +112,7 @@ * Disable rx-data: * If cmx is realized in hardware, rx data will be disabled if requested by * the upper layer. If dtmf decoding is done by software and enabled, rx data - * will not be diabled but blocked to the upper layer. + * will not be disabled but blocked to the upper layer. 
* * HFC conference engine: * If it is possible to realize all features using hardware, hardware will be diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 33facd0c45d1..80a3ae3c00b9 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -98,7 +98,6 @@ #define LP5521_EXT_CLK_USED 0x08 struct lp5521_engine { - const struct attribute_group *attributes; int id; u8 mode; u8 prog_page; @@ -225,25 +224,22 @@ static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) curr); } -static void lp5521_init_engine(struct lp5521_chip *chip, - const struct attribute_group *attr_group) +static void lp5521_init_engine(struct lp5521_chip *chip) { int i; for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { chip->engines[i].id = i + 1; chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2); chip->engines[i].prog_page = i; - chip->engines[i].attributes = &attr_group[i]; } } -static int lp5521_configure(struct i2c_client *client, - const struct attribute_group *attr_group) +static int lp5521_configure(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int ret; - lp5521_init_engine(chip, attr_group); + lp5521_init_engine(chip); /* Set all PWMs to direct control mode */ ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); @@ -329,9 +325,6 @@ static int lp5521_detect(struct i2c_client *client) /* Set engine mode and create appropriate sysfs attributes, if required. */ static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode) { - struct lp5521_chip *chip = engine_to_lp5521(engine); - struct i2c_client *client = chip->client; - struct device *dev = &client->dev; int ret = 0; /* if in that mode already do nothing, except for run */ @@ -343,18 +336,10 @@ static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode) } else if (mode == LP5521_CMD_LOAD) { lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); lp5521_set_engine_mode(engine, LP5521_CMD_LOAD); - - ret = sysfs_create_group(&dev->kobj, engine->attributes); - if (ret) - return ret; } else if (mode == LP5521_CMD_DISABLED) { lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); } - /* remove load attribute from sysfs if not in load mode */ - if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD) - sysfs_remove_group(&dev->kobj, engine->attributes); - engine->mode = mode; return ret; @@ -373,6 +358,8 @@ static int lp5521_do_store_load(struct lp5521_engine *engine, while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) { /* separate sscanfs because length is working only for %s */ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); + if (ret != 2) + goto fail; ret = sscanf(c, "%2x", &cmd); if (ret != 1) goto fail; @@ -387,7 +374,10 @@ static int lp5521_do_store_load(struct lp5521_engine *engine, goto fail; mutex_lock(&chip->lock); - ret = lp5521_load_program(engine, pattern); + if (engine->mode == LP5521_CMD_LOAD) + ret = lp5521_load_program(engine, pattern); + else + ret = -EINVAL; mutex_unlock(&chip->lock); if (ret) { @@ -574,20 +564,8 @@ static struct attribute *lp5521_attributes[] = { &dev_attr_engine2_mode.attr, &dev_attr_engine3_mode.attr, &dev_attr_selftest.attr, - NULL -}; - -static struct attribute *lp5521_engine1_attributes[] = { &dev_attr_engine1_load.attr, - NULL -}; - -static struct attribute *lp5521_engine2_attributes[] = { &dev_attr_engine2_load.attr, - NULL -}; - -static struct attribute *lp5521_engine3_attributes[] = { &dev_attr_engine3_load.attr, NULL }; @@ -596,12 +574,6 @@ static const struct attribute_group lp5521_group = { 
.attrs = lp5521_attributes, }; -static const struct attribute_group lp5521_engine_group[] = { - {.attrs = lp5521_engine1_attributes }, - {.attrs = lp5521_engine2_attributes }, - {.attrs = lp5521_engine3_attributes }, -}; - static int lp5521_register_sysfs(struct i2c_client *client) { struct device *dev = &client->dev; @@ -616,12 +588,6 @@ static void lp5521_unregister_sysfs(struct i2c_client *client) sysfs_remove_group(&dev->kobj, &lp5521_group); - for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { - if (chip->engines[i].mode == LP5521_CMD_LOAD) - sysfs_remove_group(&dev->kobj, - chip->engines[i].attributes); - } - for (i = 0; i < chip->num_leds; i++) sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, &lp5521_led_attribute_group); @@ -651,7 +617,8 @@ static int __init lp5521_init_led(struct lp5521_led *led, return -EINVAL; } - snprintf(name, sizeof(name), "%s:channel%d", client->name, chan); + snprintf(name, sizeof(name), "%s:channel%d", + pdata->label ?: client->name, chan); led->cdev.brightness_set = lp5521_set_brightness; led->cdev.name = name; res = led_classdev_register(dev, &led->cdev); @@ -723,7 +690,7 @@ static int lp5521_probe(struct i2c_client *client, dev_info(&client->dev, "%s programmable led chip found\n", id->name); - ret = lp5521_configure(client, lp5521_engine_group); + ret = lp5521_configure(client); if (ret < 0) { dev_err(&client->dev, "error configuring chip\n"); goto fail2; diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 0cc4ead2fd8b..d0c4068ecddd 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -105,7 +105,6 @@ #define SHIFT_MASK(id) (((id) - 1) * 2) struct lp5523_engine { - const struct attribute_group *attributes; int id; u8 mode; u8 prog_page; @@ -403,14 +402,23 @@ static ssize_t store_engine_leds(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct lp5523_chip *chip = i2c_get_clientdata(client); u16 mux = 0; + ssize_t ret; if (lp5523_mux_parse(buf, &mux, len)) return -EINVAL; + mutex_lock(&chip->lock); + ret = -EINVAL; + if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD) + goto leave; + if (lp5523_load_mux(&chip->engines[nr - 1], mux)) - return -EINVAL; + goto leave; - return len; + ret = len; +leave: + mutex_unlock(&chip->lock); + return ret; } #define store_leds(nr) \ @@ -556,7 +564,11 @@ static int lp5523_do_store_load(struct lp5523_engine *engine, mutex_lock(&chip->lock); - ret = lp5523_load_program(engine, pattern); + if (engine->mode == LP5523_CMD_LOAD) + ret = lp5523_load_program(engine, pattern); + else + ret = -EINVAL; + mutex_unlock(&chip->lock); if (ret) { @@ -737,37 +749,18 @@ static struct attribute *lp5523_attributes[] = { &dev_attr_engine2_mode.attr, &dev_attr_engine3_mode.attr, &dev_attr_selftest.attr, - NULL -}; - -static struct attribute *lp5523_engine1_attributes[] = { &dev_attr_engine1_load.attr, &dev_attr_engine1_leds.attr, - NULL -}; - -static struct attribute *lp5523_engine2_attributes[] = { &dev_attr_engine2_load.attr, &dev_attr_engine2_leds.attr, - NULL -}; - -static struct attribute *lp5523_engine3_attributes[] = { &dev_attr_engine3_load.attr, &dev_attr_engine3_leds.attr, - NULL }; static const struct attribute_group lp5523_group = { .attrs = lp5523_attributes, }; -static const struct attribute_group lp5523_engine_group[] = { - {.attrs = lp5523_engine1_attributes }, - {.attrs = lp5523_engine2_attributes }, - {.attrs = lp5523_engine3_attributes }, -}; - static int lp5523_register_sysfs(struct i2c_client *client) { struct device *dev = &client->dev; @@ -788,10 +781,6 
@@ static void lp5523_unregister_sysfs(struct i2c_client *client) sysfs_remove_group(&dev->kobj, &lp5523_group); - for (i = 0; i < ARRAY_SIZE(chip->engines); i++) - if (chip->engines[i].mode == LP5523_CMD_LOAD) - sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]); - for (i = 0; i < chip->num_leds; i++) sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, &lp5523_led_attribute_group); @@ -802,10 +791,6 @@ static void lp5523_unregister_sysfs(struct i2c_client *client) /*--------------------------------------------------------------*/ static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) { - /* engine to chip */ - struct lp5523_chip *chip = engine_to_lp5523(engine); - struct i2c_client *client = chip->client; - struct device *dev = &client->dev; int ret = 0; /* if in that mode already do nothing, except for run */ @@ -817,18 +802,10 @@ static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) } else if (mode == LP5523_CMD_LOAD) { lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); - - ret = sysfs_create_group(&dev->kobj, engine->attributes); - if (ret) - return ret; } else if (mode == LP5523_CMD_DISABLED) { lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); } - /* remove load attribute from sysfs if not in load mode */ - if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD) - sysfs_remove_group(&dev->kobj, engine->attributes); - engine->mode = mode; return ret; @@ -845,7 +822,6 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id) engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id); engine->prog_page = id - 1; engine->mux_page = id + 2; - engine->attributes = &lp5523_engine_group[id - 1]; return 0; } @@ -870,7 +846,8 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, return -EINVAL; } - snprintf(name, 32, "lp5523:channel%d", chan); + snprintf(name, sizeof(name), "%s:channel%d", + pdata->label ?: "lp5523", chan); led->cdev.name = name; led->cdev.brightness_set = lp5523_set_brightness; diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 43d08756d823..afac338d5025 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -200,6 +200,32 @@ static void pca9532_led_work(struct work_struct *work) pca9532_setled(led); } +static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) +{ + int i = n_devs; + + if (!data) + return; + + while (--i >= 0) { + switch (data->leds[i].type) { + case PCA9532_TYPE_NONE: + break; + case PCA9532_TYPE_LED: + led_classdev_unregister(&data->leds[i].ldev); + cancel_work_sync(&data->leds[i].work); + break; + case PCA9532_TYPE_N2100_BEEP: + if (data->idev != NULL) { + input_unregister_device(data->idev); + cancel_work_sync(&data->work); + data->idev = NULL; + } + break; + } + } +} + static int pca9532_configure(struct i2c_client *client, struct pca9532_data *data, struct pca9532_platform_data *pdata) { @@ -274,25 +300,7 @@ static int pca9532_configure(struct i2c_client *client, return 0; exit: - if (i > 0) - for (i = i - 1; i >= 0; i--) - switch (data->leds[i].type) { - case PCA9532_TYPE_NONE: - break; - case PCA9532_TYPE_LED: - led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); - break; - case PCA9532_TYPE_N2100_BEEP: - if (data->idev != NULL) { - input_unregister_device(data->idev); - input_free_device(data->idev); - cancel_work_sync(&data->work); - data->idev = NULL; - } - break; - } - + pca9532_destroy_devices(data, i); return err; 
} @@ -329,25 +337,7 @@ static int pca9532_probe(struct i2c_client *client, static int pca9532_remove(struct i2c_client *client) { struct pca9532_data *data = i2c_get_clientdata(client); - int i; - for (i = 0; i < 16; i++) - switch (data->leds[i].type) { - case PCA9532_TYPE_NONE: - break; - case PCA9532_TYPE_LED: - led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); - break; - case PCA9532_TYPE_N2100_BEEP: - if (data->idev != NULL) { - input_unregister_device(data->idev); - input_free_device(data->idev); - cancel_work_sync(&data->work); - data->idev = NULL; - } - break; - } - + pca9532_destroy_devices(data, 16); kfree(data); return 0; } diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c index f948e57bd9b8..2b513a2ad7de 100644 --- a/drivers/leds/ledtrig-backlight.c +++ b/drivers/leds/ledtrig-backlight.c @@ -26,6 +26,7 @@ struct bl_trig_notifier { int brightness; int old_status; struct notifier_block notifier; + unsigned invert; }; static int fb_notifier_callback(struct notifier_block *p, @@ -36,23 +37,64 @@ static int fb_notifier_callback(struct notifier_block *p, struct led_classdev *led = n->led; struct fb_event *fb_event = data; int *blank = fb_event->data; + int new_status = *blank ? BLANK : UNBLANK; switch (event) { case FB_EVENT_BLANK : - if (*blank && n->old_status == UNBLANK) { + if (new_status == n->old_status) + break; + + if ((n->old_status == UNBLANK) ^ n->invert) { n->brightness = led->brightness; led_set_brightness(led, LED_OFF); - n->old_status = BLANK; - } else if (!*blank && n->old_status == BLANK) { + } else { led_set_brightness(led, n->brightness); - n->old_status = UNBLANK; } + + n->old_status = new_status; + break; } return 0; } +static ssize_t bl_trig_invert_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led = dev_get_drvdata(dev); + struct bl_trig_notifier *n = led->trigger_data; + + return sprintf(buf, "%u\n", n->invert); +} + +static ssize_t bl_trig_invert_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t num) +{ + struct led_classdev *led = dev_get_drvdata(dev); + struct bl_trig_notifier *n = led->trigger_data; + unsigned long invert; + int ret; + + ret = strict_strtoul(buf, 10, &invert); + if (ret < 0) + return ret; + + if (invert > 1) + return -EINVAL; + + n->invert = invert; + + /* After inverting, we need to update the LED. 
*/ + if ((n->old_status == BLANK) ^ n->invert) + led_set_brightness(led, LED_OFF); + else + led_set_brightness(led, n->brightness); + + return num; +} +static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store); + static void bl_trig_activate(struct led_classdev *led) { int ret; @@ -66,6 +108,10 @@ static void bl_trig_activate(struct led_classdev *led) return; } + ret = device_create_file(led->dev, &dev_attr_inverted); + if (ret) + goto err_invert; + n->led = led; n->brightness = led->brightness; n->old_status = UNBLANK; @@ -74,6 +120,12 @@ static void bl_trig_activate(struct led_classdev *led) ret = fb_register_client(&n->notifier); if (ret) dev_err(led->dev, "unable to register backlight trigger\n"); + + return; + +err_invert: + led->trigger_data = NULL; + kfree(n); } static void bl_trig_deactivate(struct led_classdev *led) @@ -82,6 +134,7 @@ static void bl_trig_deactivate(struct led_classdev *led) (struct bl_trig_notifier *) led->trigger_data; if (n) { + device_remove_file(led->dev, &dev_attr_inverted); fb_unregister_client(&n->notifier); kfree(n); } diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 1cec02f6c431..ade1e656bfb2 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -15,7 +15,7 @@ #define MAX_PMU_LEVEL 0xFF -static struct backlight_ops pmu_backlight_data; +static const struct backlight_ops pmu_backlight_data; static DEFINE_SPINLOCK(pmu_backlight_lock); static int sleeping, uses_pmu_bl; static u8 bl_curve[FB_BACKLIGHT_LEVELS]; @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops pmu_backlight_data = { +static const struct backlight_ops pmu_backlight_data = { .get_brightness = pmu_backlight_get_brightness, .update_status = pmu_backlight_update_status, diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index cd29c8248386..8b021eb0d48c 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2257,7 +2257,7 @@ static int pmu_sleep_valid(suspend_state_t state) && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0); } -static struct platform_suspend_ops pmu_pm_ops = { +static const struct platform_suspend_ops pmu_pm_ops = { .enter = powerbook_sleep, .valid = pmu_sleep_valid, }; diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index bf1a95e31559..98d9ec85e0eb 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -240,6 +240,30 @@ config DM_MIRROR Allow volume managers to mirror logical volumes, also needed for live data migration tools such as 'pvmove'. +config DM_RAID + tristate "RAID 4/5/6 target (EXPERIMENTAL)" + depends on BLK_DEV_DM && EXPERIMENTAL + select MD_RAID456 + select BLK_DEV_MD + ---help--- + A dm target that supports RAID4, RAID5 and RAID6 mappings + + A RAID-5 set of N drives with a capacity of C MB per drive provides + the capacity of C * (N - 1) MB, and protects against a failure + of a single drive. For a given sector (row) number, (N - 1) drives + contain data sectors, and one drive contains the parity protection. + For a RAID-4 set, the parity blocks are present on a single drive, + while a RAID-5 set distributes the parity across the drives in one + of the available parity distribution methods. + + A RAID-6 set of N drives with a capacity of C MB per drive + provides the capacity of C * (N - 2) MB, and protects + against a failure of any two drives. 
For a given sector + (row) number, (N - 2) drives contain data sectors, and two + drives contain two independent redundancy syndromes. Like + RAID-5, RAID-6 distributes the syndromes across the drives + in one of the available parity distribution methods. + config DM_LOG_USERSPACE tristate "Mirror userspace logging (EXPERIMENTAL)" depends on DM_MIRROR && EXPERIMENTAL && NET diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 5e3aac41919d..d0138606c2e8 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o obj-$(CONFIG_DM_ZERO) += dm-zero.o +obj-$(CONFIG_DM_RAID) += dm-raid.o ifeq ($(CONFIG_DM_UEVENT),y) dm-mod-objs += dm-uevent.o diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 5a1ffe3527aa..9a35320fb59f 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -210,11 +210,11 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, || test_bit(Faulty, &rdev->flags)) continue; - target = rdev->sb_start + offset + index * (PAGE_SIZE/512); + target = offset + index * (PAGE_SIZE/512); if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), - page, READ)) { + page, READ, true)) { page->index = index; attach_page_buffers(page, NULL); /* so that free_buffer will * quietly no-op */ @@ -264,14 +264,18 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) { mdk_rdev_t *rdev = NULL; + struct block_device *bdev; mddev_t *mddev = bitmap->mddev; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; loff_t offset = mddev->bitmap_info.offset; + + bdev = (rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev; + if (page->index == bitmap->file_pages-1) size = roundup(bitmap->last_page_size, - bdev_logical_block_size(rdev->bdev)); + bdev_logical_block_size(bdev)); /* Just make sure we aren't corrupting data or * metadata */ @@ -1542,7 +1546,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) wait_event(bitmap->mddev->recovery_wait, atomic_read(&bitmap->mddev->recovery_active) == 0); - bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; + bitmap->mddev->curr_resync_completed = sector; set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); s = 0; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d5b0e4c0e702..4e054bd91664 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -18,10 +18,14 @@ #include #include #include +#include #include #include #include #include +#include +#include +#include #include @@ -63,6 +67,7 @@ struct dm_crypt_request { struct convert_context *ctx; struct scatterlist sg_in; struct scatterlist sg_out; + sector_t iv_sector; }; struct crypt_config; @@ -73,11 +78,13 @@ struct crypt_iv_operations { void (*dtr)(struct crypt_config *cc); int (*init)(struct crypt_config *cc); int (*wipe)(struct crypt_config *cc); - int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); + int (*generator)(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq); + int (*post)(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq); }; struct iv_essiv_private { - struct crypto_cipher *tfm; struct crypto_hash *hash_tfm; u8 *salt; }; @@ -86,11 +93,32 @@ struct iv_benbi_private { int shift; }; +#define LMK_SEED_SIZE 64 /* hash + 0 */ +struct iv_lmk_private { + struct crypto_shash *hash_tfm; + u8 *seed; +}; + /* * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; + +/* + * Duplicated per-CPU state for cipher. + */ +struct crypt_cpu { + struct ablkcipher_request *req; + /* ESSIV: struct crypto_cipher *essiv_tfm */ + void *iv_private; + struct crypto_ablkcipher *tfms[0]; +}; + +/* + * The fields in here must be read only after initialization, + * changing state should be in crypt_cpu. + */ struct crypt_config { struct dm_dev *dev; sector_t start; @@ -108,16 +136,24 @@ struct crypt_config { struct workqueue_struct *crypt_queue; char *cipher; - char *cipher_mode; + char *cipher_string; struct crypt_iv_operations *iv_gen_ops; union { struct iv_essiv_private essiv; struct iv_benbi_private benbi; + struct iv_lmk_private lmk; } iv_gen_private; sector_t iv_offset; unsigned int iv_size; + /* + * Duplicated per cpu state. Access through + * per_cpu_ptr() only. + */ + struct crypt_cpu __percpu *cpu; + unsigned tfms_count; + /* * Layout of each crypto request: * @@ -132,11 +168,10 @@ struct crypt_config { * correctly aligned. */ unsigned int dmreq_start; - struct ablkcipher_request *req; - struct crypto_ablkcipher *tfm; unsigned long flags; unsigned int key_size; + unsigned int key_parts; u8 key[0]; }; @@ -148,6 +183,20 @@ static struct kmem_cache *_crypt_io_pool; static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); +static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); + +static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) +{ + return this_cpu_ptr(cc->cpu); +} + +/* + * Use this to access cipher attributes that are the same for each CPU. 
+ */ +static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) +{ + return __this_cpu_ptr(cc->cpu)->tfms[0]; +} /* * Different IV generation algorithms: @@ -168,23 +217,38 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io); * null: the initial vector is always zero. Provides compatibility with * obsolete loop_fish2 devices. Do not use for new devices. * + * lmk: Compatible implementation of the block chaining mode used + * by the Loop-AES block device encryption system + * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/ + * It operates on full 512 byte sectors and uses CBC + * with an IV derived from the sector number, the data and + * optionally extra IV seed. + * This means that after decryption the first block + * of sector must be tweaked according to decrypted data. + * Loop-AES can use three encryption schemes: + * version 1: is plain aes-cbc mode + * version 2: uses 64 multikey scheme with lmk IV generator + * version 3: the same as version 2 with additional IV seed + * (it uses 65 keys, last key is used as IV seed) + * * plumb: unimplemented, see: * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 */ -static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); + *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); return 0; } static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, - sector_t sector) + struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(sector); + *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); return 0; } @@ -195,7 +259,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; struct hash_desc desc; struct scatterlist sg; - int err; + struct crypto_cipher *essiv_tfm; + int err, cpu; sg_init_one(&sg, cc->key, cc->key_size); desc.tfm = essiv->hash_tfm; @@ -205,8 +270,16 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) if (err) return err; - return crypto_cipher_setkey(essiv->tfm, essiv->salt, + for_each_possible_cpu(cpu) { + essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private, + + err = crypto_cipher_setkey(essiv_tfm, essiv->salt, crypto_hash_digestsize(essiv->hash_tfm)); + if (err) + return err; + } + + return 0; } /* Wipe salt and reset key derived from volume key */ @@ -214,24 +287,76 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc) { struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); + struct crypto_cipher *essiv_tfm; + int cpu, r, err = 0; memset(essiv->salt, 0, salt_size); - return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); + for_each_possible_cpu(cpu) { + essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private; + r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); + if (r) + err = r; + } + + return err; +} + +/* Set up per cpu cipher state */ +static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, + struct dm_target *ti, + u8 *salt, unsigned saltsize) +{ + struct crypto_cipher *essiv_tfm; + int err; + + /* Setup the essiv_tfm with the given salt */ + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(essiv_tfm)) { + ti->error = "Error allocating crypto tfm for ESSIV"; + return essiv_tfm; + } + + if (crypto_cipher_blocksize(essiv_tfm) != + 
crypto_ablkcipher_ivsize(any_tfm(cc))) { + ti->error = "Block size of ESSIV cipher does " + "not match IV size of block cipher"; + crypto_free_cipher(essiv_tfm); + return ERR_PTR(-EINVAL); + } + + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); + if (err) { + ti->error = "Failed to set key for ESSIV cipher"; + crypto_free_cipher(essiv_tfm); + return ERR_PTR(err); + } + + return essiv_tfm; } static void crypt_iv_essiv_dtr(struct crypt_config *cc) { + int cpu; + struct crypt_cpu *cpu_cc; + struct crypto_cipher *essiv_tfm; struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - crypto_free_cipher(essiv->tfm); - essiv->tfm = NULL; - crypto_free_hash(essiv->hash_tfm); essiv->hash_tfm = NULL; kzfree(essiv->salt); essiv->salt = NULL; + + for_each_possible_cpu(cpu) { + cpu_cc = per_cpu_ptr(cc->cpu, cpu); + essiv_tfm = cpu_cc->iv_private; + + if (essiv_tfm) + crypto_free_cipher(essiv_tfm); + + cpu_cc->iv_private = NULL; + } } static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, @@ -240,7 +365,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, struct crypto_cipher *essiv_tfm = NULL; struct crypto_hash *hash_tfm = NULL; u8 *salt = NULL; - int err; + int err, cpu; if (!opts) { ti->error = "Digest algorithm missing for ESSIV mode"; @@ -262,48 +387,44 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, goto bad; } - /* Allocate essiv_tfm */ - essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(essiv_tfm)) { - ti->error = "Error allocating crypto tfm for ESSIV"; - err = PTR_ERR(essiv_tfm); - goto bad; - } - if (crypto_cipher_blocksize(essiv_tfm) != - crypto_ablkcipher_ivsize(cc->tfm)) { - ti->error = "Block size of ESSIV cipher does " - "not match IV size of block cipher"; - err = -EINVAL; - goto bad; - } - cc->iv_gen_private.essiv.salt = salt; - cc->iv_gen_private.essiv.tfm = essiv_tfm; cc->iv_gen_private.essiv.hash_tfm = hash_tfm; + for_each_possible_cpu(cpu) { + essiv_tfm = setup_essiv_cpu(cc, ti, salt, + crypto_hash_digestsize(hash_tfm)); + if (IS_ERR(essiv_tfm)) { + crypt_iv_essiv_dtr(cc); + return PTR_ERR(essiv_tfm); + } + per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm; + } + return 0; bad: - if (essiv_tfm && !IS_ERR(essiv_tfm)) - crypto_free_cipher(essiv_tfm); if (hash_tfm && !IS_ERR(hash_tfm)) crypto_free_hash(hash_tfm); kfree(salt); return err; } -static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { + struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; + memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(sector); - crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); + *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); + crypto_cipher_encrypt_one(essiv_tfm, iv, iv); + return 0; } static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); + unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); int log = ilog2(bs); /* we need to calculate how far we must shift the sector count @@ -328,25 +449,177 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc) { } -static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { __be64 val; memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ - val = 
cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); + val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); return 0; } -static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); return 0; } +static void crypt_iv_lmk_dtr(struct crypt_config *cc) +{ + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + + if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) + crypto_free_shash(lmk->hash_tfm); + lmk->hash_tfm = NULL; + + kzfree(lmk->seed); + lmk->seed = NULL; +} + +static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + + lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); + if (IS_ERR(lmk->hash_tfm)) { + ti->error = "Error initializing LMK hash"; + return PTR_ERR(lmk->hash_tfm); + } + + /* No seed in LMK version 2 */ + if (cc->key_parts == cc->tfms_count) { + lmk->seed = NULL; + return 0; + } + + lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); + if (!lmk->seed) { + crypt_iv_lmk_dtr(cc); + ti->error = "Error kmallocing seed storage in LMK"; + return -ENOMEM; + } + + return 0; +} + +static int crypt_iv_lmk_init(struct crypt_config *cc) +{ + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + int subkey_size = cc->key_size / cc->key_parts; + + /* LMK seed is on the position of LMK_KEYS + 1 key */ + if (lmk->seed) + memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), + crypto_shash_digestsize(lmk->hash_tfm)); + + return 0; +} + +static int crypt_iv_lmk_wipe(struct crypt_config *cc) +{ + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + + if (lmk->seed) + memset(lmk->seed, 0, LMK_SEED_SIZE); + + return 0; +} + +static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq, + u8 *data) +{ + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + struct { + struct shash_desc desc; + char ctx[crypto_shash_descsize(lmk->hash_tfm)]; + } sdesc; + struct md5_state md5state; + u32 buf[4]; + int i, r; + + sdesc.desc.tfm = lmk->hash_tfm; + sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + r = crypto_shash_init(&sdesc.desc); + if (r) + return r; + + if (lmk->seed) { + r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE); + if (r) + return r; + } + + /* Sector is always 512B, block size 16, add data of blocks 1-31 */ + r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31); + if (r) + return r; + + /* Sector is cropped to 56 bits here */ + buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); + buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); + buf[2] = cpu_to_le32(4024); + buf[3] = 0; + r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf)); + if (r) + return r; + + /* No MD5 padding here */ + r = crypto_shash_export(&sdesc.desc, &md5state); + if (r) + return r; + + for (i = 0; i < MD5_HASH_WORDS; i++) + __cpu_to_le32s(&md5state.hash[i]); + memcpy(iv, &md5state.hash, cc->iv_size); + + return 0; +} + +static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + u8 *src; + int r = 0; + + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0); + r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); + kunmap_atomic(src, KM_USER0); + } else + memset(iv, 0, cc->iv_size); + + return 
r; +} + +static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + u8 *dst; + int r; + + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) + return 0; + + dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0); + r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); + + /* Tweak the first block of plaintext sector */ + if (!r) + crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); + + kunmap_atomic(dst, KM_USER0); + return r; +} + static struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; @@ -373,6 +646,15 @@ static struct crypt_iv_operations crypt_iv_null_ops = { .generator = crypt_iv_null_gen }; +static struct crypt_iv_operations crypt_iv_lmk_ops = { + .ctr = crypt_iv_lmk_ctr, + .dtr = crypt_iv_lmk_dtr, + .init = crypt_iv_lmk_init, + .wipe = crypt_iv_lmk_wipe, + .generator = crypt_iv_lmk_gen, + .post = crypt_iv_lmk_post +}; + static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, @@ -400,6 +682,13 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); } +static u8 *iv_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_ablkcipher_alignmask(any_tfm(cc)) + 1); +} + static int crypt_convert_block(struct crypt_config *cc, struct convert_context *ctx, struct ablkcipher_request *req) @@ -411,9 +700,9 @@ static int crypt_convert_block(struct crypt_config *cc, int r = 0; dmreq = dmreq_of_req(cc, req); - iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), - crypto_ablkcipher_alignmask(cc->tfm) + 1); + iv = iv_of_dmreq(cc, dmreq); + dmreq->iv_sector = ctx->sector; dmreq->ctx = ctx; sg_init_table(&dmreq->sg_in, 1); sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, @@ -436,7 +725,7 @@ static int crypt_convert_block(struct crypt_config *cc, } if (cc->iv_gen_ops) { - r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); + r = cc->iv_gen_ops->generator(cc, iv, dmreq); if (r < 0) return r; } @@ -449,21 +738,28 @@ static int crypt_convert_block(struct crypt_config *cc, else r = crypto_ablkcipher_decrypt(req); + if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) + r = cc->iv_gen_ops->post(cc, iv, dmreq); + return r; } static void kcryptd_async_done(struct crypto_async_request *async_req, int error); + static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - if (!cc->req) - cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(cc->req, cc->tfm); - ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, - dmreq_of_req(cc, cc->req)); + struct crypt_cpu *this_cc = this_crypt_config(cc); + unsigned key_index = ctx->sector & (cc->tfms_count - 1); + + if (!this_cc->req) + this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + + ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]); + ablkcipher_request_set_callback(this_cc->req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); } /* @@ -472,6 +768,7 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { + struct crypt_cpu *this_cc = this_crypt_config(cc); int r; atomic_set(&ctx->pending, 1); @@ -483,7 +780,7 @@ static int crypt_convert(struct crypt_config *cc, 
atomic_inc(&ctx->pending); - r = crypt_convert_block(cc, ctx, cc->req); + r = crypt_convert_block(cc, ctx, this_cc->req); switch (r) { /* async */ @@ -492,7 +789,7 @@ static int crypt_convert(struct crypt_config *cc, INIT_COMPLETION(ctx->restart); /* fall through*/ case -EINPROGRESS: - cc->req = NULL; + this_cc->req = NULL; ctx->sector++; continue; @@ -651,6 +948,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io) * They must be separated as otherwise the final stages could be * starved by new requests which can block in the first stages due * to memory allocation. + * + * The work is done per CPU global for all dm-crypt instances. + * They should not depend on each other and do not block. */ static void crypt_endio(struct bio *clone, int error) { @@ -691,26 +991,30 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_destructor = dm_crypt_bio_destructor; } -static void kcryptd_io_read(struct dm_crypt_io *io) +static void kcryptd_unplug(struct crypt_config *cc) +{ + blk_unplug(bdev_get_queue(cc->dev->bdev)); +} + +static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) { struct crypt_config *cc = io->target->private; struct bio *base_bio = io->base_bio; struct bio *clone; - crypt_inc_pending(io); - /* * The block layer might modify the bvec array, so always * copy the required bvecs because we need the original * one in order to decrypt the whole bio data *afterwards*. */ - clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); - if (unlikely(!clone)) { - io->error = -ENOMEM; - crypt_dec_pending(io); - return; + clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); + if (!clone) { + kcryptd_unplug(cc); + return 1; } + crypt_inc_pending(io); + clone_init(io, clone); clone->bi_idx = 0; clone->bi_vcnt = bio_segments(base_bio); @@ -720,6 +1024,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io) sizeof(struct bio_vec) * clone->bi_vcnt); generic_make_request(clone); + return 0; } static void kcryptd_io_write(struct dm_crypt_io *io) @@ -732,9 +1037,12 @@ static void kcryptd_io(struct work_struct *work) { struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); - if (bio_data_dir(io->base_bio) == READ) - kcryptd_io_read(io); - else + if (bio_data_dir(io->base_bio) == READ) { + crypt_inc_pending(io); + if (kcryptd_io_read(io, GFP_NOIO)) + io->error = -ENOMEM; + crypt_dec_pending(io); + } else kcryptd_io_write(io); } @@ -901,6 +1209,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, return; } + if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) + error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); + mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); if (!atomic_dec_and_test(&ctx->pending)) @@ -971,34 +1282,84 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) } } -static int crypt_set_key(struct crypt_config *cc, char *key) +static void crypt_free_tfms(struct crypt_config *cc, int cpu) { - unsigned key_size = strlen(key) >> 1; + struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); + unsigned i; - if (cc->key_size && cc->key_size != key_size) + for (i = 0; i < cc->tfms_count; i++) + if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) { + crypto_free_ablkcipher(cpu_cc->tfms[i]); + cpu_cc->tfms[i] = NULL; + } +} + +static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode) +{ + struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); + unsigned i; + int err; + + for (i = 0; i < cc->tfms_count; i++) { + cpu_cc->tfms[i] = 
crypto_alloc_ablkcipher(ciphermode, 0, 0); + if (IS_ERR(cpu_cc->tfms[i])) { + err = PTR_ERR(cpu_cc->tfms[i]); + crypt_free_tfms(cc, cpu); + return err; + } + } + + return 0; +} + +static int crypt_setkey_allcpus(struct crypt_config *cc) +{ + unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count); + int cpu, err = 0, i, r; + + for_each_possible_cpu(cpu) { + for (i = 0; i < cc->tfms_count; i++) { + r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i], + cc->key + (i * subkey_size), subkey_size); + if (r) + err = r; + } + } + + return err; +} + +static int crypt_set_key(struct crypt_config *cc, char *key) +{ + /* The key size may not be changed. */ + if (cc->key_size != (strlen(key) >> 1)) return -EINVAL; - cc->key_size = key_size; /* initial settings */ + /* Hyphen (which gives a key_size of zero) means there is no key. */ + if (!cc->key_size && strcmp(key, "-")) + return -EINVAL; - if ((!key_size && strcmp(key, "-")) || - (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) return -EINVAL; set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); + return crypt_setkey_allcpus(cc); } static int crypt_wipe_key(struct crypt_config *cc) { clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); memset(&cc->key, 0, cc->key_size * sizeof(u8)); - return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); + + return crypt_setkey_allcpus(cc); } static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; + struct crypt_cpu *cpu_cc; + int cpu; ti->private = NULL; @@ -1010,6 +1371,14 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); + if (cc->cpu) + for_each_possible_cpu(cpu) { + cpu_cc = per_cpu_ptr(cc->cpu, cpu); + if (cpu_cc->req) + mempool_free(cpu_cc->req, cc->req_pool); + crypt_free_tfms(cc, cpu); + } + if (cc->bs) bioset_free(cc->bs); @@ -1023,14 +1392,14 @@ static void crypt_dtr(struct dm_target *ti) if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); - if (cc->tfm && !IS_ERR(cc->tfm)) - crypto_free_ablkcipher(cc->tfm); - if (cc->dev) dm_put_device(ti, cc->dev); + if (cc->cpu) + free_percpu(cc->cpu); + kzfree(cc->cipher); - kzfree(cc->cipher_mode); + kzfree(cc->cipher_string); /* Must zero key material before freeing */ kzfree(cc); @@ -1040,9 +1409,9 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) { struct crypt_config *cc = ti->private; - char *tmp, *cipher, *chainmode, *ivmode, *ivopts; + char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; char *cipher_api = NULL; - int ret = -EINVAL; + int cpu, ret = -EINVAL; /* Convert to crypto api definition? 
*/ if (strchr(cipher_in, '(')) { @@ -1050,23 +1419,31 @@ static int crypt_ctr_cipher(struct dm_target *ti, return -EINVAL; } + cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); + if (!cc->cipher_string) + goto bad_mem; + /* * Legacy dm-crypt cipher specification - * cipher-mode-iv:ivopts + * cipher[:keycount]-mode-iv:ivopts */ tmp = cipher_in; - cipher = strsep(&tmp, "-"); + keycount = strsep(&tmp, "-"); + cipher = strsep(&keycount, ":"); + + if (!keycount) + cc->tfms_count = 1; + else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 || + !is_power_of_2(cc->tfms_count)) { + ti->error = "Bad cipher key count specification"; + return -EINVAL; + } + cc->key_parts = cc->tfms_count; cc->cipher = kstrdup(cipher, GFP_KERNEL); if (!cc->cipher) goto bad_mem; - if (tmp) { - cc->cipher_mode = kstrdup(tmp, GFP_KERNEL); - if (!cc->cipher_mode) - goto bad_mem; - } - chainmode = strsep(&tmp, "-"); ivopts = strsep(&tmp, "-"); ivmode = strsep(&ivopts, ":"); @@ -1074,10 +1451,19 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - /* Compatibility mode for old dm-crypt mappings */ + cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) + + cc->tfms_count * sizeof(*(cc->cpu->tfms)), + __alignof__(struct crypt_cpu)); + if (!cc->cpu) { + ti->error = "Cannot allocate per cpu state"; + goto bad_mem; + } + + /* + * For compatibility with the original dm-crypt mapping format, if + * only the cipher name is supplied, use cbc-plain. + */ if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { - kfree(cc->cipher_mode); - cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL); chainmode = "cbc"; ivmode = "plain"; } @@ -1099,11 +1485,12 @@ static int crypt_ctr_cipher(struct dm_target *ti, } /* Allocate cipher */ - cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0); - if (IS_ERR(cc->tfm)) { - ret = PTR_ERR(cc->tfm); - ti->error = "Error allocating crypto tfm"; - goto bad; + for_each_possible_cpu(cpu) { + ret = crypt_alloc_tfms(cc, cpu, cipher_api); + if (ret < 0) { + ti->error = "Error allocating crypto tfm"; + goto bad; + } } /* Initialize and set key */ @@ -1114,7 +1501,7 @@ static int crypt_ctr_cipher(struct dm_target *ti, } /* Initialize IV */ - cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm); + cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc)); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ cc->iv_size = max(cc->iv_size, @@ -1137,7 +1524,15 @@ static int crypt_ctr_cipher(struct dm_target *ti, cc->iv_gen_ops = &crypt_iv_benbi_ops; else if (strcmp(ivmode, "null") == 0) cc->iv_gen_ops = &crypt_iv_null_ops; - else { + else if (strcmp(ivmode, "lmk") == 0) { + cc->iv_gen_ops = &crypt_iv_lmk_ops; + /* Version 2 and 3 is recognised according + * to length of provided multi-key string. + * If present (version 3), last key is used as IV seed. 
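The extended "cipher[:keycount]-chainmode-ivmode[:ivopts]" specification parsed above can be hard to follow from the strsep() calls alone. The sketch below walks the same tokenisation over a writable copy of the string; the helper and struct names are hypothetical, and error handling is reduced to the key-count check.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/errno.h>

struct example_cipher_spec {
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned tfms_count;
};

/* 'spec' must be writable, e.g. a kstrdup() of "aes:2-cbc-essiv:sha256". */
static int example_parse_cipher(char *spec, struct example_cipher_spec *out)
{
	char *tmp = spec, *keycount;

	keycount = strsep(&tmp, "-");		/* "aes:2" */
	out->cipher = strsep(&keycount, ":");	/* "aes", keycount -> "2" or NULL */

	if (!keycount)
		out->tfms_count = 1;
	else if (sscanf(keycount, "%u", &out->tfms_count) != 1 ||
		 !is_power_of_2(out->tfms_count))
		return -EINVAL;

	out->chainmode = strsep(&tmp, "-");	/* "cbc" */
	out->ivopts = strsep(&tmp, "-");	/* "essiv:sha256" */
	out->ivmode = strsep(&out->ivopts, ":");/* "essiv", ivopts -> "sha256" */

	return 0;
}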
+ */ + if (cc->key_size % cc->key_parts) + cc->key_parts++; + } else { ret = -EINVAL; ti->error = "Invalid IV mode"; goto bad; @@ -1194,6 +1589,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Cannot allocate encryption context"; return -ENOMEM; } + cc->key_size = key_size; ti->private = cc; ret = crypt_ctr_cipher(ti, argv[0], argv[1]); @@ -1208,9 +1604,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->dmreq_start = sizeof(struct ablkcipher_request); - cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm); + cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); - cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) & + cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & ~(crypto_tfm_ctx_alignment() - 1); cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + @@ -1219,7 +1615,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Cannot allocate crypt request mempool"; goto bad; } - cc->req = NULL; cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); if (!cc->page_pool) { @@ -1252,13 +1647,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->start = tmpll; ret = -ENOMEM; - cc->io_queue = create_singlethread_workqueue("kcryptd_io"); + cc->io_queue = alloc_workqueue("kcryptd_io", + WQ_NON_REENTRANT| + WQ_MEM_RECLAIM, + 1); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } - cc->crypt_queue = create_singlethread_workqueue("kcryptd"); + cc->crypt_queue = alloc_workqueue("kcryptd", + WQ_NON_REENTRANT| + WQ_CPU_INTENSIVE| + WQ_MEM_RECLAIM, + 1); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad; @@ -1286,9 +1688,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); - if (bio_data_dir(io->base_bio) == READ) - kcryptd_queue_io(io); - else + if (bio_data_dir(io->base_bio) == READ) { + if (kcryptd_io_read(io, GFP_NOWAIT)) + kcryptd_queue_io(io); + } else kcryptd_queue_crypt(io); return DM_MAPIO_SUBMITTED; @@ -1306,10 +1709,7 @@ static int crypt_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - if (cc->cipher_mode) - DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode); - else - DMEMIT("%s ", cc->cipher); + DMEMIT("%s ", cc->cipher_string); if (cc->key_size > 0) { if ((maxlen - sz) < ((cc->key_size << 1) + 1)) @@ -1421,7 +1821,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 7, 0}, + .version = {1, 10, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index baa11912cc94..f18375dcedd9 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -352,7 +352,7 @@ static int __init dm_delay_init(void) { int r = -ENOMEM; - kdelayd_wq = create_workqueue("kdelayd"); + kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); if (!kdelayd_wq) { DMERR("Couldn't start kdelayd"); goto bad_queue; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4b54618b4159..6d12775a1061 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -295,19 +295,55 @@ retry: DMWARN("remove_all left %d open device(s)", dev_skipped); } +/* + * Set the uuid of a hash_cell that isn't already set. 
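Several hunks in this series replace create_singlethread_workqueue() with alloc_workqueue() plus explicit flags. A minimal, self-contained sketch of that conversion follows; the module and queue names are made up, but the flags and max_active value mirror the kcryptd conversion above.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *w)
{
	pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* one in-flight item, safe to use on the memory-reclaim path */
	example_wq = alloc_workqueue("example_io",
				     WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 1);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* drains pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");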
+ */ +static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid) +{ + mutex_lock(&dm_hash_cells_mutex); + hc->uuid = new_uuid; + mutex_unlock(&dm_hash_cells_mutex); + + list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid)); +} + +/* + * Changes the name of a hash_cell and returns the old name for + * the caller to free. + */ +static char *__change_cell_name(struct hash_cell *hc, char *new_name) +{ + char *old_name; + + /* + * Rename and move the name cell. + */ + list_del(&hc->name_list); + old_name = hc->name; + + mutex_lock(&dm_hash_cells_mutex); + hc->name = new_name; + mutex_unlock(&dm_hash_cells_mutex); + + list_add(&hc->name_list, _name_buckets + hash_str(new_name)); + + return old_name; +} + static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, const char *new) { - char *new_name, *old_name; + char *new_data, *old_name = NULL; struct hash_cell *hc; struct dm_table *table; struct mapped_device *md; + unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; /* * duplicate new. */ - new_name = kstrdup(new, GFP_KERNEL); - if (!new_name) + new_data = kstrdup(new, GFP_KERNEL); + if (!new_data) return ERR_PTR(-ENOMEM); down_write(&_hash_lock); @@ -315,13 +351,19 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, /* * Is new free ? */ - hc = __get_name_cell(new); + if (change_uuid) + hc = __get_uuid_cell(new); + else + hc = __get_name_cell(new); + if (hc) { - DMWARN("asked to rename to an already-existing name %s -> %s", + DMWARN("Unable to change %s on mapped device %s to one that " + "already exists: %s", + change_uuid ? "uuid" : "name", param->name, new); dm_put(hc->md); up_write(&_hash_lock); - kfree(new_name); + kfree(new_data); return ERR_PTR(-EBUSY); } @@ -330,22 +372,30 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, */ hc = __get_name_cell(param->name); if (!hc) { - DMWARN("asked to rename a non-existent device %s -> %s", - param->name, new); + DMWARN("Unable to rename non-existent device, %s to %s%s", + param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); - kfree(new_name); + kfree(new_data); return ERR_PTR(-ENXIO); } /* - * rename and move the name cell. + * Does this device already have a uuid? */ - list_del(&hc->name_list); - old_name = hc->name; - mutex_lock(&dm_hash_cells_mutex); - hc->name = new_name; - mutex_unlock(&dm_hash_cells_mutex); - list_add(&hc->name_list, _name_buckets + hash_str(new_name)); + if (change_uuid && hc->uuid) { + DMWARN("Unable to change uuid of mapped device %s to %s " + "because uuid is already set to %s", + param->name, new, hc->uuid); + dm_put(hc->md); + up_write(&_hash_lock); + kfree(new_data); + return ERR_PTR(-EINVAL); + } + + if (change_uuid) + __set_cell_uuid(hc, new_data); + else + old_name = __change_cell_name(hc, new_data); /* * Wake up any dm event waiters. 
@@ -729,7 +779,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) hc = __find_device_hash_cell(param); if (!hc) { - DMWARN("device doesn't appear to be in the dev hash table."); + DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } @@ -741,7 +791,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) */ r = dm_lock_for_deletion(md); if (r) { - DMWARN("unable to remove open device %s", hc->name); + DMDEBUG_LIMIT("unable to remove open device %s", hc->name); up_write(&_hash_lock); dm_put(md); return r; @@ -774,21 +824,24 @@ static int invalid_str(char *str, void *end) static int dev_rename(struct dm_ioctl *param, size_t param_size) { int r; - char *new_name = (char *) param + param->data_start; + char *new_data = (char *) param + param->data_start; struct mapped_device *md; + unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; - if (new_name < param->data || - invalid_str(new_name, (void *) param + param_size) || - strlen(new_name) > DM_NAME_LEN - 1) { - DMWARN("Invalid new logical volume name supplied."); + if (new_data < param->data || + invalid_str(new_data, (void *) param + param_size) || + strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { + DMWARN("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } - r = check_name(new_name); - if (r) - return r; + if (!change_uuid) { + r = check_name(new_data); + if (r) + return r; + } - md = dm_hash_rename(param, new_name); + md = dm_hash_rename(param, new_data); if (IS_ERR(md)) return PTR_ERR(md); @@ -885,7 +938,7 @@ static int do_resume(struct dm_ioctl *param) hc = __find_device_hash_cell(param); if (!hc) { - DMWARN("device doesn't appear to be in the dev hash table."); + DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } @@ -1212,7 +1265,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size) hc = __find_device_hash_cell(param); if (!hc) { - DMWARN("device doesn't appear to be in the dev hash table."); + DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index d8587bac5682..924f5f0084c2 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -37,6 +37,13 @@ struct dm_kcopyd_client { unsigned int nr_pages; unsigned int nr_free_pages; + /* + * Block devices to unplug. + * Non-NULL pointer means that a block device has some pending requests + * and needs to be unplugged. + */ + struct block_device *unplug[2]; + struct dm_io_client *io_client; wait_queue_head_t destroyq; @@ -308,6 +315,31 @@ static int run_complete_job(struct kcopyd_job *job) return 0; } +/* + * Unplug the block device at the specified index. + */ +static void unplug(struct dm_kcopyd_client *kc, int rw) +{ + if (kc->unplug[rw] != NULL) { + blk_unplug(bdev_get_queue(kc->unplug[rw])); + kc->unplug[rw] = NULL; + } +} + +/* + * Prepare block device unplug. If there's another device + * to be unplugged at the same array index, we unplug that + * device first. 
+ */ +static void prepare_unplug(struct dm_kcopyd_client *kc, int rw, + struct block_device *bdev) +{ + if (likely(kc->unplug[rw] == bdev)) + return; + unplug(kc, rw); + kc->unplug[rw] = bdev; +} + static void complete_io(unsigned long error, void *context) { struct kcopyd_job *job = (struct kcopyd_job *) context; @@ -345,7 +377,7 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG, + .bi_rw = job->rw, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = job->offset, @@ -354,10 +386,16 @@ static int run_io_job(struct kcopyd_job *job) .client = job->kc->io_client, }; - if (job->rw == READ) + if (job->rw == READ) { r = dm_io(&io_req, 1, &job->source, NULL); - else + prepare_unplug(job->kc, READ, job->source.bdev); + } else { + if (job->num_dests > 1) + io_req.bi_rw |= REQ_UNPLUG; r = dm_io(&io_req, job->num_dests, job->dests, NULL); + if (!(io_req.bi_rw & REQ_UNPLUG)) + prepare_unplug(job->kc, WRITE, job->dests[0].bdev); + } return r; } @@ -435,10 +473,18 @@ static void do_work(struct work_struct *work) * Pages jobs when successful will jump onto the io jobs * list. io jobs call wake when they complete and it all * starts again. + * + * Note that io_jobs add block devices to the unplug array, + * this array is cleared with "unplug" calls. It is thus + * forbidden to run complete_jobs after io_jobs and before + * unplug because the block device could be destroyed in + * job completion callback. */ process_jobs(&kc->complete_jobs, kc, run_complete_job); process_jobs(&kc->pages_jobs, kc, run_pages_job); process_jobs(&kc->io_jobs, kc, run_io_job); + unplug(kc, READ); + unplug(kc, WRITE); } /* @@ -619,12 +665,15 @@ int dm_kcopyd_client_create(unsigned int nr_pages, INIT_LIST_HEAD(&kc->io_jobs); INIT_LIST_HEAD(&kc->pages_jobs); + memset(kc->unplug, 0, sizeof(kc->unplug)); + kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); if (!kc->job_pool) goto bad_slab; INIT_WORK(&kc->kcopyd_work, do_work); - kc->kcopyd_wq = create_singlethread_workqueue("kcopyd"); + kc->kcopyd_wq = alloc_workqueue("kcopyd", + WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); if (!kc->kcopyd_wq) goto bad_workqueue; diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index 1ed0094f064b..aa2e0c374ab3 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c @@ -12,12 +12,22 @@ #include "dm-log-userspace-transfer.h" +#define DM_LOG_USERSPACE_VSN "1.1.0" + struct flush_entry { int type; region_t region; struct list_head list; }; +/* + * This limit on the number of mark and clear request is, to a degree, + * arbitrary. However, there is some basis for the choice in the limits + * imposed on the size of data payload by dm-log-userspace-transfer.c: + * dm_consult_userspace(). + */ +#define MAX_FLUSH_GROUP_COUNT 32 + struct log_c { struct dm_target *ti; uint32_t region_size; @@ -37,8 +47,15 @@ struct log_c { */ uint64_t in_sync_hint; + /* + * Mark and clear requests are held until a flush is issued + * so that we can group, and thereby limit, the amount of + * network traffic between kernel and userspace. The 'flush_lock' + * is used to protect these lists. 
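The unplug[] caching added to kcopyd above can be summarised as a tiny helper pair: remember the last block device a job was issued to, and only kick its request queue once per batch, flushing a previously remembered device if a different one shows up. The sketch below uses invented names; blk_unplug() and bdev_get_queue() are the real interfaces this tree still provides (the on-stack plugging rework lands later).

#include <linux/blkdev.h>

struct example_unplug_cache {
	struct block_device *pending[2];	/* indexed by READ / WRITE */
};

static void example_unplug(struct example_unplug_cache *c, int rw)
{
	if (c->pending[rw]) {
		blk_unplug(bdev_get_queue(c->pending[rw]));
		c->pending[rw] = NULL;
	}
}

static void example_prepare_unplug(struct example_unplug_cache *c, int rw,
				   struct block_device *bdev)
{
	if (c->pending[rw] == bdev)
		return;
	example_unplug(c, rw);	/* a different device is pending: kick it now */
	c->pending[rw] = bdev;
}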
+ */ spinlock_t flush_lock; - struct list_head flush_list; /* only for clear and mark requests */ + struct list_head mark_list; + struct list_head clear_list; }; static mempool_t *flush_entry_pool; @@ -169,7 +186,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, strncpy(lc->uuid, argv[0], DM_UUID_LEN); spin_lock_init(&lc->flush_lock); - INIT_LIST_HEAD(&lc->flush_list); + INIT_LIST_HEAD(&lc->mark_list); + INIT_LIST_HEAD(&lc->clear_list); str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); if (str_size < 0) { @@ -181,8 +199,11 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, ctr_str, str_size, NULL, NULL); - if (r == -ESRCH) { - DMERR("Userspace log server not found"); + if (r < 0) { + if (r == -ESRCH) + DMERR("Userspace log server not found"); + else + DMERR("Userspace log server failed to create log"); goto out; } @@ -214,10 +235,9 @@ out: static void userspace_dtr(struct dm_dirty_log *log) { - int r; struct log_c *lc = log->context; - r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, + (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, NULL, 0, NULL, NULL); @@ -338,6 +358,71 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region, return (r) ? 0 : (int)in_sync; } +static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list) +{ + int r = 0; + struct flush_entry *fe; + + list_for_each_entry(fe, flush_list, list) { + r = userspace_do_request(lc, lc->uuid, fe->type, + (char *)&fe->region, + sizeof(fe->region), + NULL, NULL); + if (r) + break; + } + + return r; +} + +static int flush_by_group(struct log_c *lc, struct list_head *flush_list) +{ + int r = 0; + int count; + uint32_t type = 0; + struct flush_entry *fe, *tmp_fe; + LIST_HEAD(tmp_list); + uint64_t group[MAX_FLUSH_GROUP_COUNT]; + + /* + * Group process the requests + */ + while (!list_empty(flush_list)) { + count = 0; + + list_for_each_entry_safe(fe, tmp_fe, flush_list, list) { + group[count] = fe->region; + count++; + + list_del(&fe->list); + list_add(&fe->list, &tmp_list); + + type = fe->type; + if (count >= MAX_FLUSH_GROUP_COUNT) + break; + } + + r = userspace_do_request(lc, lc->uuid, type, + (char *)(group), + count * sizeof(uint64_t), + NULL, NULL); + if (r) { + /* Group send failed. Attempt one-by-one. */ + list_splice_init(&tmp_list, flush_list); + r = flush_one_by_one(lc, flush_list); + break; + } + } + + /* + * Must collect flush_entrys that were successfully processed + * as a group so that they will be free'd by the caller. + */ + list_splice_init(&tmp_list, flush_list); + + return r; +} + /* * userspace_flush * @@ -360,31 +445,25 @@ static int userspace_flush(struct dm_dirty_log *log) int r = 0; unsigned long flags; struct log_c *lc = log->context; - LIST_HEAD(flush_list); + LIST_HEAD(mark_list); + LIST_HEAD(clear_list); struct flush_entry *fe, *tmp_fe; spin_lock_irqsave(&lc->flush_lock, flags); - list_splice_init(&lc->flush_list, &flush_list); + list_splice_init(&lc->mark_list, &mark_list); + list_splice_init(&lc->clear_list, &clear_list); spin_unlock_irqrestore(&lc->flush_lock, flags); - if (list_empty(&flush_list)) + if (list_empty(&mark_list) && list_empty(&clear_list)) return 0; - /* - * FIXME: Count up requests, group request types, - * allocate memory to stick all requests in and - * send to server in one go. Failing the allocation, - * do it one by one. 
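Splitting the old flush_list into mark_list and clear_list keeps the familiar splice-under-lock idiom used by userspace_flush(): detach everything while holding the spinlock, then talk to userspace without it. A reduced sketch of that idiom, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_log {
	spinlock_t lock;
	struct list_head mark_list;
	struct list_head clear_list;
};

static void example_take_pending(struct example_log *lc,
				 struct list_head *marks,
				 struct list_head *clears)
{
	unsigned long flags;

	spin_lock_irqsave(&lc->lock, flags);
	list_splice_init(&lc->mark_list, marks);
	list_splice_init(&lc->clear_list, clears);
	spin_unlock_irqrestore(&lc->lock, flags);

	/* the caller now owns 'marks' and 'clears' and can sleep while
	 * sending them, as flush_by_group() does above */
}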
- */ + r = flush_by_group(lc, &mark_list); + if (r) + goto fail; - list_for_each_entry(fe, &flush_list, list) { - r = userspace_do_request(lc, lc->uuid, fe->type, - (char *)&fe->region, - sizeof(fe->region), - NULL, NULL); - if (r) - goto fail; - } + r = flush_by_group(lc, &clear_list); + if (r) + goto fail; r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL); @@ -395,7 +474,11 @@ fail: * Calling code will receive an error and will know that * the log facility has failed. */ - list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) { + list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) { + list_del(&fe->list); + mempool_free(fe, flush_entry_pool); + } + list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) { list_del(&fe->list); mempool_free(fe, flush_entry_pool); } @@ -425,7 +508,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region) spin_lock_irqsave(&lc->flush_lock, flags); fe->type = DM_ULOG_MARK_REGION; fe->region = region; - list_add(&fe->list, &lc->flush_list); + list_add(&fe->list, &lc->mark_list); spin_unlock_irqrestore(&lc->flush_lock, flags); return; @@ -462,7 +545,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region) spin_lock_irqsave(&lc->flush_lock, flags); fe->type = DM_ULOG_CLEAR_REGION; fe->region = region; - list_add(&fe->list, &lc->flush_list); + list_add(&fe->list, &lc->clear_list); spin_unlock_irqrestore(&lc->flush_lock, flags); return; @@ -684,7 +767,7 @@ static int __init userspace_dirty_log_init(void) return r; } - DMINFO("version 1.0.0 loaded"); + DMINFO("version " DM_LOG_USERSPACE_VSN " loaded"); return 0; } @@ -694,7 +777,7 @@ static void __exit userspace_dirty_log_exit(void) dm_ulog_tfr_exit(); mempool_destroy(flush_entry_pool); - DMINFO("version 1.0.0 unloaded"); + DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded"); return; } diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 075cbcf8a9f5..049eaf12aaab 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -198,6 +198,7 @@ resend: memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); memcpy(tfr->uuid, uuid, DM_UUID_LEN); + tfr->version = DM_ULOG_REQUEST_VERSION; tfr->luid = luid; tfr->seq = dm_ulog_seq++; diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 33420e68d153..6951536ea29c 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -455,7 +455,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, r = PTR_ERR(lc->io_req.client); DMWARN("couldn't allocate disk io client"); kfree(lc); - return -ENOMEM; + return r; } lc->disk_header = vmalloc(buf_size); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 487ecda90ad4..b82d28819e2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -23,6 +23,8 @@ #define DM_MSG_PREFIX "multipath" #define MESG_STR(x) x, sizeof(x) +#define DM_PG_INIT_DELAY_MSECS 2000 +#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) /* Path properties */ struct pgpath { @@ -33,8 +35,7 @@ struct pgpath { unsigned fail_count; /* Cumulative failure count */ struct dm_path path; - struct work_struct deactivate_path; - struct work_struct activate_path; + struct delayed_work activate_path; }; #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) @@ -65,11 +66,15 @@ struct multipath { const char *hw_handler_name; char *hw_handler_params; + unsigned nr_priority_groups; struct list_head priority_groups; + + wait_queue_head_t 
pg_init_wait; /* Wait for pg_init completion */ + unsigned pg_init_required; /* pg_init needs calling? */ unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ - wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ + unsigned pg_init_delay_retry; /* Delay pg_init retry? */ unsigned nr_valid_paths; /* Total number of usable paths */ struct pgpath *current_pgpath; @@ -82,6 +87,7 @@ struct multipath { unsigned saved_queue_if_no_path;/* Saved state during suspension */ unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ + unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ struct work_struct process_queued_ios; struct list_head queued_ios; @@ -116,7 +122,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); -static void deactivate_path(struct work_struct *work); /*----------------------------------------------- @@ -129,8 +134,7 @@ static struct pgpath *alloc_pgpath(void) if (pgpath) { pgpath->is_active = 1; - INIT_WORK(&pgpath->deactivate_path, deactivate_path); - INIT_WORK(&pgpath->activate_path, activate_path); + INIT_DELAYED_WORK(&pgpath->activate_path, activate_path); } return pgpath; @@ -141,14 +145,6 @@ static void free_pgpath(struct pgpath *pgpath) kfree(pgpath); } -static void deactivate_path(struct work_struct *work) -{ - struct pgpath *pgpath = - container_of(work, struct pgpath, deactivate_path); - - blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); -} - static struct priority_group *alloc_priority_group(void) { struct priority_group *pg; @@ -199,6 +195,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) INIT_LIST_HEAD(&m->queued_ios); spin_lock_init(&m->lock); m->queue_io = 1; + m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); @@ -238,14 +235,19 @@ static void free_multipath(struct multipath *m) static void __pg_init_all_paths(struct multipath *m) { struct pgpath *pgpath; + unsigned long pg_init_delay = 0; m->pg_init_count++; m->pg_init_required = 0; + if (m->pg_init_delay_retry) + pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? 
+ m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { /* Skip failed paths */ if (!pgpath->is_active) continue; - if (queue_work(kmpath_handlerd, &pgpath->activate_path)) + if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, + pg_init_delay)) m->pg_init_in_progress++; } } @@ -793,8 +795,9 @@ static int parse_features(struct arg_set *as, struct multipath *m) const char *param_name; static struct param _params[] = { - {0, 3, "invalid number of feature args"}, + {0, 5, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, + {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; r = read_param(_params, shift(as), &argc, &ti->error); @@ -821,6 +824,14 @@ static int parse_features(struct arg_set *as, struct multipath *m) continue; } + if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && + (argc >= 1)) { + r = read_param(_params + 2, shift(as), + &m->pg_init_delay_msecs, &ti->error); + argc--; + continue; + } + ti->error = "Unrecognised multipath feature request"; r = -EINVAL; } while (argc && !r); @@ -931,7 +942,7 @@ static void flush_multipath_work(struct multipath *m) flush_workqueue(kmpath_handlerd); multipath_wait_for_pg_init_completion(m); flush_workqueue(kmultipathd); - flush_scheduled_work(); + flush_work_sync(&m->trigger_event); } static void multipath_dtr(struct dm_target *ti) @@ -995,7 +1006,6 @@ static int fail_path(struct pgpath *pgpath) pgpath->path.dev->name, m->nr_valid_paths); schedule_work(&m->trigger_event); - queue_work(kmultipathd, &pgpath->deactivate_path); out: spin_unlock_irqrestore(&m->lock, flags); @@ -1034,7 +1044,7 @@ static int reinstate_path(struct pgpath *pgpath) m->current_pgpath = NULL; queue_work(kmultipathd, &m->process_queued_ios); } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { - if (queue_work(kmpath_handlerd, &pgpath->activate_path)) + if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; } @@ -1169,6 +1179,7 @@ static void pg_init_done(void *data, int errors) struct priority_group *pg = pgpath->pg; struct multipath *m = pg->m; unsigned long flags; + unsigned delay_retry = 0; /* device or driver problems */ switch (errors) { @@ -1193,8 +1204,9 @@ static void pg_init_done(void *data, int errors) */ bypass_pg(m, pg, 1); break; - /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */ case SCSI_DH_RETRY: + /* Wait before retrying. 
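The multipath changes around here convert activate_path from a plain work_struct to a delayed_work so a pg_init retry can be deferred. The generic shape of that conversion, with invented names, is:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_path {
	struct delayed_work activate;
};

static void example_activate_fn(struct work_struct *work)
{
	/* note the extra ".work" now that the handler lives in a delayed_work */
	struct example_path *p =
		container_of(work, struct example_path, activate.work);

	pr_debug("activating path %p\n", p);
}

static void example_path_init(struct example_path *p)
{
	INIT_DELAYED_WORK(&p->activate, example_activate_fn);
}

static void example_queue_activation(struct workqueue_struct *wq,
				     struct example_path *p,
				     unsigned int delay_msecs)
{
	/* a delay of 0 keeps the old "run as soon as possible" behaviour */
	queue_delayed_work(wq, &p->activate, msecs_to_jiffies(delay_msecs));
}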
*/ + delay_retry = 1; case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) @@ -1227,6 +1239,7 @@ static void pg_init_done(void *data, int errors) if (!m->pg_init_required) m->queue_io = 0; + m->pg_init_delay_retry = delay_retry; queue_work(kmultipathd, &m->process_queued_ios); /* @@ -1241,7 +1254,7 @@ out: static void activate_path(struct work_struct *work) { struct pgpath *pgpath = - container_of(work, struct pgpath, activate_path); + container_of(work, struct pgpath, activate_path.work); scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), pg_init_done, pgpath); @@ -1382,11 +1395,14 @@ static int multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); else { DMEMIT("%u ", m->queue_if_no_path + - (m->pg_init_retries > 0) * 2); + (m->pg_init_retries > 0) * 2 + + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); if (m->queue_if_no_path) DMEMIT("queue_if_no_path "); if (m->pg_init_retries) DMEMIT("pg_init_retries %u ", m->pg_init_retries); + if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) + DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); } if (!m->hw_handler_name || type == STATUSTYPE_INFO) @@ -1655,7 +1671,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 1, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, @@ -1687,7 +1703,7 @@ static int __init dm_multipath_init(void) return -EINVAL; } - kmultipathd = create_workqueue("kmpathd"); + kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); dm_unregister_target(&multipath_target); @@ -1701,7 +1717,8 @@ static int __init dm_multipath_init(void) * old workqueue would also create a bottleneck in the * path of the storage hardware device activation. */ - kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd"); + kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd", + WQ_MEM_RECLAIM); if (!kmpath_handlerd) { DMERR("failed to create workqueue kmpath_handlerd"); destroy_workqueue(kmultipathd); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c new file mode 100644 index 000000000000..b9e1e15ef11c --- /dev/null +++ b/drivers/md/dm-raid.c @@ -0,0 +1,697 @@ +/* + * Copyright (C) 2010-2011 Neil Brown + * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include + +#include "md.h" +#include "raid5.h" +#include "dm.h" +#include "bitmap.h" + +#define DM_MSG_PREFIX "raid" + +/* + * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then + * make it so the flag doesn't set anything. + */ +#ifndef MD_SYNC_STATE_FORCED +#define MD_SYNC_STATE_FORCED 0 +#endif + +struct raid_dev { + /* + * Two DM devices, one to hold metadata and one to hold the + * actual data/parity. The reason for this is to not confuse + * ti->len and give more flexibility in altering size and + * characteristics. + * + * While it is possible for this device to be associated + * with a different physical device than the data_dev, it + * is intended for it to be the same. + * |--------- Physical Device ---------| + * |- meta_dev -|------ data_dev ------| + */ + struct dm_dev *meta_dev; + struct dm_dev *data_dev; + struct mdk_rdev_s rdev; +}; + +/* + * Flags for rs->print_flags field. 
+ */ +#define DMPF_DAEMON_SLEEP 0x1 +#define DMPF_MAX_WRITE_BEHIND 0x2 +#define DMPF_SYNC 0x4 +#define DMPF_NOSYNC 0x8 +#define DMPF_STRIPE_CACHE 0x10 +#define DMPF_MIN_RECOVERY_RATE 0x20 +#define DMPF_MAX_RECOVERY_RATE 0x40 + +struct raid_set { + struct dm_target *ti; + + uint64_t print_flags; + + struct mddev_s md; + struct raid_type *raid_type; + struct dm_target_callbacks callbacks; + + struct raid_dev dev[0]; +}; + +/* Supported raid types and properties. */ +static struct raid_type { + const char *name; /* RAID algorithm. */ + const char *descr; /* Descriptor text for logging. */ + const unsigned parity_devs; /* # of parity devices. */ + const unsigned minimal_devs; /* minimal # of devices in set. */ + const unsigned level; /* RAID level. */ + const unsigned algorithm; /* RAID algorithm. */ +} raid_types[] = { + {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, + {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, + {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, + {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, + {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, + {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, + {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, + {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} +}; + +static struct raid_type *get_raid_type(char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(raid_types); i++) + if (!strcmp(raid_types[i].name, name)) + return &raid_types[i]; + + return NULL; +} + +static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs) +{ + unsigned i; + struct raid_set *rs; + sector_t sectors_per_dev; + + if (raid_devs <= raid_type->parity_devs) { + ti->error = "Insufficient number of devices"; + return ERR_PTR(-EINVAL); + } + + sectors_per_dev = ti->len; + if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { + ti->error = "Target length not divisible by number of data devices"; + return ERR_PTR(-EINVAL); + } + + rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); + if (!rs) { + ti->error = "Cannot allocate raid context"; + return ERR_PTR(-ENOMEM); + } + + mddev_init(&rs->md); + + rs->ti = ti; + rs->raid_type = raid_type; + rs->md.raid_disks = raid_devs; + rs->md.level = raid_type->level; + rs->md.new_level = rs->md.level; + rs->md.dev_sectors = sectors_per_dev; + rs->md.layout = raid_type->algorithm; + rs->md.new_layout = rs->md.layout; + rs->md.delta_disks = 0; + rs->md.recovery_cp = 0; + + for (i = 0; i < raid_devs; i++) + md_rdev_init(&rs->dev[i].rdev); + + /* + * Remaining items to be initialized by further RAID params: + * rs->md.persistent + * rs->md.external + * rs->md.chunk_sectors + * rs->md.new_chunk_sectors + */ + + return rs; +} + +static void context_free(struct raid_set *rs) +{ + int i; + + for (i = 0; i < rs->md.raid_disks; i++) + if (rs->dev[i].data_dev) + dm_put_device(rs->ti, rs->dev[i].data_dev); + + kfree(rs); +} + +/* + * For every device we have two words + * : meta device name or '-' if missing + * : data device name or '-' if missing + * + * This code parses those words. 
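context_alloc() above sizes the raid_set with a flexible array so one allocation covers the header plus one raid_dev per member device. A stripped-down sketch of that allocation pattern, with hypothetical types:

#include <linux/slab.h>

struct example_dev {
	void *data;
};

struct example_set {
	unsigned nr_devs;
	struct example_dev dev[0];	/* flexible array member */
};

static struct example_set *example_alloc(unsigned nr_devs)
{
	struct example_set *rs;

	/* one zeroed allocation: header plus nr_devs trailing slots */
	rs = kzalloc(sizeof(*rs) + nr_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs)
		return NULL;

	rs->nr_devs = nr_devs;
	return rs;
}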
+ */ +static int dev_parms(struct raid_set *rs, char **argv) +{ + int i; + int rebuild = 0; + int metadata_available = 0; + int ret = 0; + + for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { + rs->dev[i].rdev.raid_disk = i; + + rs->dev[i].meta_dev = NULL; + rs->dev[i].data_dev = NULL; + + /* + * There are no offsets, since there is a separate device + * for data and metadata. + */ + rs->dev[i].rdev.data_offset = 0; + rs->dev[i].rdev.mddev = &rs->md; + + if (strcmp(argv[0], "-")) { + rs->ti->error = "Metadata devices not supported"; + return -EINVAL; + } + + if (!strcmp(argv[1], "-")) { + if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && + (!rs->dev[i].rdev.recovery_offset)) { + rs->ti->error = "Drive designated for rebuild not specified"; + return -EINVAL; + } + + continue; + } + + ret = dm_get_device(rs->ti, argv[1], + dm_table_get_mode(rs->ti->table), + &rs->dev[i].data_dev); + if (ret) { + rs->ti->error = "RAID device lookup failure"; + return ret; + } + + rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; + list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); + if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) + rebuild++; + } + + if (metadata_available) { + rs->md.external = 0; + rs->md.persistent = 1; + rs->md.major_version = 2; + } else if (rebuild && !rs->md.recovery_cp) { + /* + * Without metadata, we will not be able to tell if the array + * is in-sync or not - we must assume it is not. Therefore, + * it is impossible to rebuild a drive. + * + * Even if there is metadata, the on-disk information may + * indicate that the array is not in-sync and it will then + * fail at that time. + * + * User could specify 'nosync' option if desperate. + */ + DMERR("Unable to rebuild drive while array is not in-sync"); + rs->ti->error = "RAID device lookup failure"; + return -EINVAL; + } + + return 0; +} + +/* + * Possible arguments are... 
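To make the constructor format concrete (the full argument list is documented just below), a device-mapper table line for this target could look like the following. The device names and sizes are invented for illustration; the metadata slots must be "-" because metadata devices are not supported yet, and the target length (3145728 sectors) divides evenly by the three data devices:

0 3145728 raid raid5_ls 2 128 nosync 4 - /dev/sdb1 - /dev/sdc1 - /dev/sdd1 - /dev/sde1

Here "2" counts the raid parameters (the mandatory 128-sector chunk size plus "nosync"), "4" counts the member devices, and each member is given as a metadata/data pair.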
+ * RAID456: + * [optional_args] + * + * Optional args: + * [[no]sync] Force or prevent recovery of the entire array + * [rebuild ] Rebuild the drive indicated by the index + * [daemon_sleep ] Time between bitmap daemon work to clear bits + * [min_recovery_rate ] Throttle RAID initialization + * [max_recovery_rate ] Throttle RAID initialization + * [max_write_behind ] See '-write-behind=' (man mdadm) + * [stripe_cache ] Stripe cache size for higher RAIDs + */ +static int parse_raid_params(struct raid_set *rs, char **argv, + unsigned num_raid_params) +{ + unsigned i, rebuild_cnt = 0; + unsigned long value; + char *key; + + /* + * First, parse the in-order required arguments + */ + if ((strict_strtoul(argv[0], 10, &value) < 0) || + !is_power_of_2(value) || (value < 8)) { + rs->ti->error = "Bad chunk size"; + return -EINVAL; + } + + rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; + argv++; + num_raid_params--; + + /* + * Second, parse the unordered optional arguments + */ + for (i = 0; i < rs->md.raid_disks; i++) + set_bit(In_sync, &rs->dev[i].rdev.flags); + + for (i = 0; i < num_raid_params; i++) { + if (!strcmp(argv[i], "nosync")) { + rs->md.recovery_cp = MaxSector; + rs->print_flags |= DMPF_NOSYNC; + rs->md.flags |= MD_SYNC_STATE_FORCED; + continue; + } + if (!strcmp(argv[i], "sync")) { + rs->md.recovery_cp = 0; + rs->print_flags |= DMPF_SYNC; + rs->md.flags |= MD_SYNC_STATE_FORCED; + continue; + } + + /* The rest of the optional arguments come in key/value pairs */ + if ((i + 1) >= num_raid_params) { + rs->ti->error = "Wrong number of raid parameters given"; + return -EINVAL; + } + + key = argv[i++]; + if (strict_strtoul(argv[i], 10, &value) < 0) { + rs->ti->error = "Bad numerical argument given in raid params"; + return -EINVAL; + } + + if (!strcmp(key, "rebuild")) { + if (++rebuild_cnt > rs->raid_type->parity_devs) { + rs->ti->error = "Too many rebuild drives given"; + return -EINVAL; + } + if (value > rs->md.raid_disks) { + rs->ti->error = "Invalid rebuild index given"; + return -EINVAL; + } + clear_bit(In_sync, &rs->dev[value].rdev.flags); + rs->dev[value].rdev.recovery_offset = 0; + } else if (!strcmp(key, "max_write_behind")) { + rs->print_flags |= DMPF_MAX_WRITE_BEHIND; + + /* + * In device-mapper, we specify things in sectors, but + * MD records this value in kB + */ + value /= 2; + if (value > COUNTER_MAX) { + rs->ti->error = "Max write-behind limit out of range"; + return -EINVAL; + } + rs->md.bitmap_info.max_write_behind = value; + } else if (!strcmp(key, "daemon_sleep")) { + rs->print_flags |= DMPF_DAEMON_SLEEP; + if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { + rs->ti->error = "daemon sleep period out of range"; + return -EINVAL; + } + rs->md.bitmap_info.daemon_sleep = value; + } else if (!strcmp(key, "stripe_cache")) { + rs->print_flags |= DMPF_STRIPE_CACHE; + + /* + * In device-mapper, we specify things in sectors, but + * MD records this value in kB + */ + value /= 2; + + if (rs->raid_type->level < 5) { + rs->ti->error = "Inappropriate argument: stripe_cache"; + return -EINVAL; + } + if (raid5_set_cache_size(&rs->md, (int)value)) { + rs->ti->error = "Bad stripe_cache size"; + return -EINVAL; + } + } else if (!strcmp(key, "min_recovery_rate")) { + rs->print_flags |= DMPF_MIN_RECOVERY_RATE; + if (value > INT_MAX) { + rs->ti->error = "min_recovery_rate out of range"; + return -EINVAL; + } + rs->md.sync_speed_min = (int)value; + } else if (!strcmp(key, "max_recovery_rate")) { + rs->print_flags |= DMPF_MAX_RECOVERY_RATE; + if (value > INT_MAX) { + rs->ti->error = 
"max_recovery_rate out of range"; + return -EINVAL; + } + rs->md.sync_speed_max = (int)value; + } else { + DMERR("Unable to parse RAID parameter: %s", key); + rs->ti->error = "Unable to parse RAID parameters"; + return -EINVAL; + } + } + + /* Assume there are no metadata devices until the drives are parsed */ + rs->md.persistent = 0; + rs->md.external = 1; + + return 0; +} + +static void do_table_event(struct work_struct *ws) +{ + struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); + + dm_table_event(rs->ti->table); +} + +static int raid_is_congested(struct dm_target_callbacks *cb, int bits) +{ + struct raid_set *rs = container_of(cb, struct raid_set, callbacks); + + return md_raid5_congested(&rs->md, bits); +} + +static void raid_unplug(struct dm_target_callbacks *cb) +{ + struct raid_set *rs = container_of(cb, struct raid_set, callbacks); + + md_raid5_unplug_device(rs->md.private); +} + +/* + * Construct a RAID4/5/6 mapping: + * Args: + * <#raid_params> \ + * <#raid_devs> { .. } + * + * ** metadata devices are not supported yet, use '-' instead ** + * + * varies by . See 'parse_raid_params' for + * details on possible . + */ +static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + int ret; + struct raid_type *rt; + unsigned long num_raid_params, num_raid_devs; + struct raid_set *rs = NULL; + + /* Must have at least <#raid_params> */ + if (argc < 2) { + ti->error = "Too few arguments"; + return -EINVAL; + } + + /* raid type */ + rt = get_raid_type(argv[0]); + if (!rt) { + ti->error = "Unrecognised raid_type"; + return -EINVAL; + } + argc--; + argv++; + + /* number of RAID parameters */ + if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) { + ti->error = "Cannot understand number of RAID parameters"; + return -EINVAL; + } + argc--; + argv++; + + /* Skip over RAID params for now and find out # of devices */ + if (num_raid_params + 1 > argc) { + ti->error = "Arguments do not agree with counts given"; + return -EINVAL; + } + + if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) || + (num_raid_devs >= INT_MAX)) { + ti->error = "Cannot understand number of raid devices"; + return -EINVAL; + } + + rs = context_alloc(ti, rt, (unsigned)num_raid_devs); + if (IS_ERR(rs)) + return PTR_ERR(rs); + + ret = parse_raid_params(rs, argv, (unsigned)num_raid_params); + if (ret) + goto bad; + + ret = -EINVAL; + + argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */ + argv += num_raid_params + 1; + + if (argc != (num_raid_devs * 2)) { + ti->error = "Supplied RAID devices does not match the count given"; + goto bad; + } + + ret = dev_parms(rs, argv); + if (ret) + goto bad; + + INIT_WORK(&rs->md.event_work, do_table_event); + ti->split_io = rs->md.chunk_sectors; + ti->private = rs; + + mutex_lock(&rs->md.reconfig_mutex); + ret = md_run(&rs->md); + rs->md.in_sync = 0; /* Assume already marked dirty */ + mutex_unlock(&rs->md.reconfig_mutex); + + if (ret) { + ti->error = "Fail to run raid array"; + goto bad; + } + + rs->callbacks.congested_fn = raid_is_congested; + rs->callbacks.unplug_fn = raid_unplug; + dm_table_add_target_callbacks(ti->table, &rs->callbacks); + + return 0; + +bad: + context_free(rs); + + return ret; +} + +static void raid_dtr(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + + list_del_init(&rs->callbacks.list); + md_stop(&rs->md); + context_free(rs); +} + +static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) +{ + struct raid_set *rs = ti->private; + mddev_t *mddev = 
&rs->md; + + mddev->pers->make_request(mddev, bio); + + return DM_MAPIO_SUBMITTED; +} + +static int raid_status(struct dm_target *ti, status_type_t type, + char *result, unsigned maxlen) +{ + struct raid_set *rs = ti->private; + unsigned raid_param_cnt = 1; /* at least 1 for chunksize */ + unsigned sz = 0; + int i; + sector_t sync; + + switch (type) { + case STATUSTYPE_INFO: + DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); + + for (i = 0; i < rs->md.raid_disks; i++) { + if (test_bit(Faulty, &rs->dev[i].rdev.flags)) + DMEMIT("D"); + else if (test_bit(In_sync, &rs->dev[i].rdev.flags)) + DMEMIT("A"); + else + DMEMIT("a"); + } + + if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) + sync = rs->md.curr_resync_completed; + else + sync = rs->md.recovery_cp; + + if (sync > rs->md.resync_max_sectors) + sync = rs->md.resync_max_sectors; + + DMEMIT(" %llu/%llu", + (unsigned long long) sync, + (unsigned long long) rs->md.resync_max_sectors); + + break; + case STATUSTYPE_TABLE: + /* The string you would use to construct this array */ + for (i = 0; i < rs->md.raid_disks; i++) + if (rs->dev[i].data_dev && + !test_bit(In_sync, &rs->dev[i].rdev.flags)) + raid_param_cnt++; /* for rebuilds */ + + raid_param_cnt += (hweight64(rs->print_flags) * 2); + if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) + raid_param_cnt--; + + DMEMIT("%s %u %u", rs->raid_type->name, + raid_param_cnt, rs->md.chunk_sectors); + + if ((rs->print_flags & DMPF_SYNC) && + (rs->md.recovery_cp == MaxSector)) + DMEMIT(" sync"); + if (rs->print_flags & DMPF_NOSYNC) + DMEMIT(" nosync"); + + for (i = 0; i < rs->md.raid_disks; i++) + if (rs->dev[i].data_dev && + !test_bit(In_sync, &rs->dev[i].rdev.flags)) + DMEMIT(" rebuild %u", i); + + if (rs->print_flags & DMPF_DAEMON_SLEEP) + DMEMIT(" daemon_sleep %lu", + rs->md.bitmap_info.daemon_sleep); + + if (rs->print_flags & DMPF_MIN_RECOVERY_RATE) + DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); + + if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) + DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); + + if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) + DMEMIT(" max_write_behind %lu", + rs->md.bitmap_info.max_write_behind); + + if (rs->print_flags & DMPF_STRIPE_CACHE) { + raid5_conf_t *conf = rs->md.private; + + /* convert from kiB to sectors */ + DMEMIT(" stripe_cache %d", + conf ? 
conf->max_nr_stripes * 2 : 0); + } + + DMEMIT(" %d", rs->md.raid_disks); + for (i = 0; i < rs->md.raid_disks; i++) { + DMEMIT(" -"); /* metadata device */ + + if (rs->dev[i].data_dev) + DMEMIT(" %s", rs->dev[i].data_dev->name); + else + DMEMIT(" -"); + } + } + + return 0; +} + +static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) +{ + struct raid_set *rs = ti->private; + unsigned i; + int ret = 0; + + for (i = 0; !ret && i < rs->md.raid_disks; i++) + if (rs->dev[i].data_dev) + ret = fn(ti, + rs->dev[i].data_dev, + 0, /* No offset on data devs */ + rs->md.dev_sectors, + data); + + return ret; +} + +static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + struct raid_set *rs = ti->private; + unsigned chunk_size = rs->md.chunk_sectors << 9; + raid5_conf_t *conf = rs->md.private; + + blk_limits_io_min(limits, chunk_size); + blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded)); +} + +static void raid_presuspend(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + + md_stop_writes(&rs->md); +} + +static void raid_postsuspend(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + + mddev_suspend(&rs->md); +} + +static void raid_resume(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + + mddev_resume(&rs->md); +} + +static struct target_type raid_target = { + .name = "raid", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = raid_ctr, + .dtr = raid_dtr, + .map = raid_map, + .status = raid_status, + .iterate_devices = raid_iterate_devices, + .io_hints = raid_io_hints, + .presuspend = raid_presuspend, + .postsuspend = raid_postsuspend, + .resume = raid_resume, +}; + +static int __init dm_raid_init(void) +{ + return dm_register_target(&raid_target); +} + +static void __exit dm_raid_exit(void) +{ + dm_unregister_target(&raid_target); +} + +module_init(dm_raid_init); +module_exit(dm_raid_exit); + +MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); +MODULE_ALIAS("dm-raid4"); +MODULE_ALIAS("dm-raid5"); +MODULE_ALIAS("dm-raid6"); +MODULE_AUTHOR("Neil Brown "); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 19a59b041c27..dee326775c60 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti) struct dm_io_request io_req = { .bi_rw = WRITE_FLUSH, .mem.type = DM_IO_KMEM, - .mem.ptr.bvec = NULL, + .mem.ptr.addr = NULL, .client = ms->io_client, }; @@ -637,6 +637,12 @@ static void do_write(struct mirror_set *ms, struct bio *bio) .client = ms->io_client, }; + if (bio->bi_rw & REQ_DISCARD) { + io_req.bi_rw |= REQ_DISCARD; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = NULL; + } + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) map_region(dest++, m, bio); @@ -670,7 +676,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { - if (bio->bi_rw & REQ_FLUSH) { + if ((bio->bi_rw & REQ_FLUSH) || + (bio->bi_rw & REQ_DISCARD)) { bio_list_add(&sync, bio); continue; } @@ -1076,8 +1083,10 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = ms; ti->split_io = dm_rh_get_region_size(ms->rh); ti->num_flush_requests = 1; + ti->num_discard_requests = 1; - ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); + ms->kmirrord_wq = alloc_workqueue("kmirrord", + WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); if (!ms->kmirrord_wq) { DMERR("couldn't start 
kmirrord"); r = -ENOMEM; @@ -1130,7 +1139,7 @@ static void mirror_dtr(struct dm_target *ti) del_timer_sync(&ms->timer); flush_workqueue(ms->kmirrord_wq); - flush_scheduled_work(); + flush_work_sync(&ms->trigger_event); dm_kcopyd_client_destroy(ms->kcopyd_client); destroy_workqueue(ms->kmirrord_wq); free_context(ms, ti, ms->nr_mirrors); @@ -1406,7 +1415,7 @@ static int mirror_iterate_devices(struct dm_target *ti, static struct target_type mirror_target = { .name = "mirror", - .version = {1, 12, 0}, + .version = {1, 12, 1}, .module = THIS_MODULE, .ctr = mirror_ctr, .dtr = mirror_dtr, diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 2129cdb115dc..95891dfcbca0 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, */ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); - flush_workqueue(ps->metadata_wq); + flush_work(&req.work); return req.result; } @@ -818,7 +818,7 @@ static int persistent_ctr(struct dm_exception_store *store, atomic_set(&ps->pending_count, 0); ps->callbacks = NULL; - ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); + ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { kfree(ps); DMERR("couldn't start header metadata update thread"); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 53cf79d8bcbc..fdde53cd12b7 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "dm-exception-store.h" @@ -80,9 +79,6 @@ struct dm_snapshot { /* Origin writes don't trigger exceptions until this is set */ int active; - /* Whether or not owning mapped_device is suspended */ - int suspended; - atomic_t pending_exceptions_count; mempool_t *pending_pool; @@ -106,10 +102,6 @@ struct dm_snapshot { struct dm_kcopyd_client *kcopyd_client; - /* Queue of snapshot writes for ksnapd to flush */ - struct bio_list queued_bios; - struct work_struct queued_bios_work; - /* Wait for events based on state_bits */ unsigned long state_bits; @@ -160,9 +152,6 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *s) } EXPORT_SYMBOL(dm_snap_cow); -static struct workqueue_struct *ksnapd; -static void flush_queued_bios(struct work_struct *work); - static sector_t chunk_to_sector(struct dm_exception_store *store, chunk_t chunk) { @@ -1110,7 +1099,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->ti = ti; s->valid = 1; s->active = 0; - s->suspended = 0; atomic_set(&s->pending_exceptions_count, 0); init_rwsem(&s->lock); INIT_LIST_HEAD(&s->list); @@ -1153,9 +1141,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) spin_lock_init(&s->tracked_chunk_lock); - bio_list_init(&s->queued_bios); - INIT_WORK(&s->queued_bios_work, flush_queued_bios); - ti->private = s; ti->num_flush_requests = num_flush_requests; @@ -1279,8 +1264,6 @@ static void snapshot_dtr(struct dm_target *ti) struct dm_snapshot *s = ti->private; struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; - flush_workqueue(ksnapd); - down_read(&_origins_lock); /* Check whether exception handover must be cancelled */ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); @@ -1342,20 +1325,6 @@ static void flush_bios(struct bio *bio) } } -static void flush_queued_bios(struct work_struct *work) -{ - struct dm_snapshot *s = - container_of(work, struct dm_snapshot, queued_bios_work); - 
struct bio *queued_bios; - unsigned long flags; - - spin_lock_irqsave(&s->pe_lock, flags); - queued_bios = bio_list_get(&s->queued_bios); - spin_unlock_irqrestore(&s->pe_lock, flags); - - flush_bios(queued_bios); -} - static int do_origin(struct dm_dev *origin, struct bio *bio); /* @@ -1760,15 +1729,6 @@ static void snapshot_merge_presuspend(struct dm_target *ti) stop_merge(s); } -static void snapshot_postsuspend(struct dm_target *ti) -{ - struct dm_snapshot *s = ti->private; - - down_write(&s->lock); - s->suspended = 1; - up_write(&s->lock); -} - static int snapshot_preresume(struct dm_target *ti) { int r = 0; @@ -1783,7 +1743,7 @@ static int snapshot_preresume(struct dm_target *ti) DMERR("Unable to resume snapshot source until " "handover completes."); r = -EINVAL; - } else if (!snap_src->suspended) { + } else if (!dm_suspended(snap_src->ti)) { DMERR("Unable to perform snapshot handover until " "source is suspended."); r = -EINVAL; @@ -1816,7 +1776,6 @@ static void snapshot_resume(struct dm_target *ti) down_write(&s->lock); s->active = 1; - s->suspended = 0; up_write(&s->lock); } @@ -2194,7 +2153,7 @@ static int origin_iterate_devices(struct dm_target *ti, static struct target_type origin_target = { .name = "snapshot-origin", - .version = {1, 7, 0}, + .version = {1, 7, 1}, .module = THIS_MODULE, .ctr = origin_ctr, .dtr = origin_dtr, @@ -2207,13 +2166,12 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 9, 0}, + .version = {1, 10, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_map, .end_io = snapshot_end_io, - .postsuspend = snapshot_postsuspend, .preresume = snapshot_preresume, .resume = snapshot_resume, .status = snapshot_status, @@ -2222,14 +2180,13 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_merge_map, .end_io = snapshot_end_io, .presuspend = snapshot_merge_presuspend, - .postsuspend = snapshot_postsuspend, .preresume = snapshot_preresume, .resume = snapshot_merge_resume, .status = snapshot_status, @@ -2291,17 +2248,8 @@ static int __init dm_snapshot_init(void) goto bad_tracked_chunk_cache; } - ksnapd = create_singlethread_workqueue("ksnapd"); - if (!ksnapd) { - DMERR("Failed to create ksnapd workqueue."); - r = -ENOMEM; - goto bad_pending_pool; - } - return 0; -bad_pending_pool: - kmem_cache_destroy(tracked_chunk_cache); bad_tracked_chunk_cache: kmem_cache_destroy(pending_cache); bad_pending_cache: @@ -2322,8 +2270,6 @@ bad_register_snapshot_target: static void __exit dm_snapshot_exit(void) { - destroy_workqueue(ksnapd); - dm_unregister_target(&snapshot_target); dm_unregister_target(&origin_target); dm_unregister_target(&merge_target); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index f0371b4c4fbf..dddfa14f2982 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -39,23 +39,20 @@ struct stripe_c { struct dm_target *ti; /* Work struct used for triggering events*/ - struct work_struct kstriped_ws; + struct work_struct trigger_event; struct stripe stripe[0]; }; -static struct workqueue_struct *kstriped; - /* * An event is triggered whenever a drive * drops out of a stripe volume. 
*/ static void trigger_event(struct work_struct *work) { - struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws); - + struct stripe_c *sc = container_of(work, struct stripe_c, + trigger_event); dm_table_event(sc->ti->table); - } static inline struct stripe_c *alloc_context(unsigned int stripes) @@ -160,7 +157,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -ENOMEM; } - INIT_WORK(&sc->kstriped_ws, trigger_event); + INIT_WORK(&sc->trigger_event, trigger_event); /* Set pointer to dm target; used in trigger_event */ sc->ti = ti; @@ -211,7 +208,7 @@ static void stripe_dtr(struct dm_target *ti) for (i = 0; i < sc->stripes; i++) dm_put_device(ti, sc->stripe[i].dev); - flush_workqueue(kstriped); + flush_work_sync(&sc->trigger_event); kfree(sc); } @@ -367,7 +364,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, atomic_inc(&(sc->stripe[i].error_count)); if (atomic_read(&(sc->stripe[i].error_count)) < DM_IO_ERROR_THRESHOLD) - queue_work(kstriped, &sc->kstriped_ws); + schedule_work(&sc->trigger_event); } return error; @@ -401,7 +398,7 @@ static void stripe_io_hints(struct dm_target *ti, static struct target_type stripe_target = { .name = "striped", - .version = {1, 3, 0}, + .version = {1, 3, 1}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, @@ -422,20 +419,10 @@ int __init dm_stripe_init(void) return r; } - kstriped = create_singlethread_workqueue("kstriped"); - if (!kstriped) { - DMERR("failed to create workqueue kstriped"); - dm_unregister_target(&stripe_target); - return -ENOMEM; - } - return r; } void dm_stripe_exit(void) { dm_unregister_target(&stripe_target); - destroy_workqueue(kstriped); - - return; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 4d705cea0f8c..38e4eb1bb965 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -71,6 +71,8 @@ struct dm_table { void *event_context; struct dm_md_mempools *mempools; + + struct list_head target_callbacks; }; /* @@ -204,6 +206,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); + INIT_LIST_HEAD(&t->target_callbacks); atomic_set(&t->holders, 0); t->discards_supported = 1; @@ -325,15 +328,18 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev, BUG_ON(d->dm_dev.bdev); - bdev = open_by_devnum(dev, d->dm_dev.mode); + bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr); if (IS_ERR(bdev)) return PTR_ERR(bdev); - r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); - if (r) - blkdev_put(bdev, d->dm_dev.mode); - else - d->dm_dev.bdev = bdev; - return r; + + r = bd_link_disk_holder(bdev, dm_disk(md)); + if (r) { + blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL); + return r; + } + + d->dm_dev.bdev = bdev; + return 0; } /* @@ -344,8 +350,8 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) if (!d->dm_dev.bdev) return; - bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); - blkdev_put(d->dm_dev.bdev, d->dm_dev.mode); + bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md)); + blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL); d->dm_dev.bdev = NULL; } @@ -1223,10 +1229,17 @@ int dm_table_resume_targets(struct dm_table *t) return 0; } +void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) +{ + list_add(&cb->list, &t->target_callbacks); +} +EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); + int dm_table_any_congested(struct dm_table *t, int bdi_bits) { struct dm_dev_internal *dd; struct list_head 
*devices = dm_table_get_devices(t); + struct dm_target_callbacks *cb; int r = 0; list_for_each_entry(dd, devices, list) { @@ -1241,6 +1254,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) bdevname(dd->dm_dev.bdev, b)); } + list_for_each_entry(cb, &t->target_callbacks, list) + if (cb->congested_fn) + r |= cb->congested_fn(cb, bdi_bits); + return r; } @@ -1262,6 +1279,7 @@ void dm_table_unplug_all(struct dm_table *t) { struct dm_dev_internal *dd; struct list_head *devices = dm_table_get_devices(t); + struct dm_target_callbacks *cb; list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); @@ -1274,6 +1292,10 @@ void dm_table_unplug_all(struct dm_table *t) dm_device_name(t->md), bdevname(dd->dm_dev.bdev, b)); } + + list_for_each_entry(cb, &t->target_callbacks, list) + if (cb->unplug_fn) + cb->unplug_fn(cb); } struct mapped_device *dm_table_get_md(struct dm_table *t) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7cb1352f7e7a..eaa3af0e0632 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -32,7 +32,6 @@ #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" #define DM_COOKIE_LENGTH 24 -static DEFINE_MUTEX(dm_mutex); static const char *_name = DM_NAME; static unsigned int major = 0; @@ -328,7 +327,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; - mutex_lock(&dm_mutex); spin_lock(&_minor_lock); md = bdev->bd_disk->private_data; @@ -346,7 +344,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) out: spin_unlock(&_minor_lock); - mutex_unlock(&dm_mutex); return md ? 0 : -ENXIO; } @@ -355,10 +352,12 @@ static int dm_blk_close(struct gendisk *disk, fmode_t mode) { struct mapped_device *md = disk->private_data; - mutex_lock(&dm_mutex); + spin_lock(&_minor_lock); + atomic_dec(&md->open_count); dm_put(md); - mutex_unlock(&dm_mutex); + + spin_unlock(&_minor_lock); return 0; } @@ -630,7 +629,7 @@ static void dec_pending(struct dm_io *io, int error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ - trace_block_bio_complete(md->queue, bio); + trace_block_bio_complete(md->queue, bio, io_error); bio_endio(bio, io_error); } } @@ -990,8 +989,8 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ - trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, - tio->io->bio->bi_bdev->bd_dev, sector); + trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, + tio->io->bio->bi_bdev->bd_dev, sector); generic_make_request(clone); } else if (r < 0 || r == DM_MAPIO_REQUEUE) { @@ -1638,13 +1637,15 @@ static void dm_request_fn(struct request_queue *q) if (map_request(ti, clone, md)) goto requeued; - spin_lock_irq(q->queue_lock); + BUG_ON(!irqs_disabled()); + spin_lock(q->queue_lock); } goto out; requeued: - spin_lock_irq(q->queue_lock); + BUG_ON(!irqs_disabled()); + spin_lock(q->queue_lock); plug_and_out: if (!elv_queue_empty(q)) @@ -1884,7 +1885,8 @@ static struct mapped_device *alloc_dev(int minor) add_disk(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); - md->wq = create_singlethread_workqueue("kdmflush"); + md->wq = alloc_workqueue("kdmflush", + WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); if (!md->wq) goto bad_thread; @@ -1992,13 +1994,14 @@ static void event_callback(void *context) wake_up(&md->eventq); } +/* + * Protected by md->suspend_lock obtained by dm_swap_table(). 
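dm_table_add_target_callbacks() exported above lets a target hang per-table callbacks that dm_table_any_congested() and dm_table_unplug_all() now walk. A hedged sketch of how a target constructor might use it; the congested_fn/unplug_fn field names are inferred from the hunks above, and the example_* names are illustrative:

#include <linux/slab.h>
#include <linux/device-mapper.h>

static int example_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        /* report whether this target's backing store is congested */
        return 0;
}

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_target_callbacks *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

        if (!cb)
                return -ENOMEM;

        cb->congested_fn = example_congested;
        dm_table_add_target_callbacks(ti->table, cb);   /* list head is managed by the table */
        ti->private = cb;
        return 0;
}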
+ */ static void __set_size(struct mapped_device *md, sector_t size) { set_capacity(md->disk, size); - mutex_lock(&md->bdev->bd_inode->i_mutex); i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); - mutex_unlock(&md->bdev->bd_inode->i_mutex); } /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 175c424f201f..b76cfc89e1b5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -288,10 +288,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio) int rv; int cpu; - if (mddev == NULL || mddev->pers == NULL) { + if (mddev == NULL || mddev->pers == NULL + || !mddev->ready) { bio_io_error(bio); return 0; } + smp_rmb(); /* Ensure implications of 'active' are visible */ rcu_read_lock(); if (mddev->suspended) { DEFINE_WAIT(__wait); @@ -703,9 +705,9 @@ static struct mdk_personality *find_pers(int level, char *clevel) } /* return the offset of the super block in 512byte sectors */ -static inline sector_t calc_dev_sboffset(struct block_device *bdev) +static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev) { - sector_t num_sectors = i_size_read(bdev->bd_inode) / 512; + sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; return MD_NEW_SIZE_SECTORS(num_sectors); } @@ -763,7 +765,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, */ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); - bio->bi_bdev = rdev->bdev; + bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; bio->bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; @@ -793,7 +795,7 @@ static void bi_complete(struct bio *bio, int error) } int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, - struct page *page, int rw) + struct page *page, int rw, bool metadata_op) { struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); struct completion event; @@ -801,8 +803,12 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, rw |= REQ_SYNC | REQ_UNPLUG; - bio->bi_bdev = rdev->bdev; - bio->bi_sector = sector; + bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? + rdev->meta_bdev : rdev->bdev; + if (metadata_op) + bio->bi_sector = sector + rdev->sb_start; + else + bio->bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); init_completion(&event); bio->bi_private = &event; @@ -827,7 +833,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size) return 0; - if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ)) + if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) goto fail; rdev->sb_loaded = 1; return 0; @@ -989,7 +995,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version * * It also happens to be a multiple of 4Kb. 
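With the sync_page_io() change above, the sector argument is now relative to the start of the data (or of the superblock area when metadata_op is set) and rdev->data_offset or rdev->sb_start is added inside the helper. Condensed from the calling conventions visible here and in the raid1/raid10 hunks further down:

        /* old: caller applied the device offset itself */
        ok = sync_page_io(rdev, sect + rdev->data_offset, s << 9, page, READ);

        /* new: pass a relative sector and flag metadata accesses */
        ok = sync_page_io(rdev, sect, s << 9, page, READ, false);        /* data */
        ok = sync_page_io(rdev, 0, size, rdev->sb_page, READ, true);     /* superblock */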
*/ - rdev->sb_start = calc_dev_sboffset(rdev->bdev); + rdev->sb_start = calc_dev_sboffset(rdev); ret = read_disk_sb(rdev, MD_SB_BYTES); if (ret) return ret; @@ -1330,7 +1336,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) return 0; /* component must fit device */ if (rdev->mddev->bitmap_info.offset) return 0; /* can't move bitmap */ - rdev->sb_start = calc_dev_sboffset(rdev->bdev); + rdev->sb_start = calc_dev_sboffset(rdev); if (!num_sectors || num_sectors > rdev->sb_start) num_sectors = rdev->sb_start; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, @@ -1879,7 +1885,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); list_add_rcu(&rdev->same_set, &mddev->disks); - bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); + bd_link_disk_holder(rdev->bdev, mddev->gendisk); /* May as well allow recovery to be retried once */ mddev->recovery_disabled = 0; @@ -1906,7 +1912,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) MD_BUG(); return; } - bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); + bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; @@ -1934,19 +1940,13 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + shared ? (mdk_rdev_t *)lock_rdev : rdev); if (IS_ERR(bdev)) { printk(KERN_ERR "md: could not open %s.\n", __bdevname(dev, b)); return PTR_ERR(bdev); } - err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev); - if (err) { - printk(KERN_ERR "md: could not bd_claim %s.\n", - bdevname(bdev, b)); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); - return err; - } if (!shared) set_bit(AllReserved, &rdev->flags); rdev->bdev = bdev; @@ -1959,8 +1959,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) rdev->bdev = NULL; if (!bdev) MD_BUG(); - bd_release(bdev); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } void md_autodetect_dev(dev_t dev); @@ -2473,6 +2472,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) if (rdev2->raid_disk == slot) return -EEXIST; + if (slot >= rdev->mddev->raid_disks && + slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) + return -ENOSPC; + rdev->raid_disk = slot; if (test_bit(In_sync, &rdev->flags)) rdev->saved_raid_disk = slot; @@ -2490,7 +2493,8 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) /* failure here is OK */; /* don't wakeup anyone, leave that to userspace. 
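The dm-table open_dev() hunk earlier and the md lock_rdev()/bind_rdev_to_array() hunks above replace open_by_devnum() + bd_claim() with a single exclusive open. A condensed sketch of the new pairing; dev, mode, holder and disk stand in for the caller's values, and error handling is trimmed:

        struct block_device *bdev;
        int r;

        /* exclusive open; 'holder' identifies the claimer to the block layer */
        bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_link_disk_holder(bdev, disk);    /* expose the holder relationship in sysfs */
        if (r) {
                blkdev_put(bdev, mode | FMODE_EXCL);
                return r;
        }

        /* ... use bdev ...; teardown mirrors the setup */
        bd_unlink_disk_holder(bdev, disk);
        blkdev_put(bdev, mode | FMODE_EXCL);    /* FMODE_EXCL must match the get */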
*/ } else { - if (slot >= rdev->mddev->raid_disks) + if (slot >= rdev->mddev->raid_disks && + slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) return -ENOSPC; rdev->raid_disk = slot; /* assume it is working */ @@ -3115,7 +3119,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) char nm[20]; if (rdev->raid_disk < 0) continue; - if (rdev->new_raid_disk > mddev->raid_disks) + if (rdev->new_raid_disk >= mddev->raid_disks) rdev->new_raid_disk = -1; if (rdev->new_raid_disk == rdev->raid_disk) continue; @@ -3744,6 +3748,8 @@ action_show(mddev_t *mddev, char *page) return sprintf(page, "%s\n", type); } +static void reap_sync_thread(mddev_t *mddev); + static ssize_t action_store(mddev_t *mddev, const char *page, size_t len) { @@ -3758,9 +3764,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; - mddev->recovery = 0; + reap_sync_thread(mddev); } } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) @@ -3912,7 +3916,7 @@ static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); static ssize_t sync_completed_show(mddev_t *mddev, char *page) { - unsigned long max_sectors, resync; + unsigned long long max_sectors, resync; if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return sprintf(page, "none\n"); @@ -3923,7 +3927,7 @@ sync_completed_show(mddev_t *mddev, char *page) max_sectors = mddev->dev_sectors; resync = mddev->curr_resync_completed; - return sprintf(page, "%lu / %lu\n", resync, max_sectors); + return sprintf(page, "%llu / %llu\n", resync, max_sectors); } static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); @@ -4010,19 +4014,24 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) { char *e; unsigned long long new = simple_strtoull(buf, &e, 10); + unsigned long long old = mddev->suspend_lo; if (mddev->pers == NULL || mddev->pers->quiesce == NULL) return -EINVAL; if (buf == e || (*e && *e != '\n')) return -EINVAL; - if (new >= mddev->suspend_hi || - (new > mddev->suspend_lo && new < mddev->suspend_hi)) { - mddev->suspend_lo = new; + + mddev->suspend_lo = new; + if (new >= old) + /* Shrinking suspended region */ mddev->pers->quiesce(mddev, 2); - return len; - } else - return -EINVAL; + else { + /* Expanding suspended region - need to wait */ + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } + return len; } static struct md_sysfs_entry md_suspend_lo = __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); @@ -4039,20 +4048,24 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) { char *e; unsigned long long new = simple_strtoull(buf, &e, 10); + unsigned long long old = mddev->suspend_hi; if (mddev->pers == NULL || mddev->pers->quiesce == NULL) return -EINVAL; if (buf == e || (*e && *e != '\n')) return -EINVAL; - if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || - (new > mddev->suspend_lo && new > mddev->suspend_hi)) { - mddev->suspend_hi = new; + + mddev->suspend_hi = new; + if (new <= old) + /* Shrinking suspended region */ + mddev->pers->quiesce(mddev, 2); + else { + /* Expanding suspended region - need to wait */ mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); - return len; - } else - return -EINVAL; + } + return len; } static struct md_sysfs_entry md_suspend_hi = 
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); @@ -4430,7 +4443,9 @@ int md_run(mddev_t *mddev) * We don't want the data to overlap the metadata, * Internal Bitmap issues have been handled elsewhere. */ - if (rdev->data_offset < rdev->sb_start) { + if (rdev->meta_bdev) { + /* Nothing to check */; + } else if (rdev->data_offset < rdev->sb_start) { if (mddev->dev_sectors && rdev->data_offset + mddev->dev_sectors > rdev->sb_start) { @@ -4564,7 +4579,8 @@ int md_run(mddev_t *mddev) mddev->safemode_timer.data = (unsigned long) mddev; mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ mddev->in_sync = 1; - + smp_wmb(); + mddev->ready = 1; list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0) { char nm[20]; @@ -4701,13 +4717,12 @@ static void md_clean(mddev_t *mddev) mddev->plug = NULL; } -void md_stop_writes(mddev_t *mddev) +static void __md_stop_writes(mddev_t *mddev) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; + reap_sync_thread(mddev); } del_timer_sync(&mddev->safemode_timer); @@ -4721,10 +4736,18 @@ void md_stop_writes(mddev_t *mddev) md_update_sb(mddev, 1); } } + +void md_stop_writes(mddev_t *mddev) +{ + mddev_lock(mddev); + __md_stop_writes(mddev); + mddev_unlock(mddev); +} EXPORT_SYMBOL_GPL(md_stop_writes); void md_stop(mddev_t *mddev) { + mddev->ready = 0; mddev->pers->stop(mddev); if (mddev->pers->sync_request && mddev->to_remove == NULL) mddev->to_remove = &md_redundancy_group; @@ -4744,7 +4767,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open) goto out; } if (mddev->pers) { - md_stop_writes(mddev); + __md_stop_writes(mddev); err = -ENXIO; if (mddev->ro==1) @@ -4781,7 +4804,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) if (mddev->ro) set_disk_ro(disk, 0); - md_stop_writes(mddev); + __md_stop_writes(mddev); md_stop(mddev); mddev->queue->merge_bvec_fn = NULL; mddev->queue->unplug_fn = NULL; @@ -5159,9 +5182,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) /* set saved_raid_disk if appropriate */ if (!mddev->persistent) { if (info->state & (1<raid_disk < mddev->raid_disks) + info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; - else + set_bit(In_sync, &rdev->flags); + } else rdev->raid_disk = -1; } else super_types[mddev->major_version]. 
@@ -5238,7 +5262,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) printk(KERN_INFO "md: nonpersistent superblock ...\n"); rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; } else - rdev->sb_start = calc_dev_sboffset(rdev->bdev); + rdev->sb_start = calc_dev_sboffset(rdev); rdev->sectors = rdev->sb_start; err = bind_rdev_to_array(rdev, mddev); @@ -5305,7 +5329,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) } if (mddev->persistent) - rdev->sb_start = calc_dev_sboffset(rdev->bdev); + rdev->sb_start = calc_dev_sboffset(rdev); else rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; @@ -5518,7 +5542,6 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) * sb_start or, if that is sync_thread) return -EBUSY; @@ -6041,7 +6064,8 @@ static int md_thread(void * arg) || kthread_should_stop(), thread->timeout); - if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags)) + clear_bit(THREAD_WAKEUP, &thread->flags); + if (!kthread_should_stop()) thread->run(thread->mddev); } @@ -6807,7 +6831,7 @@ void md_do_sync(mddev_t *mddev) desc, mdname(mddev)); mddev->curr_resync = j; } - mddev->curr_resync_completed = mddev->curr_resync; + mddev->curr_resync_completed = j; while (j < max_sectors) { sector_t sectors; @@ -6825,8 +6849,7 @@ void md_do_sync(mddev_t *mddev) md_unplug(mddev); wait_event(mddev->recovery_wait, atomic_read(&mddev->recovery_active) == 0); - mddev->curr_resync_completed = - mddev->curr_resync; + mddev->curr_resync_completed = j; set_bit(MD_CHANGE_CLEAN, &mddev->flags); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } @@ -7031,6 +7054,45 @@ static int remove_and_add_spares(mddev_t *mddev) } return spares; } + +static void reap_sync_thread(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + + /* resync has finished, collect result */ + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* success...*/ + /* activate any spares */ + if (mddev->pers->spare_active(mddev)) + sysfs_notify(&mddev->kobj, NULL, + "degraded"); + } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + mddev->pers->finish_reshape) + mddev->pers->finish_reshape(mddev); + md_update_sb(mddev, 1); + + /* if array is no-longer degraded, then any saved_raid_disk + * information must be scrapped + */ + if (!mddev->degraded) + list_for_each_entry(rdev, &mddev->disks, same_set) + rdev->saved_raid_disk = -1; + + clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + /* flag recovery needed just to double check */ + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + sysfs_notify_dirent_safe(mddev->sysfs_action); + md_new_event(mddev); +} + /* * This routine is regularly called by all per-raid-array threads to * deal with generic issues like resync and super-block update. 
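The md_run()/md_make_request() changes a little earlier pair an smp_wmb() on the writer side with an smp_rmb() on the request path, so any request that observes ->ready == 1 also observes the initialisation done before it was set. Condensed:

        /* array start (writer): publish only after set-up is complete */
        mddev->in_sync = 1;
        smp_wmb();
        mddev->ready = 1;

        /* request path (reader): refuse I/O until the array is published */
        if (mddev == NULL || mddev->pers == NULL || !mddev->ready) {
                bio_io_error(bio);
                return 0;
        }
        smp_rmb();      /* pairs with the smp_wmb() above */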
@@ -7055,9 +7117,6 @@ static int remove_and_add_spares(mddev_t *mddev) */ void md_check_recovery(mddev_t *mddev) { - mdk_rdev_t *rdev; - - if (mddev->bitmap) bitmap_daemon_work(mddev); @@ -7125,34 +7184,7 @@ void md_check_recovery(mddev_t *mddev) goto unlock; } if (mddev->sync_thread) { - /* resync has finished, collect result */ - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; - if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { - /* success...*/ - /* activate any spares */ - if (mddev->pers->spare_active(mddev)) - sysfs_notify(&mddev->kobj, NULL, - "degraded"); - } - if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - mddev->pers->finish_reshape) - mddev->pers->finish_reshape(mddev); - md_update_sb(mddev, 1); - - /* if array is no-longer degraded, then any saved_raid_disk - * information must be scrapped - */ - if (!mddev->degraded) - list_for_each_entry(rdev, &mddev->disks, same_set) - rdev->saved_raid_disk = -1; - - mddev->recovery = 0; - /* flag recovery needed just to double check */ - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - sysfs_notify_dirent_safe(mddev->sysfs_action); - md_new_event(mddev); + reap_sync_thread(mddev); goto unlock; } /* Set RUNNING before clearing NEEDED to avoid @@ -7210,7 +7242,11 @@ void md_check_recovery(mddev_t *mddev) " thread...\n", mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ - mddev->recovery = 0; + clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); } else md_wakeup_thread(mddev->sync_thread); sysfs_notify_dirent_safe(mddev->sysfs_action); diff --git a/drivers/md/md.h b/drivers/md/md.h index d05bab55df4e..eec517ced31a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -60,6 +60,12 @@ struct mdk_rdev_s mddev_t *mddev; /* RAID array if running */ int last_events; /* IO event timestamp */ + /* + * If meta_bdev is non-NULL, it means that a separate device is + * being used to store the metadata (superblock/bitmap) which + * would otherwise be contained on the same device as the data (bdev). 
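The meta_bdev field documented just below is what the earlier sync_page_io() hunk consumes; condensed, the routing it performs is:

        if (metadata_op && rdev->meta_bdev)
                bio->bi_bdev = rdev->meta_bdev;         /* separate metadata device */
        else
                bio->bi_bdev = rdev->bdev;              /* data device */

        if (metadata_op)
                bio->bi_sector = sector + rdev->sb_start;       /* relative to superblock */
        else
                bio->bi_sector = sector + rdev->data_offset;    /* relative to data */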
+ */ + struct block_device *meta_bdev; struct block_device *bdev; /* block device handle */ struct page *sb_page; @@ -148,7 +154,8 @@ struct mddev_s * are happening, so run/ * takeover/stop are not safe */ - + int ready; /* See when safe to pass + * IO requests down */ struct gendisk *gendisk; struct kobject kobj; @@ -497,8 +504,8 @@ extern void md_flush_request(mddev_t *mddev, struct bio *bio); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); -extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, - struct page *page, int rw); +extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, + struct page *page, int rw, bool metadata_op); extern void md_do_sync(mddev_t *mddev); extern void md_new_event(mddev_t *mddev); extern int md_allow_write(mddev_t *mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 845cf95b612c..a23ffa397ba9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1027,8 +1027,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) } else set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); - printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n" - KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n", + printk(KERN_ALERT + "md/raid1:%s: Disk failure on %s, disabling device.\n" + "md/raid1:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), mdname(mddev), conf->raid_disks - mddev->degraded); } @@ -1364,10 +1365,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) */ rdev = conf->mirrors[d].rdev; if (sync_page_io(rdev, - sect + rdev->data_offset, + sect, s<<9, bio->bi_io_vec[idx].bv_page, - READ)) { + READ, false)) { success = 1; break; } @@ -1390,10 +1391,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) rdev = conf->mirrors[d].rdev; atomic_add(s, &rdev->corrected_errors); if (sync_page_io(rdev, - sect + rdev->data_offset, + sect, s<<9, bio->bi_io_vec[idx].bv_page, - WRITE) == 0) + WRITE, false) == 0) md_error(mddev, rdev); } d = start; @@ -1405,10 +1406,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) continue; rdev = conf->mirrors[d].rdev; if (sync_page_io(rdev, - sect + rdev->data_offset, + sect, s<<9, bio->bi_io_vec[idx].bv_page, - READ) == 0) + READ, false) == 0) md_error(mddev, rdev); } } else { @@ -1488,10 +1489,8 @@ static void fix_read_error(conf_t *conf, int read_disk, rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags) && - sync_page_io(rdev, - sect + rdev->data_offset, - s<<9, - conf->tmppage, READ)) + sync_page_io(rdev, sect, s<<9, + conf->tmppage, READ, false)) success = 1; else { d++; @@ -1514,9 +1513,8 @@ static void fix_read_error(conf_t *conf, int read_disk, rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags)) { - if (sync_page_io(rdev, - sect + rdev->data_offset, - s<<9, conf->tmppage, WRITE) + if (sync_page_io(rdev, sect, s<<9, + conf->tmppage, WRITE, false) == 0) /* Well, this device is dead */ md_error(mddev, rdev); @@ -1531,9 +1529,8 @@ static void fix_read_error(conf_t *conf, int read_disk, rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags)) { - if (sync_page_io(rdev, - sect + rdev->data_offset, - s<<9, conf->tmppage, READ) + if (sync_page_io(rdev, sect, s<<9, + conf->tmppage, READ, false) == 0) /* Well, this device is dead */ md_error(mddev, rdev); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c 
index 0641674827f0..69b659544390 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1051,8 +1051,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) } set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); - printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n" - KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n", + printk(KERN_ALERT + "md/raid10:%s: Disk failure on %s, disabling device.\n" + "md/raid10:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), mdname(mddev), conf->raid_disks - mddev->degraded); } @@ -1559,9 +1560,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) rcu_read_unlock(); success = sync_page_io(rdev, r10_bio->devs[sl].addr + - sect + rdev->data_offset, + sect, s<<9, - conf->tmppage, READ); + conf->tmppage, READ, false); rdev_dec_pending(rdev, mddev); rcu_read_lock(); if (success) @@ -1598,8 +1599,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) atomic_add(s, &rdev->corrected_errors); if (sync_page_io(rdev, r10_bio->devs[sl].addr + - sect + rdev->data_offset, - s<<9, conf->tmppage, WRITE) + sect, + s<<9, conf->tmppage, WRITE, false) == 0) { /* Well, this device is dead */ printk(KERN_NOTICE @@ -1635,9 +1636,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) rcu_read_unlock(); if (sync_page_io(rdev, r10_bio->devs[sl].addr + - sect + rdev->data_offset, + sect, s<<9, conf->tmppage, - READ) == 0) { + READ, false) == 0) { /* Well, this device is dead */ printk(KERN_NOTICE "md/raid10:%s: unable to read back " diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dc574f303f8b..5044babfcda0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1721,7 +1721,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) set_bit(Faulty, &rdev->flags); printk(KERN_ALERT "md/raid:%s: Disk failure on %s, disabling device.\n" - KERN_ALERT "md/raid:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), @@ -4237,7 +4236,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped wait_event(conf->wait_for_overlap, atomic_read(&conf->reshape_stripes)==0); mddev->reshape_position = conf->reshape_progress; - mddev->curr_resync_completed = mddev->curr_resync; + mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); @@ -4338,7 +4337,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped wait_event(conf->wait_for_overlap, atomic_read(&conf->reshape_stripes) == 0); mddev->reshape_position = conf->reshape_progress; - mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors; + mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); @@ -5339,7 +5338,7 @@ static int raid5_spare_active(mddev_t *mddev) && !test_bit(Faulty, &tmp->rdev->flags) && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { count++; - sysfs_notify_dirent(tmp->rdev->sysfs_state); + sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); } } spin_lock_irqsave(&conf->device_lock, flags); @@ -5528,8 +5527,8 @@ static int raid5_start_reshape(mddev_t *mddev) return -ENOSPC; list_for_each_entry(rdev, &mddev->disks, same_set) - if (rdev->raid_disk < 0 && - !test_bit(Faulty, &rdev->flags)) + if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks) + && 
!test_bit(Faulty, &rdev->flags)) spares++; if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) @@ -5589,6 +5588,11 @@ static int raid5_start_reshape(mddev_t *mddev) /* Failure here is OK */; } else break; + } else if (rdev->raid_disk >= conf->previous_raid_disks + && !test_bit(Faulty, &rdev->flags)) { + /* This is a spare that was manually added */ + set_bit(In_sync, &rdev->flags); + added_devices++; } /* When a reshape changes the number of devices, ->degraded diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 789087cd6a9c..49f1b8f1418e 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -2184,9 +2184,7 @@ static int cafe_pci_resume(struct pci_dev *pdev) struct cafe_camera *cam = to_cam(v4l2_dev); int ret = 0; - ret = pci_restore_state(pdev); - if (ret) - return ret; + pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) { diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h index 2c00980acfcb..7e40035028d2 100644 --- a/drivers/media/video/cx18/cx23418.h +++ b/drivers/media/video/cx18/cx23418.h @@ -177,7 +177,7 @@ IN[0] - Task handle. IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only, 3 = 2D H/V separable, 4 = 2D symmetric non-separable - IN[2] - chroma type: 0 - diable, 1 = 1D horizontal + IN[2] - chroma type: 0 - disable, 1 = 1D horizontal ReturnCode - One of the ERR_CAPTURE_... */ #define CX18_CPU_SET_SPATIAL_FILTER_TYPE (CPU_CMD_MASK_CAPTURE | 0x000C) diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c index 627926f6bde8..7eb79af28aa3 100644 --- a/drivers/media/video/cx25840/cx25840-ir.c +++ b/drivers/media/video/cx25840/cx25840-ir.c @@ -261,7 +261,7 @@ static u16 ns_to_pulse_width_count(u32 ns, u16 divider) u32 rem; /* - * The 2 lsb's of the pulse width timer count are not accessable, hence + * The 2 lsb's of the pulse width timer count are not accessible, hence * the (1 << 2) */ n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */ diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h index 188841b476e0..ebd5c4338ebb 100644 --- a/drivers/media/video/davinci/vpif.h +++ b/drivers/media/video/davinci/vpif.h @@ -33,7 +33,7 @@ extern spinlock_t vpif_lock; #define regr(reg) readl((reg) + vpif_base) #define regw(value, reg) writel(value, (reg + vpif_base)) -/* Register Addresss Offsets */ +/* Register Address Offsets */ #define VPIF_PID (0x0000) #define VPIF_CH0_CTRL (0x0004) #define VPIF_CH1_CTRL (0x0008) diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c index 7918680917d0..3e5cf27ec2b2 100644 --- a/drivers/media/video/davinci/vpss.c +++ b/drivers/media/video/davinci/vpss.c @@ -85,7 +85,7 @@ enum vpss_platform_type { /* * vpss operations. Depends on platform. Not all functions are available * on all platforms. The api, first check if a functio is available before - * invoking it. In the probe, the function ptrs are intialized based on + * invoking it. In the probe, the function ptrs are initialized based on * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc. 
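The cafe_ccic resume hunk above stops checking the return of pci_restore_state(), whose return value is no longer useful to test for a device whose state was saved. A resume path in that style might look like this sketch (function name illustrative):

#include <linux/pci.h>

static int example_pci_resume(struct pci_dev *pdev)
{
        int ret;

        pci_restore_state(pdev);                /* return value not checked */
        ret = pci_enable_device(pdev);
        if (ret) {
                dev_warn(&pdev->dev, "unable to re-enable device on resume\n");
                return ret;
        }
        pci_set_master(pdev);
        return 0;
}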
*/ struct vpss_hw_ops { diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index 83de97ad971e..029a4babfd61 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -1286,7 +1286,7 @@ static int omap_vout_release(struct file *file) videobuf_mmap_free(q); /* Even if apply changes fails we should continue - freeing allocated memeory */ + freeing allocated memory */ if (vout->streaming) { u32 mask = 0; diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c index d6bf3f82cc34..58af67f2278b 100644 --- a/drivers/media/video/saa7164/saa7164-core.c +++ b/drivers/media/video/saa7164/saa7164-core.c @@ -655,8 +655,8 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id) goto out; } - /* Check that the hardware is accessable. If the status bytes are - * 0xFF then the device is not accessable, the the IRQ belongs + /* Check that the hardware is accessible. If the status bytes are + * 0xFF then the device is not accessible, the the IRQ belongs * to another driver. * 4 x u32 interrupt registers. */ diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h index 494957b10bac..7f38549715b6 100644 --- a/drivers/media/video/sn9c102/sn9c102_sensor.h +++ b/drivers/media/video/sn9c102/sn9c102_sensor.h @@ -147,7 +147,7 @@ enum sn9c102_i2c_interface { struct sn9c102_sensor { char name[32], /* sensor name */ - maintainer[64]; /* name of the mantainer */ + maintainer[64]; /* name of the maintainer */ enum sn9c102_bridge supported_bridge; /* supported SN9C1xx bridges */ diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c index e63b40f5a706..c799e4eb6fcd 100644 --- a/drivers/media/video/tvp7002.c +++ b/drivers/media/video/tvp7002.c @@ -789,7 +789,7 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd, * Get the value of a TVP7002 decoder device register. * Returns zero when successful, -EINVAL if register read fails or * access to I2C client fails, -EPERM if the call is not allowed - * by diabled CAP_SYS_ADMIN. + * by disabled CAP_SYS_ADMIN. 
*/ static int tvp7002_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index e25aca5759fb..2f973cd56408 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -13,14 +13,12 @@ #include #include #include -#include #include #include #include #include #include #include -#include #include #include #include diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index c00fe8253c51..e9a3eab7b0cf 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -465,6 +465,7 @@ static void memstick_check(struct work_struct *work) if (!host->card) { host->card = card; if (device_register(&card->dev)) { + put_device(&card->dev); kfree(host->card); host->card = NULL; } @@ -510,14 +511,18 @@ int memstick_add_host(struct memstick_host *host) { int rc; - if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL)) - return -ENOMEM; + while (1) { + if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL)) + return -ENOMEM; - spin_lock(&memstick_host_lock); - rc = idr_get_new(&memstick_host_idr, host, &host->id); - spin_unlock(&memstick_host_lock); - if (rc) - return rc; + spin_lock(&memstick_host_lock); + rc = idr_get_new(&memstick_host_idr, host, &host->id); + spin_unlock(&memstick_host_lock); + if (!rc) + break; + else if (rc != -EAGAIN) + return rc; + } dev_set_name(&host->dev, "memstick%u", host->id); diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 02362eccc588..57b42bfc7d23 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -23,7 +23,6 @@ #define DRIVER_NAME "mspro_block" -static DEFINE_MUTEX(mspro_block_mutex); static int major; module_param(major, int, 0644); @@ -160,6 +159,13 @@ struct mspro_block_data { int (*mrq_handler)(struct memstick_dev *card, struct memstick_request **mrq); + + /* Default request setup function for data access method preferred by + * this host instance. + */ + void (*setup_transfer)(struct memstick_dev *card, + u64 offset, size_t length); + struct attribute_group attr_group; struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; @@ -181,7 +187,6 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) struct mspro_block_data *msb = disk->private_data; int rc = -ENXIO; - mutex_lock(&mspro_block_mutex); mutex_lock(&mspro_block_disk_lock); if (msb && msb->card) { @@ -193,7 +198,6 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) } mutex_unlock(&mspro_block_disk_lock); - mutex_unlock(&mspro_block_mutex); return rc; } @@ -225,11 +229,7 @@ static int mspro_block_disk_release(struct gendisk *disk) static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode) { - int ret; - mutex_lock(&mspro_block_mutex); - ret = mspro_block_disk_release(disk); - mutex_unlock(&mspro_block_mutex); - return ret; + return mspro_block_disk_release(disk); } static int mspro_block_bd_getgeo(struct block_device *bdev, @@ -663,14 +663,43 @@ has_int_reg: } } +/*** Transfer setup functions for different access methods. ***/ + +/** Setup data transfer request for SET_CMD TPC with arguments in card + * registers. + * + * @card Current media instance + * @offset Target data offset in bytes + * @length Required transfer length in bytes. 
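The memstick_add_host() hunk above turns ID allocation into the standard idr retry loop: pre-allocate, try to take an ID under the lock, and loop again only on -EAGAIN. A hedged sketch of the pattern with illustrative names:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

static int example_get_id(void *ptr, int *id)
{
        int rc;

        while (1) {
                if (!idr_pre_get(&example_idr, GFP_KERNEL))
                        return -ENOMEM;         /* preallocation failed */

                spin_lock(&example_idr_lock);
                rc = idr_get_new(&example_idr, ptr, id);
                spin_unlock(&example_idr_lock);

                if (!rc)
                        return 0;               /* got an id */
                if (rc != -EAGAIN)
                        return rc;              /* hard failure */
                /* -EAGAIN: the preallocation was consumed, retry */
        }
}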
+ */ +static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset, + size_t length) +{ + struct mspro_block_data *msb = memstick_get_drvdata(card); + struct mspro_param_register param = { + .system = msb->system, + .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)), + /* ISO C90 warning precludes direct initialization for now. */ + .data_address = 0, + .tpc_param = 0 + }; + + do_div(offset, msb->page_size); + param.data_address = cpu_to_be32((uint32_t)offset); + + card->next_request = h_mspro_block_req_init; + msb->mrq_handler = h_mspro_block_transfer_data; + memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, + ¶m, sizeof(param)); +} + /*** Data transfer ***/ static int mspro_block_issue_req(struct memstick_dev *card, int chunk) { struct mspro_block_data *msb = memstick_get_drvdata(card); - sector_t t_sec; + u64 t_off; unsigned int count; - struct mspro_param_register param; try_again: while (chunk) { @@ -685,30 +714,17 @@ try_again: continue; } - t_sec = blk_rq_pos(msb->block_req) << 9; - sector_div(t_sec, msb->page_size); - + t_off = blk_rq_pos(msb->block_req); + t_off <<= 9; count = blk_rq_bytes(msb->block_req); - count /= msb->page_size; - param.system = msb->system; - param.data_count = cpu_to_be16(count); - param.data_address = cpu_to_be32((uint32_t)t_sec); - param.tpc_param = 0; + msb->setup_transfer(card, t_off, count); msb->data_dir = rq_data_dir(msb->block_req); msb->transfer_cmd = msb->data_dir == READ ? MSPRO_CMD_READ_DATA : MSPRO_CMD_WRITE_DATA; - dev_dbg(&card->dev, "data transfer: cmd %x, " - "lba %x, count %x\n", msb->transfer_cmd, - be32_to_cpu(param.data_address), count); - - card->next_request = h_mspro_block_req_init; - msb->mrq_handler = h_mspro_block_transfer_data; - memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, - ¶m, sizeof(param)); memstick_new_req(card->host); return 0; } @@ -963,18 +979,16 @@ try_again: static int mspro_block_read_attributes(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); - struct mspro_param_register param = { - .system = msb->system, - .data_count = cpu_to_be16(1), - .data_address = 0, - .tpc_param = 0 - }; struct mspro_attribute *attr = NULL; struct mspro_sys_attr *s_attr = NULL; unsigned char *buffer = NULL; int cnt, rc, attr_count; - unsigned int addr; - unsigned short page_count; + /* While normally physical device offsets, represented here by + * attr_offset and attr_len will be of large numeric types, we can be + * sure, that attributes are close enough to the beginning of the + * device, to save ourselves some trouble. 
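h_mspro_block_setup_cmd() above converts a byte offset into a page number with do_div(), the kernel idiom for 64-bit division on 32-bit hosts: it divides its first argument in place and returns the remainder. A minimal illustration (byte_offset and page_size are only example names):

#include <asm/div64.h>

        u64 offset = byte_offset;       /* e.g. blk_rq_pos(req) << 9 */
        u32 remainder;

        remainder = do_div(offset, page_size);
        /* now: offset    == byte_offset / page_size
         *      remainder == byte_offset % page_size
         */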
+ */ + unsigned int addr, attr_offset = 0, attr_len = msb->page_size; attr = kmalloc(msb->page_size, GFP_KERNEL); if (!attr) @@ -987,10 +1001,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card) msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; - card->next_request = h_mspro_block_req_init; - msb->mrq_handler = h_mspro_block_transfer_data; - memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, ¶m, - sizeof(param)); + msb->setup_transfer(card, attr_offset, attr_len); + memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { @@ -1021,13 +1033,12 @@ static int mspro_block_read_attributes(struct memstick_dev *card) } msb->attr_group.name = "media_attributes"; - buffer = kmalloc(msb->page_size, GFP_KERNEL); + buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } - memcpy(buffer, (char *)attr, msb->page_size); - page_count = 1; + memcpy(buffer, (char *)attr, attr_len); for (cnt = 0; cnt < attr_count; ++cnt) { s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL); @@ -1038,9 +1049,10 @@ static int mspro_block_read_attributes(struct memstick_dev *card) msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr; addr = be32_to_cpu(attr->entries[cnt].address); - rc = be32_to_cpu(attr->entries[cnt].size); + s_attr->size = be32_to_cpu(attr->entries[cnt].size); dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, " - "size %x\n", cnt, attr->entries[cnt].id, addr, rc); + "size %zx\n", cnt, attr->entries[cnt].id, addr, + s_attr->size); s_attr->id = attr->entries[cnt].id; if (mspro_block_attr_name(s_attr->id)) snprintf(s_attr->name, sizeof(s_attr->name), "%s", @@ -1054,57 +1066,47 @@ static int mspro_block_read_attributes(struct memstick_dev *card) s_attr->dev_attr.attr.mode = S_IRUGO; s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); - if (!rc) + if (!s_attr->size) continue; - s_attr->size = rc; - s_attr->data = kmalloc(rc, GFP_KERNEL); + s_attr->data = kmalloc(s_attr->size, GFP_KERNEL); if (!s_attr->data) { rc = -ENOMEM; goto out_free_buffer; } - if (((addr / msb->page_size) - == be32_to_cpu(param.data_address)) - && (((addr + rc - 1) / msb->page_size) - == be32_to_cpu(param.data_address))) { + if (((addr / msb->page_size) == (attr_offset / msb->page_size)) + && (((addr + s_attr->size - 1) / msb->page_size) + == (attr_offset / msb->page_size))) { memcpy(s_attr->data, buffer + addr % msb->page_size, - rc); + s_attr->size); continue; } - if (page_count <= (rc / msb->page_size)) { + attr_offset = (addr / msb->page_size) * msb->page_size; + + if ((attr_offset + attr_len) < (addr + s_attr->size)) { kfree(buffer); - page_count = (rc / msb->page_size) + 1; - buffer = kmalloc(page_count * msb->page_size, - GFP_KERNEL); + attr_len = (((addr + s_attr->size) / msb->page_size) + + 1 ) * msb->page_size - attr_offset; + buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } } - param.system = msb->system; - param.data_count = cpu_to_be16((rc / msb->page_size) + 1); - param.data_address = cpu_to_be32(addr / msb->page_size); - param.tpc_param = 0; - - sg_init_one(&msb->req_sg[0], buffer, - be16_to_cpu(param.data_count) * msb->page_size); + sg_init_one(&msb->req_sg[0], buffer, attr_len); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; - dev_dbg(&card->dev, "reading attribute pages %x, %x\n", - be32_to_cpu(param.data_address), - be16_to_cpu(param.data_count)); + 
dev_dbg(&card->dev, "reading attribute range %x, %x\n", + attr_offset, attr_len); - card->next_request = h_mspro_block_req_init; - msb->mrq_handler = h_mspro_block_transfer_data; - memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, - (char *)¶m, sizeof(param)); + msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { @@ -1112,7 +1114,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card) goto out_free_buffer; } - memcpy(s_attr->data, buffer + addr % msb->page_size, rc); + memcpy(s_attr->data, buffer + addr % msb->page_size, + s_attr->size); } rc = 0; @@ -1130,6 +1133,8 @@ static int mspro_block_init_card(struct memstick_dev *card) int rc = 0; msb->system = MEMSTICK_SYS_SERIAL; + msb->setup_transfer = h_mspro_block_setup_cmd; + card->reg_addr.r_offset = offsetof(struct mspro_register, status); card->reg_addr.r_length = sizeof(struct ms_status_register); card->reg_addr.w_offset = offsetof(struct mspro_register, param); @@ -1206,10 +1211,12 @@ static int mspro_block_init_disk(struct memstick_dev *card) msb->page_size = be16_to_cpu(sys_info->unit_size); - if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) + mutex_lock(&mspro_block_disk_lock); + if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) { + mutex_unlock(&mspro_block_disk_lock); return -ENOMEM; + } - mutex_lock(&mspro_block_disk_lock); rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id); mutex_unlock(&mspro_block_disk_lock); diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index f2b894cd8b02..d89d925caecf 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -61,6 +61,7 @@ struct jmb38x_ms_host { struct memstick_request *req; unsigned char cmd_flags; unsigned char io_pos; + unsigned char ifmode; unsigned int io_word[2]; }; @@ -136,15 +137,14 @@ struct jmb38x_ms { #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000 #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000 +#define CLOCK_CONTROL_BY_MMIO 0x00000008 #define CLOCK_CONTROL_40MHZ 0x00000001 -#define CLOCK_CONTROL_50MHZ 0x0000000a -#define CLOCK_CONTROL_60MHZ 0x00000008 -#define CLOCK_CONTROL_62_5MHZ 0x0000000c +#define CLOCK_CONTROL_50MHZ 0x00000002 +#define CLOCK_CONTROL_60MHZ 0x00000010 +#define CLOCK_CONTROL_62_5MHZ 0x00000004 #define CLOCK_CONTROL_OFF 0x00000000 #define PCI_CTL_CLOCK_DLY_ADDR 0x000000b0 -#define PCI_CTL_CLOCK_DLY_MASK_A 0x00000f00 -#define PCI_CTL_CLOCK_DLY_MASK_B 0x0000f000 enum { CMD_READY = 0x01, @@ -390,8 +390,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh) if (host->req->data_dir == READ) cmd |= TPC_DIR; - if (host->req->need_card_int) - cmd |= TPC_WAIT_INT; + + if (host->req->need_card_int) { + if (host->ifmode == MEMSTICK_SERIAL) + cmd |= TPC_GET_INT; + else + cmd |= TPC_WAIT_INT; + } data = host->req->data; @@ -529,7 +534,10 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id) if (irq_status & INT_STATUS_ANY_ERR) { if (irq_status & INT_STATUS_CRC_ERR) host->req->error = -EILSEQ; - else + else if (irq_status & INT_STATUS_TPC_ERR) { + dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n"); + jmb38x_ms_complete_cmd(msh, 0); + } else host->req->error = -ETIME; } else { if (host->cmd_flags & DMA_DATA) { @@ -644,7 +652,6 @@ static int jmb38x_ms_reset(struct jmb38x_ms_host *host) ndelay(20); } dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n"); - /* return -EIO; */ reset_next: writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN @@ -675,7 +682,7 @@ static int 
jmb38x_ms_set_param(struct memstick_host *msh, { struct jmb38x_ms_host *host = memstick_priv(msh); unsigned int host_ctl = readl(host->addr + HOST_CONTROL); - unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0; + unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0; int rc = 0; switch (param) { @@ -687,9 +694,7 @@ static int jmb38x_ms_set_param(struct memstick_host *msh, host_ctl = 7; host_ctl |= HOST_CONTROL_POWER_EN - | HOST_CONTROL_CLOCK_EN - | HOST_CONTROL_HW_OC_P - | HOST_CONTROL_TDELAY_EN; + | HOST_CONTROL_CLOCK_EN; writel(host_ctl, host->addr + HOST_CONTROL); writel(host->id ? PAD_PU_PD_ON_MS_SOCK1 @@ -712,46 +717,88 @@ static int jmb38x_ms_set_param(struct memstick_host *msh, return -EINVAL; break; case MEMSTICK_INTERFACE: + dev_dbg(&host->chip->pdev->dev, + "Set Host Interface Mode to %d\n", value); + host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI | + HOST_CONTROL_REO); + host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P; host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT); - pci_read_config_dword(host->chip->pdev, - PCI_CTL_CLOCK_DLY_ADDR, - &clock_delay); - clock_delay &= host->id ? ~PCI_CTL_CLOCK_DLY_MASK_B - : ~PCI_CTL_CLOCK_DLY_MASK_A; if (value == MEMSTICK_SERIAL) { - host_ctl &= ~HOST_CONTROL_FAST_CLK; - host_ctl &= ~HOST_CONTROL_REO; host_ctl |= HOST_CONTROL_IF_SERIAL << HOST_CONTROL_IF_SHIFT; host_ctl |= HOST_CONTROL_REI; - clock_ctl = CLOCK_CONTROL_40MHZ; + clock_ctl |= CLOCK_CONTROL_40MHZ; + clock_delay = 0; } else if (value == MEMSTICK_PAR4) { - host_ctl |= HOST_CONTROL_FAST_CLK | HOST_CONTROL_REO; + host_ctl |= HOST_CONTROL_FAST_CLK; host_ctl |= HOST_CONTROL_IF_PAR4 << HOST_CONTROL_IF_SHIFT; - host_ctl &= ~HOST_CONTROL_REI; - clock_ctl = CLOCK_CONTROL_40MHZ; - clock_delay |= host->id ? (4 << 12) : (4 << 8); + host_ctl |= HOST_CONTROL_REO; + clock_ctl |= CLOCK_CONTROL_40MHZ; + clock_delay = 4; } else if (value == MEMSTICK_PAR8) { host_ctl |= HOST_CONTROL_FAST_CLK; host_ctl |= HOST_CONTROL_IF_PAR8 << HOST_CONTROL_IF_SHIFT; - host_ctl &= ~(HOST_CONTROL_REI | HOST_CONTROL_REO); - clock_ctl = CLOCK_CONTROL_50MHZ; + clock_ctl |= CLOCK_CONTROL_50MHZ; + clock_delay = 0; } else return -EINVAL; writel(host_ctl, host->addr + HOST_CONTROL); + writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL); writel(clock_ctl, host->addr + CLOCK_CONTROL); - pci_write_config_dword(host->chip->pdev, - PCI_CTL_CLOCK_DLY_ADDR, - clock_delay); + pci_write_config_byte(host->chip->pdev, + PCI_CTL_CLOCK_DLY_ADDR + 1, + clock_delay); + host->ifmode = value; break; }; return 0; } +#define PCI_PMOS0_CONTROL 0xae +#define PMOS0_ENABLE 0x01 +#define PMOS0_OVERCURRENT_LEVEL_2_4V 0x06 +#define PMOS0_EN_OVERCURRENT_DEBOUNCE 0x40 +#define PMOS0_SW_LED_POLARITY_ENABLE 0x80 +#define PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \ + PMOS0_OVERCURRENT_LEVEL_2_4V) +#define PCI_PMOS1_CONTROL 0xbd +#define PMOS1_ACTIVE_BITS 0x4a +#define PCI_CLOCK_CTL 0xb9 + +static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag) +{ + unsigned char val; + + pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val); + if (flag) + val |= PMOS0_ACTIVE_BITS; + else + val &= ~PMOS0_ACTIVE_BITS; + pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val); + dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val); + + if (pci_resource_flags(pdev, 1)) { + pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val); + if (flag) + val |= PMOS1_ACTIVE_BITS; + else + val &= ~PMOS1_ACTIVE_BITS; + pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val); + dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val); + } + 
+ pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val); + pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f); + pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01); + dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n"); + + return 0; +} + #ifdef CONFIG_PM static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state) @@ -784,8 +831,7 @@ static int jmb38x_ms_resume(struct pci_dev *dev) return rc; pci_set_master(dev); - pci_read_config_dword(dev, 0xac, &rc); - pci_write_config_dword(dev, 0xac, rc | 0x00470000); + jmb38x_ms_pmos(dev, 1); for (rc = 0; rc < jm->host_cnt; ++rc) { if (!jm->hosts[rc]) @@ -894,8 +940,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev, goto err_out; } - pci_read_config_dword(pdev, 0xac, &rc); - pci_write_config_dword(pdev, 0xac, rc | 0x00470000); + jmb38x_ms_pmos(pdev, 1); cnt = jmb38x_ms_count_slots(pdev); if (!cnt) { @@ -976,6 +1021,8 @@ static void jmb38x_ms_remove(struct pci_dev *dev) jmb38x_ms_free_host(jm->hosts[cnt]); } + jmb38x_ms_pmos(dev, 0); + pci_set_drvdata(dev, NULL); pci_release_regions(dev); pci_disable_device(dev); @@ -983,8 +1030,9 @@ static void jmb38x_ms_remove(struct pci_dev *dev) } static struct pci_device_id jmb38x_ms_id_tbl [] = { - { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID, - PCI_ANY_ID, 0, 0, 0 }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) }, { } }; diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h index 691620dbedd2..8b04810df469 100644 --- a/drivers/message/fusion/lsi/mpi_log_sas.h +++ b/drivers/message/fusion/lsi/mpi_log_sas.h @@ -268,7 +268,7 @@ /* Compatibility Error : IR Disabled */ #define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED (0x00010030) -/* Compatibility Error : Inquiry Comand failed */ +/* Compatibility Error : Inquiry Command failed */ #define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED (0x00010031) /* Compatibility Error : Device not direct access device */ #define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS (0x00010032) diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 3e57b61ca446..3358c0af3466 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -7977,7 +7977,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) NULL, /* 2Eh */ NULL, /* 2Fh */ "Compatibility Error: IR Disabled", /* 30h */ - "Compatibility Error: Inquiry Comand Failed", /* 31h */ + "Compatibility Error: Inquiry Command Failed", /* 31h */ "Compatibility Error: Device not Direct Access " "Device ", /* 32h */ "Compatibility Error: Removable Device Found", /* 33h */ diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index d48c2c6058e1..8aefb1829fcd 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1146,7 +1146,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, * * This function will delete scheduled target reset from the list and * try to send next target reset. This will be called from completion - * context of any Task managment command. + * context of any Task management command. 
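The id-table hunk above switches to the PCI_VDEVICE() helper, which fills in the vendor/device pair and wildcards the subvendor/subdevice fields; written out as a sketch (example_ids is an illustrative name):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
        /* PCI_VDEVICE(JMICRON, id) supplies PCI_VENDOR_ID_JMICRON and 'id'
         * and leaves subvendor/subdevice as PCI_ANY_ID.
         */
        { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_ids);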
*/ void diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index f87a9d405a5e..ae7cad185898 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -309,7 +309,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq) * @ireq: I2O block request * @mptr: message body pointer * - * Builds the SG list and map it to be accessable by the controller. + * Builds the SG list and map it to be accessible by the controller. * * Returns 0 on failure or 1 on success. */ diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 20895e7a99c9..793300c554b4 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -361,12 +361,6 @@ static struct pm860x_irq_data pm860x_irqs[] = { }, }; -static inline struct pm860x_irq_data *irq_to_pm860x(struct pm860x_chip *chip, - int irq) -{ - return &pm860x_irqs[irq - chip->irq_base]; -} - static irqreturn_t pm860x_irq(int irq, void *data) { struct pm860x_chip *chip = data; @@ -388,16 +382,16 @@ static irqreturn_t pm860x_irq(int irq, void *data) return IRQ_HANDLED; } -static void pm860x_irq_lock(unsigned int irq) +static void pm860x_irq_lock(struct irq_data *data) { - struct pm860x_chip *chip = get_irq_chip_data(irq); + struct pm860x_chip *chip = irq_data_get_irq_chip_data(data); mutex_lock(&chip->irq_lock); } -static void pm860x_irq_sync_unlock(unsigned int irq) +static void pm860x_irq_sync_unlock(struct irq_data *data) { - struct pm860x_chip *chip = get_irq_chip_data(irq); + struct pm860x_chip *chip = irq_data_get_irq_chip_data(data); struct pm860x_irq_data *irq_data; struct i2c_client *i2c; static unsigned char cached[3] = {0x0, 0x0, 0x0}; @@ -439,25 +433,25 @@ static void pm860x_irq_sync_unlock(unsigned int irq) mutex_unlock(&chip->irq_lock); } -static void pm860x_irq_enable(unsigned int irq) +static void pm860x_irq_enable(struct irq_data *data) { - struct pm860x_chip *chip = get_irq_chip_data(irq); - pm860x_irqs[irq - chip->irq_base].enable - = pm860x_irqs[irq - chip->irq_base].offs; + struct pm860x_chip *chip = irq_data_get_irq_chip_data(data); + pm860x_irqs[data->irq - chip->irq_base].enable + = pm860x_irqs[data->irq - chip->irq_base].offs; } -static void pm860x_irq_disable(unsigned int irq) +static void pm860x_irq_disable(struct irq_data *data) { - struct pm860x_chip *chip = get_irq_chip_data(irq); - pm860x_irqs[irq - chip->irq_base].enable = 0; + struct pm860x_chip *chip = irq_data_get_irq_chip_data(data); + pm860x_irqs[data->irq - chip->irq_base].enable = 0; } static struct irq_chip pm860x_irq_chip = { .name = "88pm860x", - .bus_lock = pm860x_irq_lock, - .bus_sync_unlock = pm860x_irq_sync_unlock, - .enable = pm860x_irq_enable, - .disable = pm860x_irq_disable, + .irq_bus_lock = pm860x_irq_lock, + .irq_bus_sync_unlock = pm860x_irq_sync_unlock, + .irq_enable = pm860x_irq_enable, + .irq_disable = pm860x_irq_disable, }; static int __devinit device_gpadc_init(struct pm860x_chip *chip, diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index da9d2971102e..fd018366d670 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -496,13 +496,13 @@ config EZX_PCAP config AB8500_CORE bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" - depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500 + depends on GENERIC_HARDIRQS && ABX500_CORE select MFD_CORE help Select this option to enable access to AB8500 power management - chip. This connects to U8500 either on the SSP/SPI bus - or the I2C bus via PRCMU. 
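The 88pm860x conversion above, and the ab3550/ab8500 ones that follow, all apply the same genirq irq_data conversion: callbacks take a struct irq_data *, recover their driver data with irq_data_get_irq_chip_data(), and the struct irq_chip hooks gain an irq_ prefix. A minimal sketch of the converted shape; the example_chip type and its fields are illustrative, not from the patch:

#include <linux/irq.h>

struct example_chip {
        int irq_base;
        u8 mask[4];
};

static void example_irq_mask(struct irq_data *data)
{
        struct example_chip *chip = irq_data_get_irq_chip_data(data);
        int offset = data->irq - chip->irq_base;

        chip->mask[offset / 8] |= 1 << (offset % 8);
}

static void example_irq_unmask(struct irq_data *data)
{
        struct example_chip *chip = irq_data_get_irq_chip_data(data);
        int offset = data->irq - chip->irq_base;

        chip->mask[offset / 8] &= ~(1 << (offset % 8));
}

static struct irq_chip example_irq_chip = {
        .name           = "example",
        .irq_mask       = example_irq_mask,
        .irq_unmask     = example_irq_unmask,
};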
It also adds the irq_chip - parts for handling the Mixed Signal chip events. + chip. This connects to U8500 either on the SSP/SPI bus (deprecated + since hardware version v1.0) or the I2C bus via PRCMU. It also adds + the irq_chip parts for handling the Mixed Signal chip events. This chip embeds various other multimedia funtionalities as well. config AB8500_I2C_CORE @@ -537,6 +537,14 @@ config AB3550_CORE LEDs, vibrator, system power and temperature, power management and ALSA sound. +config MFD_CS5535 + tristate "Support for CS5535 and CS5536 southbridge core functions" + select MFD_CORE + depends on PCI + ---help--- + This is the core driver for CS5535/CS5536 MFD functions. This is + necessary for using the board's GPIO and MFGPT functionality. + config MFD_TIMBERDALE tristate "Support for the Timberdale FPGA" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 848e7eac75aa..a54e2c7c6a1c 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -70,7 +70,7 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o obj-$(CONFIG_AB3550_CORE) += ab3550-core.o -obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o +obj-$(CONFIG_AB8500_CORE) += ab8500-core.o obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o @@ -82,3 +82,4 @@ obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o obj-$(CONFIG_MFD_VX855) += vx855.o obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o +obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index 8a98739e6d9c..5fbca346b998 100644 --- a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c @@ -1159,15 +1159,16 @@ static void ab3550_mask_work(struct work_struct *work) } } -static void ab3550_mask(unsigned int irq) +static void ab3550_mask(struct irq_data *data) { unsigned long flags; struct ab3550 *ab; struct ab3550_platform_data *plf_data; + int irq; - ab = get_irq_chip_data(irq); + ab = irq_data_get_irq_chip_data(data); plf_data = ab->i2c_client[0]->dev.platform_data; - irq -= plf_data->irq.base; + irq = data->irq - plf_data->irq.base; spin_lock_irqsave(&ab->event_lock, flags); ab->event_mask[irq / 8] |= BIT(irq % 8); @@ -1176,15 +1177,16 @@ static void ab3550_mask(unsigned int irq) schedule_work(&ab->mask_work); } -static void ab3550_unmask(unsigned int irq) +static void ab3550_unmask(struct irq_data *data) { unsigned long flags; struct ab3550 *ab; struct ab3550_platform_data *plf_data; + int irq; - ab = get_irq_chip_data(irq); + ab = irq_data_get_irq_chip_data(data); plf_data = ab->i2c_client[0]->dev.platform_data; - irq -= plf_data->irq.base; + irq = data->irq - plf_data->irq.base; spin_lock_irqsave(&ab->event_lock, flags); ab->event_mask[irq / 8] &= ~BIT(irq % 8); @@ -1193,20 +1195,16 @@ static void ab3550_unmask(unsigned int irq) schedule_work(&ab->mask_work); } -static void noop(unsigned int irq) +static void noop(struct irq_data *data) { } static struct irq_chip ab3550_irq_chip = { .name = "ab3550-core", /* Keep the same name as the request */ - .startup = NULL, /* defaults to enable */ - .shutdown = NULL, /* defaults to disable */ - .enable = NULL, /* defaults to unmask */ - .disable = ab3550_mask, /* No default to mask in chip.c */ - .ack = noop, - .mask = ab3550_mask, - .unmask = ab3550_unmask, - .end = NULL, + .irq_disable = ab3550_mask, /* No default to mask in chip.c */ + .irq_ack 
= noop, + .irq_mask = ab3550_mask, + .irq_unmask = ab3550_unmask, }; struct ab_family_id { diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index d9640a623ff4..b6887014d687 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -52,6 +52,7 @@ #define AB8500_IT_LATCH8_REG 0x27 #define AB8500_IT_LATCH9_REG 0x28 #define AB8500_IT_LATCH10_REG 0x29 +#define AB8500_IT_LATCH12_REG 0x2B #define AB8500_IT_LATCH19_REG 0x32 #define AB8500_IT_LATCH20_REG 0x33 #define AB8500_IT_LATCH21_REG 0x34 @@ -98,13 +99,17 @@ * offset 0. */ static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = { - 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21, + 0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21, }; static int ab8500_get_chip_id(struct device *dev) { - struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); - return (int)ab8500->chip_id; + struct ab8500 *ab8500; + + if (!dev) + return -EINVAL; + ab8500 = dev_get_drvdata(dev->parent); + return ab8500 ? (int)ab8500->chip_id : -EINVAL; } static int set_register_interruptible(struct ab8500 *ab8500, u8 bank, @@ -228,16 +233,16 @@ static struct abx500_ops ab8500_ops = { .startup_irq_enabled = NULL, }; -static void ab8500_irq_lock(unsigned int irq) +static void ab8500_irq_lock(struct irq_data *data) { - struct ab8500 *ab8500 = get_irq_chip_data(irq); + struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data); mutex_lock(&ab8500->irq_lock); } -static void ab8500_irq_sync_unlock(unsigned int irq) +static void ab8500_irq_sync_unlock(struct irq_data *data) { - struct ab8500 *ab8500 = get_irq_chip_data(irq); + struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { @@ -248,6 +253,10 @@ static void ab8500_irq_sync_unlock(unsigned int irq) if (new == old) continue; + /* Interrupt register 12 does'nt exist prior to version 0x20 */ + if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) + continue; + ab8500->oldmask[i] = new; reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i]; @@ -257,20 +266,20 @@ static void ab8500_irq_sync_unlock(unsigned int irq) mutex_unlock(&ab8500->irq_lock); } -static void ab8500_irq_mask(unsigned int irq) +static void ab8500_irq_mask(struct irq_data *data) { - struct ab8500 *ab8500 = get_irq_chip_data(irq); - int offset = irq - ab8500->irq_base; + struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data); + int offset = data->irq - ab8500->irq_base; int index = offset / 8; int mask = 1 << (offset % 8); ab8500->mask[index] |= mask; } -static void ab8500_irq_unmask(unsigned int irq) +static void ab8500_irq_unmask(struct irq_data *data) { - struct ab8500 *ab8500 = get_irq_chip_data(irq); - int offset = irq - ab8500->irq_base; + struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data); + int offset = data->irq - ab8500->irq_base; int index = offset / 8; int mask = 1 << (offset % 8); @@ -279,10 +288,10 @@ static void ab8500_irq_unmask(unsigned int irq) static struct irq_chip ab8500_irq_chip = { .name = "ab8500", - .bus_lock = ab8500_irq_lock, - .bus_sync_unlock = ab8500_irq_sync_unlock, - .mask = ab8500_irq_mask, - .unmask = ab8500_irq_unmask, + .irq_bus_lock = ab8500_irq_lock, + .irq_bus_sync_unlock = ab8500_irq_sync_unlock, + .irq_mask = ab8500_irq_mask, + .irq_unmask = ab8500_irq_unmask, }; static irqreturn_t ab8500_irq(int irq, void *dev) @@ -297,6 +306,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev) int status; u8 value; + /* Interrupt register 12 does'nt exist prior to version 0x20 */ + if (regoffset == 11 && ab8500->chip_id < 0x20) + continue; + status 
= get_register_interruptible(ab8500, AB8500_INTERRUPT, AB8500_IT_LATCH1_REG + regoffset, &value); if (status < 0 || value == 0) @@ -393,12 +406,194 @@ static struct resource ab8500_poweronkey_db_resources[] = { }, }; +static struct resource ab8500_bm_resources[] = { + { + .name = "MAIN_EXT_CH_NOT_OK", + .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, + .end = AB8500_INT_MAIN_EXT_CH_NOT_OK, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BATT_OVV", + .start = AB8500_INT_BATT_OVV, + .end = AB8500_INT_BATT_OVV, + .flags = IORESOURCE_IRQ, + }, + { + .name = "MAIN_CH_UNPLUG_DET", + .start = AB8500_INT_MAIN_CH_UNPLUG_DET, + .end = AB8500_INT_MAIN_CH_UNPLUG_DET, + .flags = IORESOURCE_IRQ, + }, + { + .name = "MAIN_CHARGE_PLUG_DET", + .start = AB8500_INT_MAIN_CH_PLUG_DET, + .end = AB8500_INT_MAIN_CH_PLUG_DET, + .flags = IORESOURCE_IRQ, + }, + { + .name = "VBUS_DET_F", + .start = AB8500_INT_VBUS_DET_F, + .end = AB8500_INT_VBUS_DET_F, + .flags = IORESOURCE_IRQ, + }, + { + .name = "VBUS_DET_R", + .start = AB8500_INT_VBUS_DET_R, + .end = AB8500_INT_VBUS_DET_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BAT_CTRL_INDB", + .start = AB8500_INT_BAT_CTRL_INDB, + .end = AB8500_INT_BAT_CTRL_INDB, + .flags = IORESOURCE_IRQ, + }, + { + .name = "CH_WD_EXP", + .start = AB8500_INT_CH_WD_EXP, + .end = AB8500_INT_CH_WD_EXP, + .flags = IORESOURCE_IRQ, + }, + { + .name = "VBUS_OVV", + .start = AB8500_INT_VBUS_OVV, + .end = AB8500_INT_VBUS_OVV, + .flags = IORESOURCE_IRQ, + }, + { + .name = "NCONV_ACCU", + .start = AB8500_INT_CCN_CONV_ACC, + .end = AB8500_INT_CCN_CONV_ACC, + .flags = IORESOURCE_IRQ, + }, + { + .name = "LOW_BAT_F", + .start = AB8500_INT_LOW_BAT_F, + .end = AB8500_INT_LOW_BAT_F, + .flags = IORESOURCE_IRQ, + }, + { + .name = "LOW_BAT_R", + .start = AB8500_INT_LOW_BAT_R, + .end = AB8500_INT_LOW_BAT_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BTEMP_LOW", + .start = AB8500_INT_BTEMP_LOW, + .end = AB8500_INT_BTEMP_LOW, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BTEMP_HIGH", + .start = AB8500_INT_BTEMP_HIGH, + .end = AB8500_INT_BTEMP_HIGH, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB_CHARGER_NOT_OKR", + .start = AB8500_INT_USB_CHARGER_NOT_OK, + .end = AB8500_INT_USB_CHARGER_NOT_OK, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB_CHARGE_DET_DONE", + .start = AB8500_INT_USB_CHG_DET_DONE, + .end = AB8500_INT_USB_CHG_DET_DONE, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB_CH_TH_PROT_R", + .start = AB8500_INT_USB_CH_TH_PROT_R, + .end = AB8500_INT_USB_CH_TH_PROT_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "MAIN_CH_TH_PROT_R", + .start = AB8500_INT_MAIN_CH_TH_PROT_R, + .end = AB8500_INT_MAIN_CH_TH_PROT_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB_CHARGER_NOT_OKF", + .start = AB8500_INT_USB_CHARGER_NOT_OKF, + .end = AB8500_INT_USB_CHARGER_NOT_OKF, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource ab8500_debug_resources[] = { + { + .name = "IRQ_FIRST", + .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, + .end = AB8500_INT_MAIN_EXT_CH_NOT_OK, + .flags = IORESOURCE_IRQ, + }, + { + .name = "IRQ_LAST", + .start = AB8500_INT_USB_CHARGER_NOT_OKF, + .end = AB8500_INT_USB_CHARGER_NOT_OKF, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource ab8500_usb_resources[] = { + { + .name = "ID_WAKEUP_R", + .start = AB8500_INT_ID_WAKEUP_R, + .end = AB8500_INT_ID_WAKEUP_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "ID_WAKEUP_F", + .start = AB8500_INT_ID_WAKEUP_F, + .end = AB8500_INT_ID_WAKEUP_F, + .flags = IORESOURCE_IRQ, + }, + { + .name = "VBUS_DET_F", + .start = AB8500_INT_VBUS_DET_F, + 
.end = AB8500_INT_VBUS_DET_F, + .flags = IORESOURCE_IRQ, + }, + { + .name = "VBUS_DET_R", + .start = AB8500_INT_VBUS_DET_R, + .end = AB8500_INT_VBUS_DET_R, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB_LINK_STATUS", + .start = AB8500_INT_USB_LINK_STATUS, + .end = AB8500_INT_USB_LINK_STATUS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource ab8500_temp_resources[] = { + { + .name = "AB8500_TEMP_WARM", + .start = AB8500_INT_TEMP_WARM, + .end = AB8500_INT_TEMP_WARM, + .flags = IORESOURCE_IRQ, + }, +}; + static struct mfd_cell ab8500_devs[] = { #ifdef CONFIG_DEBUG_FS { .name = "ab8500-debug", + .num_resources = ARRAY_SIZE(ab8500_debug_resources), + .resources = ab8500_debug_resources, }, #endif + { + .name = "ab8500-sysctrl", + }, + { + .name = "ab8500-regulator", + }, { .name = "ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), @@ -409,6 +604,22 @@ static struct mfd_cell ab8500_devs[] = { .num_resources = ARRAY_SIZE(ab8500_rtc_resources), .resources = ab8500_rtc_resources, }, + { + .name = "ab8500-bm", + .num_resources = ARRAY_SIZE(ab8500_bm_resources), + .resources = ab8500_bm_resources, + }, + { .name = "ab8500-codec", }, + { + .name = "ab8500-usb", + .num_resources = ARRAY_SIZE(ab8500_usb_resources), + .resources = ab8500_usb_resources, + }, + { + .name = "ab8500-poweron-key", + .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources), + .resources = ab8500_poweronkey_db_resources, + }, { .name = "ab8500-pwm", .id = 1, @@ -421,17 +632,37 @@ static struct mfd_cell ab8500_devs[] = { .name = "ab8500-pwm", .id = 3, }, - { .name = "ab8500-charger", }, - { .name = "ab8500-audio", }, - { .name = "ab8500-usb", }, - { .name = "ab8500-regulator", }, + { .name = "ab8500-leds", }, { - .name = "ab8500-poweron-key", - .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources), - .resources = ab8500_poweronkey_db_resources, + .name = "ab8500-denc", + }, + { + .name = "ab8500-temp", + .num_resources = ARRAY_SIZE(ab8500_temp_resources), + .resources = ab8500_temp_resources, }, }; +static ssize_t show_chip_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ab8500 *ab8500; + + ab8500 = dev_get_drvdata(dev); + return sprintf(buf, "%#x\n", ab8500 ? 
ab8500->chip_id : -EINVAL); +} + +static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL); + +static struct attribute *ab8500_sysfs_entries[] = { + &dev_attr_chip_id.attr, + NULL, +}; + +static struct attribute_group ab8500_attr_group = { + .attrs = ab8500_sysfs_entries, +}; + int __devinit ab8500_init(struct ab8500 *ab8500) { struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev); @@ -454,8 +685,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500) * 0x0 - Early Drop * 0x10 - Cut 1.0 * 0x11 - Cut 1.1 + * 0x20 - Cut 2.0 */ - if (value == 0x0 || value == 0x10 || value == 0x11) { + if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) { ab8500->revision = value; dev_info(ab8500->dev, "detected chip, revision: %#x\n", value); } else { @@ -468,18 +700,16 @@ int __devinit ab8500_init(struct ab8500 *ab8500) plat->init(ab8500); /* Clear and mask all interrupts */ - for (i = 0; i < 10; i++) { - get_register_interruptible(ab8500, AB8500_INTERRUPT, - AB8500_IT_LATCH1_REG + i, &value); - set_register_interruptible(ab8500, AB8500_INTERRUPT, - AB8500_IT_MASK1_REG + i, 0xff); - } + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { + /* Interrupt register 12 does'nt exist prior to version 0x20 */ + if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) + continue; - for (i = 18; i < 24; i++) { get_register_interruptible(ab8500, AB8500_INTERRUPT, - AB8500_IT_LATCH1_REG + i, &value); + AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[i], + &value); set_register_interruptible(ab8500, AB8500_INTERRUPT, - AB8500_IT_MASK1_REG + i, 0xff); + AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i], 0xff); } ret = abx500_register_ops(ab8500->dev, &ab8500_ops); @@ -495,7 +725,8 @@ int __devinit ab8500_init(struct ab8500 *ab8500) return ret; ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq, - IRQF_ONESHOT, "ab8500", ab8500); + IRQF_ONESHOT | IRQF_NO_SUSPEND, + "ab8500", ab8500); if (ret) goto out_removeirq; } @@ -506,6 +737,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500) if (ret) goto out_freeirq; + ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group); + if (ret) + dev_err(ab8500->dev, "error creating sysfs entries\n"); + return ret; out_freeirq: @@ -519,6 +754,7 @@ out_removeirq: int __devexit ab8500_exit(struct ab8500 *ab8500) { + sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group); mfd_remove_devices(ab8500->dev); if (ab8500->irq_base) { free_irq(ab8500->irq, ab8500); diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index 8d1e05a39815..3c1541ae7223 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -24,9 +24,9 @@ static u32 debug_address; * @perm: access permissions for the range */ struct ab8500_reg_range { - u8 first; - u8 last; - u8 perm; + u8 first; + u8 last; + u8 perm; }; /** @@ -36,9 +36,9 @@ struct ab8500_reg_range { * @range: the list of register ranges */ struct ab8500_i2c_ranges { - u8 num_ranges; - u8 bankid; - const struct ab8500_reg_range *range; + u8 num_ranges; + u8 bankid; + const struct ab8500_reg_range *range; }; #define AB8500_NAME_STRING "ab8500" @@ -47,521 +47,521 @@ struct ab8500_i2c_ranges { #define AB8500_REV_REG 0x80 static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { - [0x0] = { - .num_ranges = 0, - .range = 0, - }, - [AB8500_SYS_CTRL1_BLOCK] = { - .num_ranges = 3, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x02, - }, - { - .first = 0x42, - .last = 0x42, - }, - { - .first = 0x80, - .last = 0x81, - }, - }, - }, - [AB8500_SYS_CTRL2_BLOCK] = { - 
.num_ranges = 4, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x0D, - }, - { - .first = 0x0F, - .last = 0x17, - }, - { - .first = 0x30, - .last = 0x30, - }, - { - .first = 0x32, - .last = 0x33, - }, - }, - }, - [AB8500_REGU_CTRL1] = { - .num_ranges = 3, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x00, - }, - { - .first = 0x03, - .last = 0x10, - }, - { - .first = 0x80, - .last = 0x84, - }, - }, - }, - [AB8500_REGU_CTRL2] = { - .num_ranges = 5, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x15, - }, - { - .first = 0x17, - .last = 0x19, - }, - { - .first = 0x1B, - .last = 0x1D, - }, - { - .first = 0x1F, - .last = 0x22, - }, - { - .first = 0x40, - .last = 0x44, - }, - /* 0x80-0x8B is SIM registers and should - * not be accessed from here */ - }, - }, - [AB8500_USB] = { - .num_ranges = 2, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x80, - .last = 0x83, - }, - { - .first = 0x87, - .last = 0x8A, - }, - }, - }, - [AB8500_TVOUT] = { - .num_ranges = 9, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x12, - }, - { - .first = 0x15, - .last = 0x17, - }, - { - .first = 0x19, - .last = 0x21, - }, - { - .first = 0x27, - .last = 0x2C, - }, - { - .first = 0x41, - .last = 0x41, - }, - { - .first = 0x45, - .last = 0x5B, - }, - { - .first = 0x5D, - .last = 0x5D, - }, - { - .first = 0x69, - .last = 0x69, - }, - { - .first = 0x80, - .last = 0x81, - }, - }, - }, - [AB8500_DBI] = { - .num_ranges = 0, - .range = 0, - }, - [AB8500_ECI_AV_ACC] = { - .num_ranges = 1, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x80, - .last = 0x82, - }, - }, - }, - [0x9] = { - .num_ranges = 0, - .range = 0, - }, - [AB8500_GPADC] = { - .num_ranges = 1, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x08, - }, - }, - }, - [AB8500_CHARGER] = { - .num_ranges = 8, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x03, - }, - { - .first = 0x05, - .last = 0x05, - }, - { - .first = 0x40, - .last = 0x40, - }, - { - .first = 0x42, - .last = 0x42, - }, - { - .first = 0x44, - .last = 0x44, - }, - { - .first = 0x50, - .last = 0x55, - }, - { - .first = 0x80, - .last = 0x82, - }, - { - .first = 0xC0, - .last = 0xC2, - }, - }, - }, - [AB8500_GAS_GAUGE] = { - .num_ranges = 3, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x00, - }, - { - .first = 0x07, - .last = 0x0A, - }, - { - .first = 0x10, - .last = 0x14, - }, - }, - }, - [AB8500_AUDIO] = { - .num_ranges = 1, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x6F, - }, - }, - }, - [AB8500_INTERRUPT] = { - .num_ranges = 0, - .range = 0, - }, - [AB8500_RTC] = { - .num_ranges = 1, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x0F, - }, - }, - }, - [AB8500_MISC] = { - .num_ranges = 8, - .range = (struct ab8500_reg_range[]) { - { - .first = 0x00, - .last = 0x05, - }, - { - .first = 0x10, - .last = 0x15, - }, - { - .first = 0x20, - .last = 0x25, - }, - { - .first = 0x30, - .last = 0x35, - }, - { - .first = 0x40, - .last = 0x45, - }, - { - .first = 0x50, - .last = 0x50, - }, - { - .first = 0x60, - .last = 0x67, - }, - { - .first = 0x80, - .last = 0x80, - }, - }, - }, - [0x11] = { - .num_ranges = 0, - .range = 0, - }, - [0x12] = { - .num_ranges = 0, - .range = 0, - }, - [0x13] = { - .num_ranges = 0, - .range = 0, - }, - [0x14] = { - .num_ranges = 0, - .range = 0, - }, - [AB8500_OTP_EMUL] = { - .num_ranges = 1, - .range = (struct ab8500_reg_range[]) { - { 
- .first = 0x01, - .last = 0x0F, - }, - }, - }, + [0x0] = { + .num_ranges = 0, + .range = 0, + }, + [AB8500_SYS_CTRL1_BLOCK] = { + .num_ranges = 3, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x02, + }, + { + .first = 0x42, + .last = 0x42, + }, + { + .first = 0x80, + .last = 0x81, + }, + }, + }, + [AB8500_SYS_CTRL2_BLOCK] = { + .num_ranges = 4, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x0D, + }, + { + .first = 0x0F, + .last = 0x17, + }, + { + .first = 0x30, + .last = 0x30, + }, + { + .first = 0x32, + .last = 0x33, + }, + }, + }, + [AB8500_REGU_CTRL1] = { + .num_ranges = 3, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x00, + }, + { + .first = 0x03, + .last = 0x10, + }, + { + .first = 0x80, + .last = 0x84, + }, + }, + }, + [AB8500_REGU_CTRL2] = { + .num_ranges = 5, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x15, + }, + { + .first = 0x17, + .last = 0x19, + }, + { + .first = 0x1B, + .last = 0x1D, + }, + { + .first = 0x1F, + .last = 0x22, + }, + { + .first = 0x40, + .last = 0x44, + }, + /* 0x80-0x8B is SIM registers and should + * not be accessed from here */ + }, + }, + [AB8500_USB] = { + .num_ranges = 2, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x80, + .last = 0x83, + }, + { + .first = 0x87, + .last = 0x8A, + }, + }, + }, + [AB8500_TVOUT] = { + .num_ranges = 9, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x12, + }, + { + .first = 0x15, + .last = 0x17, + }, + { + .first = 0x19, + .last = 0x21, + }, + { + .first = 0x27, + .last = 0x2C, + }, + { + .first = 0x41, + .last = 0x41, + }, + { + .first = 0x45, + .last = 0x5B, + }, + { + .first = 0x5D, + .last = 0x5D, + }, + { + .first = 0x69, + .last = 0x69, + }, + { + .first = 0x80, + .last = 0x81, + }, + }, + }, + [AB8500_DBI] = { + .num_ranges = 0, + .range = NULL, + }, + [AB8500_ECI_AV_ACC] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x80, + .last = 0x82, + }, + }, + }, + [0x9] = { + .num_ranges = 0, + .range = NULL, + }, + [AB8500_GPADC] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x08, + }, + }, + }, + [AB8500_CHARGER] = { + .num_ranges = 8, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x03, + }, + { + .first = 0x05, + .last = 0x05, + }, + { + .first = 0x40, + .last = 0x40, + }, + { + .first = 0x42, + .last = 0x42, + }, + { + .first = 0x44, + .last = 0x44, + }, + { + .first = 0x50, + .last = 0x55, + }, + { + .first = 0x80, + .last = 0x82, + }, + { + .first = 0xC0, + .last = 0xC2, + }, + }, + }, + [AB8500_GAS_GAUGE] = { + .num_ranges = 3, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x00, + }, + { + .first = 0x07, + .last = 0x0A, + }, + { + .first = 0x10, + .last = 0x14, + }, + }, + }, + [AB8500_AUDIO] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x6F, + }, + }, + }, + [AB8500_INTERRUPT] = { + .num_ranges = 0, + .range = NULL, + }, + [AB8500_RTC] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x0F, + }, + }, + }, + [AB8500_MISC] = { + .num_ranges = 8, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x05, + }, + { + .first = 0x10, + .last = 0x15, + }, + { + .first = 0x20, + .last = 0x25, + }, + { + .first = 0x30, + .last = 0x35, + }, + { + .first = 0x40, + .last = 0x45, + }, + { + .first = 0x50, + .last = 0x50, + }, + { + .first = 
0x60, + .last = 0x67, + }, + { + .first = 0x80, + .last = 0x80, + }, + }, + }, + [0x11] = { + .num_ranges = 0, + .range = NULL, + }, + [0x12] = { + .num_ranges = 0, + .range = NULL, + }, + [0x13] = { + .num_ranges = 0, + .range = NULL, + }, + [0x14] = { + .num_ranges = 0, + .range = NULL, + }, + [AB8500_OTP_EMUL] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x01, + .last = 0x0F, + }, + }, + }, }; static int ab8500_registers_print(struct seq_file *s, void *p) { - struct device *dev = s->private; - unsigned int i; - u32 bank = debug_bank; - - seq_printf(s, AB8500_NAME_STRING " register values:\n"); - - seq_printf(s, " bank %u:\n", bank); - for (i = 0; i < debug_ranges[bank].num_ranges; i++) { - u32 reg; - - for (reg = debug_ranges[bank].range[i].first; - reg <= debug_ranges[bank].range[i].last; - reg++) { - u8 value; - int err; - - err = abx500_get_register_interruptible(dev, - (u8)bank, (u8)reg, &value); - if (err < 0) { - dev_err(dev, "ab->read fail %d\n", err); - return err; - } - - err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank, - reg, value); - if (err < 0) { - dev_err(dev, "seq_printf overflow\n"); - /* Error is not returned here since - * the output is wanted in any case */ - return 0; - } - } - } - return 0; + struct device *dev = s->private; + unsigned int i; + u32 bank = debug_bank; + + seq_printf(s, AB8500_NAME_STRING " register values:\n"); + + seq_printf(s, " bank %u:\n", bank); + for (i = 0; i < debug_ranges[bank].num_ranges; i++) { + u32 reg; + + for (reg = debug_ranges[bank].range[i].first; + reg <= debug_ranges[bank].range[i].last; + reg++) { + u8 value; + int err; + + err = abx500_get_register_interruptible(dev, + (u8)bank, (u8)reg, &value); + if (err < 0) { + dev_err(dev, "ab->read fail %d\n", err); + return err; + } + + err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank, + reg, value); + if (err < 0) { + dev_err(dev, "seq_printf overflow\n"); + /* Error is not returned here since + * the output is wanted in any case */ + return 0; + } + } + } + return 0; } static int ab8500_registers_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_registers_print, inode->i_private); + return single_open(file, ab8500_registers_print, inode->i_private); } static const struct file_operations ab8500_registers_fops = { - .open = ab8500_registers_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, + .open = ab8500_registers_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, }; static int ab8500_bank_print(struct seq_file *s, void *p) { - return seq_printf(s, "%d\n", debug_bank); + return seq_printf(s, "%d\n", debug_bank); } static int ab8500_bank_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_bank_print, inode->i_private); + return single_open(file, ab8500_bank_print, inode->i_private); } static ssize_t ab8500_bank_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) + const char __user *user_buf, + size_t count, loff_t *ppos) { - struct device *dev = ((struct seq_file *)(file->private_data))->private; - char buf[32]; - int buf_size; - unsigned long user_bank; - int err; - - /* Get userspace string and assure termination */ - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - buf[buf_size] = 0; - - err = strict_strtoul(buf, 0, &user_bank); - if (err) - return -EINVAL; - - if (user_bank >= 
AB8500_NUM_BANKS) { - dev_err(dev, "debugfs error input > number of banks\n"); - return -EINVAL; - } - - debug_bank = user_bank; - - return buf_size; + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_bank; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_bank); + if (err) + return -EINVAL; + + if (user_bank >= AB8500_NUM_BANKS) { + dev_err(dev, "debugfs error input > number of banks\n"); + return -EINVAL; + } + + debug_bank = user_bank; + + return buf_size; } static int ab8500_address_print(struct seq_file *s, void *p) { - return seq_printf(s, "0x%02X\n", debug_address); + return seq_printf(s, "0x%02X\n", debug_address); } static int ab8500_address_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_address_print, inode->i_private); + return single_open(file, ab8500_address_print, inode->i_private); } static ssize_t ab8500_address_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) + const char __user *user_buf, + size_t count, loff_t *ppos) { - struct device *dev = ((struct seq_file *)(file->private_data))->private; - char buf[32]; - int buf_size; - unsigned long user_address; - int err; - - /* Get userspace string and assure termination */ - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - buf[buf_size] = 0; - - err = strict_strtoul(buf, 0, &user_address); - if (err) - return -EINVAL; - if (user_address > 0xff) { - dev_err(dev, "debugfs error input > 0xff\n"); - return -EINVAL; - } - debug_address = user_address; - return buf_size; + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_address; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_address); + if (err) + return -EINVAL; + if (user_address > 0xff) { + dev_err(dev, "debugfs error input > 0xff\n"); + return -EINVAL; + } + debug_address = user_address; + return buf_size; } static int ab8500_val_print(struct seq_file *s, void *p) { - struct device *dev = s->private; - int ret; - u8 regvalue; - - ret = abx500_get_register_interruptible(dev, - (u8)debug_bank, (u8)debug_address, ®value); - if (ret < 0) { - dev_err(dev, "abx500_get_reg fail %d, %d\n", - ret, __LINE__); - return -EINVAL; - } - seq_printf(s, "0x%02X\n", regvalue); - - return 0; + struct device *dev = s->private; + int ret; + u8 regvalue; + + ret = abx500_get_register_interruptible(dev, + (u8)debug_bank, (u8)debug_address, ®value); + if (ret < 0) { + dev_err(dev, "abx500_get_reg fail %d, %d\n", + ret, __LINE__); + return -EINVAL; + } + seq_printf(s, "0x%02X\n", regvalue); + + return 0; } static int ab8500_val_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_val_print, inode->i_private); + return single_open(file, ab8500_val_print, inode->i_private); } static ssize_t ab8500_val_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) + const char __user *user_buf, + size_t count, loff_t *ppos) { - struct device *dev = ((struct seq_file *)(file->private_data))->private; - char 
buf[32]; - int buf_size; - unsigned long user_val; - int err; - - /* Get userspace string and assure termination */ - buf_size = min(count, (sizeof(buf)-1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - buf[buf_size] = 0; - - err = strict_strtoul(buf, 0, &user_val); - if (err) - return -EINVAL; - if (user_val > 0xff) { - dev_err(dev, "debugfs error input > 0xff\n"); - return -EINVAL; - } - err = abx500_set_register_interruptible(dev, - (u8)debug_bank, debug_address, (u8)user_val); - if (err < 0) { - printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__); - return -EINVAL; - } - - return buf_size; + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + if (user_val > 0xff) { + dev_err(dev, "debugfs error input > 0xff\n"); + return -EINVAL; + } + err = abx500_set_register_interruptible(dev, + (u8)debug_bank, debug_address, (u8)user_val); + if (err < 0) { + printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__); + return -EINVAL; + } + + return buf_size; } static const struct file_operations ab8500_bank_fops = { - .open = ab8500_bank_open, - .write = ab8500_bank_write, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, + .open = ab8500_bank_open, + .write = ab8500_bank_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, }; static const struct file_operations ab8500_address_fops = { - .open = ab8500_address_open, - .write = ab8500_address_write, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, + .open = ab8500_address_open, + .write = ab8500_address_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, }; static const struct file_operations ab8500_val_fops = { - .open = ab8500_val_open, - .write = ab8500_val_write, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, + .open = ab8500_val_open, + .write = ab8500_val_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, }; static struct dentry *ab8500_dir; @@ -572,77 +572,77 @@ static struct dentry *ab8500_val_file; static int __devinit ab8500_debug_probe(struct platform_device *plf) { - debug_bank = AB8500_MISC; - debug_address = AB8500_REV_REG & 0x00FF; + debug_bank = AB8500_MISC; + debug_address = AB8500_REV_REG & 0x00FF; - ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL); - if (!ab8500_dir) - goto exit_no_debugfs; + ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL); + if (!ab8500_dir) + goto exit_no_debugfs; - ab8500_reg_file = debugfs_create_file("all-bank-registers", - S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops); - if (!ab8500_reg_file) - goto exit_destroy_dir; + ab8500_reg_file = debugfs_create_file("all-bank-registers", + S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops); + if (!ab8500_reg_file) + goto exit_destroy_dir; - ab8500_bank_file = debugfs_create_file("register-bank", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); - if (!ab8500_bank_file) - goto exit_destroy_reg; + ab8500_bank_file = debugfs_create_file("register-bank", 
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); + if (!ab8500_bank_file) + goto exit_destroy_reg; - ab8500_address_file = debugfs_create_file("register-address", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, - &ab8500_address_fops); - if (!ab8500_address_file) - goto exit_destroy_bank; + ab8500_address_file = debugfs_create_file("register-address", + (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, + &ab8500_address_fops); + if (!ab8500_address_file) + goto exit_destroy_bank; - ab8500_val_file = debugfs_create_file("register-value", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); - if (!ab8500_val_file) - goto exit_destroy_address; + ab8500_val_file = debugfs_create_file("register-value", + (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); + if (!ab8500_val_file) + goto exit_destroy_address; - return 0; + return 0; exit_destroy_address: - debugfs_remove(ab8500_address_file); + debugfs_remove(ab8500_address_file); exit_destroy_bank: - debugfs_remove(ab8500_bank_file); + debugfs_remove(ab8500_bank_file); exit_destroy_reg: - debugfs_remove(ab8500_reg_file); + debugfs_remove(ab8500_reg_file); exit_destroy_dir: - debugfs_remove(ab8500_dir); + debugfs_remove(ab8500_dir); exit_no_debugfs: - dev_err(&plf->dev, "failed to create debugfs entries.\n"); - return -ENOMEM; + dev_err(&plf->dev, "failed to create debugfs entries.\n"); + return -ENOMEM; } static int __devexit ab8500_debug_remove(struct platform_device *plf) { - debugfs_remove(ab8500_val_file); - debugfs_remove(ab8500_address_file); - debugfs_remove(ab8500_bank_file); - debugfs_remove(ab8500_reg_file); - debugfs_remove(ab8500_dir); + debugfs_remove(ab8500_val_file); + debugfs_remove(ab8500_address_file); + debugfs_remove(ab8500_bank_file); + debugfs_remove(ab8500_reg_file); + debugfs_remove(ab8500_dir); - return 0; + return 0; } static struct platform_driver ab8500_debug_driver = { - .driver = { - .name = "ab8500-debug", - .owner = THIS_MODULE, - }, - .probe = ab8500_debug_probe, - .remove = __devexit_p(ab8500_debug_remove) + .driver = { + .name = "ab8500-debug", + .owner = THIS_MODULE, + }, + .probe = ab8500_debug_probe, + .remove = __devexit_p(ab8500_debug_remove) }; static int __init ab8500_debug_init(void) { - return platform_driver_register(&ab8500_debug_driver); + return platform_driver_register(&ab8500_debug_driver); } static void __exit ab8500_debug_exit(void) { - platform_driver_unregister(&ab8500_debug_driver); + platform_driver_unregister(&ab8500_debug_driver); } subsys_initcall(ab8500_debug_init); module_exit(ab8500_debug_exit); diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c deleted file mode 100644 index b1653421edb5..000000000000 --- a/drivers/mfd/ab8500-spi.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (C) ST-Ericsson SA 2010 - * - * License Terms: GNU General Public License v2 - * Author: Srinidhi Kasagar - */ - -#include -#include -#include -#include -#include -#include -#include - -/* - * This funtion writes to any AB8500 registers using - * SPI protocol & before it writes it packs the data - * in the below 24 bit frame format - * - * *|------------------------------------| - * *| 23|22...18|17.......10|9|8|7......0| - * *| r/w bank adr data | - * * ------------------------------------ - * - * This function shouldn't be called from interrupt - * context - */ -static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data) -{ - struct spi_device *spi = container_of(ab8500->dev, struct spi_device, - dev); - unsigned long spi_data = addr << 10 | data; - 
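	/*
	 * Worked example (assuming, as the frame diagram above implies, that
	 * the 16-bit addr carries the 5-bit bank above the 8-bit register
	 * address): addr = 0x0240 (bank 0x02, register 0x40) with data = 0xFF
	 * packs to (0x0240 << 10) | 0xFF = 0x900FF, while the matching read
	 * frame would be (1 << 23) | (0x0240 << 10) = 0x890000.
	 */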
struct spi_transfer xfer; - struct spi_message msg; - - ab8500->tx_buf[0] = spi_data; - ab8500->rx_buf[0] = 0; - - xfer.tx_buf = ab8500->tx_buf; - xfer.rx_buf = NULL; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - return spi_sync(spi, &msg); -} - -static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr) -{ - struct spi_device *spi = container_of(ab8500->dev, struct spi_device, - dev); - unsigned long spi_data = 1 << 23 | addr << 10; - struct spi_transfer xfer; - struct spi_message msg; - int ret; - - ab8500->tx_buf[0] = spi_data; - ab8500->rx_buf[0] = 0; - - xfer.tx_buf = ab8500->tx_buf; - xfer.rx_buf = ab8500->rx_buf; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - ret = spi_sync(spi, &msg); - if (!ret) - /* - * Only the 8 lowermost bytes are - * defined with value, the rest may - * vary depending on chip/board noise. - */ - ret = ab8500->rx_buf[0] & 0xFFU; - - return ret; -} - -static int __devinit ab8500_spi_probe(struct spi_device *spi) -{ - struct ab8500 *ab8500; - int ret; - - spi->bits_per_word = 24; - ret = spi_setup(spi); - if (ret < 0) - return ret; - - ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL); - if (!ab8500) - return -ENOMEM; - - ab8500->dev = &spi->dev; - ab8500->irq = spi->irq; - - ab8500->read = ab8500_spi_read; - ab8500->write = ab8500_spi_write; - - spi_set_drvdata(spi, ab8500); - - ret = ab8500_init(ab8500); - if (ret) - kfree(ab8500); - - return ret; -} - -static int __devexit ab8500_spi_remove(struct spi_device *spi) -{ - struct ab8500 *ab8500 = spi_get_drvdata(spi); - - ab8500_exit(ab8500); - kfree(ab8500); - - return 0; -} - -static struct spi_driver ab8500_spi_driver = { - .driver = { - .name = "ab8500-spi", - .owner = THIS_MODULE, - }, - .probe = ab8500_spi_probe, - .remove = __devexit_p(ab8500_spi_remove) -}; - -static int __init ab8500_spi_init(void) -{ - return spi_register_driver(&ab8500_spi_driver); -} -subsys_initcall(ab8500_spi_init); - -static void __exit ab8500_spi_exit(void) -{ - spi_unregister_driver(&ab8500_spi_driver); -} -module_exit(ab8500_spi_exit); - -MODULE_AUTHOR("Srinidhi KASAGAR > asic->bus_shift)); } -void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) +static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) { unsigned long flags; u32 val; @@ -226,14 +226,14 @@ static inline int asic3_irq_to_index(struct asic3 *asic, int irq) return (irq - asic->irq_base) & 0xf; } -static void asic3_mask_gpio_irq(unsigned int irq) +static void asic3_mask_gpio_irq(struct irq_data *data) { - struct asic3 *asic = get_irq_chip_data(irq); + struct asic3 *asic = irq_data_get_irq_chip_data(data); u32 val, bank, index; unsigned long flags; - bank = asic3_irq_to_bank(asic, irq); - index = asic3_irq_to_index(asic, irq); + bank = asic3_irq_to_bank(asic, data->irq); + index = asic3_irq_to_index(asic, data->irq); spin_lock_irqsave(&asic->lock, flags); val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK); @@ -242,9 +242,9 @@ static void asic3_mask_gpio_irq(unsigned int irq) spin_unlock_irqrestore(&asic->lock, flags); } -static void asic3_mask_irq(unsigned int irq) +static void asic3_mask_irq(struct irq_data *data) { - struct asic3 *asic = get_irq_chip_data(irq); + struct asic3 *asic = irq_data_get_irq_chip_data(data); int regval; unsigned long flags; @@ -254,7 +254,7 @@ static void asic3_mask_irq(unsigned int irq) ASIC3_INTR_INT_MASK); regval &= ~(ASIC3_INTMASK_MASK0 << - (irq - (asic->irq_base + 
ASIC3_NUM_GPIOS))); + (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS))); asic3_write_register(asic, ASIC3_INTR_BASE + @@ -263,14 +263,14 @@ static void asic3_mask_irq(unsigned int irq) spin_unlock_irqrestore(&asic->lock, flags); } -static void asic3_unmask_gpio_irq(unsigned int irq) +static void asic3_unmask_gpio_irq(struct irq_data *data) { - struct asic3 *asic = get_irq_chip_data(irq); + struct asic3 *asic = irq_data_get_irq_chip_data(data); u32 val, bank, index; unsigned long flags; - bank = asic3_irq_to_bank(asic, irq); - index = asic3_irq_to_index(asic, irq); + bank = asic3_irq_to_bank(asic, data->irq); + index = asic3_irq_to_index(asic, data->irq); spin_lock_irqsave(&asic->lock, flags); val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK); @@ -279,9 +279,9 @@ static void asic3_unmask_gpio_irq(unsigned int irq) spin_unlock_irqrestore(&asic->lock, flags); } -static void asic3_unmask_irq(unsigned int irq) +static void asic3_unmask_irq(struct irq_data *data) { - struct asic3 *asic = get_irq_chip_data(irq); + struct asic3 *asic = irq_data_get_irq_chip_data(data); int regval; unsigned long flags; @@ -291,7 +291,7 @@ static void asic3_unmask_irq(unsigned int irq) ASIC3_INTR_INT_MASK); regval |= (ASIC3_INTMASK_MASK0 << - (irq - (asic->irq_base + ASIC3_NUM_GPIOS))); + (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS))); asic3_write_register(asic, ASIC3_INTR_BASE + @@ -300,15 +300,15 @@ static void asic3_unmask_irq(unsigned int irq) spin_unlock_irqrestore(&asic->lock, flags); } -static int asic3_gpio_irq_type(unsigned int irq, unsigned int type) +static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type) { - struct asic3 *asic = get_irq_chip_data(irq); + struct asic3 *asic = irq_data_get_irq_chip_data(data); u32 bank, index; u16 trigger, level, edge, bit; unsigned long flags; - bank = asic3_irq_to_bank(asic, irq); - index = asic3_irq_to_index(asic, irq); + bank = asic3_irq_to_bank(asic, data->irq); + index = asic3_irq_to_index(asic, data->irq); bit = 1<lock, flags); @@ -318,7 +318,7 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type) bank + ASIC3_GPIO_EDGE_TRIGGER); trigger = asic3_read_register(asic, bank + ASIC3_GPIO_TRIGGER_TYPE); - asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit; + asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit; if (type == IRQ_TYPE_EDGE_RISING) { trigger |= bit; @@ -328,11 +328,11 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type) edge &= ~bit; } else if (type == IRQ_TYPE_EDGE_BOTH) { trigger |= bit; - if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base)) + if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base)) edge &= ~bit; else edge |= bit; - asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit; + asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit; } else if (type == IRQ_TYPE_LEVEL_LOW) { trigger &= ~bit; level &= ~bit; @@ -359,17 +359,17 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type) static struct irq_chip asic3_gpio_irq_chip = { .name = "ASIC3-GPIO", - .ack = asic3_mask_gpio_irq, - .mask = asic3_mask_gpio_irq, - .unmask = asic3_unmask_gpio_irq, - .set_type = asic3_gpio_irq_type, + .irq_ack = asic3_mask_gpio_irq, + .irq_mask = asic3_mask_gpio_irq, + .irq_unmask = asic3_unmask_gpio_irq, + .irq_set_type = asic3_gpio_irq_type, }; static struct irq_chip asic3_irq_chip = { .name = "ASIC3", - .ack = asic3_mask_irq, - .mask = asic3_mask_irq, - .unmask = asic3_unmask_irq, + .irq_ack = asic3_mask_irq, + .irq_mask = asic3_mask_irq, + .irq_unmask = 
asic3_unmask_irq, }; static int __init asic3_irq_probe(struct platform_device *pdev) @@ -635,7 +635,7 @@ static struct resource ds1wm_resources[] = { }, { .start = ASIC3_IRQ_OWM, - .start = ASIC3_IRQ_OWM, + .end = ASIC3_IRQ_OWM, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c new file mode 100644 index 000000000000..59ca6f151e78 --- /dev/null +++ b/drivers/mfd/cs5535-mfd.c @@ -0,0 +1,151 @@ +/* + * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges + * + * The CS5535 and CS5536 has an ISA bridge on the PCI bus that is + * used for accessing GPIOs, MFGPTs, ACPI, etc. Each subdevice has + * an IO range that's specified in a single BAR. The BAR order is + * hardcoded in the CS553x specifications. + * + * Copyright (c) 2010 Andres Salomon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include + +#define DRV_NAME "cs5535-mfd" + +enum cs5535_mfd_bars { + SMB_BAR = 0, + GPIO_BAR = 1, + MFGPT_BAR = 2, + PMS_BAR = 4, + ACPI_BAR = 5, + NR_BARS, +}; + +static __devinitdata struct resource cs5535_mfd_resources[NR_BARS]; + +static __devinitdata struct mfd_cell cs5535_mfd_cells[] = { + { + .id = SMB_BAR, + .name = "cs5535-smb", + .num_resources = 1, + .resources = &cs5535_mfd_resources[SMB_BAR], + }, + { + .id = GPIO_BAR, + .name = "cs5535-gpio", + .num_resources = 1, + .resources = &cs5535_mfd_resources[GPIO_BAR], + }, + { + .id = MFGPT_BAR, + .name = "cs5535-mfgpt", + .num_resources = 1, + .resources = &cs5535_mfd_resources[MFGPT_BAR], + }, + { + .id = PMS_BAR, + .name = "cs5535-pms", + .num_resources = 1, + .resources = &cs5535_mfd_resources[PMS_BAR], + }, + { + .id = ACPI_BAR, + .name = "cs5535-acpi", + .num_resources = 1, + .resources = &cs5535_mfd_resources[ACPI_BAR], + }, +}; + +static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err, i; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* fill in IO range for each cell; subdrivers handle the region */ + for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) { + int bar = cs5535_mfd_cells[i].id; + struct resource *r = &cs5535_mfd_resources[bar]; + + r->flags = IORESOURCE_IO; + r->start = pci_resource_start(pdev, bar); + r->end = pci_resource_end(pdev, bar); + + /* id is used for temporarily storing BAR; unset it now */ + cs5535_mfd_cells[i].id = 0; + } + + err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells, + ARRAY_SIZE(cs5535_mfd_cells), NULL, 0); + if (err) { + dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); + goto err_disable; + } + + dev_info(&pdev->dev, "%zu devices registered.\n", + ARRAY_SIZE(cs5535_mfd_cells)); + + return 0; + +err_disable: + pci_disable_device(pdev); + return err; +} + +static void __devexit cs5535_mfd_remove(struct pci_dev *pdev) +{ + mfd_remove_devices(&pdev->dev); + pci_disable_device(pdev); +} + +static struct pci_device_id 
cs5535_mfd_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl); + +static struct pci_driver cs5535_mfd_drv = { + .name = DRV_NAME, + .id_table = cs5535_mfd_pci_tbl, + .probe = cs5535_mfd_probe, + .remove = __devexit_p(cs5535_mfd_remove), +}; + +static int __init cs5535_mfd_init(void) +{ + return pci_register_driver(&cs5535_mfd_drv); +} + +static void __exit cs5535_mfd_exit(void) +{ + pci_unregister_driver(&cs5535_mfd_drv); +} + +module_init(cs5535_mfd_init); +module_exit(cs5535_mfd_exit); + +MODULE_AUTHOR("Andres Salomon "); +MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index c2b698d69a93..9e2d8dd5f9e5 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -144,26 +144,26 @@ int pcap_to_irq(struct pcap_chip *pcap, int irq) } EXPORT_SYMBOL_GPL(pcap_to_irq); -static void pcap_mask_irq(unsigned int irq) +static void pcap_mask_irq(struct irq_data *d) { - struct pcap_chip *pcap = get_irq_chip_data(irq); + struct pcap_chip *pcap = irq_data_get_irq_chip_data(d); - pcap->msr |= 1 << irq_to_pcap(pcap, irq); + pcap->msr |= 1 << irq_to_pcap(pcap, d->irq); queue_work(pcap->workqueue, &pcap->msr_work); } -static void pcap_unmask_irq(unsigned int irq) +static void pcap_unmask_irq(struct irq_data *d) { - struct pcap_chip *pcap = get_irq_chip_data(irq); + struct pcap_chip *pcap = irq_data_get_irq_chip_data(d); - pcap->msr &= ~(1 << irq_to_pcap(pcap, irq)); + pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq)); queue_work(pcap->workqueue, &pcap->msr_work); } static struct irq_chip pcap_irq_chip = { - .name = "pcap", - .mask = pcap_mask_irq, - .unmask = pcap_unmask_irq, + .name = "pcap", + .irq_mask = pcap_mask_irq, + .irq_unmask = pcap_unmask_irq, }; static void pcap_msr_work(struct work_struct *work) @@ -199,8 +199,7 @@ static void pcap_isr_work(struct work_struct *work) if (service & 1) { struct irq_desc *desc = irq_to_desc(irq); - if (WARN(!desc, KERN_WARNING - "Invalid PCAP IRQ %d\n", irq)) + if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq)) break; if (desc->status & IRQ_DISABLED) @@ -218,7 +217,7 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) { struct pcap_chip *pcap = get_irq_data(irq); - desc->chip->ack(irq); + desc->irq_data.chip->irq_ack(&desc->irq_data); queue_work(pcap->workqueue, &pcap->isr_work); return; } @@ -282,7 +281,7 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap) mutex_lock(&pcap->adc_mutex); req = pcap->adc_queue[pcap->adc_head]; - if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) { + if (WARN(!req, "adc irq without pending request\n")) { mutex_unlock(&pcap->adc_mutex); return IRQ_HANDLED; } diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index d3e74f8585e0..d00b6d1a69e5 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c @@ -70,31 +70,32 @@ static inline void ack_irqs(struct egpio_info *ei) ei->ack_write, ei->ack_register << ei->bus_shift); } -static void egpio_ack(unsigned int irq) +static void egpio_ack(struct irq_data *data) { } /* There does not appear to be a way to proactively mask interrupts * on the egpio chip itself. So, we simply ignore interrupts that * aren't desired. 
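 * The mask state therefore lives purely in software: egpio_mask() and
 * egpio_unmask() below just clear or set bit (data->irq - ei->irq_start)
 * in ei->irqs_enabled, and interrupts whose bit is clear are ignored
 * when they fire.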
*/ -static void egpio_mask(unsigned int irq) +static void egpio_mask(struct irq_data *data) { - struct egpio_info *ei = get_irq_chip_data(irq); - ei->irqs_enabled &= ~(1 << (irq - ei->irq_start)); - pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled); + struct egpio_info *ei = irq_data_get_irq_chip_data(data); + ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start)); + pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled); } -static void egpio_unmask(unsigned int irq) + +static void egpio_unmask(struct irq_data *data) { - struct egpio_info *ei = get_irq_chip_data(irq); - ei->irqs_enabled |= 1 << (irq - ei->irq_start); - pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled); + struct egpio_info *ei = irq_data_get_irq_chip_data(data); + ei->irqs_enabled |= 1 << (data->irq - ei->irq_start); + pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled); } static struct irq_chip egpio_muxed_chip = { - .name = "htc-egpio", - .ack = egpio_ack, - .mask = egpio_mask, - .unmask = egpio_unmask, + .name = "htc-egpio", + .irq_ack = egpio_ack, + .irq_mask = egpio_mask, + .irq_unmask = egpio_unmask, }; static void egpio_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c index 594c9a8e25e1..296ad1562f69 100644 --- a/drivers/mfd/htc-i2cpld.c +++ b/drivers/mfd/htc-i2cpld.c @@ -82,25 +82,25 @@ struct htcpld_data { /* There does not appear to be a way to proactively mask interrupts * on the htcpld chip itself. So, we simply ignore interrupts that * aren't desired. */ -static void htcpld_mask(unsigned int irq) +static void htcpld_mask(struct irq_data *data) { - struct htcpld_chip *chip = get_irq_chip_data(irq); - chip->irqs_enabled &= ~(1 << (irq - chip->irq_start)); - pr_debug("HTCPLD mask %d %04x\n", irq, chip->irqs_enabled); + struct htcpld_chip *chip = irq_data_get_irq_chip_data(data); + chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start)); + pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled); } -static void htcpld_unmask(unsigned int irq) +static void htcpld_unmask(struct irq_data *data) { - struct htcpld_chip *chip = get_irq_chip_data(irq); - chip->irqs_enabled |= 1 << (irq - chip->irq_start); - pr_debug("HTCPLD unmask %d %04x\n", irq, chip->irqs_enabled); + struct htcpld_chip *chip = irq_data_get_irq_chip_data(data); + chip->irqs_enabled |= 1 << (data->irq - chip->irq_start); + pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled); } -static int htcpld_set_type(unsigned int irq, unsigned int flags) +static int htcpld_set_type(struct irq_data *data, unsigned int flags) { - struct irq_desc *d = irq_to_desc(irq); + struct irq_desc *d = irq_to_desc(data->irq); if (!d) { - pr_err("HTCPLD invalid IRQ: %d\n", irq); + pr_err("HTCPLD invalid IRQ: %d\n", data->irq); return -EINVAL; } @@ -118,10 +118,10 @@ static int htcpld_set_type(unsigned int irq, unsigned int flags) } static struct irq_chip htcpld_muxed_chip = { - .name = "htcpld", - .mask = htcpld_mask, - .unmask = htcpld_unmask, - .set_type = htcpld_set_type, + .name = "htcpld", + .irq_mask = htcpld_mask, + .irq_unmask = htcpld_unmask, + .irq_set_type = htcpld_set_type, }; /* To properly dispatch IRQ events, we need to read from the @@ -235,7 +235,7 @@ static irqreturn_t htcpld_handler(int irq, void *dev) * and that work is scheduled in the set routine. The kernel can then run * the I2C functions, which will sleep, in process context. 
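 * In practice htcpld_chip_set() below only caches the requested value and
 * schedules set_val_work; the worker, htcpld_chip_set_ni(), then performs
 * the sleeping SMBus transfer with the cached output state.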
*/ -void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val) +static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val) { struct i2c_client *client; struct htcpld_chip *chip_data; @@ -259,7 +259,7 @@ void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val) schedule_work(&(chip_data->set_val_work)); } -void htcpld_chip_set_ni(struct work_struct *work) +static void htcpld_chip_set_ni(struct work_struct *work) { struct htcpld_chip *chip_data; struct i2c_client *client; @@ -269,7 +269,7 @@ void htcpld_chip_set_ni(struct work_struct *work) i2c_smbus_read_byte_data(client, chip_data->cache_out); } -int htcpld_chip_get(struct gpio_chip *chip, unsigned offset) +static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset) { struct htcpld_chip *chip_data; int val = 0; @@ -316,7 +316,7 @@ static int htcpld_direction_input(struct gpio_chip *chip, return (offset < chip->ngpio) ? 0 : -EINVAL; } -int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset) +static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset) { struct htcpld_chip *chip_data; @@ -328,7 +328,7 @@ int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset) return -EINVAL; } -void htcpld_chip_reset(struct i2c_client *client) +static void htcpld_chip_reset(struct i2c_client *client) { struct htcpld_chip *chip_data = i2c_get_clientdata(client); if (!chip_data) diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 9dd1b33f2275..0cc59795f600 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -84,31 +84,30 @@ static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq, spin_unlock_irqrestore(&adc->lock, flags); } -static void jz4740_adc_irq_mask(unsigned int irq) +static void jz4740_adc_irq_mask(struct irq_data *data) { - struct jz4740_adc *adc = get_irq_chip_data(irq); - jz4740_adc_irq_set_masked(adc, irq, true); + struct jz4740_adc *adc = irq_data_get_irq_chip_data(data); + jz4740_adc_irq_set_masked(adc, data->irq, true); } -static void jz4740_adc_irq_unmask(unsigned int irq) +static void jz4740_adc_irq_unmask(struct irq_data *data) { - struct jz4740_adc *adc = get_irq_chip_data(irq); - jz4740_adc_irq_set_masked(adc, irq, false); + struct jz4740_adc *adc = irq_data_get_irq_chip_data(data); + jz4740_adc_irq_set_masked(adc, data->irq, false); } -static void jz4740_adc_irq_ack(unsigned int irq) +static void jz4740_adc_irq_ack(struct irq_data *data) { - struct jz4740_adc *adc = get_irq_chip_data(irq); - - irq -= adc->irq_base; + struct jz4740_adc *adc = irq_data_get_irq_chip_data(data); + unsigned int irq = data->irq - adc->irq_base; writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS); } static struct irq_chip jz4740_adc_irq_chip = { .name = "jz4740-adc", - .mask = jz4740_adc_irq_mask, - .unmask = jz4740_adc_irq_unmask, - .ack = jz4740_adc_irq_ack, + .irq_mask = jz4740_adc_irq_mask, + .irq_unmask = jz4740_adc_irq_unmask, + .irq_ack = jz4740_adc_irq_ack, }; static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 44695f5a1800..0e998dc4e7d8 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -407,16 +407,16 @@ static irqreturn_t max8925_tsc_irq(int irq, void *data) return IRQ_HANDLED; } -static void max8925_irq_lock(unsigned int irq) +static void max8925_irq_lock(struct irq_data *data) { - struct max8925_chip *chip = get_irq_chip_data(irq); + struct max8925_chip *chip = irq_data_get_irq_chip_data(data); 
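	/*
	 * Classic slow-bus pattern: the interrupt registers sit behind I2C,
	 * so irq_enable()/irq_disable() only touch cached state while this
	 * mutex is held, and irq_bus_sync_unlock() writes the accumulated
	 * changes back to the chip before dropping the lock.
	 */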
mutex_lock(&chip->irq_lock); } -static void max8925_irq_sync_unlock(unsigned int irq) +static void max8925_irq_sync_unlock(struct irq_data *data) { - struct max8925_chip *chip = get_irq_chip_data(irq); + struct max8925_chip *chip = irq_data_get_irq_chip_data(data); struct max8925_irq_data *irq_data; static unsigned char cache_chg[2] = {0xff, 0xff}; static unsigned char cache_on[2] = {0xff, 0xff}; @@ -492,25 +492,25 @@ static void max8925_irq_sync_unlock(unsigned int irq) mutex_unlock(&chip->irq_lock); } -static void max8925_irq_enable(unsigned int irq) +static void max8925_irq_enable(struct irq_data *data) { - struct max8925_chip *chip = get_irq_chip_data(irq); - max8925_irqs[irq - chip->irq_base].enable - = max8925_irqs[irq - chip->irq_base].offs; + struct max8925_chip *chip = irq_data_get_irq_chip_data(data); + max8925_irqs[data->irq - chip->irq_base].enable + = max8925_irqs[data->irq - chip->irq_base].offs; } -static void max8925_irq_disable(unsigned int irq) +static void max8925_irq_disable(struct irq_data *data) { - struct max8925_chip *chip = get_irq_chip_data(irq); - max8925_irqs[irq - chip->irq_base].enable = 0; + struct max8925_chip *chip = irq_data_get_irq_chip_data(data); + max8925_irqs[data->irq - chip->irq_base].enable = 0; } static struct irq_chip max8925_irq_chip = { .name = "max8925", - .bus_lock = max8925_irq_lock, - .bus_sync_unlock = max8925_irq_sync_unlock, - .enable = max8925_irq_enable, - .disable = max8925_irq_disable, + .irq_bus_lock = max8925_irq_lock, + .irq_bus_sync_unlock = max8925_irq_sync_unlock, + .irq_enable = max8925_irq_enable, + .irq_disable = max8925_irq_disable, }; static int max8925_irq_init(struct max8925_chip *chip, int irq, diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c index 45bfe77b639b..3903e1fbb334 100644 --- a/drivers/mfd/max8998-irq.c +++ b/drivers/mfd/max8998-irq.c @@ -102,16 +102,16 @@ irq_to_max8998_irq(struct max8998_dev *max8998, int irq) return &max8998_irqs[irq - max8998->irq_base]; } -static void max8998_irq_lock(unsigned int irq) +static void max8998_irq_lock(struct irq_data *data) { - struct max8998_dev *max8998 = get_irq_chip_data(irq); + struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data); mutex_lock(&max8998->irqlock); } -static void max8998_irq_sync_unlock(unsigned int irq) +static void max8998_irq_sync_unlock(struct irq_data *data) { - struct max8998_dev *max8998 = get_irq_chip_data(irq); + struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) { @@ -129,28 +129,30 @@ static void max8998_irq_sync_unlock(unsigned int irq) mutex_unlock(&max8998->irqlock); } -static void max8998_irq_unmask(unsigned int irq) +static void max8998_irq_unmask(struct irq_data *data) { - struct max8998_dev *max8998 = get_irq_chip_data(irq); - struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq); + struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data); + struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, + data->irq); max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; } -static void max8998_irq_mask(unsigned int irq) +static void max8998_irq_mask(struct irq_data *data) { - struct max8998_dev *max8998 = get_irq_chip_data(irq); - struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq); + struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data); + struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, + data->irq); max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; } 
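Nearly every hunk in this section applies the same mechanical conversion: irq_chip callbacks stop taking a bare unsigned int irq and instead receive a struct irq_data pointer, the driver's private data is recovered with irq_data_get_irq_chip_data(), the chip-relative index is computed as data->irq minus the chip's irq_base, and the struct irq_chip fields gain an irq_ prefix. A minimal sketch of the resulting shape, using hypothetical names (my_chip, my_irq_mask, my_irq_unmask, my_irq_chip) rather than code taken from any one driver here:

#include <linux/irq.h>
#include <linux/bitops.h>

struct my_chip {
	unsigned int irq_base;	/* first Linux IRQ number owned by this chip */
	unsigned long mask;	/* cached software mask, one bit per local IRQ */
};

static void my_irq_mask(struct irq_data *data)
{
	/* chip data is now fetched from irq_data, not from the raw IRQ number */
	struct my_chip *chip = irq_data_get_irq_chip_data(data);
	int offset = data->irq - chip->irq_base;

	chip->mask |= BIT(offset);
}

static void my_irq_unmask(struct irq_data *data)
{
	struct my_chip *chip = irq_data_get_irq_chip_data(data);
	int offset = data->irq - chip->irq_base;

	chip->mask &= ~BIT(offset);
}

static struct irq_chip my_irq_chip = {
	.name		= "my-chip",
	.irq_mask	= my_irq_mask,		/* was .mask */
	.irq_unmask	= my_irq_unmask,	/* was .unmask */
};

The max8998 chip definition that follows has exactly this shape, with the cached mask state kept in irq_masks_cur rather than a single bitmap.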
static struct irq_chip max8998_irq_chip = { .name = "max8998", - .bus_lock = max8998_irq_lock, - .bus_sync_unlock = max8998_irq_sync_unlock, - .mask = max8998_irq_mask, - .unmask = max8998_irq_unmask, + .irq_bus_lock = max8998_irq_lock, + .irq_bus_sync_unlock = max8998_irq_sync_unlock, + .irq_mask = max8998_irq_mask, + .irq_unmask = max8998_irq_unmask, }; static irqreturn_t max8998_irq_thread(int irq, void *data) @@ -181,6 +183,13 @@ static irqreturn_t max8998_irq_thread(int irq, void *data) return IRQ_HANDLED; } +int max8998_irq_resume(struct max8998_dev *max8998) +{ + if (max8998->irq && max8998->irq_base) + max8998_irq_thread(max8998->irq_base, max8998); + return 0; +} + int max8998_irq_init(struct max8998_dev *max8998) { int i; diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index bb9977bebe78..bbfe86732602 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -40,6 +42,14 @@ static struct mfd_cell max8998_devs[] = { }, }; +static struct mfd_cell lp3974_devs[] = { + { + .name = "lp3974-pmic", + }, { + .name = "lp3974-rtc", + }, +}; + int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); @@ -135,6 +145,7 @@ static int max8998_i2c_probe(struct i2c_client *i2c, if (pdata) { max8998->ono = pdata->ono; max8998->irq_base = pdata->irq_base; + max8998->wakeup = pdata->wakeup; } mutex_init(&max8998->iolock); @@ -143,9 +154,23 @@ static int max8998_i2c_probe(struct i2c_client *i2c, max8998_irq_init(max8998); - ret = mfd_add_devices(max8998->dev, -1, - max8998_devs, ARRAY_SIZE(max8998_devs), - NULL, 0); + pm_runtime_set_active(max8998->dev); + + switch (id->driver_data) { + case TYPE_LP3974: + ret = mfd_add_devices(max8998->dev, -1, + lp3974_devs, ARRAY_SIZE(lp3974_devs), + NULL, 0); + break; + case TYPE_MAX8998: + ret = mfd_add_devices(max8998->dev, -1, + max8998_devs, ARRAY_SIZE(max8998_devs), + NULL, 0); + break; + default: + ret = -EINVAL; + } + if (ret < 0) goto err; @@ -178,10 +203,113 @@ static const struct i2c_device_id max8998_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); +static int max8998_suspend(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + struct max8998_dev *max8998 = i2c_get_clientdata(i2c); + + if (max8998->wakeup) + set_irq_wake(max8998->irq, 1); + return 0; +} + +static int max8998_resume(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + struct max8998_dev *max8998 = i2c_get_clientdata(i2c); + + if (max8998->wakeup) + set_irq_wake(max8998->irq, 0); + /* + * In LP3974, if IRQ registers are not "read & clear" + * when it's set during sleep, the interrupt becomes + * disabled. 
+ */ + return max8998_irq_resume(i2c_get_clientdata(i2c)); +} + +struct max8998_reg_dump { + u8 addr; + u8 val; +}; +#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, } +struct max8998_reg_dump max8998_dump[] = { + SAVE_ITEM(MAX8998_REG_IRQM1), + SAVE_ITEM(MAX8998_REG_IRQM2), + SAVE_ITEM(MAX8998_REG_IRQM3), + SAVE_ITEM(MAX8998_REG_IRQM4), + SAVE_ITEM(MAX8998_REG_STATUSM1), + SAVE_ITEM(MAX8998_REG_STATUSM2), + SAVE_ITEM(MAX8998_REG_CHGR1), + SAVE_ITEM(MAX8998_REG_CHGR2), + SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1), + SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1), + SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3), + SAVE_ITEM(MAX8998_REG_ONOFF1), + SAVE_ITEM(MAX8998_REG_ONOFF2), + SAVE_ITEM(MAX8998_REG_ONOFF3), + SAVE_ITEM(MAX8998_REG_ONOFF4), + SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1), + SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2), + SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3), + SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4), + SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1), + SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2), + SAVE_ITEM(MAX8998_REG_LDO2_LDO3), + SAVE_ITEM(MAX8998_REG_LDO4), + SAVE_ITEM(MAX8998_REG_LDO5), + SAVE_ITEM(MAX8998_REG_LDO6), + SAVE_ITEM(MAX8998_REG_LDO7), + SAVE_ITEM(MAX8998_REG_LDO8_LDO9), + SAVE_ITEM(MAX8998_REG_LDO10_LDO11), + SAVE_ITEM(MAX8998_REG_LDO12), + SAVE_ITEM(MAX8998_REG_LDO13), + SAVE_ITEM(MAX8998_REG_LDO14), + SAVE_ITEM(MAX8998_REG_LDO15), + SAVE_ITEM(MAX8998_REG_LDO16), + SAVE_ITEM(MAX8998_REG_LDO17), + SAVE_ITEM(MAX8998_REG_BKCHR), + SAVE_ITEM(MAX8998_REG_LBCNFG1), + SAVE_ITEM(MAX8998_REG_LBCNFG2), +}; +/* Save registers before hibernation */ +static int max8998_freeze(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + int i; + + for (i = 0; i < ARRAY_SIZE(max8998_dump); i++) + max8998_read_reg(i2c, max8998_dump[i].addr, + &max8998_dump[i].val); + + return 0; +} + +/* Restore registers after hibernation */ +static int max8998_restore(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + int i; + + for (i = 0; i < ARRAY_SIZE(max8998_dump); i++) + max8998_write_reg(i2c, max8998_dump[i].addr, + max8998_dump[i].val); + + return 0; +} + +const struct dev_pm_ops max8998_pm = { + .suspend = max8998_suspend, + .resume = max8998_resume, + .freeze = max8998_freeze, + .restore = max8998_restore, +}; + static struct i2c_driver max8998_i2c_driver = { .driver = { .name = "max8998", .owner = THIS_MODULE, + .pm = &max8998_pm, }, .probe = max8998_i2c_probe, .remove = max8998_i2c_remove, diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index a2ac2ed6d64c..b9fcaf0004da 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -749,7 +749,7 @@ static int mc13xxx_probe(struct spi_device *spi) if (ret) { err_mask: err_revision: - mutex_unlock(&mc13xxx->lock); + mc13xxx_unlock(mc13xxx); dev_set_drvdata(&spi->dev, NULL); kfree(mc13xxx); return ret; diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index ec99f681e773..d83ad0f141af 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include static int mfd_add_device(struct device *parent, int id, @@ -82,6 +83,9 @@ static int mfd_add_device(struct device *parent, int id, if (ret) goto fail_res; + if (cell->pm_runtime_no_callbacks) + pm_runtime_no_callbacks(&pdev->dev); + kfree(res); return 0; diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index bc9275c12133..5de3a760ea1e 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -26,7 +26,7 @@ #include 
#include -#include +#include struct sm501_device { struct list_head list; @@ -745,11 +745,8 @@ static int sm501_register_device(struct sm501_devdata *sm, int ret; for (ptr = 0; ptr < pdev->num_resources; ptr++) { - printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n", - pdev->name, ptr, - pdev->resource[ptr].flags, - (unsigned long long)pdev->resource[ptr].start, - (unsigned long long)pdev->resource[ptr].end); + printk(KERN_DEBUG "%s[%d] %pR\n", + pdev->name, ptr, &pdev->resource[ptr]); } ret = platform_device_register(pdev); diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index b11487f1e1cb..3e5732b58c49 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -699,16 +699,16 @@ static irqreturn_t stmpe_irq(int irq, void *data) return IRQ_HANDLED; } -static void stmpe_irq_lock(unsigned int irq) +static void stmpe_irq_lock(struct irq_data *data) { - struct stmpe *stmpe = get_irq_chip_data(irq); + struct stmpe *stmpe = irq_data_get_irq_chip_data(data); mutex_lock(&stmpe->irq_lock); } -static void stmpe_irq_sync_unlock(unsigned int irq) +static void stmpe_irq_sync_unlock(struct irq_data *data) { - struct stmpe *stmpe = get_irq_chip_data(irq); + struct stmpe *stmpe = irq_data_get_irq_chip_data(data); struct stmpe_variant_info *variant = stmpe->variant; int num = DIV_ROUND_UP(variant->num_irqs, 8); int i; @@ -727,20 +727,20 @@ static void stmpe_irq_sync_unlock(unsigned int irq) mutex_unlock(&stmpe->irq_lock); } -static void stmpe_irq_mask(unsigned int irq) +static void stmpe_irq_mask(struct irq_data *data) { - struct stmpe *stmpe = get_irq_chip_data(irq); - int offset = irq - stmpe->irq_base; + struct stmpe *stmpe = irq_data_get_irq_chip_data(data); + int offset = data->irq - stmpe->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); stmpe->ier[regoffset] &= ~mask; } -static void stmpe_irq_unmask(unsigned int irq) +static void stmpe_irq_unmask(struct irq_data *data) { - struct stmpe *stmpe = get_irq_chip_data(irq); - int offset = irq - stmpe->irq_base; + struct stmpe *stmpe = irq_data_get_irq_chip_data(data); + int offset = data->irq - stmpe->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -749,10 +749,10 @@ static void stmpe_irq_unmask(unsigned int irq) static struct irq_chip stmpe_irq_chip = { .name = "stmpe", - .bus_lock = stmpe_irq_lock, - .bus_sync_unlock = stmpe_irq_sync_unlock, - .mask = stmpe_irq_mask, - .unmask = stmpe_irq_unmask, + .irq_bus_lock = stmpe_irq_lock, + .irq_bus_sync_unlock = stmpe_irq_sync_unlock, + .irq_mask = stmpe_irq_mask, + .irq_unmask = stmpe_irq_unmask, }; static int __devinit stmpe_irq_init(struct stmpe *stmpe) diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 006c121f3f0d..9caeb4ac6ea6 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -199,37 +199,37 @@ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) generic_handle_irq(irq_base + i); } -static void t7l66xb_irq_mask(unsigned int irq) +static void t7l66xb_irq_mask(struct irq_data *data) { - struct t7l66xb *t7l66xb = get_irq_chip_data(irq); + struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); - imr |= 1 << (irq - t7l66xb->irq_base); + imr |= 1 << (data->irq - t7l66xb->irq_base); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } -static void t7l66xb_irq_unmask(unsigned int irq) +static void t7l66xb_irq_unmask(struct irq_data *data) { - struct t7l66xb 
*t7l66xb = get_irq_chip_data(irq); + struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); - imr &= ~(1 << (irq - t7l66xb->irq_base)); + imr &= ~(1 << (data->irq - t7l66xb->irq_base)); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } static struct irq_chip t7l66xb_chip = { - .name = "t7l66xb", - .ack = t7l66xb_irq_mask, - .mask = t7l66xb_irq_mask, - .unmask = t7l66xb_irq_unmask, + .name = "t7l66xb", + .irq_ack = t7l66xb_irq_mask, + .irq_mask = t7l66xb_irq_mask, + .irq_unmask = t7l66xb_irq_unmask, }; /*--------------------------------------------------------------------------*/ diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 1ea80d8ad915..9a238633a54d 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -527,41 +527,41 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc) } } -static void tc6393xb_irq_ack(unsigned int irq) +static void tc6393xb_irq_ack(struct irq_data *data) { } -static void tc6393xb_irq_mask(unsigned int irq) +static void tc6393xb_irq_mask(struct irq_data *data) { - struct tc6393xb *tc6393xb = get_irq_chip_data(irq); + struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&tc6393xb->lock, flags); imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); - imr |= 1 << (irq - tc6393xb->irq_base); + imr |= 1 << (data->irq - tc6393xb->irq_base); tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); spin_unlock_irqrestore(&tc6393xb->lock, flags); } -static void tc6393xb_irq_unmask(unsigned int irq) +static void tc6393xb_irq_unmask(struct irq_data *data) { - struct tc6393xb *tc6393xb = get_irq_chip_data(irq); + struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&tc6393xb->lock, flags); imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); - imr &= ~(1 << (irq - tc6393xb->irq_base)); + imr &= ~(1 << (data->irq - tc6393xb->irq_base)); tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); spin_unlock_irqrestore(&tc6393xb->lock, flags); } static struct irq_chip tc6393xb_chip = { - .name = "tc6393xb", - .ack = tc6393xb_irq_ack, - .mask = tc6393xb_irq_mask, - .unmask = tc6393xb_irq_unmask, + .name = "tc6393xb", + .irq_ack = tc6393xb_irq_ack, + .irq_mask = tc6393xb_irq_mask, + .irq_unmask = tc6393xb_irq_unmask, }; static void tc6393xb_attach_irq(struct platform_device *dev) diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index 90187fe33e04..93d5fdf020c7 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -34,7 +34,7 @@ #include -#include +#include /*-------------------------------------------------------------------------*/ diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b4931ab34929..627cf577b16d 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -46,8 +46,6 @@ /* device id */ #define TPS6586X_VERSIONCRC 0xcd -#define TPS658621A_VERSIONCRC 0x15 -#define TPS658621C_VERSIONCRC 0x2c struct tps6586x_irq_data { u8 mask_reg; @@ -325,37 +323,37 @@ static int tps6586x_remove_subdevs(struct tps6586x *tps6586x) return device_for_each_child(tps6586x->dev, NULL, __remove_subdev); } -static void tps6586x_irq_lock(unsigned int irq) +static void tps6586x_irq_lock(struct irq_data *data) { - struct tps6586x *tps6586x = get_irq_chip_data(irq); + struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data); mutex_lock(&tps6586x->irq_lock); } -static void 
tps6586x_irq_enable(unsigned int irq) +static void tps6586x_irq_enable(struct irq_data *irq_data) { - struct tps6586x *tps6586x = get_irq_chip_data(irq); - unsigned int __irq = irq - tps6586x->irq_base; + struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data); + unsigned int __irq = irq_data->irq - tps6586x->irq_base; const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq]; tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask; tps6586x->irq_en |= (1 << __irq); } -static void tps6586x_irq_disable(unsigned int irq) +static void tps6586x_irq_disable(struct irq_data *irq_data) { - struct tps6586x *tps6586x = get_irq_chip_data(irq); + struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data); - unsigned int __irq = irq - tps6586x->irq_base; + unsigned int __irq = irq_data->irq - tps6586x->irq_base; const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq]; tps6586x->mask_reg[data->mask_reg] |= data->mask_mask; tps6586x->irq_en &= ~(1 << __irq); } -static void tps6586x_irq_sync_unlock(unsigned int irq) +static void tps6586x_irq_sync_unlock(struct irq_data *data) { - struct tps6586x *tps6586x = get_irq_chip_data(irq); + struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) { @@ -421,10 +419,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq, tps6586x->irq_base = irq_base; tps6586x->irq_chip.name = "tps6586x"; - tps6586x->irq_chip.enable = tps6586x_irq_enable; - tps6586x->irq_chip.disable = tps6586x_irq_disable; - tps6586x->irq_chip.bus_lock = tps6586x_irq_lock; - tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock; + tps6586x->irq_chip.irq_enable = tps6586x_irq_enable; + tps6586x->irq_chip.irq_disable = tps6586x_irq_disable; + tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock; + tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock; for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { int __irq = i + tps6586x->irq_base; @@ -498,11 +496,7 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client, return -EIO; } - if ((ret != TPS658621A_VERSIONCRC) && - (ret != TPS658621C_VERSIONCRC)) { - dev_err(&client->dev, "Unsupported chip ID: %x\n", ret); - return -ENODEV; - } + dev_info(&client->dev, "VERSIONCRC is %02x\n", ret); tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL); if (tps6586x == NULL) diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 12abd5b924b3..a35fa7dcbf53 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1003,7 +1003,7 @@ static int twl_remove(struct i2c_client *client) } /* NOTE: this driver only handles a single twl4030/tps659x0 chip */ -static int __init +static int __devinit twl_probe(struct i2c_client *client, const struct i2c_device_id *id) { int status; diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 5d3a1478004b..63a30e88908f 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -599,38 +599,38 @@ static void twl4030_sih_do_edge(struct work_struct *work) * completion, potentially including some re-ordering, of these requests. 
*/ -static void twl4030_sih_mask(unsigned irq) +static void twl4030_sih_mask(struct irq_data *data) { - struct sih_agent *sih = get_irq_chip_data(irq); + struct sih_agent *sih = irq_data_get_irq_chip_data(data); unsigned long flags; spin_lock_irqsave(&sih_agent_lock, flags); - sih->imr |= BIT(irq - sih->irq_base); + sih->imr |= BIT(data->irq - sih->irq_base); sih->imr_change_pending = true; queue_work(wq, &sih->mask_work); spin_unlock_irqrestore(&sih_agent_lock, flags); } -static void twl4030_sih_unmask(unsigned irq) +static void twl4030_sih_unmask(struct irq_data *data) { - struct sih_agent *sih = get_irq_chip_data(irq); + struct sih_agent *sih = irq_data_get_irq_chip_data(data); unsigned long flags; spin_lock_irqsave(&sih_agent_lock, flags); - sih->imr &= ~BIT(irq - sih->irq_base); + sih->imr &= ~BIT(data->irq - sih->irq_base); sih->imr_change_pending = true; queue_work(wq, &sih->mask_work); spin_unlock_irqrestore(&sih_agent_lock, flags); } -static int twl4030_sih_set_type(unsigned irq, unsigned trigger) +static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) { - struct sih_agent *sih = get_irq_chip_data(irq); - struct irq_desc *desc = irq_to_desc(irq); + struct sih_agent *sih = irq_data_get_irq_chip_data(data); + struct irq_desc *desc = irq_to_desc(data->irq); unsigned long flags; if (!desc) { - pr_err("twl4030: Invalid IRQ: %d\n", irq); + pr_err("twl4030: Invalid IRQ: %d\n", data->irq); return -EINVAL; } @@ -641,7 +641,7 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger) if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) { desc->status &= ~IRQ_TYPE_SENSE_MASK; desc->status |= trigger; - sih->edge_change |= BIT(irq - sih->irq_base); + sih->edge_change |= BIT(data->irq - sih->irq_base); queue_work(wq, &sih->edge_work); } spin_unlock_irqrestore(&sih_agent_lock, flags); @@ -650,9 +650,9 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger) static struct irq_chip twl4030_sih_irq_chip = { .name = "twl4030", - .mask = twl4030_sih_mask, - .unmask = twl4030_sih_unmask, - .set_type = twl4030_sih_set_type, + .irq_mask = twl4030_sih_mask, + .irq_unmask = twl4030_sih_unmask, + .irq_set_type = twl4030_sih_set_type, }; /*----------------------------------------------------------------------*/ diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 06c8955907e9..4082ed73613f 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -332,7 +332,7 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) */ twl6030_irq_chip = dummy_irq_chip; twl6030_irq_chip.name = "twl6030"; - twl6030_irq_chip.set_type = NULL; + twl6030_irq_chip.irq_set_type = NULL; for (i = irq_base; i < irq_end; i++) { set_irq_chip_and_handler(i, &twl6030_irq_chip, diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c index ebb059765edd..348052aa5dbf 100644 --- a/drivers/mfd/vx855.c +++ b/drivers/mfd/vx855.c @@ -112,7 +112,7 @@ out: return ret; } -static void vx855_remove(struct pci_dev *pdev) +static void __devexit vx855_remove(struct pci_dev *pdev) { mfd_remove_devices(&pdev->dev); pci_disable_device(pdev); diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 76cadcf3b1fe..3fe9a58fe6c7 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -1541,6 +1541,12 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev); break; + case WM8326: + parent = WM8326; + wm831x->num_gpio = 12; + dev_info(wm831x->dev, "WM8326 revision 
%c\n", 'A' + rev); + break; + default: dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret); ret = -EINVAL; @@ -1610,18 +1616,9 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) break; case WM8320: - ret = mfd_add_devices(wm831x->dev, -1, - wm8320_devs, ARRAY_SIZE(wm8320_devs), - NULL, 0); - break; - case WM8321: - ret = mfd_add_devices(wm831x->dev, -1, - wm8320_devs, ARRAY_SIZE(wm8320_devs), - NULL, 0); - break; - case WM8325: + case WM8326: ret = mfd_add_devices(wm831x->dev, -1, wm8320_devs, ARRAY_SIZE(wm8320_devs), NULL, wm831x->irq_base); diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c index 156b19859e81..3853fa8e7cc2 100644 --- a/drivers/mfd/wm831x-i2c.c +++ b/drivers/mfd/wm831x-i2c.c @@ -94,9 +94,9 @@ static int wm831x_i2c_remove(struct i2c_client *i2c) return 0; } -static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg) +static int wm831x_i2c_suspend(struct device *dev) { - struct wm831x *wm831x = i2c_get_clientdata(i2c); + struct wm831x *wm831x = dev_get_drvdata(dev); return wm831x_device_suspend(wm831x); } @@ -108,19 +108,23 @@ static const struct i2c_device_id wm831x_i2c_id[] = { { "wm8320", WM8320 }, { "wm8321", WM8321 }, { "wm8325", WM8325 }, + { "wm8326", WM8326 }, { } }; MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id); +static const struct dev_pm_ops wm831x_pm_ops = { + .suspend = wm831x_i2c_suspend, +}; static struct i2c_driver wm831x_i2c_driver = { .driver = { - .name = "wm831x", - .owner = THIS_MODULE, + .name = "wm831x", + .owner = THIS_MODULE, + .pm = &wm831x_pm_ops, }, .probe = wm831x_i2c_probe, .remove = wm831x_i2c_remove, - .suspend = wm831x_i2c_suspend, .id_table = wm831x_i2c_id, }; diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 294183b6260b..f7192d438aab 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -345,16 +345,16 @@ static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x, return &wm831x_irqs[irq - wm831x->irq_base]; } -static void wm831x_irq_lock(unsigned int irq) +static void wm831x_irq_lock(struct irq_data *data) { - struct wm831x *wm831x = get_irq_chip_data(irq); + struct wm831x *wm831x = irq_data_get_irq_chip_data(data); mutex_lock(&wm831x->irq_lock); } -static void wm831x_irq_sync_unlock(unsigned int irq) +static void wm831x_irq_sync_unlock(struct irq_data *data) { - struct wm831x *wm831x = get_irq_chip_data(irq); + struct wm831x *wm831x = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { @@ -371,28 +371,30 @@ static void wm831x_irq_sync_unlock(unsigned int irq) mutex_unlock(&wm831x->irq_lock); } -static void wm831x_irq_unmask(unsigned int irq) +static void wm831x_irq_unmask(struct irq_data *data) { - struct wm831x *wm831x = get_irq_chip_data(irq); - struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); + struct wm831x *wm831x = irq_data_get_irq_chip_data(data); + struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, + data->irq); wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; } -static void wm831x_irq_mask(unsigned int irq) +static void wm831x_irq_mask(struct irq_data *data) { - struct wm831x *wm831x = get_irq_chip_data(irq); - struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); + struct wm831x *wm831x = irq_data_get_irq_chip_data(data); + struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, + data->irq); wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; } -static int wm831x_irq_set_type(unsigned int irq, unsigned int 
type) +static int wm831x_irq_set_type(struct irq_data *data, unsigned int type) { - struct wm831x *wm831x = get_irq_chip_data(irq); - int val; + struct wm831x *wm831x = irq_data_get_irq_chip_data(data); + int val, irq; - irq = irq - wm831x->irq_base; + irq = data->irq - wm831x->irq_base; if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { /* Ignore internal-only IRQs */ @@ -421,12 +423,12 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type) } static struct irq_chip wm831x_irq_chip = { - .name = "wm831x", - .bus_lock = wm831x_irq_lock, - .bus_sync_unlock = wm831x_irq_sync_unlock, - .mask = wm831x_irq_mask, - .unmask = wm831x_irq_unmask, - .set_type = wm831x_irq_set_type, + .name = "wm831x", + .irq_bus_lock = wm831x_irq_lock, + .irq_bus_sync_unlock = wm831x_irq_sync_unlock, + .irq_mask = wm831x_irq_mask, + .irq_unmask = wm831x_irq_unmask, + .irq_set_type = wm831x_irq_set_type, }; /* The processing of the primary interrupt occurs in a thread so that @@ -515,6 +517,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) return 0; } + /* Try to flag /IRQ as a wake source; there are a number of + * unconditional wake sources in the PMIC so this isn't + * conditional but we don't actually care *too* much if it + * fails. + */ + ret = enable_irq_wake(irq); + if (ret != 0) { + dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n", + ret); + } + wm831x->irq = irq; wm831x->irq_base = pdata->irq_base; diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c index 2789b151b0f9..0a8f772be88c 100644 --- a/drivers/mfd/wm831x-spi.c +++ b/drivers/mfd/wm831x-spi.c @@ -81,6 +81,8 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi) type = WM8321; else if (strcmp(spi->modalias, "wm8325") == 0) type = WM8325; + else if (strcmp(spi->modalias, "wm8326") == 0) + type = WM8326; else { dev_err(&spi->dev, "Unknown device type\n"); return -EINVAL; @@ -184,6 +186,17 @@ static struct spi_driver wm8325_spi_driver = { .suspend = wm831x_spi_suspend, }; +static struct spi_driver wm8326_spi_driver = { + .driver = { + .name = "wm8326", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = wm831x_spi_probe, + .remove = __devexit_p(wm831x_spi_remove), + .suspend = wm831x_spi_suspend, +}; + static int __init wm831x_spi_init(void) { int ret; @@ -212,12 +225,17 @@ static int __init wm831x_spi_init(void) if (ret != 0) pr_err("Failed to register WM8325 SPI driver: %d\n", ret); + ret = spi_register_driver(&wm8326_spi_driver); + if (ret != 0) + pr_err("Failed to register WM8326 SPI driver: %d\n", ret); + return 0; } subsys_initcall(wm831x_spi_init); static void __exit wm831x_spi_exit(void) { + spi_unregister_driver(&wm8326_spi_driver); spi_unregister_driver(&wm8325_spi_driver); spi_unregister_driver(&wm8321_spi_driver); spi_unregister_driver(&wm8320_spi_driver); diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index f56c9adf9493..5839966ebd85 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c @@ -417,16 +417,16 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data) return IRQ_HANDLED; } -static void wm8350_irq_lock(unsigned int irq) +static void wm8350_irq_lock(struct irq_data *data) { - struct wm8350 *wm8350 = get_irq_chip_data(irq); + struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data); mutex_lock(&wm8350->irq_lock); } -static void wm8350_irq_sync_unlock(unsigned int irq) +static void wm8350_irq_sync_unlock(struct irq_data *data) { - struct wm8350 *wm8350 = get_irq_chip_data(irq); + struct wm8350 *wm8350 = 
irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) { @@ -442,28 +442,30 @@ static void wm8350_irq_sync_unlock(unsigned int irq) mutex_unlock(&wm8350->irq_lock); } -static void wm8350_irq_enable(unsigned int irq) +static void wm8350_irq_enable(struct irq_data *data) { - struct wm8350 *wm8350 = get_irq_chip_data(irq); - struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq); + struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data); + struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, + data->irq); wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask; } -static void wm8350_irq_disable(unsigned int irq) +static void wm8350_irq_disable(struct irq_data *data) { - struct wm8350 *wm8350 = get_irq_chip_data(irq); - struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq); + struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data); + struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, + data->irq); wm8350->irq_masks[irq_data->reg] |= irq_data->mask; } static struct irq_chip wm8350_irq_chip = { - .name = "wm8350", - .bus_lock = wm8350_irq_lock, - .bus_sync_unlock = wm8350_irq_sync_unlock, - .disable = wm8350_irq_disable, - .enable = wm8350_irq_enable, + .name = "wm8350", + .irq_bus_lock = wm8350_irq_lock, + .irq_bus_sync_unlock = wm8350_irq_sync_unlock, + .irq_disable = wm8350_irq_disable, + .irq_enable = wm8350_irq_enable, }; int wm8350_irq_init(struct wm8350 *wm8350, int irq, diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index b3b2aaf89dbe..41233c7fa581 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -169,8 +170,16 @@ out: EXPORT_SYMBOL_GPL(wm8994_set_bits); static struct mfd_cell wm8994_regulator_devs[] = { - { .name = "wm8994-ldo", .id = 1 }, - { .name = "wm8994-ldo", .id = 2 }, + { + .name = "wm8994-ldo", + .id = 1, + .pm_runtime_no_callbacks = true, + }, + { + .name = "wm8994-ldo", + .id = 2, + .pm_runtime_no_callbacks = true, + }, }; static struct resource wm8994_codec_resources[] = { @@ -200,6 +209,7 @@ static struct mfd_cell wm8994_devs[] = { .name = "wm8994-gpio", .num_resources = ARRAY_SIZE(wm8994_gpio_resources), .resources = wm8994_gpio_resources, + .pm_runtime_no_callbacks = true, }, }; @@ -218,8 +228,20 @@ static const char *wm8994_main_supplies[] = { "SPKVDD2", }; +static const char *wm8958_main_supplies[] = { + "DBVDD1", + "DBVDD2", + "DBVDD3", + "DCVDD", + "AVDD1", + "AVDD2", + "CPVDD", + "SPKVDD1", + "SPKVDD2", +}; + #ifdef CONFIG_PM -static int wm8994_device_suspend(struct device *dev) +static int wm8994_suspend(struct device *dev) { struct wm8994 *wm8994 = dev_get_drvdata(dev); int ret; @@ -239,7 +261,7 @@ static int wm8994_device_suspend(struct device *dev) if (ret < 0) dev_err(dev, "Failed to save LDO registers: %d\n", ret); - ret = regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies), + ret = regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(dev, "Failed to disable supplies: %d\n", ret); @@ -249,12 +271,12 @@ static int wm8994_device_suspend(struct device *dev) return 0; } -static int wm8994_device_resume(struct device *dev) +static int wm8994_resume(struct device *dev) { struct wm8994 *wm8994 = dev_get_drvdata(dev); int ret; - ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies), + ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(dev, "Failed to enable supplies: %d\n", ret); @@ -305,9 
+327,10 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo) /* * Instantiate the generic non-control parts of the device. */ -static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq) +static int wm8994_device_init(struct wm8994 *wm8994, int irq) { struct wm8994_pdata *pdata = wm8994->dev->platform_data; + const char *devname; int ret, i; mutex_init(&wm8994->io_lock); @@ -323,25 +346,48 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq) goto err; } + switch (wm8994->type) { + case WM8994: + wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies); + break; + case WM8958: + wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies); + break; + default: + BUG(); + return -EINVAL; + } + wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) * - ARRAY_SIZE(wm8994_main_supplies), + wm8994->num_supplies, GFP_KERNEL); if (!wm8994->supplies) { ret = -ENOMEM; goto err; } - for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++) - wm8994->supplies[i].supply = wm8994_main_supplies[i]; - - ret = regulator_bulk_get(wm8994->dev, ARRAY_SIZE(wm8994_main_supplies), + switch (wm8994->type) { + case WM8994: + for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++) + wm8994->supplies[i].supply = wm8994_main_supplies[i]; + break; + case WM8958: + for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++) + wm8994->supplies[i].supply = wm8958_main_supplies[i]; + break; + default: + BUG(); + return -EINVAL; + } + + ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); goto err_supplies; } - ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies), + ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); @@ -353,7 +399,22 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq) dev_err(wm8994->dev, "Failed to read ID register\n"); goto err_enable; } - if (ret != 0x8994) { + switch (ret) { + case 0x8994: + devname = "WM8994"; + if (wm8994->type != WM8994) + dev_warn(wm8994->dev, "Device registered as type %d\n", + wm8994->type); + wm8994->type = WM8994; + break; + case 0x8958: + devname = "WM8958"; + if (wm8994->type != WM8958) + dev_warn(wm8994->dev, "Device registered as type %d\n", + wm8994->type); + wm8994->type = WM8958; + break; + default: dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n", ret); ret = -EINVAL; @@ -370,14 +431,16 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq) switch (ret) { case 0: case 1: - dev_warn(wm8994->dev, "revision %c not fully supported\n", - 'A' + ret); + if (wm8994->type == WM8994) + dev_warn(wm8994->dev, + "revision %c not fully supported\n", + 'A' + ret); break; default: - dev_info(wm8994->dev, "revision %c\n", 'A' + ret); break; } + dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret); if (pdata) { wm8994->irq_base = pdata->irq_base; @@ -418,15 +481,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq) goto err_irq; } + pm_runtime_enable(wm8994->dev); + pm_runtime_resume(wm8994->dev); + return 0; err_irq: wm8994_irq_exit(wm8994); err_enable: - regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies), + regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); err_get: - regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies); + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); 
err_supplies: kfree(wm8994->supplies); err: @@ -437,11 +503,12 @@ err: static void wm8994_device_exit(struct wm8994 *wm8994) { + pm_runtime_disable(wm8994->dev); mfd_remove_devices(wm8994->dev); wm8994_irq_exit(wm8994); - regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies), + regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); - regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies); + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); kfree(wm8994->supplies); kfree(wm8994); } @@ -506,8 +573,9 @@ static int wm8994_i2c_probe(struct i2c_client *i2c, wm8994->read_dev = wm8994_i2c_read_device; wm8994->write_dev = wm8994_i2c_write_device; wm8994->irq = i2c->irq; + wm8994->type = id->driver_data; - return wm8994_device_init(wm8994, id->driver_data, i2c->irq); + return wm8994_device_init(wm8994, i2c->irq); } static int wm8994_i2c_remove(struct i2c_client *i2c) @@ -519,36 +587,23 @@ static int wm8994_i2c_remove(struct i2c_client *i2c) return 0; } -#ifdef CONFIG_PM -static int wm8994_i2c_suspend(struct i2c_client *i2c, pm_message_t state) -{ - return wm8994_device_suspend(&i2c->dev); -} - -static int wm8994_i2c_resume(struct i2c_client *i2c) -{ - return wm8994_device_resume(&i2c->dev); -} -#else -#define wm8994_i2c_suspend NULL -#define wm8994_i2c_resume NULL -#endif - static const struct i2c_device_id wm8994_i2c_id[] = { - { "wm8994", 0 }, + { "wm8994", WM8994 }, + { "wm8958", WM8958 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id); +UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL); + static struct i2c_driver wm8994_i2c_driver = { .driver = { - .name = "wm8994", - .owner = THIS_MODULE, + .name = "wm8994", + .owner = THIS_MODULE, + .pm = &wm8994_pm_ops, }, .probe = wm8994_i2c_probe, .remove = wm8994_i2c_remove, - .suspend = wm8994_i2c_suspend, - .resume = wm8994_i2c_resume, .id_table = wm8994_i2c_id, }; diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c index 8400eb1ee5db..29e8faf9c01c 100644 --- a/drivers/mfd/wm8994-irq.c +++ b/drivers/mfd/wm8994-irq.c @@ -156,16 +156,16 @@ static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994, return &wm8994_irqs[irq - wm8994->irq_base]; } -static void wm8994_irq_lock(unsigned int irq) +static void wm8994_irq_lock(struct irq_data *data) { - struct wm8994 *wm8994 = get_irq_chip_data(irq); + struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data); mutex_lock(&wm8994->irq_lock); } -static void wm8994_irq_sync_unlock(unsigned int irq) +static void wm8994_irq_sync_unlock(struct irq_data *data) { - struct wm8994 *wm8994 = get_irq_chip_data(irq); + struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) { @@ -182,28 +182,30 @@ static void wm8994_irq_sync_unlock(unsigned int irq) mutex_unlock(&wm8994->irq_lock); } -static void wm8994_irq_unmask(unsigned int irq) +static void wm8994_irq_unmask(struct irq_data *data) { - struct wm8994 *wm8994 = get_irq_chip_data(irq); - struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq); + struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data); + struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, + data->irq); wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; } -static void wm8994_irq_mask(unsigned int irq) +static void wm8994_irq_mask(struct irq_data *data) { - struct wm8994 *wm8994 = get_irq_chip_data(irq); - struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq); + struct wm8994 *wm8994 = 
irq_data_get_irq_chip_data(data); + struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, + data->irq); wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; } static struct irq_chip wm8994_irq_chip = { - .name = "wm8994", - .bus_lock = wm8994_irq_lock, - .bus_sync_unlock = wm8994_irq_sync_unlock, - .mask = wm8994_irq_mask, - .unmask = wm8994_irq_unmask, + .name = "wm8994", + .irq_bus_lock = wm8994_irq_lock, + .irq_bus_sync_unlock = wm8994_irq_sync_unlock, + .irq_mask = wm8994_irq_mask, + .irq_unmask = wm8994_irq_unmask, }; /* The processing of the primary interrupt occurs in a thread so that diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 4d073f1e4502..cc8e49db45fe 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -64,7 +64,7 @@ config ATMEL_PWM config AB8500_PWM bool "AB8500 PWM support" - depends on AB8500_CORE + depends on AB8500_CORE && ARCH_U8500 select HAVE_PWM help This driver exports functions to enable/disble/config/free Pulse @@ -402,7 +402,7 @@ config TI_DAC7512 DAC7512 16-bit digital-to-analog converter. This driver can also be built as a module. If so, the module - will be calles ti_dac7512. + will be called ti_dac7512. config VMWARE_BALLOON tristate "VMware Balloon Driver" diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index 9e3879ef58f2..fe8616a8d287 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c @@ -313,7 +313,7 @@ static int __init charlcd_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work); schedule_delayed_work(&lcd->init_work, 0); - dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n", + dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n", lcd->phybase); return 0; diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index 6f6218061b0d..d02d302ee6d5 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -16,12 +16,11 @@ #include #include #include -#include +#include #include #include #define DRV_NAME "cs5535-mfgpt" -#define MFGPT_BAR 2 static int mfgpt_reset_timers; module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); @@ -37,7 +36,7 @@ static struct cs5535_mfgpt_chip { DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); resource_size_t base; - struct pci_dev *pdev; + struct platform_device *pdev; spinlock_t lock; int initialized; } cs5535_mfgpt_chip; @@ -290,10 +289,10 @@ static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) return timers; } -static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_id) +static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev) { - int err, t; + struct resource *res; + int err = -EIO, t; /* There are two ways to get the MFGPT base address; one is by * fetching it from MSR_LBAR_MFGPT, the other is by reading the @@ -302,29 +301,27 @@ static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, * it turns out to be unreliable in the face of crappy BIOSes, we * can always go back to using MSRs.. 
*/ - err = pci_enable_device_io(pdev); - if (err) { - dev_err(&pdev->dev, "can't enable device IO\n"); + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); goto done; } - err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR); + if (!request_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "can't request region\n"); goto done; } /* set up the driver-specific struct */ - cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); + cs5535_mfgpt_chip.base = res->start; cs5535_mfgpt_chip.pdev = pdev; spin_lock_init(&cs5535_mfgpt_chip.lock); - dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, - (unsigned long long) cs5535_mfgpt_chip.base); + dev_info(&pdev->dev, "reserved resource region %pR\n", res); /* detect the available timers */ t = scan_timers(&cs5535_mfgpt_chip); - dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); + dev_info(&pdev->dev, "%d MFGPT timers available\n", t); cs5535_mfgpt_chip.initialized = 1; return 0; @@ -332,47 +329,18 @@ done: return err; } -static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, - { 0, }, +static struct platform_driver cs5535_mfgpt_drv = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = cs5535_mfgpt_probe, }; -MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl); -/* - * Just like with the cs5535-gpio driver, we can't use the standard PCI driver - * registration stuff. It only allows only one driver to bind to each PCI - * device, and we want the GPIO and MFGPT drivers to be able to share a PCI - * device. Instead, we manually scan for the PCI device, request a single - * region, and keep track of the devices that we're using. - */ - -static int __init cs5535_mfgpt_scan_pci(void) -{ - struct pci_dev *pdev; - int err = -ENODEV; - int i; - - for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) { - pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor, - cs5535_mfgpt_pci_tbl[i].device, NULL); - if (pdev) { - err = cs5535_mfgpt_probe(pdev, - &cs5535_mfgpt_pci_tbl[i]); - if (err) - pci_dev_put(pdev); - - /* we only support a single CS5535/6 southbridge */ - break; - } - } - - return err; -} static int __init cs5535_mfgpt_init(void) { - return cs5535_mfgpt_scan_pci(); + return platform_driver_register(&cs5535_mfgpt_drv); } module_init(cs5535_mfgpt_init); @@ -380,3 +348,4 @@ module_init(cs5535_mfgpt_init); MODULE_AUTHOR("Andres Salomon "); MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 2a1e804a71aa..4d2ea8e80140 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -45,7 +45,7 @@ MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); -MODULE_VERSION("1.2.1.1-k"); +MODULE_VERSION("1.2.1.2-k"); MODULE_ALIAS("dmi:*:svnVMware*:*"); MODULE_ALIAS("vmware_vmmemctl"); MODULE_LICENSE("GPL"); @@ -315,7 +315,8 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) * fear that guest will need it. Host may reject some pages, we need to * check the return value and maybe submit a different page. 
*/ -static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) +static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, + unsigned int *hv_status) { unsigned long status, dummy; u32 pfn32; @@ -326,7 +327,7 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) STATS_INC(b->stats.lock); - status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); + *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); if (vmballoon_check_status(b, status)) return true; @@ -410,6 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) { struct page *page; gfp_t flags; + unsigned int hv_status; bool locked = false; do { @@ -429,11 +431,12 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) } /* inform monitor */ - locked = vmballoon_send_lock_page(b, page_to_pfn(page)); + locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); if (!locked) { STATS_INC(b->stats.refused_alloc); - if (b->reset_required) { + if (hv_status == VMW_BALLOON_ERROR_RESET || + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { __free_page(page); return -EIO; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 217f82037fc1..bfc8a8ae55df 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -257,7 +257,7 @@ static u32 get_card_status(struct mmc_card *card, struct request *req) cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) - printk(KERN_ERR "%s: error %d sending status comand", + printk(KERN_ERR "%s: error %d sending status command", req->rq_disk->disk_name, err); return cmd.resp[0]; } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index c22a4c039988..afe8c6fa166a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -501,7 +501,7 @@ config MMC_SH_MMCIF tristate "SuperH Internal MMCIF support" depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) help - This selects the MMC Host Interface controler (MMCIF). + This selects the MMC Host Interface controller (MMCIF). This driver supports MMCIF in sh7724/sh7757/sh7372. diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index 41e5a60493ad..ef72e874ca36 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -192,7 +192,7 @@ static inline void SEND_STOP(struct au1xmmc_host *host) au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); au_sync(); - /* Send the stop commmand */ + /* Send the stop command */ au_writel(STOP_CMD, HOST_CMD(host)); } diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index fa19d849a920..dd84124f4209 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -13,6 +13,7 @@ * your option) any later version. */ +#include #include #include #include @@ -20,8 +21,12 @@ #include #include #include +#include +#include #include +#ifdef CONFIG_PPC #include +#endif #include "sdhci-of.h" #include "sdhci.h" @@ -112,7 +117,11 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) return true; /* Old device trees don't have the wp-inverted property. 
*/ +#ifdef CONFIG_PPC return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); +#else + return false; +#endif } static int __devinit sdhci_of_probe(struct platform_device *ofdev, diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index f472c2714eb8..bbc298fd2a15 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -446,7 +446,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev, mmc->max_seg_size = 1024 * 512; mmc->max_blk_size = 512; - /* reset the controler */ + /* reset the controller */ if (sdricoh_reset(host)) { dev_dbg(dev, "could not reset\n"); result = -EIO; @@ -478,7 +478,7 @@ static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev) dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); - /* search pci cardbus bridge that contains the mmc controler */ + /* search pci cardbus bridge that contains the mmc controller */ /* the io region is already claimed by yenta_socket... */ while ((pci_dev = pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 2cf0cc6a4189..f29a6f9df6e7 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) if (dev->blkdev) { invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1); - close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE); + blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } kfree(dev); @@ -234,6 +234,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) /* FIXME: ensure that mtd->size % erase_size == 0 */ static struct block2mtd_dev *add_device(char *devname, int erase_size) { + const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; struct block_device *bdev; struct block2mtd_dev *dev; char *name; @@ -246,7 +247,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) return NULL; /* Get a handle on the device */ - bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL); + bdev = blkdev_get_by_path(devname, mode, dev); #ifndef MODULE if (IS_ERR(bdev)) { @@ -254,9 +255,8 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) to resolve the device name by other means. 
*/ dev_t devt = name_to_dev_t(devname); - if (devt) { - bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ); - } + if (devt) + bdev = blkdev_get_by_dev(devt, mode, dev); } #endif diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index f511dd15fd31..ee4bb3330bdf 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1134,7 +1134,7 @@ static const struct file_operations mtd_fops = { static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC); + return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC); } static struct file_system_type mtd_inodefs_type = { diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 1ee72f3f0512..c948150079be 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -307,6 +307,11 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, unsigned long l1_cpy, l2_cpy; char *dst; + if (reason != KMSG_DUMP_OOPS && + reason != KMSG_DUMP_PANIC && + reason != KMSG_DUMP_KEXEC) + return; + /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 1f75a1b1f7c3..31bf376b82a0 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -821,7 +821,7 @@ retry: * * Wait for command done. This is a helper function for nand_wait used when * we are in interrupt context. May happen when in panic and trying to write - * an oops trough mtdoops. + * an oops through mtdoops. */ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip, unsigned long timeo) diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index 54c6d849cf25..62d6f88cbab5 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) } /** - * ks8695_get_settings - Get device-specific settings. + * ks8695_wan_get_settings - Get device-specific settings. * @ndev: The network device to read settings from * @cmd: The ethtool structure to read into */ static int -ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; @@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) SUPPORTED_TP | SUPPORTED_MII); cmd->transceiver = XCVR_INTERNAL; - /* Port specific extras */ - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - cmd->phy_address = 0; - /* not supported for HPNA */ - cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + cmd->port = PORT_MII; + cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); + cmd->phy_address = 0; - /* BUG: Erm, dtype hpna implies no phy regs */ - /* - ctrl = readl(KS8695_MISC_VA + KS8695_HMC); - cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & HMC_HDS) ? 
DUPLEX_FULL : DUPLEX_HALF; - */ - return -EOPNOTSUPP; - case KS8695_DTYPE_WAN: - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); - cmd->phy_address = 0; + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + if ((ctrl & WMC_WAND) == 0) { + /* auto-negotiation is enabled */ + cmd->advertising |= ADVERTISED_Autoneg; + if (ctrl & WMC_WANA100F) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (ctrl & WMC_WANA100H) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (ctrl & WMC_WANA10F) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (ctrl & WMC_WANA10H) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (ctrl & WMC_WANAP) + cmd->advertising |= ADVERTISED_Pause; + cmd->autoneg = AUTONEG_ENABLE; + + cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; + cmd->duplex = (ctrl & WMC_WDS) ? + DUPLEX_FULL : DUPLEX_HALF; + } else { + /* auto-negotiation is disabled */ + cmd->autoneg = AUTONEG_DISABLE; - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - if ((ctrl & WMC_WAND) == 0) { - /* auto-negotiation is enabled */ - cmd->advertising |= ADVERTISED_Autoneg; - if (ctrl & WMC_WANA100F) - cmd->advertising |= ADVERTISED_100baseT_Full; - if (ctrl & WMC_WANA100H) - cmd->advertising |= ADVERTISED_100baseT_Half; - if (ctrl & WMC_WANA10F) - cmd->advertising |= ADVERTISED_10baseT_Full; - if (ctrl & WMC_WANA10H) - cmd->advertising |= ADVERTISED_10baseT_Half; - if (ctrl & WMC_WANAP) - cmd->advertising |= ADVERTISED_Pause; - cmd->autoneg = AUTONEG_ENABLE; - - cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & WMC_WDS) ? - DUPLEX_FULL : DUPLEX_HALF; - } else { - /* auto-negotiation is disabled */ - cmd->autoneg = AUTONEG_DISABLE; - - cmd->speed = (ctrl & WMC_WANF100) ? - SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & WMC_WANFF) ? - DUPLEX_FULL : DUPLEX_HALF; - } - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; + cmd->speed = (ctrl & WMC_WANF100) ? + SPEED_100 : SPEED_10; + cmd->duplex = (ctrl & WMC_WANFF) ? + DUPLEX_FULL : DUPLEX_HALF; } return 0; } /** - * ks8695_set_settings - Set device-specific settings. + * ks8695_wan_set_settings - Set device-specific settings. * @ndev: The network device to configure * @cmd: The settings to configure */ static int -ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; @@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ADVERTISED_100baseT_Full)) == 0) return -EINVAL; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* HPNA does not support auto-negotiation. 
*/ - return -EINVAL; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | - WMC_WANA10F | WMC_WANA10H); - if (cmd->advertising & ADVERTISED_100baseT_Full) - ctrl |= WMC_WANA100F; - if (cmd->advertising & ADVERTISED_100baseT_Half) - ctrl |= WMC_WANA100H; - if (cmd->advertising & ADVERTISED_10baseT_Full) - ctrl |= WMC_WANA10F; - if (cmd->advertising & ADVERTISED_10baseT_Half) - ctrl |= WMC_WANA10H; - - /* force a re-negotiation */ - ctrl |= WMC_WANR; - writel(ctrl, ksp->phyiface_regs + KS8695_WMC); - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | + WMC_WANA10F | WMC_WANA10H); + if (cmd->advertising & ADVERTISED_100baseT_Full) + ctrl |= WMC_WANA100F; + if (cmd->advertising & ADVERTISED_100baseT_Half) + ctrl |= WMC_WANA100H; + if (cmd->advertising & ADVERTISED_10baseT_Full) + ctrl |= WMC_WANA10F; + if (cmd->advertising & ADVERTISED_10baseT_Half) + ctrl |= WMC_WANA10H; + + /* force a re-negotiation */ + ctrl |= WMC_WANR; + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); } else { - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* BUG: dtype_hpna implies no phy registers */ - /* - ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); - - ctrl &= ~(HMC_HSS | HMC_HDS); - if (cmd->speed == SPEED_100) - ctrl |= HMC_HSS; - if (cmd->duplex == DUPLEX_FULL) - ctrl |= HMC_HDS; - - __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC); - */ - return -EOPNOTSUPP; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - /* disable auto-negotiation */ - ctrl |= WMC_WAND; - ctrl &= ~(WMC_WANF100 | WMC_WANFF); - - if (cmd->speed == SPEED_100) - ctrl |= WMC_WANF100; - if (cmd->duplex == DUPLEX_FULL) - ctrl |= WMC_WANFF; - - writel(ctrl, ksp->phyiface_regs + KS8695_WMC); - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + /* disable auto-negotiation */ + ctrl |= WMC_WAND; + ctrl &= ~(WMC_WANF100 | WMC_WANFF); + + if (cmd->speed == SPEED_100) + ctrl |= WMC_WANF100; + if (cmd->duplex == DUPLEX_FULL) + ctrl |= WMC_WANFF; + + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); } return 0; } /** - * ks8695_nwayreset - Restart the autonegotiation on the port. + * ks8695_wan_nwayreset - Restart the autonegotiation on the port. * @ndev: The network device to restart autoneotiation on */ static int -ks8695_nwayreset(struct net_device *ndev) +ks8695_wan_nwayreset(struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* No phy means no autonegotiation on hpna */ - return -EINVAL; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - if ((ctrl & WMC_WAND) == 0) - writel(ctrl | WMC_WANR, - ksp->phyiface_regs + KS8695_WMC); - else - /* auto-negotiation not enabled */ - return -EINVAL; - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } - - return 0; -} + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); -/** - * ks8695_get_link - Retrieve link status of network interface - * @ndev: The network interface to retrive the link status of. 
- */ -static u32 -ks8695_get_link(struct net_device *ndev) -{ - struct ks8695_priv *ksp = netdev_priv(ndev); - u32 ctrl; + if ((ctrl & WMC_WAND) == 0) + writel(ctrl | WMC_WANR, + ksp->phyiface_regs + KS8695_WMC); + else + /* auto-negotiation not enabled */ + return -EINVAL; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* HPNA always has link */ - return 1; - case KS8695_DTYPE_WAN: - /* WAN we can read the PHY for */ - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - return ctrl & WMC_WLS; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } return 0; } /** - * ks8695_get_pause - Retrieve network pause/flow-control advertising + * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising * @ndev: The device to retrieve settings from * @param: The structure to fill out with the information */ static void -ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) +ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* No phy link on hpna to configure */ - return; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - /* advertise Pause */ - param->autoneg = (ctrl & WMC_WANAP); + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - /* current Rx Flow-control */ - ctrl = ks8695_readreg(ksp, KS8695_DRXC); - param->rx_pause = (ctrl & DRXC_RFCE); + /* advertise Pause */ + param->autoneg = (ctrl & WMC_WANAP); - /* current Tx Flow-control */ - ctrl = ks8695_readreg(ksp, KS8695_DTXC); - param->tx_pause = (ctrl & DTXC_TFCE); - break; - case KS8695_DTYPE_LAN: - /* The LAN's "phy" is a direct-attached switch */ - return; - } -} + /* current Rx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + param->rx_pause = (ctrl & DRXC_RFCE); -/** - * ks8695_set_pause - Configure pause/flow-control - * @ndev: The device to configure - * @param: The pause parameters to set - * - * TODO: Implement this - */ -static int -ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param) -{ - return -EOPNOTSUPP; + /* current Tx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DTXC); + param->tx_pause = (ctrl & DTXC_TFCE); } /** @@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) static const struct ethtool_ops ks8695_ethtool_ops = { .get_msglevel = ks8695_get_msglevel, .set_msglevel = ks8695_set_msglevel, - .get_settings = ks8695_get_settings, - .set_settings = ks8695_set_settings, - .nway_reset = ks8695_nwayreset, - .get_link = ks8695_get_link, - .get_pauseparam = ks8695_get_pause, - .set_pauseparam = ks8695_set_pause, + .get_drvinfo = ks8695_get_drvinfo, +}; + +static const struct ethtool_ops ks8695_wan_ethtool_ops = { + .get_msglevel = ks8695_get_msglevel, + .set_msglevel = ks8695_set_msglevel, + .get_settings = ks8695_wan_get_settings, + .set_settings = ks8695_wan_set_settings, + .nway_reset = ks8695_wan_nwayreset, + .get_link = ethtool_op_get_link, + .get_pauseparam = ks8695_wan_get_pause, .get_drvinfo = ks8695_get_drvinfo, }; @@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev) /* driver system setup */ ndev->netdev_ops = &ks8695_netdev_ops; - SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); ndev->watchdog_timeo = msecs_to_jiffies(watchdog); netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); @@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev) if (ksp->phyiface_regs && ksp->link_irq == -1) { ks8695_init_switch(ksp); ksp->dtype = 
KS8695_DTYPE_LAN; + SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); } else if (ksp->phyiface_regs && ksp->link_irq != -1) { ks8695_init_wan_phy(ksp); ksp->dtype = KS8695_DTYPE_WAN; + SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); } else { /* No initialisation since HPNA does not have a PHY */ ksp->dtype = KS8695_DTYPE_HPNA; + SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); } /* And bring up the net_device with the net core */ diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 0b9fc5173aef..22abfb39d813 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -1284,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev) { u32 emac_hashhi, emac_hashlo; struct netdev_hw_addr *ha; - char *addrs; u32 crc; emac_hashhi = emac_hashlo = 0; netdev_for_each_mc_addr(ha, dev) { - addrs = ha->addr; - - /* skip non-multicast addresses */ - if (!(*addrs & 1)) - continue; - - crc = ether_crc(ETH_ALEN, addrs); + crc = ether_crc(ETH_ALEN, ha->addr); crc >>= 26; if (crc & 0x20) diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c index 99be5ae91991..142d6047da27 100644 --- a/drivers/net/bna/bnad_ethtool.c +++ b/drivers/net/bna/bnad_ethtool.c @@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); if (ioc_attr) { - memset(ioc_attr, 0, sizeof(*ioc_attr)); spin_lock_irqsave(&bnad->bna_lock, flags); bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); spin_unlock_irqrestore(&bnad->bna_lock, flags); diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 6a858a29db56..a6cd335c9436 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -1415,12 +1415,12 @@ struct bnx2x_func_init_params { else /* skip rx queue - * if FCOE l2 support is diabled and this is the fcoe L2 queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue */ #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) /* skip tx queue - * if FCOE l2 support is diabled and this is the fcoe L2 queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue */ #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 84e1af4d65e1..8cdcf5b39d1e 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -5037,7 +5037,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct bnx2x_ilt)); - /* initalize dummy TM client */ + /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 38ef7ca9f21d..c939683e3d61 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -1633,7 +1633,7 @@ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ #define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses - in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 - + in this register. address 0 - timer 1; address 1 - timer 2, ... 
address 7 - timer 8 */ #define MISC_REG_SW_TIMER_VAL 0xa5c0 /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 48cf24ff4e6f..171782e2bb39 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -840,7 +840,7 @@ static int ad_lacpdu_send(struct port *port) lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); - /* Note: source addres is set to be the member's PERMANENT address, + /* Note: source address is set to be the member's PERMANENT address, because we use it to identify loopback lacpdus in receive. */ memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; @@ -881,7 +881,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) marker_header = (struct bond_marker_header *)skb_put(skb, length); memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); - /* Note: source addres is set to be the member's PERMANENT address, + /* Note: source address is set to be the member's PERMANENT address, because we use it to identify loopback MARKERs in receive. */ memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); marker_header->hdr.h_proto = PKT_TYPE_LACPDU; @@ -1916,7 +1916,7 @@ int bond_3ad_bind_slave(struct slave *slave) return -1; } - //check that the slave has not been intialized yet. + //check that the slave has not been initialized yet. if (SLAVE_AD_INFO(slave).port.slave != slave) { // port initialization diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 7206ab2cbbf8..3437613f0454 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ int mac_off = 0; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) const unsigned char *addr; #endif @@ -3354,7 +3354,7 @@ use_random_mac_addr: if (found & VPD_FOUND_MAC) goto done; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) addr = of_get_property(cp->of_node, "local-mac-address", NULL); if (addr != NULL) { memcpy(dev_addr, addr, 6); @@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : cassini_debug; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) cp->of_node = pci_device_to_OF_node(pdev); #endif diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 63ebf76d2390..8a43c7e19701 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c @@ -556,7 +556,7 @@ struct chelsio_vpd_t { #define EEPROM_MAX_POLL 4 /* - * Read SEEPROM. A zero is written to the flag register when the addres is + * Read SEEPROM. A zero is written to the flag register when the address is * written to the Control register. The hardware device will set the flag to a * one when 4B have been transferred to the Data register. */ diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c index a8766fb2f9ab..e13b7fe9d082 100644 --- a/drivers/net/cxgb3/mc5.c +++ b/drivers/net/cxgb3/mc5.c @@ -318,7 +318,7 @@ static void mc5_dbgi_mode_disable(const struct mc5 *mc5) /* * Initialization that requires the OS and protocol layers to already - * be intialized goes here. + * be initialized goes here. 
*/ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, unsigned int nroutes) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index ec8579a0a808..d55db6b38e7b 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -607,7 +607,7 @@ struct t3_vpd { * * Read a 32-bit word from a location in VPD EEPROM using the card's PCI * VPD ROM capability. A zero is written to the flag bit when the - * addres is written to the control register. The hardware device will + * address is written to the control register. The hardware device will * set the flag to 1 when 4 bytes have been read into the data register. */ int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index f5514a0d5be6..196eeda2dd6c 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -41,7 +41,7 @@ struct e1000_hw; struct e1000_hw_stats; /* Enumerated types specific to the e1000 hardware */ -/* Media Access Controlers */ +/* Media Access Controllers */ typedef enum { e1000_undefined = 0, e1000_82542_rev2_0, diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4ff88a683f61..bfab14092d2c 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2233,7 +2233,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) * addresses take precedence to avoid disabling unicast filtering * when possible. * - * RAR 0 is used for the station MAC adddress + * RAR 0 is used for the station MAC address * if there are not 14 addresses, go ahead and clear the filters */ i = 1; @@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); - if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) + if (unlikely((!icr))) return IRQ_NONE; /* Not our interrupt */ + /* + * we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; /* guard against interrupt when we're going down */ diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index cb6c7b1c1fb8..89a69035e538 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -328,7 +328,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) /* * Ensure that the inter-port SWSM.SMBI lock bit is clear before - * first NVM or PHY acess. This should be done for single-port + * first NVM or PHY access. This should be done for single-port * devices, and for one port only on dual-port devices so that * for those devices we can still use the SMBI lock to synchronize * inter-port accesses to the PHY & NVM. 
@@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) * apply workaround for hardware errata documented in errata * docs Fixes issue where some error prone or unreliable PCIe * completions are occurring, particularly with ASPM enabled. - * Without fix, issue can cause tx timeouts. + * Without fix, issue can cause Tx timeouts. */ reg = er32(GCR2); reg |= 1; diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile index 360c91369f35..28519acacd2d 100644 --- a/drivers/net/e1000e/Makefile +++ b/drivers/net/e1000e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2008 Intel Corporation. +# Copyright(c) 1999 - 2011 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 7245dc2e0b7c..13149983d07e 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 5255be753746..e610e1369053 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index e45a61c8930a..2fefa820302b 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index f8ed03dab9b1..fa08b6336cfb 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index e774380c7cec..bc0860a598c9 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -102,7 +102,7 @@ enum e1e_registers { E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ + E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ /* Convenience macros * diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 5328a2927731..fb46974cfec1 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -321,7 +321,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) } /* - * Reset the PHY before any acccess to it. Doing so, ensures that + * Reset the PHY before any access to it. Doing so, ensures that * the PHY is in a known good state before we read/write PHY registers. * The generic reset is sufficient here, because we haven't determined * the PHY type yet. diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index ff2872153b21..68aa1749bf66 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ - e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. 
*/ - e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). + * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but we - * do not support receiving pause frames). + * do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. */ switch (hw->fc.current_mode) { @@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause - * frames but not send pause frames). + * frames but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames - * frames but we do not receive pause frames). + * frames but we do not receive pause frames). * 3: Both Rx and Tx flow control (symmetric) is enabled. * other: No other values should be possible at this point. */ @@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) } else { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + "Rx PAUSE frames only.\r\n"); } } /* diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fa5b60452547..1c18f26b0812 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -77,17 +77,17 @@ struct e1000_reg_info { char *name; }; -#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ static const struct e1000_reg_info e1000_reg_info_tbl[] = { @@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Interrupt Registers */ {E1000_ICR, "ICR"}, - /* RX Registers */ + /* Rx Registers */ {E1000_RCTL, "RCTL"}, {E1000_RDLEN, "RDLEN"}, {E1000_RDH, "RDH"}, @@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { {E1000_RDFTS, "RDFTS"}, {E1000_RDFPC, "RDFPC"}, - /* TX Registers */ + /* Tx Registers */ {E1000_TCTL, "TCTL"}, {E1000_TDBAL, "TDBAL"}, {E1000_TDBAH, "TDBAH"}, @@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) break; default: printk(KERN_INFO "%-15s %08x\n", - reginfo->name, __er32(hw, reginfo->ofs)); + reginfo->name, __er32(hw, reginfo->ofs)); return; } @@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) printk(KERN_CONT "\n"); } - /* - * e1000e_dump - Print registers, tx-ring and rx-ring + * e1000e_dump - Print registers, Tx-ring and Rx-ring */ static void e1000e_dump(struct e1000_adapter *adapter) { @@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter) struct e1000_reg_info *reginfo; struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; + struct my_u0 { + u64 a; + u64 b; + } *u0; struct e1000_buffer *buffer_info; struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_packet_split *rx_desc_ps; struct e1000_rx_desc *rx_desc; - struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; + struct my_u1 { + u64 a; + u64 b; + u64 c; + u64 d; + } *u1; u32 staterr; int i = 0; @@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); + "trans_start last_rx\n"); printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); + netdev->name, netdev->state, netdev->trans_start, + netdev->last_rx); } /* Print Registers */ @@ -214,26 +219,26 @@ 
static void e1000e_dump(struct e1000_adapter *adapter) e1000_regdump(hw, reginfo); } - /* Print TX Ring Summary */ + /* Print Tx Ring Summary */ if (!netdev || !netif_running(netdev)) goto exit; - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); + " leng ntw timestamp\n"); buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", - 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (unsigned long long)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp); + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); - /* Print TX Rings */ + /* Print Tx Ring */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) * @@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 */ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Legacy format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Context format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Data format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " - "%04X %3X %016llX %p", - (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), i, (unsigned long long)le64_to_cpu(u0->a), (unsigned long long)le64_to_cpu(u0->b), (unsigned long long)buffer_info->dma, @@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); } - /* Print RX Rings Summary */ + /* Print Rx Ring Summary */ rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC]\n"); printk(KERN_INFO " %5d %5X %5X\n", 0, - rx_ring->next_to_use, rx_ring->next_to_clean); + rx_ring->next_to_use, rx_ring->next_to_clean); - /* Print RX Rings */ + /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) goto exit; - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); switch (adapter->rx_ps_pages) { case 1: case 2: @@ -329,7 +334,7 @@ rx_ring_summary: * +-----------------------------------------------------+ */ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " - "[buffer 1 63:0 ] " + "[buffer 1 63:0 ] " "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " "[bi->skb] <-- Ext Pkt Split format\n"); /* [Extended] Receive Descriptor (Write-Back) Format @@ -344,7 +349,7 @@ rx_ring_summary: * 63 48 47 32 31 20 19 0 */ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " - "[vl l0 ee es] " + "[vl l0 ee es] " "[ l3 l2 l1 hs] [reserved ] ---------------- " "[bi->skb] <-- Ext Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { @@ -352,26 +357,26 @@ rx_ring_summary: rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); u1 = (struct my_u1 *)rx_desc_ps; staterr = - le32_to_cpu(rx_desc_ps->wb.middle.status_error); + le32_to_cpu(rx_desc_ps->wb.middle.status_error); if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX %016llX %016llX " - "---------------- %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - buffer_info->skb); + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); } else { printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %016llX %016llX %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -400,18 +405,18 @@ rx_ring_summary: * 63 48 47 40 39 32 31 16 15 0 */ printk(KERN_INFO "Rl[desc] [address 63:0 ] " - "[vl er S cks ln] [bi->dma ] [bi->skb] " - "<-- Legacy format\n"); + "[vl er S cks ln] [bi->dma ] [bi->skb] " + "<-- Legacy format\n"); for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; u0 = 
(struct my_u0 *)rx_desc; printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " - "%016llX %p", i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + "%016llX %p", i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) @@ -421,9 +426,10 @@ rx_ring_summary: if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - adapter->rx_buffer_len, true); + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_buffer_len, true); } } @@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void e1000_receive_skb(struct e1000_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, + struct net_device *netdev, struct sk_buff *skb, u8 status, __le16 vlan) { skb->protocol = eth_type_trans(skb, netdev); @@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, } /** - * e1000_rx_checksum - Receive Checksum Offload for 82543 + * e1000_rx_checksum - Receive Checksum Offload * @adapter: board private structure * @status_err: receive descriptor status and error fields * @csum: receive descriptor csum field @@ -548,7 +553,7 @@ map_skb: adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "RX DMA map failed\n"); + dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; break; } @@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ps_page = &buffer_info->ps_pages[j]; if (j >= adapter->rx_ps_pages) { /* all unused desc entries get hw null ptr */ - rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); continue; } if (!ps_page->page) { @@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, if (dma_mapping_error(&pdev->dev, ps_page->dma)) { dev_err(&adapter->pdev->dev, - "RX DMA page map failed\n"); + "Rx DMA page map failed\n"); adapter->rx_dma_failed++; goto no_buffers; } @@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, * didn't change because each write-back * erases this info. */ - rx_desc->read.buffer_addr[j+1] = - cpu_to_le64(ps_page->dma); + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); } skb = netdev_alloc_skb_ip_align(netdev, @@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "RX DMA map failed\n"); + dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; /* cleanup skb */ dev_kfree_skb_any(skb); @@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, * such as IA-64). 
*/ wmb(); - writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); + writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); } i++; @@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, cleaned = 1; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_bsize0, - DMA_FROM_DEVICE); + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); buffer_info->dma = 0; - /* see !EOP comment in other rx routine */ + /* see !EOP comment in other Rx routine */ if (!(staterr & E1000_RXD_STAT_EOP)) adapter->flags2 |= FLAG2_IS_DISCARDING; @@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) } /** - * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * e1000_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. @@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if - * there are tx hangs or other tx related bugs + * there are Tx hangs or other Tx related bugs */ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; ew32(TXDCTL(0), txdctl); @@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->rx_ps_pages) { /* this is a 32 byte descriptor */ rdlen = rx_ring->count * - sizeof(union e1000_rx_desc_packet_split); + sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { @@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* * set the writeback threshold (only takes effect if the RDTR * is set). set GRAN=1 and write back up to 0x4 worth, and - * enable prefetching of 0x20 rx descriptors + * enable prefetching of 0x20 Rx descriptors * granularity = 01 * wthresh = 04, * hthresh = 04, @@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * excessive C-state transition latencies result in * dropped transactions. */ - pm_qos_update_request( - &adapter->netdev->pm_qos_req, 55); + pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); } else { - pm_qos_update_request( - &adapter->netdev->pm_qos_req, - PM_QOS_DEFAULT_VALUE); + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); } } @@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; /* - * the Tx fifo also stores 16 bytes of information about the tx + * the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + @@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter) pba -= min_tx_space - tx_space; /* - * if short on Rx space, Rx wins and must trump tx + * if short on Rx space, Rx wins and must trump Tx * adjustment or use Early Receive if available */ if ((pba < min_rx_space) && @@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) adapter->netdev->name, adapter->link_speed, (adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex", + "Full Duplex" : "Half Duplex", ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? - "RX/TX" : - ((ctrl & E1000_CTRL_RFCE) ? 
"RX" : - ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); } static bool e1000e_has_link(struct e1000_adapter *adapter) @@ -4338,7 +4341,7 @@ link_up: /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; - /* flush partial descriptors to memory before detecting tx hang */ + /* flush partial descriptors to memory before detecting Tx hang */ if (adapter->flags2 & FLAG2_DMA_BURST) { ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); @@ -4529,7 +4532,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info->next_to_watch = i; buffer_info->dma = dma_map_single(&pdev->dev, skb->data + offset, - size, DMA_TO_DEVICE); + size, DMA_TO_DEVICE); buffer_info->mapped_as_page = false; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; @@ -4576,7 +4579,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } } - segs = skb_shinfo(skb)->gso_segs ?: 1; + segs = skb_shinfo(skb)->gso_segs ? : 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; @@ -4588,13 +4591,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, return count; dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); + dev_err(&pdev->dev, "Tx DMA map failed\n"); buffer_info->dma = 0; if (count) count--; while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -6193,7 +6196,7 @@ static int __init e1000_init_module(void) int ret; pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a9612b0e4bca..4dd9b63273f6 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak, module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); - /* * Transmit Interrupt Delay in units of 1.024 microseconds - * Tx interrupt delay needs to typically be set to something non zero + * Tx interrupt delay needs to typically be set to something non-zero * * Valid Range: 0-65535 */ @@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define DEFAULT_ITR 3 #define MAX_ITR 100000 #define MIN_ITR 100 + /* IntMode (Interrupt Mode) * * Valid Range: 0 - 2 diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index a640f1c369ae..6bea051b134b 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) s32 ret_val; u16 phy_data; - /* Enable CRS on TX. This must be set for half-duplex operation. */ + /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) goto out; @@ -2986,7 +2986,7 @@ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) } /** - * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page + * e1000_get_phy_addr_for_hv_page - Get PHY address based on page * @page: page to be accessed **/ static u32 e1000_get_phy_addr_for_hv_page(u32 page) diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 4fa8d2a4aef3..eb35951a2442 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1761,7 +1761,7 @@ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(mem, int, NULL, 0); module_param(autodetect, int, 0); -MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)"); +MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)"); MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 6de4675016b5..119aa2000c24 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -434,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev) static struct net_device_stats *gfar_get_stats(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct netdev_queue *txq; unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; unsigned long tx_packets = 0, tx_bytes = 0; int i = 0; @@ -450,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) dev->stats.rx_dropped = rx_dropped; for (i = 0; i < priv->num_tx_queues; i++) { - txq = netdev_get_tx_queue(dev, i); - tx_bytes += txq->tx_bytes; - tx_packets += txq->tx_packets; + tx_bytes += priv->tx_queue[i]->stats.tx_bytes; + tx_packets += priv->tx_queue[i]->stats.tx_packets; } dev->stats.tx_bytes = tx_bytes; @@ -2109,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Update transmit stats */ - txq->tx_bytes += skb->len; - txq->tx_packets ++; + tx_queue->stats.tx_bytes += skb->len; + tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; lstatus = txbdp->lstatus; diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 68984eb88ae0..54de4135e932 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -907,12 +907,21 @@ enum { MQ_MG_MODE }; +/* + * Per TX queue stats + */ +struct tx_q_stats { + unsigned long tx_packets; + unsigned long tx_bytes; +}; + /** * struct gfar_priv_tx_q - per tx queue structure * @txlock: per queue tx spin lock * @tx_skbuff:skb pointers * @skb_curtx: to be used skb pointer * @skb_dirtytx:the last used skb pointer + * @stats: bytes/packets stats * @qindex: index of this queue * @dev: back pointer to the dev structure * @grp: back pointer to the group to which this queue belongs @@ -934,6 +943,7 @@ struct gfar_priv_tx_q { struct txbd8 *tx_bd_base; struct txbd8 *cur_tx; struct txbd8 *dirty_tx; + struct tx_q_stats stats; struct net_device *dev; struct gfar_priv_grp *grp; u16 skb_curtx; diff --git 
a/drivers/net/greth.c b/drivers/net/greth.c index 27d6960ce09e..fdb0333f5cb6 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -1,7 +1,7 @@ /* * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. * - * 2005-2009 (c) Aeroflex Gaisler AB + * 2005-2010 (c) Aeroflex Gaisler AB * * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs * available in the GRLIB VHDL IP core library. @@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev) dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); + GRETH_REGSAVE(greth->regs->status, 0xFF); + napi_enable(&greth->napi); greth_enable_irqs(greth); @@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev) napi_disable(&greth->napi); + greth_disable_irqs(greth); greth_disable_tx(greth); + greth_disable_rx(greth); netif_stop_queue(dev); @@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; int err = NETDEV_TX_OK; - u32 status, dma_addr; + u32 status, dma_addr, ctrl; + unsigned long flags; - bdp = greth->tx_bd_base + greth->tx_next; + /* Clean TX Ring */ + greth_clean_tx(greth->netdev); if (unlikely(greth->tx_free <= 0)) { + spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Enable TX IRQ only if not already in poll() routine */ + if (ctrl & GRETH_RXI) + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); netif_stop_queue(dev); + spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_BUSY; } @@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } + bdp = greth->tx_bd_base + greth->tx_next; dma_addr = greth_read_bd(&bdp->addr); memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); - status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); + status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); /* Wrap around descriptor ring */ if (greth->tx_next == GRETH_TXBD_NUM_MASK) { @@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) greth->tx_next = NEXT_TX(greth->tx_next); greth->tx_free--; - /* No more descriptors */ - if (unlikely(greth->tx_free == 0)) { - - /* Free transmitted descriptors */ - greth_clean_tx(dev); - - /* If nothing was cleaned, stop queue & wait for irq */ - if (unlikely(greth->tx_free == 0)) { - status |= GRETH_BD_IE; - netif_stop_queue(dev); - } - } - /* Write descriptor control word and enable transmission */ greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ greth_enable_tx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); out: dev_kfree_skb(skb); @@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; - u32 status = 0, dma_addr; + u32 status = 0, dma_addr, ctrl; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; + unsigned long flags; nr_frags = skb_shinfo(skb)->nr_frags; + /* Clean TX Ring */ + greth_clean_tx_gbit(dev); + if (greth->tx_free < nr_frags + 1) { + spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Enable TX IRQ only if not already in poll() routine */ + if (ctrl & GRETH_RXI) + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); netif_stop_queue(dev); + spin_unlock_irqrestore(&greth->devlock, flags); err = 
NETDEV_TX_BUSY; goto out; } @@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) greth->tx_skbuff[curr_tx] = NULL; bdp = greth->tx_bd_base + curr_tx; - status = GRETH_TXBD_CSALL; + status = GRETH_TXBD_CSALL | GRETH_BD_EN; status |= frag->size & GRETH_BD_LEN; /* Wrap around descriptor ring */ @@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* More fragments left */ if (i < nr_frags - 1) status |= GRETH_TXBD_MORE; - - /* ... last fragment, check if out of descriptors */ - else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) { - - /* Enable interrupts and stop queue */ - status |= GRETH_BD_IE; - netif_stop_queue(dev); - } + else + status |= GRETH_BD_IE; /* enable IRQ on last fragment */ greth_write_bd(&bdp->stat, status); @@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) wmb(); - /* Enable the descriptors that we configured ... */ - for (i = 0; i < nr_frags + 1; i++) { - bdp = greth->tx_bd_base + greth->tx_next; - greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); - greth->tx_next = NEXT_TX(greth->tx_next); - greth->tx_free--; - } + /* Enable the descriptor chain by enabling the first descriptor */ + bdp = greth->tx_bd_base + greth->tx_next; + greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); + greth->tx_next = curr_tx; + greth->tx_free -= nr_frags + 1; + wmb(); + + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ greth_enable_tx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_OK; frag_map_error: - /* Unmap SKB mappings that succeeded */ + /* Unmap SKB mappings that succeeded and disable descriptor */ for (i = 0; greth->tx_next + i != curr_tx; i++) { bdp = greth->tx_bd_base + greth->tx_next + i; dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), greth_read_bd(&bdp->stat) & GRETH_BD_LEN, DMA_TO_DEVICE); + greth_write_bd(&bdp->stat, 0); } map_error: if (net_ratelimit()) @@ -565,12 +574,11 @@ out: return err; } - static irqreturn_t greth_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct greth_private *greth; - u32 status; + u32 status, ctrl; irqreturn_t retval = IRQ_NONE; greth = netdev_priv(dev); @@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) /* Get the interrupt events that caused us to be here. */ status = GRETH_REGLOAD(greth->regs->status); - /* Handle rx and tx interrupts through poll */ - if (status & (GRETH_INT_RX | GRETH_INT_TX)) { - - /* Clear interrupt status */ - GRETH_REGORIN(greth->regs->status, - status & (GRETH_INT_RX | GRETH_INT_TX)); + /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be + * set regardless of whether IRQ is enabled or not. Especially + * important when shared IRQ. 
+ */ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Handle rx and tx interrupts through poll */ + if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) || + ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) { retval = IRQ_HANDLED; /* Disable interrupts and schedule poll() */ @@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev) while (1) { bdp = greth->tx_bd_base + greth->tx_last; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); stat = greth_read_bd(&bdp->stat); if (unlikely(stat & GRETH_BD_EN)) @@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev) /* We only clean fully completed SKBs */ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); - stat = bdp_last_frag->stat; + + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); + stat = greth_read_bd(&bdp_last_frag->stat); if (stat & GRETH_BD_EN) break; @@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev) greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } - if (greth->tx_free > (MAX_SKB_FRAGS + 1)) { - netif_wake_queue(dev); - } -} -static int greth_pending_packets(struct greth_private *greth) -{ - struct greth_bd *bdp; - u32 status; - bdp = greth->rx_bd_base + greth->rx_cur; - status = greth_read_bd(&bdp->stat); - if (status & GRETH_BD_EN) - return 0; - else - return 1; + if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) + netif_wake_queue(dev); } static int greth_rx(struct net_device *dev, int limit) @@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit) int pkt_len; int bad, count; u32 status, dma_addr; + unsigned long flags; greth = netdev_priv(dev); for (count = 0; count < limit; ++count) { bdp = greth->rx_bd_base + greth->rx_cur; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); status = greth_read_bd(&bdp->stat); - dma_addr = greth_read_bd(&bdp->addr); - bad = 0; if (unlikely(status & GRETH_BD_EN)) { break; } + dma_addr = greth_read_bd(&bdp->addr); + bad = 0; + /* Check status for errors. 
*/ if (unlikely(status & GRETH_RXBD_STATUS)) { if (status & GRETH_RXBD_ERR_FT) { @@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit) dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); + spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */ greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); greth->rx_cur = NEXT_RX(greth->rx_cur); } @@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) int pkt_len; int bad, count = 0; u32 status, dma_addr; + unsigned long flags; greth = netdev_priv(dev); @@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit) bdp = greth->rx_bd_base + greth->rx_cur; skb = greth->rx_skbuff[greth->rx_cur]; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); status = greth_read_bd(&bdp->stat); bad = 0; @@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) } } - /* Allocate new skb to replace current */ - newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); - - if (!bad && newskb) { + /* Allocate new skb to replace current, not needed if the + * current skb can be reused */ + if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) { skb_reserve(newskb, NET_IP_ALIGN); dma_addr = dma_map_single(greth->dev, @@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit) if (net_ratelimit()) dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); dev_kfree_skb(newskb); + /* reusing current skb, so it is a drop */ dev->stats.rx_dropped++; } + } else if (bad) { + /* Bad Frame transfer, the skb is reused */ + dev->stats.rx_dropped++; } else { + /* Failed Allocating a new skb. This is rather stupid + * but the current "filled" skb is reused, as if + * transfer failure. 
One could argue that RX descriptor + * table handling should be divided into cleaning and + * filling as the TX part of the driver + */ if (net_ratelimit()) dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); + /* reusing current skb, so it is a drop */ dev->stats.rx_dropped++; } @@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) wmb(); greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); greth->rx_cur = NEXT_RX(greth->rx_cur); } @@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget) { struct greth_private *greth; int work_done = 0; + unsigned long flags; + u32 mask, ctrl; greth = container_of(napi, struct greth_private, napi); - if (greth->gbit_mac) { - greth_clean_tx_gbit(greth->netdev); - } else { - greth_clean_tx(greth->netdev); +restart_txrx_poll: + if (netif_queue_stopped(greth->netdev)) { + if (greth->gbit_mac) + greth_clean_tx_gbit(greth->netdev); + else + greth_clean_tx(greth->netdev); } -restart_poll: if (greth->gbit_mac) { work_done += greth_rx_gbit(greth->netdev, budget - work_done); } else { @@ -949,15 +976,29 @@ restart_poll: if (work_done < budget) { - napi_complete(napi); + spin_lock_irqsave(&greth->devlock, flags); + + ctrl = GRETH_REGLOAD(greth->regs->control); + if (netif_queue_stopped(greth->netdev)) { + GRETH_REGSAVE(greth->regs->control, + ctrl | GRETH_TXI | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE | + GRETH_INT_TX | GRETH_INT_TE; + } else { + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE; + } - if (greth_pending_packets(greth)) { - napi_reschedule(napi); - goto restart_poll; + if (GRETH_REGLOAD(greth->regs->status) & mask) { + GRETH_REGSAVE(greth->regs->control, ctrl); + spin_unlock_irqrestore(&greth->devlock, flags); + goto restart_txrx_poll; + } else { + __napi_complete(napi); + spin_unlock_irqrestore(&greth->devlock, flags); } } - greth_enable_irqs(greth); return work_done; } @@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = { }; static struct net_device_ops greth_netdev_ops = { - .ndo_open = greth_open, - .ndo_stop = greth_close, - .ndo_start_xmit = greth_start_xmit, - .ndo_set_mac_address = greth_set_mac_add, - .ndo_validate_addr = eth_validate_addr, + .ndo_open = greth_open, + .ndo_stop = greth_close, + .ndo_start_xmit = greth_start_xmit, + .ndo_set_mac_address = greth_set_mac_add, + .ndo_validate_addr = eth_validate_addr, }; static inline int wait_for_mdio(struct greth_private *greth) @@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev) struct greth_private *greth = netdev_priv(dev); struct phy_device *phydev = greth->phy; unsigned long flags; - int status_change = 0; + u32 ctrl; spin_lock_irqsave(&greth->devlock, flags); if (phydev->link) { if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { - - GRETH_REGANDIN(greth->regs->control, - ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB)); + ctrl = GRETH_REGLOAD(greth->regs->control) & + ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB); if (phydev->duplex) - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); - - if (phydev->speed == SPEED_100) { - - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP); - } + ctrl |= GRETH_CTRL_FD; + if (phydev->speed == SPEED_100) + ctrl |= GRETH_CTRL_SP; else if (phydev->speed == SPEED_1000) - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); + ctrl |= GRETH_CTRL_GB; + 
GRETH_REGSAVE(greth->regs->control, ctrl); greth->speed = phydev->speed; greth->duplex = phydev->duplex; status_change = 1; @@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = { { .name = "GAISLER_ETHMAC", }, + { + .name = "01_01d", + }, {}, }; diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 03ad903cd676..be0f2062bd14 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h @@ -23,6 +23,7 @@ #define GRETH_BD_LEN 0x7FF #define GRETH_TXEN 0x1 +#define GRETH_INT_TE 0x2 #define GRETH_INT_TX 0x8 #define GRETH_TXI 0x4 #define GRETH_TXBD_STATUS 0x0001C000 @@ -35,6 +36,7 @@ #define GRETH_TXBD_ERR_UE 0x4000 #define GRETH_TXBD_ERR_AL 0x8000 +#define GRETH_INT_RE 0x1 #define GRETH_INT_RX 0x4 #define GRETH_RXEN 0x2 #define GRETH_RXI 0x8 diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h index 4dc39e5f0156..77fcf4459161 100644 --- a/drivers/net/irda/donauboe.h +++ b/drivers/net/irda/donauboe.h @@ -30,7 +30,7 @@ * or the type-DO IR port. * * IrDA chip set list from Toshiba Computer Engineering Corp. - * model method maker controler Version + * model method maker controller Version * Portege 320CT FIR,SIR Toshiba Oboe(Triangle) * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney) * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney) diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 8d316d9cd29d..a21f5817685b 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -1079,7 +1079,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) /* * The defaults in the HW for RX PB 1-7 are not zero and so should be - * intialized to zero for non DCB mode otherwise actual total RX PB + * initialized to zero for non DCB mode otherwise actual total RX PB * would be bigger than programmed and filter space would run into * the PB 0 region. */ @@ -1167,7 +1167,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) /* * The defaults in the HW for RX PB 1-7 are not zero and so should be - * intialized to zero for non DCB mode otherwise actual total RX PB + * initialized to zero for non DCB mode otherwise actual total RX PB * would be bigger than programmed and filter space would run into * the PB 0 region. 
*/ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a060610a42db..602078b84892 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { - struct net_device *netdev = tx_ring->netdev; - struct netdev_queue *txq; unsigned int first; unsigned int tx_flags = 0; u8 hdr_len = 0; @@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) ixgbe_atr(tx_ring, skb, tx_flags, protocol); - txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); - txq->tx_bytes += skb->len; - txq->tx_packets++; ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); @@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; - /* accurate rx/tx bytes/packets stats */ - dev_txq_stats_fold(netdev, stats); rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); @@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, stats->rx_bytes += bytes; } } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } rcu_read_unlock(); /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index 183765cb7f25..f35554d11441 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -238,7 +238,7 @@ static int temac_dma_bd_init(struct net_device *ndev) goto out; } /* allocate the tx and rx ring buffer descriptors. */ - /* returns a virtual addres and a physical address. */ + /* returns a virtual address and a physical address. 
*/ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, &lp->tx_bd_p, GFP_KERNEL); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 21845affea13..5933621ac3ff 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -585,7 +585,7 @@ err: rcu_read_lock_bh(); vlan = rcu_dereference(q->vlan); if (vlan) - netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; + vlan->dev->stats.tx_dropped++; rcu_read_unlock_bh(); return err; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index a37fcf11ab36..ea5cfe2c3a04 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev) return -EIO; } - status = pci_restore_state(pdev); - if (status) - return status; + pci_restore_state(pdev); status = pci_enable_device(pdev); if (status) { diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bb8645ab247c..bde7d61f1930 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -554,6 +554,8 @@ struct rtl8169_private { struct mii_if_info mii; struct rtl8169_counters counters; u32 saved_wolopts; + + const struct firmware *fw; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew "); @@ -1766,6 +1768,29 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) } } +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + release_firmware(tp->fw); + tp->fw = NULL; +} + +static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) +{ + const struct firmware **fw = &tp->fw; + int rc = !*fw; + + if (rc) { + rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); + if (rc < 0) + goto out; + } + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + rtl_phy_write_fw(tp, *fw); +out: + return rc; +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -2139,7 +2164,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) { 0x0d, 0xf880 } }; void __iomem *ioaddr = tp->mmio_addr; - const struct firmware *fw; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); @@ -2203,11 +2227,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - if (rtl_readphy(tp, 0x06) == 0xbf00 && - request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { - rtl_phy_write_fw(tp, fw); - release_firmware(fw); - } else { + if ((rtl_readphy(tp, 0x06) != 0xbf00) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); } @@ -2257,7 +2278,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) { 0x0d, 0xf880 } }; void __iomem *ioaddr = tp->mmio_addr; - const struct firmware *fw; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); @@ -2312,11 +2332,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - if (rtl_readphy(tp, 0x06) == 0xb300 && - request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { - rtl_phy_write_fw(tp, fw); - release_firmware(fw); - } else { + if ((rtl_readphy(tp, 0x06) != 0xb300) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); } @@ -3200,6 +3217,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&tp->task); + rtl_release_firmware(tp); + unregister_netdev(dev); if (pci_dev_run_wake(pdev)) diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 711449c6e675..002bac743843 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void) int count; int cpu; + if (rss_cpus) + return rss_cpus; + if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { printk(KERN_WARNING "sfc: RSS disabled due to allocation failure\n"); @@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx) efx->legacy_irq = 0; } -struct efx_tx_queue * -efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) -{ - unsigned tx_channel_offset = - separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; - EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || - type >= EFX_TXQ_TYPES); - return &efx->channel[tx_channel_offset + index]->tx_queue[type]; -} - static void efx_set_channels(struct efx_nic *efx) { struct efx_channel *channel; struct efx_tx_queue *tx_queue; - unsigned tx_channel_offset = + + efx->tx_channel_offset = separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; /* Channel pointers were set in efx_init_struct() but we now * need to clear them for TX queues in any RX-only channels. 
*/ efx_for_each_channel(channel, efx) { - if (channel->channel - tx_channel_offset >= + if (channel->channel - efx->tx_channel_offset >= efx->n_tx_channels) { efx_for_each_channel_tx_queue(tx_queue, channel) tx_queue->channel = NULL; diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 70e4f7dcce81..61ddd2c6e750 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1107,22 +1107,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) /* Restore PCI configuration if needed */ if (method == RESET_TYPE_WORLD) { - if (efx_nic_is_dual_func(efx)) { - rc = pci_restore_state(nic_data->pci_dev2); - if (rc) { - netif_err(efx, drv, efx->net_dev, - "failed to restore PCI config for " - "the secondary function\n"); - goto fail3; - } - } - rc = pci_restore_state(efx->pci_dev); - if (rc) { - netif_err(efx, drv, efx->net_dev, - "failed to restore PCI config for the " - "primary function\n"); - goto fail4; - } + if (efx_nic_is_dual_func(efx)) + pci_restore_state(nic_data->pci_dev2); + pci_restore_state(efx->pci_dev); netif_dbg(efx, drv, efx->net_dev, "successfully restored PCI config\n"); } @@ -1133,7 +1120,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) rc = -ETIMEDOUT; netif_err(efx, hw, efx->net_dev, "timed out waiting for hardware reset\n"); - goto fail5; + goto fail3; } netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); @@ -1141,11 +1128,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) /* pci_save_state() and pci_restore_state() MUST be called in pairs */ fail2: -fail3: pci_restore_state(efx->pci_dev); fail1: -fail4: -fail5: +fail3: return rc; } diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index bdce66ddf93a..28df8665256a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -735,6 +735,7 @@ struct efx_nic { unsigned next_buffer_table; unsigned n_channels; unsigned n_rx_channels; + unsigned tx_channel_offset; unsigned n_tx_channels; unsigned int rx_buffer_len; unsigned int rx_buffer_order; @@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index) _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ (_efx)->channel[_channel->channel + 1] : NULL) -extern struct efx_tx_queue * -efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) +{ + EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || + type >= EFX_TXQ_TYPES); + return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; +} static inline struct efx_tx_queue * efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 581836867098..5976d1d51df1 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -36,7 +36,7 @@ Rev 1.07.06 Nov. 7 2000 Jeff Garzik some bug fix and cleaning Rev 1.07.05 Nov. 6 2000 metapirat contribute media type select by ifconfig Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support - Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule + Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1 Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring Rev 1.06.04 Feb. 
11 2000 Jeff Garzik softnet and init for kernel 2.4 diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 296000bf5a25..3397618d4d96 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -12,7 +12,7 @@ /* * RX HW/SW interaction overview * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * There are 2 types of RX communication channels betwean driver and NIC. + * There are 2 types of RX communication channels between driver and NIC. * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds * info about buffer's location, size and ID. An ID field is used to identify a @@ -821,7 +821,7 @@ static void bdx_setmulti(struct net_device *ndev) } /* use PMF to accept first MAC_MCST_NUM (15) addresses */ - /* TBD: sort addreses and write them in ascending order + /* TBD: sort addresses and write them in ascending order * into RX_MAC_MCST regs. we skip this phase now and accept ALL * multicast frames throu IMF */ /* accept the rest of addresses throu IMF */ @@ -1346,7 +1346,7 @@ static void print_rxfd(struct rxf_desc *rxfd) /* * TX HW/SW interaction overview * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * There are 2 types of TX communication channels betwean driver and NIC. + * There are 2 types of TX communication channels between driver and NIC. * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets * 2) TX Data Fifo - TXD - holds descriptors of full buffers. * diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 0e6bac5ec65b..7cb301da7474 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c @@ -142,14 +142,6 @@ MODULE_AUTHOR("Tilera"); MODULE_LICENSE("GPL"); - -#define IS_MULTICAST(mac_addr) \ - (((u8 *)(mac_addr))[0] & 0x01) - -#define IS_BROADCAST(mac_addr) \ - (((u16 *)(mac_addr))[0] == 0xffff) - - /* * Queue of incoming packets for a specific cpu and device. * @@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) /* * FIXME: Implement HW multicast filter. */ - if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { + if (is_unicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; filter = compare_ether_addr(mine, buf); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7599c457abd1..b100bd50a0d7 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1309,7 +1309,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, break; case SIOCGIFHWADDR: - /* Get hw addres */ + /* Get hw address */ memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); ifr.ifr_hwaddr.sa_family = tun->dev->type; if (copy_to_user(argp, &ifr, ifreq_len)) diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 73a3e0d93237..715e7b47e7e9 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -2032,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now. 
*/ - if (!(ha->addr[0] & 1)) + if (!is_multicast_ether_addr(ha->addr)) continue; /* Ask CPM to run CRC and set bit in diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 593c104ab199..d776c4a8d3c1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1021,13 +1021,15 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { pr_debug("invalid frame detected (ignored)" "offset[%u]=%u, length=%u, skb=%p\n", - x, offset, temp, skb); + x, offset, temp, skb_in); if (!x) goto error; break; } else { skb = skb_clone(skb_in, GFP_ATOMIC); + if (!skb) + goto error; skb->len = temp; skb->data = ((u8 *)skb_in->data) + offset; skb_set_tail_pointer(skb, temp); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index cab96ad49e60..09cac704fdd7 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -898,7 +898,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) set_mii_flow_control(vptr); /* - Check if new status is consisent with current status + Check if new status is consistent with current status if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || (mii_status==curr_status)) { vptr->mii_status=mii_check_media_mode(vptr->mac_regs); diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 1ac9b568f1b0..c81a6512c683 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) "hotplug event.\n"); out: + release_firmware(fw); return ret; } diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index 8c3103fb6442..d48486d6afa1 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h @@ -1695,7 +1695,7 @@ struct vxge_hw_device_stats_sw_err { * struct vxge_hw_device_stats - Contains HW per-device statistics, * including hw. * @devh: HW device handle. - * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats. + * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats. * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory * space. 
* @hw_info_dma_acch: One more DMA handle used subsequently to free the diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 34cff6ce6d27..4578e5b4b411 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -125,7 +125,7 @@ static u32 dscc4_pci_config_store[16]; /* Module parameters */ MODULE_AUTHOR("Maintainer: Francois Romieu "); -MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler"); +MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller"); MODULE_LICENSE("GPL"); module_param(debug, int, 0); MODULE_PARM_DESC(debug,"Enable/disable extra messages"); diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index f0603327aafa..65bc334ed57b 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -232,7 +232,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m) result); goto error; } - /* Extract MAC addresss */ + /* Extract MAC address */ ddi = (void *) skb->data; BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n", diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 17ecaa41a807..030cbfd31704 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h @@ -186,7 +186,7 @@ enum { * struct i2400m_poke_table - Hardware poke table for the Intel 2400m * * This structure will be used to create a device specific poke table - * to put the device in a consistant state at boot time. + * to put the device in a consistent state at boot time. * * @address: The device address to poke * @@ -703,7 +703,7 @@ enum i2400m_bm_cmd_flags { * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot * rom after reading the MAC address. This is quite a dirty hack, * if you ask me -- the device requires the bootrom to be - * intialized after reading the MAC address. + * initialized after reading the MAC address. 
*/ enum i2400m_bri { I2400M_BRI_SOFT = 1 << 1, diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h index 7ad05d401ab5..fd14b9103951 100644 --- a/drivers/net/wireless/ath/ath5k/reg.h +++ b/drivers/net/wireless/ath/ath5k/reg.h @@ -1064,7 +1064,7 @@ /* * EEPROM command register */ -#define AR5K_EEPROM_CMD 0x6008 /* Register Addres */ +#define AR5K_EEPROM_CMD 0x6008 /* Register Address */ #define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */ #define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */ #define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */ @@ -1084,7 +1084,7 @@ /* * EEPROM config register */ -#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */ +#define AR5K_EEPROM_CFG 0x6010 /* Register Address */ #define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */ #define AR5K_EEPROM_CFG_SIZE_AUTO 0 #define AR5K_EEPROM_CFG_SIZE_4KBIT 1 @@ -1126,7 +1126,7 @@ * Second station id register (Upper 16 bits of MAC address + PCU settings) */ #define AR5K_STA_ID1 0x8004 /* Register Address */ -#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */ +#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC address */ #define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ #define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ #define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */ diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 01880aa13e36..ea2e7d714bda 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah) &adc_dc_cal_multi_sample; } ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; + + if (AR_SREV_9287(ah)) + ah->supp_cals &= ~ADC_GAIN_CAL; } } diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 088f141f2006..749a93608664 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) eep->baseEepHeader.pwdclkind == 0) ah->need_an_top2_fixup = 1; + if ((common->bus_ops->ath_bus_type == ATH_USB) && + (AR_SREV_9280(ah))) + eep->modalHeader[0].xpaBiasLvl = 0; + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index a099b3e87ed3..1ce506f23110 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id, void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, enum htc_endpoint_id ep_id, bool txok); +int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); void ath9k_htc_station_work(struct work_struct *work); void ath9k_htc_aggr_work(struct work_struct *work); void ath9k_ani_work(struct work_struct *work);; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 845b4c938d16..f4d576bc3ccd 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) priv->nstations++; + /* + * Set chainmask etc. on the target. 
+ */ + ret = ath9k_htc_update_cap_target(priv); + if (ret) + ath_dbg(common, ATH_DBG_CONFIG, + "Failed to update capability in target\n"); + + priv->ah->is_monitoring = true; + return 0; err_vif: @@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) } priv->nstations--; + priv->ah->is_monitoring = false; return 0; } @@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, return 0; } -static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) +int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) { struct ath9k_htc_cap_target tcap; int ret; @@ -1186,6 +1197,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) } } + /* + * Monitor interface should be added before + * IEEE80211_CONF_CHANGE_CHANNEL is handled. + */ + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (conf->flags & IEEE80211_CONF_MONITOR) { + if (ath9k_htc_add_monitor_interface(priv)) + ath_err(common, "Failed to set monitor mode\n"); + else + ath_dbg(common, ATH_DBG_CONFIG, + "HW opmode set to Monitor mode\n"); + } + } + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; @@ -1221,16 +1246,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) ath_update_txpow(priv); } - if (changed & IEEE80211_CONF_CHANGE_MONITOR) { - if (conf->flags & IEEE80211_CONF_MONITOR) { - if (ath9k_htc_add_monitor_interface(priv)) - ath_err(common, "Failed to set monitor mode\n"); - else - ath_dbg(common, ATH_DBG_CONFIG, - "HW opmode set to Monitor mode\n"); - } - } - if (changed & IEEE80211_CONF_CHANGE_IDLE) { mutex_lock(&priv->htc_pm_lock); if (!priv->ps_idle) { diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index fde978665e07..1afb8bb85756 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -436,9 +436,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah) static int ath9k_hw_post_init(struct ath_hw *ah) { + struct ath_common *common = ath9k_hw_common(ah); int ecode; - if (!AR_SREV_9271(ah)) { + if (common->bus_ops->ath_bus_type != ATH_USB) { if (!ath9k_hw_chip_test(ah)) return -ENODEV; } @@ -1213,7 +1214,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ah->txchainmask = common->tx_chainmask; ah->rxchainmask = common->rx_chainmask; - if (!ah->chip_fullsleep) { + if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) { ath9k_hw_abortpcurecv(ah); if (!ath9k_hw_stopdmarecv(ah)) { ath_dbg(common, ATH_DBG_XMIT, diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 0dc33b65e86b..be4828167012 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -1919,7 +1919,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev) b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); } -/* Intialize B/G PHY power control */ +/* Initialize B/G PHY power control */ static void b43_phy_init_pctl(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->bus; diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c index 35033dd342ce..28e477d01587 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/b43legacy/phy.c @@ -153,7 +153,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev) phy->calibrated = 1; } -/* intialize B PHY power control +/* initialize B PHY power control * as described in 
http://bcm-specs.sipsolutions.net/InitPowerControl */ static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index bd8a4134edeb..2176edede39b 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link) hw_priv->link = link; /* - * Make sure the IRQ handler cannot proceed until at least - * dev->base_addr is initialized. + * We enable IRQ here, but IRQ handler will not proceed + * until dev->base_addr is set below. This protect us from + * receive interrupts when driver is not initialized. */ - spin_lock_irqsave(&local->irq_init_lock, flags); - ret = pcmcia_request_irq(link, prism2_interrupt); if (ret) - goto failed_unlock; + goto failed; ret = pcmcia_enable_device(link); if (ret) - goto failed_unlock; + goto failed; + spin_lock_irqsave(&local->irq_init_lock, flags); dev->irq = link->irq; dev->base_addr = link->resource[0]->start; - spin_unlock_irqrestore(&local->irq_init_lock, flags); local->shutdown = 0; @@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link) return ret; - failed_unlock: - spin_unlock_irqrestore(&local->irq_init_lock, flags); failed: kfree(hw_priv); prism2_release((u_long)link); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 8d6ed5f6f46f..ae438ed80c2f 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) inta = ipw_read32(priv, IPW_INTA_RW); inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); + + if (inta == 0xFFFFFFFF) { + /* Hardware disappeared */ + IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n"); + /* Only handle the cached INTA values */ + inta = 0; + } inta &= (IPW_INTA_MASK_ALL & inta_mask); /* Add any cached INTA values that need to be handled */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c index a5dbfea1bfad..b5cb3be0eb4b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c @@ -197,7 +197,7 @@ static irqreturn_t iwl_isr(int irq, void *data) none: /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if diabled by irq and no schedules tasklet. */ + /* only Re-enable if disabled by irq and no schedules tasklet. 
*/ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) iwl_enable_interrupts(priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index f13a83a7e62b..36335b1b54d4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1154,7 +1154,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) } /* Re-enable all interrupts */ - /* only Re-enable if diabled by irq */ + /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); @@ -1368,7 +1368,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) } /* Re-enable all interrupts */ - /* only Re-enable if diabled by irq */ + /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); } diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c index a08b4e56e6b1..bb1a742a98a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-legacy.c +++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c @@ -619,7 +619,7 @@ unplugged: none: /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if diabled by irq */ + /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 4776323b1eba..49493d176515 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -107,7 +107,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, /* * XXX: The MAC address in the command buffer is often changed from * the original sent to the device. That is, the MAC address - * written to the command buffer often is not the same MAC adress + * written to the command buffer often is not the same MAC address * read from the command buffer when the command returns. This * issue has not yet been resolved and this debugging is left to * observe the problem. 
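A note on the ipw2200 tasklet change above: reading 0xFFFFFFFF from INTA is the classic signature of a PCI device that has dropped off the bus, so the patch zeroes the live value and acts only on cached interrupt bits. The sketch below restates that defensive pattern in isolation; example_read_inta() and EXAMPLE_INTA_REG are assumed names introduced for illustration only and do not appear in the driver or in this patch.

	/*
	 * Illustrative sketch, not part of the patch: treat an all-ones
	 * MMIO read as "hardware gone" before acting on interrupt status.
	 * EXAMPLE_INTA_REG and the helper name are assumptions for this
	 * example.
	 */
	static u32 example_read_inta(void __iomem *regs)
	{
		u32 inta = readl(regs + EXAMPLE_INTA_REG);

		if (inta == 0xFFFFFFFF) {
			/* PCI reads return all ones once the device is gone:
			 * report it and let the caller handle only whatever
			 * status it had already cached. */
			pr_warn("INTA == 0xFFFFFFFF, device removed?\n");
			inta = 0;
		}
		return inta;
	}
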
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 76b2318a7dc7..f618b9623e5a 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, else *burst_possible = false; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 2c8cc954d1b6..ec2c75d77cea 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -630,7 +630,7 @@ islpci_alloc_memory(islpci_private *priv) printk(KERN_DEBUG "islpci_alloc_memory\n"); #endif - /* remap the PCI device base address to accessable */ + /* remap the PCI device base address to accessible */ if (!(priv->device_base = ioremap(pci_resource_start(priv->pdev, 0), ISL38XX_PCI_MEM_SIZE))) { @@ -709,7 +709,7 @@ islpci_alloc_memory(islpci_private *priv) PCI_DMA_FROMDEVICE); if (!priv->pci_map_rx_address[counter]) { /* error mapping the buffer to device - accessable memory address */ + accessible memory address */ printk(KERN_ERR "failed to map skb DMA'able\n"); goto out_free; } @@ -773,7 +773,7 @@ islpci_free_memory(islpci_private *priv) priv->data_low_rx[counter] = NULL; } - /* Free the acces control list and the WPA list */ + /* Free the access control list and the WPA list */ prism54_acl_clean(&priv->acl); prism54_wpa_bss_ie_clean(priv); mgt_clean(priv); diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 2fc52bc2d7dd..d44f8e20cce0 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -450,7 +450,7 @@ islpci_eth_receive(islpci_private *priv) MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE); if (unlikely(!priv->pci_map_rx_address[index])) { - /* error mapping the buffer to device accessable memory address */ + /* error mapping the buffer to device accessible memory address */ DEBUG(SHOW_ERROR_MESSAGES, "Error mapping DMA address\n"); diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 658542d2efe1..f3da051df39e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -273,7 +273,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, intf->beacon = entry; /* - * The MAC adddress must be configured after the device + * The MAC address must be configured after the device * has been initialized. Otherwise the device can reset * the MAC registers. 
* The BSSID address must only be configured in AP mode, diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 73631c6fbb30..ace0b668c04e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -363,12 +363,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev) struct rt2x00_dev *rt2x00dev = hw->priv; if (pci_set_power_state(pci_dev, PCI_D0) || - pci_enable_device(pci_dev) || - pci_restore_state(pci_dev)) { + pci_enable_device(pci_dev)) { ERROR(rt2x00dev, "Failed to resume device.\n"); return -EIO; } + pci_restore_state(pci_dev); return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00pci_resume); diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h index e54b21a4f8b1..efcc3aaca14f 100644 --- a/drivers/net/wireless/wl1251/acx.h +++ b/drivers/net/wireless/wl1251/acx.h @@ -1272,10 +1272,10 @@ struct wl1251_acx_tid_cfg { /* OBSOLETE */ #define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6) -/* Trace meassge on MBOX #A */ +/* Trace message on MBOX #A */ #define WL1251_ACX_INTR_TRACE_A BIT(7) -/* Trace meassge on MBOX #B */ +/* Trace message on MBOX #B */ #define WL1251_ACX_INTR_TRACE_B BIT(8) /* Command processing completion */ diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h index 13fbeeccf609..c0ce2c8b43b8 100644 --- a/drivers/net/wireless/wl1251/wl1251.h +++ b/drivers/net/wireless/wl1251/wl1251.h @@ -419,7 +419,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl); #define WL1251_FW_NAME "wl1251-fw.bin" #define WL1251_NVS_NAME "wl1251-nvs.bin" -#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */ +#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */ #define WL1251_PART_DOWN_MEM_START 0x0 #define WL1251_PART_DOWN_MEM_SIZE 0x16800 diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h index 9cbc3f40c8dd..7bd8e4db4a71 100644 --- a/drivers/net/wireless/wl12xx/acx.h +++ b/drivers/net/wireless/wl12xx/acx.h @@ -47,9 +47,9 @@ #define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ #define WL1271_ACX_INTR_DATA BIT(6) -/* Trace meassge on MBOX #A */ +/* Trace message on MBOX #A */ #define WL1271_ACX_INTR_TRACE_A BIT(7) -/* Trace meassge on MBOX #B */ +/* Trace message on MBOX #B */ #define WL1271_ACX_INTR_TRACE_B BIT(8) #define WL1271_ACX_INTR_ALL 0xFFFFFFFF diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h index ce3d31f98c55..9050dd9b62d2 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ b/drivers/net/wireless/wl12xx/wl12xx.h @@ -416,8 +416,8 @@ int wl1271_plt_stop(struct wl1271 *wl); /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power on in case is has been shut down shortly before */ -#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */ -#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ +#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ +#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */ /* Macros to handle wl1271.sta_rate_set */ #define HW_BG_RATES_MASK 0xffff diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index ee82df62e646..3e5befe4d03b 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -192,7 +192,7 @@ static inline void wl3501_switch_page(struct wl3501_card *this, u8 page) } /* - * Get Ethernet MAC addresss. + * Get Ethernet MAC address. 
* * WARNING: We switch to FPAGE0 and switc back again. * Making sure there is no other WL function beening called by ISR. diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig new file mode 100644 index 000000000000..ffedfd492754 --- /dev/null +++ b/drivers/nfc/Kconfig @@ -0,0 +1,30 @@ +# +# Near Field Communication (NFC) devices +# + +menuconfig NFC_DEVICES + bool "NFC devices" + default n + ---help--- + You'll have to say Y if your computer contains an NFC device that + you want to use under Linux. + + You can say N here if you don't have any Near Field Communication + devices connected to your computer. + +if NFC_DEVICES + +config PN544_NFC + tristate "PN544 NFC driver" + depends on I2C + select CRC_CCITT + default n + ---help--- + Say yes if you want PN544 Near Field Communication driver. + This is for i2c connected version. If unsure, say N here. + + To compile this driver as a module, choose m here. The module will + be called pn544. + + +endif # NFC_DEVICES diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile new file mode 100644 index 000000000000..a4efb164ec49 --- /dev/null +++ b/drivers/nfc/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for nfc devices +# + +obj-$(CONFIG_PN544_NFC) += pn544.o diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c new file mode 100644 index 000000000000..401c44b6eadb --- /dev/null +++ b/drivers/nfc/pn544.c @@ -0,0 +1,891 @@ +/* + * Driver for the PN544 NFC chip. + * + * Copyright (C) Nokia Corporation + * + * Author: Jari Vanhala + * Contact: Matti Aaltonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for TCGETS */ +#include + +#define DRIVER_CARD "PN544 NFC" +#define DRIVER_DESC "NFC driver for PN544" + +static struct i2c_device_id pn544_id_table[] = { + { PN544_DRIVER_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, pn544_id_table); + +#define HCI_MODE 0 +#define FW_MODE 1 + +enum pn544_state { + PN544_ST_COLD, + PN544_ST_FW_READY, + PN544_ST_READY, +}; + +enum pn544_irq { + PN544_NONE, + PN544_INT, +}; + +struct pn544_info { + struct miscdevice miscdev; + struct i2c_client *i2c_dev; + struct regulator_bulk_data regs[2]; + + enum pn544_state state; + wait_queue_head_t read_wait; + loff_t read_offset; + enum pn544_irq read_irq; + struct mutex read_mutex; /* Serialize read_irq access */ + struct mutex mutex; /* Serialize info struct access */ + u8 *buf; + unsigned int buflen; +}; + +static const char reg_vdd_io[] = "Vdd_IO"; +static const char reg_vbat[] = "VBat"; + +/* sysfs interface */ +static ssize_t pn544_test(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pn544_info *info = dev_get_drvdata(dev); + struct i2c_client *client = info->i2c_dev; + struct pn544_nfc_platform_data *pdata = client->dev.platform_data; + + return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test()); +} + +static int pn544_enable(struct pn544_info *info, int mode) +{ + struct pn544_nfc_platform_data *pdata; + struct i2c_client *client = info->i2c_dev; + + int r; + + r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs); + if (r < 0) + return r; + + pdata = client->dev.platform_data; + info->read_irq = PN544_NONE; + if (pdata->enable) + pdata->enable(mode); + + if (mode) { + info->state = PN544_ST_FW_READY; + dev_dbg(&client->dev, "now in FW-mode\n"); + } else { + info->state = PN544_ST_READY; + dev_dbg(&client->dev, "now in HCI-mode\n"); + } + + usleep_range(10000, 15000); + + return 0; +} + +static void pn544_disable(struct pn544_info *info) +{ + struct pn544_nfc_platform_data *pdata; + struct i2c_client *client = info->i2c_dev; + + pdata = client->dev.platform_data; + if (pdata->disable) + pdata->disable(); + + info->state = PN544_ST_COLD; + + dev_dbg(&client->dev, "Now in OFF-mode\n"); + + msleep(PN544_RESETVEN_TIME); + + info->read_irq = PN544_NONE; + regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs); +} + +static int check_crc(u8 *buf, int buflen) +{ + u8 len; + u16 crc; + + len = buf[0] + 1; + if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) { + pr_err(PN544_DRIVER_NAME + ": CRC; corrupt packet len %u (%d)\n", len, buflen); + print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE, + 16, 2, buf, buflen, false); + return -EPERM; + } + crc = crc_ccitt(0xffff, buf, len - 2); + crc = ~crc; + + if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) { + pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n", + crc, buf[len-1], buf[len-2]); + + print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE, + 16, 2, buf, buflen, false); + return -EPERM; + } + return 0; +} + +static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len) +{ + int r; + + if (len < 4 || len != (buf[0] + 1)) { + dev_err(&client->dev, "%s: Illegal message length: %d\n", + __func__, len); + return -EINVAL; + } + + if (check_crc(buf, len)) + 
return -EINVAL; + + usleep_range(3000, 6000); + + r = i2c_master_send(client, buf, len); + dev_dbg(&client->dev, "send: %d\n", r); + + if (r == -EREMOTEIO) { /* Retry, chip was in standby */ + usleep_range(6000, 10000); + r = i2c_master_send(client, buf, len); + dev_dbg(&client->dev, "send2: %d\n", r); + } + + if (r != len) + return -EREMOTEIO; + + return r; +} + +static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen) +{ + int r; + u8 len; + + /* + * You could read a packet in one go, but then you'd need to read + * max size and rest would be 0xff fill, so we do split reads. + */ + r = i2c_master_recv(client, &len, 1); + dev_dbg(&client->dev, "recv1: %d\n", r); + + if (r != 1) + return -EREMOTEIO; + + if (len < PN544_LLC_HCI_OVERHEAD) + len = PN544_LLC_HCI_OVERHEAD; + else if (len > (PN544_MSG_MAX_SIZE - 1)) + len = PN544_MSG_MAX_SIZE - 1; + + if (1 + len > buflen) /* len+(data+crc16) */ + return -EMSGSIZE; + + buf[0] = len; + + r = i2c_master_recv(client, buf + 1, len); + dev_dbg(&client->dev, "recv2: %d\n", r); + + if (r != len) + return -EREMOTEIO; + + usleep_range(3000, 6000); + + return r + 1; +} + +static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len) +{ + int r; + + dev_dbg(&client->dev, "%s\n", __func__); + + if (len < PN544_FW_HEADER_SIZE || + (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len) + return -EINVAL; + + r = i2c_master_send(client, buf, len); + dev_dbg(&client->dev, "fw send: %d\n", r); + + if (r == -EREMOTEIO) { /* Retry, chip was in standby */ + usleep_range(6000, 10000); + r = i2c_master_send(client, buf, len); + dev_dbg(&client->dev, "fw send2: %d\n", r); + } + + if (r != len) + return -EREMOTEIO; + + return r; +} + +static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen) +{ + int r, len; + + if (buflen < PN544_FW_HEADER_SIZE) + return -EINVAL; + + r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE); + dev_dbg(&client->dev, "FW recv1: %d\n", r); + + if (r < 0) + return r; + + if (r < PN544_FW_HEADER_SIZE) + return -EINVAL; + + len = (buf[1] << 8) + buf[2]; + if (len == 0) /* just header, no additional data */ + return r; + + if (len > buflen - PN544_FW_HEADER_SIZE) + return -EMSGSIZE; + + r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len); + dev_dbg(&client->dev, "fw recv2: %d\n", r); + + if (r != len) + return -EINVAL; + + return r + PN544_FW_HEADER_SIZE; +} + +static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id) +{ + struct pn544_info *info = dev_id; + struct i2c_client *client = info->i2c_dev; + + BUG_ON(!info); + BUG_ON(irq != info->i2c_dev->irq); + + dev_dbg(&client->dev, "IRQ\n"); + + mutex_lock(&info->read_mutex); + info->read_irq = PN544_INT; + mutex_unlock(&info->read_mutex); + + wake_up_interruptible(&info->read_wait); + + return IRQ_HANDLED; +} + +static enum pn544_irq pn544_irq_state(struct pn544_info *info) +{ + enum pn544_irq irq; + + mutex_lock(&info->read_mutex); + irq = info->read_irq; + mutex_unlock(&info->read_mutex); + /* + * XXX: should we check GPIO-line status directly? + * return pdata->irq_status() ? 
PN544_INT : PN544_NONE; + */ + + return irq; +} + +static ssize_t pn544_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + enum pn544_irq irq; + size_t len; + int r = 0; + + dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__, + info, count); + + mutex_lock(&info->mutex); + + if (info->state == PN544_ST_COLD) { + r = -ENODEV; + goto out; + } + + irq = pn544_irq_state(info); + if (irq == PN544_NONE) { + if (file->f_flags & O_NONBLOCK) { + r = -EAGAIN; + goto out; + } + + if (wait_event_interruptible(info->read_wait, + (info->read_irq == PN544_INT))) { + r = -ERESTARTSYS; + goto out; + } + } + + if (info->state == PN544_ST_FW_READY) { + len = min(count, info->buflen); + + mutex_lock(&info->read_mutex); + r = pn544_fw_read(info->i2c_dev, info->buf, len); + info->read_irq = PN544_NONE; + mutex_unlock(&info->read_mutex); + + if (r < 0) { + dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r); + goto out; + } + + print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE, + 16, 2, info->buf, r, false); + + *offset += r; + if (copy_to_user(buf, info->buf, r)) { + r = -EFAULT; + goto out; + } + } else { + len = min(count, info->buflen); + + mutex_lock(&info->read_mutex); + r = pn544_i2c_read(info->i2c_dev, info->buf, len); + info->read_irq = PN544_NONE; + mutex_unlock(&info->read_mutex); + + if (r < 0) { + dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r); + goto out; + } + print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE, + 16, 2, info->buf, r, false); + + *offset += r; + if (copy_to_user(buf, info->buf, r)) { + r = -EFAULT; + goto out; + } + } + +out: + mutex_unlock(&info->mutex); + + return r; +} + +static unsigned int pn544_poll(struct file *file, poll_table *wait) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + int r = 0; + + dev_dbg(&client->dev, "%s: info: %p\n", __func__, info); + + mutex_lock(&info->mutex); + + if (info->state == PN544_ST_COLD) { + r = -ENODEV; + goto out; + } + + poll_wait(file, &info->read_wait, wait); + + if (pn544_irq_state(info) == PN544_INT) { + r = POLLIN | POLLRDNORM; + goto out; + } +out: + mutex_unlock(&info->mutex); + + return r; +} + +static ssize_t pn544_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + ssize_t len; + int r; + + dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__, + info, count); + + mutex_lock(&info->mutex); + + if (info->state == PN544_ST_COLD) { + r = -ENODEV; + goto out; + } + + /* + * XXX: should we detect rset-writes and clean possible + * read_irq state + */ + if (info->state == PN544_ST_FW_READY) { + size_t fw_len; + + if (count < PN544_FW_HEADER_SIZE) { + r = -EINVAL; + goto out; + } + + len = min(count, info->buflen); + if (copy_from_user(info->buf, buf, len)) { + r = -EFAULT; + goto out; + } + + print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE, + 16, 2, info->buf, len, false); + + fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) + + info->buf[2]; + + if (len > fw_len) /* 1 msg at a time */ + len = fw_len; + + r = pn544_fw_write(info->i2c_dev, info->buf, len); + } else { + if (count < PN544_LLC_MIN_SIZE) { + r = -EINVAL; + goto out; + } + + len = min(count, info->buflen); 
+ if (copy_from_user(info->buf, buf, len)) { + r = -EFAULT; + goto out; + } + + print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE, + 16, 2, info->buf, len, false); + + if (len > (info->buf[0] + 1)) /* 1 msg at a time */ + len = info->buf[0] + 1; + + r = pn544_i2c_write(info->i2c_dev, info->buf, len); + } +out: + mutex_unlock(&info->mutex); + + return r; + +} + +static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + struct pn544_nfc_platform_data *pdata; + unsigned int val; + int r = 0; + + dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd); + + mutex_lock(&info->mutex); + + if (info->state == PN544_ST_COLD) { + r = -ENODEV; + goto out; + } + + pdata = info->i2c_dev->dev.platform_data; + switch (cmd) { + case PN544_GET_FW_MODE: + dev_dbg(&client->dev, "%s: PN544_GET_FW_MODE\n", __func__); + + val = (info->state == PN544_ST_FW_READY); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) { + r = -EFAULT; + goto out; + } + + break; + + case PN544_SET_FW_MODE: + dev_dbg(&client->dev, "%s: PN544_SET_FW_MODE\n", __func__); + + if (copy_from_user(&val, (void __user *)arg, sizeof(val))) { + r = -EFAULT; + goto out; + } + + if (val) { + if (info->state == PN544_ST_FW_READY) + break; + + pn544_disable(info); + r = pn544_enable(info, FW_MODE); + if (r < 0) + goto out; + } else { + if (info->state == PN544_ST_READY) + break; + pn544_disable(info); + r = pn544_enable(info, HCI_MODE); + if (r < 0) + goto out; + } + file->f_pos = info->read_offset; + break; + + case TCGETS: + dev_dbg(&client->dev, "%s: TCGETS\n", __func__); + + r = -ENOIOCTLCMD; + break; + + default: + dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd); + r = -ENOIOCTLCMD; + break; + } + +out: + mutex_unlock(&info->mutex); + + return r; +} + +static int pn544_open(struct inode *inode, struct file *file) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + int r = 0; + + dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__, + info, info->i2c_dev); + + mutex_lock(&info->mutex); + + /* + * Only 1 at a time. 
+ * XXX: maybe user (counter) would work better + */ + if (info->state != PN544_ST_COLD) { + r = -EBUSY; + goto out; + } + + file->f_pos = info->read_offset; + r = pn544_enable(info, HCI_MODE); + +out: + mutex_unlock(&info->mutex); + return r; +} + +static int pn544_close(struct inode *inode, struct file *file) +{ + struct pn544_info *info = container_of(file->private_data, + struct pn544_info, miscdev); + struct i2c_client *client = info->i2c_dev; + + dev_dbg(&client->dev, "%s: info: %p, client %p\n", + __func__, info, info->i2c_dev); + + mutex_lock(&info->mutex); + pn544_disable(info); + mutex_unlock(&info->mutex); + + return 0; +} + +static const struct file_operations pn544_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = pn544_read, + .write = pn544_write, + .poll = pn544_poll, + .open = pn544_open, + .release = pn544_close, + .unlocked_ioctl = pn544_ioctl, +}; + +#ifdef CONFIG_PM +static int pn544_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pn544_info *info; + int r = 0; + + dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client); + + info = i2c_get_clientdata(client); + dev_info(&client->dev, "%s: info: %p, client %p\n", __func__, + info, client); + + mutex_lock(&info->mutex); + + switch (info->state) { + case PN544_ST_FW_READY: + /* Do not suspend while upgrading FW, please! */ + r = -EPERM; + break; + + case PN544_ST_READY: + /* + * CHECK: Device should be in standby-mode. No way to check? + * Allowing low power mode for the regulator is potentially + * dangerous if pn544 does not go to suspension. + */ + break; + + case PN544_ST_COLD: + break; + }; + + mutex_unlock(&info->mutex); + return r; +} + +static int pn544_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pn544_info *info = i2c_get_clientdata(client); + int r = 0; + + dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__, + info, client); + + mutex_lock(&info->mutex); + + switch (info->state) { + case PN544_ST_READY: + /* + * CHECK: If regulator low power mode is allowed in + * pn544_suspend, we should go back to normal mode + * here. 
+ */ + break; + + case PN544_ST_COLD: + break; + + case PN544_ST_FW_READY: + break; + }; + + mutex_unlock(&info->mutex); + + return r; +} + +static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume); +#endif + +static struct device_attribute pn544_attr = + __ATTR(nfc_test, S_IRUGO, pn544_test, NULL); + +static int __devinit pn544_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pn544_info *info; + struct pn544_nfc_platform_data *pdata; + int r = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + dev_dbg(&client->dev, "IRQ: %d\n", client->irq); + + /* private data allocation */ + info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL); + if (!info) { + dev_err(&client->dev, + "Cannot allocate memory for pn544_info.\n"); + r = -ENOMEM; + goto err_info_alloc; + } + + info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER); + info->buf = kzalloc(info->buflen, GFP_KERNEL); + if (!info->buf) { + dev_err(&client->dev, + "Cannot allocate memory for pn544_info->buf.\n"); + r = -ENOMEM; + goto err_buf_alloc; + } + + info->regs[0].supply = reg_vdd_io; + info->regs[1].supply = reg_vbat; + r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), + info->regs); + if (r < 0) + goto err_kmalloc; + + info->i2c_dev = client; + info->state = PN544_ST_COLD; + info->read_irq = PN544_NONE; + mutex_init(&info->read_mutex); + mutex_init(&info->mutex); + init_waitqueue_head(&info->read_wait); + i2c_set_clientdata(client, info); + pdata = client->dev.platform_data; + if (!pdata) { + dev_err(&client->dev, "No platform data\n"); + r = -EINVAL; + goto err_reg; + } + + if (!pdata->request_resources) { + dev_err(&client->dev, "request_resources() missing\n"); + r = -EINVAL; + goto err_reg; + } + + r = pdata->request_resources(client); + if (r) { + dev_err(&client->dev, "Cannot get platform resources\n"); + goto err_reg; + } + + r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn, + IRQF_TRIGGER_RISING, PN544_DRIVER_NAME, + info); + if (r < 0) { + dev_err(&client->dev, "Unable to register IRQ handler\n"); + goto err_res; + } + + /* If we don't have the test we don't need the sysfs file */ + if (pdata->test) { + r = device_create_file(&client->dev, &pn544_attr); + if (r) { + dev_err(&client->dev, + "sysfs registration failed, error %d\n", r); + goto err_irq; + } + } + + info->miscdev.minor = MISC_DYNAMIC_MINOR; + info->miscdev.name = PN544_DRIVER_NAME; + info->miscdev.fops = &pn544_fops; + info->miscdev.parent = &client->dev; + r = misc_register(&info->miscdev); + if (r < 0) { + dev_err(&client->dev, "Device registration failed\n"); + goto err_sysfs; + } + + dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n", + __func__, info, pdata, client); + + return 0; + +err_sysfs: + if (pdata->test) + device_remove_file(&client->dev, &pn544_attr); +err_irq: + free_irq(client->irq, info); +err_res: + if (pdata->free_resources) + pdata->free_resources(); +err_reg: + regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs); +err_kmalloc: + kfree(info->buf); +err_buf_alloc: + kfree(info); +err_info_alloc: + return r; +} + +static __devexit int pn544_remove(struct i2c_client *client) +{ + struct pn544_info *info = i2c_get_clientdata(client); + struct pn544_nfc_platform_data *pdata = client->dev.platform_data; + + dev_dbg(&client->dev, "%s\n", __func__); + + misc_deregister(&info->miscdev); + if (pdata->test) + device_remove_file(&client->dev, &pn544_attr); + + if (info->state != PN544_ST_COLD) { + if (pdata->disable) + pdata->disable(); + + info->read_irq = PN544_NONE; + 
} + + free_irq(client->irq, info); + if (pdata->free_resources) + pdata->free_resources(); + + regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs); + kfree(info->buf); + kfree(info); + + return 0; +} + +static struct i2c_driver pn544_driver = { + .driver = { + .name = PN544_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &pn544_pm_ops, +#endif + }, + .probe = pn544_probe, + .id_table = pn544_id_table, + .remove = __devexit_p(pn544_remove), +}; + +static int __init pn544_init(void) +{ + int r; + + pr_debug(DRIVER_DESC ": %s\n", __func__); + + r = i2c_add_driver(&pn544_driver); + if (r) { + pr_err(PN544_DRIVER_NAME ": driver registration failed\n"); + return r; + } + + return 0; +} + +static void __exit pn544_exit(void) +{ + i2c_del_driver(&pn544_driver); + pr_info(DRIVER_DESC ", Exiting.\n"); +} + +module_init(pn544_init); +module_exit(pn544_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7c24dcef2989..44b0aeee83e5 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -168,8 +168,9 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) u32 mask_bits = desc->masked; unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; - mask_bits &= ~1; - mask_bits |= flag; + mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; writel(mask_bits, desc->mask_base + offset); return mask_bits; diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h index feff3bee6fe5..65c42f80f23e 100644 --- a/drivers/pci/msi.h +++ b/drivers/pci/msi.h @@ -6,12 +6,6 @@ #ifndef MSI_H #define MSI_H -#define PCI_MSIX_ENTRY_SIZE 16 -#define PCI_MSIX_ENTRY_LOWER_ADDR 0 -#define PCI_MSIX_ENTRY_UPPER_ADDR 4 -#define PCI_MSIX_ENTRY_DATA 8 -#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 - #define msi_control_reg(base) (base + PCI_MSI_FLAGS) #define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) #define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 24e19c594e57..6fe0772e0e7d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -46,9 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) struct pci_dev *pci_dev = context; if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { + pci_wakeup_event(pci_dev); pci_check_pme_status(pci_dev); pm_runtime_resume(&pci_dev->dev); - pci_wakeup_event(pci_dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); } @@ -399,6 +399,7 @@ static int __init acpi_pci_init(void) if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); + pcie_clear_aspm(); pcie_no_aspm(); } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 8a6f797de8e5..88246dd46452 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -338,7 +338,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, } /** - * __pci_device_probe() + * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device * @pci_dev: PCI device being probed * @@ -449,7 +449,8 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) return error; } - return pci_restore_state(pci_dev); + pci_restore_state(pci_dev); + return 0; } static void pci_pm_default_resume_early(struct pci_dev *pci_dev) diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c 
index f7b68ca6cc98..775e933c2225 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -47,6 +47,10 @@ static int __init pci_stub_init(void) if (rc) return rc; + /* no ids passed actually */ + if (ids[0] == '\0') + return 0; + /* add ids specified in the module parameter */ p = ids; while ((id = strsep(&p, ","))) { @@ -54,6 +58,9 @@ static int __init pci_stub_init(void) subdevice = PCI_ANY_ID, class=0, class_mask=0; int fields; + if (!strlen(id)) + continue; + fields = sscanf(id, "%x:%x:%x:%x:%x:%x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 63d5042f2079..8ecaac983923 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1149,7 +1149,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) sysfs_bin_attr_init(attr); attr->size = rom_size; attr->attr.name = "rom"; - attr->attr.mode = S_IRUSR; + attr->attr.mode = S_IRUSR | S_IWUSR; attr->read = pci_read_rom; attr->write = pci_write_rom; retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 710c8a29be0d..b714d787bddd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -937,14 +937,13 @@ pci_save_state(struct pci_dev *dev) * pci_restore_state - Restore the saved state of a PCI device * @dev: - PCI device that we're dealing with */ -int -pci_restore_state(struct pci_dev *dev) +void pci_restore_state(struct pci_dev *dev) { int i; u32 val; if (!dev->state_saved) - return 0; + return; /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); @@ -968,8 +967,6 @@ pci_restore_state(struct pci_dev *dev) pci_restore_iov_state(dev); dev->state_saved = false; - - return 0; } static int do_pci_enable_device(struct pci_dev *dev, int bars) @@ -1300,22 +1297,6 @@ bool pci_check_pme_status(struct pci_dev *dev) return ret; } -/* - * Time to wait before the system can be put into a sleep state after reporting - * a wakeup event signaled by a PCI device. - */ -#define PCI_WAKEUP_COOLDOWN 100 - -/** - * pci_wakeup_event - Report a wakeup event related to a given PCI device. - * @dev: Device to report the wakeup event for. - */ -void pci_wakeup_event(struct pci_dev *dev) -{ - if (device_may_wakeup(&dev->dev)) - pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN); -} - /** * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. * @dev: Device to handle. @@ -1327,8 +1308,8 @@ void pci_wakeup_event(struct pci_dev *dev) static int pci_pme_wakeup(struct pci_dev *dev, void *ign) { if (pci_check_pme_status(dev)) { - pm_request_resume(&dev->dev); pci_wakeup_event(dev); + pm_request_resume(&dev->dev); } return 0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 7d33f6673868..f69d6e0fda75 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -74,6 +74,12 @@ extern void pci_pm_init(struct pci_dev *dev); extern void platform_pci_wakeup_init(struct pci_dev *dev); extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); +static inline void pci_wakeup_event(struct pci_dev *dev) +{ + /* Wait 100 ms before the system can be put into a sleep state. 
*/ + pm_wakeup_event(&dev->dev, 100); +} + static inline bool pci_is_bridge(struct pci_dev *pci_dev) { return !!(pci_dev->subordinate); @@ -140,14 +146,6 @@ static inline void pci_no_msi(void) { } static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } #endif -#ifdef CONFIG_PCIEAER -void pci_no_aer(void); -bool pci_aer_available(void); -#else -static inline void pci_no_aer(void) { } -static inline bool pci_aer_available(void) { return false; } -#endif - static inline int pci_no_d1d2(struct pci_dev *dev) { unsigned int parent_dstates = 0; diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 2b2b6508efde..58ad7917553c 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 9656e3060412..80c11d131499 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -132,7 +132,6 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) #ifdef CONFIG_ACPI_APEI extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); -extern bool aer_acpi_firmware_first(void); #else static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) { @@ -140,8 +139,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) return pci_dev->__aer_firmware_first; return 0; } - -static inline bool aer_acpi_firmware_first(void) { return false; } #endif static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 71222814c1ec..3188cd96b338 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -68,7 +68,7 @@ struct pcie_link_state { struct aspm_latency acceptable[8]; }; -static int aspm_disabled, aspm_force; +static int aspm_disabled, aspm_force, aspm_clear_state; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -139,7 +139,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { /* Don't enable Clock PM if the link is not Clock PM capable */ if (!link->clkpm_capable && enable) - return; + enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) return; @@ -498,6 +498,10 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) struct pci_dev *child; int pos; u32 reg32; + + if (aspm_clear_state) + return -EINVAL; + /* * Some functions in a slot might not all be PCIe functions, * very strange. 
Disable ASPM for the whole slot @@ -563,12 +567,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) struct pcie_link_state *link; int blacklist = !!pcie_aspm_sanity_check(pdev); - if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state) + if (!pci_is_pcie(pdev) || pdev->link_state) return; if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) return; + if (aspm_disabled && !aspm_clear_state) + return; + /* VIA has a strange chipset, root port is under a bridge */ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && pdev->bus->self) @@ -641,7 +648,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link, *root, *parent_link; - if (aspm_disabled || !pci_is_pcie(pdev) || + if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) || !parent || !parent->link_state) return; if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && @@ -899,6 +906,12 @@ static int __init pcie_aspm_disable(char *str) __setup("pcie_aspm=", pcie_aspm_disable); +void pcie_clear_aspm(void) +{ + if (!aspm_force) + aspm_clear_state = 1; +} + void pcie_no_aspm(void) { if (!aspm_force) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 2f3c90407227..0057344a3fcb 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -26,9 +26,6 @@ #include "../pci.h" #include "portdrv.h" -#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ -#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ - /* * If this switch is set, MSI will not be used for PCIe PME signaling. This * causes the PCIe port driver to use INTx interrupts only, but it turns out @@ -73,22 +70,6 @@ void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) pci_write_config_word(dev, rtctl_pos, rtctl); } -/** - * pcie_pme_clear_status - Clear root port PME interrupt status. - * @dev: PCIe root port or event collector. - */ -static void pcie_pme_clear_status(struct pci_dev *dev) -{ - int rtsta_pos; - u32 rtsta; - - rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; - - pci_read_config_dword(dev, rtsta_pos, &rtsta); - rtsta |= PCI_EXP_RTSTA_PME; - pci_write_config_dword(dev, rtsta_pos, rtsta); -} - /** * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. * @bus: PCI bus to scan. @@ -103,8 +84,8 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip PCIe devices in case we started from a root port. */ if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { - pm_request_resume(&dev->dev); pci_wakeup_event(dev); + pm_request_resume(&dev->dev); ret = true; } @@ -206,8 +187,8 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) /* The device is there, but we have to check its PME status. */ found = pci_check_pme_status(dev); if (found) { - pm_request_resume(&dev->dev); pci_wakeup_event(dev); + pm_request_resume(&dev->dev); } pci_dev_put(dev); } else if (devfn) { @@ -253,7 +234,7 @@ static void pcie_pme_work_fn(struct work_struct *work) * Clear PME status of the port. If there are other * pending PMEs, the status will be set again. 
*/ - pcie_pme_clear_status(port); + pcie_clear_root_pme_status(port); spin_unlock_irq(&data->lock); pcie_pme_handle_request(port, rtsta & 0xffff); @@ -378,7 +359,7 @@ static int pcie_pme_probe(struct pcie_device *srv) port = srv->port; pcie_pme_interrupt_enable(port, false); - pcie_pme_clear_status(port); + pcie_clear_root_pme_status(port); ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); if (ret) { @@ -402,7 +383,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) spin_lock_irq(&data->lock); pcie_pme_interrupt_enable(port, false); - pcie_pme_clear_status(port); + pcie_clear_root_pme_status(port); data->noirq = true; spin_unlock_irq(&data->lock); @@ -422,7 +403,7 @@ static int pcie_pme_resume(struct pcie_device *srv) spin_lock_irq(&data->lock); data->noirq = false; - pcie_pme_clear_status(port); + pcie_clear_root_pme_status(port); pcie_pme_interrupt_enable(port, true); spin_unlock_irq(&data->lock); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 7b5aba0a3291..bd00a01aef14 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -20,9 +20,6 @@ #define get_descriptor_id(type, service) (((type - 4) << 4) | service) -extern bool pcie_ports_disabled; -extern bool pcie_ports_auto; - extern struct bus_type pcie_port_bus_type; extern int pcie_port_device_register(struct pci_dev *dev); #ifdef CONFIG_PM @@ -35,6 +32,8 @@ extern void pcie_port_bus_unregister(void); struct pci_dev; +extern void pcie_clear_root_pme_status(struct pci_dev *dev); + #ifdef CONFIG_PCIE_PME extern bool pcie_pme_msi_disabled; diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c index 5982b6a63b89..a86b56e5f2f2 100644 --- a/drivers/pci/pcie/portdrv_acpi.c +++ b/drivers/pci/pcie/portdrv_acpi.c @@ -33,7 +33,7 @@ */ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) { - acpi_status status; + struct acpi_pci_root *root; acpi_handle handle; u32 flags; @@ -44,26 +44,11 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) if (!handle) return -EINVAL; - flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL - | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL - | OSC_PCI_EXPRESS_PME_CONTROL; - - if (pci_aer_available()) { - if (aer_acpi_firmware_first()) - dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); - else - flags |= OSC_PCI_EXPRESS_AER_CONTROL; - } - - status = acpi_pci_osc_control_set(handle, &flags, - OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); - if (ACPI_FAILURE(status)) { - dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n", - status); + root = acpi_pci_find_root(handle); + if (!root) return -ENODEV; - } - dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags); + flags = root->osc_control_set; *srv_mask = PCIE_PORT_SERVICE_VC; if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index a9c222d79ebc..5130d0d22390 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -241,17 +241,17 @@ static int get_port_device_capability(struct pci_dev *dev) int cap_mask; int err; + if (pcie_ports_disabled) + return 0; + err = pcie_port_platform_notify(dev, &cap_mask); - if (pcie_ports_auto) { - if (err) { - pcie_no_aspm(); - return 0; - } - } else { + if (!pcie_ports_auto) { cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | PCIE_PORT_SERVICE_VC; if (pci_aer_available()) cap_mask |= PCIE_PORT_SERVICE_AER; + } else if (err) { + return 0; } pos = pci_pcie_cap(dev); @@ -349,15 +349,18 @@ int 
pcie_port_device_register(struct pci_dev *dev) int status, capabilities, i, nr_service; int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; - /* Get and check PCI Express port services */ - capabilities = get_port_device_capability(dev); - if (!capabilities) - return -ENODEV; - /* Enable PCI Express port device */ status = pci_enable_device(dev); if (status) return status; + + /* Get and check PCI Express port services */ + capabilities = get_port_device_capability(dev); + if (!capabilities) { + pcie_no_aspm(); + return 0; + } + pci_set_master(dev); /* * Initialize service irqs. Don't use service devices that diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index f9033e190fb6..e0610bda1dea 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -57,6 +57,22 @@ __setup("pcie_ports=", pcie_port_setup); /* global data */ +/** + * pcie_clear_root_pme_status - Clear root port PME interrupt status. + * @dev: PCIe root port or event collector. + */ +void pcie_clear_root_pme_status(struct pci_dev *dev) +{ + int rtsta_pos; + u32 rtsta; + + rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; + + pci_read_config_dword(dev, rtsta_pos, &rtsta); + rtsta |= PCI_EXP_RTSTA_PME; + pci_write_config_dword(dev, rtsta_pos, rtsta); +} + static int pcie_portdrv_restore_config(struct pci_dev *dev) { int retval; @@ -69,6 +85,20 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) } #ifdef CONFIG_PM +static int pcie_port_resume_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + /* + * Some BIOSes forget to clear Root PME Status bits after system wakeup + * which breaks ACPI-based runtime wakeup on PCI Express, so clear those + * bits now just in case (shouldn't hurt). + */ + if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) + pcie_clear_root_pme_status(pdev); + return 0; +} + static const struct dev_pm_ops pcie_portdrv_pm_ops = { .suspend = pcie_port_device_suspend, .resume = pcie_port_device_resume, @@ -76,6 +106,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .thaw = pcie_port_device_resume, .poweroff = pcie_port_device_suspend, .restore = pcie_port_device_resume, + .resume_noirq = pcie_port_resume_noirq, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -327,10 +358,8 @@ static int __init pcie_portdrv_init(void) { int retval; - if (pcie_ports_disabled) { - pcie_no_aspm(); - return -EACCES; - } + if (pcie_ports_disabled) + return pci_register_driver(&pcie_portdriver); dmi_check_system(pcie_portdrv_dmi_table); diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h index 8146e3bee2e8..f558e1adf954 100644 --- a/drivers/pcmcia/m32r_cfc.h +++ b/drivers/pcmcia/m32r_cfc.h @@ -9,7 +9,7 @@ #endif /* - * M32R PC Card Controler + * M32R PC Card Controller */ #define M32R_PCC0_BASE 0x00ef7000 #define M32R_PCC1_BASE 0x00ef7020 diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h index e4fffe417ba9..f95c58563bc8 100644 --- a/drivers/pcmcia/m32r_pcc.h +++ b/drivers/pcmcia/m32r_pcc.h @@ -5,7 +5,7 @@ #define M32R_MAX_PCC 2 /* - * M32R PC Card Controler + * M32R PC Card Controller */ #define M32R_PCC0_BASE 0x00ef7000 #define M32R_PCC1_BASE 0x00ef7020 diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 99d4f23cb435..0db482771fb5 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -1198,7 +1198,7 @@ static int __init m8xx_probe(struct platform_device *ofdev, out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); - /* intialize the fixed memory 
windows */ + /* initialize the fixed memory windows */ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index ee40d681edd0..c5c4b8c32eb8 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -1021,7 +1021,7 @@ static int update_bl_status(struct backlight_device *bd) return 0; } -static struct backlight_ops acer_bl_ops = { +static const struct backlight_ops acer_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index d235f44fd7a3..f3aa6a7fdab6 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -640,7 +640,7 @@ static int update_bl_status(struct backlight_device *bd) return asus_lcd_set(asus, value); } -static struct backlight_ops asusbl_ops = { +static const struct backlight_ops asusbl_ops = { .get_brightness = asus_read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index ca05aefd03bf..4633fd8532cc 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1467,7 +1467,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type) return 0; } -static struct backlight_ops asus_backlight_data = { +static const struct backlight_ops asus_backlight_data = { .get_brightness = read_brightness, .update_status = set_brightness_status, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index cf8a89a0d8f5..34657f96b5a5 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -546,7 +546,7 @@ out: return buffer->output[1]; } -static struct backlight_ops dell_ops = { +static const struct backlight_ops dell_ops = { .get_brightness = dell_get_intensity, .update_status = dell_send_intensity, }; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index e9fc530e7dc2..49d9ad708f89 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1126,7 +1126,7 @@ static int update_bl_status(struct backlight_device *bd) return set_brightness(bd, bd->props.brightness); } -static struct backlight_ops eeepcbl_ops = { +static const struct backlight_ops eeepcbl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index ad88b2ec34a1..95e3b0948e9c 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -437,7 +437,7 @@ static int bl_update_status(struct backlight_device *b) return ret; } -static struct backlight_ops fujitsubl_ops = { +static const struct backlight_ops fujitsubl_ops = { .get_brightness = bl_get_brightness, .update_status = bl_update_status, }; @@ -689,7 +689,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) if (error) goto err_free_input_dev; - result = acpi_bus_get_power(fujitsu->acpi_handle, &state); + result = acpi_bus_update_power(fujitsu->acpi_handle, &state); if (result) { printk(KERN_ERR "Error reading power state\n"); goto err_unregister_input_dev; @@ -857,7 +857,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) if (error) goto err_free_input_dev; - result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); + result = 
acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); if (result) { printk(KERN_ERR "Error reading power state\n"); goto err_unregister_input_dev; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index b4a95bb2f232..5e83370b0812 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -858,7 +858,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd) } static struct backlight_device *sony_backlight_device; -static struct backlight_ops sony_backlight_ops = { +static const struct backlight_ops sony_backlight_ops = { .update_status = sony_backlight_update_status, .get_brightness = sony_backlight_get_brightness, }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a974ca383cb9..dd599585c6a9 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6110,7 +6110,7 @@ static void tpacpi_brightness_notify_change(void) BACKLIGHT_UPDATE_HOTKEY); } -static struct backlight_ops ibm_backlight_data = { +static const struct backlight_ops ibm_backlight_data = { .get_brightness = brightness_get, .update_status = brightness_update_status, }; @@ -7194,7 +7194,7 @@ static struct ibm_struct volume_driver_data = { * TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41) * * FIRMWARE BUG: on some models, EC 0x2f might not be initialized at - * boot. Apparently the EC does not intialize it, so unless ACPI DSDT + * boot. Apparently the EC does not initialize it, so unless ACPI DSDT * does so, its initial value is meaningless (0x07). * * For firmware bugs, refer to: diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 4276da7291b8..209cced786c6 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -841,7 +841,7 @@ static void remove_toshiba_proc_entries(void) remove_proc_entry("version", toshiba_proc_dir); } -static struct backlight_ops toshiba_backlight_data = { +static const struct backlight_ops toshiba_backlight_data = { .get_brightness = get_lcd, .update_status = set_lcd_status, }; diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile index 8de3775ec242..bfba893cb321 100644 --- a/drivers/pnp/Makefile +++ b/drivers/pnp/Makefile @@ -2,11 +2,13 @@ # Makefile for the Linux Plug-and-Play Support. 
# -obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o +obj-y := pnp.o + +pnp-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o obj-$(CONFIG_PNPACPI) += pnpacpi/ obj-$(CONFIG_PNPBIOS) += pnpbios/ obj-$(CONFIG_ISAPNP) += isapnp/ # pnp_system_init goes after pnpacpi/pnpbios init -obj-y += system.o +pnp-y += system.o diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 0f34d962fd3c..cb6ce42f8e77 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -220,10 +220,5 @@ subsys_initcall(pnp_init); int pnp_debug; #if defined(CONFIG_PNP_DEBUG_MESSAGES) -static int __init pnp_debug_setup(char *__unused) -{ - pnp_debug = 1; - return 1; -} -__setup("pnp.debug", pnp_debug_setup); +module_param_named(debug, pnp_debug, int, 0644); #endif diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index d1dbb9df53fa..00e94032531a 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -189,8 +189,11 @@ static int pnp_bus_resume(struct device *dev) if (!pnp_drv) return 0; - if (pnp_dev->protocol->resume) - pnp_dev->protocol->resume(pnp_dev); + if (pnp_dev->protocol->resume) { + error = pnp_dev->protocol->resume(pnp_dev); + if (error) + return error; + } if (pnp_can_write(pnp_dev)) { error = pnp_start_dev(pnp_dev); diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile index cac18bbfb817..6e607aa33aa3 100644 --- a/drivers/pnp/isapnp/Makefile +++ b/drivers/pnp/isapnp/Makefile @@ -1,7 +1,7 @@ # # Makefile for the kernel ISAPNP driver. # +obj-y += pnp.o +pnp-y := core.o compat.o -isapnp-proc-$(CONFIG_PROC_FS) = proc.o - -obj-y := core.o compat.o $(isapnp-proc-y) +pnp-$(CONFIG_PROC_FS) += proc.o diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile index 905326fcca85..40c93da18252 100644 --- a/drivers/pnp/pnpacpi/Makefile +++ b/drivers/pnp/pnpacpi/Makefile @@ -1,5 +1,6 @@ # # Makefile for the kernel PNPACPI driver. 
# +obj-y += pnp.o -obj-y := core.o rsparser.o +pnp-y := core.o rsparser.o diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 57313f4658bc..ca84d5099ce7 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -81,12 +81,19 @@ static int pnpacpi_get_resources(struct pnp_dev *dev) static int pnpacpi_set_resources(struct pnp_dev *dev) { - struct acpi_device *acpi_dev = dev->data; - acpi_handle handle = acpi_dev->handle; + struct acpi_device *acpi_dev; + acpi_handle handle; struct acpi_buffer buffer; int ret; pnp_dbg(&dev->dev, "set resources\n"); + + handle = DEVICE_ACPI_HANDLE(&dev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); + return -ENODEV; + } + ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; @@ -105,12 +112,18 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) static int pnpacpi_disable_resources(struct pnp_dev *dev) { - struct acpi_device *acpi_dev = dev->data; - acpi_handle handle = acpi_dev->handle; + struct acpi_device *acpi_dev; + acpi_handle handle; int ret; dev_dbg(&dev->dev, "disable resources\n"); + handle = DEVICE_ACPI_HANDLE(&dev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); + return 0; + } + /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ ret = 0; if (acpi_bus_power_manageable(handle)) @@ -124,46 +137,74 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) #ifdef CONFIG_ACPI_SLEEP static bool pnpacpi_can_wakeup(struct pnp_dev *dev) { - struct acpi_device *acpi_dev = dev->data; - acpi_handle handle = acpi_dev->handle; + struct acpi_device *acpi_dev; + acpi_handle handle; + + handle = DEVICE_ACPI_HANDLE(&dev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); + return false; + } return acpi_bus_can_wakeup(handle); } static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { - struct acpi_device *acpi_dev = dev->data; - acpi_handle handle = acpi_dev->handle; - int power_state; + struct acpi_device *acpi_dev; + acpi_handle handle; + int error = 0; + + handle = DEVICE_ACPI_HANDLE(&dev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); + return 0; + } if (device_can_wakeup(&dev->dev)) { - int rc = acpi_pm_device_sleep_wake(&dev->dev, + error = acpi_pm_device_sleep_wake(&dev->dev, device_may_wakeup(&dev->dev)); + if (error) + return error; + } + + if (acpi_bus_power_manageable(handle)) { + int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); + + if (power_state < 0) + power_state = (state.event == PM_EVENT_ON) ? + ACPI_STATE_D0 : ACPI_STATE_D3; - if (rc) - return rc; + /* + * acpi_bus_set_power() often fails (keyboard port can't be + * powered-down?), and in any case, our return value is ignored + * by pnp_bus_suspend(). Hence we don't revert the wakeup + * setting if the set_power fails. + */ + error = acpi_bus_set_power(handle, power_state); } - power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); - if (power_state < 0) - power_state = (state.event == PM_EVENT_ON) ? - ACPI_STATE_D0 : ACPI_STATE_D3; - - /* acpi_bus_set_power() often fails (keyboard port can't be - * powered-down?), and in any case, our return value is ignored - * by pnp_bus_suspend(). 
Hence we don't revert the wakeup - setting if the set_power fails. - */ - return acpi_bus_set_power(handle, power_state); + + return error; } static int pnpacpi_resume(struct pnp_dev *dev) { - struct acpi_device *acpi_dev = dev->data; - acpi_handle handle = acpi_dev->handle; + struct acpi_device *acpi_dev; + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + int error = 0; + + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); + return -ENODEV; + } if (device_may_wakeup(&dev->dev)) acpi_pm_device_sleep_wake(&dev->dev, false); - return acpi_bus_set_power(handle, ACPI_STATE_D0); + + if (acpi_bus_power_manageable(handle)) + error = acpi_bus_set_power(handle, ACPI_STATE_D0); + + return error; } #endif diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile index 3cd3ed760605..240b0ffb83ca 100644 --- a/drivers/pnp/pnpbios/Makefile +++ b/drivers/pnp/pnpbios/Makefile @@ -1,7 +1,8 @@ # # Makefile for the kernel PNPBIOS driver. # +obj-y := pnp.o -pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o +pnp-y := core.o bioscalls.o rsparser.o -obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) +pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 60d83d983a36..61bf5d724139 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -136,6 +136,16 @@ config BATTERY_MAX17040 in handheld and portable equipment. The MAX17040 is configured to operate with a single lithium cell +config BATTERY_MAX17042 + tristate "Maxim MAX17042/8997/8966 Fuel Gauge" + depends on I2C + help + MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries + in handheld and portable equipment. The MAX17042 is configured + to operate with a single lithium cell. MAX8997 and MAX8966 are + multi-function devices that include fuel gauges that are compatible + with MAX17042. + config BATTERY_Z2 tristate "Z2 battery driver" depends on I2C && MACH_ZIPIT2 @@ -185,4 +195,14 @@ config CHARGER_TWL4030 help Say Y here to enable support for TWL4030 Battery Charge Interface. +config CHARGER_GPIO + tristate "GPIO charger" + depends on GPIOLIB + help + Say Y to include support for chargers which report their online status + through a GPIO pin. + + This driver can be built as a module. If so, the module will be + called gpio-charger.
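For context on the new CHARGER_GPIO entry, a minimal sketch of the board-support side that the gpio-charger driver added further down (drivers/power/gpio-charger.c) expects. The field names mirror what gpio_charger_probe() and gpio_charger_get_property() read from platform data; the header path, GPIO number and battery name are assumptions for illustration only:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/power/gpio-charger.h>	/* assumed header location for the pdata struct */

/* hypothetical battery fed by this charger */
static char *board_charger_supplicants[] = { "main-battery" };

static struct gpio_charger_platform_data board_charger_pdata = {
	.name			= "ac",			/* driver falls back to "gpio-charger" if NULL */
	.type			= POWER_SUPPLY_TYPE_MAINS,
	.gpio			= 42,			/* hypothetical AC-detect GPIO */
	.gpio_active_low	= 0,			/* pin reads high while the charger is online */
	.supplied_to		= board_charger_supplicants,
	.num_supplicants	= ARRAY_SIZE(board_charger_supplicants),
};

static struct platform_device board_charger_device = {
	.name	= "gpio-charger",			/* must match the platform_driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &board_charger_pdata,
	},
};

Board init code would then call platform_device_register(&board_charger_device); the probe below requests the GPIO, registers the supply and, if the GPIO maps to an interrupt, uses it to raise power_supply_changed() events on plug and unplug.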
+ endif # POWER_SUPPLY diff --git a/drivers/power/Makefile b/drivers/power/Makefile index c75772eb157c..8385bfae8728 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o +obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o obj-$(CONFIG_BATTERY_Z2) += z2_battery.o obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o @@ -32,3 +33,4 @@ obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o +obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c index 039f41ae217d..548d263b1ad0 100644 --- a/drivers/power/collie_battery.c +++ b/drivers/power/collie_battery.c @@ -295,7 +295,7 @@ static struct { static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state) { /* flush all pending status updates */ - flush_scheduled_work(); + flush_work_sync(&bat_work); return 0; } @@ -362,7 +362,7 @@ err_psy_reg_bu: err_psy_reg_main: /* see comment in collie_bat_remove */ - flush_scheduled_work(); + cancel_work_sync(&bat_work); i--; err_gpio: @@ -382,12 +382,11 @@ static void __devexit collie_bat_remove(struct ucb1x00_dev *dev) power_supply_unregister(&collie_bat_main.psy); /* - * now flush all pending work. - * we won't get any more schedules, since all - * sources (isr and external_power_changed) - * are unregistered now. + * Now cancel the bat_work. We won't get any more schedules, + * since all sources (isr and external_power_changed) are + * unregistered now. */ - flush_scheduled_work(); + cancel_work_sync(&bat_work); for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--) gpio_free(gpios[i].gpio); diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index e7f89785beef..e534290f3256 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) if (di->rem_capacity > 100) di->rem_capacity = 100; - if (di->current_uA >= 100L) + if (di->current_uA < -100L) di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L) / (di->current_uA / 100L); else diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c new file mode 100644 index 000000000000..25b88ac1d44c --- /dev/null +++ b/drivers/power/gpio-charger.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen + * Driver for chargers which report their online status through a GPIO pin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct gpio_charger { + const struct gpio_charger_platform_data *pdata; + unsigned int irq; + + struct power_supply charger; +}; + +static irqreturn_t gpio_charger_irq(int irq, void *devid) +{ + struct power_supply *charger = devid; + + power_supply_changed(charger); + + return IRQ_HANDLED; +} + +static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy) +{ + return container_of(psy, struct gpio_charger, charger); +} + +static int gpio_charger_get_property(struct power_supply *psy, + enum power_supply_property psp, union power_supply_propval *val) +{ + struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy); + const struct gpio_charger_platform_data *pdata = gpio_charger->pdata; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = gpio_get_value(pdata->gpio); + val->intval ^= pdata->gpio_active_low; + break; + default: + return -EINVAL; + } + + return 0; +} + +static enum power_supply_property gpio_charger_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static int __devinit gpio_charger_probe(struct platform_device *pdev) +{ + const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data; + struct gpio_charger *gpio_charger; + struct power_supply *charger; + int ret; + int irq; + + if (!pdata) { + dev_err(&pdev->dev, "No platform data\n"); + return -EINVAL; + } + + if (!gpio_is_valid(pdata->gpio)) { + dev_err(&pdev->dev, "Invalid gpio pin\n"); + return -EINVAL; + } + + gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL); + if (!gpio_charger) { + dev_err(&pdev->dev, "Failed to alloc driver structure\n"); + return -ENOMEM; + } + + charger = &gpio_charger->charger; + + charger->name = pdata->name ? 
pdata->name : "gpio-charger"; + charger->type = pdata->type; + charger->properties = gpio_charger_properties; + charger->num_properties = ARRAY_SIZE(gpio_charger_properties); + charger->get_property = gpio_charger_get_property; + charger->supplied_to = pdata->supplied_to; + charger->num_supplicants = pdata->num_supplicants; + + ret = gpio_request(pdata->gpio, dev_name(&pdev->dev)); + if (ret) { + dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret); + goto err_free; + } + ret = gpio_direction_input(pdata->gpio); + if (ret) { + dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret); + goto err_gpio_free; + } + + gpio_charger->pdata = pdata; + + ret = power_supply_register(&pdev->dev, charger); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register power supply: %d\n", + ret); + goto err_gpio_free; + } + + irq = gpio_to_irq(pdata->gpio); + if (irq > 0) { + ret = request_any_context_irq(irq, gpio_charger_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + dev_name(&pdev->dev), charger); + if (ret) + dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret); + else + gpio_charger->irq = irq; + } + + platform_set_drvdata(pdev, gpio_charger); + + return 0; + +err_gpio_free: + gpio_free(pdata->gpio); +err_free: + kfree(gpio_charger); + return ret; +} + +static int __devexit gpio_charger_remove(struct platform_device *pdev) +{ + struct gpio_charger *gpio_charger = platform_get_drvdata(pdev); + + if (gpio_charger->irq) + free_irq(gpio_charger->irq, &gpio_charger->charger); + + power_supply_unregister(&gpio_charger->charger); + + gpio_free(gpio_charger->pdata->gpio); + + platform_set_drvdata(pdev, NULL); + kfree(gpio_charger); + + return 0; +} + +static struct platform_driver gpio_charger_driver = { + .probe = gpio_charger_probe, + .remove = __devexit_p(gpio_charger_remove), + .driver = { + .name = "gpio-charger", + .owner = THIS_MODULE, + }, +}; + +static int __init gpio_charger_init(void) +{ + return platform_driver_register(&gpio_charger_driver); +} +module_init(gpio_charger_init); + +static void __exit gpio_charger_exit(void) +{ + platform_driver_unregister(&gpio_charger_driver); +} +module_exit(gpio_charger_exit); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:gpio-charger"); diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c index 36cf402c0677..bce3a01da2f0 100644 --- a/drivers/power/intel_mid_battery.c +++ b/drivers/power/intel_mid_battery.c @@ -765,7 +765,7 @@ static int __devexit platform_pmic_battery_remove(struct platform_device *pdev) power_supply_unregister(&pbi->usb); power_supply_unregister(&pbi->batt); - flush_scheduled_work(); + cancel_work_sync(&pbi->handler); kfree(pbi); return 0; } diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index 72512185f3e2..2ad9b14a5ce3 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c @@ -59,10 +59,60 @@ struct isp1704_charger { struct notifier_block nb; struct work_struct work; - char model[7]; + /* properties */ + char model[8]; unsigned present:1; + unsigned online:1; + unsigned current_max; + + /* temp storage variables */ + unsigned long event; + unsigned max_power; }; +/* + * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB + * chargers). + * + * REVISIT: The method is defined in Battery Charging Specification and is + * applicable to any ULPI transceiver. 
Nothing isp170x specific here. + */ +static inline int isp1704_charger_type(struct isp1704_charger *isp) +{ + u8 reg; + u8 func_ctrl; + u8 otg_ctrl; + int type = POWER_SUPPLY_TYPE_USB_DCP; + + func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL); + otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL); + + /* disable pulldowns */ + reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN; + otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg); + + /* full speed */ + otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL), + ULPI_FUNC_CTRL_XCVRSEL_MASK); + otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), + ULPI_FUNC_CTRL_FULL_SPEED); + + /* Enable strong pull-up on DP (1.5K) and reset */ + reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET; + otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg); + usleep_range(1000, 2000); + + reg = otg_io_read(isp->otg, ULPI_DEBUG); + if ((reg & 3) != 3) + type = POWER_SUPPLY_TYPE_USB_CDP; + + /* recover original state */ + otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl); + otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl); + + return type; +} + /* * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger * is actually a dedicated charger, the following steps need to be taken. @@ -127,16 +177,19 @@ static inline int isp1704_charger_verify(struct isp1704_charger *isp) static inline int isp1704_charger_detect(struct isp1704_charger *isp) { unsigned long timeout; - u8 r; + u8 pwr_ctrl; int ret = 0; + pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL); + /* set SW control bit in PWR_CTRL register */ otg_io_write(isp->otg, ISP1704_PWR_CTRL, ISP1704_PWR_CTRL_SWCTRL); /* enable manual charger detection */ - r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN); - otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r); + otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), + ISP1704_PWR_CTRL_SWCTRL + | ISP1704_PWR_CTRL_DPVSRC_EN); usleep_range(1000, 2000); timeout = jiffies + msecs_to_jiffies(300); @@ -147,7 +200,10 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp) ret = isp1704_charger_verify(isp); break; } - } while (!time_after(jiffies, timeout)); + } while (!time_after(jiffies, timeout) && isp->online); + + /* recover original state */ + otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl); return ret; } @@ -155,52 +211,92 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp) static void isp1704_charger_work(struct work_struct *data) { int detect; + unsigned long event; + unsigned power; struct isp1704_charger *isp = container_of(data, struct isp1704_charger, work); + static DEFINE_MUTEX(lock); - /* - * FIXME Only supporting dedicated chargers even though isp1704 can - * detect HUB and HOST chargers. If the device has already been - * enumerated, the detection will break the connection. 
- */ - if (isp->otg->state != OTG_STATE_B_IDLE) - return; + event = isp->event; + power = isp->max_power; - /* disable data pullups */ - if (isp->otg->gadget) - usb_gadget_disconnect(isp->otg->gadget); + mutex_lock(&lock); + + switch (event) { + case USB_EVENT_VBUS: + isp->online = true; + + /* detect charger */ + detect = isp1704_charger_detect(isp); + + if (detect) { + isp->present = detect; + isp->psy.type = isp1704_charger_type(isp); + } - /* detect charger */ - detect = isp1704_charger_detect(isp); - if (detect) { - isp->present = detect; - power_supply_changed(&isp->psy); + switch (isp->psy.type) { + case POWER_SUPPLY_TYPE_USB_DCP: + isp->current_max = 1800; + break; + case POWER_SUPPLY_TYPE_USB_CDP: + /* + * Only 500mA here or high speed chirp + * handshaking may break + */ + isp->current_max = 500; + /* FALLTHROUGH */ + case POWER_SUPPLY_TYPE_USB: + default: + /* enable data pullups */ + if (isp->otg->gadget) + usb_gadget_connect(isp->otg->gadget); + } + break; + case USB_EVENT_NONE: + isp->online = false; + isp->current_max = 0; + isp->present = 0; + isp->current_max = 0; + isp->psy.type = POWER_SUPPLY_TYPE_USB; + + /* + * Disable data pullups. We need to prevent the controller from + * enumerating. + * + * FIXME: This is here to allow charger detection with Host/HUB + * chargers. The pullups may be enabled elsewhere, so this can + * not be the final solution. + */ + if (isp->otg->gadget) + usb_gadget_disconnect(isp->otg->gadget); + break; + case USB_EVENT_ENUMERATED: + if (isp->present) + isp->current_max = 1800; + else + isp->current_max = power; + break; + default: + goto out; } - /* enable data pullups */ - if (isp->otg->gadget) - usb_gadget_connect(isp->otg->gadget); + power_supply_changed(&isp->psy); +out: + mutex_unlock(&lock); } static int isp1704_notifier_call(struct notifier_block *nb, - unsigned long event, void *unused) + unsigned long event, void *power) { struct isp1704_charger *isp = container_of(nb, struct isp1704_charger, nb); - switch (event) { - case USB_EVENT_VBUS: - schedule_work(&isp->work); - break; - case USB_EVENT_NONE: - if (isp->present) { - isp->present = 0; - power_supply_changed(&isp->psy); - } - break; - default: - return NOTIFY_DONE; - } + isp->event = event; + + if (power) + isp->max_power = *((unsigned *)power); + + schedule_work(&isp->work); return NOTIFY_OK; } @@ -216,6 +312,12 @@ static int isp1704_charger_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_PRESENT: val->intval = isp->present; break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = isp->online; + break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = isp->current_max; + break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = isp->model; break; @@ -230,6 +332,8 @@ static int isp1704_charger_get_property(struct power_supply *psy, static enum power_supply_property power_props[] = { POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; @@ -287,13 +391,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev) if (!isp->otg) goto fail0; + isp->dev = &pdev->dev; + platform_set_drvdata(pdev, isp); + ret = isp1704_test_ulpi(isp); if (ret < 0) goto fail1; - isp->dev = &pdev->dev; - platform_set_drvdata(pdev, isp); - isp->psy.name = "isp1704"; isp->psy.type = POWER_SUPPLY_TYPE_USB; isp->psy.properties = power_props; @@ -318,6 +422,23 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev) dev_info(isp->dev, "registered with product id 
%s\n", isp->model); + /* + * Taking over the D+ pullup. + * + * FIXME: The device will be disconnected if it was already + * enumerated. The charger driver should be always loaded before any + * gadget is loaded. + */ + if (isp->otg->gadget) + usb_gadget_disconnect(isp->otg->gadget); + + /* Detect charger if VBUS is valid (the cable was already plugged). */ + ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); + if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { + isp->event = USB_EVENT_VBUS; + schedule_work(&isp->work); + } + return 0; fail2: power_supply_unregister(&isp->psy); diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c index a8108a73593e..02414db6a94c 100644 --- a/drivers/power/jz4740-battery.c +++ b/drivers/power/jz4740-battery.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,8 @@ struct jz_battery { struct power_supply battery; struct delayed_work work; + + struct mutex lock; }; static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy) @@ -68,6 +71,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery) unsigned long val; long voltage; + mutex_lock(&battery->lock); + INIT_COMPLETION(battery->read_completion); enable_irq(battery->irq); @@ -91,6 +96,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery) battery->cell->disable(battery->pdev); disable_irq(battery->irq); + mutex_unlock(&battery->lock); + return voltage; } @@ -240,6 +247,11 @@ static int __devinit jz_battery_probe(struct platform_device *pdev) struct jz_battery *jz_battery; struct power_supply *battery; + if (!pdata) { + dev_err(&pdev->dev, "No platform_data supplied\n"); + return -ENXIO; + } + jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL); if (!jz_battery) { dev_err(&pdev->dev, "Failed to allocate driver structure\n"); @@ -291,6 +303,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev) jz_battery->pdev = pdev; init_completion(&jz_battery->read_completion); + mutex_init(&jz_battery->lock); INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work); diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c new file mode 100644 index 000000000000..c5c8805156cb --- /dev/null +++ b/drivers/power/max17042_battery.c @@ -0,0 +1,239 @@ +/* + * Fuel gauge driver for Maxim 17042 / 8966 / 8997 + * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This driver is based on max17040_battery.c + */ + +#include +#include +#include +#include +#include +#include + +enum max17042_register { + MAX17042_STATUS = 0x00, + MAX17042_VALRT_Th = 0x01, + MAX17042_TALRT_Th = 0x02, + MAX17042_SALRT_Th = 0x03, + MAX17042_AtRate = 0x04, + MAX17042_RepCap = 0x05, + MAX17042_RepSOC = 0x06, + MAX17042_Age = 0x07, + MAX17042_TEMP = 0x08, + MAX17042_VCELL = 0x09, + MAX17042_Current = 0x0A, + MAX17042_AvgCurrent = 0x0B, + MAX17042_Qresidual = 0x0C, + MAX17042_SOC = 0x0D, + MAX17042_AvSOC = 0x0E, + MAX17042_RemCap = 0x0F, + MAX17402_FullCAP = 0x10, + MAX17042_TTE = 0x11, + MAX17042_V_empty = 0x12, + + MAX17042_RSLOW = 0x14, + + MAX17042_AvgTA = 0x16, + MAX17042_Cycles = 0x17, + MAX17042_DesignCap = 0x18, + MAX17042_AvgVCELL = 0x19, + MAX17042_MinMaxTemp = 0x1A, + MAX17042_MinMaxVolt = 0x1B, + MAX17042_MinMaxCurr = 0x1C, + MAX17042_CONFIG = 0x1D, + MAX17042_ICHGTerm = 0x1E, + MAX17042_AvCap = 0x1F, + MAX17042_ManName = 0x20, + MAX17042_DevName = 0x21, + MAX17042_DevChem = 0x22, + + MAX17042_TempNom = 0x24, + MAX17042_TempCold = 0x25, + MAX17042_TempHot = 0x26, + MAX17042_AIN = 0x27, + MAX17042_LearnCFG = 0x28, + MAX17042_SHFTCFG = 0x29, + MAX17042_RelaxCFG = 0x2A, + MAX17042_MiscCFG = 0x2B, + MAX17042_TGAIN = 0x2C, + MAx17042_TOFF = 0x2D, + MAX17042_CGAIN = 0x2E, + MAX17042_COFF = 0x2F, + + MAX17042_Q_empty = 0x33, + MAX17042_T_empty = 0x34, + + MAX17042_RCOMP0 = 0x38, + MAX17042_TempCo = 0x39, + MAX17042_Rx = 0x3A, + MAX17042_T_empty0 = 0x3B, + MAX17042_TaskPeriod = 0x3C, + MAX17042_FSTAT = 0x3D, + + MAX17042_SHDNTIMER = 0x3F, + + MAX17042_VFRemCap = 0x4A, + + MAX17042_QH = 0x4D, + MAX17042_QL = 0x4E, +}; + +struct max17042_chip { + struct i2c_client *client; + struct power_supply battery; + struct max17042_platform_data *pdata; +}; + +static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value) +{ + int ret = i2c_smbus_write_word_data(client, reg, value); + + if (ret < 0) + dev_err(&client->dev, "%s: err %d\n", __func__, ret); + + return ret; +} + +static int max17042_read_reg(struct i2c_client *client, u8 reg) +{ + int ret = i2c_smbus_read_word_data(client, reg); + + if (ret < 0) + dev_err(&client->dev, "%s: err %d\n", __func__, ret); + + return ret; +} + +static enum power_supply_property max17042_battery_props[] = { + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_CAPACITY, +}; + +static int max17042_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct max17042_chip *chip = container_of(psy, + struct max17042_chip, battery); + + switch (psp) { + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = max17042_read_reg(chip->client, + MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */ + break; + case POWER_SUPPLY_PROP_VOLTAGE_AVG: + val->intval = max17042_read_reg(chip->client, + MAX17042_AvgVCELL) * 83; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = max17042_read_reg(chip->client, + MAX17042_SOC) / 256; + break; + default: + return -EINVAL; + } + return 0; +} + +static int __devinit max17042_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct max17042_chip *chip; + int ret; + + if (!i2c_check_functionality(adapter, 
I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->client = client; + chip->pdata = client->dev.platform_data; + + i2c_set_clientdata(client, chip); + + chip->battery.name = "max17042_battery"; + chip->battery.type = POWER_SUPPLY_TYPE_BATTERY; + chip->battery.get_property = max17042_get_property; + chip->battery.properties = max17042_battery_props; + chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props); + + ret = power_supply_register(&client->dev, &chip->battery); + if (ret) { + dev_err(&client->dev, "failed: power supply register\n"); + i2c_set_clientdata(client, NULL); + kfree(chip); + return ret; + } + + if (!chip->pdata->enable_current_sense) { + max17042_write_reg(client, MAX17042_CGAIN, 0x0000); + max17042_write_reg(client, MAX17042_MiscCFG, 0x0003); + max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); + } + + return 0; +} + +static int __devexit max17042_remove(struct i2c_client *client) +{ + struct max17042_chip *chip = i2c_get_clientdata(client); + + power_supply_unregister(&chip->battery); + i2c_set_clientdata(client, NULL); + kfree(chip); + return 0; +} + +static const struct i2c_device_id max17042_id[] = { + { "max17042", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max17042_id); + +static struct i2c_driver max17042_i2c_driver = { + .driver = { + .name = "max17042", + }, + .probe = max17042_probe, + .remove = __devexit_p(max17042_remove), + .id_table = max17042_id, +}; + +static int __init max17042_init(void) +{ + return i2c_add_driver(&max17042_i2c_driver); +} +module_init(max17042_init); + +static void __exit max17042_exit(void) +{ + i2c_del_driver(&max17042_i2c_driver); +} +module_exit(max17042_exit); + +MODULE_AUTHOR("MyungJoo Ham "); +MODULE_DESCRIPTION("MAX17042 Fuel Gauge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 5bc1dcf7785e..0b0ff3a936a6 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -201,6 +201,72 @@ static int olpc_bat_get_tech(union power_supply_propval *val) return ret; } +static int olpc_bat_get_charge_full_design(union power_supply_propval *val) +{ + uint8_t ec_byte; + union power_supply_propval tech; + int ret, mfr; + + ret = olpc_bat_get_tech(&tech); + if (ret) + return ret; + + ec_byte = BAT_ADDR_MFR_TYPE; + ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1); + if (ret) + return ret; + + mfr = ec_byte >> 4; + + switch (tech.intval) { + case POWER_SUPPLY_TECHNOLOGY_NiMH: + switch (mfr) { + case 1: /* Gold Peak */ + val->intval = 3000000*.8; + break; + default: + return -EIO; + } + break; + + case POWER_SUPPLY_TECHNOLOGY_LiFe: + switch (mfr) { + case 1: /* Gold Peak */ + val->intval = 2800000; + break; + case 2: /* BYD */ + val->intval = 3100000; + break; + default: + return -EIO; + } + break; + + default: + return -EIO; + } + + return ret; +} + +static int olpc_bat_get_charge_now(union power_supply_propval *val) +{ + uint8_t soc; + union power_supply_propval full; + int ret; + + ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &soc, 1); + if (ret) + return ret; + + ret = olpc_bat_get_charge_full_design(&full); + if (ret) + return ret; + + val->intval = soc * (full.intval / 100); + return 0; +} + /********************************************************************* * Battery properties *********************************************************************/ @@ -267,6 +333,7 @@ static int olpc_bat_get_property(struct power_supply *psy, return ret; break; case 
POWER_SUPPLY_PROP_VOLTAGE_AVG: + case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2); if (ret) return ret; @@ -274,6 +341,7 @@ static int olpc_bat_get_property(struct power_supply *psy, val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32; break; case POWER_SUPPLY_PROP_CURRENT_AVG: + case POWER_SUPPLY_PROP_CURRENT_NOW: ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2); if (ret) return ret; @@ -294,6 +362,16 @@ static int olpc_bat_get_property(struct power_supply *psy, else val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + ret = olpc_bat_get_charge_full_design(val); + if (ret) + return ret; + break; + case POWER_SUPPLY_PROP_CHARGE_NOW: + ret = olpc_bat_get_charge_now(val); + if (ret) + return ret; + break; case POWER_SUPPLY_PROP_TEMP: ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2); if (ret) @@ -331,16 +409,20 @@ static int olpc_bat_get_property(struct power_supply *psy, return ret; } -static enum power_supply_property olpc_bat_props[] = { +static enum power_supply_property olpc_xo1_bat_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_MANUFACTURER, @@ -348,6 +430,27 @@ static enum power_supply_property olpc_bat_props[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER, }; +/* XO-1.5 does not have ambient temperature property */ +static enum power_supply_property olpc_xo15_bat_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, + POWER_SUPPLY_PROP_CHARGE_COUNTER, +}; + /* EEPROM reading goes completely around the power_supply API, sadly */ #define EEPROM_START 0x20 @@ -419,8 +522,6 @@ static struct device_attribute olpc_bat_error = { static struct platform_device *bat_pdev; static struct power_supply olpc_bat = { - .properties = olpc_bat_props, - .num_properties = ARRAY_SIZE(olpc_bat_props), .get_property = olpc_bat_get_property, .use_for_apm = 1, }; @@ -466,6 +567,13 @@ static int __init olpc_bat_init(void) goto ac_failed; olpc_bat.name = bat_pdev->name; + if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */ + olpc_bat.properties = olpc_xo15_bat_props; + olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props); + } else { /* XO-1 */ + olpc_bat.properties = olpc_xo1_bat_props; + olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props); + } ret = power_supply_register(&bat_pdev->dev, &olpc_bat); if (ret) diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 91606bb55318..970f7335d3a7 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -190,10 +190,10 @@ int power_supply_register(struct device *parent, 
struct power_supply *psy) goto success; create_triggers_failed: - device_unregister(psy->dev); + device_del(dev); kobject_set_name_failed: device_add_failed: - kfree(dev); + put_device(dev); success: return rc; } @@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(power_supply_register); void power_supply_unregister(struct power_supply *psy) { - flush_scheduled_work(); + cancel_work_sync(&psy->changed_work); power_supply_remove_triggers(psy); device_unregister(psy->dev); } diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index fe16b482e912..4255f2358b13 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c @@ -1,5 +1,5 @@ /* - * iPAQ h1930/h1940/rx1950 battery controler driver + * iPAQ h1930/h1940/rx1950 battery controller driver * Copyright (c) Vasily Khoruzhick * Based on h1940_battery.c by Arnaud Patard * @@ -112,6 +112,13 @@ static int calc_full_volt(int volt_val, int cur_val, int impedance) return volt_val + cur_val * impedance / 1000; } +static int charge_finished(struct s3c_adc_bat *bat) +{ + return bat->pdata->gpio_inverted ? + !gpio_get_value(bat->pdata->gpio_charge_finished) : + gpio_get_value(bat->pdata->gpio_charge_finished); +} + static int s3c_adc_bat_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -140,7 +147,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy, if (bat->cable_plugged && ((bat->pdata->gpio_charge_finished < 0) || - !gpio_get_value(bat->pdata->gpio_charge_finished))) { + !charge_finished(bat))) { lut = bat->pdata->lut_acin; lut_size = bat->pdata->lut_acin_cnt; } @@ -236,8 +243,7 @@ static void s3c_adc_bat_work(struct work_struct *work) } } else { if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) { - is_charged = gpio_get_value( - main_bat.pdata->gpio_charge_finished); + is_charged = charge_finished(&main_bat); if (is_charged) { if (bat->pdata->disable_charger) bat->pdata->disable_charger(); @@ -427,5 +433,5 @@ static void __exit s3c_adc_bat_exit(void) module_exit(s3c_adc_bat_exit); MODULE_AUTHOR("Vasily Khoruzhick "); -MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver"); +MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c index ee04936b2db5..53f0d3524fcd 100644 --- a/drivers/power/tosa_battery.c +++ b/drivers/power/tosa_battery.c @@ -332,7 +332,7 @@ static struct { static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state) { /* flush all pending status updates */ - flush_scheduled_work(); + flush_work_sync(&bat_work); return 0; } @@ -422,7 +422,7 @@ err_psy_reg_jacket: err_psy_reg_main: /* see comment in tosa_bat_remove */ - flush_scheduled_work(); + cancel_work_sync(&bat_work); i--; err_gpio: @@ -445,12 +445,11 @@ static int __devexit tosa_bat_remove(struct platform_device *dev) power_supply_unregister(&tosa_bat_main.psy); /* - * now flush all pending work. - * we won't get any more schedules, since all - * sources (isr and external_power_changed) - * are unregistered now. + * Now cancel the bat_work. We won't get any more schedules, + * since all sources (isr and external_power_changed) are + * unregistered now. 
*/ - flush_scheduled_work(); + cancel_work_sync(&bat_work); for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--) gpio_free(gpios[i].gpio); diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 5071d85ec12d..156559e56fa5 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -147,7 +147,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data) #ifdef CONFIG_PM static int wm97xx_bat_suspend(struct device *dev) { - flush_scheduled_work(); + flush_work_sync(&bat_work); return 0; } @@ -273,7 +273,7 @@ static int __devexit wm97xx_bat_remove(struct platform_device *dev) free_irq(gpio_to_irq(pdata->charge_gpio), dev); gpio_free(pdata->charge_gpio); } - flush_scheduled_work(); + cancel_work_sync(&bat_work); power_supply_unregister(&bat_ps); kfree(prop); return 0; diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index 85064a9f649e..e5ed52d71937 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c @@ -254,7 +254,7 @@ static int __devexit z2_batt_remove(struct i2c_client *client) struct z2_charger *charger = i2c_get_clientdata(client); struct z2_battery_info *info = charger->info; - flush_scheduled_work(); + cancel_work_sync(&charger->bat_work); power_supply_unregister(&charger->batt_ps); kfree(charger->batt_ps.properties); @@ -271,7 +271,9 @@ static int __devexit z2_batt_remove(struct i2c_client *client) #ifdef CONFIG_PM static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) { - flush_scheduled_work(); + struct z2_charger *charger = i2c_get_clientdata(client); + + flush_work_sync(&charger->bat_work); return 0; } diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig index 1afe4e03440f..f0d3376b58ba 100644 --- a/drivers/pps/Kconfig +++ b/drivers/pps/Kconfig @@ -30,6 +30,17 @@ config PPS_DEBUG messages to the system log. Select this if you are having a problem with PPS support and want to see more of what is going on. +config NTP_PPS + bool "PPS kernel consumer support" + depends on PPS && !NO_HZ + help + This option adds support for direct in-kernel time + syncronization using an external PPS signal. + + It doesn't work on tickless systems at the moment. + source drivers/pps/clients/Kconfig +source drivers/pps/generators/Kconfig + endmenu diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile index 98960ddd3188..4483eaadaddd 100644 --- a/drivers/pps/Makefile +++ b/drivers/pps/Makefile @@ -3,7 +3,8 @@ # pps_core-y := pps.o kapi.o sysfs.o +pps_core-$(CONFIG_NTP_PPS) += kc.o obj-$(CONFIG_PPS) := pps_core.o -obj-y += clients/ +obj-y += clients/ generators/ ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig index 4e801bd7254f..8520a7f4dd62 100644 --- a/drivers/pps/clients/Kconfig +++ b/drivers/pps/clients/Kconfig @@ -22,4 +22,11 @@ config PPS_CLIENT_LDISC If you say yes here you get support for a PPS source connected with the CD (Carrier Detect) pin of your serial port. +config PPS_CLIENT_PARPORT + tristate "Parallel port PPS client" + depends on PPS && PARPORT + help + If you say yes here you get support for a PPS source connected + with the interrupt pin of your parallel port. 
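
The battery-driver hunks above all follow the same conversion: instead of flushing the global workqueue with flush_scheduled_work(), each driver now acts on its own work item, using flush_work_sync() in suspend paths (a pending update must finish before sleeping) and cancel_work_sync() in remove paths (no further runs are wanted once the IRQ and notifier sources are gone). A minimal sketch of that pattern, with a placeholder work item and handler (bat_work, bat_work_handler) standing in for the per-driver names:

/* Sketch only: the work item and handler names are placeholders; the
 * flush/cancel calls mirror the conversions in the hunks above. */
#include <linux/workqueue.h>

static void bat_work_handler(struct work_struct *work)
{
	/* re-read battery state and report it, e.g. via power_supply_changed() */
}

static DECLARE_WORK(bat_work, bat_work_handler);

static int bat_suspend_sketch(void)
{
	/* let a possibly pending update complete before suspending */
	flush_work_sync(&bat_work);
	return 0;
}

static void bat_remove_sketch(void)
{
	/* sources are unregistered, so just make sure nothing is still running */
	cancel_work_sync(&bat_work);
}

The distinction matters because flush_scheduled_work() waits on the whole shared workqueue and is deadlock-prone, while the per-item calls only wait on (or cancel) the driver's own handler.
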
+ endif diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile index 812c9b19b430..42517da07049 100644 --- a/drivers/pps/clients/Makefile +++ b/drivers/pps/clients/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PPS_CLIENT_KTIMER) += pps-ktimer.o obj-$(CONFIG_PPS_CLIENT_LDISC) += pps-ldisc.o +obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o ifeq ($(CONFIG_PPS_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c index e7ef5b8186d0..2728469d3884 100644 --- a/drivers/pps/clients/pps-ktimer.c +++ b/drivers/pps/clients/pps-ktimer.c @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -31,7 +32,7 @@ * Global variables */ -static int source; +static struct pps_device *pps; static struct timer_list ktimer; /* @@ -40,19 +41,14 @@ static struct timer_list ktimer; static void pps_ktimer_event(unsigned long ptr) { - struct timespec __ts; - struct pps_ktime ts; + struct pps_event_time ts; /* First of all we get the time stamp... */ - getnstimeofday(&__ts); + pps_get_ts(&ts); - pr_info("PPS event at %lu\n", jiffies); + dev_info(pps->dev, "PPS event at %lu\n", jiffies); - /* ... and translate it to PPS time data struct */ - ts.sec = __ts.tv_sec; - ts.nsec = __ts.tv_nsec; - - pps_event(source, &ts, PPS_CAPTUREASSERT, NULL); + pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL); mod_timer(&ktimer, jiffies + HZ); } @@ -61,12 +57,11 @@ static void pps_ktimer_event(unsigned long ptr) * The echo function */ -static void pps_ktimer_echo(int source, int event, void *data) +static void pps_ktimer_echo(struct pps_device *pps, int event, void *data) { - pr_info("echo %s %s for source %d\n", + dev_info(pps->dev, "echo %s %s\n", event & PPS_CAPTUREASSERT ? "assert" : "", - event & PPS_CAPTURECLEAR ? "clear" : "", - source); + event & PPS_CAPTURECLEAR ? "clear" : ""); } /* @@ -89,30 +84,27 @@ static struct pps_source_info pps_ktimer_info = { static void __exit pps_ktimer_exit(void) { - del_timer_sync(&ktimer); - pps_unregister_source(source); + dev_info(pps->dev, "ktimer PPS source unregistered\n"); - pr_info("ktimer PPS source unregistered\n"); + del_timer_sync(&ktimer); + pps_unregister_source(pps); } static int __init pps_ktimer_init(void) { - int ret; - - ret = pps_register_source(&pps_ktimer_info, + pps = pps_register_source(&pps_ktimer_info, PPS_CAPTUREASSERT | PPS_OFFSETASSERT); - if (ret < 0) { - printk(KERN_ERR "cannot register ktimer source\n"); - return ret; + if (pps == NULL) { + pr_err("cannot register PPS source\n"); + return -ENOMEM; } - source = ret; setup_timer(&ktimer, pps_ktimer_event, 0); mod_timer(&ktimer, jiffies + HZ); - pr_info("ktimer PPS source registered at %d\n", source); + dev_info(pps->dev, "ktimer PPS source registered\n"); - return 0; + return 0; } module_init(pps_ktimer_init); diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c index 8e1932d29fd4..79451f2dea6a 100644 --- a/drivers/pps/clients/pps-ldisc.c +++ b/drivers/pps/clients/pps-ldisc.c @@ -19,6 +19,8 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -27,30 +29,18 @@ #define PPS_TTY_MAGIC 0x0001 static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status, - struct timespec *ts) + struct pps_event_time *ts) { - int id = (long)tty->disc_data; - struct timespec __ts; - struct pps_ktime pps_ts; - - /* First of all we get the time stamp... 
*/ - getnstimeofday(&__ts); - - /* Does caller give us a timestamp? */ - if (ts) { /* Yes. Let's use it! */ - pps_ts.sec = ts->tv_sec; - pps_ts.nsec = ts->tv_nsec; - } else { /* No. Do it ourself! */ - pps_ts.sec = __ts.tv_sec; - pps_ts.nsec = __ts.tv_nsec; - } + struct pps_device *pps = (struct pps_device *)tty->disc_data; + + BUG_ON(pps == NULL); /* Now do the PPS event report */ - pps_event(id, &pps_ts, status ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR, - NULL); + pps_event(pps, ts, status ? PPS_CAPTUREASSERT : + PPS_CAPTURECLEAR, NULL); - pr_debug("PPS %s at %lu on source #%d\n", - status ? "assert" : "clear", jiffies, id); + dev_dbg(pps->dev, "PPS %s at %lu\n", + status ? "assert" : "clear", jiffies); } static int (*alias_n_tty_open)(struct tty_struct *tty); @@ -60,6 +50,7 @@ static int pps_tty_open(struct tty_struct *tty) struct pps_source_info info; struct tty_driver *drv = tty->driver; int index = tty->index + drv->name_base; + struct pps_device *pps; int ret; info.owner = THIS_MODULE; @@ -70,34 +61,42 @@ static int pps_tty_open(struct tty_struct *tty) PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \ PPS_CANWAIT | PPS_TSFMT_TSPEC; - ret = pps_register_source(&info, PPS_CAPTUREBOTH | \ + pps = pps_register_source(&info, PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR); - if (ret < 0) { + if (pps == NULL) { pr_err("cannot register PPS source \"%s\"\n", info.path); - return ret; + return -ENOMEM; } - tty->disc_data = (void *)(long)ret; + tty->disc_data = pps; /* Should open N_TTY ldisc too */ ret = alias_n_tty_open(tty); - if (ret < 0) - pps_unregister_source((long)tty->disc_data); + if (ret < 0) { + pr_err("cannot open tty ldisc \"%s\"\n", info.path); + goto err_unregister; + } - pr_info("PPS source #%d \"%s\" added\n", ret, info.path); + dev_info(pps->dev, "source \"%s\" added\n", info.path); return 0; + +err_unregister: + tty->disc_data = NULL; + pps_unregister_source(pps); + return ret; } static void (*alias_n_tty_close)(struct tty_struct *tty); static void pps_tty_close(struct tty_struct *tty) { - int id = (long)tty->disc_data; + struct pps_device *pps = (struct pps_device *)tty->disc_data; - pps_unregister_source(id); alias_n_tty_close(tty); - pr_info("PPS source #%d removed\n", id); + tty->disc_data = NULL; + dev_info(pps->dev, "removed\n"); + pps_unregister_source(pps); } static struct tty_ldisc_ops pps_ldisc_ops; diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c new file mode 100644 index 000000000000..32221efd9ca9 --- /dev/null +++ b/drivers/pps/clients/pps_parport.c @@ -0,0 +1,258 @@ +/* + * pps_parport.c -- kernel parallel port PPS client + * + * + * Copyright (C) 2009 Alexander Gordeev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + + +/* + * TODO: + * implement echo over SEL pin + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define DRVDESC "parallel port PPS client" + +/* module parameters */ + +#define CLEAR_WAIT_MAX 100 +#define CLEAR_WAIT_MAX_ERRORS 5 + +static unsigned int clear_wait = 100; +MODULE_PARM_DESC(clear_wait, + "Maximum number of port reads when polling for signal clear," + " zero turns clear edge capture off entirely"); +module_param(clear_wait, uint, 0); + + +/* internal per port structure */ +struct pps_client_pp { + struct pardevice *pardev; /* parport device */ + struct pps_device *pps; /* PPS device */ + unsigned int cw; /* port clear timeout */ + unsigned int cw_err; /* number of timeouts */ +}; + +static inline int signal_is_set(struct parport *port) +{ + return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0; +} + +/* parport interrupt handler */ +static void parport_irq(void *handle) +{ + struct pps_event_time ts_assert, ts_clear; + struct pps_client_pp *dev = handle; + struct parport *port = dev->pardev->port; + unsigned int i; + unsigned long flags; + + /* first of all we get the time stamp... */ + pps_get_ts(&ts_assert); + + if (dev->cw == 0) + /* clear edge capture disabled */ + goto out_assert; + + /* try capture the clear edge */ + + /* We have to disable interrupts here. The idea is to prevent + * other interrupts on the same processor to introduce random + * lags while polling the port. Reading from IO port is known + * to take approximately 1us while other interrupt handlers can + * take much more potentially. + * + * Interrupts won't be disabled for a long time because the + * number of polls is limited by clear_wait parameter which is + * kept rather low. So it should never be an issue. + */ + local_irq_save(flags); + /* check the signal (no signal means the pulse is lost this time) */ + if (!signal_is_set(port)) { + local_irq_restore(flags); + dev_err(dev->pps->dev, "lost the signal\n"); + goto out_assert; + } + + /* poll the port until the signal is unset */ + for (i = dev->cw; i; i--) + if (!signal_is_set(port)) { + pps_get_ts(&ts_clear); + local_irq_restore(flags); + dev->cw_err = 0; + goto out_both; + } + local_irq_restore(flags); + + /* timeout */ + dev->cw_err++; + if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) { + dev_err(dev->pps->dev, "disabled clear edge capture after %d" + " timeouts\n", dev->cw_err); + dev->cw = 0; + dev->cw_err = 0; + } + +out_assert: + /* fire assert event */ + pps_event(dev->pps, &ts_assert, + PPS_CAPTUREASSERT, NULL); + return; + +out_both: + /* fire assert event */ + pps_event(dev->pps, &ts_assert, + PPS_CAPTUREASSERT, NULL); + /* fire clear event */ + pps_event(dev->pps, &ts_clear, + PPS_CAPTURECLEAR, NULL); + return; +} + +/* the PPS echo function */ +static void pps_echo(struct pps_device *pps, int event, void *data) +{ + dev_info(pps->dev, "echo %s %s\n", + event & PPS_CAPTUREASSERT ? "assert" : "", + event & PPS_CAPTURECLEAR ? 
"clear" : ""); +} + +static void parport_attach(struct parport *port) +{ + struct pps_client_pp *device; + struct pps_source_info info = { + .name = KBUILD_MODNAME, + .path = "", + .mode = PPS_CAPTUREBOTH | \ + PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \ + PPS_ECHOASSERT | PPS_ECHOCLEAR | \ + PPS_CANWAIT | PPS_TSFMT_TSPEC, + .echo = pps_echo, + .owner = THIS_MODULE, + .dev = NULL + }; + + device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL); + if (!device) { + pr_err("memory allocation failed, not attaching\n"); + return; + } + + device->pardev = parport_register_device(port, KBUILD_MODNAME, + NULL, NULL, parport_irq, 0, device); + if (!device->pardev) { + pr_err("couldn't register with %s\n", port->name); + goto err_free; + } + + if (parport_claim_or_block(device->pardev) < 0) { + pr_err("couldn't claim %s\n", port->name); + goto err_unregister_dev; + } + + device->pps = pps_register_source(&info, + PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR); + if (device->pps == NULL) { + pr_err("couldn't register PPS source\n"); + goto err_release_dev; + } + + device->cw = clear_wait; + + port->ops->enable_irq(port); + + pr_info("attached to %s\n", port->name); + + return; + +err_release_dev: + parport_release(device->pardev); +err_unregister_dev: + parport_unregister_device(device->pardev); +err_free: + kfree(device); +} + +static void parport_detach(struct parport *port) +{ + struct pardevice *pardev = port->cad; + struct pps_client_pp *device; + + /* FIXME: oooh, this is ugly! */ + if (strcmp(pardev->name, KBUILD_MODNAME)) + /* not our port */ + return; + + device = pardev->private; + + port->ops->disable_irq(port); + pps_unregister_source(device->pps); + parport_release(pardev); + parport_unregister_device(pardev); + kfree(device); +} + +static struct parport_driver pps_parport_driver = { + .name = KBUILD_MODNAME, + .attach = parport_attach, + .detach = parport_detach, +}; + +/* module staff */ + +static int __init pps_parport_init(void) +{ + int ret; + + pr_info(DRVDESC "\n"); + + if (clear_wait > CLEAR_WAIT_MAX) { + pr_err("clear_wait value should be not greater" + " then %d\n", CLEAR_WAIT_MAX); + return -EINVAL; + } + + ret = parport_register_driver(&pps_parport_driver); + if (ret) { + pr_err("unable to register with parport\n"); + return ret; + } + + return 0; +} + +static void __exit pps_parport_exit(void) +{ + parport_unregister_driver(&pps_parport_driver); +} + +module_init(pps_parport_init); +module_exit(pps_parport_exit); + +MODULE_AUTHOR("Alexander Gordeev "); +MODULE_DESCRIPTION(DRVDESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig new file mode 100644 index 000000000000..f3a73dd77660 --- /dev/null +++ b/drivers/pps/generators/Kconfig @@ -0,0 +1,13 @@ +# +# PPS generators configuration +# + +comment "PPS generators support" + +config PPS_GENERATOR_PARPORT + tristate "Parallel port PPS signal generator" + depends on PARPORT + help + If you say yes here you get support for a PPS signal generator which + utilizes STROBE pin of a parallel port to send PPS signals. It uses + parport abstraction layer and hrtimers to precisely control the signal. diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile new file mode 100644 index 000000000000..303304a6b8ec --- /dev/null +++ b/drivers/pps/generators/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for PPS generators. 
+# + +obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o + +ifeq ($(CONFIG_PPS_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c new file mode 100644 index 000000000000..5c32f8dacf56 --- /dev/null +++ b/drivers/pps/generators/pps_gen_parport.c @@ -0,0 +1,282 @@ +/* + * pps_gen_parport.c -- kernel parallel port PPS signal generator + * + * + * Copyright (C) 2009 Alexander Gordeev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +/* + * TODO: + * fix issues when realtime clock is adjusted in a leap + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define DRVDESC "parallel port PPS signal generator" + +#define SIGNAL 0 +#define NO_SIGNAL PARPORT_CONTROL_STROBE + +/* module parameters */ + +#define SEND_DELAY_MAX 100000 + +static unsigned int send_delay = 30000; +MODULE_PARM_DESC(delay, + "Delay between setting and dropping the signal (ns)"); +module_param_named(delay, send_delay, uint, 0); + + +#define SAFETY_INTERVAL 3000 /* set the hrtimer earlier for safety (ns) */ + +/* internal per port structure */ +struct pps_generator_pp { + struct pardevice *pardev; /* parport device */ + struct hrtimer timer; + long port_write_time; /* calibrated port write time (ns) */ +}; + +static struct pps_generator_pp device = { + .pardev = NULL, +}; + +static int attached; + +/* calibrated time between a hrtimer event and the reaction */ +static long hrtimer_error = SAFETY_INTERVAL; + +/* the kernel hrtimer event */ +static enum hrtimer_restart hrtimer_event(struct hrtimer *timer) +{ + struct timespec expire_time, ts1, ts2, ts3, dts; + struct pps_generator_pp *dev; + struct parport *port; + long lim, delta; + unsigned long flags; + + /* We have to disable interrupts here. The idea is to prevent + * other interrupts on the same processor to introduce random + * lags while polling the clock. getnstimeofday() takes <1us on + * most machines while other interrupt handlers can take much + * more potentially. + * + * NB: approx time with blocked interrupts = + * send_delay + 3 * SAFETY_INTERVAL + */ + local_irq_save(flags); + + /* first of all we get the time stamp... 
*/ + getnstimeofday(&ts1); + expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer)); + dev = container_of(timer, struct pps_generator_pp, timer); + lim = NSEC_PER_SEC - send_delay - dev->port_write_time; + + /* check if we are late */ + if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) { + local_irq_restore(flags); + pr_err("we are late this time %ld.%09ld\n", + ts1.tv_sec, ts1.tv_nsec); + goto done; + } + + /* busy loop until the time is right for an assert edge */ + do { + getnstimeofday(&ts2); + } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); + + /* set the signal */ + port = dev->pardev->port; + port->ops->write_control(port, SIGNAL); + + /* busy loop until the time is right for a clear edge */ + lim = NSEC_PER_SEC - dev->port_write_time; + do { + getnstimeofday(&ts2); + } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); + + /* unset the signal */ + port->ops->write_control(port, NO_SIGNAL); + + getnstimeofday(&ts3); + + local_irq_restore(flags); + + /* update calibrated port write time */ + dts = timespec_sub(ts3, ts2); + dev->port_write_time = + (dev->port_write_time + timespec_to_ns(&dts)) >> 1; + +done: + /* update calibrated hrtimer error */ + dts = timespec_sub(ts1, expire_time); + delta = timespec_to_ns(&dts); + /* If the new error value is bigger then the old, use the new + * value, if not then slowly move towards the new value. This + * way it should be safe in bad conditions and efficient in + * good conditions. + */ + if (delta >= hrtimer_error) + hrtimer_error = delta; + else + hrtimer_error = (3 * hrtimer_error + delta) >> 2; + + /* update the hrtimer expire time */ + hrtimer_set_expires(timer, + ktime_set(expire_time.tv_sec + 1, + NSEC_PER_SEC - (send_delay + + dev->port_write_time + SAFETY_INTERVAL + + 2 * hrtimer_error))); + + return HRTIMER_RESTART; +} + +/* calibrate port write time */ +#define PORT_NTESTS_SHIFT 5 +static void calibrate_port(struct pps_generator_pp *dev) +{ + struct parport *port = dev->pardev->port; + int i; + long acc = 0; + + for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) { + struct timespec a, b; + unsigned long irq_flags; + + local_irq_save(irq_flags); + getnstimeofday(&a); + port->ops->write_control(port, NO_SIGNAL); + getnstimeofday(&b); + local_irq_restore(irq_flags); + + b = timespec_sub(b, a); + acc += timespec_to_ns(&b); + } + + dev->port_write_time = acc >> PORT_NTESTS_SHIFT; + pr_info("port write takes %ldns\n", dev->port_write_time); +} + +static inline ktime_t next_intr_time(struct pps_generator_pp *dev) +{ + struct timespec ts; + + getnstimeofday(&ts); + + return ktime_set(ts.tv_sec + + ((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 
1 : 0), + NSEC_PER_SEC - (send_delay + + dev->port_write_time + 3 * SAFETY_INTERVAL)); +} + +static void parport_attach(struct parport *port) +{ + if (attached) { + /* we already have a port */ + return; + } + + device.pardev = parport_register_device(port, KBUILD_MODNAME, + NULL, NULL, NULL, 0, &device); + if (!device.pardev) { + pr_err("couldn't register with %s\n", port->name); + return; + } + + if (parport_claim_or_block(device.pardev) < 0) { + pr_err("couldn't claim %s\n", port->name); + goto err_unregister_dev; + } + + pr_info("attached to %s\n", port->name); + attached = 1; + + calibrate_port(&device); + + hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + device.timer.function = hrtimer_event; +#ifdef CONFIG_PREEMPT_RT + /* hrtimer interrupt will run in the interrupt context with this */ + device.timer.irqsafe = 1; +#endif + + hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS); + + return; + +err_unregister_dev: + parport_unregister_device(device.pardev); +} + +static void parport_detach(struct parport *port) +{ + if (port->cad != device.pardev) + return; /* not our port */ + + hrtimer_cancel(&device.timer); + parport_release(device.pardev); + parport_unregister_device(device.pardev); +} + +static struct parport_driver pps_gen_parport_driver = { + .name = KBUILD_MODNAME, + .attach = parport_attach, + .detach = parport_detach, +}; + +/* module staff */ + +static int __init pps_gen_parport_init(void) +{ + int ret; + + pr_info(DRVDESC "\n"); + + if (send_delay > SEND_DELAY_MAX) { + pr_err("delay value should be not greater" + " then %d\n", SEND_DELAY_MAX); + return -EINVAL; + } + + ret = parport_register_driver(&pps_gen_parport_driver); + if (ret) { + pr_err("unable to register with parport\n"); + return ret; + } + + return 0; +} + +static void __exit pps_gen_parport_exit(void) +{ + parport_unregister_driver(&pps_gen_parport_driver); + pr_info("hrtimer avg error is %ldns\n", hrtimer_error); +} + +module_init(pps_gen_parport_init); +module_exit(pps_gen_parport_exit); + +MODULE_AUTHOR("Alexander Gordeev "); +MODULE_DESCRIPTION(DRVDESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index 1aa02db3ff4e..cba1b43f7519 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -19,24 +19,20 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include +#include #include -#include #include #include #include -/* - * Global variables - */ - -DEFINE_SPINLOCK(pps_idr_lock); -DEFINE_IDR(pps_idr); +#include "kc.h" /* * Local functions @@ -60,60 +56,6 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset) * Exported functions */ -/* pps_get_source - find a PPS source - * @source: the PPS source ID. - * - * This function is used to find an already registered PPS source into the - * system. - * - * The function returns NULL if found nothing, otherwise it returns a pointer - * to the PPS source data struct (the refcounter is incremented by 1). - */ - -struct pps_device *pps_get_source(int source) -{ - struct pps_device *pps; - unsigned long flags; - - spin_lock_irqsave(&pps_idr_lock, flags); - - pps = idr_find(&pps_idr, source); - if (pps != NULL) - atomic_inc(&pps->usage); - - spin_unlock_irqrestore(&pps_idr_lock, flags); - - return pps; -} - -/* pps_put_source - free the PPS source data - * @pps: a pointer to the PPS source. - * - * This function is used to free a PPS data struct if its refcount is 0. 
- */ - -void pps_put_source(struct pps_device *pps) -{ - unsigned long flags; - - spin_lock_irqsave(&pps_idr_lock, flags); - BUG_ON(atomic_read(&pps->usage) == 0); - - if (!atomic_dec_and_test(&pps->usage)) { - pps = NULL; - goto exit; - } - - /* No more reference to the PPS source. We can safely remove the - * PPS data struct. - */ - idr_remove(&pps_idr, pps->id); - -exit: - spin_unlock_irqrestore(&pps_idr_lock, flags); - kfree(pps); -} - /* pps_register_source - add a PPS source in the system * @info: the PPS info struct * @default_params: the default PPS parameters of the new source @@ -122,31 +64,31 @@ exit: * source is described by info's fields and it will have, as default PPS * parameters, the ones specified into default_params. * - * The function returns, in case of success, the PPS source ID. + * The function returns, in case of success, the PPS device. Otherwise NULL. */ -int pps_register_source(struct pps_source_info *info, int default_params) +struct pps_device *pps_register_source(struct pps_source_info *info, + int default_params) { struct pps_device *pps; - int id; int err; /* Sanity checks */ if ((info->mode & default_params) != default_params) { - printk(KERN_ERR "pps: %s: unsupported default parameters\n", + pr_err("%s: unsupported default parameters\n", info->name); err = -EINVAL; goto pps_register_source_exit; } if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 && info->echo == NULL) { - printk(KERN_ERR "pps: %s: echo function is not defined\n", + pr_err("%s: echo function is not defined\n", info->name); err = -EINVAL; goto pps_register_source_exit; } if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { - printk(KERN_ERR "pps: %s: unspecified time format\n", + pr_err("%s: unspecified time format\n", info->name); err = -EINVAL; goto pps_register_source_exit; @@ -168,94 +110,48 @@ int pps_register_source(struct pps_source_info *info, int default_params) init_waitqueue_head(&pps->queue); spin_lock_init(&pps->lock); - atomic_set(&pps->usage, 1); - - /* Get new ID for the new PPS source */ - if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) { - err = -ENOMEM; - goto kfree_pps; - } - - spin_lock_irq(&pps_idr_lock); - - /* Now really allocate the PPS source. - * After idr_get_new() calling the new source will be freely available - * into the kernel. 
- */ - err = idr_get_new(&pps_idr, pps, &id); - if (err < 0) { - spin_unlock_irq(&pps_idr_lock); - goto kfree_pps; - } - - id = id & MAX_ID_MASK; - if (id >= PPS_MAX_SOURCES) { - spin_unlock_irq(&pps_idr_lock); - - printk(KERN_ERR "pps: %s: too many PPS sources in the system\n", - info->name); - err = -EBUSY; - goto free_idr; - } - pps->id = id; - - spin_unlock_irq(&pps_idr_lock); /* Create the char device */ err = pps_register_cdev(pps); if (err < 0) { - printk(KERN_ERR "pps: %s: unable to create char device\n", + pr_err("%s: unable to create char device\n", info->name); - goto free_idr; + goto kfree_pps; } - pr_info("new PPS source %s at ID %d\n", info->name, id); + dev_info(pps->dev, "new PPS source %s\n", info->name); - return id; - -free_idr: - spin_lock_irq(&pps_idr_lock); - idr_remove(&pps_idr, id); - spin_unlock_irq(&pps_idr_lock); + return pps; kfree_pps: kfree(pps); pps_register_source_exit: - printk(KERN_ERR "pps: %s: unable to register source\n", info->name); + pr_err("%s: unable to register source\n", info->name); - return err; + return NULL; } EXPORT_SYMBOL(pps_register_source); /* pps_unregister_source - remove a PPS source from the system - * @source: the PPS source ID + * @pps: the PPS source * * This function is used to remove a previously registered PPS source from * the system. */ -void pps_unregister_source(int source) +void pps_unregister_source(struct pps_device *pps) { - struct pps_device *pps; - - spin_lock_irq(&pps_idr_lock); - pps = idr_find(&pps_idr, source); - - if (!pps) { - BUG(); - spin_unlock_irq(&pps_idr_lock); - return; - } - spin_unlock_irq(&pps_idr_lock); - + pps_kc_remove(pps); pps_unregister_cdev(pps); - pps_put_source(pps); + + /* don't have to kfree(pps) here because it will be done on + * device destruction */ } EXPORT_SYMBOL(pps_unregister_source); /* pps_event - register a PPS event into the system - * @source: the PPS source ID + * @pps: the PPS device * @ts: the event timestamp * @event: the event type * @data: userdef pointer @@ -263,78 +159,72 @@ EXPORT_SYMBOL(pps_unregister_source); * This function is used by each PPS client in order to register a new * PPS event into the system (it's usually called inside an IRQ handler). * - * If an echo function is associated with the PPS source it will be called + * If an echo function is associated with the PPS device it will be called * as: - * pps->info.echo(source, event, data); + * pps->info.echo(pps, event, data); */ - -void pps_event(int source, struct pps_ktime *ts, int event, void *data) +void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, + void *data) { - struct pps_device *pps; unsigned long flags; int captured = 0; + struct pps_ktime ts_real; - if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) { - printk(KERN_ERR "pps: unknown event (%x) for source %d\n", - event, source); - return; - } + /* check event type */ + BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); - pps = pps_get_source(source); - if (!pps) - return; + dev_dbg(pps->dev, "PPS event at %ld.%09ld\n", + ts->ts_real.tv_sec, ts->ts_real.tv_nsec); - pr_debug("PPS event on source %d at %llu.%06u\n", - pps->id, (unsigned long long) ts->sec, ts->nsec); + timespec_to_pps_ktime(&ts_real, ts->ts_real); spin_lock_irqsave(&pps->lock, flags); /* Must call the echo function? 
*/ if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR))) - pps->info.echo(source, event, data); + pps->info.echo(pps, event, data); /* Check the event */ pps->current_mode = pps->params.mode; - if ((event & PPS_CAPTUREASSERT) & - (pps->params.mode & PPS_CAPTUREASSERT)) { + if (event & pps->params.mode & PPS_CAPTUREASSERT) { /* We have to add an offset? */ if (pps->params.mode & PPS_OFFSETASSERT) - pps_add_offset(ts, &pps->params.assert_off_tu); + pps_add_offset(&ts_real, + &pps->params.assert_off_tu); /* Save the time stamp */ - pps->assert_tu = *ts; + pps->assert_tu = ts_real; pps->assert_sequence++; - pr_debug("capture assert seq #%u for source %d\n", - pps->assert_sequence, source); + dev_dbg(pps->dev, "capture assert seq #%u\n", + pps->assert_sequence); captured = ~0; } - if ((event & PPS_CAPTURECLEAR) & - (pps->params.mode & PPS_CAPTURECLEAR)) { + if (event & pps->params.mode & PPS_CAPTURECLEAR) { /* We have to add an offset? */ if (pps->params.mode & PPS_OFFSETCLEAR) - pps_add_offset(ts, &pps->params.clear_off_tu); + pps_add_offset(&ts_real, + &pps->params.clear_off_tu); /* Save the time stamp */ - pps->clear_tu = *ts; + pps->clear_tu = ts_real; pps->clear_sequence++; - pr_debug("capture clear seq #%u for source %d\n", - pps->clear_sequence, source); + dev_dbg(pps->dev, "capture clear seq #%u\n", + pps->clear_sequence); captured = ~0; } - /* Wake up iif captured somthing */ + pps_kc_event(pps, ts, event); + + /* Wake up if captured something */ if (captured) { - pps->go = ~0; - wake_up_interruptible(&pps->queue); + pps->last_ev++; + wake_up_interruptible_all(&pps->queue); kill_fasync(&pps->async_queue, SIGIO, POLL_IN); } spin_unlock_irqrestore(&pps->lock, flags); - - /* Now we can release the PPS source for (possible) deregistration */ - pps_put_source(pps); } EXPORT_SYMBOL(pps_event); diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c new file mode 100644 index 000000000000..079e930b1938 --- /dev/null +++ b/drivers/pps/kc.c @@ -0,0 +1,122 @@ +/* + * PPS kernel consumer API + * + * Copyright (C) 2009-2010 Alexander Gordeev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "kc.h" + +/* + * Global variables + */ + +/* state variables to bind kernel consumer */ +DEFINE_SPINLOCK(pps_kc_hardpps_lock); +/* PPS API (RFC 2783): current source and mode for kernel consumer */ +struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */ +int pps_kc_hardpps_mode; /* mode bits for kernel consumer */ + +/* pps_kc_bind - control PPS kernel consumer binding + * @pps: the PPS source + * @bind_args: kernel consumer bind parameters + * + * This function is used to bind or unbind PPS kernel consumer according to + * supplied parameters. Should not be called in interrupt context. 
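
pps_kc_bind() above is reached from user space through the new PPS_KC_BIND ioctl handled later in pps.c; its argument checks (PPS_KC_HARDPPS consumer, PPS_TSFMT_TSPEC format, edges limited to PPS_CAPTUREBOTH) correspond to the RFC 2783 time_pps_kcbind() call. A rough user-space sketch under those assumptions, using the standard timepps.h wrappers and a hypothetical /dev/pps0 node:

/* Rough sketch: binds a PPS source as the kernel consumer (hardpps).
 * The device path and error handling are illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/timepps.h>

int main(void)
{
	pps_handle_t handle;
	int fd = open("/dev/pps0", O_RDWR);

	if (fd < 0 || time_pps_create(fd, &handle) < 0) {
		perror("pps");
		return 1;
	}

	/* feed assert edges to the in-kernel NTP discipline (CONFIG_NTP_PPS) */
	if (time_pps_kcbind(handle, PPS_KC_HARDPPS, PPS_CAPTUREASSERT,
			    PPS_TSFMT_TSPEC) < 0) {
		perror("time_pps_kcbind");
		return 1;
	}
	return 0;
}
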
+ */ +int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) +{ + /* Check if another consumer is already bound */ + spin_lock_irq(&pps_kc_hardpps_lock); + + if (bind_args->edge == 0) + if (pps_kc_hardpps_dev == pps) { + pps_kc_hardpps_mode = 0; + pps_kc_hardpps_dev = NULL; + spin_unlock_irq(&pps_kc_hardpps_lock); + dev_info(pps->dev, "unbound kernel" + " consumer\n"); + } else { + spin_unlock_irq(&pps_kc_hardpps_lock); + dev_err(pps->dev, "selected kernel consumer" + " is not bound\n"); + return -EINVAL; + } + else + if (pps_kc_hardpps_dev == NULL || + pps_kc_hardpps_dev == pps) { + pps_kc_hardpps_mode = bind_args->edge; + pps_kc_hardpps_dev = pps; + spin_unlock_irq(&pps_kc_hardpps_lock); + dev_info(pps->dev, "bound kernel consumer: " + "edge=0x%x\n", bind_args->edge); + } else { + spin_unlock_irq(&pps_kc_hardpps_lock); + dev_err(pps->dev, "another kernel consumer" + " is already bound\n"); + return -EINVAL; + } + + return 0; +} + +/* pps_kc_remove - unbind kernel consumer on PPS source removal + * @pps: the PPS source + * + * This function is used to disable kernel consumer on PPS source removal + * if this source was bound to PPS kernel consumer. Can be called on any + * source safely. Should not be called in interrupt context. + */ +void pps_kc_remove(struct pps_device *pps) +{ + spin_lock_irq(&pps_kc_hardpps_lock); + if (pps == pps_kc_hardpps_dev) { + pps_kc_hardpps_mode = 0; + pps_kc_hardpps_dev = NULL; + spin_unlock_irq(&pps_kc_hardpps_lock); + dev_info(pps->dev, "unbound kernel consumer" + " on device removal\n"); + } else + spin_unlock_irq(&pps_kc_hardpps_lock); +} + +/* pps_kc_event - call hardpps() on PPS event + * @pps: the PPS source + * @ts: PPS event timestamp + * @event: PPS event edge + * + * This function calls hardpps() when an event from bound PPS source occurs. + */ +void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, + int event) +{ + unsigned long flags; + + /* Pass some events to kernel consumer if activated */ + spin_lock_irqsave(&pps_kc_hardpps_lock, flags); + if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) + hardpps(&ts->ts_real, &ts->ts_raw); + spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); +} diff --git a/drivers/pps/kc.h b/drivers/pps/kc.h new file mode 100644 index 000000000000..d296fcd0a175 --- /dev/null +++ b/drivers/pps/kc.h @@ -0,0 +1,46 @@ +/* + * PPS kernel consumer API header + * + * Copyright (C) 2009-2010 Alexander Gordeev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef LINUX_PPS_KC_H +#define LINUX_PPS_KC_H + +#include +#include + +#ifdef CONFIG_NTP_PPS + +extern int pps_kc_bind(struct pps_device *pps, + struct pps_bind_args *bind_args); +extern void pps_kc_remove(struct pps_device *pps); +extern void pps_kc_event(struct pps_device *pps, + struct pps_event_time *ts, int event); + + +#else /* CONFIG_NTP_PPS */ + +static inline int pps_kc_bind(struct pps_device *pps, + struct pps_bind_args *bind_args) { return -EOPNOTSUPP; } +static inline void pps_kc_remove(struct pps_device *pps) {} +static inline void pps_kc_event(struct pps_device *pps, + struct pps_event_time *ts, int event) {} + +#endif /* CONFIG_NTP_PPS */ + +#endif /* LINUX_PPS_KC_H */ diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index ca5183bdad85..2baadd21b7a6 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -26,9 +27,13 @@ #include #include #include +#include #include #include #include +#include + +#include "kc.h" /* * Local variables @@ -37,6 +42,9 @@ static dev_t pps_devt; static struct class *pps_class; +static DEFINE_MUTEX(pps_idr_lock); +static DEFINE_IDR(pps_idr); + /* * Char device methods */ @@ -61,15 +69,13 @@ static long pps_cdev_ioctl(struct file *file, { struct pps_device *pps = file->private_data; struct pps_kparams params; - struct pps_fdata fdata; - unsigned long ticks; void __user *uarg = (void __user *) arg; int __user *iuarg = (int __user *) arg; int err; switch (cmd) { case PPS_GETPARAMS: - pr_debug("PPS_GETPARAMS: source %d\n", pps->id); + dev_dbg(pps->dev, "PPS_GETPARAMS\n"); spin_lock_irq(&pps->lock); @@ -85,7 +91,7 @@ static long pps_cdev_ioctl(struct file *file, break; case PPS_SETPARAMS: - pr_debug("PPS_SETPARAMS: source %d\n", pps->id); + dev_dbg(pps->dev, "PPS_SETPARAMS\n"); /* Check the capabilities */ if (!capable(CAP_SYS_TIME)) @@ -95,14 +101,14 @@ static long pps_cdev_ioctl(struct file *file, if (err) return -EFAULT; if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) { - pr_debug("capture mode unspecified (%x)\n", + dev_dbg(pps->dev, "capture mode unspecified (%x)\n", params.mode); return -EINVAL; } /* Check for supported capabilities */ if ((params.mode & ~pps->info.mode) != 0) { - pr_debug("unsupported capabilities (%x)\n", + dev_dbg(pps->dev, "unsupported capabilities (%x)\n", params.mode); return -EINVAL; } @@ -115,7 +121,7 @@ static long pps_cdev_ioctl(struct file *file, /* Restore the read only parameters */ if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { /* section 3.3 of RFC 2783 interpreted */ - pr_debug("time format unspecified (%x)\n", + dev_dbg(pps->dev, "time format unspecified (%x)\n", params.mode); pps->params.mode |= PPS_TSFMT_TSPEC; } @@ -128,7 +134,7 @@ static long pps_cdev_ioctl(struct file *file, break; case PPS_GETCAP: - pr_debug("PPS_GETCAP: source %d\n", pps->id); + dev_dbg(pps->dev, "PPS_GETCAP\n"); err = put_user(pps->info.mode, iuarg); if (err) @@ -136,20 +142,26 @@ static long pps_cdev_ioctl(struct file *file, break; - case PPS_FETCH: - pr_debug("PPS_FETCH: source %d\n", pps->id); + case PPS_FETCH: { + struct pps_fdata fdata; + unsigned int ev; + + dev_dbg(pps->dev, "PPS_FETCH\n"); err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata)); if (err) return -EFAULT; - pps->go = 0; + ev = pps->last_ev; /* Manage the timeout */ if (fdata.timeout.flags & PPS_TIME_INVALID) - err = wait_event_interruptible(pps->queue, pps->go); + err = 
wait_event_interruptible(pps->queue, + ev != pps->last_ev); else { - pr_debug("timeout %lld.%09d\n", + unsigned long ticks; + + dev_dbg(pps->dev, "timeout %lld.%09d\n", (long long) fdata.timeout.sec, fdata.timeout.nsec); ticks = fdata.timeout.sec * HZ; @@ -157,7 +169,9 @@ static long pps_cdev_ioctl(struct file *file, if (ticks != 0) { err = wait_event_interruptible_timeout( - pps->queue, pps->go, ticks); + pps->queue, + ev != pps->last_ev, + ticks); if (err == 0) return -ETIMEDOUT; } @@ -165,7 +179,7 @@ static long pps_cdev_ioctl(struct file *file, /* Check for pending signals */ if (err == -ERESTARTSYS) { - pr_debug("pending signal caught\n"); + dev_dbg(pps->dev, "pending signal caught\n"); return -EINTR; } @@ -185,10 +199,44 @@ static long pps_cdev_ioctl(struct file *file, return -EFAULT; break; + } + case PPS_KC_BIND: { + struct pps_bind_args bind_args; + + dev_dbg(pps->dev, "PPS_KC_BIND\n"); + + /* Check the capabilities */ + if (!capable(CAP_SYS_TIME)) + return -EPERM; + + if (copy_from_user(&bind_args, uarg, + sizeof(struct pps_bind_args))) + return -EFAULT; + /* Check for supported capabilities */ + if ((bind_args.edge & ~pps->info.mode) != 0) { + dev_err(pps->dev, "unsupported capabilities (%x)\n", + bind_args.edge); + return -EINVAL; + } + + /* Validate parameters roughly */ + if (bind_args.tsformat != PPS_TSFMT_TSPEC || + (bind_args.edge & ~PPS_CAPTUREBOTH) != 0 || + bind_args.consumer != PPS_KC_HARDPPS) { + dev_err(pps->dev, "invalid kernel consumer bind" + " parameters (%x)\n", bind_args.edge); + return -EINVAL; + } + + err = pps_kc_bind(pps, &bind_args); + if (err < 0) + return err; + + break; + } default: return -ENOTTY; - break; } return 0; @@ -198,12 +246,6 @@ static int pps_cdev_open(struct inode *inode, struct file *file) { struct pps_device *pps = container_of(inode->i_cdev, struct pps_device, cdev); - int found; - - found = pps_get_source(pps->id) != 0; - if (!found) - return -ENODEV; - file->private_data = pps; return 0; @@ -211,11 +253,6 @@ static int pps_cdev_open(struct inode *inode, struct file *file) static int pps_cdev_release(struct inode *inode, struct file *file) { - struct pps_device *pps = file->private_data; - - /* Free the PPS source and wake up (possible) deregistration */ - pps_put_source(pps); - return 0; } @@ -233,25 +270,67 @@ static const struct file_operations pps_cdev_fops = { .release = pps_cdev_release, }; +static void pps_device_destruct(struct device *dev) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + /* release id here to protect others from using it while it's + * still in use */ + mutex_lock(&pps_idr_lock); + idr_remove(&pps_idr, pps->id); + mutex_unlock(&pps_idr_lock); + + kfree(dev); + kfree(pps); +} + int pps_register_cdev(struct pps_device *pps) { int err; + dev_t devt; + + mutex_lock(&pps_idr_lock); + /* Get new ID for the new PPS source */ + if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) { + mutex_unlock(&pps_idr_lock); + return -ENOMEM; + } + + /* Now really allocate the PPS source. + * After idr_get_new() calling the new source will be freely available + * into the kernel. 
+ */ + err = idr_get_new(&pps_idr, pps, &pps->id); + mutex_unlock(&pps_idr_lock); + + if (err < 0) + return err; + + pps->id &= MAX_ID_MASK; + if (pps->id >= PPS_MAX_SOURCES) { + pr_err("%s: too many PPS sources in the system\n", + pps->info.name); + err = -EBUSY; + goto free_idr; + } + + devt = MKDEV(MAJOR(pps_devt), pps->id); - pps->devno = MKDEV(MAJOR(pps_devt), pps->id); cdev_init(&pps->cdev, &pps_cdev_fops); pps->cdev.owner = pps->info.owner; - err = cdev_add(&pps->cdev, pps->devno, 1); + err = cdev_add(&pps->cdev, devt, 1); if (err) { - printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n", + pr_err("%s: failed to add char device %d:%d\n", pps->info.name, MAJOR(pps_devt), pps->id); - return err; + goto free_idr; } - pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, + pps->dev = device_create(pps_class, pps->info.dev, devt, pps, "pps%d", pps->id); if (IS_ERR(pps->dev)) goto del_cdev; - dev_set_drvdata(pps->dev, pps); + + pps->dev->release = pps_device_destruct; pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, MAJOR(pps_devt), pps->id); @@ -261,12 +340,17 @@ int pps_register_cdev(struct pps_device *pps) del_cdev: cdev_del(&pps->cdev); +free_idr: + mutex_lock(&pps_idr_lock); + idr_remove(&pps_idr, pps->id); + mutex_unlock(&pps_idr_lock); + return err; } void pps_unregister_cdev(struct pps_device *pps) { - device_destroy(pps_class, pps->devno); + device_destroy(pps_class, pps->dev->devt); cdev_del(&pps->cdev); } @@ -286,14 +370,14 @@ static int __init pps_init(void) pps_class = class_create(THIS_MODULE, "pps"); if (!pps_class) { - printk(KERN_ERR "pps: failed to allocate class\n"); + pr_err("failed to allocate class\n"); return -ENOMEM; } pps_class->dev_attrs = pps_attrs; err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps"); if (err < 0) { - printk(KERN_ERR "pps: failed to allocate char device region\n"); + pr_err("failed to allocate char device region\n"); goto remove_class; } diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 1eb82c4c712e..467e82bd0929 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -46,7 +46,6 @@ static void rio_init_em(struct rio_dev *rdev); DEFINE_SPINLOCK(rio_global_list_lock); static int next_destid = 0; -static int next_switchid = 0; static int next_net = 0; static int next_comptag = 1; @@ -378,12 +377,30 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, struct rio_dev *rdev; struct rio_switch *rswitch = NULL; int result, rdid; + size_t size; + u32 swpinfo = 0; - rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL); + size = sizeof(struct rio_dev); + if (rio_mport_read_config_32(port, destid, hopcount, + RIO_PEF_CAR, &result)) + return NULL; + + if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { + rio_mport_read_config_32(port, destid, hopcount, + RIO_SWP_INFO_CAR, &swpinfo); + if (result & RIO_PEF_SWITCH) { + size += (RIO_GET_TOTAL_PORTS(swpinfo) * + sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); + } + } + + rdev = kzalloc(size, GFP_KERNEL); if (!rdev) return NULL; rdev->net = net; + rdev->pef = result; + rdev->swpinfo = swpinfo; rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR, &result); rdev->did = result >> 16; @@ -397,8 +414,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR, &result); rdev->asm_rev = result >> 16; - rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, - &rdev->pef); if (rdev->pef & RIO_PEF_EXT_FEATURES) { 
rdev->efptr = result & 0xffff; rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, @@ -408,11 +423,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, hopcount, RIO_EFB_ERR_MGMNT); } - if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { - rio_mport_read_config_32(port, destid, hopcount, - RIO_SWP_INFO_CAR, &rdev->swpinfo); - } - rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, &rdev->src_ops); rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR, @@ -427,6 +437,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rio_mport_write_config_32(port, destid, hopcount, RIO_COMPONENT_TAG_CSR, next_comptag); rdev->comp_tag = next_comptag++; + } else { + rio_mport_read_config_32(port, destid, hopcount, + RIO_COMPONENT_TAG_CSR, + &rdev->comp_tag); } if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { @@ -437,21 +451,20 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, next_destid++; } else rdev->destid = rio_get_device_id(port, destid, hopcount); - } else - /* Switch device has an associated destID */ - rdev->destid = RIO_INVALID_DESTID; + + rdev->hopcount = 0xff; + } else { + /* Switch device has an associated destID which + * will be adjusted later + */ + rdev->destid = destid; + rdev->hopcount = hopcount; + } /* If a PE has both switch and other functions, show it as a switch */ if (rio_is_switch(rdev)) { - rswitch = kzalloc(sizeof(*rswitch) + - RIO_GET_TOTAL_PORTS(rdev->swpinfo) * - sizeof(rswitch->nextdev[0]), - GFP_KERNEL); - if (!rswitch) - goto cleanup; - rswitch->switchid = next_switchid; - rswitch->hopcount = hopcount; - rswitch->destid = destid; + rswitch = rdev->rswitch; + rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID; rswitch->port_ok = 0; rswitch->route_table = kzalloc(sizeof(u8)* RIO_MAX_ROUTE_ENTRIES(port->sys_size), @@ -462,15 +475,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size); rdid++) rswitch->route_table[rdid] = RIO_INVALID_ROUTE; - rdev->rswitch = rswitch; - rswitch->rdev = rdev; dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, - rdev->rswitch->switchid); + rswitch->switchid); rio_switch_init(rdev, do_enum); - if (do_enum && rdev->rswitch->clr_table) - rdev->rswitch->clr_table(port, destid, hopcount, - RIO_GLOBAL_TABLE); + if (do_enum && rswitch->clr_table) + rswitch->clr_table(port, destid, hopcount, + RIO_GLOBAL_TABLE); list_add_tail(&rswitch->node, &rio_switches); @@ -506,10 +517,9 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, return rdev; cleanup: - if (rswitch) { + if (rswitch->route_table) kfree(rswitch->route_table); - kfree(rswitch); - } + kfree(rdev); return NULL; } @@ -632,8 +642,7 @@ rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) /** * rio_route_add_entry- Add a route entry to a switch routing table - * @mport: Master port to send transaction - * @rswitch: Switch device + * @rdev: RIO device * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Port number to be routed @@ -647,31 +656,31 @@ rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) * on failure. 
*/ static int -rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, +rio_route_add_entry(struct rio_dev *rdev, u16 table, u16 route_destid, u8 route_port, int lock) { int rc; if (lock) { - rc = rio_lock_device(mport, rswitch->destid, - rswitch->hopcount, 1000); + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); if (rc) return rc; } - rc = rswitch->add_entry(mport, rswitch->destid, - rswitch->hopcount, table, - route_destid, route_port); + rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); if (lock) - rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); return rc; } /** * rio_route_get_entry- Read a route entry in a switch routing table - * @mport: Master port to send transaction - * @rswitch: Switch device + * @rdev: RIO device * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Pointer to read port number into @@ -685,23 +694,24 @@ rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, * on failure. */ static int -rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, +rio_route_get_entry(struct rio_dev *rdev, u16 table, u16 route_destid, u8 *route_port, int lock) { int rc; if (lock) { - rc = rio_lock_device(mport, rswitch->destid, - rswitch->hopcount, 1000); + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); if (rc) return rc; } - rc = rswitch->get_entry(mport, rswitch->destid, - rswitch->hopcount, table, - route_destid, route_port); + rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); if (lock) - rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); return rc; } @@ -809,16 +819,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, return -1; if (rio_is_switch(rdev)) { - next_switchid++; sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); - rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, port->host_deviceid, sw_inport, 0); rdev->rswitch->route_table[port->host_deviceid] = sw_inport; for (destid = 0; destid < next_destid; destid++) { if (destid == port->host_deviceid) continue; - rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, destid, sw_inport, 0); rdev->rswitch->route_table[destid] = sw_inport; } @@ -850,8 +859,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, "RIO: scanning device on port %d\n", port_num); rdev->rswitch->port_ok |= (1 << port_num); - rio_route_add_entry(port, rdev->rswitch, - RIO_GLOBAL_TABLE, + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, RIO_ANY_DESTID(port->sys_size), port_num, 0); @@ -865,7 +873,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, destid < next_destid; destid++) { if (destid == port->host_deviceid) continue; - rio_route_add_entry(port, rdev->rswitch, + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, destid, port_num, @@ -904,7 +912,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, next_destid++; } - rdev->rswitch->destid = sw_destid; + rdev->destid = sw_destid; } else pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n", rio_name(rdev), rdev->vid, rdev->did); @@ -941,7 +949,7 @@ 
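(Aside, not part of the merged patch: the rio_route_add_entry()/rio_route_get_entry() hunks above change the helpers from taking an (mport, rswitch) pair to taking the rio_dev and deriving the master port, destid and hopcount from it. The minimal stand-alone C sketch below shows that "pass the device, derive the addressing triple" shape with invented demo types, not the kernel API.)

#include <stdio.h>

struct demo_mport { int id; };

/* The device carries everything needed to address it. */
struct demo_dev {
	struct demo_mport *hport;
	unsigned short destid;
	unsigned char hopcount;
};

/* Old shape: every caller had to pass the addressing triple explicitly. */
static int route_add_old(struct demo_mport *mport, unsigned short destid,
			 unsigned char hopcount, unsigned short route_destid,
			 unsigned char route_port)
{
	printf("mport %d dest %u hop %u -> route %u via port %u\n",
	       mport->id, destid, hopcount, route_destid, route_port);
	return 0;
}

/* New shape: one argument, the device; the triple comes from it. */
static int route_add_new(struct demo_dev *rdev, unsigned short route_destid,
			 unsigned char route_port)
{
	return route_add_old(rdev->hport, rdev->destid, rdev->hopcount,
			     route_destid, route_port);
}

int main(void)
{
	struct demo_mport mport = { .id = 0 };
	struct demo_dev rdev = { .hport = &mport, .destid = 5, .hopcount = 2 };

	route_add_new(&rdev, 9, 3);	/* caller no longer repeats the triple */
	return 0;
}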
static int rio_enum_complete(struct rio_mport *port) */ static int __devinit rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, - u8 hopcount) + u8 hopcount, struct rio_dev *prev, int prev_port) { u8 port_num, route_port; struct rio_dev *rdev; @@ -951,14 +959,15 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) { /* Add device to the global and bus/net specific list. */ list_add_tail(&rdev->net_list, &net->devices); + rdev->prev = prev; + if (prev && rio_is_switch(prev)) + prev->rswitch->nextdev[prev_port] = rdev; } else return -1; if (rio_is_switch(rdev)) { - next_switchid++; - /* Associated destid is how we accessed this switch */ - rdev->rswitch->destid = destid; + rdev->destid = destid; pr_debug( "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", @@ -981,7 +990,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, for (ndestid = 0; ndestid < RIO_ANY_DESTID(port->sys_size); ndestid++) { - rio_route_get_entry(port, rdev->rswitch, + rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, ndestid, &route_port, 0); @@ -992,8 +1001,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, if (ndestid == RIO_ANY_DESTID(port->sys_size)) continue; rio_unlock_device(port, destid, hopcount); - if (rio_disc_peer - (net, port, ndestid, hopcount + 1) < 0) + if (rio_disc_peer(net, port, ndestid, + hopcount + 1, rdev, port_num) < 0) return -1; } } @@ -1069,14 +1078,14 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port) */ static void rio_update_route_tables(struct rio_mport *port) { - struct rio_dev *rdev; + struct rio_dev *rdev, *swrdev; struct rio_switch *rswitch; u8 sport; u16 destid; list_for_each_entry(rdev, &rio_devices, global_list) { - destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid; + destid = rdev->destid; list_for_each_entry(rswitch, &rio_switches, node) { @@ -1084,14 +1093,16 @@ static void rio_update_route_tables(struct rio_mport *port) continue; if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { + swrdev = sw_to_rio_dev(rswitch); + /* Skip if destid ends in empty switch*/ - if (rswitch->destid == destid) + if (swrdev->destid == destid) continue; - sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo); + sport = RIO_GET_PORT_NUM(swrdev->swpinfo); if (rswitch->add_entry) { - rio_route_add_entry(port, rswitch, + rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE, destid, sport, 0); rswitch->route_table[destid] = sport; @@ -1203,21 +1214,20 @@ static void rio_build_route_tables(void) list_for_each_entry(rdev, &rio_devices, global_list) if (rio_is_switch(rdev)) { - rio_lock_device(rdev->net->hport, rdev->rswitch->destid, - rdev->rswitch->hopcount, 1000); + rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); i++) { - if (rio_route_get_entry - (rdev->net->hport, rdev->rswitch, - RIO_GLOBAL_TABLE, i, &sport, 0) < 0) + if (rio_route_get_entry(rdev, + RIO_GLOBAL_TABLE, i, &sport, 0) < 0) continue; rdev->rswitch->route_table[i] = sport; } rio_unlock_device(rdev->net->hport, - rdev->rswitch->destid, - rdev->rswitch->hopcount); + rdev->destid, + rdev->hopcount); } } @@ -1284,7 +1294,7 @@ int __devinit rio_disc_mport(struct rio_mport *mport) mport->host_deviceid); if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), - 0) < 0) { + 0, NULL, 0) < 0) { printk(KERN_INFO "RIO: master port %d device has failed discovery\n", mport->id); diff 
--git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 137ed93ee33f..76b41853a877 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -217,7 +217,7 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev) err = device_create_bin_file(&rdev->dev, &rio_config_attr); - if (!err && rdev->rswitch) { + if (!err && (rdev->pef & RIO_PEF_SWITCH)) { err = device_create_file(&rdev->dev, &dev_attr_routes); if (!err && rdev->rswitch->sw_sysfs) err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE); @@ -239,7 +239,7 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev) void rio_remove_sysfs_dev_files(struct rio_dev *rdev) { device_remove_bin_file(&rdev->dev, &rio_config_attr); - if (rdev->rswitch) { + if (rdev->pef & RIO_PEF_SWITCH) { device_remove_file(&rdev->dev, &dev_attr_routes); if (rdev->rswitch->sw_sysfs) rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 7b5080c45569..cc2a3b74d0f0 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -471,16 +471,9 @@ exit: */ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; u32 regval; - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - rio_mport_read_config_32(rdev->net->hport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), ®val); if (lock) @@ -488,7 +481,7 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) else regval &= ~RIO_PORT_N_CTL_LOCKOUT; - rio_mport_write_config_32(rdev->net->hport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), regval); return 0; @@ -507,7 +500,7 @@ static int rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum) { u32 result; - int p_port, dstid, rc = -EIO; + int p_port, rc = -EIO; struct rio_dev *prev = NULL; /* Find switch with failed RIO link */ @@ -522,9 +515,7 @@ rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum) if (prev == NULL) goto err_out; - dstid = (rdev->pef & RIO_PEF_SWITCH) ? 
- rdev->rswitch->destid : rdev->destid; - p_port = prev->rswitch->route_table[dstid]; + p_port = prev->rswitch->route_table[rdev->destid]; if (p_port != RIO_INVALID_ROUTE) { pr_debug("RIO: link failed on [%s]-P%d\n", @@ -567,15 +558,8 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount) */ static int rio_chk_dev_access(struct rio_dev *rdev) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount); + return rio_mport_chk_dev_access(rdev->net->hport, + rdev->destid, rdev->hopcount); } /** @@ -588,23 +572,20 @@ static int rio_chk_dev_access(struct rio_dev *rdev) static int rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 regval; int checkcount; if (lnkresp) { /* Read from link maintenance response register * to clear valid bit */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), ®val); udelay(50); } /* Issue Input-status command */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum), RIO_MNT_REQ_CMD_IS); @@ -615,7 +596,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) checkcount = 3; while (checkcount--) { udelay(50); - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), ®val); if (regval & RIO_PORT_N_MNT_RSP_RVAL) { @@ -635,15 +616,12 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) */ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum]; u32 regval; u32 far_ackid, far_linkstat, near_ackid; if (err_status == 0) - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), &err_status); @@ -661,7 +639,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) pnum, regval); far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5; far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT; - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), ®val); pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval); @@ -679,9 +657,8 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) /* Align near outstanding/outbound ackIDs with * far inbound. 
*/ - rio_mport_write_config_32(mport, destid, - hopcount, rdev->phys_efptr + - RIO_PORT_N_ACK_STS_CSR(pnum), + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), (near_ackid << 24) | (far_ackid << 8) | far_ackid); /* Align far outstanding/outbound ackIDs with @@ -698,7 +675,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n"); } rd_err: - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); @@ -710,7 +687,7 @@ rd_err: RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); udelay(50); - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); @@ -730,13 +707,10 @@ rd_err: int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) { struct rio_dev *rdev; - struct rio_mport *mport; - u8 hopcount; - u16 destid; u32 err_status, em_perrdet, em_ltlerrdet; int rc, portnum; - rdev = rio_get_comptag(pw_msg->em.comptag, NULL); + rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); if (rdev == NULL) { /* Device removed or enumeration error */ pr_debug("RIO: %s No matching device for CTag 0x%08x\n", @@ -800,17 +774,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) return 0; } - mport = rdev->net->hport; - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - /* * Process the port-write notification from switch */ if (rdev->rswitch->em_handle) rdev->rswitch->em_handle(rdev, portnum); - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), &err_status); pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); @@ -840,7 +810,7 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) rdev->rswitch->port_ok &= ~(1 << portnum); rio_set_port_lockout(rdev, portnum, 1); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(portnum), RIO_PORT_N_ACK_CLEAR); @@ -851,28 +821,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) } } - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n", portnum, em_perrdet); /* Clear EM Port N Error Detect CSR */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0); } - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n", em_ltlerrdet); /* Clear EM L/T Layer Error Detect CSR */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0); } /* Clear remaining error bits and Port-Write Pending bit */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), err_status); diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 0bb871cb5c40..095016a9dec1 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -209,9 
+209,6 @@ idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, static int idtg2_em_init(struct rio_dev *rdev) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 regval; int i, tmp; @@ -220,29 +217,25 @@ idtg2_em_init(struct rio_dev *rdev) * All standard EM configuration should be performed at upper level. */ - pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount); + pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Set Port-Write info CSR: PRIO=3 and CRF=1 */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_PW_INFO_CSR, 0x0000e000); + rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000); /* * Configure LT LAYER error reporting. */ /* Enable standard (RIO.p8) error reporting */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_LT_ERR_REPORT_EN, + rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN, REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR | REM_LTL_ERR_UNSUPTR); /* Use Port-Writes for LT layer error reporting. * Enable per-port reset */ - rio_mport_read_config_32(mport, destid, hopcount, - IDT_DEV_CTRL_1, ®val); - rio_mport_write_config_32(mport, destid, hopcount, - IDT_DEV_CTRL_1, + rio_read_config_32(rdev, IDT_DEV_CTRL_1, ®val); + rio_write_config_32(rdev, IDT_DEV_CTRL_1, regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH); /* @@ -250,45 +243,40 @@ idtg2_em_init(struct rio_dev *rdev) */ /* Report all RIO.p8 errors supported by device */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); + rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); /* Configure reporting of implementation specific errors/events */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED); + rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC, + IDT_PORT_INIT_TX_ACQUIRED); /* Use Port-Writes for port error reporting and enable error logging */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { - rio_mport_read_config_32(mport, destid, hopcount, - IDT_PORT_OPS(i), ®val); - rio_mport_write_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, IDT_PORT_OPS(i), ®val); + rio_write_config_32(rdev, IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW | IDT_PORT_OPS_PL_ELOG | IDT_PORT_OPS_LL_ELOG | IDT_PORT_OPS_LT_ELOG); } /* Overwrite error log if full */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); + rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); /* * Configure LANE error reporting. */ /* Disable line error reporting */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_LANE_ERR_REPORT_EN_BC, 0); + rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0); /* Use Port-Writes for lane error reporting (when enabled) * (do per-lane update because lanes may have different configuration) */ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 
48 : 16; for (i = 0; i < tmp; i++) { - rio_mport_read_config_32(mport, destid, hopcount, - IDT_LANE_CTRL(i), ®val); - rio_mport_write_config_32(mport, destid, hopcount, - IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW); + rio_read_config_32(rdev, IDT_LANE_CTRL(i), ®val); + rio_write_config_32(rdev, IDT_LANE_CTRL(i), + regval | IDT_LANE_CTRL_GENPW); } /* @@ -296,41 +284,32 @@ idtg2_em_init(struct rio_dev *rdev) */ /* Disable JTAG and I2C Error capture */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_AUX_PORT_ERR_CAP_EN, 0); + rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0); /* Disable JTAG and I2C Error reporting/logging */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_AUX_ERR_REPORT_EN, 0); + rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0); /* Disable Port-Write notification from JTAG */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_JTAG_CTRL, 0); + rio_write_config_32(rdev, IDT_JTAG_CTRL, 0); /* Disable Port-Write notification from I2C */ - rio_mport_read_config_32(mport, destid, hopcount, - IDT_I2C_MCTRL, ®val); - rio_mport_write_config_32(mport, destid, hopcount, - IDT_I2C_MCTRL, - regval & ~IDT_I2C_MCTRL_GENPW); + rio_read_config_32(rdev, IDT_I2C_MCTRL, ®val); + rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW); /* * Configure CFG_BLK error reporting. */ /* Disable Configuration Block error capture */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_CFGBLK_ERR_CAPTURE_EN, 0); + rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); /* Disable Port-Writes for Configuration Block error reporting */ - rio_mport_read_config_32(mport, destid, hopcount, - IDT_CFGBLK_ERR_REPORT, ®val); - rio_mport_write_config_32(mport, destid, hopcount, - IDT_CFGBLK_ERR_REPORT, - regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); + rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, ®val); + rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, + regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); /* set TVAL = ~50us */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; @@ -339,18 +318,15 @@ idtg2_em_init(struct rio_dev *rdev) static int idtg2_em_handler(struct rio_dev *rdev, u8 portnum) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 regval, em_perrdet, em_ltlerrdet; - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { /* Service Logical/Transport Layer Error(s) */ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { /* Implementation specific error reported */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, ®val); pr_debug("RIO: %s Implementation Specific LTL errors" \ @@ -358,13 +334,12 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum) rio_name(rdev), em_ltlerrdet, regval); /* Clear implementation specific address capture CSR */ - rio_mport_write_config_32(mport, destid, hopcount, - IDT_ISLTL_ADDRESS_CAP, 0); + rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); } } - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { /* Service Port-Level Error(s) */ @@ -372,14 +347,14 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum) /* Implementation Specific port error reported */ /* Get IS errors reported */ - 
rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, IDT_PORT_ISERR_DET(portnum), ®val); pr_debug("RIO: %s Implementation Specific Port" \ " errors 0x%x\n", rio_name(rdev), regval); /* Clear all implementation specific events */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, IDT_PORT_ISERR_DET(portnum), 0); } } @@ -391,14 +366,10 @@ static ssize_t idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; ssize_t len = 0; u32 regval; - while (!rio_mport_read_config_32(mport, destid, hopcount, - IDT_ERR_RD, ®val)) { + while (!rio_read_config_32(rdev, IDT_ERR_RD, ®val)) { if (!regval) /* 0 = end of log */ break; len += snprintf(buf + len, PAGE_SIZE - len, @@ -445,3 +416,5 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index fc9f6374f759..3a971077e7bf 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c @@ -117,10 +117,6 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; - pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); rdev->rswitch->add_entry = idtcps_route_add_entry; rdev->rswitch->get_entry = idtcps_route_get_entry; @@ -132,7 +128,7 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) if (do_enum) { /* set TVAL = ~50us */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); } diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c index b9a389b9f812..3994c00aa01f 100644 --- a/drivers/rapidio/switches/tsi568.c +++ b/drivers/rapidio/switches/tsi568.c @@ -113,22 +113,17 @@ tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, static int tsi568_em_init(struct rio_dev *rdev) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 regval; int portnum; - pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount); + pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Make sure that Port-Writes are disabled (for all ports) */ for (portnum = 0; portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { - rio_mport_read_config_32(mport, destid, hopcount, - TSI568_SP_MODE(portnum), ®val); - rio_mport_write_config_32(mport, destid, hopcount, - TSI568_SP_MODE(portnum), - regval | TSI568_SP_MODE_PW_DIS); + rio_read_config_32(rdev, TSI568_SP_MODE(portnum), ®val); + rio_write_config_32(rdev, TSI568_SP_MODE(portnum), + regval | TSI568_SP_MODE_PW_DIS); } return 0; diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 2003fb63c404..1a62934bfebc 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ 
-158,48 +158,45 @@ tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, static int tsi57x_em_init(struct rio_dev *rdev) { - struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 regval; int portnum; - pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount); + pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); for (portnum = 0; portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { /* Make sure that Port-Writes are enabled (for all ports) */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, TSI578_SP_MODE(portnum), ®val); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, TSI578_SP_MODE(portnum), regval & ~TSI578_SP_MODE_PW_DIS); /* Clear all pending interrupts */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), ®val); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), regval & 0x07120214); - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), ®val); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), regval & 0x000700bd); /* Enable all interrupts to allow ports to send a port-write */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, TSI578_SP_CTL_INDEP(portnum), ®val); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, TSI578_SP_CTL_INDEP(portnum), regval | 0x000b0000); /* Skip next (odd) port if the current port is in x4 mode */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), ®val); if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) @@ -207,7 +204,7 @@ tsi57x_em_init(struct rio_dev *rdev) } /* set TVAL = ~50us */ - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8); return 0; @@ -217,14 +214,12 @@ static int tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) { struct rio_mport *mport = rdev->net->hport; - u16 destid = rdev->rswitch->destid; - u8 hopcount = rdev->rswitch->hopcount; u32 intstat, err_status; int sendcount, checkcount; u8 route_port; u32 regval; - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), &err_status); @@ -232,15 +227,15 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | RIO_PORT_N_ERR_STS_PW_INP_ES))) { /* Remove any queued packets by locking/unlocking port */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), ®val); if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), regval | RIO_PORT_N_CTL_LOCKOUT); udelay(50); - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), regval); } @@ -248,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) /* Read from link maintenance response register to clear * valid bit */ - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, 
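(Aside, not part of the merged patch: the switch-driver hunks above, in idt_gen2, tsi568 and tsi57x, repeatedly read a control register, set or clear a few bits, and write the value back, now through the per-device rio_read_config_32()/rio_write_config_32() wrappers. The stand-alone C sketch below models that read-modify-write pattern over a fake register file; the register names, offsets and bit values are invented for illustration.)

#include <stdio.h>
#include <stdint.h>

/* Fake 32-bit register file standing in for device config space. */
static uint32_t demo_regs[16];

static int demo_read32(unsigned int off, uint32_t *val)
{
	*val = demo_regs[off];
	return 0;
}

static int demo_write32(unsigned int off, uint32_t val)
{
	demo_regs[off] = val;
	return 0;
}

/* Read-modify-write: clear the bits in 'clr', then set the bits in 'set'. */
static int demo_update32(unsigned int off, uint32_t clr, uint32_t set)
{
	uint32_t regval;
	int rc = demo_read32(off, &regval);

	if (rc)
		return rc;
	regval &= ~clr;
	regval |= set;
	return demo_write32(off, regval);
}

#define DEMO_SP_MODE		0x3
#define DEMO_SP_MODE_PW_DIS	0x08000000u

int main(void)
{
	/* Same shape as the tsi57x hunk: enable port-writes by clearing
	 * the (here invented) PW_DIS bit while preserving the rest. */
	demo_regs[DEMO_SP_MODE] = DEMO_SP_MODE_PW_DIS | 0x5;
	demo_update32(DEMO_SP_MODE, DEMO_SP_MODE_PW_DIS, 0);
	printf("SP_MODE now 0x%08x\n", demo_regs[DEMO_SP_MODE]);
	return 0;
}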
rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), ®val); @@ -257,13 +252,12 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) */ sendcount = 3; while (sendcount) { - rio_mport_write_config_32(mport, destid, hopcount, + rio_write_config_32(rdev, TSI578_SP_CS_TX(portnum), 0x40fc8000); checkcount = 3; while (checkcount--) { udelay(50); - rio_mport_read_config_32( - mport, destid, hopcount, + rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), ®val); @@ -277,25 +271,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) exit_es: /* Clear implementation specific error status bits */ - rio_mport_read_config_32(mport, destid, hopcount, - TSI578_SP_INT_STATUS(portnum), &intstat); + rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat); pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", - destid, hopcount, portnum, intstat); + rdev->destid, rdev->hopcount, portnum, intstat); if (intstat & 0x10000) { - rio_mport_read_config_32(mport, destid, hopcount, + rio_read_config_32(rdev, TSI578_SP_LUT_PEINF(portnum), ®val); regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); route_port = rdev->rswitch->route_table[regval]; pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", rio_name(rdev), portnum, regval); - tsi57x_route_add_entry(mport, destid, hopcount, + tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount, RIO_GLOBAL_TABLE, regval, route_port); } - rio_mport_write_config_32(mport, destid, hopcount, - TSI578_SP_INT_STATUS(portnum), - intstat & 0x000700bd); + rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), + intstat & 0x000700bd); return 0; } diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 2ce2eb71d0f5..dd6308499bd4 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -249,7 +249,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) } static int pm8607_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); uint8_t val, mask; @@ -263,6 +263,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev, ret = choose_voltage(rdev, min_uV, max_uV); if (ret < 0) return -EINVAL; + *selector = ret; val = (uint8_t)(ret << info->vol_shift); mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index dd30e883d4a7..e1d943619ab8 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -186,13 +186,25 @@ config REGULATOR_PCAP This driver provides support for the voltage regulators of the PCAP2 PMIC. +config REGULATOR_MC13XXX_CORE + tristate + config REGULATOR_MC13783 tristate "Support regulators on Freescale MC13783 PMIC" depends on MFD_MC13783 + select REGULATOR_MC13XXX_CORE help Say y here to support the regulators found on the Freescale MC13783 PMIC. +config REGULATOR_MC13892 + tristate "Support regulators on Freescale MC13892 PMIC" + depends on MFD_MC13XXX + select REGULATOR_MC13XXX_CORE + help + Say y here to support the regulators found on the Freescale MC13892 + PMIC. + config REGULATOR_AB3100 tristate "ST-Ericsson AB3100 Regulator functions" depends on AB3100_CORE @@ -250,5 +262,15 @@ config REGULATOR_TPS6586X help This driver supports TPS6586X voltage regulator chips. +config REGULATOR_TPS6524X + tristate "TI TPS6524X Power regulators" + depends on SPI + help + This driver supports TPS6524X voltage regulator chips. 
TPS6524X + provides three step-down converters and two general-purpose LDO + voltage regulators. This device is interfaced using a customized + serial interface currently supported on the sequencer serial + port controller. + endif diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index bff815736780..0b5e88c2b8d7 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -30,10 +30,13 @@ obj-$(CONFIG_REGULATOR_DA903X) += da903x.o obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o +obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o +obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o +obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index b349266a43de..ed6feaf9398d 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -362,7 +362,8 @@ static int ab3100_get_best_voltage_index(struct regulator_dev *reg, } static int ab3100_set_voltage_regulator(struct regulator_dev *reg, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct ab3100_regulator *abreg = reg->reg_data; u8 regval; @@ -373,6 +374,8 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg, if (bestindex < 0) return bestindex; + *selector = bestindex; + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index db6b70f20511..d9a052c53aec 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c @@ -3,18 +3,13 @@ * * License Terms: GNU General Public License v2 * - * Author: Sundar Iyer for ST-Ericsson + * Authors: Sundar Iyer for ST-Ericsson + * Bengt Jonsson for ST-Ericsson * * AB8500 peripheral regulators * - * AB8500 supports the following regulators, - * LDOs - VAUDIO, VANAMIC2/2, VDIGMIC, VINTCORE12, VTVOUT, - * VAUX1/2/3, VANA - * - * for DB8500 cut 1.0 and previous versions of the silicon, all accesses - * to registers are through the DB8500 SPI. 
In cut 1.1 onwards, these - * accesses are through the DB8500 PRCMU I2C - * + * AB8500 supports the following regulators: + * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA */ #include #include @@ -28,38 +23,37 @@ /** * struct ab8500_regulator_info - ab8500 regulator information + * @dev: device pointer * @desc: regulator description - * @ab8500: ab8500 parent * @regulator_dev: regulator device * @max_uV: maximum voltage (for variable voltage supplies) * @min_uV: minimum voltage (for variable voltage supplies) * @fixed_uV: typical voltage (for fixed voltage supplies) * @update_bank: bank to control on/off * @update_reg: register to control on/off - * @mask: mask to enable/disable regulator - * @enable: bits to enable the regulator in normal(high power) mode + * @update_mask: mask to enable/disable regulator + * @update_val_enable: bits to enable the regulator in normal (high power) mode * @voltage_bank: bank to control regulator voltage * @voltage_reg: register to control regulator voltage * @voltage_mask: mask to control regulator voltage - * @supported_voltages: supported voltage table + * @voltages: supported voltage table * @voltages_len: number of supported voltages for the regulator */ struct ab8500_regulator_info { struct device *dev; struct regulator_desc desc; - struct ab8500 *ab8500; struct regulator_dev *regulator; int max_uV; int min_uV; int fixed_uV; u8 update_bank; u8 update_reg; - u8 mask; - u8 enable; + u8 update_mask; + u8 update_val_enable; u8 voltage_bank; u8 voltage_reg; u8 voltage_mask; - int const *supported_voltages; + int const *voltages; int voltages_len; }; @@ -83,6 +77,17 @@ static const int ldo_vauxn_voltages[] = { 3300000, }; +static const int ldo_vaux3_voltages[] = { + 1200000, + 1500000, + 1800000, + 2100000, + 2500000, + 2750000, + 2790000, + 2910000, +}; + static const int ldo_vintcore_voltages[] = { 1200000, 1225000, @@ -95,57 +100,80 @@ static const int ldo_vintcore_voltages[] = { static int ab8500_regulator_enable(struct regulator_dev *rdev) { - int regulator_id, ret; + int ret; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } ret = abx500_mask_and_set_register_interruptible(info->dev, - info->update_bank, info->update_reg, info->mask, info->enable); + info->update_bank, info->update_reg, + info->update_mask, info->update_val_enable); if (ret < 0) dev_err(rdev_get_dev(rdev), "couldn't set enable bits for regulator\n"); + + dev_vdbg(rdev_get_dev(rdev), + "%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", + info->desc.name, info->update_bank, info->update_reg, + info->update_mask, info->update_val_enable); + return ret; } static int ab8500_regulator_disable(struct regulator_dev *rdev) { - int regulator_id, ret; + int ret; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } ret = abx500_mask_and_set_register_interruptible(info->dev, - info->update_bank, info->update_reg, info->mask, 0x0); + info->update_bank, info->update_reg, + info->update_mask, 0x0); if (ret < 0) dev_err(rdev_get_dev(rdev), "couldn't set disable bits for regulator\n"); + + dev_vdbg(rdev_get_dev(rdev), + "%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", + 
info->desc.name, info->update_bank, info->update_reg, + info->update_mask, 0x0); + return ret; } static int ab8500_regulator_is_enabled(struct regulator_dev *rdev) { - int regulator_id, ret; + int ret; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - u8 value; + u8 regval; - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } ret = abx500_get_register_interruptible(info->dev, - info->update_bank, info->update_reg, &value); + info->update_bank, info->update_reg, ®val); if (ret < 0) { dev_err(rdev_get_dev(rdev), "couldn't read 0x%x register\n", info->update_reg); return ret; } - if (value & info->mask) + dev_vdbg(rdev_get_dev(rdev), + "%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x," + " 0x%x\n", + info->desc.name, info->update_bank, info->update_reg, + info->update_mask, regval); + + if (regval & info->update_mask) return true; else return false; @@ -153,12 +181,12 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev) static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) { - int regulator_id; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } /* return the uV for the fixed regulators */ if (info->fixed_uV) @@ -167,33 +195,40 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) if (selector >= info->voltages_len) return -EINVAL; - return info->supported_voltages[selector]; + return info->voltages[selector]; } static int ab8500_regulator_get_voltage(struct regulator_dev *rdev) { - int regulator_id, ret; + int ret, val; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - u8 value; + u8 regval; - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } - ret = abx500_get_register_interruptible(info->dev, info->voltage_bank, - info->voltage_reg, &value); + ret = abx500_get_register_interruptible(info->dev, + info->voltage_bank, info->voltage_reg, ®val); if (ret < 0) { dev_err(rdev_get_dev(rdev), "couldn't read voltage reg for regulator\n"); return ret; } + dev_vdbg(rdev_get_dev(rdev), + "%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x," + " 0x%x\n", + info->desc.name, info->voltage_bank, info->voltage_reg, + info->voltage_mask, regval); + /* vintcore has a different layout */ - value &= info->voltage_mask; - if (regulator_id == AB8500_LDO_INTCORE) - ret = info->supported_voltages[value >> 0x3]; + val = regval & info->voltage_mask; + if (info->desc.id == AB8500_LDO_INTCORE) + ret = info->voltages[val >> 0x3]; else - ret = info->supported_voltages[value]; + ret = info->voltages[val]; return ret; } @@ -206,8 +241,8 @@ static int ab8500_get_best_voltage_index(struct regulator_dev *rdev, /* check the supported voltage */ for (i = 0; i < info->voltages_len; i++) { - if ((info->supported_voltages[i] >= min_uV) && - (info->supported_voltages[i] <= max_uV)) + if ((info->voltages[i] >= min_uV) && + (info->voltages[i] <= max_uV)) return i; } @@ -215,14 +250,17 @@ static int ab8500_get_best_voltage_index(struct regulator_dev *rdev, } static int ab8500_regulator_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int 
max_uV, + unsigned *selector) { - int regulator_id, ret; + int ret; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + u8 regval; - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } /* get the appropriate voltages within the range */ ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV); @@ -232,14 +270,23 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev, return ret; } + *selector = ret; + /* set the registers for the request */ + regval = (u8)ret; ret = abx500_mask_and_set_register_interruptible(info->dev, - info->voltage_bank, info->voltage_reg, - info->voltage_mask, (u8)ret); + info->voltage_bank, info->voltage_reg, + info->voltage_mask, regval); if (ret < 0) dev_err(rdev_get_dev(rdev), "couldn't set voltage reg for regulator\n"); + dev_vdbg(rdev_get_dev(rdev), + "%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x," + " 0x%x\n", + info->desc.name, info->voltage_bank, info->voltage_reg, + info->voltage_mask, regval); + return ret; } @@ -254,17 +301,17 @@ static struct regulator_ops ab8500_regulator_ops = { static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) { - int regulator_id; struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - regulator_id = rdev_get_id(rdev); - if (regulator_id >= AB8500_NUM_REGULATORS) + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; + } return info->fixed_uV; } -static struct regulator_ops ab8500_ldo_fixed_ops = { +static struct regulator_ops ab8500_regulator_fixed_ops = { .enable = ab8500_regulator_enable, .disable = ab8500_regulator_disable, .is_enabled = ab8500_regulator_is_enabled, @@ -272,88 +319,197 @@ static struct regulator_ops ab8500_ldo_fixed_ops = { .list_voltage = ab8500_list_voltage, }; -#define AB8500_LDO(_id, min, max, bank, reg, reg_mask, \ - reg_enable, volt_bank, volt_reg, volt_mask, \ - voltages, len_volts) \ -{ \ - .desc = { \ - .name = "LDO-" #_id, \ - .ops = &ab8500_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = AB8500_LDO_##_id, \ - .owner = THIS_MODULE, \ - }, \ - .min_uV = (min) * 1000, \ - .max_uV = (max) * 1000, \ - .update_bank = bank, \ - .update_reg = reg, \ - .mask = reg_mask, \ - .enable = reg_enable, \ - .voltage_bank = volt_bank, \ - .voltage_reg = volt_reg, \ - .voltage_mask = volt_mask, \ - .supported_voltages = voltages, \ - .voltages_len = len_volts, \ - .fixed_uV = 0, \ -} - -#define AB8500_FIXED_LDO(_id, fixed, bank, reg, \ - reg_mask, reg_enable) \ -{ \ - .desc = { \ - .name = "LDO-" #_id, \ - .ops = &ab8500_ldo_fixed_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = AB8500_LDO_##_id, \ - .owner = THIS_MODULE, \ - }, \ - .fixed_uV = fixed * 1000, \ - .update_bank = bank, \ - .update_reg = reg, \ - .mask = reg_mask, \ - .enable = reg_enable, \ -} - -static struct ab8500_regulator_info ab8500_regulator_info[] = { +static struct ab8500_regulator_info + ab8500_regulator_info[AB8500_NUM_REGULATORS] = { /* - * Variable Voltage LDOs - * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask, - * volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table, - * num supported volts + * Variable Voltage Regulators + * name, min mV, max mV, + * update bank, reg, mask, enable val + * volt bank, reg, mask, table, table length */ - AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf, - ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)), - AB8500_LDO(AUX2, 1100, 
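(Aside, not part of the merged patch: the regulator hunks above, in 88pm8607, ab3100 and ab8500, all gain an unsigned *selector out-parameter on their set_voltage callbacks, so the framework learns which entry of the driver's voltage table was actually chosen. The stand-alone C sketch below shows that contract with an invented table and helper, not the regulator framework itself.)

#include <stdio.h>

/* Invented voltage table, in microvolts; the array index is the selector. */
static const int demo_voltages[] = {
	1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000,
};
#define DEMO_N_VOLTAGES ((int)(sizeof(demo_voltages) / sizeof(demo_voltages[0])))

/*
 * Pick the first table entry inside [min_uV, max_uV], report its index
 * through *selector and return 0, or a negative value if nothing fits.
 */
static int demo_set_voltage(int min_uV, int max_uV, unsigned *selector)
{
	int i;

	for (i = 0; i < DEMO_N_VOLTAGES; i++) {
		if (demo_voltages[i] >= min_uV && demo_voltages[i] <= max_uV) {
			*selector = i;
			/* a real driver would now program index i into the chip */
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned sel;

	if (demo_set_voltage(1250000, 1300000, &sel) == 0)
		printf("selected entry %u = %d uV\n", sel, demo_voltages[sel]);
	return 0;
}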
3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf, - ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)), - AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf, - ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)), - AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38, - ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)), + [AB8500_LDO_AUX1] = { + .desc = { + .name = "LDO-AUX1", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX1, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .update_bank = 0x04, + .update_reg = 0x09, + .update_mask = 0x03, + .update_val_enable = 0x01, + .voltage_bank = 0x04, + .voltage_reg = 0x1f, + .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), + }, + [AB8500_LDO_AUX2] = { + .desc = { + .name = "LDO-AUX2", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX2, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .update_bank = 0x04, + .update_reg = 0x09, + .update_mask = 0x0c, + .update_val_enable = 0x04, + .voltage_bank = 0x04, + .voltage_reg = 0x20, + .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), + }, + [AB8500_LDO_AUX3] = { + .desc = { + .name = "LDO-AUX3", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX3, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .update_bank = 0x04, + .update_reg = 0x0a, + .update_mask = 0x03, + .update_val_enable = 0x01, + .voltage_bank = 0x04, + .voltage_reg = 0x21, + .voltage_mask = 0x07, + .voltages = ldo_vaux3_voltages, + .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages), + }, + [AB8500_LDO_INTCORE] = { + .desc = { + .name = "LDO-INTCORE", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_INTCORE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .update_bank = 0x03, + .update_reg = 0x80, + .update_mask = 0x44, + .update_val_enable = 0x04, + .voltage_bank = 0x03, + .voltage_reg = 0x80, + .voltage_mask = 0x38, + .voltages = ldo_vintcore_voltages, + .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages), + }, /* - * Fixed Voltage LDOs - * name, o/p uV, ctrl bank, ctrl reg, enable, disable + * Fixed Voltage Regulators + * name, fixed mV, + * update bank, reg, mask, enable val */ - AB8500_FIXED_LDO(TVOUT, 2000, 0x03, 0x80, 0x2, 0x2), - AB8500_FIXED_LDO(AUDIO, 2000, 0x03, 0x83, 0x2, 0x2), - AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03, 0x83, 0x4, 0x4), - AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03, 0x83, 0x8, 0x8), - AB8500_FIXED_LDO(DMIC, 1800, 0x03, 0x83, 0x10, 0x10), - AB8500_FIXED_LDO(ANA, 1200, 0x03, 0x83, 0xc, 0x4), -}; + [AB8500_LDO_TVOUT] = { + .desc = { + .name = "LDO-TVOUT", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_TVOUT, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2000000, + .update_bank = 0x03, + .update_reg = 0x80, + .update_mask = 0x82, + .update_val_enable = 0x02, + }, + [AB8500_LDO_AUDIO] = { + .desc = { + .name = "LDO-AUDIO", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUDIO, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2000000, + .update_bank = 0x03, + .update_reg 
= 0x83, + .update_mask = 0x02, + .update_val_enable = 0x02, + }, + [AB8500_LDO_ANAMIC1] = { + .desc = { + .name = "LDO-ANAMIC1", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANAMIC1, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2050000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x08, + .update_val_enable = 0x08, + }, + [AB8500_LDO_ANAMIC2] = { + .desc = { + .name = "LDO-ANAMIC2", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANAMIC2, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2050000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x10, + .update_val_enable = 0x10, + }, + [AB8500_LDO_DMIC] = { + .desc = { + .name = "LDO-DMIC", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_DMIC, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1800000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x04, + .update_val_enable = 0x04, + }, + [AB8500_LDO_ANA] = { + .desc = { + .name = "LDO-ANA", + .ops = &ab8500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANA, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1200000, + .update_bank = 0x04, + .update_reg = 0x06, + .update_mask = 0x0c, + .update_val_enable = 0x04, + }, -static inline struct ab8500_regulator_info *find_regulator_info(int id) -{ - struct ab8500_regulator_info *info; - int i; - for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { - info = &ab8500_regulator_info[i]; - if (info->desc.id == id) - return info; - } - return NULL; -} +}; static __devinit int ab8500_regulator_probe(struct platform_device *pdev) { @@ -366,6 +522,16 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) return -EINVAL; } pdata = dev_get_platdata(ab8500->dev); + if (!pdata) { + dev_err(&pdev->dev, "null pdata\n"); + return -EINVAL; + } + + /* make sure the platform data has the correct size */ + if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { + dev_err(&pdev->dev, "platform configuration error\n"); + return -EINVAL; + } /* register all regulators */ for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { @@ -374,10 +540,22 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) /* assign per-regulator data */ info = &ab8500_regulator_info[i]; info->dev = &pdev->dev; - info->ab8500 = ab8500; + /* fix for hardware before ab8500v2.0 */ + if (abx500_get_chip_id(info->dev) < 0x20) { + if (info->desc.id == AB8500_LDO_AUX3) { + info->desc.n_voltages = + ARRAY_SIZE(ldo_vauxn_voltages); + info->voltages = ldo_vauxn_voltages; + info->voltages_len = + ARRAY_SIZE(ldo_vauxn_voltages); + info->voltage_mask = 0xf; + } + } + + /* register regulator with framework */ info->regulator = regulator_register(&info->desc, &pdev->dev, - pdata->regulator[i], info); + &pdata->regulator[i], info); if (IS_ERR(info->regulator)) { err = PTR_ERR(info->regulator); dev_err(&pdev->dev, "failed to register regulator %s\n", @@ -389,6 +567,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) } return err; } + + dev_vdbg(rdev_get_dev(info->regulator), + "%s-probed\n", info->desc.name); } return 0; @@ -401,6 +582,10 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { struct ab8500_regulator_info *info = NULL; info = &ab8500_regulator_info[i]; + + dev_vdbg(rdev_get_dev(info->regulator), 
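(Aside, not part of the merged patch: the ab8500 hunks above drop the AB8500_LDO()/AB8500_FIXED_LDO() helper macros and the find_regulator_info() linear search in favour of an array indexed directly by the regulator ID, using C99 designated initializers, so the entry can be fetched as ab8500_regulator_info[id]. The tiny stand-alone illustration below uses an invented enum and fields.)

#include <stdio.h>

enum demo_ldo { DEMO_LDO_AUX1, DEMO_LDO_AUX2, DEMO_LDO_TVOUT, DEMO_NUM_LDO };

struct demo_info {
	const char *name;
	int fixed_uV;		/* 0 means variable-voltage */
};

/* Designated initializers keep the array index equal to the ID, so no
 * find_regulator_info()-style lookup loop is needed any more. */
static const struct demo_info demo_info[DEMO_NUM_LDO] = {
	[DEMO_LDO_AUX1]  = { .name = "LDO-AUX1",  .fixed_uV = 0 },
	[DEMO_LDO_AUX2]  = { .name = "LDO-AUX2",  .fixed_uV = 0 },
	[DEMO_LDO_TVOUT] = { .name = "LDO-TVOUT", .fixed_uV = 2000000 },
};

int main(void)
{
	enum demo_ldo id = DEMO_LDO_TVOUT;

	printf("%s: %d uV\n", demo_info[id].name, demo_info[id].fixed_uV);
	return 0;
}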
+ "%s-remove\n", info->desc.name); + regulator_unregister(info->regulator); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index ba521f0f0fac..9fa20957847d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -13,8 +13,11 @@ * */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + #include #include +#include #include #include #include @@ -25,16 +28,30 @@ #include #include +#define CREATE_TRACE_POINTS +#include + #include "dummy.h" -#define REGULATOR_VERSION "0.5" +#define rdev_err(rdev, fmt, ...) \ + pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) +#define rdev_warn(rdev, fmt, ...) \ + pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) +#define rdev_info(rdev, fmt, ...) \ + pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) +#define rdev_dbg(rdev, fmt, ...) \ + pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) static DEFINE_MUTEX(regulator_list_mutex); static LIST_HEAD(regulator_list); static LIST_HEAD(regulator_map_list); -static int has_full_constraints; +static bool has_full_constraints; static bool board_wants_dummy_regulator; +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_root; +#endif + /* * struct regulator_map * @@ -71,6 +88,8 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); +static int _regulator_do_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -111,13 +130,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev, BUG_ON(*min_uV > *max_uV); if (!rdev->constraints) { - printk(KERN_ERR "%s: no constraints for %s\n", __func__, - rdev_get_name(rdev)); + rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { - printk(KERN_ERR "%s: operation not allowed for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "operation not allowed\n"); return -EPERM; } @@ -132,6 +149,27 @@ static int regulator_check_voltage(struct regulator_dev *rdev, return 0; } +/* Make sure we select a voltage that suits the needs of all + * regulator consumers + */ +static int regulator_check_consumers(struct regulator_dev *rdev, + int *min_uV, int *max_uV) +{ + struct regulator *regulator; + + list_for_each_entry(regulator, &rdev->consumer_list, list) { + if (*max_uV > regulator->max_uV) + *max_uV = regulator->max_uV; + if (*min_uV < regulator->min_uV) + *min_uV = regulator->min_uV; + } + + if (*min_uV > *max_uV) + return -EINVAL; + + return 0; +} + /* current constraint check */ static int regulator_check_current_limit(struct regulator_dev *rdev, int *min_uA, int *max_uA) @@ -139,13 +177,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, BUG_ON(*min_uA > *max_uA); if (!rdev->constraints) { - printk(KERN_ERR "%s: no constraints for %s\n", __func__, - rdev_get_name(rdev)); + rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) { - printk(KERN_ERR "%s: operation not allowed for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "operation not allowed\n"); return -EPERM; } @@ -174,18 +210,15 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode) } if (!rdev->constraints) { - printk(KERN_ERR "%s: no constraints for %s\n", __func__, - rdev_get_name(rdev)); + rdev_err(rdev, "no 
constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) { - printk(KERN_ERR "%s: operation not allowed for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "operation not allowed\n"); return -EPERM; } if (!(rdev->constraints->valid_modes_mask & mode)) { - printk(KERN_ERR "%s: invalid mode %x for %s\n", - __func__, mode, rdev_get_name(rdev)); + rdev_err(rdev, "invalid mode %x\n", mode); return -EINVAL; } return 0; @@ -195,13 +228,11 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode) static int regulator_check_drms(struct regulator_dev *rdev) { if (!rdev->constraints) { - printk(KERN_ERR "%s: no constraints for %s\n", __func__, - rdev_get_name(rdev)); + rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { - printk(KERN_ERR "%s: operation not allowed for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "operation not allowed\n"); return -EPERM; } return 0; @@ -553,18 +584,21 @@ static void drms_uA_update(struct regulator_dev *rdev) err = regulator_check_drms(rdev); if (err < 0 || !rdev->desc->ops->get_optimum_mode || - !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode) + (!rdev->desc->ops->get_voltage && + !rdev->desc->ops->get_voltage_sel) || + !rdev->desc->ops->set_mode) return; /* get output voltage */ - output_uV = rdev->desc->ops->get_voltage(rdev); + output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) return; /* get input voltage */ - if (rdev->supply && rdev->supply->desc->ops->get_voltage) - input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply); - else + input_uV = 0; + if (rdev->supply) + input_uV = _regulator_get_voltage(rdev); + if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) return; @@ -598,20 +632,17 @@ static int suspend_set_state(struct regulator_dev *rdev, */ if (!rstate->enabled && !rstate->disabled) { if (can_set_state) - printk(KERN_WARNING "%s: No configuration for %s\n", - __func__, rdev_get_name(rdev)); + rdev_warn(rdev, "No configuration\n"); return 0; } if (rstate->enabled && rstate->disabled) { - printk(KERN_ERR "%s: invalid configuration for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "invalid configuration\n"); return -EINVAL; } if (!can_set_state) { - printk(KERN_ERR "%s: no way to set suspend state\n", - __func__); + rdev_err(rdev, "no way to set suspend state\n"); return -EINVAL; } @@ -620,15 +651,14 @@ static int suspend_set_state(struct regulator_dev *rdev, else ret = rdev->desc->ops->set_suspend_disable(rdev); if (ret < 0) { - printk(KERN_ERR "%s: failed to enabled/disable\n", __func__); + rdev_err(rdev, "failed to enabled/disable\n"); return ret; } if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) { ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV); if (ret < 0) { - printk(KERN_ERR "%s: failed to set voltage\n", - __func__); + rdev_err(rdev, "failed to set voltage\n"); return ret; } } @@ -636,7 +666,7 @@ static int suspend_set_state(struct regulator_dev *rdev, if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) { ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode); if (ret < 0) { - printk(KERN_ERR "%s: failed to set mode\n", __func__); + rdev_err(rdev, "failed to set mode\n"); return ret; } } @@ -714,29 +744,27 @@ static void print_constraints(struct regulator_dev *rdev) if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += sprintf(buf + count, "standby"); - printk(KERN_INFO "regulator: 
%s: %s\n", rdev_get_name(rdev), buf); + rdev_info(rdev, "%s\n", buf); } static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { struct regulator_ops *ops = rdev->desc->ops; - const char *name = rdev_get_name(rdev); int ret; /* do we need to apply the constraint voltage */ if (rdev->constraints->apply_uV && - rdev->constraints->min_uV == rdev->constraints->max_uV && - ops->set_voltage) { - ret = ops->set_voltage(rdev, - rdev->constraints->min_uV, rdev->constraints->max_uV); - if (ret < 0) { - printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n", - __func__, - rdev->constraints->min_uV, name); - rdev->constraints = NULL; - return ret; - } + rdev->constraints->min_uV == rdev->constraints->max_uV) { + ret = _regulator_do_set_voltage(rdev, + rdev->constraints->min_uV, + rdev->constraints->max_uV); + if (ret < 0) { + rdev_err(rdev, "failed to apply %duV constraint\n", + rdev->constraints->min_uV); + rdev->constraints = NULL; + return ret; + } } /* constrain machine-level voltage specs to fit @@ -765,8 +793,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, /* else require explicit machine-level constraints */ if (cmin <= 0 || cmax <= 0 || cmax < cmin) { - pr_err("%s: %s '%s' voltage constraints\n", - __func__, "invalid", name); + rdev_err(rdev, "invalid voltage constraints\n"); return -EINVAL; } @@ -787,22 +814,19 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, /* final: [min_uV..max_uV] valid iff constraints valid */ if (max_uV < min_uV) { - pr_err("%s: %s '%s' voltage constraints\n", - __func__, "unsupportable", name); + rdev_err(rdev, "unsupportable voltage constraints\n"); return -EINVAL; } /* use regulator's subset of machine constraints */ if (constraints->min_uV < min_uV) { - pr_debug("%s: override '%s' %s, %d -> %d\n", - __func__, name, "min_uV", - constraints->min_uV, min_uV); + rdev_dbg(rdev, "override min_uV, %d -> %d\n", + constraints->min_uV, min_uV); constraints->min_uV = min_uV; } if (constraints->max_uV > max_uV) { - pr_debug("%s: override '%s' %s, %d -> %d\n", - __func__, name, "max_uV", - constraints->max_uV, max_uV); + rdev_dbg(rdev, "override max_uV, %d -> %d\n", + constraints->max_uV, max_uV); constraints->max_uV = max_uV; } } @@ -822,26 +846,25 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, * set_mode. 
*/ static int set_machine_constraints(struct regulator_dev *rdev, - struct regulation_constraints *constraints) + const struct regulation_constraints *constraints) { int ret = 0; - const char *name; struct regulator_ops *ops = rdev->desc->ops; - rdev->constraints = constraints; - - name = rdev_get_name(rdev); + rdev->constraints = kmemdup(constraints, sizeof(*constraints), + GFP_KERNEL); + if (!rdev->constraints) + return -ENOMEM; - ret = machine_constraints_voltage(rdev, constraints); + ret = machine_constraints_voltage(rdev, rdev->constraints); if (ret != 0) goto out; /* do we need to setup our suspend state */ if (constraints->initial_state) { - ret = suspend_prepare(rdev, constraints->initial_state); + ret = suspend_prepare(rdev, rdev->constraints->initial_state); if (ret < 0) { - printk(KERN_ERR "%s: failed to set suspend state for %s\n", - __func__, name); + rdev_err(rdev, "failed to set suspend state\n"); rdev->constraints = NULL; goto out; } @@ -849,17 +872,14 @@ static int set_machine_constraints(struct regulator_dev *rdev, if (constraints->initial_mode) { if (!ops->set_mode) { - printk(KERN_ERR "%s: no set_mode operation for %s\n", - __func__, name); + rdev_err(rdev, "no set_mode operation\n"); ret = -EINVAL; goto out; } - ret = ops->set_mode(rdev, constraints->initial_mode); + ret = ops->set_mode(rdev, rdev->constraints->initial_mode); if (ret < 0) { - printk(KERN_ERR - "%s: failed to set initial mode for %s: %d\n", - __func__, name, ret); + rdev_err(rdev, "failed to set initial mode: %d\n", ret); goto out; } } @@ -867,11 +887,11 @@ static int set_machine_constraints(struct regulator_dev *rdev, /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled. */ - if ((constraints->always_on || constraints->boot_on) && ops->enable) { + if ((rdev->constraints->always_on || rdev->constraints->boot_on) && + ops->enable) { ret = ops->enable(rdev); if (ret < 0) { - printk(KERN_ERR "%s: failed to enable %s\n", - __func__, name); + rdev_err(rdev, "failed to enable\n"); rdev->constraints = NULL; goto out; } @@ -899,9 +919,8 @@ static int set_supply(struct regulator_dev *rdev, err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj, "supply"); if (err) { - printk(KERN_ERR - "%s: could not add device link %s err %d\n", - __func__, supply_rdev->dev.kobj.name, err); + rdev_err(rdev, "could not add device link %s err %d\n", + supply_rdev->dev.kobj.name, err); goto out; } rdev->supply = supply_rdev; @@ -957,10 +976,10 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, continue; dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n", - dev_name(&node->regulator->dev), - node->regulator->desc->name, - supply, - dev_name(&rdev->dev), rdev_get_name(rdev)); + dev_name(&node->regulator->dev), + node->regulator->desc->name, + supply, + dev_name(&rdev->dev), rdev_get_name(rdev)); return -EBUSY; } @@ -1031,8 +1050,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, regulator->dev_attr.show = device_requested_uA_show; err = device_create_file(dev, ®ulator->dev_attr); if (err < 0) { - printk(KERN_WARNING "%s: could not add regulator_dev" - " load sysfs\n", __func__); + rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n"); goto attr_name_err; } @@ -1049,9 +1067,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj, buf); if (err) { - printk(KERN_WARNING - "%s: could not add device link %s err %d\n", - 
__func__, dev->kobj.name, err); + rdev_warn(rdev, "could not add device link %s err %d\n", + dev->kobj.name, err); goto link_name_err; } } @@ -1088,7 +1105,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, int ret; if (id == NULL) { - printk(KERN_ERR "regulator: get() with no identifier\n"); + pr_err("get() with no identifier\n"); return regulator; } @@ -1122,8 +1139,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, * substitute in a dummy regulator so consumers can continue. */ if (!has_full_constraints) { - pr_warning("%s supply %s not found, using dummy regulator\n", - devname, id); + pr_warn("%s supply %s not found, using dummy regulator\n", + devname, id); rdev = dummy_regulator_rdev; goto found; } @@ -1274,8 +1291,7 @@ static int _regulator_enable(struct regulator_dev *rdev) ret = _regulator_enable(rdev->supply); mutex_unlock(&rdev->supply->mutex); if (ret < 0) { - printk(KERN_ERR "%s: failed to enable %s: %d\n", - __func__, rdev_get_name(rdev), ret); + rdev_err(rdev, "failed to enable: %d\n", ret); return ret; } } @@ -1302,13 +1318,13 @@ static int _regulator_enable(struct regulator_dev *rdev) if (ret >= 0) { delay = ret; } else { - printk(KERN_WARNING - "%s: enable_time() failed for %s: %d\n", - __func__, rdev_get_name(rdev), - ret); + rdev_warn(rdev, "enable_time() failed: %d\n", + ret); delay = 0; } + trace_regulator_enable(rdev_get_name(rdev)); + /* Allow the regulator to ramp; it would be useful * to extend this for bulk operations so that the * regulators can ramp together. */ @@ -1316,6 +1332,8 @@ static int _regulator_enable(struct regulator_dev *rdev) if (ret < 0) return ret; + trace_regulator_enable_delay(rdev_get_name(rdev)); + if (delay >= 1000) { mdelay(delay / 1000); udelay(delay % 1000); @@ -1323,9 +1341,10 @@ static int _regulator_enable(struct regulator_dev *rdev) udelay(delay); } + trace_regulator_enable_complete(rdev_get_name(rdev)); + } else if (ret < 0) { - printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", - __func__, rdev_get_name(rdev), ret); + rdev_err(rdev, "is_enabled() failed: %d\n", ret); return ret; } /* Fallthrough on positive return values - already enabled */ @@ -1367,8 +1386,7 @@ static int _regulator_disable(struct regulator_dev *rdev, *supply_rdev_ptr = NULL; if (WARN(rdev->use_count <= 0, - "unbalanced disables for %s\n", - rdev_get_name(rdev))) + "unbalanced disables for %s\n", rdev_get_name(rdev))) return -EIO; /* are we the last user and permitted to disable ? */ @@ -1378,13 +1396,16 @@ static int _regulator_disable(struct regulator_dev *rdev, /* we are last user */ if (_regulator_can_change_status(rdev) && rdev->desc->ops->disable) { + trace_regulator_disable(rdev_get_name(rdev)); + ret = rdev->desc->ops->disable(rdev); if (ret < 0) { - printk(KERN_ERR "%s: failed to disable %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "failed to disable\n"); return ret; } + trace_regulator_disable_complete(rdev_get_name(rdev)); + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, NULL); } @@ -1451,8 +1472,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev, /* ah well, who wants to live forever... 
*/ ret = rdev->desc->ops->disable(rdev); if (ret < 0) { - printk(KERN_ERR "%s: failed to force disable %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "failed to force disable\n"); return ret; } /* notify other consumers that power has been forced off */ @@ -1605,6 +1625,62 @@ int regulator_is_supported_voltage(struct regulator *regulator, return 0; } +static int _regulator_do_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + int ret; + unsigned int selector; + + trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); + + if (rdev->desc->ops->set_voltage) { + ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, + &selector); + + if (rdev->desc->ops->list_voltage) + selector = rdev->desc->ops->list_voltage(rdev, + selector); + else + selector = -1; + } else if (rdev->desc->ops->set_voltage_sel) { + int best_val = INT_MAX; + int i; + + selector = 0; + + /* Find the smallest voltage that falls within the specified + * range. + */ + for (i = 0; i < rdev->desc->n_voltages; i++) { + ret = rdev->desc->ops->list_voltage(rdev, i); + if (ret < 0) + continue; + + if (ret < best_val && ret >= min_uV && ret <= max_uV) { + best_val = ret; + selector = i; + } + } + + if (best_val != INT_MAX) { + ret = rdev->desc->ops->set_voltage_sel(rdev, selector); + selector = best_val; + } else { + ret = -EINVAL; + } + } else { + ret = -EINVAL; + } + + if (ret == 0) + _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, + NULL); + + trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector); + + return ret; +} + /** * regulator_set_voltage - set regulator output voltage * @regulator: regulator source @@ -1626,12 +1702,20 @@ int regulator_is_supported_voltage(struct regulator *regulator, int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { struct regulator_dev *rdev = regulator->rdev; - int ret; + int ret = 0; mutex_lock(&rdev->mutex); + /* If we're setting the same range as last time the change + * should be a noop (some cpufreq implementations use the same + * voltage for multiple frequencies, for example). + */ + if (regulator->min_uV == min_uV && regulator->max_uV == max_uV) + goto out; + /* sanity check */ - if (!rdev->desc->ops->set_voltage) { + if (!rdev->desc->ops->set_voltage && + !rdev->desc->ops->set_voltage_sel) { ret = -EINVAL; goto out; } @@ -1642,18 +1726,76 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) goto out; regulator->min_uV = min_uV; regulator->max_uV = max_uV; - ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV); + + ret = regulator_check_consumers(rdev, &min_uV, &max_uV); + if (ret < 0) + goto out; + + ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); out: - _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL); mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_set_voltage); +/** + * regulator_sync_voltage - re-apply last regulator output voltage + * @regulator: regulator source + * + * Re-apply the last configured voltage. This is intended to be used + * where some external control source the consumer is cooperating with + * has caused the configured voltage to change. + */ +int regulator_sync_voltage(struct regulator *regulator) +{ + struct regulator_dev *rdev = regulator->rdev; + int ret, min_uV, max_uV; + + mutex_lock(&rdev->mutex); + + if (!rdev->desc->ops->set_voltage && + !rdev->desc->ops->set_voltage_sel) { + ret = -EINVAL; + goto out; + } + + /* This is only going to work if we've had a voltage configured. 
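
The new _regulator_do_set_voltage() below accepts drivers implementing either the extended set_voltage() callback or the new set_voltage_sel()/list_voltage() pair, with the core performing the min/max-to-selector search itself. A sketch of a hypothetical selector-based driver under that contract (all foo_* names and constants are illustrative, assuming a simple linear map uV = FOO_MIN_UV + selector * FOO_STEP_UV):

	#include <linux/regulator/driver.h>

	#define FOO_MIN_UV	850000
	#define FOO_STEP_UV	25000
	#define FOO_N_VOLTAGES	32

	static int foo_list_voltage(struct regulator_dev *rdev, unsigned selector)
	{
		if (selector >= FOO_N_VOLTAGES)
			return -EINVAL;
		return FOO_MIN_UV + selector * FOO_STEP_UV;
	}

	static int foo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
	{
		/* the chip-specific write of 'selector' into the VSEL field
		 * goes here; rdev_get_drvdata(rdev) returns the chip handle */
		return 0;
	}

	static struct regulator_ops foo_ops = {
		.list_voltage	 = foo_list_voltage,
		.set_voltage_sel = foo_set_voltage_sel,
		/* .get_voltage_sel, .enable, .disable, ... */
	};

With ops like these, the loop in _regulator_do_set_voltage() above walks n_voltages via list_voltage() and picks the smallest in-range value; regulator_register() later in this patch WARN()s if a driver supplies both set_voltage and set_voltage_sel, and rejects selector users that lack list_voltage.
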
*/ + if (!regulator->min_uV && !regulator->max_uV) { + ret = -EINVAL; + goto out; + } + + min_uV = regulator->min_uV; + max_uV = regulator->max_uV; + + /* This should be a paranoia check... */ + ret = regulator_check_voltage(rdev, &min_uV, &max_uV); + if (ret < 0) + goto out; + + ret = regulator_check_consumers(rdev, &min_uV, &max_uV); + if (ret < 0) + goto out; + + ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); + +out: + mutex_unlock(&rdev->mutex); + return ret; +} +EXPORT_SYMBOL_GPL(regulator_sync_voltage); + static int _regulator_get_voltage(struct regulator_dev *rdev) { - /* sanity check */ + int sel; + + if (rdev->desc->ops->get_voltage_sel) { + sel = rdev->desc->ops->get_voltage_sel(rdev); + if (sel < 0) + return sel; + return rdev->desc->ops->list_voltage(rdev, sel); + } if (rdev->desc->ops->get_voltage) return rdev->desc->ops->get_voltage(rdev); else @@ -1880,21 +2022,20 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) goto out; /* get output voltage */ - output_uV = rdev->desc->ops->get_voltage(rdev); + output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) { - printk(KERN_ERR "%s: invalid output voltage found for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "invalid output voltage found\n"); goto out; } /* get input voltage */ - if (rdev->supply && rdev->supply->desc->ops->get_voltage) - input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply); - else + input_uV = 0; + if (rdev->supply) + input_uV = _regulator_get_voltage(rdev->supply); + if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { - printk(KERN_ERR "%s: invalid input voltage found for %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "invalid input voltage found\n"); goto out; } @@ -1907,16 +2048,14 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) total_uA_load); ret = regulator_check_mode(rdev, mode); if (ret < 0) { - printk(KERN_ERR "%s: failed to get optimum mode for %s @" - " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev), - total_uA_load, input_uV, output_uV); + rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", + total_uA_load, input_uV, output_uV); goto out; } ret = rdev->desc->ops->set_mode(rdev, mode); if (ret < 0) { - printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n", - __func__, mode, rdev_get_name(rdev)); + rdev_err(rdev, "failed to set optimum mode %x\n", mode); goto out; } ret = mode; @@ -2047,7 +2186,7 @@ int regulator_bulk_enable(int num_consumers, return 0; err: - printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret); + pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); for (--i; i >= 0; --i) regulator_disable(consumers[i].consumer); @@ -2082,8 +2221,7 @@ int regulator_bulk_disable(int num_consumers, return 0; err: - printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply, - ret); + pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret); for (--i; i >= 0; --i) regulator_enable(consumers[i].consumer); @@ -2166,7 +2304,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev) int status = 0; /* some attributes need specific methods to be displayed */ - if (ops->get_voltage) { + if (ops->get_voltage || ops->get_voltage_sel) { status = device_create_file(dev, &dev_attr_microvolts); if (status < 0) return status; @@ -2207,7 +2345,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev) return status; /* constraints need specific supporting methods */ - if (ops->set_voltage) { + if 
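
regulator_sync_voltage() exists because a repeated regulator_set_voltage() call with an unchanged range is now a no-op. A consumer-side usage sketch (the device, the "vcc_core" supply name and the voltages are illustrative only):

	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	static int foo_setup_rail(struct device *dev, struct regulator **out)
	{
		struct regulator *reg = regulator_get(dev, "vcc_core");

		if (IS_ERR(reg))
			return PTR_ERR(reg);

		*out = reg;
		/* the request is remembered per consumer; repeating the same
		 * range later is now a no-op in regulator_set_voltage() */
		return regulator_set_voltage(reg, 1200000, 1200000);
	}

	static int foo_resync_rail(struct regulator *reg)
	{
		/* after an external control source (DVS pin, firmware) may
		 * have moved the rail, re-apply the last configured request;
		 * returns -EINVAL if no voltage was ever configured */
		return regulator_sync_voltage(reg);
	}
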
(ops->set_voltage || ops->set_voltage_sel) { status = device_create_file(dev, &dev_attr_min_microvolts); if (status < 0) return status; @@ -2271,6 +2409,23 @@ static int add_regulator_attributes(struct regulator_dev *rdev) return status; } +static void rdev_init_debugfs(struct regulator_dev *rdev) +{ +#ifdef CONFIG_DEBUG_FS + rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); + if (IS_ERR(rdev->debugfs) || !rdev->debugfs) { + rdev_warn(rdev, "Failed to create debugfs directory\n"); + rdev->debugfs = NULL; + return; + } + + debugfs_create_u32("use_count", 0444, rdev->debugfs, + &rdev->use_count); + debugfs_create_u32("open_count", 0444, rdev->debugfs, + &rdev->open_count); +#endif +} + /** * regulator_register - register regulator * @regulator_desc: regulator to register @@ -2282,7 +2437,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev) * Returns 0 on success. */ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, - struct device *dev, struct regulator_init_data *init_data, + struct device *dev, const struct regulator_init_data *init_data, void *driver_data) { static atomic_t regulator_no = ATOMIC_INIT(0); @@ -2302,6 +2457,22 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, if (!init_data) return ERR_PTR(-EINVAL); + /* Only one of each should be implemented */ + WARN_ON(regulator_desc->ops->get_voltage && + regulator_desc->ops->get_voltage_sel); + WARN_ON(regulator_desc->ops->set_voltage && + regulator_desc->ops->set_voltage_sel); + + /* If we're using selectors we must implement list_voltage. */ + if (regulator_desc->ops->get_voltage_sel && + !regulator_desc->ops->list_voltage) { + return ERR_PTR(-EINVAL); + } + if (regulator_desc->ops->set_voltage_sel && + !regulator_desc->ops->list_voltage) { + return ERR_PTR(-EINVAL); + } + rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL); if (rdev == NULL) return ERR_PTR(-ENOMEM); @@ -2399,6 +2570,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, } list_add(&rdev->list, ®ulator_list); + + rdev_init_debugfs(rdev); out: mutex_unlock(®ulator_list_mutex); return rdev; @@ -2431,12 +2604,16 @@ void regulator_unregister(struct regulator_dev *rdev) return; mutex_lock(®ulator_list_mutex); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(rdev->debugfs); +#endif WARN_ON(rdev->open_count); unset_regulator_supplies(rdev); list_del(&rdev->list); if (rdev->supply) sysfs_remove_link(&rdev->dev.kobj, "supply"); device_unregister(&rdev->dev); + kfree(rdev->constraints); mutex_unlock(®ulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_unregister); @@ -2465,8 +2642,7 @@ int regulator_suspend_prepare(suspend_state_t state) mutex_unlock(&rdev->mutex); if (ret < 0) { - printk(KERN_ERR "%s: failed to prepare %s\n", - __func__, rdev_get_name(rdev)); + rdev_err(rdev, "failed to prepare\n"); goto out; } } @@ -2572,10 +2748,16 @@ static int __init regulator_init(void) { int ret; - printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION); - ret = class_register(®ulator_class); +#ifdef CONFIG_DEBUG_FS + debugfs_root = debugfs_create_dir("regulator", NULL); + if (IS_ERR(debugfs_root) || !debugfs_root) { + pr_warn("regulator: Failed to create debugfs directory\n"); + debugfs_root = NULL; + } +#endif + regulator_dummy_init(); return ret; @@ -2590,7 +2772,6 @@ static int __init regulator_init_complete(void) struct regulator_ops *ops; struct regulation_constraints *c; int enabled, ret; - const char *name; 
mutex_lock(®ulator_list_mutex); @@ -2602,8 +2783,6 @@ static int __init regulator_init_complete(void) ops = rdev->desc->ops; c = rdev->constraints; - name = rdev_get_name(rdev); - if (!ops->disable || (c && c->always_on)) continue; @@ -2624,13 +2803,10 @@ static int __init regulator_init_complete(void) if (has_full_constraints) { /* We log since this may kill the system if it * goes wrong. */ - printk(KERN_INFO "%s: disabling %s\n", - __func__, name); + rdev_info(rdev, "disabling\n"); ret = ops->disable(rdev); if (ret != 0) { - printk(KERN_ERR - "%s: couldn't disable %s: %d\n", - __func__, name, ret); + rdev_err(rdev, "couldn't disable: %d\n", ret); } } else { /* The intention is that in future we will @@ -2638,9 +2814,7 @@ static int __init regulator_init_complete(void) * so warn even if we aren't going to do * anything here. */ - printk(KERN_WARNING - "%s: incomplete constraints, leaving %s on\n", - __func__, name); + rdev_warn(rdev, "incomplete constraints, leaving on\n"); } unlock: diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c index f8c4661a7a81..362e08221085 100644 --- a/drivers/regulator/da903x.c +++ b/drivers/regulator/da903x.c @@ -107,7 +107,7 @@ static inline int check_range(struct da903x_regulator_info *info, /* DA9030/DA9034 common operations */ static int da903x_set_ldo_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct da903x_regulator_info *info = rdev_get_drvdata(rdev); struct device *da9034_dev = to_da903x_dev(rdev); @@ -119,6 +119,7 @@ static int da903x_set_ldo_voltage(struct regulator_dev *rdev, } val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV; + *selector = val; val <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; @@ -187,7 +188,8 @@ static int da903x_list_voltage(struct regulator_dev *rdev, unsigned selector) /* DA9030 specific operations */ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct da903x_regulator_info *info = rdev_get_drvdata(rdev); struct device *da903x_dev = to_da903x_dev(rdev); @@ -200,6 +202,7 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev, } val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV; + *selector = val; val <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */ @@ -214,7 +217,8 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev, } static int da9030_set_ldo14_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct da903x_regulator_info *info = rdev_get_drvdata(rdev); struct device *da903x_dev = to_da903x_dev(rdev); @@ -234,6 +238,7 @@ static int da9030_set_ldo14_voltage(struct regulator_dev *rdev, val = (min_uV - thresh + info->step_uV - 1) / info->step_uV; } + *selector = val; val <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; @@ -263,7 +268,7 @@ static int da9030_get_ldo14_voltage(struct regulator_dev *rdev) /* DA9034 specific operations */ static int da9034_set_dvc_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct da903x_regulator_info *info = rdev_get_drvdata(rdev); struct device *da9034_dev = to_da903x_dev(rdev); @@ -276,6 +281,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev, } val = (min_uV - info->min_uV + 
info->step_uV - 1) / info->step_uV; + *selector = val; val <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; @@ -289,7 +295,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev, } static int da9034_set_ldo12_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct da903x_regulator_info *info = rdev_get_drvdata(rdev); struct device *da9034_dev = to_da903x_dev(rdev); @@ -302,6 +308,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev, val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV; val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val); + *selector = val; val <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index b8cc6389a541..e4b3592e8176 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c @@ -58,7 +58,9 @@ out: return data; } -static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV) +static int isl6271a_set_voltage(struct regulator_dev *dev, + int minuV, int maxuV, + unsigned *selector) { struct isl_pmic *pmic = rdev_get_drvdata(dev); int vsel, err, data; @@ -78,6 +80,8 @@ static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV) /* Convert the microvolts to data for the chip */ data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP; + *selector = data; + mutex_lock(&pmic->mtx); err = i2c_smbus_write_byte(pmic->client, data); @@ -169,7 +173,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c, init_data, pmic); if (IS_ERR(pmic->rdev[i])) { dev_err(&i2c->dev, "failed to register %s\n", id->name); - err = PTR_ERR(pmic->rdev); + err = PTR_ERR(pmic->rdev[i]); goto error; } } diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 3bb82b624e19..0f22ef12601c 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -168,7 +168,8 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev) } static int lp3971_ldo_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned int *selector) { struct lp3971 *lp3971 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3971_LDO1; @@ -187,6 +188,8 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev, if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; + *selector = val; + return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo), val << LDO_VOL_CONTR_SHIFT(ldo)); @@ -256,7 +259,8 @@ static int lp3971_dcdc_get_voltage(struct regulator_dev *dev) } static int lp3971_dcdc_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned int *selector) { struct lp3971 *lp3971 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3971_DCDC1; @@ -277,6 +281,8 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev, if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; + *selector = val; + ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), BUCK_TARGET_VOL_MASK, val); if (ret) diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c index e07062fd0b42..6aa1b506fb5d 100644 --- a/drivers/regulator/lp3972.c +++ b/drivers/regulator/lp3972.c @@ -292,7 +292,8 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev) } static int 
lp3972_ldo_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned int *selector) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; @@ -313,6 +314,8 @@ static int lp3972_ldo_set_voltage(struct regulator_dev *dev, if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol) return -EINVAL; + *selector = val; + shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo); ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo), LP3972_LDO_VOL_MASK(ldo) << shift, val << shift); @@ -416,7 +419,8 @@ static int lp3972_dcdc_get_voltage(struct regulator_dev *dev) } static int lp3972_dcdc_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned int *selector) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; @@ -438,6 +442,8 @@ static int lp3972_dcdc_set_voltage(struct regulator_dev *dev, vol_map[val] > max_vol) return -EINVAL; + *selector = val; + ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck), LP3972_BUCK_VOL_MASK, val); if (ret) diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 559cfa271a44..3f49512c5134 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -63,12 +63,12 @@ static int max1586_v3_calc_voltage(struct max1586_data *max1586, return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL); } -static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV) +static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) { struct max1586_data *max1586 = rdev_get_drvdata(rdev); struct i2c_client *client = max1586->client; unsigned range_uV = max1586->max_uV - max1586->min_uV; - unsigned selector; u8 v3_prog; if (min_uV > max1586->max_uV || max_uV < max1586->min_uV) @@ -76,15 +76,15 @@ static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV) if (min_uV < max1586->min_uV) min_uV = max1586->min_uV; - selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL + + *selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL + range_uV - 1) / range_uV; - if (max1586_v3_calc_voltage(max1586, selector) > max_uV) + if (max1586_v3_calc_voltage(max1586, *selector) > max_uV) return -EINVAL; dev_dbg(&client->dev, "changing voltage v3 to %dmv\n", - max1586_v3_calc_voltage(max1586, selector) / 1000); + max1586_v3_calc_voltage(max1586, *selector) / 1000); - v3_prog = I2C_V3_SELECT | (u8) selector; + v3_prog = I2C_V3_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v3_prog); } @@ -110,10 +110,10 @@ static int max1586_v6_calc_voltage(unsigned selector) return voltages_uv[selector]; } -static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV) +static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned int *selector) { struct i2c_client *client = rdev_get_drvdata(rdev); - unsigned selector; u8 v6_prog; if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV) @@ -122,21 +122,21 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV) return -EINVAL; if (min_uV < 1800000) - selector = 0; + *selector = 0; else if (min_uV < 2500000) - selector = 1; + *selector = 1; else if (min_uV < 3000000) - selector = 2; + *selector = 2; else if (min_uV >= 3000000) - selector = 3; + *selector = 3; - if (max1586_v6_calc_voltage(selector) > max_uV) + if (max1586_v6_calc_voltage(*selector) > max_uV) return -EINVAL; dev_dbg(&client->dev, 
"changing voltage v6 to %dmv\n", - max1586_v6_calc_voltage(selector) / 1000); + max1586_v6_calc_voltage(*selector) / 1000); - v6_prog = I2C_V6_SELECT | (u8) selector; + v6_prog = I2C_V6_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v6_prog); } diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 6b60a9c0366b..30eb9e54f7ec 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c @@ -155,7 +155,7 @@ static int max8649_get_voltage(struct regulator_dev *rdev) } static int max8649_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct max8649_regulator_info *info = rdev_get_drvdata(rdev); unsigned char data, mask; @@ -168,6 +168,7 @@ static int max8649_set_voltage(struct regulator_dev *rdev, data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1) / MAX8649_DCDC_STEP; mask = MAX8649_VOL_MASK; + *selector = data & mask; return max8649_set_bits(info->i2c, info->vol_reg, mask, data); } diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index c570e6eb0db2..33f5d9a492ef 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c @@ -141,7 +141,8 @@ static int max8660_dcdc_get(struct regulator_dev *rdev) return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP; } -static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV) +static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned int *s) { struct max8660 *max8660 = rdev_get_drvdata(rdev); u8 reg, selector, bits; @@ -154,6 +155,7 @@ static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV) selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1)) / MAX8660_DCDC_STEP; + *s = selector; ret = max8660_dcdc_list(rdev, selector); if (ret < 0 || ret > max_uV) @@ -196,7 +198,8 @@ static int max8660_ldo5_get(struct regulator_dev *rdev) return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP; } -static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV) +static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned int *s) { struct max8660 *max8660 = rdev_get_drvdata(rdev); u8 selector; @@ -213,6 +216,8 @@ static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV) if (ret < 0 || ret > max_uV) return -EINVAL; + *s = selector; + ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector); if (ret) return ret; @@ -270,7 +275,8 @@ static int max8660_ldo67_get(struct regulator_dev *rdev) return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP; } -static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV) +static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned int *s) { struct max8660 *max8660 = rdev_get_drvdata(rdev); u8 selector; @@ -288,6 +294,8 @@ static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV) if (ret < 0 || ret > max_uV) return -EINVAL; + *s = selector; + if (rdev_get_id(rdev) == MAX8660_V6) return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector); else diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index 552cad85ae5a..8ae147549c6a 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c @@ -55,7 +55,7 @@ static int max8925_list_voltage(struct regulator_dev *rdev, unsigned index) } static int max8925_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + 
int min_uV, int max_uV, unsigned int *selector) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); unsigned char data, mask; @@ -66,6 +66,7 @@ static int max8925_set_voltage(struct regulator_dev *rdev, return -EINVAL; } data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV; + *selector = data; data <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c index 0d5dda4fd911..a8f4ecfb0843 100644 --- a/drivers/regulator/max8952.c +++ b/drivers/regulator/max8952.c @@ -133,7 +133,7 @@ static int max8952_get_voltage(struct regulator_dev *rdev) } static int max8952_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct max8952_data *max8952 = rdev_get_drvdata(rdev); s8 vid = -1, i; @@ -156,6 +156,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev, if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) { max8952->vid0 = (vid % 2 == 1); max8952->vid1 = (((vid >> 1) % 2) == 1); + *selector = vid; gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0); gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1); } else diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 5c20756db607..0ec49ca527a8 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c @@ -304,7 +304,7 @@ static int max8998_get_voltage(struct regulator_dev *rdev) } static int max8998_set_voltage_ldo(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; @@ -331,6 +331,8 @@ static int max8998_set_voltage_ldo(struct regulator_dev *rdev, if (desc->min + desc->step*i > max_vol) return -EINVAL; + *selector = i; + ret = max8998_get_voltage_register(rdev, ®, &shift, &mask); if (ret) return ret; @@ -352,7 +354,7 @@ static inline void buck2_gpio_set(int gpio, int v) } static int max8998_set_voltage_buck(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct max8998_platform_data *pdata = @@ -384,6 +386,8 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev, if (desc->min + desc->step*i > max_vol) return -EINVAL; + *selector = i; + ret = max8998_get_voltage_register(rdev, ®, &shift, &mask); if (ret) return ret; @@ -420,6 +424,9 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev, } } + if (pdata->buck_voltage_lock) + return -EINVAL; + /* no predefine regulator found */ max8998->buck1_idx = (buck1_last_val % 2) + 2; dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n", @@ -447,18 +454,26 @@ buck1_exit: "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n" , i, max8998->buck2_vol[0], max8998->buck2_vol[1]); if (gpio_is_valid(pdata->buck2_set3)) { - if (max8998->buck2_vol[0] == i) { - max8998->buck1_idx = 0; - buck2_gpio_set(pdata->buck2_set3, 0); - } else { - max8998->buck1_idx = 1; - ret = max8998_get_voltage_register(rdev, ®, - &shift, - &mask); - ret = max8998_write_reg(i2c, reg, i); - max8998->buck2_vol[1] = i; - buck2_gpio_set(pdata->buck2_set3, 1); + + /* check if requested voltage */ + /* value is already defined */ + for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) { + if (max8998->buck2_vol[j] == i) { + max8998->buck2_idx = j; + buck2_gpio_set(pdata->buck2_set3, j); + goto buck2_exit; + } } + + if (pdata->buck_voltage_lock) + return -EINVAL; + + 
max8998_get_voltage_register(rdev, + ®, &shift, &mask); + ret = max8998_write_reg(i2c, reg, i); + max8998->buck2_vol[max8998->buck2_idx] = i; + buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx); +buck2_exit: dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name, gpio_get_value(pdata->buck2_set3)); } else { @@ -703,6 +718,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, max8998); i2c = max8998->iodev->i2c; + max8998->buck1_idx = pdata->buck1_default_idx; + max8998->buck2_idx = pdata->buck2_default_idx; + /* NOTE: */ /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */ /* will be displayed */ @@ -735,23 +753,46 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) i = 0; while (buck12_voltage_map_desc.min + buck12_voltage_map_desc.step*i - != (pdata->buck1_max_voltage1 / 1000)) + < (pdata->buck1_voltage1 / 1000)) i++; - printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx); max8998->buck1_vol[0] = i; ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); + if (ret) + return ret; /* Set predefined value for BUCK1 register 2 */ i = 0; while (buck12_voltage_map_desc.min + buck12_voltage_map_desc.step*i - != (pdata->buck1_max_voltage2 / 1000)) + < (pdata->buck1_voltage2 / 1000)) i++; max8998->buck1_vol[1] = i; - printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx); - ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i) - + ret; + ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i); + if (ret) + return ret; + + /* Set predefined value for BUCK1 register 3 */ + i = 0; + while (buck12_voltage_map_desc.min + + buck12_voltage_map_desc.step*i + < (pdata->buck1_voltage3 / 1000)) + i++; + + max8998->buck1_vol[2] = i; + ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i); + if (ret) + return ret; + + /* Set predefined value for BUCK1 register 4 */ + i = 0; + while (buck12_voltage_map_desc.min + + buck12_voltage_map_desc.step*i + < (pdata->buck1_voltage4 / 1000)) + i++; + + max8998->buck1_vol[3] = i; + ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i); if (ret) return ret; @@ -768,18 +809,28 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) gpio_direction_output(pdata->buck2_set3, max8998->buck2_idx & 0x1); - /* BUCK2 - set preset default voltage value to buck2_vol[0] */ + /* BUCK2 register 1 */ i = 0; while (buck12_voltage_map_desc.min + buck12_voltage_map_desc.step*i - != (pdata->buck2_max_voltage / 1000)) + < (pdata->buck2_voltage1 / 1000)) i++; - printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx); max8998->buck2_vol[0] = i; ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); if (ret) return ret; + /* BUCK2 register 2 */ + i = 0; + while (buck12_voltage_map_desc.min + + buck12_voltage_map_desc.step*i + < (pdata->buck2_voltage2 / 1000)) + i++; + printk(KERN_ERR "i2:%d, buck2_idx:%d\n", i, max8998->buck2_idx); + max8998->buck2_vol[1] = i; + ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i); + if (ret) + return ret; } for (i = 0; i < pdata->num_regulators; i++) { @@ -831,6 +882,12 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id max8998_pmic_id[] = { + { "max8998-pmic", TYPE_MAX8998 }, + { "lp3974-pmic", TYPE_LP3974 }, + { } +}; + static struct platform_driver max8998_pmic_driver = { .driver = { .name = "max8998-pmic", @@ -838,6 +895,7 @@ static struct platform_driver max8998_pmic_driver = { }, .probe = max8998_pmic_probe, .remove = 
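
The max8998 probe hunks above derive each preset's register index with a linear search over the buck1/2 voltage map (map values in mV, hence the /1000). Since the map is linear, the search is equivalent to a rounded-up division; a sketch of the closed form, assuming min and step are in mV as the loops suggest:

	static inline int buck12_vol_index(int min_mV, int step_mV, int uV)
	{
		int mV = uV / 1000;

		if (mV <= min_mV)
			return 0;
		/* smallest i such that min_mV + step_mV * i >= mV */
		return DIV_ROUND_UP(mV - min_mV, step_mV);
	}
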
__devexit_p(max8998_pmic_remove), + .id_table = max8998_pmic_id, }; static int __init max8998_pmic_init(void) diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index ecd99f59dba8..3e5d0c3b4e53 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -1,6 +1,7 @@ /* * Regulator Driver for Freescale MC13783 PMIC * + * Copyright 2010 Yong Shen * Copyright (C) 2008 Sascha Hauer, Pengutronix * Copyright 2009 Alberto Panizzo * @@ -17,6 +18,7 @@ #include #include #include +#include "mc13xxx.h" #define MC13783_REG_SWITCHERS5 29 #define MC13783_REG_SWITCHERS5_SW3EN (1 << 20) @@ -89,154 +91,106 @@ #define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15) -struct mc13783_regulator { - struct regulator_desc desc; - int reg; - int enable_bit; - int vsel_reg; - int vsel_shift; - int vsel_mask; - int const *voltages; -}; - /* Voltage Values */ -static const int const mc13783_sw3_val[] = { +static const int mc13783_sw3_val[] = { 5000000, 5000000, 5000000, 5500000, }; -static const int const mc13783_vaudio_val[] = { +static const int mc13783_vaudio_val[] = { 2775000, }; -static const int const mc13783_viohi_val[] = { +static const int mc13783_viohi_val[] = { 2775000, }; -static const int const mc13783_violo_val[] = { +static const int mc13783_violo_val[] = { 1200000, 1300000, 1500000, 1800000, }; -static const int const mc13783_vdig_val[] = { +static const int mc13783_vdig_val[] = { 1200000, 1300000, 1500000, 1800000, }; -static const int const mc13783_vgen_val[] = { +static const int mc13783_vgen_val[] = { 1200000, 1300000, 1500000, 1800000, 1100000, 2000000, 2775000, 2400000, }; -static const int const mc13783_vrfdig_val[] = { +static const int mc13783_vrfdig_val[] = { 1200000, 1500000, 1800000, 1875000, }; -static const int const mc13783_vrfref_val[] = { +static const int mc13783_vrfref_val[] = { 2475000, 2600000, 2700000, 2775000, }; -static const int const mc13783_vrfcp_val[] = { +static const int mc13783_vrfcp_val[] = { 2700000, 2775000, }; -static const int const mc13783_vsim_val[] = { +static const int mc13783_vsim_val[] = { 1800000, 2900000, 3000000, }; -static const int const mc13783_vesim_val[] = { +static const int mc13783_vesim_val[] = { 1800000, 2900000, }; -static const int const mc13783_vcam_val[] = { +static const int mc13783_vcam_val[] = { 1500000, 1800000, 2500000, 2550000, 2600000, 2750000, 2800000, 3000000, }; -static const int const mc13783_vrfbg_val[] = { +static const int mc13783_vrfbg_val[] = { 1250000, }; -static const int const mc13783_vvib_val[] = { +static const int mc13783_vvib_val[] = { 1300000, 1800000, 2000000, 3000000, }; -static const int const mc13783_vmmc_val[] = { +static const int mc13783_vmmc_val[] = { 1600000, 1800000, 2000000, 2600000, 2700000, 2800000, 2900000, 3000000, }; -static const int const mc13783_vrf_val[] = { +static const int mc13783_vrf_val[] = { 1500000, 1875000, 2700000, 2775000, }; -static const int const mc13783_gpo_val[] = { +static const int mc13783_gpo_val[] = { 3100000, }; -static const int const mc13783_pwgtdrv_val[] = { +static const int mc13783_pwgtdrv_val[] = { 5500000, }; -static struct regulator_ops mc13783_regulator_ops; -static struct regulator_ops mc13783_fixed_regulator_ops; static struct regulator_ops mc13783_gpo_regulator_ops; -#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \ - [MC13783_ ## prefix ## _ ## _name] = { \ - .desc = { \ - .name = #prefix "_" #_name, \ - .n_voltages = ARRAY_SIZE(_voltages), \ - .ops = &mc13783_regulator_ops, \ - 
.type = REGULATOR_VOLTAGE, \ - .id = MC13783_ ## prefix ## _ ## _name, \ - .owner = THIS_MODULE, \ - }, \ - .reg = MC13783_REG_ ## _reg, \ - .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ - .vsel_reg = MC13783_REG_ ## _vsel_reg, \ - .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\ - .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\ - .voltages = _voltages, \ - } +#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \ + mc13xxx_regulator_ops) -#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \ - [MC13783_ ## prefix ## _ ## _name] = { \ - .desc = { \ - .name = #prefix "_" #_name, \ - .n_voltages = ARRAY_SIZE(_voltages), \ - .ops = &mc13783_fixed_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = MC13783_ ## prefix ## _ ## _name, \ - .owner = THIS_MODULE, \ - }, \ - .reg = MC13783_REG_ ## _reg, \ - .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ - .voltages = _voltages, \ - } +#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \ + mc13xxx_fixed_regulator_ops) -#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \ - [MC13783_ ## prefix ## _ ## _name] = { \ - .desc = { \ - .name = #prefix "_" #_name, \ - .n_voltages = ARRAY_SIZE(_voltages), \ - .ops = &mc13783_gpo_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = MC13783_ ## prefix ## _ ## _name, \ - .owner = THIS_MODULE, \ - }, \ - .reg = MC13783_REG_ ## _reg, \ - .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ - .voltages = _voltages, \ - } +#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \ + mc13783_gpo_regulator_ops) #define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages) + MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) #define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages) + MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) -static struct mc13783_regulator mc13783_regulators[] = { +static struct mc13xxx_regulator mc13783_regulators[] = { MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), - MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), - MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val), + MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), + MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val), MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \ mc13783_violo_val), MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ @@ -255,7 +209,7 @@ static struct mc13783_regulator mc13783_regulators[] = { mc13783_vesim_val), MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ mc13783_vcam_val), - MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val), + MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val), MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \ mc13783_vvib_val), MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \ @@ -266,215 +220,24 @@ static struct mc13783_regulator mc13783_regulators[] = { mc13783_vmmc_val), MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \ mc13783_vmmc_val), - MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val), - 
MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val), - MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val), -}; - -struct mc13783_regulator_priv { - struct mc13783 *mc13783; - u32 powermisc_pwgt_state; - struct regulator_dev *regulators[]; -}; - -static int mc13783_regulator_enable(struct regulator_dev *rdev) -{ - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); - int ret; - - dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - - mc13783_lock(priv->mc13783); - ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg, - mc13783_regulators[id].enable_bit, - mc13783_regulators[id].enable_bit); - mc13783_unlock(priv->mc13783); - - return ret; -} - -static int mc13783_regulator_disable(struct regulator_dev *rdev) -{ - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); - int ret; - - dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - - mc13783_lock(priv->mc13783); - ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg, - mc13783_regulators[id].enable_bit, 0); - mc13783_unlock(priv->mc13783); - - return ret; -} - -static int mc13783_regulator_is_enabled(struct regulator_dev *rdev) -{ - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); - int ret, id = rdev_get_id(rdev); - unsigned int val; - - mc13783_lock(priv->mc13783); - ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val); - mc13783_unlock(priv->mc13783); - - if (ret) - return ret; - - return (val & mc13783_regulators[id].enable_bit) != 0; -} - -static int mc13783_regulator_list_voltage(struct regulator_dev *rdev, - unsigned selector) -{ - int id = rdev_get_id(rdev); - - if (selector >= mc13783_regulators[id].desc.n_voltages) - return -EINVAL; - - return mc13783_regulators[id].voltages[selector]; -} - -static int mc13783_get_best_voltage_index(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - int reg_id = rdev_get_id(rdev); - int i; - int bestmatch; - int bestindex; - - /* - * Locate the minimum voltage fitting the criteria on - * this regulator. The switchable voltages are not - * in strict falling order so we need to check them - * all for the best match. 
- */ - bestmatch = INT_MAX; - bestindex = -1; - for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) { - if (mc13783_regulators[reg_id].voltages[i] >= min_uV && - mc13783_regulators[reg_id].voltages[i] < bestmatch) { - bestmatch = mc13783_regulators[reg_id].voltages[i]; - bestindex = i; - } - } - - if (bestindex < 0 || bestmatch > max_uV) { - dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n", - min_uV, max_uV); - return -EINVAL; - } - return bestindex; -} - -static int mc13783_regulator_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); - int value, id = rdev_get_id(rdev); - int ret; - - dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", - __func__, id, min_uV, max_uV); - - /* Find the best index */ - value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV); - dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value); - if (value < 0) - return value; - - mc13783_lock(priv->mc13783); - ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg, - mc13783_regulators[id].vsel_mask, - value << mc13783_regulators[id].vsel_shift); - mc13783_unlock(priv->mc13783); - - return ret; -} - -static int mc13783_regulator_get_voltage(struct regulator_dev *rdev) -{ - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); - int ret, id = rdev_get_id(rdev); - unsigned int val; - - dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - - mc13783_lock(priv->mc13783); - ret = mc13783_reg_read(priv->mc13783, - mc13783_regulators[id].vsel_reg, &val); - mc13783_unlock(priv->mc13783); - - if (ret) - return ret; - - val = (val & mc13783_regulators[id].vsel_mask) - >> mc13783_regulators[id].vsel_shift; - - dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); - - BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages); - - return mc13783_regulators[id].voltages[val]; -} - -static struct regulator_ops mc13783_regulator_ops = { - .enable = mc13783_regulator_enable, - .disable = mc13783_regulator_disable, - .is_enabled = mc13783_regulator_is_enabled, - .list_voltage = mc13783_regulator_list_voltage, - .set_voltage = mc13783_regulator_set_voltage, - .get_voltage = mc13783_regulator_get_voltage, -}; - -static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - int id = rdev_get_id(rdev); - - dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", - __func__, id, min_uV, max_uV); - - if (min_uV >= mc13783_regulators[id].voltages[0] && - max_uV <= mc13783_regulators[id].voltages[0]) - return 0; - else - return -EINVAL; -} - -static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev) -{ - int id = rdev_get_id(rdev); - - dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - - return mc13783_regulators[id].voltages[0]; -} - -static struct regulator_ops mc13783_fixed_regulator_ops = { - .enable = mc13783_regulator_enable, - .disable = mc13783_regulator_disable, - .is_enabled = mc13783_regulator_is_enabled, - .list_voltage = mc13783_regulator_list_voltage, - .set_voltage = mc13783_fixed_regulator_set_voltage, - .get_voltage = mc13783_fixed_regulator_get_voltage, + MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val), + 
MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val), }; -static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, - u32 val) +static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, + u32 val) { - struct mc13783 *mc13783 = priv->mc13783; + struct mc13xxx *mc13783 = priv->mc13xxx; int ret; u32 valread; BUG_ON(val & ~mask); - ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread); + ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread); if (ret) return ret; @@ -489,34 +252,36 @@ static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) | priv->powermisc_pwgt_state; - return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread); + return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread); } static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev) { - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); int ret; - u32 en_val = mc13783_regulators[id].enable_bit; + u32 en_val = mc13xxx_regulators[id].enable_bit; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate enable value is 0 */ - if (id == MC13783_REGU_PWGT1SPI || - id == MC13783_REGU_PWGT2SPI) + if (id == MC13783_REG_PWGT1SPI || + id == MC13783_REG_PWGT2SPI) en_val = 0; - mc13783_lock(priv->mc13783); - ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit, + mc13xxx_lock(priv->mc13xxx); + ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, en_val); - mc13783_unlock(priv->mc13783); + mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) { - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); int ret; u32 dis_val = 0; @@ -524,27 +289,28 @@ static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate disable value is 1 */ - if (id == MC13783_REGU_PWGT1SPI || - id == MC13783_REGU_PWGT2SPI) - dis_val = mc13783_regulators[id].enable_bit; + if (id == MC13783_REG_PWGT1SPI || + id == MC13783_REG_PWGT2SPI) + dis_val = mc13xxx_regulators[id].enable_bit; - mc13783_lock(priv->mc13783); - ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit, + mc13xxx_lock(priv->mc13xxx); + ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, dis_val); - mc13783_unlock(priv->mc13783); + mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev) { - struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int ret, id = rdev_get_id(rdev); unsigned int val; - mc13783_lock(priv->mc13783); - ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val); - mc13783_unlock(priv->mc13783); + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val); + mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; @@ -554,22 +320,22 @@ static int mc13783_gpo_regulator_is_enabled(struct 
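
The mc13783 driver is being moved onto shared mc13xxx infrastructure (struct mc13xxx_regulator, the MC13xxx_*_DEFINE macros, mc13xxx_regulator_ops and friends from mc13xxx.h). That header is not part of this excerpt; a sketch of what it presumably carries, inferred from the mc13783-local definitions removed above rather than copied from the header:

	/* Inferred layout; assumption, not the actual mc13xxx.h contents. */
	struct mc13xxx_regulator {
		struct regulator_desc desc;
		int reg;
		int enable_bit;
		int vsel_reg;
		int vsel_shift;
		int vsel_mask;
		int const *voltages;
	};

	struct mc13xxx_regulator_priv {
		struct mc13xxx *mc13xxx;
		u32 powermisc_pwgt_state;
		struct mc13xxx_regulator *mc13xxx_regulators;
		struct regulator_dev *regulators[];
	};

Sharing these lets the generic enable/disable/voltage callbacks live in one place while mc13783 keeps only its POWERMISC/GPO-specific ops, and the mc13892 driver added below reuses the same table-driven scheme.
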
regulator_dev *rdev) val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) | (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M); - return (val & mc13783_regulators[id].enable_bit) != 0; + return (val & mc13xxx_regulators[id].enable_bit) != 0; } static struct regulator_ops mc13783_gpo_regulator_ops = { .enable = mc13783_gpo_regulator_enable, .disable = mc13783_gpo_regulator_disable, .is_enabled = mc13783_gpo_regulator_is_enabled, - .list_voltage = mc13783_regulator_list_voltage, - .set_voltage = mc13783_fixed_regulator_set_voltage, - .get_voltage = mc13783_fixed_regulator_get_voltage, + .list_voltage = mc13xxx_regulator_list_voltage, + .set_voltage = mc13xxx_fixed_regulator_set_voltage, + .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; static int __devinit mc13783_regulator_probe(struct platform_device *pdev) { - struct mc13783_regulator_priv *priv; - struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent); + struct mc13xxx_regulator_priv *priv; + struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent); struct mc13783_regulator_platform_data *pdata = dev_get_platdata(&pdev->dev); struct mc13783_regulator_init_data *init_data; @@ -583,7 +349,8 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->mc13783 = mc13783; + priv->mc13xxx_regulators = mc13783_regulators; + priv->mc13xxx = mc13783; for (i = 0; i < pdata->num_regulators; i++) { init_data = &pdata->regulators[i]; @@ -613,7 +380,7 @@ err: static int __devexit mc13783_regulator_remove(struct platform_device *pdev) { - struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev); + struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); struct mc13783_regulator_platform_data *pdata = dev_get_platdata(&pdev->dev); int i; diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c new file mode 100644 index 000000000000..1b8f7398a4a8 --- /dev/null +++ b/drivers/regulator/mc13892-regulator.c @@ -0,0 +1,635 @@ +/* + * Regulator Driver for Freescale MC13892 PMIC + * + * Copyright 2010 Yong Shen + * + * Based on draft driver from Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mc13xxx.h" + +#define MC13892_REVISION 7 + +#define MC13892_POWERCTL0 13 +#define MC13892_POWERCTL0_USEROFFSPI 3 +#define MC13892_POWERCTL0_VCOINCELLVSEL 20 +#define MC13892_POWERCTL0_VCOINCELLVSEL_M (7<<20) +#define MC13892_POWERCTL0_VCOINCELLEN (1<<23) + +#define MC13892_SWITCHERS0_SWxHI (1<<23) + +#define MC13892_SWITCHERS0 24 +#define MC13892_SWITCHERS0_SW1VSEL 0 +#define MC13892_SWITCHERS0_SW1VSEL_M (0x1f<<0) +#define MC13892_SWITCHERS0_SW1HI (1<<23) +#define MC13892_SWITCHERS0_SW1EN 0 + +#define MC13892_SWITCHERS1 25 +#define MC13892_SWITCHERS1_SW2VSEL 0 +#define MC13892_SWITCHERS1_SW2VSEL_M (0x1f<<0) +#define MC13892_SWITCHERS1_SW2HI (1<<23) +#define MC13892_SWITCHERS1_SW2EN 0 + +#define MC13892_SWITCHERS2 26 +#define MC13892_SWITCHERS2_SW3VSEL 0 +#define MC13892_SWITCHERS2_SW3VSEL_M (0x1f<<0) +#define MC13892_SWITCHERS2_SW3HI (1<<23) +#define MC13892_SWITCHERS2_SW3EN 0 + +#define MC13892_SWITCHERS3 27 +#define MC13892_SWITCHERS3_SW4VSEL 0 +#define MC13892_SWITCHERS3_SW4VSEL_M (0x1f<<0) +#define MC13892_SWITCHERS3_SW4HI (1<<23) +#define MC13892_SWITCHERS3_SW4EN 0 + +#define MC13892_SWITCHERS4 28 +#define MC13892_SWITCHERS4_SW1MODE 0 +#define MC13892_SWITCHERS4_SW1MODE_AUTO (8<<0) +#define MC13892_SWITCHERS4_SW1MODE_M (0xf<<0) +#define MC13892_SWITCHERS4_SW2MODE 10 +#define MC13892_SWITCHERS4_SW2MODE_AUTO (8<<10) +#define MC13892_SWITCHERS4_SW2MODE_M (0xf<<10) + +#define MC13892_SWITCHERS5 29 +#define MC13892_SWITCHERS5_SW3MODE 0 +#define MC13892_SWITCHERS5_SW3MODE_AUTO (8<<0) +#define MC13892_SWITCHERS5_SW3MODE_M (0xf<<0) +#define MC13892_SWITCHERS5_SW4MODE 8 +#define MC13892_SWITCHERS5_SW4MODE_AUTO (8<<8) +#define MC13892_SWITCHERS5_SW4MODE_M (0xf<<8) +#define MC13892_SWITCHERS5_SWBSTEN (1<<20) + +#define MC13892_REGULATORSETTING0 30 +#define MC13892_REGULATORSETTING0_VGEN1VSEL 0 +#define MC13892_REGULATORSETTING0_VDIGVSEL 4 +#define MC13892_REGULATORSETTING0_VGEN2VSEL 6 +#define MC13892_REGULATORSETTING0_VPLLVSEL 9 +#define MC13892_REGULATORSETTING0_VUSB2VSEL 11 +#define MC13892_REGULATORSETTING0_VGEN3VSEL 14 +#define MC13892_REGULATORSETTING0_VCAMVSEL 16 + +#define MC13892_REGULATORSETTING0_VGEN1VSEL_M (3<<0) +#define MC13892_REGULATORSETTING0_VDIGVSEL_M (3<<4) +#define MC13892_REGULATORSETTING0_VGEN2VSEL_M (7<<6) +#define MC13892_REGULATORSETTING0_VPLLVSEL_M (3<<9) +#define MC13892_REGULATORSETTING0_VUSB2VSEL_M (3<<11) +#define MC13892_REGULATORSETTING0_VGEN3VSEL_M (1<<14) +#define MC13892_REGULATORSETTING0_VCAMVSEL_M (3<<16) + +#define MC13892_REGULATORSETTING1 31 +#define MC13892_REGULATORSETTING1_VVIDEOVSEL 2 +#define MC13892_REGULATORSETTING1_VAUDIOVSEL 4 +#define MC13892_REGULATORSETTING1_VSDVSEL 6 + +#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M (3<<2) +#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M (3<<4) +#define MC13892_REGULATORSETTING1_VSDVSEL_M (7<<6) + +#define MC13892_REGULATORMODE0 32 +#define MC13892_REGULATORMODE0_VGEN1EN (1<<0) +#define MC13892_REGULATORMODE0_VGEN1STDBY (1<<1) +#define MC13892_REGULATORMODE0_VGEN1MODE (1<<2) +#define MC13892_REGULATORMODE0_VIOHIEN (1<<3) +#define MC13892_REGULATORMODE0_VIOHISTDBY (1<<4) +#define MC13892_REGULATORMODE0_VIOHIMODE (1<<5) +#define MC13892_REGULATORMODE0_VDIGEN (1<<9) +#define MC13892_REGULATORMODE0_VDIGSTDBY (1<<10) +#define MC13892_REGULATORMODE0_VDIGMODE (1<<11) +#define MC13892_REGULATORMODE0_VGEN2EN (1<<12) +#define MC13892_REGULATORMODE0_VGEN2STDBY (1<<13) +#define MC13892_REGULATORMODE0_VGEN2MODE (1<<14) 
+#define MC13892_REGULATORMODE0_VPLLEN (1<<15) +#define MC13892_REGULATORMODE0_VPLLSTDBY (1<<16) +#define MC13892_REGULATORMODE0_VPLLMODE (1<<17) +#define MC13892_REGULATORMODE0_VUSB2EN (1<<18) +#define MC13892_REGULATORMODE0_VUSB2STDBY (1<<19) +#define MC13892_REGULATORMODE0_VUSB2MODE (1<<20) + +#define MC13892_REGULATORMODE1 33 +#define MC13892_REGULATORMODE1_VGEN3EN (1<<0) +#define MC13892_REGULATORMODE1_VGEN3STDBY (1<<1) +#define MC13892_REGULATORMODE1_VGEN3MODE (1<<2) +#define MC13892_REGULATORMODE1_VCAMEN (1<<6) +#define MC13892_REGULATORMODE1_VCAMSTDBY (1<<7) +#define MC13892_REGULATORMODE1_VCAMMODE (1<<8) +#define MC13892_REGULATORMODE1_VCAMCONFIGEN (1<<9) +#define MC13892_REGULATORMODE1_VVIDEOEN (1<<12) +#define MC13892_REGULATORMODE1_VVIDEOSTDBY (1<<13) +#define MC13892_REGULATORMODE1_VVIDEOMODE (1<<14) +#define MC13892_REGULATORMODE1_VAUDIOEN (1<<15) +#define MC13892_REGULATORMODE1_VAUDIOSTDBY (1<<16) +#define MC13892_REGULATORMODE1_VAUDIOMODE (1<<17) +#define MC13892_REGULATORMODE1_VSDEN (1<<18) +#define MC13892_REGULATORMODE1_VSDSTDBY (1<<19) +#define MC13892_REGULATORMODE1_VSDMODE (1<<20) + +#define MC13892_POWERMISC 34 +#define MC13892_POWERMISC_GPO1EN (1<<6) +#define MC13892_POWERMISC_GPO2EN (1<<8) +#define MC13892_POWERMISC_GPO3EN (1<<10) +#define MC13892_POWERMISC_GPO4EN (1<<12) +#define MC13892_POWERMISC_PWGT1SPIEN (1<<15) +#define MC13892_POWERMISC_PWGT2SPIEN (1<<16) +#define MC13892_POWERMISC_GPO4ADINEN (1<<21) + +#define MC13892_POWERMISC_PWGTSPI_M (3 << 15) + +#define MC13892_USB1 50 +#define MC13892_USB1_VUSBEN (1<<3) + +static const int mc13892_vcoincell[] = { + 2500000, 2700000, 2800000, 2900000, 3000000, 3100000, + 3200000, 3300000, +}; + +static const int mc13892_sw1[] = { + 600000, 625000, 650000, 675000, 700000, 725000, + 750000, 775000, 800000, 825000, 850000, 875000, + 900000, 925000, 950000, 975000, 1000000, 1025000, + 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, + 1350000, 1375000 +}; + +static const int mc13892_sw[] = { + 600000, 625000, 650000, 675000, 700000, 725000, + 750000, 775000, 800000, 825000, 850000, 875000, + 900000, 925000, 950000, 975000, 1000000, 1025000, + 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, + 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, + 1500000, 1525000, 1550000, 1575000, 1600000, 1625000, + 1650000, 1675000, 1700000, 1725000, 1750000, 1775000, + 1800000, 1825000, 1850000, 1875000 +}; + +static const int mc13892_swbst[] = { + 5000000, +}; + +static const int mc13892_viohi[] = { + 2775000, +}; + +static const int mc13892_vpll[] = { + 1050000, 1250000, 1650000, 1800000, +}; + +static const int mc13892_vdig[] = { + 1050000, 1250000, 1650000, 1800000, +}; + +static const int mc13892_vsd[] = { + 1800000, 2000000, 2600000, 2700000, + 2800000, 2900000, 3000000, 3150000, +}; + +static const int mc13892_vusb2[] = { + 2400000, 2600000, 2700000, 2775000, +}; + +static const int mc13892_vvideo[] = { + 2700000, 2775000, 2500000, 2600000, +}; + +static const int mc13892_vaudio[] = { + 2300000, 2500000, 2775000, 3000000, +}; + +static const int mc13892_vcam[] = { + 2500000, 2600000, 2750000, 3000000, +}; + +static const int mc13892_vgen1[] = { + 1200000, 1500000, 2775000, 3150000, +}; + +static const int mc13892_vgen2[] = { + 1200000, 1500000, 1600000, 1800000, + 2700000, 2800000, 3000000, 3150000, +}; + +static const int mc13892_vgen3[] = { + 1800000, 2900000, +}; + +static const int mc13892_vusb[] = { + 
3300000, +}; + +static const int mc13892_gpo[] = { + 2750000, +}; + +static const int mc13892_pwgtdrv[] = { + 5000000, +}; + +static struct regulator_ops mc13892_gpo_regulator_ops; +/* sw regulators need special care due to the "hi bit" */ +static struct regulator_ops mc13892_sw_regulator_ops; + + +#define MC13892_FIXED_DEFINE(name, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \ + mc13xxx_fixed_regulator_ops) + +#define MC13892_GPO_DEFINE(name, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \ + mc13892_gpo_regulator_ops) + +#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ + mc13892_sw_regulator_ops) + +#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ + mc13xxx_regulator_ops) + +static struct mc13xxx_regulator mc13892_regulators[] = { + MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell), + MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1), + MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw), + MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw), + MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), + MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), + MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), + MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \ + mc13892_vpll), + MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ + mc13892_vdig), + MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \ + mc13892_vsd), + MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \ + mc13892_vusb2), + MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \ + mc13892_vvideo), + MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \ + mc13892_vaudio), + MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ + mc13892_vcam), + MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \ + mc13892_vgen1), + MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \ + mc13892_vgen2), + MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \ + mc13892_vgen3), + MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), + MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv), + MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv), +}; + +static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, + u32 val) +{ + struct mc13xxx *mc13892 = priv->mc13xxx; + int ret; + u32 valread; + + BUG_ON(val & ~mask); + + ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread); + if (ret) + return ret; + + /* Update the stored state for Power Gates. 
*/ + priv->powermisc_pwgt_state = + (priv->powermisc_pwgt_state & ~mask) | val; + priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M; + + /* Construct the new register value */ + valread = (valread & ~mask) | val; + /* Overwrite the PWGTxEN with the stored version */ + valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) | + priv->powermisc_pwgt_state; + + return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread); +} + +static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int ret; + u32 en_val = mc13892_regulators[id].enable_bit; + u32 mask = mc13892_regulators[id].enable_bit; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + /* Power Gate enable value is 0 */ + if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) + en_val = 0; + + if (id == MC13892_GPO4) + mask |= MC13892_POWERMISC_GPO4ADINEN; + + mc13xxx_lock(priv->mc13xxx); + ret = mc13892_powermisc_rmw(priv, mask, en_val); + mc13xxx_unlock(priv->mc13xxx); + + return ret; +} + +static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int ret; + u32 dis_val = 0; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + /* Power Gate disable value is 1 */ + if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) + dis_val = mc13892_regulators[id].enable_bit; + + mc13xxx_lock(priv->mc13xxx); + ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit, + dis_val); + mc13xxx_unlock(priv->mc13xxx); + + return ret; +} + +static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + int ret, id = rdev_get_id(rdev); + unsigned int val; + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val); + mc13xxx_unlock(priv->mc13xxx); + + if (ret) + return ret; + + /* Power Gates state is stored in powermisc_pwgt_state + * where the meaning of bits is negated */ + val = (val & ~MC13892_POWERMISC_PWGTSPI_M) | + (priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M); + + return (val & mc13892_regulators[id].enable_bit) != 0; +} + + +static struct regulator_ops mc13892_gpo_regulator_ops = { + .enable = mc13892_gpo_regulator_enable, + .disable = mc13892_gpo_regulator_disable, + .is_enabled = mc13892_gpo_regulator_is_enabled, + .list_voltage = mc13xxx_regulator_list_voltage, + .set_voltage = mc13xxx_fixed_regulator_set_voltage, + .get_voltage = mc13xxx_fixed_regulator_get_voltage, +}; + +static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + int ret, id = rdev_get_id(rdev); + unsigned int val, hi; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_read(priv->mc13xxx, + mc13892_regulators[id].vsel_reg, &val); + mc13xxx_unlock(priv->mc13xxx); + if (ret) + return ret; + + hi = val & MC13892_SWITCHERS0_SWxHI; + val = (val & mc13892_regulators[id].vsel_mask) + >> mc13892_regulators[id].vsel_shift; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); + + if (hi) + val = (25000 * val) + 1100000; + else + val = (25000 * val) + 600000; + + return val; +} + +static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector) +{ + struct mc13xxx_regulator_priv 
*priv = rdev_get_drvdata(rdev);
+ int hi, value, val, mask, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+ if (value < 0)
+ return value;
+
+ value = mc13892_regulators[id].voltages[value];
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx,
+ mc13892_regulators[id].vsel_reg, &val);
+ if (ret)
+ goto err;
+
+ hi = val & MC13892_SWITCHERS0_SWxHI;
+ if (value > 1375000)
+ hi = 1;
+ if (value < 1100000)
+ hi = 0;
+
+ if (hi) {
+ value = (value - 1100000) / 25000;
+ value |= MC13892_SWITCHERS0_SWxHI;
+ } else
+ value = (value - 600000) / 25000;
+
+ mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ mask, value << mc13892_regulators[id].vsel_shift);
+err:
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static struct regulator_ops mc13892_sw_regulator_ops = {
+ .is_enabled = mc13xxx_sw_regulator_is_enabled,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13892_sw_regulator_set_voltage,
+ .get_voltage = mc13892_sw_regulator_get_voltage,
+};
+
+static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int en_val = 0;
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+
+ if (mode == REGULATOR_MODE_FAST)
+ en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
+ MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+
+static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+{
+ struct mc13xxx_regulator_priv *priv;
+ struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
+ struct mc13xxx_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_init_data *init_data;
+ int i, ret;
+ u32 val;
+
+ priv = kzalloc(sizeof(*priv) +
+ pdata->num_regulators * sizeof(priv->regulators[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13xxx_regulators = mc13892_regulators;
+ priv->mc13xxx = mc13892;
+
+ mc13xxx_lock(mc13892);
+ ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
+ if (ret)
+ goto err_free;
+
+ /* enable switch auto mode */
+ if ((val & 0x0000FFFF) == 0x45d0) {
+ ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
+ MC13892_SWITCHERS4_SW1MODE_M |
+ MC13892_SWITCHERS4_SW2MODE_M,
+ MC13892_SWITCHERS4_SW1MODE_AUTO |
+ MC13892_SWITCHERS4_SW2MODE_AUTO);
+ if (ret)
+ goto err_free;
+
+ ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
+ MC13892_SWITCHERS5_SW3MODE_M |
+ MC13892_SWITCHERS5_SW4MODE_M,
+ MC13892_SWITCHERS5_SW3MODE_AUTO |
+ MC13892_SWITCHERS5_SW4MODE_AUTO);
+ if (ret)
+ goto err_free;
+ }
+ mc13xxx_unlock(mc13892);
+
+
mc13892_regulators[MC13892_VCAM].desc.ops->set_mode + = mc13892_vcam_set_mode; + mc13892_regulators[MC13892_VCAM].desc.ops->get_mode + = mc13892_vcam_get_mode; + for (i = 0; i < pdata->num_regulators; i++) { + init_data = &pdata->regulators[i]; + priv->regulators[i] = regulator_register( + &mc13892_regulators[init_data->id].desc, + &pdev->dev, init_data->init_data, priv); + + if (IS_ERR(priv->regulators[i])) { + dev_err(&pdev->dev, "failed to register regulator %s\n", + mc13892_regulators[i].desc.name); + ret = PTR_ERR(priv->regulators[i]); + goto err; + } + } + + platform_set_drvdata(pdev, priv); + + return 0; +err: + while (--i >= 0) + regulator_unregister(priv->regulators[i]); + +err_free: + mc13xxx_unlock(mc13892); + kfree(priv); + + return ret; +} + +static int __devexit mc13892_regulator_remove(struct platform_device *pdev) +{ + struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); + struct mc13xxx_regulator_platform_data *pdata = + dev_get_platdata(&pdev->dev); + int i; + + platform_set_drvdata(pdev, NULL); + + for (i = 0; i < pdata->num_regulators; i++) + regulator_unregister(priv->regulators[i]); + + kfree(priv); + return 0; +} + +static struct platform_driver mc13892_regulator_driver = { + .driver = { + .name = "mc13892-regulator", + .owner = THIS_MODULE, + }, + .remove = __devexit_p(mc13892_regulator_remove), + .probe = mc13892_regulator_probe, +}; + +static int __init mc13892_regulator_init(void) +{ + return platform_driver_register(&mc13892_regulator_driver); +} +subsys_initcall(mc13892_regulator_init); + +static void __exit mc13892_regulator_exit(void) +{ + platform_driver_unregister(&mc13892_regulator_driver); +} +module_exit(mc13892_regulator_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yong Shen "); +MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC"); +MODULE_ALIAS("platform:mc13892-regulator"); diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c new file mode 100644 index 000000000000..f53d31b950d4 --- /dev/null +++ b/drivers/regulator/mc13xxx-regulator-core.c @@ -0,0 +1,241 @@ +/* + * Regulator Driver for Freescale MC13xxx PMIC + * + * Copyright 2010 Yong Shen + * + * Based on mc13783 regulator driver : + * Copyright (C) 2008 Sascha Hauer, Pengutronix + * Copyright 2009 Alberto Panizzo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Regs infos taken from mc13xxx drivers from freescale and mc13xxx.pdf file + * from freescale + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mc13xxx.h" + +static int mc13xxx_regulator_enable(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int id = rdev_get_id(rdev); + int ret; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg, + mc13xxx_regulators[id].enable_bit, + mc13xxx_regulators[id].enable_bit); + mc13xxx_unlock(priv->mc13xxx); + + return ret; +} + +static int mc13xxx_regulator_disable(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int id = rdev_get_id(rdev); + int ret; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg, + mc13xxx_regulators[id].enable_bit, 0); + mc13xxx_unlock(priv->mc13xxx); + + return ret; +} + +static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int ret, id = rdev_get_id(rdev); + unsigned int val; + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val); + mc13xxx_unlock(priv->mc13xxx); + + if (ret) + return ret; + + return (val & mc13xxx_regulators[id].enable_bit) != 0; +} + +int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + int id = rdev_get_id(rdev); + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + + if (selector >= mc13xxx_regulators[id].desc.n_voltages) + return -EINVAL; + + return mc13xxx_regulators[id].voltages[selector]; +} +EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage); + +int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int reg_id = rdev_get_id(rdev); + int i; + int bestmatch; + int bestindex; + + /* + * Locate the minimum voltage fitting the criteria on + * this regulator. The switchable voltages are not + * in strict falling order so we need to check them + * all for the best match. 
+ */ + bestmatch = INT_MAX; + bestindex = -1; + for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) { + if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV && + mc13xxx_regulators[reg_id].voltages[i] < bestmatch) { + bestmatch = mc13xxx_regulators[reg_id].voltages[i]; + bestindex = i; + } + } + + if (bestindex < 0 || bestmatch > max_uV) { + dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n", + min_uV, max_uV); + return -EINVAL; + } + return bestindex; +} +EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index); + +static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned *selector) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int value, id = rdev_get_id(rdev); + int ret; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", + __func__, id, min_uV, max_uV); + + /* Find the best index */ + value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV); + dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value); + if (value < 0) + return value; + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg, + mc13xxx_regulators[id].vsel_mask, + value << mc13xxx_regulators[id].vsel_shift); + mc13xxx_unlock(priv->mc13xxx); + + return ret; +} + +static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int ret, id = rdev_get_id(rdev); + unsigned int val; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + mc13xxx_lock(priv->mc13xxx); + ret = mc13xxx_reg_read(priv->mc13xxx, + mc13xxx_regulators[id].vsel_reg, &val); + mc13xxx_unlock(priv->mc13xxx); + + if (ret) + return ret; + + val = (val & mc13xxx_regulators[id].vsel_mask) + >> mc13xxx_regulators[id].vsel_shift; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); + + BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); + + return mc13xxx_regulators[id].voltages[val]; +} + +struct regulator_ops mc13xxx_regulator_ops = { + .enable = mc13xxx_regulator_enable, + .disable = mc13xxx_regulator_disable, + .is_enabled = mc13xxx_regulator_is_enabled, + .list_voltage = mc13xxx_regulator_list_voltage, + .set_voltage = mc13xxx_regulator_set_voltage, + .get_voltage = mc13xxx_regulator_get_voltage, +}; +EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops); + +int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned *selector) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int id = rdev_get_id(rdev); + + dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", + __func__, id, min_uV, max_uV); + + if (min_uV >= mc13xxx_regulators[id].voltages[0] && + max_uV <= mc13xxx_regulators[id].voltages[0]) + return 0; + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage); + +int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev) +{ + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + int id = rdev_get_id(rdev); + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + return mc13xxx_regulators[id].voltages[0]; +} +EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage); + 
+struct regulator_ops mc13xxx_fixed_regulator_ops = { + .enable = mc13xxx_regulator_enable, + .disable = mc13xxx_regulator_disable, + .is_enabled = mc13xxx_regulator_is_enabled, + .list_voltage = mc13xxx_regulator_list_voltage, + .set_voltage = mc13xxx_fixed_regulator_set_voltage, + .get_voltage = mc13xxx_fixed_regulator_get_voltage, +}; +EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops); + +int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev) +{ + return 1; +} +EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yong Shen "); +MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC"); +MODULE_ALIAS("mc13xxx-regulator-core"); diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h new file mode 100644 index 000000000000..27758267e122 --- /dev/null +++ b/drivers/regulator/mc13xxx.h @@ -0,0 +1,101 @@ +/* + * mc13xxx.h - regulators for the Freescale mc13xxx PMIC + * + * Copyright (C) 2010 Yong Shen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_REGULATOR_MC13XXX_H +#define __LINUX_REGULATOR_MC13XXX_H + +#include + +struct mc13xxx_regulator { + struct regulator_desc desc; + int reg; + int enable_bit; + int vsel_reg; + int vsel_shift; + int vsel_mask; + int hi_bit; + int const *voltages; +}; + +struct mc13xxx_regulator_priv { + struct mc13xxx *mc13xxx; + u32 powermisc_pwgt_state; + struct mc13xxx_regulator *mc13xxx_regulators; + struct regulator_dev *regulators[]; +}; + +extern int mc13xxx_sw_regulator(struct regulator_dev *rdev); +extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev); +extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev, + int min_uV, int max_uV); +extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector); +extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector); +extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev); + +extern struct regulator_ops mc13xxx_regulator_ops; +extern struct regulator_ops mc13xxx_fixed_regulator_ops; + +#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \ + [prefix ## _name] = { \ + .desc = { \ + .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ + .ops = &_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = prefix ## _name, \ + .owner = THIS_MODULE, \ + }, \ + .reg = prefix ## _reg, \ + .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ + .vsel_reg = prefix ## _vsel_reg, \ + .vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\ + .vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\ + .voltages = _voltages, \ + } + +#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \ + [prefix ## _name] = { \ + .desc = { \ + .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ + .ops = &_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = prefix ## _name, \ + .owner = THIS_MODULE, \ + }, \ + .reg = prefix ## _reg, \ + .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ + } + +#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \ + [prefix ## _name] = { \ + .desc = { \ + .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ + .ops = &_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = 
prefix ## _name, \ + .owner = THIS_MODULE, \ + }, \ + .reg = prefix ## _reg, \ + .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ + } + +#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops) +#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops) + +#endif diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c index 29d0566379ae..31f6e11a7f16 100644 --- a/drivers/regulator/pcap-regulator.c +++ b/drivers/regulator/pcap-regulator.c @@ -151,7 +151,8 @@ static struct pcap_regulator vreg_table[] = { }; static int pcap_regulator_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); @@ -170,10 +171,12 @@ static int pcap_regulator_set_voltage(struct regulator_dev *rdev, i = 0; uV = vreg->voltage_table[i] * 1000; - if (min_uV <= uV && uV <= max_uV) + if (min_uV <= uV && uV <= max_uV) { + *selector = i; return ezx_pcap_set_bits(pcap, vreg->reg, (vreg->n_voltages - 1) << vreg->index, i << vreg->index); + } if (i == 0 && rdev_get_id(rdev) == V1) i = vreg->n_voltages - 1; diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c index c8f41dc05b76..69a11d9dd87f 100644 --- a/drivers/regulator/pcf50633-regulator.c +++ b/drivers/regulator/pcf50633-regulator.c @@ -108,7 +108,8 @@ static unsigned int ldo_voltage_value(u8 bits) } static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct pcf50633 *pcf; int regulator_id, millivolts; @@ -147,6 +148,8 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev, return -EINVAL; } + *selector = volt_bits; + return pcf50633_reg_write(pcf, regnr, volt_bits); } diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index cd6d4fc9d74f..60a7ca5409e9 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -321,7 +321,8 @@ static int tps65023_dcdc_get_voltage(struct regulator_dev *dev) } static int tps65023_dcdc_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct tps_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); @@ -346,6 +347,8 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev, break; } + *selector = vsel; + /* write to the register in case we found a match */ if (vsel == tps->info[dcdc]->table_len) return -EINVAL; @@ -371,7 +374,7 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev) } static int tps65023_ldo_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct tps_pmic *tps = rdev_get_drvdata(dev); int data, vsel, ldo = rdev_get_id(dev); @@ -396,6 +399,8 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[ldo]->table_len) return -EINVAL; + *selector = vsel; + data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL); if (data < 0) return data; diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 020f5878d7ff..064755290599 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ 
-369,7 +369,8 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev) } static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, dcdc = rdev_get_id(dev); @@ -415,6 +416,8 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[dcdc]->table_len) return -EINVAL; + *selector = vsel; + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; @@ -450,7 +453,8 @@ static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev) } static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, ldo = rdev_get_id(dev); @@ -483,6 +487,8 @@ static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[ldo]->table_len) return -EINVAL; + *selector = vsel; + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c new file mode 100644 index 000000000000..176a6be5a8ce --- /dev/null +++ b/drivers/regulator/tps6524x-regulator.c @@ -0,0 +1,693 @@ +/* + * Regulator driver for TPS6524x PMIC + * + * Copyright (C) 2010 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, + * whether express or implied; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define REG_LDO_SET 0x0
+#define LDO_ILIM_MASK 1 /* 0 = 400-800, 1 = 900-1500 */
+#define LDO_VSEL_MASK 0x0f
+#define LDO2_ILIM_SHIFT 12
+#define LDO2_VSEL_SHIFT 4
+#define LDO1_ILIM_SHIFT 8
+#define LDO1_VSEL_SHIFT 0
+
+#define REG_BLOCK_EN 0x1
+#define BLOCK_MASK 1
+#define BLOCK_LDO1_SHIFT 0
+#define BLOCK_LDO2_SHIFT 1
+#define BLOCK_LCD_SHIFT 2
+#define BLOCK_USB_SHIFT 3
+
+#define REG_DCDC_SET 0x2
+#define DCDC_VDCDC_MASK 0x1f
+#define DCDC_VDCDC1_SHIFT 0
+#define DCDC_VDCDC2_SHIFT 5
+#define DCDC_VDCDC3_SHIFT 10
+
+#define REG_DCDC_EN 0x3
+#define DCDCDCDC_EN_MASK 0x1
+#define DCDCDCDC1_EN_SHIFT 0
+#define DCDCDCDC1_PG_MSK BIT(1)
+#define DCDCDCDC2_EN_SHIFT 2
+#define DCDCDCDC2_PG_MSK BIT(3)
+#define DCDCDCDC3_EN_SHIFT 4
+#define DCDCDCDC3_PG_MSK BIT(5)
+
+#define REG_USB 0x4
+#define USB_ILIM_SHIFT 0
+#define USB_ILIM_MASK 0x3
+#define USB_TSD_SHIFT 2
+#define USB_TSD_MASK 0x3
+#define USB_TWARN_SHIFT 4
+#define USB_TWARN_MASK 0x3
+#define USB_IWARN_SD BIT(6)
+#define USB_FAST_LOOP BIT(7)
+
+#define REG_ALARM 0x5
+#define ALARM_LDO1 BIT(0)
+#define ALARM_DCDC1 BIT(1)
+#define ALARM_DCDC2 BIT(2)
+#define ALARM_DCDC3 BIT(3)
+#define ALARM_LDO2 BIT(4)
+#define ALARM_USB_WARN BIT(5)
+#define ALARM_USB_ALARM BIT(6)
+#define ALARM_LCD BIT(9)
+#define ALARM_TEMP_WARM BIT(10)
+#define ALARM_TEMP_HOT BIT(11)
+#define ALARM_NRST BIT(14)
+#define ALARM_POWERUP BIT(15)
+
+#define REG_INT_ENABLE 0x6
+#define INT_LDO1 BIT(0)
+#define INT_DCDC1 BIT(1)
+#define INT_DCDC2 BIT(2)
+#define INT_DCDC3 BIT(3)
+#define INT_LDO2 BIT(4)
+#define INT_USB_WARN BIT(5)
+#define INT_USB_ALARM BIT(6)
+#define INT_LCD BIT(9)
+#define INT_TEMP_WARM BIT(10)
+#define INT_TEMP_HOT BIT(11)
+#define INT_GLOBAL_EN BIT(15)
+
+#define REG_INT_STATUS 0x7
+#define STATUS_LDO1 BIT(0)
+#define STATUS_DCDC1 BIT(1)
+#define STATUS_DCDC2 BIT(2)
+#define STATUS_DCDC3 BIT(3)
+#define STATUS_LDO2 BIT(4)
+#define STATUS_USB_WARN BIT(5)
+#define STATUS_USB_ALARM BIT(6)
+#define STATUS_LCD BIT(9)
+#define STATUS_TEMP_WARM BIT(10)
+#define STATUS_TEMP_HOT BIT(11)
+
+#define REG_SOFTWARE_RESET 0xb
+#define REG_WRITE_ENABLE 0xd
+#define REG_REV_ID 0xf
+
+#define N_DCDC 3
+#define N_LDO 2
+#define N_SWITCH 2
+#define N_REGULATORS (3 /* DCDC */ + \
+ 2 /* LDO */ + \
+ 2 /* switch */)
+
+#define FIXED_ILIMSEL BIT(0)
+#define FIXED_VOLTAGE BIT(1)
+
+#define CMD_READ(reg) ((reg) << 6)
+#define CMD_WRITE(reg) (BIT(5) | (reg) << 6)
+#define STAT_CLK BIT(3)
+#define STAT_WRITE BIT(2)
+#define STAT_INVALID BIT(1)
+#define STAT_WP BIT(0)
+
+struct field {
+ int reg;
+ int shift;
+ int mask;
+};
+
+struct supply_info {
+ const char *name;
+ int n_voltages;
+ const int *voltages;
+ int fixed_voltage;
+ int n_ilimsels;
+ const int *ilimsels;
+ int fixed_ilimsel;
+ int flags;
+ struct field enable, voltage, ilimsel;
+};
+
+struct tps6524x {
+ struct device *dev;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator_desc desc[N_REGULATORS];
+ struct regulator_dev *rdev[N_REGULATORS];
+};
+
+static int __read_reg(struct tps6524x *hw, int reg)
+{
+ int error = 0;
+ u16 cmd = CMD_READ(reg), in;
+ u8 status;
+ struct spi_message m;
+ struct spi_transfer t[3];
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = 2;
+ t[0].bits_per_word = 12;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = &in;
+ t[1].len = 2;
+ t[1].bits_per_word = 16;
+ spi_message_add_tail(&t[1], &m);
+
+ t[2].rx_buf = &status;
+ t[2].len = 1;
+
t[2].bits_per_word = 4; + spi_message_add_tail(&t[2], &m); + + error = spi_sync(hw->spi, &m); + if (error < 0) + return error; + + dev_dbg(hw->dev, "read reg %d, data %x, status %x\n", + reg, in, status); + + if (!(status & STAT_CLK) || (status & STAT_WRITE)) + return -EIO; + + if (status & STAT_INVALID) + return -EINVAL; + + return in; +} + +static int read_reg(struct tps6524x *hw, int reg) +{ + int ret; + + mutex_lock(&hw->lock); + ret = __read_reg(hw, reg); + mutex_unlock(&hw->lock); + + return ret; +} + +static int __write_reg(struct tps6524x *hw, int reg, int val) +{ + int error = 0; + u16 cmd = CMD_WRITE(reg), out = val; + u8 status; + struct spi_message m; + struct spi_transfer t[3]; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = &cmd; + t[0].len = 2; + t[0].bits_per_word = 12; + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = &out; + t[1].len = 2; + t[1].bits_per_word = 16; + spi_message_add_tail(&t[1], &m); + + t[2].rx_buf = &status; + t[2].len = 1; + t[2].bits_per_word = 4; + spi_message_add_tail(&t[2], &m); + + error = spi_sync(hw->spi, &m); + if (error < 0) + return error; + + dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n", + reg, out, status); + + if (!(status & STAT_CLK) || !(status & STAT_WRITE)) + return -EIO; + + if (status & (STAT_INVALID | STAT_WP)) + return -EINVAL; + + return error; +} + +static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val) +{ + int ret; + + ret = __read_reg(hw, reg); + if (ret < 0) + return ret; + + ret &= ~mask; + ret |= val; + + ret = __write_reg(hw, reg, ret); + + return (ret < 0) ? ret : 0; +} + +static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val) +{ + int ret; + + mutex_lock(&hw->lock); + + ret = __write_reg(hw, REG_WRITE_ENABLE, 1); + if (ret) { + dev_err(hw->dev, "failed to set write enable\n"); + goto error; + } + + ret = __rmw_reg(hw, reg, mask, val); + if (ret) + dev_err(hw->dev, "failed to rmw register %d\n", reg); + + ret = __write_reg(hw, REG_WRITE_ENABLE, 0); + if (ret) { + dev_err(hw->dev, "failed to clear write enable\n"); + goto error; + } + +error: + mutex_unlock(&hw->lock); + + return ret; +} + +static int read_field(struct tps6524x *hw, const struct field *field) +{ + int tmp; + + tmp = read_reg(hw, field->reg); + if (tmp < 0) + return tmp; + + return (tmp >> field->shift) & field->mask; +} + +static int write_field(struct tps6524x *hw, const struct field *field, + int val) +{ + if (val & ~field->mask) + return -EOVERFLOW; + + return rmw_protect(hw, field->reg, + field->mask << field->shift, + val << field->shift); +} + +static const int dcdc1_voltages[] = { + 800000, 825000, 850000, 875000, + 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, + 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, + 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, + 1500000, 1525000, 1550000, 1575000, +}; + +static const int dcdc2_voltages[] = { + 1400000, 1450000, 1500000, 1550000, + 1600000, 1650000, 1700000, 1750000, + 1800000, 1850000, 1900000, 1950000, + 2000000, 2050000, 2100000, 2150000, + 2200000, 2250000, 2300000, 2350000, + 2400000, 2450000, 2500000, 2550000, + 2600000, 2650000, 2700000, 2750000, + 2800000, 2850000, 2900000, 2950000, +}; + +static const int dcdc3_voltages[] = { + 2400000, 2450000, 2500000, 2550000, 2600000, + 2650000, 2700000, 2750000, 2800000, 2850000, + 2900000, 2950000, 3000000, 3050000, 3100000, + 3150000, 3200000, 3250000, 3300000, 3350000, + 3400000, 3450000, 3500000, 3550000, 
3600000, +}; + +static const int ldo1_voltages[] = { + 4300000, 4350000, 4400000, 4450000, + 4500000, 4550000, 4600000, 4650000, + 4700000, 4750000, 4800000, 4850000, + 4900000, 4950000, 5000000, 5050000, +}; + +static const int ldo2_voltages[] = { + 1100000, 1150000, 1200000, 1250000, + 1300000, 1700000, 1750000, 1800000, + 1850000, 1900000, 3150000, 3200000, + 3250000, 3300000, 3350000, 3400000, +}; + +static const int ldo_ilimsel[] = { + 400000, 1500000 +}; + +static const int usb_ilimsel[] = { + 200000, 400000, 800000, 1000000 +}; + +#define __MK_FIELD(_reg, _mask, _shift) \ + { .reg = (_reg), .mask = (_mask), .shift = (_shift), } + +static const struct supply_info supply_info[N_REGULATORS] = { + { + .name = "DCDC1", + .flags = FIXED_ILIMSEL, + .n_voltages = ARRAY_SIZE(dcdc1_voltages), + .voltages = dcdc1_voltages, + .fixed_ilimsel = 2400000, + .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, + DCDCDCDC1_EN_SHIFT), + .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, + DCDC_VDCDC1_SHIFT), + }, + { + .name = "DCDC2", + .flags = FIXED_ILIMSEL, + .n_voltages = ARRAY_SIZE(dcdc2_voltages), + .voltages = dcdc2_voltages, + .fixed_ilimsel = 1200000, + .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, + DCDCDCDC2_EN_SHIFT), + .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, + DCDC_VDCDC2_SHIFT), + }, + { + .name = "DCDC3", + .flags = FIXED_ILIMSEL, + .n_voltages = ARRAY_SIZE(dcdc3_voltages), + .voltages = dcdc3_voltages, + .fixed_ilimsel = 1200000, + .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, + DCDCDCDC3_EN_SHIFT), + .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, + DCDC_VDCDC3_SHIFT), + }, + { + .name = "LDO1", + .n_voltages = ARRAY_SIZE(ldo1_voltages), + .voltages = ldo1_voltages, + .n_ilimsels = ARRAY_SIZE(ldo_ilimsel), + .ilimsels = ldo_ilimsel, + .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, + BLOCK_LDO1_SHIFT), + .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK, + LDO1_VSEL_SHIFT), + .ilimsel = __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK, + LDO1_ILIM_SHIFT), + }, + { + .name = "LDO2", + .n_voltages = ARRAY_SIZE(ldo2_voltages), + .voltages = ldo2_voltages, + .n_ilimsels = ARRAY_SIZE(ldo_ilimsel), + .ilimsels = ldo_ilimsel, + .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, + BLOCK_LDO2_SHIFT), + .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK, + LDO2_VSEL_SHIFT), + .ilimsel = __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK, + LDO2_ILIM_SHIFT), + }, + { + .name = "USB", + .flags = FIXED_VOLTAGE, + .fixed_voltage = 5000000, + .n_ilimsels = ARRAY_SIZE(usb_ilimsel), + .ilimsels = usb_ilimsel, + .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, + BLOCK_USB_SHIFT), + .ilimsel = __MK_FIELD(REG_USB, USB_ILIM_MASK, + USB_ILIM_SHIFT), + }, + { + .name = "LCD", + .flags = FIXED_VOLTAGE | FIXED_ILIMSEL, + .fixed_voltage = 5000000, + .fixed_ilimsel = 400000, + .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, + BLOCK_LCD_SHIFT), + }, +}; + +static int list_voltage(struct regulator_dev *rdev, unsigned selector) +{ + const struct supply_info *info; + struct tps6524x *hw; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_VOLTAGE) + return selector ? -EINVAL : info->fixed_voltage; + + return ((selector < info->n_voltages) ? 
+ info->voltages[selector] : -EINVAL); +} + +static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) +{ + const struct supply_info *info; + struct tps6524x *hw; + unsigned i; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_VOLTAGE) + return -EINVAL; + + for (i = 0; i < info->n_voltages; i++) + if (min_uV <= info->voltages[i] && + max_uV >= info->voltages[i]) + break; + + if (i >= info->n_voltages) + i = info->n_voltages - 1; + + *selector = info->voltages[i]; + + return write_field(hw, &info->voltage, i); +} + +static int get_voltage(struct regulator_dev *rdev) +{ + const struct supply_info *info; + struct tps6524x *hw; + int ret; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_VOLTAGE) + return info->fixed_voltage; + + ret = read_field(hw, &info->voltage); + if (ret < 0) + return ret; + if (WARN_ON(ret >= info->n_voltages)) + return -EIO; + + return info->voltages[ret]; +} + +static int set_current_limit(struct regulator_dev *rdev, int min_uA, + int max_uA) +{ + const struct supply_info *info; + struct tps6524x *hw; + int i; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_ILIMSEL) + return -EINVAL; + + for (i = 0; i < info->n_ilimsels; i++) + if (min_uA <= info->ilimsels[i] && + max_uA >= info->ilimsels[i]) + break; + + if (i >= info->n_ilimsels) + return -EINVAL; + + return write_field(hw, &info->ilimsel, i); +} + +static int get_current_limit(struct regulator_dev *rdev) +{ + const struct supply_info *info; + struct tps6524x *hw; + int ret; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_ILIMSEL) + return info->fixed_ilimsel; + + ret = read_field(hw, &info->ilimsel); + if (ret < 0) + return ret; + if (WARN_ON(ret >= info->n_ilimsels)) + return -EIO; + + return info->ilimsels[ret]; +} + +static int enable_supply(struct regulator_dev *rdev) +{ + const struct supply_info *info; + struct tps6524x *hw; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + return write_field(hw, &info->enable, 1); +} + +static int disable_supply(struct regulator_dev *rdev) +{ + const struct supply_info *info; + struct tps6524x *hw; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + return write_field(hw, &info->enable, 0); +} + +static int is_supply_enabled(struct regulator_dev *rdev) +{ + const struct supply_info *info; + struct tps6524x *hw; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + return read_field(hw, &info->enable); +} + +static struct regulator_ops regulator_ops = { + .is_enabled = is_supply_enabled, + .enable = enable_supply, + .disable = disable_supply, + .get_voltage = get_voltage, + .set_voltage = set_voltage, + .list_voltage = list_voltage, + .set_current_limit = set_current_limit, + .get_current_limit = get_current_limit, +}; + +static int __devexit pmic_remove(struct spi_device *spi) +{ + struct tps6524x *hw = spi_get_drvdata(spi); + int i; + + if (!hw) + return 0; + for (i = 0; i < N_REGULATORS; i++) { + if (hw->rdev[i]) + regulator_unregister(hw->rdev[i]); + hw->rdev[i] = NULL; + } + spi_set_drvdata(spi, NULL); + kfree(hw); + return 0; +} + +static int __devinit pmic_probe(struct spi_device *spi) +{ + struct tps6524x *hw; + struct device *dev = &spi->dev; + const struct supply_info *info = supply_info; + struct regulator_init_data *init_data; + int 
ret = 0, i;
+
+ init_data = dev->platform_data;
+ if (!init_data) {
+ dev_err(dev, "could not find regulator platform data\n");
+ return -EINVAL;
+ }
+
+ hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+ if (!hw) {
+ dev_err(dev, "cannot allocate regulator private data\n");
+ return -ENOMEM;
+ }
+ spi_set_drvdata(spi, hw);
+
+ memset(hw, 0, sizeof(struct tps6524x));
+ hw->dev = dev;
+ hw->spi = spi_dev_get(spi);
+ mutex_init(&hw->lock);
+
+ for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
+ hw->desc[i].name = info->name;
+ hw->desc[i].id = i;
+ hw->desc[i].n_voltages = info->n_voltages;
+ hw->desc[i].ops = &regulator_ops;
+ hw->desc[i].type = REGULATOR_VOLTAGE;
+ hw->desc[i].owner = THIS_MODULE;
+
+ if (info->flags & FIXED_VOLTAGE)
+ hw->desc[i].n_voltages = 1;
+
+ hw->rdev[i] = regulator_register(&hw->desc[i], dev,
+ init_data, hw);
+ if (IS_ERR(hw->rdev[i])) {
+ ret = PTR_ERR(hw->rdev[i]);
+ hw->rdev[i] = NULL;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ pmic_remove(spi);
+ return ret;
+}
+
+static struct spi_driver pmic_driver = {
+ .probe = pmic_probe,
+ .remove = __devexit_p(pmic_remove),
+ .driver = {
+ .name = "tps6524x",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pmic_driver_init(void)
+{
+ return spi_register_driver(&pmic_driver);
+}
+module_init(pmic_driver_init);
+
+static void __exit pmic_driver_exit(void)
+{
+ spi_unregister_driver(&pmic_driver);
+}
+module_exit(pmic_driver_exit);
+
+MODULE_DESCRIPTION("TPS6524X PMIC Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tps6524x");
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 6d20b0454a1d..bb04a75a4c98 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -85,7 +85,8 @@ static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
 
 static int __tps6586x_ldo_set_voltage(struct device *parent,
 struct tps6586x_regulator *ri,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
 {
 int val, uV;
 uint8_t mask;
@@ -100,6 +101,8 @@ static int __tps6586x_ldo_set_voltage(struct device *parent,
 
 /* use the first in-range value */
 if (min_uV <= uV && uV <= max_uV) {
+ *selector = val;
+
 val <<= ri->volt_shift;
 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
 
@@ -111,12 +114,13 @@ static int __tps6586x_ldo_set_voltage(struct device *parent,
 }
 
 static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
 {
 struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 struct device *parent = to_tps6586x_dev(rdev);
 
- return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+ return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
 }
 
 static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
@@ -140,13 +144,14 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
 }
 
 static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
 {
 struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 struct device *parent = to_tps6586x_dev(rdev);
 int ret;
 
- ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+ ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
 if (ret)
 return ret;
 
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a57262a4fa6c..bd332cf1cc3f 100644
--- a/drivers/regulator/twl-regulator.c
+++
b/drivers/regulator/twl-regulator.c @@ -329,7 +329,8 @@ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) } static int -twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) +twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) { struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel; @@ -345,9 +346,11 @@ twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) /* REVISIT for VAUX2, first match may not be best/lowest */ /* use the first in-range value */ - if (min_uV <= uV && uV <= max_uV) + if (min_uV <= uV && uV <= max_uV) { + *selector = vsel; return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel); + } } return -EDOM; @@ -389,7 +392,8 @@ static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) } static int -twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) +twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) { struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel; @@ -402,6 +406,7 @@ twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) * mV = 1000mv + 100mv * (vsel - 1) */ vsel = (min_uV/1000 - 1000)/100 + 1; + *selector = vsel; return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel); } diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index dbfaf5945e48..8b0d2c4bde91 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -302,7 +302,7 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) } static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; @@ -314,6 +314,8 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, if (vsel < 0) return vsel; + *selector = vsel; + /* If this value is already set then do a GPIO update if we can */ if (dcdc->dvs_gpio && dcdc->on_vsel == vsel) return wm831x_buckv_set_dvs(rdev, 0); @@ -375,14 +377,14 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel); } -static int wm831x_buckv_get_voltage(struct regulator_dev *rdev) +static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); if (dcdc->dvs_gpio && dcdc->dvs_gpio_state) - return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel); + return dcdc->dvs_vsel; else - return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel); + return dcdc->on_vsel; } /* Current limit options */ @@ -424,7 +426,7 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) static struct regulator_ops wm831x_buckv_ops = { .set_voltage = wm831x_buckv_set_voltage, - .get_voltage = wm831x_buckv_get_voltage, + .get_voltage_sel = wm831x_buckv_get_voltage_sel, .list_voltage = wm831x_buckv_list_voltage, .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, .set_current_limit = wm831x_buckv_set_current_limit, @@ -636,7 +638,7 @@ static int wm831x_buckp_list_voltage(struct regulator_dev *rdev, } static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg, - int min_uV, int max_uV) + int min_uV, int max_uV, int *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; @@ -650,16 +652,20 @@ static int 
wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg, if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV) return -EINVAL; + *selector = vsel; + return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel); } static int wm831x_buckp_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; - return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV); + return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, @@ -667,11 +673,12 @@ static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; + unsigned selector; - return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV); + return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector); } -static int wm831x_buckp_get_voltage(struct regulator_dev *rdev) +static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; @@ -682,12 +689,12 @@ static int wm831x_buckp_get_voltage(struct regulator_dev *rdev) if (val < 0) return val; - return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK); + return val & WM831X_DC3_ON_VSEL_MASK; } static struct regulator_ops wm831x_buckp_ops = { .set_voltage = wm831x_buckp_set_voltage, - .get_voltage = wm831x_buckp_get_voltage, + .get_voltage_sel = wm831x_buckp_get_voltage_sel, .list_voltage = wm831x_buckp_list_voltage, .set_suspend_voltage = wm831x_buckp_set_suspend_voltage, diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index 9edf8f692341..c94fc5b7cd5b 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c @@ -113,7 +113,8 @@ static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev, } static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; @@ -133,16 +134,20 @@ static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg, if (ret < min_uV || ret > max_uV) return -EINVAL; + *selector = vsel; + return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel); } static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_LDO_ON_CONTROL; - return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV); + return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev, @@ -150,11 +155,12 @@ static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev, { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + unsigned int selector; - return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV); + return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector); } -static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev) +static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; 
@@ -167,7 +173,7 @@ static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev) ret &= WM831X_LDO1_ON_VSEL_MASK; - return wm831x_gp_ldo_list_voltage(rdev, ret); + return ret; } static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev) @@ -287,7 +293,7 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev, static struct regulator_ops wm831x_gp_ldo_ops = { .list_voltage = wm831x_gp_ldo_list_voltage, - .get_voltage = wm831x_gp_ldo_get_voltage, + .get_voltage_sel = wm831x_gp_ldo_get_voltage_sel, .set_voltage = wm831x_gp_ldo_set_voltage, .set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage, .get_mode = wm831x_gp_ldo_get_mode, @@ -413,7 +419,8 @@ static int wm831x_aldo_list_voltage(struct regulator_dev *rdev, } static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; @@ -433,16 +440,19 @@ static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg, if (ret < min_uV || ret > max_uV) return -EINVAL; + *selector = vsel; + return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel); } static int wm831x_aldo_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_LDO_ON_CONTROL; - return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV); + return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev, @@ -450,11 +460,12 @@ static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev, { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + unsigned int selector; - return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV); + return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector); } -static int wm831x_aldo_get_voltage(struct regulator_dev *rdev) +static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; @@ -467,7 +478,7 @@ static int wm831x_aldo_get_voltage(struct regulator_dev *rdev) ret &= WM831X_LDO7_ON_VSEL_MASK; - return wm831x_aldo_list_voltage(rdev, ret); + return ret; } static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev) @@ -548,7 +559,7 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev) static struct regulator_ops wm831x_aldo_ops = { .list_voltage = wm831x_aldo_list_voltage, - .get_voltage = wm831x_aldo_get_voltage, + .get_voltage_sel = wm831x_aldo_get_voltage_sel, .set_voltage = wm831x_aldo_set_voltage, .set_suspend_voltage = wm831x_aldo_set_suspend_voltage, .get_mode = wm831x_aldo_get_mode, @@ -666,7 +677,8 @@ static int wm831x_alive_ldo_list_voltage(struct regulator_dev *rdev, static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev, int reg, - int min_uV, int max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; @@ -680,16 +692,20 @@ static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev, if (ret < min_uV || ret > max_uV) return -EINVAL; + *selector = vsel; + return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel); } static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev, - int min_uV, int 
max_uV) + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; - return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV); + return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev, @@ -697,11 +713,12 @@ static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev, { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL; + unsigned selector; - return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV); + return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector); } -static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev) +static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; @@ -714,7 +731,7 @@ static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev) ret &= WM831X_LDO11_ON_VSEL_MASK; - return wm831x_alive_ldo_list_voltage(rdev, ret); + return ret; } static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev) @@ -736,7 +753,7 @@ static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev) static struct regulator_ops wm831x_alive_ldo_ops = { .list_voltage = wm831x_alive_ldo_list_voltage, - .get_voltage = wm831x_alive_ldo_get_voltage, + .get_voltage_sel = wm831x_alive_ldo_get_voltage_sel, .set_voltage = wm831x_alive_ldo_set_voltage, .set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage, .get_status = wm831x_alive_ldo_get_status, diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index fe4b8a8a9dfd..1bcb22c44095 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -360,7 +360,7 @@ int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, EXPORT_SYMBOL_GPL(wm8350_isink_set_flash); static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV, - int max_uV) + int max_uV, unsigned *selector) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); int volt_reg, dcdc = rdev_get_id(rdev), mV, @@ -397,17 +397,18 @@ static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV, return -EINVAL; } + *selector = mV; + /* all DCDCs have same mV bits */ val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK; wm8350_reg_write(wm8350, volt_reg, val | mV); return 0; } -static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev) +static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); int volt_reg, dcdc = rdev_get_id(rdev); - u16 val; switch (dcdc) { case WM8350_DCDC_1: @@ -429,8 +430,7 @@ static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev) } /* all DCDCs have same mV bits */ - val = wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK; - return wm8350_dcdc_val_to_mvolts(val) * 1000; + return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK; } static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev, @@ -754,7 +754,7 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev) } static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV, - int max_uV) + int max_uV, unsigned *selector) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000, @@ -797,17 +797,18 @@ static int 
wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV, return -EINVAL; } + *selector = mV; + /* all LDOs have same mV bits */ val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK; wm8350_reg_write(wm8350, volt_reg, val | mV); return 0; } -static int wm8350_ldo_get_voltage(struct regulator_dev *rdev) +static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); int volt_reg, ldo = rdev_get_id(rdev); - u16 val; switch (ldo) { case WM8350_LDO_1: @@ -827,8 +828,7 @@ static int wm8350_ldo_get_voltage(struct regulator_dev *rdev) } /* all LDOs have same mV bits */ - val = wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK; - return wm8350_ldo_val_to_mvolts(val) * 1000; + return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK; } static int wm8350_ldo_list_voltage(struct regulator_dev *rdev, @@ -1225,7 +1225,7 @@ static int wm8350_ldo_is_enabled(struct regulator_dev *rdev) static struct regulator_ops wm8350_dcdc_ops = { .set_voltage = wm8350_dcdc_set_voltage, - .get_voltage = wm8350_dcdc_get_voltage, + .get_voltage_sel = wm8350_dcdc_get_voltage_sel, .list_voltage = wm8350_dcdc_list_voltage, .enable = wm8350_dcdc_enable, .disable = wm8350_dcdc_disable, @@ -1249,7 +1249,7 @@ static struct regulator_ops wm8350_dcdc2_5_ops = { static struct regulator_ops wm8350_ldo_ops = { .set_voltage = wm8350_ldo_set_voltage, - .get_voltage = wm8350_ldo_get_voltage, + .get_voltage_sel = wm8350_ldo_get_voltage_sel, .list_voltage = wm8350_ldo_list_voltage, .enable = wm8350_ldo_enable, .disable = wm8350_ldo_disable, diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c index 924c7eb29ee9..b42d01cef35a 100644 --- a/drivers/regulator/wm8400-regulator.c +++ b/drivers/regulator/wm8400-regulator.c @@ -67,7 +67,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev) } static int wm8400_ldo_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; @@ -93,6 +93,8 @@ static int wm8400_ldo_set_voltage(struct regulator_dev *dev, val += 0xf; } + *selector = val; + return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev), WM8400_LDO1_VSEL_MASK, val); } @@ -156,7 +158,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev) } static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *selector) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; @@ -171,6 +173,8 @@ static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, return -EINVAL; BUG_ON(850000 + (25000 * val) < min_uV); + *selector = val; + return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_VSEL_MASK, val); } diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index 03713bc66e4a..35b2958d5106 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -86,7 +86,7 @@ static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev, return (selector * 100000) + 2400000; } -static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev) +static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev) { struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); int val; @@ -95,13 +95,11 @@ static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev) if (val < 0) return val; - val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT; - - return 
wm8994_ldo1_list_voltage(rdev, val); + return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT; } static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *s) { struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); int selector, v; @@ -111,6 +109,7 @@ static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev, if (v < 0 || v > max_uV) return -EINVAL; + *s = selector; selector <<= WM8994_LDO1_VSEL_SHIFT; return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1, @@ -124,20 +123,29 @@ static struct regulator_ops wm8994_ldo1_ops = { .enable_time = wm8994_ldo_enable_time, .list_voltage = wm8994_ldo1_list_voltage, - .get_voltage = wm8994_ldo1_get_voltage, + .get_voltage_sel = wm8994_ldo1_get_voltage_sel, .set_voltage = wm8994_ldo1_set_voltage, }; static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev, unsigned int selector) { + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + if (selector > WM8994_LDO2_MAX_SELECTOR) return -EINVAL; - return (selector * 100000) + 900000; + switch (ldo->wm8994->type) { + case WM8994: + return (selector * 100000) + 900000; + case WM8958: + return (selector * 100000) + 1000000; + default: + return -EINVAL; + } } -static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev) +static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev) { struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); int val; @@ -146,22 +154,31 @@ static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev) if (val < 0) return val; - val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT; - - return wm8994_ldo2_list_voltage(rdev, val); + return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT; } static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) + int min_uV, int max_uV, unsigned *s) { struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); int selector, v; - selector = (min_uV - 900000) / 100000; + switch (ldo->wm8994->type) { + case WM8994: + selector = (min_uV - 900000) / 100000; + break; + case WM8958: + selector = (min_uV - 1000000) / 100000; + break; + default: + return -EINVAL; + } + v = wm8994_ldo2_list_voltage(rdev, selector); if (v < 0 || v > max_uV) return -EINVAL; + *s = selector; selector <<= WM8994_LDO2_VSEL_SHIFT; return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2, @@ -175,7 +192,7 @@ static struct regulator_ops wm8994_ldo2_ops = { .enable_time = wm8994_ldo_enable_time, .list_voltage = wm8994_ldo2_list_voltage, - .get_voltage = wm8994_ldo2_get_voltage, + .get_voltage_sel = wm8994_ldo2_get_voltage_sel, .set_voltage = wm8994_ldo2_set_voltage, }; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7e6ce626b7f1..c7ff8df347e7 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -36,6 +36,7 @@ #include #include #include +#include /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include @@ -851,7 +852,7 @@ static void __exit cmos_do_remove(struct device *dev) #ifdef CONFIG_PM -static int cmos_suspend(struct device *dev, pm_message_t mesg) +static int cmos_suspend(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; @@ -899,7 +900,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) */ static inline int cmos_poweroff(struct device *dev) { - return cmos_suspend(dev, PMSG_HIBERNATE); + return cmos_suspend(dev); } static int cmos_resume(struct device *dev) @@ -946,9 +947,9 @@ static int cmos_resume(struct device *dev) return 0; } +static 
SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); + #else -#define cmos_suspend NULL -#define cmos_resume NULL static inline int cmos_poweroff(struct device *dev) { @@ -1078,7 +1079,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp) static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg) { - return cmos_suspend(&pnp->dev, mesg); + return cmos_suspend(&pnp->dev); } static int cmos_pnp_resume(struct pnp_dev *pnp) @@ -1158,8 +1159,9 @@ static struct platform_driver cmos_platform_driver = { .shutdown = cmos_platform_shutdown, .driver = { .name = (char *) driver_name, - .suspend = cmos_suspend, - .resume = cmos_resume, +#ifdef CONFIG_PM + .pm = &cmos_pm_ops, +#endif } }; diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index 657403ebd54a..0ec3f588a255 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c @@ -139,12 +139,13 @@ static int __devinit max6902_probe(struct spi_device *spi) if (IS_ERR(rtc)) return PTR_ERR(rtc); + dev_set_drvdata(&spi->dev, rtc); return 0; } static int __devexit max6902_remove(struct spi_device *spi) { - struct rtc_device *rtc = platform_get_drvdata(spi); + struct rtc_device *rtc = dev_get_drvdata(&spi->dev); rtc_device_unregister(rtc); return 0; diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index f22dee35f330..3f7bc6b9fefa 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c @@ -20,6 +20,7 @@ #include #include #include +#include #define MAX8998_RTC_SEC 0x00 #define MAX8998_RTC_MIN 0x01 @@ -73,6 +74,7 @@ struct max8998_rtc_info { struct i2c_client *rtc; struct rtc_device *rtc_dev; int irq; + bool lp3974_bug_workaround; }; static void max8998_data_to_tm(u8 *data, struct rtc_time *tm) @@ -124,10 +126,16 @@ static int max8998_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct max8998_rtc_info *info = dev_get_drvdata(dev); u8 data[8]; + int ret; max8998_tm_to_data(tm, data); - return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data); + ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data); + + if (info->lp3974_bug_workaround) + msleep(2000); + + return ret; } static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) @@ -163,12 +171,29 @@ static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info) { - return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0); + int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0); + + if (info->lp3974_bug_workaround) + msleep(2000); + + return ret; } static int max8998_rtc_start_alarm(struct max8998_rtc_info *info) { - return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77); + int ret; + u8 alarm0_conf = 0x77; + + /* LP3974 with delay bug chips has rtc alarm bugs with "MONTH" field */ + if (info->lp3974_bug_workaround) + alarm0_conf = 0x57; + + ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf); + + if (info->lp3974_bug_workaround) + msleep(2000); + + return ret; } static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) @@ -187,10 +212,13 @@ static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret < 0) return ret; + if (info->lp3974_bug_workaround) + msleep(2000); + if (alrm->enabled) - return max8998_rtc_start_alarm(info); + ret = max8998_rtc_start_alarm(info); - return 0; + return ret; } static int max8998_rtc_alarm_irq_enable(struct device *dev, @@ -224,6 +252,7 @@ static const struct rtc_class_ops max8998_rtc_ops = { 
static int __devinit max8998_rtc_probe(struct platform_device *pdev) { struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent); + struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev); struct max8998_rtc_info *info; int ret; @@ -249,10 +278,18 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, "rtc-alarm0", info); + if (ret < 0) dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", info->irq, ret); + dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); + if (pdata->rtc_delay) { + info->lp3974_bug_workaround = true; + dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." + " RTC updates will be extremely slow.\n"); + } + return 0; out_rtc: @@ -273,6 +310,12 @@ static int __devexit max8998_rtc_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id max8998_rtc_id[] = { + { "max8998-rtc", TYPE_MAX8998 }, + { "lp3974-rtc", TYPE_LP3974 }, + { } +}; + static struct platform_driver max8998_rtc_driver = { .driver = { .name = "max8998-rtc", @@ -280,6 +323,7 @@ static struct platform_driver max8998_rtc_driver = { }, .probe = max8998_rtc_probe, .remove = __devexit_p(max8998_rtc_remove), + .id_table = max8998_rtc_id, }; static int __init max8998_rtc_init(void) diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 73377b0d65da..e72b523c79a5 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -429,13 +429,14 @@ fail1: fail0: iounmap(rtc_base); fail: - release_resource(mem); + release_mem_region(mem->start, resource_size(mem)); return -EIO; } static int __exit omap_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); + struct resource *mem = dev_get_drvdata(&rtc->dev); device_init_wakeup(&pdev->dev, 0); @@ -447,8 +448,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev) if (omap_rtc_timer != omap_rtc_alarm) free_irq(omap_rtc_alarm, rtc); - release_resource(dev_get_drvdata(&rtc->dev)); rtc_device_unregister(rtc); + iounmap(rtc_base); + release_mem_region(mem->start, resource_size(mem)); return 0; } diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 30a1ca3d08b7..5505bc07e1e7 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -103,7 +103,7 @@ int dasd_scan_partitions(struct dasd_block *block) struct block_device *bdev; bdev = bdget_disk(block->gdp, 0); - if (!bdev || blkdev_get(bdev, FMODE_READ) < 0) + if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0) return -ENODEV; /* * See fs/partition/check.c:register_disk,rescan_partitions diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e8391b89eff4..b7eaff9ca19e 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1835,6 +1835,7 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) * available again. Kick re-detection. */ cdev->private->flags.resuming = 1; + cdev->private->path_new_mask = LPM_ANYPATH; css_schedule_eval(sch->schid); spin_unlock_irq(sch->lock); css_complete_work(); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 09e7a053c844..30b2a820e670 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -841,7 +841,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) } /** - * Emit buffer of a lan comand. + * Emit buffer of a lan command. 
*/ static void lcs_lancmd_timeout(unsigned long data) diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index 46342fee394d..303dde09d294 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -317,7 +317,7 @@ static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) /** * zfcp_cfdc_port_denied - Process "access denied" for port - * @port: The port where the acces has been denied + * @port: The port where the access has been denied * @qual: The FSF status qualifier for the access denied FSF status */ void zfcp_cfdc_port_denied(struct zfcp_port *port, diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index dc5ac6e528c4..a391090a17c5 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -416,7 +416,7 @@ static u8 orc_load_firmware(struct orc_host * host) /* Go back and check they match */ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */ - bios_addr -= 0x1000; /* Reset the BIOS adddress */ + bios_addr -= 0x1000; /* Reset the BIOS address */ for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */ i < 0x1000; /* Firmware code size = 4K */ i++, bios_addr++) { diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index afc9aeba5edb..060ac4bd5a14 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -91,7 +91,7 @@ void aac_fib_map_free(struct aac_dev *dev) * aac_fib_setup - setup the fibs * @dev: Adapter to set up * - * Allocate the PCI space for the fibs, map it and then intialise the + * Allocate the PCI space for the fibs, map it and then initialise the * fib area, the unmapped fib data and also the free list */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq index 5997e7c3a191..1565be9ebd49 100644 --- a/drivers/scsi/aic7xxx_old/aic7xxx.seq +++ b/drivers/scsi/aic7xxx_old/aic7xxx.seq @@ -1178,7 +1178,7 @@ notFound: /* * Retrieve an SCB by SCBID first searching the disconnected list falling * back to DMA'ing the SCB down from the host. This routine assumes that - * ARG_1 is the SCBID of interrest and that SINDEX is the position in the + * ARG_1 is the SCBID of interest and that SINDEX is the position in the * disconnected list to start the search from. If SINDEX is SCB_LIST_NULL, * we go directly to the host for the SCB. */ diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h index 28aaf349c111..40273a747d29 100644 --- a/drivers/scsi/aic94xx/aic94xx_reg_def.h +++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h @@ -1689,7 +1689,7 @@ #define PHY_START_CAL 0x01 /* - * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC) + * HST_PCIX2 Registers, Address Range: (0x00-0xFC) */ #define PCIX_REG_BASE_ADR 0xB8040000 @@ -1802,7 +1802,7 @@ #define PCIC_TP_CTRL 0xFC /* - * EXSI Registers, Addresss Range: (0x00-0xFC) + * EXSI Registers, Address Range: (0x00-0xFC) */ #define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index c43698b1cb64..29593275201a 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -867,7 +867,7 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, * resources they have with this SCB, and then call this one at the * end of their timeout function. To do this, one should initialize * the ascb->timer.{function, data, expires} prior to calling the post - * funcion. The timer is started by the post function. 
+ * function. The timer is started by the post function. */ void asd_ascb_timedout(unsigned long data) { diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c index 74374618010c..390168f62a13 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.c +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -797,7 +797,7 @@ static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) int j; /* Start from Page 1 of Mode 0 and 1. */ moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; - /* All the fields of page 1 can be intialized to 0. */ + /* All the fields of page 1 can be initialized to 0. */ for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); } @@ -938,7 +938,7 @@ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) asd_write_reg_dword(asd_ha, SCBPRO, 0); asd_write_reg_dword(asd_ha, CSEQCON, 0); - /* Intialize CSEQ Mode 11 Interrupt Vectors. + /* Initialize CSEQ Mode 11 Interrupt Vectors. * The addresses are 16 bit wide and in dword units. * The values of their macros are in byte units. * Thus we have to divide by 4. */ @@ -961,7 +961,7 @@ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); for (i = 0; i < 8; i++) { - /* Intialize Mode n Link m Interrupt Enable. */ + /* Initialize Mode n Link m Interrupt Enable. */ asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); /* Initialize Mode n Request Mailbox. */ asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 9c410b21db6d..c0353cdca929 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -1838,7 +1838,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) case BFA_IOIM_SM_ABORT: /* - * IO is alraedy being cleaned up implicitly + * IO is already being cleaned up implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; break; diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 4e2eb92ba028..43fa986bb586 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -5646,7 +5646,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) switch (status) { case BFA_STATUS_OK: /* - * Initialiaze the V-Port fields + * Initialize the V-Port fields */ __vport_fcid(vport) = vport->lps->lp_pid; vport->vport_stats.fdisc_accepts++; diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 8f1b5c8bf903..b0f8523e665f 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -3796,7 +3796,7 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, * adapter_add_device - Adds the device instance to the adaptor instance. * * @acb: The adapter device to be updated - * @dcb: A newly created and intialised device instance to add. + * @dcb: A newly created and initialised device instance to add. **/ static void adapter_add_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) @@ -4498,7 +4498,7 @@ static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb) * init_adapter - Grab the resource for the card, setup the adapter * information, set the card into a known state, create the various * tables etc etc. This basically gets all adapter information all up - * to date, intialised and gets the chip in sync with it. + * to date, initialised and gets the chip in sync with it. 
* * @host: This hosts adapter structure * @io_port: The base I/O port @@ -4789,7 +4789,7 @@ static void banner_display(void) * that it finds in the system. The pci_dev strcuture indicates which * instance we are being called from. * - * @dev: The PCI device to intialize. + * @dev: The PCI device to initialize. * @id: Looks like a pointer to the entry in our pci device table * that was actually matched by the PCI subsystem. * @@ -4860,7 +4860,7 @@ fail: * dc395x_remove_one - Called to remove a single instance of the * adapter. * - * @dev: The PCI device to intialize. + * @dev: The PCI device to initialize. **/ static void __devexit dc395x_remove_one(struct pci_dev *dev) { diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d3c5905b22ec..9c5c8be72231 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -7515,16 +7515,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; volatile u32 int_reg; - int rc; ENTER; ioa_cfg->pdev->state_saved = true; - rc = pci_restore_state(ioa_cfg->pdev); - - if (rc != PCIBIOS_SUCCESSFUL) { - ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); - return IPR_RC_JOB_CONTINUE; - } + pci_restore_state(ioa_cfg->pdev); if (ipr_set_pcix_cmd_reg(ioa_cfg)) { ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index cdc06cda76e5..5962d1a5a674 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1250,7 +1250,7 @@ static void fc_lun_reset_send(unsigned long data) /** * fc_lun_reset() - Send a LUN RESET command to a device * and wait for the reply - * @lport: The local port to sent the comand on + * @lport: The local port to sent the command on * @fsp: The FCP packet that identifies the LUN to be reset * @id: The SCSI command ID * @lun: The LUN ID to be reset diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c06491b5862f..3512abb8a587 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1335,7 +1335,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ } /** - * lpfc_param_init - Intializes a cfg attribute + * lpfc_param_init - Initializes a cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f9f160ab2ee9..bb015960dbc9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2852,7 +2852,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) if (unlikely(!fcf_record)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "2554 Could not allocate memmory for " + "2554 Could not allocate memory for " "fcf record\n"); rc = -ENODEV; goto out; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 462242dcdd0a..6d0b36aa3389 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -8071,7 +8071,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) * the HBA. 
*/ - /* HBA interrupt will be diabled after this call */ + /* HBA interrupt will be disabled after this call */ lpfc_sli_hba_down(phba); /* Stop kthread signal shall trigger work_done one more time */ kthread_stop(phba->worker_thread); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 634b2fea9c4d..a359d2b873ce 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10172,7 +10172,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) * lpfc_sli4_queue_free - free a queue structure and associated memory * @queue: The queue structure to free. * - * This function frees a queue structure and the DMAable memeory used for + * This function frees a queue structure and the DMAable memory used for * the host resident queue. This function must be called after destroying the * queue on the HBA. **/ diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index f5644745e24e..853411911b2e 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -13,7 +13,7 @@ */ /* - * Comand coalescing - This feature allows the driver to be able to combine + * Command coalescing - This feature allows the driver to be able to combine * two or more commands and issue as one command in order to boost I/O * performance. Useful if the nature of the I/O is sequential. It is not very * useful for random natured I/Os. diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index a7008c0c24f9..25506c777381 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -224,7 +224,7 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, { int err; - /* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */ + /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */ mutex_lock(&mraid_mm_mutex); err = mraid_mm_ioctl(filep, cmd, arg); mutex_unlock(&mraid_mm_mutex); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index f8c86b28f03f..b95285f3383f 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -603,7 +603,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) #endif intx: - /* intialize the INT-X interrupt */ + /* initialize the INT-X interrupt */ rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 300d59f389da..321cf3ae8630 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -2228,12 +2228,7 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd) /* Once either bist or pci reset is done, restore PCI config * space. If this fails, proceed with hard reset again */ - if (pci_restore_state(pinstance->pdev)) { - pmcraid_info("config-space error resetting again\n"); - pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; - pmcraid_reset_alert(cmd); - break; - } + pci_restore_state(pinstance->pdev); /* fail all pending commands */ pmcraid_fail_outstanding_cmds(pinstance); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 501f67bef719..9045c52abd25 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1977,8 +1977,7 @@ EXPORT_SYMBOL(scsi_mode_sense); * in. * * Returns zero if unsuccessful or an error if TUR failed. For - * removable media, a return of NOT_READY or UNIT_ATTENTION is - * translated to success, with the ->changed flag updated. + * removable media, UNIT_ATTENTION sets ->changed flag. 
**/ int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, @@ -2005,16 +2004,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, } while (scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION && --retries); - if (!sshdr) - /* could not allocate sense buffer, so can't process it */ - return result; - - if (sdev->removable && scsi_sense_valid(sshdr) && - (sshdr->sense_key == UNIT_ATTENTION || - sshdr->sense_key == NOT_READY)) { - sdev->changed = 1; - result = 0; - } if (!sshdr_external) kfree(sshdr); return result; diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index d53e6503c6d5..a2ed201885ae 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -477,7 +477,7 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver); /** - * scsi_netlink_init - Called by SCSI subsystem to intialize + * scsi_netlink_init - Called by SCSI subsystem to initialize * the SCSI transport netlink interface * **/ diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 4c68d36f9ac2..490ce213204e 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -864,13 +864,15 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) error = device_add(&sdev->sdev_gendev); if (error) { - printk(KERN_INFO "error 1\n"); + sdev_printk(KERN_INFO, sdev, + "failed to add device: %d\n", error); return error; } device_enable_async_suspend(&sdev->sdev_dev); error = device_add(&sdev->sdev_dev); if (error) { - printk(KERN_INFO "error 2\n"); + sdev_printk(KERN_INFO, sdev, + "failed to add class device: %d\n", error); device_del(&sdev->sdev_gendev); return error; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 365024b0c407..e56730214c05 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -990,30 +990,51 @@ out: static void set_media_not_present(struct scsi_disk *sdkp) { - sdkp->media_present = 0; - sdkp->capacity = 0; - sdkp->device->changed = 1; + if (sdkp->media_present) + sdkp->device->changed = 1; + + if (sdkp->device->removable) { + sdkp->media_present = 0; + sdkp->capacity = 0; + } +} + +static int media_not_present(struct scsi_disk *sdkp, + struct scsi_sense_hdr *sshdr) +{ + if (!scsi_sense_valid(sshdr)) + return 0; + + /* not invoked for commands that could return deferred errors */ + switch (sshdr->sense_key) { + case UNIT_ATTENTION: + case NOT_READY: + /* medium not present */ + if (sshdr->asc == 0x3A) { + set_media_not_present(sdkp); + return 1; + } + } + return 0; } /** - * sd_media_changed - check if our medium changed - * @disk: kernel device descriptor + * sd_check_events - check media events + * @disk: kernel device descriptor + * @clearing: disk events currently being cleared * - * Returns 0 if not applicable or no change; 1 if change + * Returns mask of DISK_EVENT_*. * * Note: this function is invoked from the block subsystem. **/ -static int sd_media_changed(struct gendisk *disk) +static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; struct scsi_sense_hdr *sshdr = NULL; int retval; - SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n")); - - if (!sdp->removable) - return 0; + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); /* * If the device is offline, don't send any commands - just pretend as @@ -1043,48 +1064,32 @@ static int sd_media_changed(struct gendisk *disk) sshdr); } - /* - * Unable to test, unit probably not ready. 
This usually - * means there is no disc in the drive. Mark as changed, - * and we will figure it out later once the drive is - * available again. - */ - if (retval || (scsi_sense_valid(sshdr) && - /* 0x3a is medium not present */ - sshdr->asc == 0x3a)) { + /* failed to execute TUR, assume media not present */ + if (host_byte(retval)) { set_media_not_present(sdkp); goto out; } + if (media_not_present(sdkp, sshdr)) + goto out; + /* * For removable scsi disk we have to recognise the presence - * of a disk in the drive. This is kept in the struct scsi_disk - * struct and tested at open ! Daniel Roche (dan@lectra.fr) + * of a disk in the drive. */ + if (!sdkp->media_present) + sdp->changed = 1; sdkp->media_present = 1; - out: /* - * Report a media change under the following conditions: + * sdp->changed is set under the following conditions: * - * Medium is present now and wasn't present before. - * Medium wasn't present before and is present now. - * Medium was present at all times, but it changed while - * we weren't looking (sdp->changed is set). - * - * If there was no medium before and there is no medium now then - * don't report a change, even if a medium was inserted and removed - * while we weren't looking. + * Medium present state has changed in either direction. + * Device has indicated UNIT_ATTENTION. */ - retval = (sdkp->media_present != sdkp->previous_state || - (sdkp->media_present && sdp->changed)); - if (retval) - sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL); - sdkp->previous_state = sdkp->media_present; - - /* sdp->changed indicates medium was changed or is not present */ - sdp->changed = !sdkp->media_present; kfree(sshdr); + retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; + sdp->changed = 0; return retval; } @@ -1177,7 +1182,7 @@ static const struct block_device_operations sd_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = sd_compat_ioctl, #endif - .media_changed = sd_media_changed, + .check_events = sd_check_events, .revalidate_disk = sd_revalidate_disk, .unlock_native_capacity = sd_unlock_native_capacity, }; @@ -1320,23 +1325,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) return good_bytes; } -static int media_not_present(struct scsi_disk *sdkp, - struct scsi_sense_hdr *sshdr) -{ - - if (!scsi_sense_valid(sshdr)) - return 0; - /* not invoked for commands that could return deferred errors */ - if (sshdr->sense_key != NOT_READY && - sshdr->sense_key != UNIT_ATTENTION) - return 0; - if (sshdr->asc != 0x3A) /* medium not present */ - return 0; - - set_media_not_present(sdkp); - return 1; -} - /* * spinup disk - called only in sd_revalidate_disk() */ @@ -1511,7 +1499,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, */ if (sdp->removable && sense_valid && sshdr->sense_key == NOT_READY) - sdp->changed = 1; + set_media_not_present(sdkp); /* * We used to set media_present to 0 here to indicate no media @@ -2397,8 +2385,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie) gd->driverfs_dev = &sdp->sdev_gendev; gd->flags = GENHD_FL_EXT_DEVT; - if (sdp->removable) + if (sdp->removable) { gd->flags |= GENHD_FL_REMOVABLE; + gd->events |= DISK_EVENT_MEDIA_CHANGE; + } add_disk(gd); sd_dif_config_host(sdkp); @@ -2480,7 +2470,6 @@ static int sd_probe(struct device *dev) sdkp->disk = gd; sdkp->index = index; atomic_set(&sdkp->openers, 0); - sdkp->previous_state = 1; if (!sdp->request_queue->rq_timeout) { if (sdp->type != TYPE_MOD) diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 55488faf0815..c9d8f6ca49e2 100644 --- 
a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -55,7 +55,6 @@ struct scsi_disk { u8 media_present; u8 write_prot; u8 protection_type;/* Data Integrity Field */ - unsigned previous_state : 1; unsigned ATO : 1; /* state of disk ATO bit */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index d7b383c96d5d..aefadc6a1607 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -104,14 +104,15 @@ static void sr_release(struct cdrom_device_info *); static void get_sectorsize(struct scsi_cd *); static void get_capabilities(struct scsi_cd *); -static int sr_media_change(struct cdrom_device_info *, int); +static unsigned int sr_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot); static int sr_packet(struct cdrom_device_info *, struct packet_command *); static struct cdrom_device_ops sr_dops = { .open = sr_open, .release = sr_release, .drive_status = sr_drive_status, - .media_changed = sr_media_change, + .check_events = sr_check_events, .tray_move = sr_tray_move, .lock_door = sr_lock_door, .select_speed = sr_select_speed, @@ -165,90 +166,92 @@ static void scsi_cd_put(struct scsi_cd *cd) mutex_unlock(&sr_ref_mutex); } -/* identical to scsi_test_unit_ready except that it doesn't - * eat the NOT_READY returns for removable media */ -int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr) +static unsigned int sr_get_events(struct scsi_device *sdev) { - int retries = MAX_RETRIES; - int the_result; - u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 }; + u8 buf[8]; + u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION, + 1, /* polled */ + 0, 0, /* reserved */ + 1 << 4, /* notification class: media */ + 0, 0, /* reserved */ + 0, sizeof(buf), /* allocation length */ + 0, /* control */ + }; + struct event_header *eh = (void *)buf; + struct media_event_desc *med = (void *)(buf + 4); + struct scsi_sense_hdr sshdr; + int result; - /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION - * conditions are gone, or a timeout happens - */ - do { - the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, - 0, sshdr, SR_TIMEOUT, - retries--, NULL); - if (scsi_sense_valid(sshdr) && - sshdr->sense_key == UNIT_ATTENTION) - sdev->changed = 1; - - } while (retries > 0 && - (!scsi_status_is_good(the_result) || - (scsi_sense_valid(sshdr) && - sshdr->sense_key == UNIT_ATTENTION))); - return the_result; + result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf), + &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL); + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION) + return DISK_EVENT_MEDIA_CHANGE; + + if (result || be16_to_cpu(eh->data_len) < sizeof(*med)) + return 0; + + if (eh->nea || eh->notification_class != 0x4) + return 0; + + if (med->media_event_code == 1) + return DISK_EVENT_EJECT_REQUEST; + else if (med->media_event_code == 2) + return DISK_EVENT_MEDIA_CHANGE; + return 0; } /* - * This function checks to see if the media has been changed in the - * CDROM drive. It is possible that we have already sensed a change, - * or the drive may have sensed one and not yet reported it. We must - * be ready for either case. This function always reports the current - * value of the changed bit. If flag is 0, then the changed bit is reset. - * This function could be done as an ioctl, but we would need to have - * an inode for that to work, and we do not always have one. 
+ * This function checks to see if the media has been changed or eject + * button has been pressed. It is possible that we have already + * sensed a change, or the drive may have sensed one and not yet + * reported it. The past events are accumulated in sdev->changed and + * returned together with the current state. */ - -static int sr_media_change(struct cdrom_device_info *cdi, int slot) +static unsigned int sr_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot) { struct scsi_cd *cd = cdi->handle; - int retval; - struct scsi_sense_hdr *sshdr; + bool last_present; + struct scsi_sense_hdr sshdr; + unsigned int events; + int ret; - if (CDSL_CURRENT != slot) { - /* no changer support */ - return -EINVAL; - } + /* no changer support */ + if (CDSL_CURRENT != slot) + return 0; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); - retval = sr_test_unit_ready(cd->device, sshdr); - if (retval || (scsi_sense_valid(sshdr) && - /* 0x3a is medium not present */ - sshdr->asc == 0x3a)) { - /* Media not present or unable to test, unit probably not - * ready. This usually means there is no disc in the drive. - * Mark as changed, and we will figure it out later once - * the drive is available again. - */ - cd->device->changed = 1; - /* This will force a flush, if called from check_disk_change */ - retval = 1; - goto out; - }; + events = sr_get_events(cd->device); + /* + * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE + * is being cleared. Note that there are devices which hang + * if asked to execute TUR repeatedly. + */ + if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) + goto skip_tur; + + /* let's see whether the media is there with TUR */ + last_present = cd->media_present; + ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); + + /* + * Media is considered to be present if TUR succeeds or fails with + * sense data indicating something other than media-not-present + * (ASC 0x3a). + */ + cd->media_present = scsi_status_is_good(ret) || + (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a); - retval = cd->device->changed; - cd->device->changed = 0; - /* If the disk changed, the capacity will now be different, - * so we force a re-read of this information */ - if (retval) { - /* check multisession offset etc */ - sr_cd_check(cdi); - get_sectorsize(cd); + if (last_present != cd->media_present) + events |= DISK_EVENT_MEDIA_CHANGE; +skip_tur: + if (cd->device->changed) { + events |= DISK_EVENT_MEDIA_CHANGE; + cd->device->changed = 0; } -out: - /* Notify userspace, that media has changed. */ - if (retval != cd->previous_state) - sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE, - GFP_KERNEL); - cd->previous_state = retval; - kfree(sshdr); - - return retval; + return events; } - + /* * sr_done is the interrupt routine for the device driver. 
* @@ -533,10 +536,25 @@ out: return ret; } -static int sr_block_media_changed(struct gendisk *disk) +static unsigned int sr_block_check_events(struct gendisk *disk, + unsigned int clearing) { struct scsi_cd *cd = scsi_cd(disk); - return cdrom_media_changed(&cd->cdi); + return cdrom_check_events(&cd->cdi, clearing); +} + +static int sr_block_revalidate_disk(struct gendisk *disk) +{ + struct scsi_cd *cd = scsi_cd(disk); + struct scsi_sense_hdr sshdr; + + /* if the unit is not ready, nothing more to do */ + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) + return 0; + + sr_cd_check(&cd->cdi); + get_sectorsize(cd); + return 0; } static const struct block_device_operations sr_bdops = @@ -545,7 +563,8 @@ static const struct block_device_operations sr_bdops = .open = sr_block_open, .release = sr_block_release, .ioctl = sr_block_ioctl, - .media_changed = sr_block_media_changed, + .check_events = sr_block_check_events, + .revalidate_disk = sr_block_revalidate_disk, /* * No compat_ioctl for now because sr_block_ioctl never * seems to pass arbitary ioctls down to host drivers. @@ -618,6 +637,7 @@ static int sr_probe(struct device *dev) sprintf(disk->disk_name, "sr%d", minor); disk->fops = &sr_bdops; disk->flags = GENHD_FL_CD; + disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); @@ -627,7 +647,7 @@ static int sr_probe(struct device *dev) cd->disk = disk; cd->capacity = 0x1fffff; cd->device->changed = 1; /* force recheck CD type */ - cd->previous_state = 1; + cd->media_present = 1; cd->use = 1; cd->readcd_known = 0; cd->readcd_cdda = 0; @@ -780,7 +800,7 @@ static void get_capabilities(struct scsi_cd *cd) } /* eat unit attentions */ - sr_test_unit_ready(cd->device, &sshdr); + scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 1e144dfdbd4b..e036f1dc83c8 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -40,7 +40,7 @@ typedef struct scsi_cd { unsigned xa_flag:1; /* CD has XA sectors ? 
*/ unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ unsigned readcd_cdda:1; /* reading audio data using READ_CD */ - unsigned previous_state:1; /* media has changed */ + unsigned media_present:1; /* media is present */ struct cdrom_device_info cdi; /* We hold gendisk and scsi_device references on probe and use * the refs on this kref to decide when to release them */ @@ -61,7 +61,6 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); int sr_is_xa(Scsi_CD *); -int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr); /* sr_vendor.c */ void sr_vendor_init(Scsi_CD *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 3cd8ffbad577..8be30554119b 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -307,7 +307,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot) /* we have no changer support */ return -EINVAL; } - if (0 == sr_test_unit_ready(cd->device, &sshdr)) + if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) return CDS_DISC_OK; /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */ diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 6b97ded9d45d..b4543f575f46 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1866,7 +1866,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) * * This routine is similar to sym_set_workarounds(), except * that, at this point, we already know that the device was - * successfully intialized at least once before, and so most + * successfully initialized at least once before, and so most * of the steps taken there are un-needed here. */ static void sym2_reset_workarounds(struct pci_dev *pdev) diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 3892666b5fbd..2a1d52fb4936 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -1732,6 +1732,11 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, port); + if (port->rs485.flags & SER_RS485_ENABLED) { + UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL); + UART_PUT_CR(&port->uart, ATMEL_US_RTSEN); + } + return 0; err_add_port: diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index c291b3add1d2..92c91c83edde 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -3,7 +3,7 @@ * * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) * - * Copyright (C) 2002 - 2008 Paul Mundt + * Copyright (C) 2002 - 2011 Paul Mundt * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). 
* * based off of the old drivers/char/sh-sci.c by: @@ -81,14 +81,22 @@ struct sci_port { struct timer_list break_timer; int break_flag; + /* SCSCR initialization */ + unsigned int scscr; + + /* SCBRR calculation algo */ + unsigned int scbrr_algo_id; + /* Interface clock */ struct clk *iclk; /* Function clock */ struct clk *fclk; struct list_head node; + struct dma_chan *chan_tx; struct dma_chan *chan_rx; + #ifdef CONFIG_SERIAL_SH_SCI_DMA struct device *dma_dev; unsigned int slave_tx; @@ -415,9 +423,9 @@ static void sci_transmit_chars(struct uart_port *port) if (!(status & SCxSR_TDxE(port))) { ctrl = sci_in(port, SCSCR); if (uart_circ_empty(xmit)) - ctrl &= ~SCI_CTRL_FLAGS_TIE; + ctrl &= ~SCSCR_TIE; else - ctrl |= SCI_CTRL_FLAGS_TIE; + ctrl |= SCSCR_TIE; sci_out(port, SCSCR, ctrl); return; } @@ -459,7 +467,7 @@ static void sci_transmit_chars(struct uart_port *port) sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); } - ctrl |= SCI_CTRL_FLAGS_TIE; + ctrl |= SCSCR_TIE; sci_out(port, SCSCR, ctrl); } } @@ -708,7 +716,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) disable_irq_nosync(irq); scr |= 0x4000; } else { - scr &= ~SCI_CTRL_FLAGS_RIE; + scr &= ~SCSCR_RIE; } sci_out(port, SCSCR, scr); /* Clear current interrupt */ @@ -777,6 +785,18 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr) return IRQ_HANDLED; } +static inline unsigned long port_rx_irq_mask(struct uart_port *port) +{ + /* + * Not all ports (such as SCIFA) will support REIE. Rather than + * special-casing the port type, we check the port initialization + * IRQ enable mask to see whether the IRQ is desired at all. If + * it's unset, it's logically inferred that there's no point in + * testing for it. + */ + return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE); +} + static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) { unsigned short ssr_status, scr_status, err_enabled; @@ -786,22 +806,25 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) ssr_status = sci_in(port, SCxSR); scr_status = sci_in(port, SCSCR); - err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); + err_enabled = scr_status & port_rx_irq_mask(port); /* Tx Interrupt */ - if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && + if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) && !s->chan_tx) ret = sci_tx_interrupt(irq, ptr); + /* * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / * DR flags */ if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && - (scr_status & SCI_CTRL_FLAGS_RIE)) + (scr_status & SCSCR_RIE)) ret = sci_rx_interrupt(irq, ptr); + /* Error Interrupt */ if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) ret = sci_er_interrupt(irq, ptr); + /* Break Interrupt */ if ((ssr_status & SCxSR_BRK(port)) && err_enabled) ret = sci_br_interrupt(irq, ptr); @@ -951,7 +974,7 @@ static void sci_dma_tx_complete(void *arg) schedule_work(&s->work_tx); } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { u16 ctrl = sci_in(port, SCSCR); - sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE); + sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); } spin_unlock_irqrestore(&port->lock, flags); @@ -1214,14 +1237,16 @@ static void sci_start_tx(struct uart_port *port) if (new != scr) sci_out(port, SCSCR, new); } + if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) schedule_work(&s->work_tx); #endif + if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, 
SCSCR); - sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE); + sci_out(port, SCSCR, ctrl | SCSCR_TIE); } } @@ -1231,20 +1256,24 @@ static void sci_stop_tx(struct uart_port *port) /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, SCSCR); + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x8000; - ctrl &= ~SCI_CTRL_FLAGS_TIE; + + ctrl &= ~SCSCR_TIE; + sci_out(port, SCSCR, ctrl); } static void sci_start_rx(struct uart_port *port) { - unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; + unsigned short ctrl; + + ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port); - /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ - ctrl |= sci_in(port, SCSCR); if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x4000; + sci_out(port, SCSCR, ctrl); } @@ -1252,11 +1281,13 @@ static void sci_stop_rx(struct uart_port *port) { unsigned short ctrl; - /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, SCSCR); + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x4000; - ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE); + + ctrl &= ~port_rx_irq_mask(port); + sci_out(port, SCSCR, ctrl); } @@ -1296,7 +1327,7 @@ static void rx_timer_fn(unsigned long arg) scr &= ~0x4000; enable_irq(s->irqs[1]); } - sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); + sci_out(port, SCSCR, scr | SCSCR_RIE); dev_dbg(port->dev, "DMA Rx timed out\n"); schedule_work(&s->work_rx); } @@ -1442,12 +1473,31 @@ static void sci_shutdown(struct uart_port *port) s->disable(port); } +static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, + unsigned long freq) +{ + switch (algo_id) { + case SCBRR_ALGO_1: + return ((freq + 16 * bps) / (16 * bps) - 1); + case SCBRR_ALGO_2: + return ((freq + 16 * bps) / (32 * bps) - 1); + case SCBRR_ALGO_3: + return (((freq * 2) + 16 * bps) / (16 * bps) - 1); + case SCBRR_ALGO_4: + return (((freq * 2) + 16 * bps) / (32 * bps) - 1); + case SCBRR_ALGO_5: + return (((freq * 1000 / 32) / bps) - 1); + } + + /* Warn, but use a safe default */ + WARN_ON(1); + return ((freq + 16 * bps) / (32 * bps) - 1); +} + static void sci_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { -#ifdef CONFIG_SERIAL_SH_SCI_DMA struct sci_port *s = to_sci_port(port); -#endif unsigned int status, baud, smr_val, max_baud; int t = -1; u16 scfcr = 0; @@ -1464,7 +1514,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, max_baud); if (likely(baud && port->uartclk)) - t = SCBRR_VALUE(baud, port->uartclk); + t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk); do { status = sci_in(port, SCxSR); @@ -1490,7 +1540,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_out(port, SCSMR, smr_val); dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, - SCSCR_INIT(port)); + s->scscr); if (t > 0) { if (t >= 256) { @@ -1506,7 +1556,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_init_pins(port, termios->c_cflag); sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? 
SCFCR_MCE : 0)); - sci_out(port, SCSCR, SCSCR_INIT(port)); + sci_out(port, SCSCR, s->scscr); #ifdef CONFIG_SERIAL_SH_SCI_DMA /* @@ -1679,9 +1729,11 @@ static int __devinit sci_init_single(struct platform_device *dev, port->mapbase = p->mapbase; port->membase = p->membase; - port->irq = p->irqs[SCIx_TXI_IRQ]; - port->flags = p->flags; - sci_port->type = port->type = p->type; + port->irq = p->irqs[SCIx_TXI_IRQ]; + port->flags = p->flags; + sci_port->type = port->type = p->type; + sci_port->scscr = p->scscr; + sci_port->scbrr_algo_id = p->scbrr_algo_id; #ifdef CONFIG_SERIAL_SH_SCI_DMA sci_port->dma_dev = p->dma_dev; diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 4bc614e4221c..b223d6cbf33a 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -15,27 +15,17 @@ defined(CONFIG_CPU_SUBTYPE_SH7709) # define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ # define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ -# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7705) # define SCIF0 0xA4400000 # define SCIF2 0xA4410000 -# define SCSMR_Ir 0xA44A0000 -# define IRDA_SCIF SCIF0 # define SCPCR 0xA4000116 # define SCPDR 0xA4000136 - -/* Set the clock source, - * SCIF2 (0xA4410000) -> External clock, SCK pin used as clock input - * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output - */ -# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_ARCH_SH73A0) || \ defined(CONFIG_ARCH_SH7367) || \ defined(CONFIG_ARCH_SH7377) || \ defined(CONFIG_ARCH_SH7372) -# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ # define PORT_PTCR 0xA405011EUL # define PORT_PVCR 0xA4050122UL # define SCIF_ORER 0x0200 /* overrun error bit */ @@ -43,7 +33,6 @@ # define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ @@ -53,39 +42,31 @@ # define SCSPTR1 0xffe0001c /* 8 bit SCI */ # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? 
\ - 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \ - 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ ) #elif defined(CONFIG_CPU_SUBTYPE_SH7760) # define SCSPTR0 0xfe600024 /* 16 bit SCIF */ # define SCSPTR1 0xfe610024 /* 16 bit SCIF */ # define SCSPTR2 0xfe620024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) # define SCSPTR0 0xA4400000 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ # define PACR 0xa4050100 # define PBCR 0xa4050102 -# define SCSCR_INIT(port) 0x3B #elif defined(CONFIG_CPU_SUBTYPE_SH7343) # define SCSPTR0 0xffe00010 /* 16 bit SCIF */ # define SCSPTR1 0xffe10010 /* 16 bit SCIF */ # define SCSPTR2 0xffe20010 /* 16 bit SCIF */ # define SCSPTR3 0xffe30010 /* 16 bit SCIF */ -# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7722) # define PADR 0xA4050120 # define PSDR 0xA405013e # define PWDR 0xA4050166 # define PSCR 0xA405011E # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7366) # define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ # define SCSPTR0 SCPDR0 # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7723) # define SCSPTR0 0xa4050160 # define SCSPTR1 0xa405013e @@ -94,62 +75,38 @@ # define SCSPTR4 0xa4050128 # define SCSPTR5 0xa4050128 # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7724) # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) ((port)->type == PORT_SCIFA ? 
\ - 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \ - 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ ) #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) -# define SCIF_BASE_ADDR 0x01030000 -# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR # define SCIF_PTR2_OFFS 0x0000020 -# define SCIF_LSR2_OFFS 0x0000024 # define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */ -# define SCLSR2 ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_H83007) || defined(CONFIG_H83068) -# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) #elif defined(CONFIG_H8S2678) -# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) #elif defined(CONFIG_CPU_SUBTYPE_SH7757) # define SCSPTR0 0xfe4b0020 # define SCSPTR1 0xfe4b0020 # define SCSPTR2 0xfe4b0020 # define SCIF_ORER 0x0001 -# define SCSCR_INIT(port) 0x38 # define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH7763) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ # define SCSPTR1 0xffe08024 /* 16 bit SCIF */ # define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7770) # define SCSPTR0 0xff923020 /* 16 bit SCIF */ # define SCSPTR1 0xff924020 /* 16 bit SCIF */ # define SCSPTR2 0xff925020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7780) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ # define SCSPTR1 0xffe10024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ - -#if defined(CONFIG_SH_SH2007) -/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=0 */ -# define SCSCR_INIT(port) 0x38 -#else -/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=1 */ -# define SCSCR_INIT(port) 0x3a -#endif - #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SH7786) # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ @@ -159,7 +116,6 @@ # define SCSPTR4 0xffee0024 /* 16 bit SCIF */ # define SCSPTR5 0xffef0024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ -# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ defined(CONFIG_CPU_SUBTYPE_SH7203) || \ defined(CONFIG_CPU_SUBTYPE_SH7206) || \ @@ -174,52 +130,21 @@ # define SCSPTR6 0xfffeB020 /* 16 bit SCIF */ # define SCSPTR7 0xfffeB820 /* 16 bit SCIF */ # endif -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7619) # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ # define SCSPTR1 0xf8410020 /* 16 bit SCIF */ # define SCSPTR2 0xf8420020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SHX3) # define SCSPTR0 0xffc30020 /* 16 bit SCIF */ # define SCSPTR1 0xffc40020 /* 16 bit SCIF */ # define SCSPTR2 0xffc50020 /* 16 bit SCIF */ # define SCSPTR3 0xffc60020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ -# define SCSCR_INIT(port) 
0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #else # error CPU subtype not defined #endif -/* SCSCR */ -#define SCI_CTRL_FLAGS_TIE 0x80 /* all */ -#define SCI_CTRL_FLAGS_RIE 0x40 /* all */ -#define SCI_CTRL_FLAGS_TE 0x20 /* all */ -#define SCI_CTRL_FLAGS_RE 0x10 /* all */ -#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ - defined(CONFIG_CPU_SUBTYPE_SH7091) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7722) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) || \ - defined(CONFIG_CPU_SUBTYPE_SHX3) -#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) -#define SCI_CTRL_FLAGS_REIE ((port)->type == PORT_SCIFA ? 0 : 8) -#else -#define SCI_CTRL_FLAGS_REIE 0 -#endif -/* SCI_CTRL_FLAGS_MPIE 0x08 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_CTRL_FLAGS_TEIE 0x04 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_CTRL_FLAGS_CKE1 0x02 * all */ -/* SCI_CTRL_FLAGS_CKE0 0x01 * 7707 SCI/SCIF, 7708 SCI, 7709 SCI/SCIF, 7750 SCI */ - /* SCxSR SCI */ #define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ #define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ @@ -300,23 +225,11 @@ /* SCFCR */ #define SCFCR_RFRST 0x0002 #define SCFCR_TFRST 0x0004 -#define SCFCR_TCRST 0x4000 #define SCFCR_MCE 0x0008 #define SCI_MAJOR 204 #define SCI_MINOR_START 8 -/* Generic serial flags */ -#define SCI_RX_THROTTLE 0x0000001 - -#define SCI_MAGIC 0xbabeface - -/* - * Events are used to schedule things to happen at timer-interrupt - * time, instead of at rs interrupt time. - */ -#define SCI_EVENT_WRITE_WAKEUP 0 - #define SCI_IN(size, offset) \ if ((size) == 8) { \ return ioread8(port->membase + (offset)); \ @@ -445,8 +358,6 @@ SCIF_FNS(SCSMR, 0x00, 16) SCIF_FNS(SCBRR, 0x04, 8) SCIF_FNS(SCSCR, 0x08, 16) -SCIF_FNS(SCTDSR, 0x0c, 8) -SCIF_FNS(SCFER, 0x10, 16) SCIF_FNS(SCxSR, 0x14, 16) SCIF_FNS(SCFCR, 0x18, 16) SCIF_FNS(SCFDR, 0x1c, 16) @@ -476,8 +387,6 @@ SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) SCIx_FNS(SCSPTR, 0, 0, 0, 0) -SCIF_FNS(SCTDSR, 0x0c, 8) -SCIF_FNS(SCFER, 0x10, 16) SCIF_FNS(SCFCR, 0x18, 16) SCIF_FNS(SCFDR, 0x1c, 16) SCIF_FNS(SCLSR, 0x24, 16) @@ -503,7 +412,6 @@ SCIF_FNS(SCLSR, 0, 0, 0x28, 16) #elif defined(CONFIG_CPU_SUBTYPE_SH7763) SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16) -SCIF_FNS(SCLSR2, 0, 0, 0x24, 16) SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) @@ -597,64 +505,3 @@ static inline int sci_rxd_in(struct uart_port *port) return 1; } #endif - -/* - * Values for the BitRate Register (SCBRR) - * - * The values are actually divisors for a frequency which can - * be internal to the SH3 (14.7456MHz) or derived from an external - * clock source. This driver assumes the internal clock is used; - * to support using an external clock source, config options or - * possibly command-line options would need to be added. - * - * Also, to support speeds below 2400 (why?) the lower 2 bits of - * the SCSMR register would also need to be set to non-zero values. - * - * -- Greg Banks 27Feb2000 - * - * Answer: The SCBRR register is only eight bits, and the value in - * it gets larger with lower baud rates. 
At around 2400 (depending on - * the peripherial module clock) you run out of bits. However the - * lower two bits of SCSMR allow the module clock to be divided down, - * scaling the value which is needed in SCBRR. - * - * -- Stuart Menefy - 23 May 2000 - * - * I meant, why would anyone bother with bitrates below 2400. - * - * -- Greg Banks - 7Jul2000 - * - * You "speedist"! How will I use my 110bps ASR-33 teletype with paper - * tape reader as a console! - * - * -- Mitch Davis - 15 Jul 2000 - */ - -#if (defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786)) && \ - !defined(CONFIG_SH_SH2007) -#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) -#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ - defined(CONFIG_CPU_SUBTYPE_SH7724) -static inline int scbrr_calc(struct uart_port *port, int bps, int clk) -{ - if (port->type == PORT_SCIF) - return (clk+16*bps)/(32*bps)-1; - else - return ((clk*2)+16*bps)/(16*bps)-1; -} -#define SCBRR_VALUE(bps, clk) scbrr_calc(port, bps, clk) -#elif defined(__H8300H__) || defined(__H8300S__) -#define SCBRR_VALUE(bps, clk) (((clk*1000/32)/bps)-1) -#else /* Generic SH */ -#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(32*bps)-1) -#endif diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index ceba593dc84f..04113e5304a0 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c @@ -101,7 +101,7 @@ static void __iomem * __ref sfi_map_memory(u64 phys, u32 size) return NULL; if (sfi_use_ioremap) - return ioremap(phys, size); + return ioremap_cache(phys, size); else return early_ioremap(phys, size); } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 1906840c1113..13bfa9d48082 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -156,10 +156,10 @@ config SPI_IMX_VER_0_4 def_bool y if ARCH_MX31 config SPI_IMX_VER_0_7 - def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 + def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53 config SPI_IMX_VER_2_3 - def_bool y if ARCH_MX51 + def_bool y if ARCH_MX51 || ARCH_MX53 config SPI_IMX tristate "Freescale i.MX SPI controllers" @@ -310,8 +310,8 @@ config SPI_S3C24XX_GPIO config SPI_S3C64XX tristate "Samsung S3C64XX series type SPI" - depends on ARCH_S3C64XX && EXPERIMENTAL - select S3C64XX_DMA + depends on (ARCH_S3C64XX || ARCH_S5P64X0) + select S3C64XX_DMA if ARCH_S3C64XX help SPI driver for Samsung S3C64XX and newer SoCs. 
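The per-CPU SCBRR_VALUE() macros removed above collapse into the table-driven sci_scbrr_calc(), keyed by the new scbrr_algo_id platform field. As a rough worked example (the 33.33 MHz peripheral clock is an assumed figure, not something this series specifies), SCBRR_ALGO_2 at 115200 bps gives (33333333 + 16*115200) / (32*115200) - 1 = 8 for the bit rate register. A sketch of how a board file might fill the new plat_sci_port fields; the register base and chosen SCSCR bits are illustrative values only:

#include <linux/serial_sci.h>

/* Illustrative only: the base address and SCSCR bit choice are assumptions,
 * not values taken from this series. */
static struct plat_sci_port example_sci_platform_data = {
	.mapbase	= 0xffe00000,		/* assumed SCIF base */
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,		/* (clk + 16*bps) / (32*bps) - 1 */
};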
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index a2a5921c730a..71a1219a995d 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -1795,7 +1795,7 @@ static int pl022_setup(struct spi_device *spi) { struct pl022_config_chip const *chip_info; struct chip_data *chip; - struct ssp_clock_params clk_freq; + struct ssp_clock_params clk_freq = {0, }; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned int bits = spi->bits_per_word; diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index a067046c9da2..1a478bf88c9d 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -341,9 +341,9 @@ static void atmel_spi_next_message(struct spi_master *master) /* * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: * - The buffer is either valid for CPU access, else NULL - * - If the buffer is valid, so is its DMA addresss + * - If the buffer is valid, so is its DMA address * - * This driver manages the dma addresss unless message->is_dma_mapped. + * This driver manages the dma address unless message->is_dma_mapped. */ static int atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c index db35bd9c1b24..2fa012c109bc 100644 --- a/drivers/spi/dw_spi_mmio.c +++ b/drivers/spi/dw_spi_mmio.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -68,8 +69,8 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) } dwsmmio->clk = clk_get(&pdev->dev, NULL); - if (!dwsmmio->clk) { - ret = -ENODEV; + if (IS_ERR(dwsmmio->clk)) { + ret = PTR_ERR(dwsmmio->clk); goto err_irq; } clk_enable(dwsmmio->clk); diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 9469564e6888..1cf9d5faabf4 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -742,6 +742,12 @@ static struct platform_device_id spi_imx_devtype[] = { }, { .name = "imx51-ecspi", .driver_data = SPI_IMX_VER_2_3, + }, { + .name = "imx53-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx53-ecspi", + .driver_data = SPI_IMX_VER_2_3, }, { /* sentinel */ } diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c index bb7df02a5472..891e5909038c 100644 --- a/drivers/spi/spi_tegra.c +++ b/drivers/spi/spi_tegra.c @@ -513,7 +513,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev) } tspi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR_OR_NULL(tspi->clk)) { + if (IS_ERR(tspi->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tspi->clk); goto err2; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 4e6245e67995..603428213d21 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -38,7 +38,7 @@ /* - * This supports acccess to SPI devices using normal userspace I/O calls. + * This supports access to SPI devices using normal userspace I/O calls. * Note that while traditional UNIX/POSIX I/O semantics are half duplex, * and often mask message boundaries, full SPI support requires full duplex * transfers. 
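The spidev comment above is the heart of that interface: one SPI_IOC_MESSAGE ioctl clocks a transmit and a receive buffer through the bus in the same transfer, which is how full duplex is expressed to userspace. A minimal userspace sketch; the device node, command byte and clock speed are arbitrary example values:

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int example_full_duplex(void)
{
	uint8_t tx[4] = { 0x9f, 0, 0, 0 };	/* example command plus padding */
	uint8_t rx[4] = { 0 };
	struct spi_ioc_transfer xfer;
	int fd, ret;

	fd = open("/dev/spidev0.0", O_RDWR);	/* example device node */
	if (fd < 0)
		return -1;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* both buffers move in one clocked transfer */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);
	xfer.speed_hz = 500000;

	ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
	close(fd);
	return ret;
}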
There are several kinds of internal message boundaries to diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index 5a0985d4ce15..29884c00c4d5 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -420,6 +420,16 @@ int ssb_bus_scan(struct ssb_bus *bus, bus->pcicore.dev = dev; #endif /* CONFIG_SSB_DRIVER_PCICORE */ break; + case SSB_DEV_ETHERNET: + if (bus->bustype == SSB_BUSTYPE_PCI) { + if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM && + (bus->host_pci->device & 0xFF00) == 0x4300) { + /* This is a dangling ethernet core on a + * wireless device. Ignore it. */ + continue; + } + } + break; default: break; } diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index e2d586903432..5c8fcfc42c3e 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -123,6 +123,8 @@ source "drivers/staging/sep/Kconfig" source "drivers/staging/iio/Kconfig" +source "drivers/staging/cs5535_gpio/Kconfig" + source "drivers/staging/zram/Kconfig" source "drivers/staging/wlags49_h2/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index c7d222413c07..d53886317826 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ +obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/ obj-$(CONFIG_ZRAM) += zram/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig new file mode 100644 index 000000000000..a1b3a8d2b866 --- /dev/null +++ b/drivers/staging/cs5535_gpio/Kconfig @@ -0,0 +1,11 @@ +config CS5535_GPIO + tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" + depends on X86_32 + help + Note: this driver is DEPRECATED. Please use the cs5535-gpio module + in the GPIO section instead (CONFIG_GPIO_CS5535). + + Give userspace access to the GPIO pins on the AMD CS5535 and + CS5536 Geode companion devices. + + If compiled as a module, it will be called cs5535_gpio. diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile new file mode 100644 index 000000000000..d67c4b85f191 --- /dev/null +++ b/drivers/staging/cs5535_gpio/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO new file mode 100644 index 000000000000..98d1cd1e2363 --- /dev/null +++ b/drivers/staging/cs5535_gpio/TODO @@ -0,0 +1,6 @@ +This is an obsolete driver for some the CS5535 and CS5536 southbridge GPIOs. +It has been replaced by a driver that makes use of the Linux GPIO subsystem. +Please switch to that driver, and let dilinger@queued.net know if there's +anything missing from the new driver. + +This driver is scheduled for removal in 2.6.40. 
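The replacement named in that TODO is an ordinary gpiolib provider, so kernel users claim the same pins through the generic GPIO calls instead of a character device. A minimal consumer sketch; the pin number and label are placeholders, not values from this patch:

#include <linux/gpio.h>

static int example_claim_pin(void)
{
	int err;

	err = gpio_request(7, "example-led");	/* placeholder pin and label */
	if (err)
		return err;

	err = gpio_direction_output(7, 0);	/* drive it low to start with */
	if (err)
		gpio_free(7);

	return err;
}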
diff --git a/drivers/char/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c similarity index 100% rename from drivers/char/cs5535_gpio.c rename to drivers/staging/cs5535_gpio/cs5535_gpio.c diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c index 033fc9486e01..2a8077511fc0 100644 --- a/drivers/staging/msm/msm_fb_bl.c +++ b/drivers/staging/msm/msm_fb_bl.c @@ -42,7 +42,7 @@ static int msm_fb_bl_update_status(struct backlight_device *pbd) return 0; } -static struct backlight_ops msm_fb_bl_ops = { +static const struct backlight_ops msm_fb_bl_ops = { .get_brightness = msm_fb_bl_get_brightness, .update_status = msm_fb_bl_update_status, }; diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO index ac2d3d023715..35f9cda7be11 100644 --- a/drivers/staging/olpc_dcon/TODO +++ b/drivers/staging/olpc_dcon/TODO @@ -1,6 +1,5 @@ TODO: - checkpatch.pl cleanups - - port geode gpio calls to newer cs5535 API - see if vx855 gpio API can be made similar enough to cs5535 so we can share more code - allow simultaneous XO-1 and XO-1.5 support diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 4ca45ec7fd84..9f26dc9408bb 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -49,7 +48,7 @@ struct dcon_platform_data { int (*init)(void); void (*bus_stabilize_wiggle)(void); void (*set_dconload)(int); - int (*read_status)(void); + u8 (*read_status)(void); }; static struct dcon_platform_data *pdata; @@ -615,7 +614,7 @@ static struct device_attribute dcon_device_files[] = { __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store), }; -static struct backlight_ops dcon_bl_ops = { +static const struct backlight_ops dcon_bl_ops = { .get_brightness = dconbl_get, .update_status = dconbl_set }; diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h index 6453ca4ba0ee..e566d213da2a 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.h +++ b/drivers/staging/olpc_dcon/olpc_dcon.h @@ -29,26 +29,6 @@ #define DCON_REG_SCAN_INT 9 #define DCON_REG_BRIGHT 10 -/* GPIO registers (CS5536) */ - -#define MSR_LBAR_GPIO 0x5140000C - -#define GPIOx_OUT_VAL 0x00 -#define GPIOx_OUT_EN 0x04 -#define GPIOx_IN_EN 0x20 -#define GPIOx_INV_EN 0x24 -#define GPIOx_IN_FLTR_EN 0x28 -#define GPIOx_EVNTCNT_EN 0x2C -#define GPIOx_READ_BACK 0x30 -#define GPIOx_EVNT_EN 0x38 -#define GPIOx_NEGEDGE_EN 0x44 -#define GPIOx_NEGEDGE_STS 0x4C -#define GPIO_FLT7_AMNT 0xD8 -#define GPIO_MAP_X 0xE0 -#define GPIO_MAP_Y 0xE4 -#define GPIO_FE7_SEL 0xF7 - - /* Status values */ #define DCONSTAT_SCANINT 0 diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c index 779fb7d7b30c..043198dc6ff7 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c @@ -10,54 +10,70 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ - +#include +#include #include #include "olpc_dcon.h" -/* Base address of the GPIO registers */ -static unsigned long gpio_base; - -/* - * List of GPIOs that we care about: - * (in) GPIO12 -- DCONBLANK - * (in) GPIO[56] -- DCONSTAT[01] - * (out) GPIO11 -- DCONLOAD - */ - -#define IN_GPIOS ((1<<5) | (1<<6) | (1<<7) | (1<<12)) -#define OUT_GPIOS (1<<11) - static int dcon_init_xo_1(void) { - unsigned long lo, hi; unsigned char lob; - rdmsr(MSR_LBAR_GPIO, lo, hi); - - /* Check the mask and whether GPIO is enabled (sanity check) */ - if (hi != 0x0000f001) { - printk(KERN_ERR "GPIO not enabled -- cannot use DCON\n"); - return -ENODEV; + if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) { + printk(KERN_ERR "olpc-dcon: failed to request STAT0 GPIO\n"); + return -EIO; + } + if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) { + printk(KERN_ERR "olpc-dcon: failed to request STAT1 GPIO\n"); + goto err_gp_stat1; + } + if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) { + printk(KERN_ERR "olpc-dcon: failed to request IRQ GPIO\n"); + goto err_gp_irq; + } + if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) { + printk(KERN_ERR "olpc-dcon: failed to request LOAD GPIO\n"); + goto err_gp_load; + } + if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) { + printk(KERN_ERR "olpc-dcon: failed to request BLANK GPIO\n"); + goto err_gp_blank; } - - /* Mask off the IO base address */ - gpio_base = lo & 0x0000ff00; /* Turn off the event enable for GPIO7 just to be safe */ - outl(1 << (16+7), gpio_base + GPIOx_EVNT_EN); + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); + + /* + * Determine the current state by reading the GPIO bit; earlier + * stages of the boot process have established the state. + * + * Note that we read GPIO_OUPUT_VAL rather than GPIO_READ_BACK here; + * this is because OFW will disable input for the pin and set a value.. + * READ_BACK will only contain a valid value if input is enabled and + * then a value is set. So, future readings of the pin can use + * READ_BACK, but the first one cannot. Awesome, huh? + */ + dcon_source = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL) + ? 
DCON_SOURCE_CPU + : DCON_SOURCE_DCON; + dcon_pending = dcon_source; /* Set the directions for the GPIO pins */ - outl(OUT_GPIOS | (IN_GPIOS << 16), gpio_base + GPIOx_OUT_EN); - outl(IN_GPIOS | (OUT_GPIOS << 16), gpio_base + GPIOx_IN_EN); + gpio_direction_input(OLPC_GPIO_DCON_STAT0); + gpio_direction_input(OLPC_GPIO_DCON_STAT1); + gpio_direction_input(OLPC_GPIO_DCON_IRQ); + gpio_direction_input(OLPC_GPIO_DCON_BLANK); + gpio_direction_output(OLPC_GPIO_DCON_LOAD, + dcon_source == DCON_SOURCE_CPU); /* Set up the interrupt mappings */ /* Set the IRQ to pair 2 */ - geode_gpio_event_irq(OLPC_GPIO_DCON_IRQ, 2); + cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0); /* Enable group 2 to trigger the DCON interrupt */ - geode_gpio_set_irq(2, DCON_IRQ); + cs5535_gpio_set_irq(2, DCON_IRQ); /* Select edge level for interrupt (in PIC) */ lob = inb(0x4d0); @@ -65,52 +81,61 @@ static int dcon_init_xo_1(void) outb(lob, 0x4d0); /* Register the interupt handler */ - if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) - return -EIO; + if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) { + printk(KERN_ERR "olpc-dcon: failed to request DCON's irq\n"); + goto err_req_irq; + } /* Clear INV_EN for GPIO7 (DCONIRQ) */ - outl((1<<(16+7)), gpio_base + GPIOx_INV_EN); + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT); /* Enable filter for GPIO12 (DCONBLANK) */ - outl(1<<(12), gpio_base + GPIOx_IN_FLTR_EN); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER); /* Disable filter for GPIO7 */ - outl(1<<(16+7), gpio_base + GPIOx_IN_FLTR_EN); + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER); /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ - - outl(1<<(16+7), gpio_base + GPIOx_EVNTCNT_EN); - outl(1<<(16+12), gpio_base + GPIOx_EVNTCNT_EN); + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT); + cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT); /* Add GPIO12 to the Filter Event Pair #7 */ - outb(12, gpio_base + GPIO_FE7_SEL); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL); /* Turn off negative Edge Enable for GPIO12 */ - outl(1<<(16+12), gpio_base + GPIOx_NEGEDGE_EN); + cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN); /* Enable negative Edge Enable for GPIO7 */ - outl(1<<7, gpio_base + GPIOx_NEGEDGE_EN); + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN); /* Zero the filter amount for Filter Event Pair #7 */ - outw(0, gpio_base + GPIO_FLT7_AMNT); + cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT); /* Clear the negative edge status for GPIO7 and GPIO12 */ - outl((1<<7) | (1<<12), gpio_base+0x4c); + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS); /* FIXME: Clear the posiitive status as well, just to be sure */ - outl((1<<7) | (1<<12), gpio_base+0x48); + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS); /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ - outl((1<<(7))|(1<<12), gpio_base + GPIOx_EVNT_EN); - - /* Determine the current state by reading the GPIO bit */ - /* Earlier stages of the boot process have established the state */ - dcon_source = inl(gpio_base + GPIOx_OUT_VAL) & (1<<11) - ? 
DCON_SOURCE_CPU - : DCON_SOURCE_DCON; - dcon_pending = dcon_source; + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE); return 0; + +err_req_irq: + gpio_free(OLPC_GPIO_DCON_BLANK); +err_gp_blank: + gpio_free(OLPC_GPIO_DCON_LOAD); +err_gp_load: + gpio_free(OLPC_GPIO_DCON_IRQ); +err_gp_irq: + gpio_free(OLPC_GPIO_DCON_STAT1); +err_gp_stat1: + gpio_free(OLPC_GPIO_DCON_STAT0); + return -EIO; } static void dcon_wiggle_xo_1(void) @@ -128,37 +153,44 @@ static void dcon_wiggle_xo_1(void) * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and * GPIO15. */ - geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); - geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); - geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); - geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); - geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); for (x = 0; x < 16; x++) { udelay(5); - geode_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); udelay(5); - geode_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); } udelay(5); - geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); - geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); } static void dcon_set_dconload_1(int val) { - if (val) - outl(1<<11, gpio_base + GPIOx_OUT_VAL); - else - outl(1<<(11 + 16), gpio_base + GPIOx_OUT_VAL); + gpio_set_value(OLPC_GPIO_DCON_LOAD, val); } -static int dcon_read_status_xo_1(void) +static u8 dcon_read_status_xo_1(void) { - int status = inl(gpio_base + GPIOx_READ_BACK) >> 5; - + u8 status; + + status = gpio_get_value(OLPC_GPIO_DCON_STAT0); + status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1; + /* Clear the negative edge status for GPIO7 */ - outl(1 << 7, gpio_base + GPIOx_NEGEDGE_STS); + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); return status; } diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c index cca6a235ef96..4f56098bb366 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c @@ -195,9 +195,9 @@ static void dcon_set_dconload_xo_1_5(int val) } } -static int dcon_read_status_xo_1_5(void) +static u8 dcon_read_status_xo_1_5(void) { - int status; + u8 status; if (!dcon_was_irq()) return -1; diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c index 9279897ff161..b2e918622088 100644 --- a/drivers/staging/pohmelfs/net.c +++ b/drivers/staging/pohmelfs/net.c @@ -413,7 +413,7 @@ static int 
pohmelfs_readdir_response(struct netfs_state *st) if (dentry) { alias = d_materialise_unique(dentry, &npi->vfs_inode); if (alias) - dput(dentry); + dput(alias); } dput(dentry); diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index ac2bf11e1119..701e8d52a9fa 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -269,7 +269,7 @@ static int update_status(struct backlight_device *bd) return 0; } -static struct backlight_ops backlight_ops = { +static const struct backlight_ops backlight_ops = { .get_brightness = get_brightness, .update_status = update_status, }; diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c index f4b163f7338a..0bc113c44d39 100644 --- a/drivers/staging/sm7xx/smtcfb.c +++ b/drivers/staging/sm7xx/smtcfb.c @@ -1071,7 +1071,7 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev) /* when resuming, restore pci data and fb cursor */ if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) { retv = pci_set_power_state(pdev, PCI_D0); - retv = pci_restore_state(pdev); + pci_restore_state(pdev); if (pci_enable_device(pdev)) return -1; pci_set_master(pdev); diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c index dd612f50749f..87a3a9bd5842 100644 --- a/drivers/staging/smbfs/dir.c +++ b/drivers/staging/smbfs/dir.c @@ -403,12 +403,6 @@ smb_delete_dentry(const struct dentry *dentry) void smb_new_dentry(struct dentry *dentry) { - struct smb_sb_info *server = server_from_dentry(dentry); - - if (server->mnt->flags & SMB_MOUNT_CASE) - d_set_d_op(dentry, &smbfs_dentry_operations_case); - else - d_set_d_op(dentry, &smbfs_dentry_operations); dentry->d_time = jiffies; } @@ -440,7 +434,6 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) struct smb_fattr finfo; struct inode *inode; int error; - struct smb_sb_info *server; error = -ENAMETOOLONG; if (dentry->d_name.len > SMB_MAXNAMELEN) @@ -468,12 +461,6 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) inode = smb_iget(dir->i_sb, &finfo); if (inode) { add_entry: - server = server_from_dentry(dentry); - if (server->mnt->flags & SMB_MOUNT_CASE) - d_set_d_op(dentry, &smbfs_dentry_operations_case); - else - d_set_d_op(dentry, &smbfs_dentry_operations); - d_add(dentry, inode); smb_renew_times(dentry); error = 0; diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c index 244319dc9702..0778589d9e9e 100644 --- a/drivers/staging/smbfs/inode.c +++ b/drivers/staging/smbfs/inode.c @@ -614,6 +614,10 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) printk(KERN_ERR "smbfs: failed to start smbiod\n"); goto out_no_smbiod; } + if (server->mnt->flags & SMB_MOUNT_CASE) + sb->s_d_op = &smbfs_dentry_operations_case; + else + sb->s_d_op = &smbfs_dentry_operations; /* * Keep the super block locked while we get the root inode. 
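The smbfs change above leans on the superblock-wide default dentry operations pointer: once s_d_op is set in fill_super, dentries allocated for that superblock inherit it, so the per-dentry d_set_d_op() calls in the lookup paths can go away. A minimal sketch of the pattern, with an illustrative empty ops structure:

#include <linux/dcache.h>
#include <linux/fs.h>

static const struct dentry_operations example_dentry_operations = {
	/* empty: every hook falls back to the default VFS behaviour */
};

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Decide once at mount time; newly allocated dentries pick up
	 * sb->s_d_op, replacing scattered d_set_d_op() calls. */
	sb->s_d_op = &example_dentry_operations;

	/* ... the usual root inode and s_op setup would follow ... */
	return 0;
}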
diff --git a/drivers/staging/smbfs/proto.h b/drivers/staging/smbfs/proto.h index 05939a6f43e6..3883cb16a3f6 100644 --- a/drivers/staging/smbfs/proto.h +++ b/drivers/staging/smbfs/proto.h @@ -38,6 +38,8 @@ extern void smb_install_null_ops(struct smb_ops *ops); extern const struct file_operations smb_dir_operations; extern const struct inode_operations smb_dir_inode_operations; extern const struct inode_operations smb_dir_inode_operations_unix; +extern const struct dentry_operations smbfs_dentry_operations_case; +extern const struct dentry_operations smbfs_dentry_operations; extern void smb_new_dentry(struct dentry *dentry); extern void smb_renew_times(struct dentry *dentry); /* cache.c */ diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig new file mode 100644 index 000000000000..2fac3be209ac --- /dev/null +++ b/drivers/target/Kconfig @@ -0,0 +1,32 @@ + +menuconfig TARGET_CORE + tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" + depends on SCSI && BLOCK + select CONFIGFS_FS + default n + help + Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled + control path for target_core_mod. This includes built-in TCM RAMDISK + subsystem logic for virtual LUN 0 access + +if TARGET_CORE + +config TCM_IBLOCK + tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" + help + Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered + access to Linux/Block devices using BIO + +config TCM_FILEIO + tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS" + help + Say Y here to enable the TCM/FILEIO subsystem plugin for buffered + access to Linux/VFS struct file or struct block_device + +config TCM_PSCSI + tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI" + help + Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered + passthrough access to Linux/SCSI device + +endif diff --git a/drivers/target/Makefile b/drivers/target/Makefile new file mode 100644 index 000000000000..5cfd70819f08 --- /dev/null +++ b/drivers/target/Makefile @@ -0,0 +1,24 @@ +EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/ + +target_core_mod-y := target_core_configfs.o \ + target_core_device.o \ + target_core_fabric_configfs.o \ + target_core_fabric_lib.o \ + target_core_hba.o \ + target_core_pr.o \ + target_core_alua.o \ + target_core_scdb.o \ + target_core_tmr.o \ + target_core_tpg.o \ + target_core_transport.o \ + target_core_cdb.o \ + target_core_ua.o \ + target_core_rd.o \ + target_core_mib.o + +obj-$(CONFIG_TARGET_CORE) += target_core_mod.o + +# Subsystem modules +obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o +obj-$(CONFIG_TCM_FILEIO) += target_core_file.o +obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c new file mode 100644 index 000000000000..2c5fcfed5934 --- /dev/null +++ b/drivers/target/target_core_alua.c @@ -0,0 +1,1991 @@ +/******************************************************************************* + * Filename: target_core_alua.c + * + * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) + * + * Copyright (c) 2009-2010 Rising Tide Systems + * Copyright (c) 2009-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_ua.h" + +static int core_alua_check_transition(int state, int *primary); +static int core_alua_set_tg_pt_secondary_state( + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, + struct se_port *port, int explict, int offline); + +/* + * REPORT_TARGET_PORT_GROUPS + * + * See spc4r17 section 6.27 + */ +int core_emulate_report_target_port_groups(struct se_cmd *cmd) +{ + struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; + struct se_port *port; + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first + Target port group descriptor */ + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, + tg_pt_gp_list) { + /* + * PREF: Preferred target port bit, determine if this + * bit should be set for port group. + */ + if (tg_pt_gp->tg_pt_gp_pref) + buf[off] = 0x80; + /* + * Set the ASYMMETRIC ACCESS State + */ + buf[off++] |= (atomic_read( + &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); + /* + * Set supported ASYMMETRIC ACCESS State bits + */ + buf[off] = 0x80; /* T_SUP */ + buf[off] |= 0x40; /* O_SUP */ + buf[off] |= 0x8; /* U_SUP */ + buf[off] |= 0x4; /* S_SUP */ + buf[off] |= 0x2; /* AN_SUP */ + buf[off++] |= 0x1; /* AO_SUP */ + /* + * TARGET PORT GROUP + */ + buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); + buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); + + off++; /* Skip over Reserved */ + /* + * STATUS CODE + */ + buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); + /* + * Vendor Specific field + */ + buf[off++] = 0x00; + /* + * TARGET PORT COUNT + */ + buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); + rd_len += 8; + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, + tg_pt_gp_mem_list) { + port = tg_pt_gp_mem->tg_pt; + /* + * Start Target Port descriptor format + * + * See spc4r17 section 6.2.7 Table 247 + */ + off += 2; /* Skip over Obsolete */ + /* + * Set RELATIVE TARGET PORT IDENTIFIER + */ + buf[off++] = ((port->sep_rtpi >> 8) & 0xff); + buf[off++] = (port->sep_rtpi & 0xff); + rd_len += 4; + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + } + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + /* + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload + */ + buf[0] = ((rd_len >> 24) & 0xff); + buf[1] = ((rd_len >> 16) & 0xff); + buf[2] = ((rd_len >> 8) & 0xff); + buf[3] = (rd_len & 0xff); + + return 0; +} + +/* + * SET_TARGET_PORT_GROUPS for explict ALUA operation. 
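For reference, the payload assembled above follows the REPORT TARGET PORT GROUPS parameter data format: a four-byte big-endian RETURN DATA LENGTH header, then an eight-byte descriptor per target port group, each followed by four-byte target port entries whose count sits in descriptor byte 7. A simplified initiator-side walk of that layout, not taken from this patch:

/* Not from this patch: a simplified reader for the payload the code above
 * produces; error and bounds handling is omitted. */
static void example_parse_rtpg(const unsigned char *buf)
{
	unsigned int len = (buf[0] << 24) | (buf[1] << 16) |
			   (buf[2] << 8) | buf[3];
	unsigned int off = 4;

	while (off < len + 4) {
		unsigned int state = buf[off] & 0x0f;		/* low nibble */
		unsigned int group_id = (buf[off + 2] << 8) | buf[off + 3];
		unsigned int port_count = buf[off + 7];

		/* Each member port follows as a four-byte descriptor with the
		 * relative target port identifier in its bytes 2-3. */
		off += 8 + 4 * port_count;
		(void)state;
		(void)group_id;
	}
}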
+ * + * See spc4r17 section 6.35 + */ +int core_emulate_set_target_port_groups(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; + struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep; + struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl; + struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ + u32 len = 4; /* Skip over RESERVED area in header */ + int alua_access_state, primary = 0, rc; + u16 tg_pt_id, rtpi; + + if (!(l_port)) + return PYX_TRANSPORT_LU_COMM_FAILURE; + /* + * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed + * for the local tg_pt_gp. + */ + l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; + if (!(l_tg_pt_gp_mem)) { + printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); + l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; + if (!(l_tg_pt_gp)) { + spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); + printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); + spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); + + if (!(rc)) { + printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" + " while TPGS_EXPLICT_ALUA is disabled\n"); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + + while (len < cmd->data_length) { + alua_access_state = (ptr[0] & 0x0f); + /* + * Check the received ALUA access state, and determine if + * the state is a primary or secondary target port asymmetric + * access state. + */ + rc = core_alua_check_transition(alua_access_state, &primary); + if (rc != 0) { + /* + * If the SET TARGET PORT GROUPS attempts to establish + * an invalid combination of target port asymmetric + * access states or attempts to establish an + * unsupported target port asymmetric access state, + * then the command shall be terminated with CHECK + * CONDITION status, with the sense key set to ILLEGAL + * REQUEST, and the additional sense code set to INVALID + * FIELD IN PARAMETER LIST. + */ + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + rc = -1; + /* + * If the ASYMMETRIC ACCESS STATE field (see table 267) + * specifies a primary target port asymmetric access state, + * then the TARGET PORT GROUP OR TARGET PORT field specifies + * a primary target port group for which the primary target + * port asymmetric access state shall be changed. If the + * ASYMMETRIC ACCESS STATE field specifies a secondary target + * port asymmetric access state, then the TARGET PORT GROUP OR + * TARGET PORT field specifies the relative target port + * identifier (see 3.1.120) of the target port for which the + * secondary target port asymmetric access state shall be + * changed. 
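That comment describes exactly what the loop pulls out of the SET TARGET PORT GROUPS parameter list: after a four-byte reserved header come four-byte descriptors, each carrying the requested access state in the low nibble of byte 0 and either a target port group ID (primary states) or a relative target port identifier (secondary states) in bytes 2-3. An initiator-side sketch of encoding one descriptor, not part of this patch:

/* Illustrative helper, not from this patch: encode one four-byte STPG
 * descriptor that follows the four reserved header bytes. */
static void example_fill_stpg_descriptor(unsigned char *desc,
					 unsigned char alua_state,
					 unsigned short id)
{
	desc[0] = alua_state & 0x0f;	/* requested asymmetric access state */
	desc[1] = 0;			/* reserved */
	desc[2] = (id >> 8) & 0xff;	/* target port group ID for primary */
	desc[3] = id & 0xff;		/*   states, relative port ID otherwise */
}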
+ */ + if (primary) { + tg_pt_id = ((ptr[2] << 8) & 0xff); + tg_pt_id |= (ptr[3] & 0xff); + /* + * Locate the matching target port group ID from + * the global tg_pt_gp list + */ + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, + &T10_ALUA(su_dev)->tg_pt_gps_list, + tg_pt_gp_list) { + if (!(tg_pt_gp->tg_pt_gp_valid_id)) + continue; + + if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) + continue; + + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + + rc = core_alua_do_port_transition(tg_pt_gp, + dev, l_port, nacl, + alua_access_state, 1); + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_dec(); + break; + } + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + /* + * If not matching target port group ID can be located + * throw an exception with ASCQ: INVALID_PARAMETER_LIST + */ + if (rc != 0) + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } else { + /* + * Extact the RELATIVE TARGET PORT IDENTIFIER to identify + * the Target Port in question for the the incoming + * SET_TARGET_PORT_GROUPS op. + */ + rtpi = ((ptr[2] << 8) & 0xff); + rtpi |= (ptr[3] & 0xff); + /* + * Locate the matching relative target port identifer + * for the struct se_device storage object. + */ + spin_lock(&dev->se_port_lock); + list_for_each_entry(port, &dev->dev_sep_list, + sep_list) { + if (port->sep_rtpi != rtpi) + continue; + + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + spin_unlock(&dev->se_port_lock); + + rc = core_alua_set_tg_pt_secondary_state( + tg_pt_gp_mem, port, 1, 1); + + spin_lock(&dev->se_port_lock); + break; + } + spin_unlock(&dev->se_port_lock); + /* + * If not matching relative target port identifier can + * be located, throw an exception with ASCQ: + * INVALID_PARAMETER_LIST + */ + if (rc != 0) + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + + ptr += 4; + len += 4; + } + + return 0; +} + +static inline int core_alua_state_nonoptimized( + struct se_cmd *cmd, + unsigned char *cdb, + int nonop_delay_msecs, + u8 *alua_ascq) +{ + /* + * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked + * later to determine if processing of this cmd needs to be + * temporarily delayed for the Active/NonOptimized primary access state. 
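The SCF_ALUA_NON_OPTIMIZED flag and alua_nonop_delay set here are only acted on later, in core_alua_check_nonop_delay(), which this patch exports for fabric modules. A hedged sketch of where a fabric might call it; the surrounding dispatch function is invented for illustration:

struct se_cmd;
int core_alua_check_nonop_delay(struct se_cmd *cmd);	/* exported by this patch */

/* Hypothetical fabric dispatch path; only the core_alua_check_nonop_delay()
 * call comes from this patch. */
static void example_fabric_dispatch(struct se_cmd *cmd)
{
	/* Sleeps for cmd->alua_nonop_delay msecs when the command arrived
	 * through an Active/NonOptimized target port group. */
	core_alua_check_nonop_delay(cmd);

	/* ... queue the command to the backend as usual ... */
}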
+ */ + cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; + cmd->alua_nonop_delay = nonop_delay_msecs; + return 0; +} + +static inline int core_alua_state_standby( + struct se_cmd *cmd, + unsigned char *cdb, + u8 *alua_ascq) +{ + /* + * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by + * spc4r17 section 5.9.2.4.4 + */ + switch (cdb[0]) { + case INQUIRY: + case LOG_SELECT: + case LOG_SENSE: + case MODE_SELECT: + case MODE_SENSE: + case REPORT_LUNS: + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + case MAINTENANCE_IN: + switch (cdb[1]) { + case MI_REPORT_TARGET_PGS: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + return 1; + } + case MAINTENANCE_OUT: + switch (cdb[1]) { + case MO_SET_TARGET_PGS: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + return 1; + } + case REQUEST_SENSE: + case PERSISTENT_RESERVE_IN: + case PERSISTENT_RESERVE_OUT: + case READ_BUFFER: + case WRITE_BUFFER: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + return 1; + } + + return 0; +} + +static inline int core_alua_state_unavailable( + struct se_cmd *cmd, + unsigned char *cdb, + u8 *alua_ascq) +{ + /* + * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by + * spc4r17 section 5.9.2.4.5 + */ + switch (cdb[0]) { + case INQUIRY: + case REPORT_LUNS: + case MAINTENANCE_IN: + switch (cdb[1]) { + case MI_REPORT_TARGET_PGS: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + return 1; + } + case MAINTENANCE_OUT: + switch (cdb[1]) { + case MO_SET_TARGET_PGS: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + return 1; + } + case REQUEST_SENSE: + case READ_BUFFER: + case WRITE_BUFFER: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + return 1; + } + + return 0; +} + +static inline int core_alua_state_transition( + struct se_cmd *cmd, + unsigned char *cdb, + u8 *alua_ascq) +{ + /* + * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by + * spc4r17 section 5.9.2.5 + */ + switch (cdb[0]) { + case INQUIRY: + case REPORT_LUNS: + case MAINTENANCE_IN: + switch (cdb[1]) { + case MI_REPORT_TARGET_PGS: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + return 1; + } + case REQUEST_SENSE: + case READ_BUFFER: + case WRITE_BUFFER: + return 0; + default: + *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + return 1; + } + + return 0; +} + +/* + * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED + * in transport_cmd_sequencer(). This function is assigned to + * struct t10_alua *->state_check() in core_setup_alua() + */ +static int core_alua_state_check_nop( + struct se_cmd *cmd, + unsigned char *cdb, + u8 *alua_ascq) +{ + return 0; +} + +/* + * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer(). 
+ * This function is assigned to struct t10_alua *->state_check() in + * core_setup_alua() + * + * Also, this function can return three different return codes to + * signal transport_generic_cmd_sequencer() + * + * return 1: Is used to signal LUN not accecsable, and check condition/not ready + * return 0: Used to signal success + * reutrn -1: Used to signal failure, and invalid cdb field + */ +static int core_alua_state_check( + struct se_cmd *cmd, + unsigned char *cdb, + u8 *alua_ascq) +{ + struct se_lun *lun = SE_LUN(cmd); + struct se_port *port = lun->lun_sep; + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + int out_alua_state, nonop_delay_msecs; + + if (!(port)) + return 0; + /* + * First, check for a struct se_port specific secondary ALUA target port + * access state: OFFLINE + */ + if (atomic_read(&port->sep_tg_pt_secondary_offline)) { + *alua_ascq = ASCQ_04H_ALUA_OFFLINE; + printk(KERN_INFO "ALUA: Got secondary offline status for local" + " target port\n"); + *alua_ascq = ASCQ_04H_ALUA_OFFLINE; + return 1; + } + /* + * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the + * ALUA target port group, to obtain current ALUA access state. + * Otherwise look for the underlying struct se_device association with + * a ALUA logical unit group. + */ + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + /* + * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a seperate conditional + * statement so the complier knows explictly to check this case first. + * For the Optimized ALUA access state case, we want to process the + * incoming fabric cmd ASAP.. + */ + if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) + return 0; + + switch (out_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + return core_alua_state_nonoptimized(cmd, cdb, + nonop_delay_msecs, alua_ascq); + case ALUA_ACCESS_STATE_STANDBY: + return core_alua_state_standby(cmd, cdb, alua_ascq); + case ALUA_ACCESS_STATE_UNAVAILABLE: + return core_alua_state_unavailable(cmd, cdb, alua_ascq); + case ALUA_ACCESS_STATE_TRANSITION: + return core_alua_state_transition(cmd, cdb, alua_ascq); + /* + * OFFLINE is a secondary ALUA target port group access state, that is + * handled above with struct se_port->sep_tg_pt_secondary_offline=1 + */ + case ALUA_ACCESS_STATE_OFFLINE: + default: + printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", + out_alua_state); + return -1; + } + + return 0; +} + +/* + * Check implict and explict ALUA state change request. + */ +static int core_alua_check_transition(int state, int *primary) +{ + switch (state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + case ALUA_ACCESS_STATE_STANDBY: + case ALUA_ACCESS_STATE_UNAVAILABLE: + /* + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are + * defined as primary target port asymmetric access states. + */ + *primary = 1; + break; + case ALUA_ACCESS_STATE_OFFLINE: + /* + * OFFLINE state is defined as a secondary target port + * asymmetric access state. 
+ */ + *primary = 0; + break; + default: + printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); + return -1; + } + + return 0; +} + +static char *core_alua_dump_state(int state) +{ + switch (state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + return "Active/Optimized"; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + return "Active/NonOptimized"; + case ALUA_ACCESS_STATE_STANDBY: + return "Standby"; + case ALUA_ACCESS_STATE_UNAVAILABLE: + return "Unavailable"; + case ALUA_ACCESS_STATE_OFFLINE: + return "Offline"; + default: + return "Unknown"; + } + + return NULL; +} + +char *core_alua_dump_status(int status) +{ + switch (status) { + case ALUA_STATUS_NONE: + return "None"; + case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: + return "Altered by Explict STPG"; + case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: + return "Altered by Implict ALUA"; + default: + return "Unknown"; + } + + return NULL; +} + +/* + * Used by fabric modules to determine when we need to delay processing + * for the Active/NonOptimized paths.. + */ +int core_alua_check_nonop_delay( + struct se_cmd *cmd) +{ + if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) + return 0; + if (in_interrupt()) + return 0; + /* + * The ALUA Active/NonOptimized access state delay can be disabled + * in via configfs with a value of zero + */ + if (!(cmd->alua_nonop_delay)) + return 0; + /* + * struct se_cmd->alua_nonop_delay gets set by a target port group + * defined interval in core_alua_state_nonoptimized() + */ + msleep_interruptible(cmd->alua_nonop_delay); + return 0; +} +EXPORT_SYMBOL(core_alua_check_nonop_delay); + +/* + * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex + * + */ +static int core_alua_write_tpg_metadata( + const char *path, + unsigned char *md_buf, + u32 md_buf_len) +{ + mm_segment_t old_fs; + struct file *file; + struct iovec iov[1]; + int flags = O_RDWR | O_CREAT | O_TRUNC, ret; + + memset(iov, 0, sizeof(struct iovec)); + + file = filp_open(path, flags, 0600); + if (IS_ERR(file) || !file || !file->f_dentry) { + printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n", + path); + return -ENODEV; + } + + iov[0].iov_base = &md_buf[0]; + iov[0].iov_len = md_buf_len; + + old_fs = get_fs(); + set_fs(get_ds()); + ret = vfs_writev(file, &iov[0], 1, &file->f_pos); + set_fs(old_fs); + + if (ret < 0) { + printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path); + filp_close(file, NULL); + return -EIO; + } + filp_close(file, NULL); + + return 0; +} + +/* + * Called with tg_pt_gp->tg_pt_gp_md_mutex held + */ +static int core_alua_update_tpg_primary_metadata( + struct t10_alua_tg_pt_gp *tg_pt_gp, + int primary_state, + unsigned char *md_buf) +{ + struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; + struct t10_wwn *wwn = &su_dev->t10_wwn; + char path[ALUA_METADATA_PATH_LEN]; + int len; + + memset(path, 0, ALUA_METADATA_PATH_LEN); + + len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, + "tg_pt_gp_id=%hu\n" + "alua_access_state=0x%02x\n" + "alua_access_status=0x%02x\n", + tg_pt_gp->tg_pt_gp_id, primary_state, + tg_pt_gp->tg_pt_gp_alua_access_status); + + snprintf(path, ALUA_METADATA_PATH_LEN, + "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], + config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); + + return core_alua_write_tpg_metadata(path, md_buf, len); +} + +static int core_alua_do_transition_tg_pt( + struct t10_alua_tg_pt_gp *tg_pt_gp, + struct se_port *l_port, + struct se_node_acl *nacl, + unsigned char *md_buf, + int new_state, + int explict) +{ + struct se_dev_entry 
*se_deve; + struct se_lun_acl *lacl; + struct se_port *port; + struct t10_alua_tg_pt_gp_member *mem; + int old_state = 0; + /* + * Save the old primary ALUA access state, and set the current state + * to ALUA_ACCESS_STATE_TRANSITION. + */ + old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_TRANSITION); + tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? + ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + /* + * Check for the optional ALUA primary state transition delay + */ + if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) + msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, + tg_pt_gp_mem_list) { + port = mem->tg_pt; + /* + * After an implicit target port asymmetric access state + * change, a device server shall establish a unit attention + * condition for the initiator port associated with every I_T + * nexus with the additional sense code set to ASYMMETRIC + * ACCESS STATE CHAGED. + * + * After an explicit target port asymmetric access state + * change, a device server shall establish a unit attention + * condition with the additional sense code set to ASYMMETRIC + * ACCESS STATE CHANGED for the initiator port associated with + * every I_T nexus other than the I_T nexus on which the SET + * TARGET PORT GROUPS command + */ + atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + spin_lock_bh(&port->sep_alua_lock); + list_for_each_entry(se_deve, &port->sep_alua_list, + alua_port_list) { + lacl = se_deve->se_lun_acl; + /* + * se_deve->se_lun_acl pointer may be NULL for a + * entry created without explict Node+MappedLUN ACLs + */ + if (!(lacl)) + continue; + + if (explict && + (nacl != NULL) && (nacl == lacl->se_lun_nacl) && + (l_port != NULL) && (l_port == port)) + continue; + + core_scsi3_ua_allocate(lacl->se_lun_nacl, + se_deve->mapped_lun, 0x2A, + ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); + } + spin_unlock_bh(&port->sep_alua_lock); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); + smp_mb__after_atomic_dec(); + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + /* + * Update the ALUA metadata buf that has been allocated in + * core_alua_do_port_transition(), this metadata will be written + * to struct file. + * + * Note that there is the case where we do not want to update the + * metadata when the saved metadata is being parsed in userspace + * when setting the existing port access state and access status. + * + * Also note that the failure to write out the ALUA metadata to + * struct file does NOT affect the actual ALUA transition. + */ + if (tg_pt_gp->tg_pt_gp_write_metadata) { + mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); + core_alua_update_tpg_primary_metadata(tg_pt_gp, + new_state, md_buf); + mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); + } + /* + * Set the current primary ALUA access state to the requested new state + */ + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); + + printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" + " from primary access state %s to %s\n", (explict) ? 
"explict" : + "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), + core_alua_dump_state(new_state)); + + return 0; +} + +int core_alua_do_port_transition( + struct t10_alua_tg_pt_gp *l_tg_pt_gp, + struct se_device *l_dev, + struct se_port *l_port, + struct se_node_acl *l_nacl, + int new_state, + int explict) +{ + struct se_device *dev; + struct se_port *port; + struct se_subsystem_dev *su_dev; + struct se_node_acl *nacl; + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; + struct t10_alua_tg_pt_gp *tg_pt_gp; + unsigned char *md_buf; + int primary; + + if (core_alua_check_transition(new_state, &primary) != 0) + return -EINVAL; + + md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); + if (!(md_buf)) { + printk("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } + + local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; + spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); + lu_gp = local_lu_gp_mem->lu_gp; + atomic_inc(&lu_gp->lu_gp_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); + /* + * For storage objects that are members of the 'default_lu_gp', + * we only do transition on the passed *l_tp_pt_gp, and not + * on all of the matching target port groups IDs in default_lu_gp. + */ + if (!(lu_gp->lu_gp_id)) { + /* + * core_alua_do_transition_tg_pt() will always return + * success. + */ + core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, + md_buf, new_state, explict); + atomic_dec(&lu_gp->lu_gp_ref_cnt); + smp_mb__after_atomic_dec(); + kfree(md_buf); + return 0; + } + /* + * For all other LU groups aside from 'default_lu_gp', walk all of + * the associated storage objects looking for a matching target port + * group ID from the local target port group. + */ + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, + lu_gp_mem_list) { + + dev = lu_gp_mem->lu_gp_mem_dev; + su_dev = dev->se_sub_dev; + atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&lu_gp->lu_gp_lock); + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, + &T10_ALUA(su_dev)->tg_pt_gps_list, + tg_pt_gp_list) { + + if (!(tg_pt_gp->tg_pt_gp_valid_id)) + continue; + /* + * If the target behavior port asymmetric access state + * is changed for any target port group accessiable via + * a logical unit within a LU group, the target port + * behavior group asymmetric access states for the same + * target port group accessible via other logical units + * in that LU group will also change. + */ + if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) + continue; + + if (l_tg_pt_gp == tg_pt_gp) { + port = l_port; + nacl = l_nacl; + } else { + port = NULL; + nacl = NULL; + } + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + /* + * core_alua_do_transition_tg_pt() will always return + * success. 
+ */ + core_alua_do_transition_tg_pt(tg_pt_gp, port, + nacl, md_buf, new_state, explict); + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic_dec(); + } + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + + spin_lock(&lu_gp->lu_gp_lock); + atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); + smp_mb__after_atomic_dec(); + } + spin_unlock(&lu_gp->lu_gp_lock); + + printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" + " Group IDs: %hu %s transition to primary state: %s\n", + config_item_name(&lu_gp->lu_gp_group.cg_item), + l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", + core_alua_dump_state(new_state)); + + atomic_dec(&lu_gp->lu_gp_ref_cnt); + smp_mb__after_atomic_dec(); + kfree(md_buf); + return 0; +} + +/* + * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held + */ +static int core_alua_update_tpg_secondary_metadata( + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, + struct se_port *port, + unsigned char *md_buf, + u32 md_buf_len) +{ + struct se_portal_group *se_tpg = port->sep_tpg; + char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; + int len; + + memset(path, 0, ALUA_METADATA_PATH_LEN); + memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); + + len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", + TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)); + + if (TPG_TFO(se_tpg)->tpg_get_tag != NULL) + snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", + TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); + + len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" + "alua_tg_pt_status=0x%02x\n", + atomic_read(&port->sep_tg_pt_secondary_offline), + port->sep_tg_pt_secondary_stat); + + snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", + TPG_TFO(se_tpg)->get_fabric_name(), wwn, + port->sep_lun->unpacked_lun); + + return core_alua_write_tpg_metadata(path, md_buf, len); +} + +static int core_alua_set_tg_pt_secondary_state( + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, + struct se_port *port, + int explict, + int offline) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + unsigned char *md_buf; + u32 md_buf_len; + int trans_delay_msecs; + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if (!(tg_pt_gp)) { + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + printk(KERN_ERR "Unable to complete secondary state" + " transition\n"); + return -1; + } + trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; + /* + * Set the secondary ALUA target port access state to OFFLINE + * or release the previously secondary state for struct se_port + */ + if (offline) + atomic_set(&port->sep_tg_pt_secondary_offline, 1); + else + atomic_set(&port->sep_tg_pt_secondary_offline, 0); + + md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; + port->sep_tg_pt_secondary_stat = (explict) ? + ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + + printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" + " to secondary access state: %s\n", (explict) ? "explict" : + "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); + + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + /* + * Do the optional transition delay after we set the secondary + * ALUA access state. 
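+ * (ALUA_DEFAULT_TRANS_DELAY_MSECS is 0, so no delay is taken unless
+ * tg_pt_gp_trans_delay_msecs has been explicitly configured.)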
+ */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (port->sep_tg_pt_secondary_write_md) {
+		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+		if (!(md_buf)) {
+			printk(KERN_ERR "Unable to allocate md_buf for"
+				" secondary ALUA access metadata\n");
+			return -1;
+		}
+		mutex_lock(&port->sep_tg_pt_md_mutex);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+				md_buf, md_buf_len);
+		mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+		kfree(md_buf);
+	}
+
+	return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!(lu_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		se_global->alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once.
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&se_global->lu_gps_lock);
+	if (se_global->alua_lu_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&se_global->lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -1;
+	}
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				se_global->alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!(lu_gp_id))
+				goto again;
+
+			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&se_global->lu_gps_lock);
+			return -1;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+	se_global->alua_lu_gps_count++;
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
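+	 * Outstanding references are then drained via the lu_gp_ref_cnt
+	 * busy-wait below, before each member is moved back to the
+	 * default_lu_gp or detached.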
+ */ + spin_lock(&se_global->lu_gps_lock); + atomic_set(&lu_gp->lu_gp_shutdown, 1); + list_del(&lu_gp->lu_gp_list); + se_global->alua_lu_gps_count--; + spin_unlock(&se_global->lu_gps_lock); + /* + * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() + * in target_core_configfs.c:target_core_store_alua_lu_gp() to be + * released with core_alua_put_lu_gp_from_name() + */ + while (atomic_read(&lu_gp->lu_gp_ref_cnt)) + cpu_relax(); + /* + * Release reference to struct t10_alua_lu_gp * from all associated + * struct se_device. + */ + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, + &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { + if (lu_gp_mem->lu_gp_assoc) { + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp->lu_gp_members--; + lu_gp_mem->lu_gp_assoc = 0; + } + spin_unlock(&lu_gp->lu_gp_lock); + /* + * + * lu_gp_mem is assoicated with a single + * struct se_device->dev_alua_lu_gp_mem, and is released when + * struct se_device is released via core_alua_free_lu_gp_mem(). + * + * If the passed lu_gp does NOT match the default_lu_gp, assume + * we want to re-assocate a given lu_gp_mem with default_lu_gp. + */ + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + if (lu_gp != se_global->default_lu_gp) + __core_alua_attach_lu_gp_mem(lu_gp_mem, + se_global->default_lu_gp); + else + lu_gp_mem->lu_gp = NULL; + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + spin_lock(&lu_gp->lu_gp_lock); + } + spin_unlock(&lu_gp->lu_gp_lock); + + kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); +} + +void core_alua_free_lu_gp_mem(struct se_device *dev) +{ + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem; + + if (alua->alua_type != SPC3_ALUA_EMULATED) + return; + + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!(lu_gp_mem)) + return; + + while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) + cpu_relax(); + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if ((lu_gp)) { + spin_lock(&lu_gp->lu_gp_lock); + if (lu_gp_mem->lu_gp_assoc) { + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp->lu_gp_members--; + lu_gp_mem->lu_gp_assoc = 0; + } + spin_unlock(&lu_gp->lu_gp_lock); + lu_gp_mem->lu_gp = NULL; + } + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); +} + +struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) +{ + struct t10_alua_lu_gp *lu_gp; + struct config_item *ci; + + spin_lock(&se_global->lu_gps_lock); + list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) { + if (!(lu_gp->lu_gp_valid_id)) + continue; + ci = &lu_gp->lu_gp_group.cg_item; + if (!(strcmp(config_item_name(ci), name))) { + atomic_inc(&lu_gp->lu_gp_ref_cnt); + spin_unlock(&se_global->lu_gps_lock); + return lu_gp; + } + } + spin_unlock(&se_global->lu_gps_lock); + + return NULL; +} + +void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&se_global->lu_gps_lock); + atomic_dec(&lu_gp->lu_gp_ref_cnt); + spin_unlock(&se_global->lu_gps_lock); +} + +/* + * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock + */ +void __core_alua_attach_lu_gp_mem( + struct t10_alua_lu_gp_member *lu_gp_mem, + struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&lu_gp->lu_gp_lock); + lu_gp_mem->lu_gp = lu_gp; + lu_gp_mem->lu_gp_assoc = 1; + list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); + lu_gp->lu_gp_members++; + spin_unlock(&lu_gp->lu_gp_lock); +} + +/* + * Called with struct 
t10_alua_lu_gp_member->lu_gp_mem_lock + */ +void __core_alua_drop_lu_gp_mem( + struct t10_alua_lu_gp_member *lu_gp_mem, + struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&lu_gp->lu_gp_lock); + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp_mem->lu_gp = NULL; + lu_gp_mem->lu_gp_assoc = 0; + lu_gp->lu_gp_members--; + spin_unlock(&lu_gp->lu_gp_lock); +} + +struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( + struct se_subsystem_dev *su_dev, + const char *name, + int def_group) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + + tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); + if (!(tg_pt_gp)) { + printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); + return NULL; + } + INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); + INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); + mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); + spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); + atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); + tg_pt_gp->tg_pt_gp_su_dev = su_dev; + tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); + /* + * Enable both explict and implict ALUA support by default + */ + tg_pt_gp->tg_pt_gp_alua_access_type = + TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; + /* + * Set the default Active/NonOptimized Delay in milliseconds + */ + tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; + tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; + + if (def_group) { + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + tg_pt_gp->tg_pt_gp_id = + T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; + tg_pt_gp->tg_pt_gp_valid_id = 1; + T10_ALUA(su_dev)->alua_tg_pt_gps_count++; + list_add_tail(&tg_pt_gp->tg_pt_gp_list, + &T10_ALUA(su_dev)->tg_pt_gps_list); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + } + + return tg_pt_gp; +} + +int core_alua_set_tg_pt_gp_id( + struct t10_alua_tg_pt_gp *tg_pt_gp, + u16 tg_pt_gp_id) +{ + struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; + struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; + u16 tg_pt_gp_id_tmp; + /* + * The tg_pt_gp->tg_pt_gp_id may only be set once.. + */ + if (tg_pt_gp->tg_pt_gp_valid_id) { + printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," + " ignoring request\n"); + return -1; + } + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) { + printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" + " 0x0000ffff reached\n"); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); + return -1; + } +again: + tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? 
tg_pt_gp_id : + T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; + + list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list, + tg_pt_gp_list) { + if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { + if (!(tg_pt_gp_id)) + goto again; + + printk(KERN_ERR "ALUA Target Port Group ID: %hu already" + " exists, ignoring request\n", tg_pt_gp_id); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + return -1; + } + } + + tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; + tg_pt_gp->tg_pt_gp_valid_id = 1; + list_add_tail(&tg_pt_gp->tg_pt_gp_list, + &T10_ALUA(su_dev)->tg_pt_gps_list); + T10_ALUA(su_dev)->alua_tg_pt_gps_count++; + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + + return 0; +} + +struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( + struct se_port *port) +{ + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + + tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, + GFP_KERNEL); + if (!(tg_pt_gp_mem)) { + printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); + spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); + + tg_pt_gp_mem->tg_pt = port; + port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; + atomic_set(&port->sep_tg_pt_gp_active, 1); + + return tg_pt_gp_mem; +} + +void core_alua_free_tg_pt_gp( + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; + /* + * Once we have reached this point, config_item_put() has already + * been called from target_core_alua_drop_tg_pt_gp(). + * + * Here we remove *tg_pt_gp from the global list so that + * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS + * can be made while we are releasing struct t10_alua_tg_pt_gp. + */ + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + list_del(&tg_pt_gp->tg_pt_gp_list); + T10_ALUA(su_dev)->alua_tg_pt_gps_counter--; + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + /* + * Allow a struct t10_alua_tg_pt_gp_member * referenced by + * core_alua_get_tg_pt_gp_by_name() in + * target_core_configfs.c:target_core_store_alua_tg_pt_gp() + * to be released with core_alua_put_tg_pt_gp_from_name(). + */ + while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) + cpu_relax(); + /* + * Release reference to struct t10_alua_tg_pt_gp from all associated + * struct se_port. + */ + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, + &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { + if (tg_pt_gp_mem->tg_pt_gp_assoc) { + list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); + tg_pt_gp->tg_pt_gp_members--; + tg_pt_gp_mem->tg_pt_gp_assoc = 0; + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + /* + * tg_pt_gp_mem is assoicated with a single + * se_port->sep_alua_tg_pt_gp_mem, and is released via + * core_alua_free_tg_pt_gp_mem(). + * + * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, + * assume we want to re-assocate a given tg_pt_gp_mem with + * default_tg_pt_gp. 
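+ * (This mirrors what core_alua_free_lu_gp() does above for ALUA LU
+ * group members.)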
+ */ + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) { + __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, + T10_ALUA(su_dev)->default_tg_pt_gp); + } else + tg_pt_gp_mem->tg_pt_gp = NULL; + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); +} + +void core_alua_free_tg_pt_gp_mem(struct se_port *port) +{ + struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; + struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + + if (alua->alua_type != SPC3_ALUA_EMULATED) + return; + + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!(tg_pt_gp_mem)) + return; + + while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) + cpu_relax(); + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if ((tg_pt_gp)) { + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + if (tg_pt_gp_mem->tg_pt_gp_assoc) { + list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); + tg_pt_gp->tg_pt_gp_members--; + tg_pt_gp_mem->tg_pt_gp_assoc = 0; + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + tg_pt_gp_mem->tg_pt_gp = NULL; + } + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + + kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); +} + +static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( + struct se_subsystem_dev *su_dev, + const char *name) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct config_item *ci; + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, + tg_pt_gp_list) { + if (!(tg_pt_gp->tg_pt_gp_valid_id)) + continue; + ci = &tg_pt_gp->tg_pt_gp_group.cg_item; + if (!(strcmp(config_item_name(ci), name))) { + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + return tg_pt_gp; + } + } + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + + return NULL; +} + +static void core_alua_put_tg_pt_gp_from_name( + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; + + spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); +} + +/* + * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held + */ +void __core_alua_attach_tg_pt_gp_mem( + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; + tg_pt_gp_mem->tg_pt_gp_assoc = 1; + list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, + &tg_pt_gp->tg_pt_gp_mem_list); + tg_pt_gp->tg_pt_gp_members++; + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); +} + +/* + * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held + */ +static void __core_alua_drop_tg_pt_gp_mem( + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); + tg_pt_gp_mem->tg_pt_gp = NULL; + tg_pt_gp_mem->tg_pt_gp_assoc = 0; + tg_pt_gp->tg_pt_gp_members--; + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); +} + +ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) +{ + struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; + struct config_item *tg_pt_ci; + struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua_tg_pt_gp 
*tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + ssize_t len = 0; + + if (alua->alua_type != SPC3_ALUA_EMULATED) + return len; + + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!(tg_pt_gp_mem)) + return len; + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if ((tg_pt_gp)) { + tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; + len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" + " %hu\nTG Port Primary Access State: %s\nTG Port " + "Primary Access Status: %s\nTG Port Secondary Access" + " State: %s\nTG Port Secondary Access Status: %s\n", + config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(atomic_read( + &tg_pt_gp->tg_pt_gp_alua_access_state)), + core_alua_dump_status( + tg_pt_gp->tg_pt_gp_alua_access_status), + (atomic_read(&port->sep_tg_pt_secondary_offline)) ? + "Offline" : "None", + core_alua_dump_status(port->sep_tg_pt_secondary_stat)); + } + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + + return len; +} + +ssize_t core_alua_store_tg_pt_gp_info( + struct se_port *port, + const char *page, + size_t count) +{ + struct se_portal_group *tpg; + struct se_lun *lun; + struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; + struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + unsigned char buf[TG_PT_GROUP_NAME_BUF]; + int move = 0; + + tpg = port->sep_tpg; + lun = port->sep_lun; + + if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { + printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" + " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg), + TPG_TFO(tpg)->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item)); + return -EINVAL; + } + + if (count > TG_PT_GROUP_NAME_BUF) { + printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); + return -EINVAL; + } + memset(buf, 0, TG_PT_GROUP_NAME_BUF); + memcpy(buf, page, count); + /* + * Any ALUA target port group alias besides "NULL" means we will be + * making a new group association. + */ + if (strcmp(strstrip(buf), "NULL")) { + /* + * core_alua_get_tg_pt_gp_by_name() will increment reference to + * struct t10_alua_tg_pt_gp. This reference is released with + * core_alua_put_tg_pt_gp_from_name() below. + */ + tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, + strstrip(buf)); + if (!(tg_pt_gp_new)) + return -ENODEV; + } + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!(tg_pt_gp_mem)) { + if (tg_pt_gp_new) + core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); + printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); + return -EINVAL; + } + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if ((tg_pt_gp)) { + /* + * Clearing an existing tg_pt_gp association, and replacing + * with the default_tg_pt_gp. 
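+ * (This is the path taken when "NULL" was written to the configfs
+ * attribute, i.e. tg_pt_gp_new is NULL here.)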
+ */ + if (!(tg_pt_gp_new)) { + printk(KERN_INFO "Target_Core_ConfigFS: Moving" + " %s/tpgt_%hu/%s from ALUA Target Port Group:" + " alua/%s, ID: %hu back to" + " default_tg_pt_gp\n", + TPG_TFO(tpg)->tpg_get_wwn(tpg), + TPG_TFO(tpg)->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item), + config_item_name( + &tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id); + + __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); + __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, + T10_ALUA(su_dev)->default_tg_pt_gp); + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + + return count; + } + /* + * Removing existing association of tg_pt_gp_mem with tg_pt_gp + */ + __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); + move = 1; + } + /* + * Associate tg_pt_gp_mem with tg_pt_gp_new. + */ + __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" + " Target Port Group: alua/%s, ID: %hu\n", (move) ? + "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg), + TPG_TFO(tpg)->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item), + config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), + tg_pt_gp_new->tg_pt_gp_id); + + core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); + return count; +} + +ssize_t core_alua_show_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && + (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) + return sprintf(page, "Implict and Explict\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) + return sprintf(page, "Implict\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) + return sprintf(page, "Explict\n"); + else + return sprintf(page, "None\n"); +} + +ssize_t core_alua_store_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract alua_access_type\n"); + return -EINVAL; + } + if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { + printk(KERN_ERR "Illegal value for alua_access_type:" + " %lu\n", tmp); + return -EINVAL; + } + if (tmp == 3) + tg_pt_gp->tg_pt_gp_alua_access_type = + TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; + else if (tmp == 2) + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; + else if (tmp == 1) + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; + else + tg_pt_gp->tg_pt_gp_alua_access_type = 0; + + return count; +} + +ssize_t core_alua_show_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); +} + +ssize_t core_alua_store_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); + return -EINVAL; + } + if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { + printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" + " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, + ALUA_MAX_NONOP_DELAY_MSECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; + + return count; +} + +ssize_t core_alua_show_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); +} + +ssize_t 
core_alua_store_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); + return -EINVAL; + } + if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { + printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" + " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, + ALUA_MAX_TRANS_DELAY_MSECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; + + return count; +} + +ssize_t core_alua_show_preferred_bit( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); +} + +ssize_t core_alua_store_preferred_bit( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract preferred ALUA value\n"); + return -EINVAL; + } + if ((tmp != 0) && (tmp != 1)) { + printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_pref = (int)tmp; + + return count; +} + +ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) +{ + if (!(lun->lun_sep)) + return -ENODEV; + + return sprintf(page, "%d\n", + atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); +} + +ssize_t core_alua_store_offline_bit( + struct se_lun *lun, + const char *page, + size_t count) +{ + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + unsigned long tmp; + int ret; + + if (!(lun->lun_sep)) + return -ENODEV; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); + return -EINVAL; + } + if ((tmp != 0) && (tmp != 1)) { + printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", + tmp); + return -EINVAL; + } + tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; + if (!(tg_pt_gp_mem)) { + printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); + return -EINVAL; + } + + ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, + lun->lun_sep, 0, (int)tmp); + if (ret < 0) + return -EINVAL; + + return count; +} + +ssize_t core_alua_show_secondary_status( + struct se_lun *lun, + char *page) +{ + return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); +} + +ssize_t core_alua_store_secondary_status( + struct se_lun *lun, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); + return -EINVAL; + } + if ((tmp != ALUA_STATUS_NONE) && + (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && + (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", + tmp); + return -EINVAL; + } + lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; + + return count; +} + +ssize_t core_alua_show_secondary_write_metadata( + struct se_lun *lun, + char *page) +{ + return sprintf(page, "%d\n", + lun->lun_sep->sep_tg_pt_secondary_write_md); +} + +ssize_t core_alua_store_secondary_write_metadata( + struct se_lun *lun, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); + return -EINVAL; + } + if ((tmp != 0) && (tmp != 1)) { + printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" + " %lu\n", tmp); + return -EINVAL; + } + 
lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; + + return count; +} + +int core_setup_alua(struct se_device *dev, int force_pt) +{ + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua_lu_gp_member *lu_gp_mem; + /* + * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic + * of the Underlying SCSI hardware. In Linux/SCSI terms, this can + * cause a problem because libata and some SATA RAID HBAs appear + * under Linux/SCSI, but emulate SCSI logic themselves. + */ + if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && + !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) { + alua->alua_type = SPC_ALUA_PASSTHROUGH; + alua->alua_state_check = &core_alua_state_check_nop; + printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" + " emulation\n", TRANSPORT(dev)->name); + return 0; + } + /* + * If SPC-3 or above is reported by real or emulated struct se_device, + * use emulated ALUA. + */ + if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { + printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" + " device\n", TRANSPORT(dev)->name); + /* + * Assoicate this struct se_device with the default ALUA + * LUN Group. + */ + lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); + if (IS_ERR(lu_gp_mem) || !lu_gp_mem) + return -1; + + alua->alua_type = SPC3_ALUA_EMULATED; + alua->alua_state_check = &core_alua_state_check; + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + __core_alua_attach_lu_gp_mem(lu_gp_mem, + se_global->default_lu_gp); + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + printk(KERN_INFO "%s: Adding to default ALUA LU Group:" + " core/alua/lu_gps/default_lu_gp\n", + TRANSPORT(dev)->name); + } else { + alua->alua_type = SPC2_ALUA_DISABLED; + alua->alua_state_check = &core_alua_state_check_nop; + printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" + " device\n", TRANSPORT(dev)->name); + } + + return 0; +} diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h new file mode 100644 index 000000000000..c86f97a081ed --- /dev/null +++ b/drivers/target/target_core_alua.h @@ -0,0 +1,126 @@ +#ifndef TARGET_CORE_ALUA_H +#define TARGET_CORE_ALUA_H + +/* + * INQUIRY response data, TPGS Field + * + * from spc4r17 section 6.4.2 Table 135 + */ +#define TPGS_NO_ALUA 0x00 +#define TPGS_IMPLICT_ALUA 0x10 +#define TPGS_EXPLICT_ALUA 0x20 + +/* + * ASYMMETRIC ACCESS STATE field + * + * from spc4r17 section 6.27 Table 245 + */ +#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 +#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 +#define ALUA_ACCESS_STATE_STANDBY 0x2 +#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 +#define ALUA_ACCESS_STATE_OFFLINE 0xe +#define ALUA_ACCESS_STATE_TRANSITION 0xf + +/* + * REPORT_TARGET_PORT_GROUP STATUS CODE + * + * from spc4r17 section 6.27 Table 246 + */ +#define ALUA_STATUS_NONE 0x00 +#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 +#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 + +/* + * From spc4r17, Table D.1: ASC and ASCQ Assignement + */ +#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a +#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b +#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c +#define ASCQ_04H_ALUA_OFFLINE 0x12 + +/* + * Used as the default for Active/NonOptimized delay (in milliseconds) + * This can also be changed via configfs on a per target port group basis.. 
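+ * The delay is applied by core_alua_check_nonop_delay() to commands
+ * flagged SCF_ALUA_NON_OPTIMIZED; values larger than
+ * ALUA_MAX_NONOP_DELAY_MSECS are rejected by
+ * core_alua_store_nonop_delay_msecs().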
+ */ +#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 +#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ +/* + * Used for implict and explict ALUA transitional delay, that is disabled + * by default, and is intended to be used for debugging client side ALUA code. + */ +#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 +#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ +/* + * Used by core_alua_update_tpg_primary_metadata() and + * core_alua_update_tpg_secondary_metadata() + */ +#define ALUA_METADATA_PATH_LEN 512 +/* + * Used by core_alua_update_tpg_secondary_metadata() + */ +#define ALUA_SECONDARY_METADATA_WWN_LEN 256 + +extern struct kmem_cache *t10_alua_lu_gp_cache; +extern struct kmem_cache *t10_alua_lu_gp_mem_cache; +extern struct kmem_cache *t10_alua_tg_pt_gp_cache; +extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; + +extern int core_emulate_report_target_port_groups(struct se_cmd *); +extern int core_emulate_set_target_port_groups(struct se_cmd *); +extern int core_alua_check_nonop_delay(struct se_cmd *); +extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, + struct se_device *, struct se_port *, + struct se_node_acl *, int, int); +extern char *core_alua_dump_status(int); +extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); +extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); +extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); +extern void core_alua_free_lu_gp_mem(struct se_device *); +extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *); +extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *); +extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *, + struct t10_alua_lu_gp *); +extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *, + struct t10_alua_lu_gp *); +extern void core_alua_drop_lu_gp_dev(struct se_device *); +extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( + struct se_subsystem_dev *, const char *, int); +extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); +extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( + struct se_port *); +extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); +extern void core_alua_free_tg_pt_gp_mem(struct se_port *); +extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *, + struct t10_alua_tg_pt_gp *); +extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *); +extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *, + size_t); +extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); +extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *); +extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *, + size_t); +extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *); +extern ssize_t 
core_alua_store_secondary_status(struct se_lun *, + const char *, size_t); +extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *, + char *); +extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *, + const char *, size_t); +extern int core_setup_alua(struct se_device *, int); + +#endif /* TARGET_CORE_ALUA_H */ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c new file mode 100644 index 000000000000..366080baf474 --- /dev/null +++ b/drivers/target/target_core_cdb.c @@ -0,0 +1,1131 @@ +/* + * CDB emulation for non-READ/WRITE commands. + * + * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include + +#include +#include +#include +#include "target_core_ua.h" + +static void +target_fill_alua_data(struct se_port *port, unsigned char *buf) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + + /* + * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. + */ + buf[5] = 0x80; + + /* + * Set TPGS field for explict and/or implict ALUA access type + * and opteration. + * + * See spc4r17 section 6.4.2 Table 135 + */ + if (!port) + return; + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!tg_pt_gp_mem) + return; + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if (tg_pt_gp) + buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); +} + +static int +target_emulate_inquiry_std(struct se_cmd *cmd) +{ + struct se_lun *lun = SE_LUN(cmd); + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf; + + /* + * Make sure we at least have 6 bytes of INQUIRY response + * payload going back for EVPD=0 + */ + if (cmd->data_length < 6) { + printk(KERN_ERR "SCSI Inquiry payload length: %u" + " too small for EVPD=0\n", cmd->data_length); + return -1; + } + + buf[0] = dev->transport->get_device_type(dev); + if (buf[0] == TYPE_TAPE) + buf[1] = 0x80; + buf[2] = dev->transport->get_device_rev(dev); + + /* + * Enable SCCS and TPGS fields for Emulated ALUA + */ + if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED) + target_fill_alua_data(lun->lun_sep, buf); + + if (cmd->data_length < 8) { + buf[4] = 1; /* Set additional length to 1 */ + return 0; + } + + buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ + + /* + * Do not include vendor, product, reversion info in INQUIRY + * response payload for cdbs with a small allocation length. 
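+ * (Those fields live at bytes 8, 16 and 32 of the standard INQUIRY
+ * response, so a response shorter than 36 bytes cannot carry them.)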
+ */ + if (cmd->data_length < 36) { + buf[4] = 3; /* Set additional length to 3 */ + return 0; + } + + snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); + snprintf((unsigned char *)&buf[16], 16, "%s", + &DEV_T10_WWN(dev)->model[0]); + snprintf((unsigned char *)&buf[32], 4, "%s", + &DEV_T10_WWN(dev)->revision[0]); + buf[4] = 31; /* Set additional length to 31 */ + return 0; +} + +/* supported vital product data pages */ +static int +target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) +{ + buf[1] = 0x00; + if (cmd->data_length < 8) + return 0; + + buf[4] = 0x0; + /* + * Only report the INQUIRY EVPD=1 pages after a valid NAA + * Registered Extended LUN WWN has been set via ConfigFS + * during device creation/restart. + */ + if (SE_DEV(cmd)->se_sub_dev->su_dev_flags & + SDF_EMULATED_VPD_UNIT_SERIAL) { + buf[3] = 3; + buf[5] = 0x80; + buf[6] = 0x83; + buf[7] = 0x86; + } + + return 0; +} + +/* unit serial number */ +static int +target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = SE_DEV(cmd); + u16 len = 0; + + buf[1] = 0x80; + if (dev->se_sub_dev->su_dev_flags & + SDF_EMULATED_VPD_UNIT_SERIAL) { + u32 unit_serial_len; + + unit_serial_len = + strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + unit_serial_len++; /* For NULL Terminator */ + + if (((len + 4) + unit_serial_len) > cmd->data_length) { + len += unit_serial_len; + buf[2] = ((len >> 8) & 0xff); + buf[3] = (len & 0xff); + return 0; + } + len += sprintf((unsigned char *)&buf[4], "%s", + &DEV_T10_WWN(dev)->unit_serial[0]); + len++; /* Extra Byte for NULL Terminator */ + buf[3] = len; + } + return 0; +} + +/* + * Device identification VPD, for a complete list of + * DESIGNATOR TYPEs see spc4r17 Table 459. + */ +static int +target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_lun *lun = SE_LUN(cmd); + struct se_port *port = NULL; + struct se_portal_group *tpg = NULL; + struct t10_alua_lu_gp_member *lu_gp_mem; + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + unsigned char binary, binary_new; + unsigned char *prod = &DEV_T10_WWN(dev)->model[0]; + u32 prod_len; + u32 unit_serial_len, off = 0; + int i; + u16 len = 0, id_len; + + buf[1] = 0x83; + off = 4; + + /* + * NAA IEEE Registered Extended Assigned designator format, see + * spc4r17 section 7.7.3.6.5 + * + * We depend upon a target_core_mod/ConfigFS provided + * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial + * value in order to return the NAA id. 
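+ * Without SDF_EMULATED_VPD_UNIT_SERIAL the NAA descriptor is skipped
+ * and only the T10 Vendor Identification descriptor below is built.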
+ */ + if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) + goto check_t10_vend_desc; + + if (off + 20 > cmd->data_length) + goto check_t10_vend_desc; + + /* CODE SET == Binary */ + buf[off++] = 0x1; + + /* Set ASSOICATION == addressed logical unit: 0)b */ + buf[off] = 0x00; + + /* Identifier/Designator type == NAA identifier */ + buf[off++] = 0x3; + off++; + + /* Identifier/Designator length */ + buf[off++] = 0x10; + + /* + * Start NAA IEEE Registered Extended Identifier/Designator + */ + buf[off++] = (0x6 << 4); + + /* + * Use OpenFabrics IEEE Company ID: 00 14 05 + */ + buf[off++] = 0x01; + buf[off++] = 0x40; + buf[off] = (0x5 << 4); + + /* + * Return ConfigFS Unit Serial Number information for + * VENDOR_SPECIFIC_IDENTIFIER and + * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION + */ + binary = transport_asciihex_to_binaryhex( + &DEV_T10_WWN(dev)->unit_serial[0]); + buf[off++] |= (binary & 0xf0) >> 4; + for (i = 0; i < 24; i += 2) { + binary_new = transport_asciihex_to_binaryhex( + &DEV_T10_WWN(dev)->unit_serial[i+2]); + buf[off] = (binary & 0x0f) << 4; + buf[off++] |= (binary_new & 0xf0) >> 4; + binary = binary_new; + } + len = 20; + off = (len + 4); + +check_t10_vend_desc: + /* + * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 + */ + id_len = 8; /* For Vendor field */ + prod_len = 4; /* For VPD Header */ + prod_len += 8; /* For Vendor field */ + prod_len += strlen(prod); + prod_len++; /* For : */ + + if (dev->se_sub_dev->su_dev_flags & + SDF_EMULATED_VPD_UNIT_SERIAL) { + unit_serial_len = + strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + unit_serial_len++; /* For NULL Terminator */ + + if ((len + (id_len + 4) + + (prod_len + unit_serial_len)) > + cmd->data_length) { + len += (prod_len + unit_serial_len); + goto check_port; + } + id_len += sprintf((unsigned char *)&buf[off+12], + "%s:%s", prod, + &DEV_T10_WWN(dev)->unit_serial[0]); + } + buf[off] = 0x2; /* ASCII */ + buf[off+1] = 0x1; /* T10 Vendor ID */ + buf[off+2] = 0x0; + memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); + /* Extra Byte for NULL Terminator */ + id_len++; + /* Identifier Length */ + buf[off+3] = id_len; + /* Header size for Designation descriptor */ + len += (id_len + 4); + off += (id_len + 4); + /* + * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD + */ +check_port: + port = lun->lun_sep; + if (port) { + struct t10_alua_lu_gp *lu_gp; + u32 padding, scsi_name_len; + u16 lu_gp_id = 0; + u16 tg_pt_gp_id = 0; + u16 tpgt; + + tpg = port->sep_tpg; + /* + * Relative target port identifer, see spc4r17 + * section 7.7.3.7 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ + if (((len + 4) + 8) > cmd->data_length) { + len += 8; + goto check_tpgi; + } + buf[off] = + (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x1; /* CODE SET == Binary */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOICATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == Relative target port identifer */ + buf[off++] |= 0x4; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + /* Skip over Obsolete field in RTPI payload + * in Table 472 */ + off += 2; + buf[off++] = ((port->sep_rtpi >> 8) & 0xff); + buf[off++] = (port->sep_rtpi & 0xff); + len += 8; /* Header size + Designation descriptor */ + /* + * Target port group identifier, see spc4r17 + * section 7.7.3.8 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ +check_tpgi: + if (T10_ALUA(dev->se_sub_dev)->alua_type != + 
SPC3_ALUA_EMULATED) + goto check_scsi_name; + + if (((len + 4) + 8) > cmd->data_length) { + len += 8; + goto check_lu_gp; + } + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!tg_pt_gp_mem) + goto check_lu_gp; + + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if (!(tg_pt_gp)) { + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + goto check_lu_gp; + } + tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + + buf[off] = + (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x1; /* CODE SET == Binary */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOICATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == Target port group identifier */ + buf[off++] |= 0x5; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + off += 2; /* Skip over Reserved Field */ + buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); + buf[off++] = (tg_pt_gp_id & 0xff); + len += 8; /* Header size + Designation descriptor */ + /* + * Logical Unit Group identifier, see spc4r17 + * section 7.7.3.8 + */ +check_lu_gp: + if (((len + 4) + 8) > cmd->data_length) { + len += 8; + goto check_scsi_name; + } + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!(lu_gp_mem)) + goto check_scsi_name; + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if (!(lu_gp)) { + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + goto check_scsi_name; + } + lu_gp_id = lu_gp->lu_gp_id; + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + buf[off++] |= 0x1; /* CODE SET == Binary */ + /* DESIGNATOR TYPE == Logical Unit Group identifier */ + buf[off++] |= 0x6; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + off += 2; /* Skip over Reserved Field */ + buf[off++] = ((lu_gp_id >> 8) & 0xff); + buf[off++] = (lu_gp_id & 0xff); + len += 8; /* Header size + Designation descriptor */ + /* + * SCSI name string designator, see spc4r17 + * section 7.7.3.11 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ +check_scsi_name: + scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg)); + /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ + scsi_name_len += 10; + /* Check for 4-byte padding */ + padding = ((-scsi_name_len) & 3); + if (padding != 0) + scsi_name_len += padding; + /* Header size + Designation descriptor */ + scsi_name_len += 4; + + if (((len + 4) + scsi_name_len) > cmd->data_length) { + len += scsi_name_len; + goto set_len; + } + buf[off] = + (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOICATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifer containing, $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means ",t,0x in + * UTF-8 encoding. + */ + tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", + TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt); + scsi_name_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. 
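+ * Hence the 4-byte padding computed above before this descriptor
+ * is emitted.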
+ */ + if (padding) + scsi_name_len += padding; + + buf[off-1] = scsi_name_len; + off += scsi_name_len; + /* Header size + Designation descriptor */ + len += (scsi_name_len + 4); + } +set_len: + buf[2] = ((len >> 8) & 0xff); + buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ + return 0; +} + +/* Extended INQUIRY Data VPD Page */ +static int +target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) +{ + if (cmd->data_length < 60) + return 0; + + buf[1] = 0x86; + buf[2] = 0x3c; + /* Set HEADSUP, ORDSUP, SIMPSUP */ + buf[5] = 0x07; + + /* If WriteCache emulation is enabled, set V_SUP */ + if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0) + buf[6] = 0x01; + return 0; +} + +/* Block Limits VPD page */ +static int +target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = SE_DEV(cmd); + int have_tp = 0; + + /* + * Following sbc3r22 section 6.5.3 Block Limits VPD page, when + * emulate_tpu=1 or emulate_tpws=1 we will be expect a + * different page length for Thin Provisioning. + */ + if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + have_tp = 1; + + if (cmd->data_length < (0x10 + 4)) { + printk(KERN_INFO "Received data_length: %u" + " too small for EVPD 0xb0\n", + cmd->data_length); + return -1; + } + + if (have_tp && cmd->data_length < (0x3c + 4)) { + printk(KERN_INFO "Received data_length: %u" + " too small for TPE=1 EVPD 0xb0\n", + cmd->data_length); + have_tp = 0; + } + + buf[0] = dev->transport->get_device_type(dev); + buf[1] = 0xb0; + buf[3] = have_tp ? 0x3c : 0x10; + + /* + * Set OPTIMAL TRANSFER LENGTH GRANULARITY + */ + put_unaligned_be16(1, &buf[6]); + + /* + * Set MAXIMUM TRANSFER LENGTH + */ + put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); + + /* + * Set OPTIMAL TRANSFER LENGTH + */ + put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); + + /* + * Exit now if we don't support TP or the initiator sent a too + * short buffer. + */ + if (!have_tp || cmd->data_length < (0x3c + 4)) + return 0; + + /* + * Set MAXIMUM UNMAP LBA COUNT + */ + put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); + + /* + * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT + */ + put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, + &buf[24]); + + /* + * Set OPTIMAL UNMAP GRANULARITY + */ + put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); + + /* + * UNMAP GRANULARITY ALIGNMENT + */ + put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, + &buf[32]); + if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) + buf[32] |= 0x80; /* Set the UGAVALID bit */ + + return 0; +} + +/* Thin Provisioning VPD */ +static int +target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = SE_DEV(cmd); + + /* + * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: + * + * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to + * zero, then the page length shall be set to 0004h. If the DP bit + * is set to one, then the page length shall be set to the value + * defined in table 162. + */ + buf[0] = dev->transport->get_device_type(dev); + buf[1] = 0xb2; + + /* + * Set Hardcoded length mentioned above for DP=0 + */ + put_unaligned_be16(0x0004, &buf[2]); + + /* + * The THRESHOLD EXPONENT field indicates the threshold set size in + * LBAs as a power of 2 (i.e., the threshold set size is equal to + * 2(threshold exponent)). + * + * Note that this is currently set to 0x00 as mkp says it will be + * changing again. 
We can enable this once it has settled in T10 + * and is actually used by Linux/SCSI ML code. + */ + buf[4] = 0x00; + + /* + * A TPU bit set to one indicates that the device server supports + * the UNMAP command (see 5.25). A TPU bit set to zero indicates + * that the device server does not support the UNMAP command. + */ + if (DEV_ATTRIB(dev)->emulate_tpu != 0) + buf[5] = 0x80; + + /* + * A TPWS bit set to one indicates that the device server supports + * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs. + * A TPWS bit set to zero indicates that the device server does not + * support the use of the WRITE SAME (16) command to unmap LBAs. + */ + if (DEV_ATTRIB(dev)->emulate_tpws != 0) + buf[5] |= 0x40; + + return 0; +} + +static int +target_emulate_inquiry(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf; + unsigned char *cdb = cmd->t_task->t_task_cdb; + + if (!(cdb[1] & 0x1)) + return target_emulate_inquiry_std(cmd); + + /* + * Make sure we at least have 4 bytes of INQUIRY response + * payload for 0x00 going back for EVPD=1. Note that 0x80 + * and 0x83 will check for enough payload data length and + * jump to set_len: label when there is not enough inquiry EVPD + * payload length left for the next outgoing EVPD metadata + */ + if (cmd->data_length < 4) { + printk(KERN_ERR "SCSI Inquiry payload length: %u" + " too small for EVPD=1\n", cmd->data_length); + return -1; + } + buf[0] = dev->transport->get_device_type(dev); + + switch (cdb[2]) { + case 0x00: + return target_emulate_evpd_00(cmd, buf); + case 0x80: + return target_emulate_evpd_80(cmd, buf); + case 0x83: + return target_emulate_evpd_83(cmd, buf); + case 0x86: + return target_emulate_evpd_86(cmd, buf); + case 0xb0: + return target_emulate_evpd_b0(cmd, buf); + case 0xb2: + return target_emulate_evpd_b2(cmd, buf); + default: + printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); + return -1; + } + + return 0; +} + +static int +target_emulate_readcapacity(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf; + u32 blocks = dev->transport->get_blocks(dev); + + buf[0] = (blocks >> 24) & 0xff; + buf[1] = (blocks >> 16) & 0xff; + buf[2] = (blocks >> 8) & 0xff; + buf[3] = blocks & 0xff; + buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; + buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; + buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; + buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; + /* + * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 + */ + if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + put_unaligned_be32(0xFFFFFFFF, &buf[0]); + + return 0; +} + +static int +target_emulate_readcapacity_16(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf; + unsigned long long blocks = dev->transport->get_blocks(dev); + + buf[0] = (blocks >> 56) & 0xff; + buf[1] = (blocks >> 48) & 0xff; + buf[2] = (blocks >> 40) & 0xff; + buf[3] = (blocks >> 32) & 0xff; + buf[4] = (blocks >> 24) & 0xff; + buf[5] = (blocks >> 16) & 0xff; + buf[6] = (blocks >> 8) & 0xff; + buf[7] = blocks & 0xff; + buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; + buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; + buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; + buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; + /* + * Set Thin Provisioning Enable bit following sbc3r22 in section + * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws 
is enabled. + */ + if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + buf[14] = 0x80; + + return 0; +} + +static int +target_modesense_rwrecovery(unsigned char *p) +{ + p[0] = 0x01; + p[1] = 0x0a; + + return 12; +} + +static int +target_modesense_control(struct se_device *dev, unsigned char *p) +{ + p[0] = 0x0a; + p[1] = 0x0a; + p[2] = 2; + /* + * From spc4r17, section 7.4.6 Control mode Page + * + * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b + * + * 00b: The logical unit shall clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall not establish a unit attention condition when a com- + * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT + * status. + * + * 10b: The logical unit shall not clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall not establish a unit attention condition when + * a command is completed with BUSY, TASK SET FULL, or RESERVATION + * CONFLICT status. + * + * 11b a The logical unit shall not clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall establish a unit attention condition for the + * initiator port associated with the I_T nexus on which the BUSY, + * TASK SET FULL, or RESERVATION CONFLICT status is being returned. + * Depending on the status, the additional sense code shall be set to + * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS + * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE + * command, a unit attention condition shall be established only once + * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless + * to the number of commands completed with one of those status codes. + */ + p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : + (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; + /* + * From spc4r17, section 7.4.6 Control mode Page + * + * Task Aborted Status (TAS) bit set to zero. + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; + p[8] = 0xff; + p[9] = 0xff; + p[11] = 30; + + return 12; +} + +static int +target_modesense_caching(struct se_device *dev, unsigned char *p) +{ + p[0] = 0x08; + p[1] = 0x12; + if (DEV_ATTRIB(dev)->emulate_write_cache > 0) + p[2] = 0x04; /* Write Cache Enable */ + p[12] = 0x20; /* Disabled Read Ahead */ + + return 20; +} + +static void +target_modesense_write_protect(unsigned char *buf, int type) +{ + /* + * I believe that the WP bit (bit 7) in the mode header is the same for + * all device types.. 
+ */ + switch (type) { + case TYPE_DISK: + case TYPE_TAPE: + default: + buf[0] |= 0x80; /* WP bit */ + break; + } +} + +static void +target_modesense_dpofua(unsigned char *buf, int type) +{ + switch (type) { + case TYPE_DISK: + buf[0] |= 0x10; /* DPOFUA bit */ + break; + default: + break; + } +} + +static int +target_emulate_modesense(struct se_cmd *cmd, int ten) +{ + struct se_device *dev = SE_DEV(cmd); + char *cdb = cmd->t_task->t_task_cdb; + unsigned char *rbuf = cmd->t_task->t_task_buf; + int type = dev->transport->get_device_type(dev); + int offset = (ten) ? 8 : 4; + int length = 0; + unsigned char buf[SE_MODE_PAGE_BUF]; + + memset(buf, 0, SE_MODE_PAGE_BUF); + + switch (cdb[2] & 0x3f) { + case 0x01: + length = target_modesense_rwrecovery(&buf[offset]); + break; + case 0x08: + length = target_modesense_caching(dev, &buf[offset]); + break; + case 0x0a: + length = target_modesense_control(dev, &buf[offset]); + break; + case 0x3f: + length = target_modesense_rwrecovery(&buf[offset]); + length += target_modesense_caching(dev, &buf[offset+length]); + length += target_modesense_control(dev, &buf[offset+length]); + break; + default: + printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", + cdb[2] & 0x3f); + return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; + } + offset += length; + + if (ten) { + offset -= 2; + buf[0] = (offset >> 8) & 0xff; + buf[1] = offset & 0xff; + + if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + (cmd->se_deve && + (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) + target_modesense_write_protect(&buf[3], type); + + if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && + (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + target_modesense_dpofua(&buf[3], type); + + if ((offset + 2) > cmd->data_length) + offset = cmd->data_length; + + } else { + offset -= 1; + buf[0] = offset & 0xff; + + if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + (cmd->se_deve && + (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) + target_modesense_write_protect(&buf[2], type); + + if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && + (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + target_modesense_dpofua(&buf[2], type); + + if ((offset + 1) > cmd->data_length) + offset = cmd->data_length; + } + memcpy(rbuf, buf, offset); + + return 0; +} + +static int +target_emulate_request_sense(struct se_cmd *cmd) +{ + unsigned char *cdb = cmd->t_task->t_task_cdb; + unsigned char *buf = cmd->t_task->t_task_buf; + u8 ua_asc = 0, ua_ascq = 0; + + if (cdb[1] & 0x01) { + printk(KERN_ERR "REQUEST_SENSE description emulation not" + " supported\n"); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { + /* + * CURRENT ERROR, UNIT ATTENTION + */ + buf[0] = 0x70; + buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; + /* + * Make sure request data length is enough for additional + * sense data. + */ + if (cmd->data_length <= 18) { + buf[7] = 0x00; + return 0; + } + /* + * The Additional Sense Code (ASC) from the UNIT ATTENTION + */ + buf[SPC_ASC_KEY_OFFSET] = ua_asc; + buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq; + buf[7] = 0x0A; + } else { + /* + * CURRENT ERROR, NO SENSE + */ + buf[0] = 0x70; + buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; + /* + * Make sure request data length is enough for additional + * sense data. 
+ */ + if (cmd->data_length <= 18) { + buf[7] = 0x00; + return 0; + } + /* + * NO ADDITIONAL SENSE INFORMATION + */ + buf[SPC_ASC_KEY_OFFSET] = 0x00; + buf[7] = 0x0A; + } + + return 0; +} + +/* + * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. + * Note this is not used for TCM/pSCSI passthrough + */ +static int +target_emulate_unmap(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct se_device *dev = SE_DEV(cmd); + unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + sector_t lba; + unsigned int size = cmd->data_length, range; + int ret, offset; + unsigned short dl, bd_dl; + + /* First UNMAP block descriptor starts at 8 byte offset */ + offset = 8; + size -= 8; + dl = get_unaligned_be16(&cdb[0]); + bd_dl = get_unaligned_be16(&cdb[2]); + ptr = &buf[offset]; + printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" + " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); + + while (size) { + lba = get_unaligned_be64(&ptr[0]); + range = get_unaligned_be32(&ptr[8]); + printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", + (unsigned long long)lba, range); + + ret = dev->transport->do_discard(dev, lba, range); + if (ret < 0) { + printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", + ret); + return -1; + } + + ptr += 16; + size -= 16; + } + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + return 0; +} + +/* + * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. + * Note this is not used for TCM/pSCSI passthrough + */ +static int +target_emulate_write_same(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct se_device *dev = SE_DEV(cmd); + sector_t lba = cmd->t_task->t_task_lba; + unsigned int range; + int ret; + + range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); + + printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", + (unsigned long long)lba, range); + + ret = dev->transport->do_discard(dev, lba, range); + if (ret < 0) { + printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); + return -1; + } + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + return 0; +} + +int +transport_emulate_control_cdb(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct se_device *dev = SE_DEV(cmd); + unsigned short service_action; + int ret = 0; + + switch (cmd->t_task->t_task_cdb[0]) { + case INQUIRY: + ret = target_emulate_inquiry(cmd); + break; + case READ_CAPACITY: + ret = target_emulate_readcapacity(cmd); + break; + case MODE_SENSE: + ret = target_emulate_modesense(cmd, 0); + break; + case MODE_SENSE_10: + ret = target_emulate_modesense(cmd, 1); + break; + case SERVICE_ACTION_IN: + switch (cmd->t_task->t_task_cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + ret = target_emulate_readcapacity_16(cmd); + break; + default: + printk(KERN_ERR "Unsupported SA: 0x%02x\n", + cmd->t_task->t_task_cdb[1] & 0x1f); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + break; + case REQUEST_SENSE: + ret = target_emulate_request_sense(cmd); + break; + case UNMAP: + if (!dev->transport->do_discard) { + printk(KERN_ERR "UNMAP emulation not supported for: %s\n", + dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + ret = target_emulate_unmap(task); + break; + case WRITE_SAME_16: + if (!dev->transport->do_discard) { + printk(KERN_ERR "WRITE_SAME_16 emulation not supported" + " for: %s\n", dev->transport->name); + return 
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + ret = target_emulate_write_same(task); + break; + case VARIABLE_LENGTH_CMD: + service_action = + get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); + switch (service_action) { + case WRITE_SAME_32: + if (!dev->transport->do_discard) { + printk(KERN_ERR "WRITE_SAME_32 SA emulation not" + " supported for: %s\n", + dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + ret = target_emulate_write_same(task); + break; + default: + printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" + " 0x%02x\n", service_action); + break; + } + break; + case SYNCHRONIZE_CACHE: + case 0x91: /* SYNCHRONIZE_CACHE_16: */ + if (!dev->transport->do_sync_cache) { + printk(KERN_ERR + "SYNCHRONIZE_CACHE emulation not supported" + " for: %s\n", dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + dev->transport->do_sync_cache(task); + break; + case ALLOW_MEDIUM_REMOVAL: + case ERASE: + case REZERO_UNIT: + case SEEK_10: + case SPACE: + case START_STOP: + case TEST_UNIT_READY: + case VERIFY: + case WRITE_FILEMARKS: + break; + default: + printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", + cmd->t_task->t_task_cdb[0], dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + + if (ret < 0) + return ret; + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c new file mode 100644 index 000000000000..2764510798b0 --- /dev/null +++ b/drivers/target/target_core_configfs.c @@ -0,0 +1,3225 @@ +/******************************************************************************* + * Filename: target_core_configfs.c + * + * This file contains ConfigFS logic for the Generic Target Engine project. + * + * Copyright (c) 2008-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * based on configfs Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_pr.h" +#include "target_core_rd.h" + +static struct list_head g_tf_list; +static struct mutex g_tf_lock; + +struct target_core_configfs_attribute { + struct configfs_attribute attr; + ssize_t (*show)(void *, char *); + ssize_t (*store)(void *, const char *, size_t); +}; + +static inline struct se_hba * +item_to_hba(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_hba, hba_group); +} + +/* + * Attributes for /sys/kernel/config/target/ + */ +static ssize_t target_core_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" + " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, + utsname()->sysname, utsname()->machine); +} + +static struct configfs_item_operations target_core_fabric_item_ops = { + .show_attribute = target_core_attr_show, +}; + +static struct configfs_attribute target_core_item_attr_version = { + .ca_owner = THIS_MODULE, + .ca_name = "version", + .ca_mode = S_IRUGO, +}; + +static struct target_fabric_configfs *target_core_get_fabric( + const char *name) +{ + struct target_fabric_configfs *tf; + + if (!(name)) + return NULL; + + mutex_lock(&g_tf_lock); + list_for_each_entry(tf, &g_tf_list, tf_list) { + if (!(strcmp(tf->tf_name, name))) { + atomic_inc(&tf->tf_access_cnt); + mutex_unlock(&g_tf_lock); + return tf; + } + } + mutex_unlock(&g_tf_lock); + + return NULL; +} + +/* + * Called from struct target_core_group_ops->make_group() + */ +static struct config_group *target_core_register_fabric( + struct config_group *group, + const char *name) +{ + struct target_fabric_configfs *tf; + int ret; + + printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" + " %s\n", group, name); + /* + * Ensure that TCM subsystem plugins are loaded at this point for + * using the RAMDISK_DR virtual LUN 0 and all other struct se_port + * LUN symlinks. + */ + if (transport_subsystem_check_init() < 0) + return ERR_PTR(-EINVAL); + + /* + * Below are some hardcoded request_module() calls to automatically + * local fabric modules when the following is called: + * + * mkdir -p /sys/kernel/config/target/$MODULE_NAME + * + * Note that this does not limit which TCM fabric module can be + * registered, but simply provids auto loading logic for modules with + * mkdir(2) system calls with known TCM fabric modules. 
+ */ + if (!(strncmp(name, "iscsi", 5))) { + /* + * Automatically load the LIO Target fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/iscsi + */ + ret = request_module("iscsi_target_mod"); + if (ret < 0) { + printk(KERN_ERR "request_module() failed for" + " iscsi_target_mod.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } else if (!(strncmp(name, "loopback", 8))) { + /* + * Automatically load the tcm_loop fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/loopback + */ + ret = request_module("tcm_loop"); + if (ret < 0) { + printk(KERN_ERR "request_module() failed for" + " tcm_loop.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } + + tf = target_core_get_fabric(name); + if (!(tf)) { + printk(KERN_ERR "target_core_get_fabric() failed for %s\n", + name); + return ERR_PTR(-EINVAL); + } + printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" + " %s\n", tf->tf_name); + /* + * On a successful target_core_get_fabric() look, the returned + * struct target_fabric_configfs *tf will contain a usage reference. + */ + printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", + &TF_CIT_TMPL(tf)->tfc_wwn_cit); + + tf->tf_group.default_groups = tf->tf_default_groups; + tf->tf_group.default_groups[0] = &tf->tf_disc_group; + tf->tf_group.default_groups[1] = NULL; + + config_group_init_type_name(&tf->tf_group, name, + &TF_CIT_TMPL(tf)->tfc_wwn_cit); + config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", + &TF_CIT_TMPL(tf)->tfc_discovery_cit); + + printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" + " %s\n", tf->tf_group.cg_item.ci_name); + /* + * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() + */ + tf->tf_ops.tf_subsys = tf->tf_subsys; + tf->tf_fabric = &tf->tf_group.cg_item; + printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" + " for %s\n", name); + + return &tf->tf_group; +} + +/* + * Called from struct target_core_group_ops->drop_item() + */ +static void target_core_deregister_fabric( + struct config_group *group, + struct config_item *item) +{ + struct target_fabric_configfs *tf = container_of( + to_config_group(item), struct target_fabric_configfs, tf_group); + struct config_group *tf_group; + struct config_item *df_item; + int i; + + printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" + " tf list\n", config_item_name(item)); + + printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" + " %s\n", tf->tf_name); + atomic_dec(&tf->tf_access_cnt); + + printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" + " tf->tf_fabric for %s\n", tf->tf_name); + tf->tf_fabric = NULL; + + printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" + " %s\n", config_item_name(item)); + + tf_group = &tf->tf_group; + for (i = 0; tf_group->default_groups[i]; i++) { + df_item = &tf_group->default_groups[i]->cg_item; + tf_group->default_groups[i] = NULL; + config_item_put(df_item); + } + config_item_put(item); +} + +static struct configfs_group_operations target_core_fabric_group_ops = { + .make_group = &target_core_register_fabric, + .drop_item = &target_core_deregister_fabric, +}; + +/* + * All item attributes appearing in /sys/kernel/target/ appear here. 
+ */ +static struct configfs_attribute *target_core_fabric_item_attrs[] = { + &target_core_item_attr_version, + NULL, +}; + +/* + * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ + */ +static struct config_item_type target_core_fabrics_item = { + .ct_item_ops = &target_core_fabric_item_ops, + .ct_group_ops = &target_core_fabric_group_ops, + .ct_attrs = target_core_fabric_item_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem target_core_fabrics = { + .su_group = { + .cg_item = { + .ci_namebuf = "target", + .ci_type = &target_core_fabrics_item, + }, + }, +}; + +static struct configfs_subsystem *target_core_subsystem[] = { + &target_core_fabrics, + NULL, +}; + +/*############################################################################## +// Start functions called by external Target Fabrics Modules +//############################################################################*/ + +/* + * First function called by fabric modules to: + * + * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. + * 2) Add struct target_fabric_configfs to g_tf_list + * 3) Return struct target_fabric_configfs to fabric module to be passed + * into target_fabric_configfs_register(). + */ +struct target_fabric_configfs *target_fabric_configfs_init( + struct module *fabric_mod, + const char *name) +{ + struct target_fabric_configfs *tf; + + if (!(fabric_mod)) { + printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); + return NULL; + } + if (!(name)) { + printk(KERN_ERR "Unable to locate passed fabric name\n"); + return NULL; + } + if (strlen(name) > TARGET_FABRIC_NAME_SIZE) { + printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" + "_NAME_SIZE\n", name); + return NULL; + } + + tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); + if (!(tf)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&tf->tf_list); + atomic_set(&tf->tf_access_cnt, 0); + /* + * Setup the default generic struct config_item_type's (cits) in + * struct target_fabric_configfs->tf_cit_tmpl + */ + tf->tf_module = fabric_mod; + target_fabric_setup_cits(tf); + + tf->tf_subsys = target_core_subsystem[0]; + snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); + + mutex_lock(&g_tf_lock); + list_add_tail(&tf->tf_list, &g_tf_list); + mutex_unlock(&g_tf_lock); + + printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" + ">>>>>>>>>>>>>>\n"); + printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" + " %s\n", tf, tf->tf_name); + return tf; +} +EXPORT_SYMBOL(target_fabric_configfs_init); + +/* + * Called by fabric plugins after FAILED target_fabric_configfs_register() call. + */ +void target_fabric_configfs_free( + struct target_fabric_configfs *tf) +{ + mutex_lock(&g_tf_lock); + list_del(&tf->tf_list); + mutex_unlock(&g_tf_lock); + + kfree(tf); +} +EXPORT_SYMBOL(target_fabric_configfs_free); + +/* + * Perform a sanity check of the passed tf->tf_ops before completing + * TCM fabric module registration. 
+ */ +static int target_fabric_tf_ops_check( + struct target_fabric_configfs *tf) +{ + struct target_core_fabric_ops *tfo = &tf->tf_ops; + + if (!(tfo->get_fabric_name)) { + printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); + return -EINVAL; + } + if (!(tfo->get_fabric_proto_ident)) { + printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_wwn)) { + printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_tag)) { + printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_default_depth)) { + printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_pr_transport_id)) { + printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_pr_transport_id_len)) { + printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); + return -EINVAL; + } + if (!(tfo->tpg_check_demo_mode)) { + printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); + return -EINVAL; + } + if (!(tfo->tpg_check_demo_mode_cache)) { + printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); + return -EINVAL; + } + if (!(tfo->tpg_check_demo_mode_write_protect)) { + printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); + return -EINVAL; + } + if (!(tfo->tpg_check_prod_mode_write_protect)) { + printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); + return -EINVAL; + } + if (!(tfo->tpg_alloc_fabric_acl)) { + printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); + return -EINVAL; + } + if (!(tfo->tpg_release_fabric_acl)) { + printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); + return -EINVAL; + } + if (!(tfo->tpg_get_inst_index)) { + printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); + return -EINVAL; + } + if (!(tfo->release_cmd_to_pool)) { + printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n"); + return -EINVAL; + } + if (!(tfo->release_cmd_direct)) { + printk(KERN_ERR "Missing tfo->release_cmd_direct()\n"); + return -EINVAL; + } + if (!(tfo->shutdown_session)) { + printk(KERN_ERR "Missing tfo->shutdown_session()\n"); + return -EINVAL; + } + if (!(tfo->close_session)) { + printk(KERN_ERR "Missing tfo->close_session()\n"); + return -EINVAL; + } + if (!(tfo->stop_session)) { + printk(KERN_ERR "Missing tfo->stop_session()\n"); + return -EINVAL; + } + if (!(tfo->fall_back_to_erl0)) { + printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); + return -EINVAL; + } + if (!(tfo->sess_logged_in)) { + printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); + return -EINVAL; + } + if (!(tfo->sess_get_index)) { + printk(KERN_ERR "Missing tfo->sess_get_index()\n"); + return -EINVAL; + } + if (!(tfo->write_pending)) { + printk(KERN_ERR "Missing tfo->write_pending()\n"); + return -EINVAL; + } + if (!(tfo->write_pending_status)) { + printk(KERN_ERR "Missing tfo->write_pending_status()\n"); + return -EINVAL; + } + if (!(tfo->set_default_node_attributes)) { + printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); + return -EINVAL; + } + if (!(tfo->get_task_tag)) { + printk(KERN_ERR "Missing tfo->get_task_tag()\n"); + return -EINVAL; + } + if (!(tfo->get_cmd_state)) { + printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); + return -EINVAL; + } + if (!(tfo->new_cmd_failure)) { + printk(KERN_ERR "Missing tfo->new_cmd_failure()\n"); + return -EINVAL; + } + if (!(tfo->queue_data_in)) { + printk(KERN_ERR "Missing tfo->queue_data_in()\n"); + return -EINVAL; + } + if 
(!(tfo->queue_status)) { + printk(KERN_ERR "Missing tfo->queue_status()\n"); + return -EINVAL; + } + if (!(tfo->queue_tm_rsp)) { + printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); + return -EINVAL; + } + if (!(tfo->set_fabric_sense_len)) { + printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); + return -EINVAL; + } + if (!(tfo->get_fabric_sense_len)) { + printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); + return -EINVAL; + } + if (!(tfo->is_state_remove)) { + printk(KERN_ERR "Missing tfo->is_state_remove()\n"); + return -EINVAL; + } + if (!(tfo->pack_lun)) { + printk(KERN_ERR "Missing tfo->pack_lun()\n"); + return -EINVAL; + } + /* + * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() + * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in + * target_core_fabric_configfs.c WWN+TPG group context code. + */ + if (!(tfo->fabric_make_wwn)) { + printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); + return -EINVAL; + } + if (!(tfo->fabric_drop_wwn)) { + printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); + return -EINVAL; + } + if (!(tfo->fabric_make_tpg)) { + printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); + return -EINVAL; + } + if (!(tfo->fabric_drop_tpg)) { + printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); + return -EINVAL; + } + + return 0; +} + +/* + * Called 2nd from fabric module with returned parameter of + * struct target_fabric_configfs * from target_fabric_configfs_init(). + * + * Upon a successful registration, the new fabric's struct config_item is + * return. Also, a pointer to this struct is set in the passed + * struct target_fabric_configfs. + */ +int target_fabric_configfs_register( + struct target_fabric_configfs *tf) +{ + struct config_group *su_group; + int ret; + + if (!(tf)) { + printk(KERN_ERR "Unable to locate target_fabric_configfs" + " pointer\n"); + return -EINVAL; + } + if (!(tf->tf_subsys)) { + printk(KERN_ERR "Unable to target struct config_subsystem" + " pointer\n"); + return -EINVAL; + } + su_group = &tf->tf_subsys->su_group; + if (!(su_group)) { + printk(KERN_ERR "Unable to locate target struct config_group" + " pointer\n"); + return -EINVAL; + } + ret = target_fabric_tf_ops_check(tf); + if (ret < 0) + return ret; + + printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" + ">>>>>>>>>>\n"); + return 0; +} +EXPORT_SYMBOL(target_fabric_configfs_register); + +void target_fabric_configfs_deregister( + struct target_fabric_configfs *tf) +{ + struct config_group *su_group; + struct configfs_subsystem *su; + + if (!(tf)) { + printk(KERN_ERR "Unable to locate passed target_fabric_" + "configfs\n"); + return; + } + su = tf->tf_subsys; + if (!(su)) { + printk(KERN_ERR "Unable to locate passed tf->tf_subsys" + " pointer\n"); + return; + } + su_group = &tf->tf_subsys->su_group; + if (!(su_group)) { + printk(KERN_ERR "Unable to locate target struct config_group" + " pointer\n"); + return; + } + + printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" + ">>>>>>>>>>>>\n"); + mutex_lock(&g_tf_lock); + if (atomic_read(&tf->tf_access_cnt)) { + mutex_unlock(&g_tf_lock); + printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", + tf->tf_name); + BUG(); + } + list_del(&tf->tf_list); + mutex_unlock(&g_tf_lock); + + printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" + " %s\n", tf->tf_name); + tf->tf_module = NULL; + tf->tf_subsys = NULL; + kfree(tf); + + printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" + ">>>>>\n"); + return; +} 
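/*
 * Editor's aside (illustrative sketch, not part of this patch): the exported
 * entry points above are intended to be called by a fabric module in a fixed
 * order. The "demo_" names below are hypothetical and the tf_ops wiring is
 * elided, but every callback checked by target_fabric_tf_ops_check() must be
 * assigned between init and register.
 */
static struct target_fabric_configfs *demo_fabric_configfs;

static int demo_register_configfs(void)
{
	struct target_fabric_configfs *tf;
	int ret;

	/* 1) Allocate the fabric context and add it to g_tf_list */
	tf = target_fabric_configfs_init(THIS_MODULE, "demo");
	if (!tf || IS_ERR(tf))
		return -ENOMEM;
	/* 2) Fill in tf->tf_ops: get_fabric_name, tpg_get_wwn, queue_data_in, ... */
	/* 3) Sanity check tf_ops and complete the registration */
	ret = target_fabric_configfs_register(tf);
	if (ret < 0) {
		target_fabric_configfs_free(tf);
		return ret;
	}
	demo_fabric_configfs = tf;
	return 0;
}

static void demo_deregister_configfs(void)
{
	/* Requires that no configfs children of the fabric remain in use */
	target_fabric_configfs_deregister(demo_fabric_configfs);
	demo_fabric_configfs = NULL;
}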
+EXPORT_SYMBOL(target_fabric_configfs_deregister); + +/*############################################################################## +// Stop functions called by external Target Fabrics Modules +//############################################################################*/ + +/* Start functions for struct config_item_type target_core_dev_attrib_cit */ + +#define DEF_DEV_ATTRIB_SHOW(_name) \ +static ssize_t target_core_dev_show_attr_##_name( \ + struct se_dev_attrib *da, \ + char *page) \ +{ \ + struct se_device *dev; \ + struct se_subsystem_dev *se_dev = da->da_sub_dev; \ + ssize_t rb; \ + \ + spin_lock(&se_dev->se_dev_lock); \ + dev = se_dev->se_dev_ptr; \ + if (!(dev)) { \ + spin_unlock(&se_dev->se_dev_lock); \ + return -ENODEV; \ + } \ + rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ + spin_unlock(&se_dev->se_dev_lock); \ + \ + return rb; \ +} + +#define DEF_DEV_ATTRIB_STORE(_name) \ +static ssize_t target_core_dev_store_attr_##_name( \ + struct se_dev_attrib *da, \ + const char *page, \ + size_t count) \ +{ \ + struct se_device *dev; \ + struct se_subsystem_dev *se_dev = da->da_sub_dev; \ + unsigned long val; \ + int ret; \ + \ + spin_lock(&se_dev->se_dev_lock); \ + dev = se_dev->se_dev_ptr; \ + if (!(dev)) { \ + spin_unlock(&se_dev->se_dev_lock); \ + return -ENODEV; \ + } \ + ret = strict_strtoul(page, 0, &val); \ + if (ret < 0) { \ + spin_unlock(&se_dev->se_dev_lock); \ + printk(KERN_ERR "strict_strtoul() failed with" \ + " ret: %d\n", ret); \ + return -EINVAL; \ + } \ + ret = se_dev_set_##_name(dev, (u32)val); \ + spin_unlock(&se_dev->se_dev_lock); \ + \ + return (!ret) ? count : -EINVAL; \ +} + +#define DEF_DEV_ATTRIB(_name) \ +DEF_DEV_ATTRIB_SHOW(_name); \ +DEF_DEV_ATTRIB_STORE(_name); + +#define DEF_DEV_ATTRIB_RO(_name) \ +DEF_DEV_ATTRIB_SHOW(_name); + +CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); +#define SE_DEV_ATTR(_name, _mode) \ +static struct target_core_dev_attrib_attribute \ + target_core_dev_attrib_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_dev_show_attr_##_name, \ + target_core_dev_store_attr_##_name); + +#define SE_DEV_ATTR_RO(_name); \ +static struct target_core_dev_attrib_attribute \ + target_core_dev_attrib_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_dev_show_attr_##_name); + +DEF_DEV_ATTRIB(emulate_dpo); +SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_fua_write); +SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_fua_read); +SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_write_cache); +SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); +SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_tas); +SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_tpu); +SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_tpws); +SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(enforce_pr_isids); +SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_block_size); +SE_DEV_ATTR_RO(hw_block_size); + +DEF_DEV_ATTRIB(block_size); +SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_max_sectors); +SE_DEV_ATTR_RO(hw_max_sectors); + +DEF_DEV_ATTRIB(max_sectors); +SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(optimal_sectors); +SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_queue_depth); +SE_DEV_ATTR_RO(hw_queue_depth); + 
+DEF_DEV_ATTRIB(queue_depth); +SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(task_timeout); +SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(max_unmap_lba_count); +SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(max_unmap_block_desc_count); +SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(unmap_granularity); +SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(unmap_granularity_alignment); +SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); + +CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); + +static struct configfs_attribute *target_core_dev_attrib_attrs[] = { + &target_core_dev_attrib_emulate_dpo.attr, + &target_core_dev_attrib_emulate_fua_write.attr, + &target_core_dev_attrib_emulate_fua_read.attr, + &target_core_dev_attrib_emulate_write_cache.attr, + &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, + &target_core_dev_attrib_emulate_tas.attr, + &target_core_dev_attrib_emulate_tpu.attr, + &target_core_dev_attrib_emulate_tpws.attr, + &target_core_dev_attrib_enforce_pr_isids.attr, + &target_core_dev_attrib_hw_block_size.attr, + &target_core_dev_attrib_block_size.attr, + &target_core_dev_attrib_hw_max_sectors.attr, + &target_core_dev_attrib_max_sectors.attr, + &target_core_dev_attrib_optimal_sectors.attr, + &target_core_dev_attrib_hw_queue_depth.attr, + &target_core_dev_attrib_queue_depth.attr, + &target_core_dev_attrib_task_timeout.attr, + &target_core_dev_attrib_max_unmap_lba_count.attr, + &target_core_dev_attrib_max_unmap_block_desc_count.attr, + &target_core_dev_attrib_unmap_granularity.attr, + &target_core_dev_attrib_unmap_granularity_alignment.attr, + NULL, +}; + +static struct configfs_item_operations target_core_dev_attrib_ops = { + .show_attribute = target_core_dev_attrib_attr_show, + .store_attribute = target_core_dev_attrib_attr_store, +}; + +static struct config_item_type target_core_dev_attrib_cit = { + .ct_item_ops = &target_core_dev_attrib_ops, + .ct_attrs = target_core_dev_attrib_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_dev_attrib_cit */ + +/* Start functions for struct config_item_type target_core_dev_wwn_cit */ + +CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); +#define SE_DEV_WWN_ATTR(_name, _mode) \ +static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_dev_wwn_show_attr_##_name, \ + target_core_dev_wwn_store_attr_##_name); + +#define SE_DEV_WWN_ATTR_RO(_name); \ +do { \ + static struct target_core_dev_wwn_attribute \ + target_core_dev_wwn_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_dev_wwn_show_attr_##_name); \ +} while (0); + +/* + * VPD page 0x80 Unit serial + */ +static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( + struct t10_wwn *t10_wwn, + char *page) +{ + struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; + struct se_device *dev; + + dev = se_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + return sprintf(page, "T10 VPD Unit Serial Number: %s\n", + &t10_wwn->unit_serial[0]); +} + +static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( + struct t10_wwn *t10_wwn, + const char *page, + size_t count) +{ + struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev; + struct se_device *dev; + unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; + + /* + * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial + * from the struct scsi_device level firmware, do not allow + * 
VPD Unit Serial to be emulated. + * + * Note this struct scsi_device could also be emulating VPD + * information from its drivers/scsi LLD. But for now we assume + * it is doing 'the right thing' wrt a world wide unique + * VPD Unit Serial Number that OS dependent multipath can depend on. + */ + if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { + printk(KERN_ERR "Underlying SCSI device firmware provided VPD" + " Unit Serial, ignoring request\n"); + return -EOPNOTSUPP; + } + + if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) { + printk(KERN_ERR "Emulated VPD Unit Serial exceeds" + " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); + return -EOVERFLOW; + } + /* + * Check to see if any active $FABRIC_MOD exports exist. If they + * do exist, fail here as changing this information on the fly + * (underneath the initiator side OS dependent multipath code) + * could cause negative effects. + */ + dev = su_dev->se_dev_ptr; + if ((dev)) { + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "Unable to set VPD Unit Serial while" + " active %d $FABRIC_MOD exports exist\n", + atomic_read(&dev->dev_export_obj.obj_access_count)); + return -EINVAL; + } + } + /* + * This currently assumes ASCII encoding for emulated VPD Unit Serial. + * + * Also, strip any newline added from the userspace + * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial + */ + memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); + snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); + snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, + "%s", strstrip(buf)); + su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; + + printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" + " %s\n", su_dev->t10_wwn.unit_serial); + + return count; +} + +SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR); + +/* + * VPD page 0x83 Protocol Identifier + */ +static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( + struct t10_wwn *t10_wwn, + char *page) +{ + struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; + struct se_device *dev; + struct t10_vpd *vpd; + unsigned char buf[VPD_TMP_BUF_SIZE]; + ssize_t len = 0; + + dev = se_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + + spin_lock(&t10_wwn->t10_vpd_lock); + list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { + if (!(vpd->protocol_identifier_set)) + continue; + + transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); + + if ((len + strlen(buf) > PAGE_SIZE)) + break; + + len += sprintf(page+len, "%s", buf); + } + spin_unlock(&t10_wwn->t10_vpd_lock); + + return len; +} + +static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier( + struct t10_wwn *t10_wwn, + const char *page, + size_t count) +{ + return -ENOSYS; +} + +SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR); + +/* + * Generic wrapper for dumping VPD identifiers by association. 
+ */ +#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ +static ssize_t target_core_dev_wwn_show_attr_##_name( \ + struct t10_wwn *t10_wwn, \ + char *page) \ +{ \ + struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \ + struct se_device *dev; \ + struct t10_vpd *vpd; \ + unsigned char buf[VPD_TMP_BUF_SIZE]; \ + ssize_t len = 0; \ + \ + dev = se_dev->se_dev_ptr; \ + if (!(dev)) \ + return -ENODEV; \ + \ + spin_lock(&t10_wwn->t10_vpd_lock); \ + list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ + if (vpd->association != _assoc) \ + continue; \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ + if ((len + strlen(buf) > PAGE_SIZE)) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ + if ((len + strlen(buf) > PAGE_SIZE)) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ + if ((len + strlen(buf) > PAGE_SIZE)) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + } \ + spin_unlock(&t10_wwn->t10_vpd_lock); \ + \ + return len; \ +} + +/* + * VPD page 0x83 Assoication: Logical Unit + */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); + +static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit( + struct t10_wwn *t10_wwn, + const char *page, + size_t count) +{ + return -ENOSYS; +} + +SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR); + +/* + * VPD page 0x83 Association: Target Port + */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); + +static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port( + struct t10_wwn *t10_wwn, + const char *page, + size_t count) +{ + return -ENOSYS; +} + +SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR); + +/* + * VPD page 0x83 Association: SCSI Target Device + */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); + +static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device( + struct t10_wwn *t10_wwn, + const char *page, + size_t count) +{ + return -ENOSYS; +} + +SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR); + +CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group); + +static struct configfs_attribute *target_core_dev_wwn_attrs[] = { + &target_core_dev_wwn_vpd_unit_serial.attr, + &target_core_dev_wwn_vpd_protocol_identifier.attr, + &target_core_dev_wwn_vpd_assoc_logical_unit.attr, + &target_core_dev_wwn_vpd_assoc_target_port.attr, + &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, + NULL, +}; + +static struct configfs_item_operations target_core_dev_wwn_ops = { + .show_attribute = target_core_dev_wwn_attr_show, + .store_attribute = target_core_dev_wwn_attr_store, +}; + +static struct config_item_type target_core_dev_wwn_cit = { + .ct_item_ops = &target_core_dev_wwn_ops, + .ct_attrs = target_core_dev_wwn_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_dev_wwn_cit */ + +/* Start functions for struct config_item_type target_core_dev_pr_cit */ + +CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev); +#define SE_DEV_PR_ATTR(_name, _mode) \ +static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_dev_pr_show_attr_##_name, \ + target_core_dev_pr_store_attr_##_name); + +#define SE_DEV_PR_ATTR_RO(_name); \ +static struct target_core_dev_pr_attribute 
target_core_dev_pr_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_dev_pr_show_attr_##_name); + +/* + * res_holder + */ +static ssize_t target_core_dev_pr_show_spc3_res( + struct se_device *dev, + char *page, + ssize_t *len) +{ + struct se_node_acl *se_nacl; + struct t10_pr_registration *pr_reg; + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (!(pr_reg)) { + *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); + spin_unlock(&dev->dev_reservation_lock); + return *len; + } + se_nacl = pr_reg->pr_reg_nacl; + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", + TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); + spin_unlock(&dev->dev_reservation_lock); + + return *len; +} + +static ssize_t target_core_dev_pr_show_spc2_res( + struct se_device *dev, + char *page, + ssize_t *len) +{ + struct se_node_acl *se_nacl; + + spin_lock(&dev->dev_reservation_lock); + se_nacl = dev->dev_reserved_node_acl; + if (!(se_nacl)) { + *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); + spin_unlock(&dev->dev_reservation_lock); + return *len; + } + *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", + TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->initiatorname); + spin_unlock(&dev->dev_reservation_lock); + + return *len; +} + +static ssize_t target_core_dev_pr_show_attr_res_holder( + struct se_subsystem_dev *su_dev, + char *page) +{ + ssize_t len = 0; + + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + switch (T10_RES(su_dev)->res_type) { + case SPC3_PERSISTENT_RESERVATIONS: + target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, + page, &len); + break; + case SPC2_RESERVATIONS: + target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr, + page, &len); + break; + case SPC_PASSTHROUGH: + len += sprintf(page+len, "Passthrough\n"); + break; + default: + len += sprintf(page+len, "Unknown\n"); + break; + } + + return len; +} + +SE_DEV_PR_ATTR_RO(res_holder); + +/* + * res_pr_all_tgt_pts + */ +static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( + struct se_subsystem_dev *su_dev, + char *page) +{ + struct se_device *dev; + struct t10_pr_registration *pr_reg; + ssize_t len = 0; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return len; + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (!(pr_reg)) { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + spin_unlock(&dev->dev_reservation_lock); + return len; + } + /* + * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3 + * Basic PERSISTENT RESERVER OUT parameter list, page 290 + */ + if (pr_reg->pr_reg_all_tg_pt) + len = sprintf(page, "SPC-3 Reservation: All Target" + " Ports registration\n"); + else + len = sprintf(page, "SPC-3 Reservation: Single" + " Target Port registration\n"); + spin_unlock(&dev->dev_reservation_lock); + + return len; +} + +SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); + +/* + * res_pr_generation + */ +static ssize_t target_core_dev_pr_show_attr_res_pr_generation( + struct se_subsystem_dev *su_dev, + char *page) +{ + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return 0; + + return sprintf(page, "0x%08x\n", 
T10_RES(su_dev)->pr_generation); +} + +SE_DEV_PR_ATTR_RO(res_pr_generation); + +/* + * res_pr_holder_tg_port + */ +static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( + struct se_subsystem_dev *su_dev, + char *page) +{ + struct se_device *dev; + struct se_node_acl *se_nacl; + struct se_lun *lun; + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg; + struct target_core_fabric_ops *tfo; + ssize_t len = 0; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return len; + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (!(pr_reg)) { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + spin_unlock(&dev->dev_reservation_lock); + return len; + } + se_nacl = pr_reg->pr_reg_nacl; + se_tpg = se_nacl->se_tpg; + lun = pr_reg->pr_reg_tg_pt_lun; + tfo = TPG_TFO(se_tpg); + + len += sprintf(page+len, "SPC-3 Reservation: %s" + " Target Node Endpoint: %s\n", tfo->get_fabric_name(), + tfo->tpg_get_wwn(se_tpg)); + len += sprintf(page+len, "SPC-3 Reservation: Relative Port" + " Identifer Tag: %hu %s Portal Group Tag: %hu" + " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, + tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), + tfo->get_fabric_name(), lun->unpacked_lun); + spin_unlock(&dev->dev_reservation_lock); + + return len; +} + +SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); + +/* + * res_pr_registered_i_pts + */ +static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( + struct se_subsystem_dev *su_dev, + char *page) +{ + struct target_core_fabric_ops *tfo; + struct t10_pr_registration *pr_reg; + unsigned char buf[384]; + char i_buf[PR_REG_ISID_ID_LEN]; + ssize_t len = 0; + int reg_count = 0, prf_isid; + + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return len; + + len += sprintf(page+len, "SPC-3 PR Registrations:\n"); + + spin_lock(&T10_RES(su_dev)->registration_lock); + list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + pr_reg_list) { + + memset(buf, 0, 384); + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", + tfo->get_fabric_name(), + pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? 
+ &i_buf[0] : "", pr_reg->pr_res_key, + pr_reg->pr_res_generation); + + if ((len + strlen(buf) > PAGE_SIZE)) + break; + + len += sprintf(page+len, "%s", buf); + reg_count++; + } + spin_unlock(&T10_RES(su_dev)->registration_lock); + + if (!(reg_count)) + len += sprintf(page+len, "None\n"); + + return len; +} + +SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); + +/* + * res_pr_type + */ +static ssize_t target_core_dev_pr_show_attr_res_pr_type( + struct se_subsystem_dev *su_dev, + char *page) +{ + struct se_device *dev; + struct t10_pr_registration *pr_reg; + ssize_t len = 0; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return len; + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (!(pr_reg)) { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + spin_unlock(&dev->dev_reservation_lock); + return len; + } + len = sprintf(page, "SPC-3 Reservation Type: %s\n", + core_scsi3_pr_dump_type(pr_reg->pr_res_type)); + spin_unlock(&dev->dev_reservation_lock); + + return len; +} + +SE_DEV_PR_ATTR_RO(res_pr_type); + +/* + * res_type + */ +static ssize_t target_core_dev_pr_show_attr_res_type( + struct se_subsystem_dev *su_dev, + char *page) +{ + ssize_t len = 0; + + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + switch (T10_RES(su_dev)->res_type) { + case SPC3_PERSISTENT_RESERVATIONS: + len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); + break; + case SPC2_RESERVATIONS: + len = sprintf(page, "SPC2_RESERVATIONS\n"); + break; + case SPC_PASSTHROUGH: + len = sprintf(page, "SPC_PASSTHROUGH\n"); + break; + default: + len = sprintf(page, "UNKNOWN\n"); + break; + } + + return len; +} + +SE_DEV_PR_ATTR_RO(res_type); + +/* + * res_aptpl_active + */ + +static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( + struct se_subsystem_dev *su_dev, + char *page) +{ + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return 0; + + return sprintf(page, "APTPL Bit Status: %s\n", + (T10_RES(su_dev)->pr_aptpl_active) ? 
"Activated" : "Disabled"); +} + +SE_DEV_PR_ATTR_RO(res_aptpl_active); + +/* + * res_aptpl_metadata + */ +static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( + struct se_subsystem_dev *su_dev, + char *page) +{ + if (!(su_dev->se_dev_ptr)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return 0; + + return sprintf(page, "Ready to process PR APTPL metadata..\n"); +} + +enum { + Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, + Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, + Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, + Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err +}; + +static match_table_t tokens = { + {Opt_initiator_fabric, "initiator_fabric=%s"}, + {Opt_initiator_node, "initiator_node=%s"}, + {Opt_initiator_sid, "initiator_sid=%s"}, + {Opt_sa_res_key, "sa_res_key=%s"}, + {Opt_res_holder, "res_holder=%d"}, + {Opt_res_type, "res_type=%d"}, + {Opt_res_scope, "res_scope=%d"}, + {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, + {Opt_mapped_lun, "mapped_lun=%d"}, + {Opt_target_fabric, "target_fabric=%s"}, + {Opt_target_node, "target_node=%s"}, + {Opt_tpgt, "tpgt=%d"}, + {Opt_port_rtpi, "port_rtpi=%d"}, + {Opt_target_lun, "target_lun=%d"}, + {Opt_err, NULL} +}; + +static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( + struct se_subsystem_dev *su_dev, + const char *page, + size_t count) +{ + struct se_device *dev; + unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL; + unsigned char *isid = NULL; + char *orig, *ptr, *arg_p, *opts; + substring_t args[MAX_OPT_ARGS]; + unsigned long long tmp_ll; + u64 sa_res_key = 0; + u32 mapped_lun = 0, target_lun = 0; + int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; + u16 port_rpti = 0, tpgt = 0; + u8 type = 0, scope; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return 0; + + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_INFO "Unable to process APTPL metadata while" + " active fabric exports exist\n"); + return -EINVAL; + } + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + while ((ptr = strsep(&opts, ",")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_initiator_fabric: + i_fabric = match_strdup(&args[0]); + break; + case Opt_initiator_node: + i_port = match_strdup(&args[0]); + if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { + printk(KERN_ERR "APTPL metadata initiator_node=" + " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", + PR_APTPL_MAX_IPORT_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_initiator_sid: + isid = match_strdup(&args[0]); + if (strlen(isid) > PR_REG_ISID_LEN) { + printk(KERN_ERR "APTPL metadata initiator_isid" + "= exceeds PR_REG_ISID_LEN: %d\n", + PR_REG_ISID_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_sa_res_key: + arg_p = match_strdup(&args[0]); + ret = strict_strtoull(arg_p, 0, &tmp_ll); + if (ret < 0) { + printk(KERN_ERR "strict_strtoull() failed for" + " sa_res_key=\n"); + goto out; + } + sa_res_key = (u64)tmp_ll; + break; + /* + * PR APTPL Metadata for Reservation + */ + case Opt_res_holder: + match_int(args, &arg); + res_holder = arg; + break; + case Opt_res_type: + match_int(args, &arg); + type = (u8)arg; + break; + case Opt_res_scope: + match_int(args, &arg); + scope = (u8)arg; + break; + case Opt_res_all_tg_pt: + match_int(args, &arg); + all_tg_pt = (int)arg; + break; + case 
Opt_mapped_lun: + match_int(args, &arg); + mapped_lun = (u32)arg; + break; + /* + * PR APTPL Metadata for Target Port + */ + case Opt_target_fabric: + t_fabric = match_strdup(&args[0]); + break; + case Opt_target_node: + t_port = match_strdup(&args[0]); + if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { + printk(KERN_ERR "APTPL metadata target_node=" + " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", + PR_APTPL_MAX_TPORT_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_tpgt: + match_int(args, &arg); + tpgt = (u16)arg; + break; + case Opt_port_rtpi: + match_int(args, &arg); + port_rpti = (u16)arg; + break; + case Opt_target_lun: + match_int(args, &arg); + target_lun = (u32)arg; + break; + default: + break; + } + } + + if (!(i_port) || !(t_port) || !(sa_res_key)) { + printk(KERN_ERR "Illegal parameters for APTPL registration\n"); + ret = -EINVAL; + goto out; + } + + if (res_holder && !(type)) { + printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" + " holder\n", type); + ret = -EINVAL; + goto out; + } + + ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, + i_port, isid, mapped_lun, t_port, tpgt, target_lun, + res_holder, all_tg_pt, type); +out: + kfree(orig); + return (ret == 0) ? count : ret; +} + +SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); + +CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group); + +static struct configfs_attribute *target_core_dev_pr_attrs[] = { + &target_core_dev_pr_res_holder.attr, + &target_core_dev_pr_res_pr_all_tgt_pts.attr, + &target_core_dev_pr_res_pr_generation.attr, + &target_core_dev_pr_res_pr_holder_tg_port.attr, + &target_core_dev_pr_res_pr_registered_i_pts.attr, + &target_core_dev_pr_res_pr_type.attr, + &target_core_dev_pr_res_type.attr, + &target_core_dev_pr_res_aptpl_active.attr, + &target_core_dev_pr_res_aptpl_metadata.attr, + NULL, +}; + +static struct configfs_item_operations target_core_dev_pr_ops = { + .show_attribute = target_core_dev_pr_attr_show, + .store_attribute = target_core_dev_pr_attr_store, +}; + +static struct config_item_type target_core_dev_pr_cit = { + .ct_item_ops = &target_core_dev_pr_ops, + .ct_attrs = target_core_dev_pr_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_dev_pr_cit */ + +/* Start functions for struct config_item_type target_core_dev_cit */ + +static ssize_t target_core_show_dev_info(void *p, char *page) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_hba *hba = se_dev->se_dev_hba; + struct se_subsystem_api *t = hba->transport; + int bl = 0; + ssize_t read_bytes = 0; + + if (!(se_dev->se_dev_ptr)) + return -ENODEV; + + transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); + read_bytes += bl; + read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes); + return read_bytes; +} + +static struct target_core_configfs_attribute target_core_attr_dev_info = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "info", + .ca_mode = S_IRUGO }, + .show = target_core_show_dev_info, + .store = NULL, +}; + +static ssize_t target_core_store_dev_control( + void *p, + const char *page, + size_t count) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_hba *hba = se_dev->se_dev_hba; + struct se_subsystem_api *t = hba->transport; + + if (!(se_dev->se_dev_su_ptr)) { + printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" + "_dev_su_ptr\n"); + return -EINVAL; + } + + return t->set_configfs_dev_params(hba, se_dev, page, count); +} + +static struct 
target_core_configfs_attribute target_core_attr_dev_control = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "control", + .ca_mode = S_IWUSR }, + .show = NULL, + .store = target_core_store_dev_control, +}; + +static ssize_t target_core_show_dev_alias(void *p, char *page) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + + if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) + return 0; + + return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias); +} + +static ssize_t target_core_store_dev_alias( + void *p, + const char *page, + size_t count) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_hba *hba = se_dev->se_dev_hba; + ssize_t read_bytes; + + if (count > (SE_DEV_ALIAS_LEN-1)) { + printk(KERN_ERR "alias count: %d exceeds" + " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, + SE_DEV_ALIAS_LEN-1); + return -EINVAL; + } + + se_dev->su_dev_flags |= SDF_USING_ALIAS; + read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, + "%s", page); + + printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&se_dev->se_dev_group.cg_item), + se_dev->se_dev_alias); + + return read_bytes; +} + +static struct target_core_configfs_attribute target_core_attr_dev_alias = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "alias", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_alias, + .store = target_core_store_dev_alias, +}; + +static ssize_t target_core_show_dev_udev_path(void *p, char *page) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + + if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) + return 0; + + return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path); +} + +static ssize_t target_core_store_dev_udev_path( + void *p, + const char *page, + size_t count) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_hba *hba = se_dev->se_dev_hba; + ssize_t read_bytes; + + if (count > (SE_UDEV_PATH_LEN-1)) { + printk(KERN_ERR "udev_path count: %d exceeds" + " SE_UDEV_PATH_LEN-1: %u\n", (int)count, + SE_UDEV_PATH_LEN-1); + return -EINVAL; + } + + se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; + read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, + "%s", page); + + printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&se_dev->se_dev_group.cg_item), + se_dev->se_dev_udev_path); + + return read_bytes; +} + +static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "udev_path", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_udev_path, + .store = target_core_store_dev_udev_path, +}; + +static ssize_t target_core_store_dev_enable( + void *p, + const char *page, + size_t count) +{ + struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_device *dev; + struct se_hba *hba = se_dev->se_dev_hba; + struct se_subsystem_api *t = hba->transport; + char *ptr; + + ptr = strstr(page, "1"); + if (!(ptr)) { + printk(KERN_ERR "For dev_enable ops, only valid value" + " is \"1\"\n"); + return -EINVAL; + } + if ((se_dev->se_dev_ptr)) { + printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" + " object\n"); + return -EEXIST; + } + + if (t->check_configfs_dev_params(hba, se_dev) < 0) + return -EINVAL; + + dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); + if (!(dev) || IS_ERR(dev)) + return -EINVAL; + + 
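+	/*
+	 * By this point the backend has validated its parameters and created
+	 * the struct se_device, so record the pointer below; the rest of the
+	 * configfs code treats a non-NULL se_dev_ptr as "device enabled".
+	 *
+	 * Illustrative userspace sequence (the plugin/device names here are
+	 * examples only, following the $CONFIGFS/core/$HBA/$DEV convention
+	 * used elsewhere in this file):
+	 *
+	 *   mkdir -p $CONFIGFS/core/rd_mcp_0/ramdisk0
+	 *   echo "<backend specific parameters>" > $CONFIGFS/core/rd_mcp_0/ramdisk0/control
+	 *   echo 1 > $CONFIGFS/core/rd_mcp_0/ramdisk0/enable
+	 */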
se_dev->se_dev_ptr = dev; + printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" + " %p\n", se_dev->se_dev_ptr); + + return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_enable = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "enable", + .ca_mode = S_IWUSR }, + .show = NULL, + .store = target_core_store_dev_enable, +}; + +static ssize_t target_core_show_alua_lu_gp(void *p, char *page) +{ + struct se_device *dev; + struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; + struct config_item *lu_ci; + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem; + ssize_t len = 0; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) + return len; + + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!(lu_gp_mem)) { + printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + " pointer\n"); + return -EINVAL; + } + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if ((lu_gp)) { + lu_ci = &lu_gp->lu_gp_group.cg_item; + len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", + config_item_name(lu_ci), lu_gp->lu_gp_id); + } + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + return len; +} + +static ssize_t target_core_store_alua_lu_gp( + void *p, + const char *page, + size_t count) +{ + struct se_device *dev; + struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; + struct se_hba *hba = su_dev->se_dev_hba; + struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; + struct t10_alua_lu_gp_member *lu_gp_mem; + unsigned char buf[LU_GROUP_NAME_BUF]; + int move = 0; + + dev = su_dev->se_dev_ptr; + if (!(dev)) + return -ENODEV; + + if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { + printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&su_dev->se_dev_group.cg_item)); + return -EINVAL; + } + if (count > LU_GROUP_NAME_BUF) { + printk(KERN_ERR "ALUA LU Group Alias too large!\n"); + return -EINVAL; + } + memset(buf, 0, LU_GROUP_NAME_BUF); + memcpy(buf, page, count); + /* + * Any ALUA logical unit alias besides "NULL" means we will be + * making a new group association. + */ + if (strcmp(strstrip(buf), "NULL")) { + /* + * core_alua_get_lu_gp_by_name() will increment reference to + * struct t10_alua_lu_gp. This reference is released with + * core_alua_get_lu_gp_by_name below(). + */ + lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); + if (!(lu_gp_new)) + return -ENODEV; + } + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!(lu_gp_mem)) { + if (lu_gp_new) + core_alua_put_lu_gp_from_name(lu_gp_new); + printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + " pointer\n"); + return -EINVAL; + } + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if ((lu_gp)) { + /* + * Clearing an existing lu_gp association, and replacing + * with NULL + */ + if (!(lu_gp_new)) { + printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" + " from ALUA LU Group: core/alua/lu_gps/%s, ID:" + " %hu\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&su_dev->se_dev_group.cg_item), + config_item_name(&lu_gp->lu_gp_group.cg_item), + lu_gp->lu_gp_id); + + __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + return count; + } + /* + * Removing existing association of lu_gp_mem with lu_gp + */ + __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); + move = 1; + } + /* + * Associate lu_gp_mem with lu_gp_new. 
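+ * From userspace this is driven by writing an existing LU group name
+ * into the device's alua_lu_gp attribute, or "NULL" to drop the
+ * association again (paths illustrative, group name is an example):
+ *
+ *   echo some_lu_gp > $CONFIGFS/core/$HBA/$DEV/alua_lu_gp
+ *   echo NULL > $CONFIGFS/core/$HBA/$DEV/alua_lu_gp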
+ */ + __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" + " core/alua/lu_gps/%s, ID: %hu\n", + (move) ? "Moving" : "Adding", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&su_dev->se_dev_group.cg_item), + config_item_name(&lu_gp_new->lu_gp_group.cg_item), + lu_gp_new->lu_gp_id); + + core_alua_put_lu_gp_from_name(lu_gp_new); + return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "alua_lu_gp", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_alua_lu_gp, + .store = target_core_store_alua_lu_gp, +}; + +static struct configfs_attribute *lio_core_dev_attrs[] = { + &target_core_attr_dev_info.attr, + &target_core_attr_dev_control.attr, + &target_core_attr_dev_alias.attr, + &target_core_attr_dev_udev_path.attr, + &target_core_attr_dev_enable.attr, + &target_core_attr_dev_alua_lu_gp.attr, + NULL, +}; + +static void target_core_dev_release(struct config_item *item) +{ + struct se_subsystem_dev *se_dev = container_of(to_config_group(item), + struct se_subsystem_dev, se_dev_group); + struct config_group *dev_cg; + + if (!(se_dev)) + return; + + dev_cg = &se_dev->se_dev_group; + kfree(dev_cg->default_groups); +} + +static ssize_t target_core_dev_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct se_subsystem_dev *se_dev = container_of( + to_config_group(item), struct se_subsystem_dev, + se_dev_group); + struct target_core_configfs_attribute *tc_attr = container_of( + attr, struct target_core_configfs_attribute, attr); + + if (!(tc_attr->show)) + return -EINVAL; + + return tc_attr->show((void *)se_dev, page); +} + +static ssize_t target_core_dev_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct se_subsystem_dev *se_dev = container_of( + to_config_group(item), struct se_subsystem_dev, + se_dev_group); + struct target_core_configfs_attribute *tc_attr = container_of( + attr, struct target_core_configfs_attribute, attr); + + if (!(tc_attr->store)) + return -EINVAL; + + return tc_attr->store((void *)se_dev, page, count); +} + +static struct configfs_item_operations target_core_dev_item_ops = { + .release = target_core_dev_release, + .show_attribute = target_core_dev_show, + .store_attribute = target_core_dev_store, +}; + +static struct config_item_type target_core_dev_cit = { + .ct_item_ops = &target_core_dev_item_ops, + .ct_attrs = lio_core_dev_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_dev_cit */ + +/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ + +CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp); +#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \ +static struct target_core_alua_lu_gp_attribute \ + target_core_alua_lu_gp_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_alua_lu_gp_show_attr_##_name, \ + target_core_alua_lu_gp_store_attr_##_name); + +#define SE_DEV_ALUA_LU_ATTR_RO(_name) \ +static struct target_core_alua_lu_gp_attribute \ + target_core_alua_lu_gp_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_alua_lu_gp_show_attr_##_name); + +/* + * lu_gp_id + */ +static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( + struct t10_alua_lu_gp *lu_gp, + char *page) +{ + if (!(lu_gp->lu_gp_valid_id)) + return 0; + + return sprintf(page, "%hu\n", 
lu_gp->lu_gp_id); +} + +static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( + struct t10_alua_lu_gp *lu_gp, + const char *page, + size_t count) +{ + struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; + unsigned long lu_gp_id; + int ret; + + ret = strict_strtoul(page, 0, &lu_gp_id); + if (ret < 0) { + printk(KERN_ERR "strict_strtoul() returned %d for" + " lu_gp_id\n", ret); + return -EINVAL; + } + if (lu_gp_id > 0x0000ffff) { + printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" + " 0x0000ffff\n", lu_gp_id); + return -EINVAL; + } + + ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); + if (ret < 0) + return -EINVAL; + + printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" + " Group: core/alua/lu_gps/%s to ID: %hu\n", + config_item_name(&alua_lu_gp_cg->cg_item), + lu_gp->lu_gp_id); + + return count; +} + +SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR); + +/* + * members + */ +static ssize_t target_core_alua_lu_gp_show_attr_members( + struct t10_alua_lu_gp *lu_gp, + char *page) +{ + struct se_device *dev; + struct se_hba *hba; + struct se_subsystem_dev *su_dev; + struct t10_alua_lu_gp_member *lu_gp_mem; + ssize_t len = 0, cur_len; + unsigned char buf[LU_GROUP_NAME_BUF]; + + memset(buf, 0, LU_GROUP_NAME_BUF); + + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { + dev = lu_gp_mem->lu_gp_mem_dev; + su_dev = dev->se_sub_dev; + hba = su_dev->se_dev_hba; + + cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&su_dev->se_dev_group.cg_item)); + cur_len++; /* Extra byte for NULL terminator */ + + if ((cur_len + len) > PAGE_SIZE) { + printk(KERN_WARNING "Ran out of lu_gp_show_attr" + "_members buffer\n"); + break; + } + memcpy(page+len, buf, cur_len); + len += cur_len; + } + spin_unlock(&lu_gp->lu_gp_lock); + + return len; +} + +SE_DEV_ALUA_LU_ATTR_RO(members); + +CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group); + +static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { + &target_core_alua_lu_gp_lu_gp_id.attr, + &target_core_alua_lu_gp_members.attr, + NULL, +}; + +static struct configfs_item_operations target_core_alua_lu_gp_ops = { + .show_attribute = target_core_alua_lu_gp_attr_show, + .store_attribute = target_core_alua_lu_gp_attr_store, +}; + +static struct config_item_type target_core_alua_lu_gp_cit = { + .ct_item_ops = &target_core_alua_lu_gp_ops, + .ct_attrs = target_core_alua_lu_gp_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_lu_gp_cit */ + +/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ + +static struct config_group *target_core_alua_create_lu_gp( + struct config_group *group, + const char *name) +{ + struct t10_alua_lu_gp *lu_gp; + struct config_group *alua_lu_gp_cg = NULL; + struct config_item *alua_lu_gp_ci = NULL; + + lu_gp = core_alua_allocate_lu_gp(name, 0); + if (IS_ERR(lu_gp)) + return NULL; + + alua_lu_gp_cg = &lu_gp->lu_gp_group; + alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; + + config_group_init_type_name(alua_lu_gp_cg, name, + &target_core_alua_lu_gp_cit); + + printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" + " Group: core/alua/lu_gps/%s\n", + config_item_name(alua_lu_gp_ci)); + + return alua_lu_gp_cg; + +} + +static void target_core_alua_drop_lu_gp( + struct config_group *group, + struct config_item *item) +{ + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), + struct 
t10_alua_lu_gp, lu_gp_group); + + printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" + " Group: core/alua/lu_gps/%s, ID: %hu\n", + config_item_name(item), lu_gp->lu_gp_id); + + config_item_put(item); + core_alua_free_lu_gp(lu_gp); +} + +static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { + .make_group = &target_core_alua_create_lu_gp, + .drop_item = &target_core_alua_drop_lu_gp, +}; + +static struct config_item_type target_core_alua_lu_gps_cit = { + .ct_item_ops = NULL, + .ct_group_ops = &target_core_alua_lu_gps_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_lu_gps_cit */ + +/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ + +CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp); +#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \ +static struct target_core_alua_tg_pt_gp_attribute \ + target_core_alua_tg_pt_gp_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_alua_tg_pt_gp_show_attr_##_name, \ + target_core_alua_tg_pt_gp_store_attr_##_name); + +#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \ +static struct target_core_alua_tg_pt_gp_attribute \ + target_core_alua_tg_pt_gp_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_alua_tg_pt_gp_show_attr_##_name); + +/* + * alua_access_state + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state)); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; + unsigned long tmp; + int new_state, ret; + + if (!(tg_pt_gp->tg_pt_gp_valid_id)) { + printk(KERN_ERR "Unable to do implict ALUA on non valid" + " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); + return -EINVAL; + } + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk("Unable to extract new ALUA access state from" + " %s\n", page); + return -EINVAL; + } + new_state = (int)tmp; + + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { + printk(KERN_ERR "Unable to process implict configfs ALUA" + " transition while TPGS_IMPLICT_ALUA is diabled\n"); + return -EINVAL; + } + + ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, + NULL, NULL, new_state, 0); + return (!ret) ? 
count : -EINVAL; +} + +SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR); + +/* + * alua_access_status + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%s\n", + core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int new_status, ret; + + if (!(tg_pt_gp->tg_pt_gp_valid_id)) { + printk(KERN_ERR "Unable to do set ALUA access status on non" + " valid tg_pt_gp ID: %hu\n", + tg_pt_gp->tg_pt_gp_valid_id); + return -EINVAL; + } + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract new ALUA access status" + " from %s\n", page); + return -EINVAL; + } + new_status = (int)tmp; + + if ((new_status != ALUA_STATUS_NONE) && + (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && + (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", + new_status); + return -EINVAL; + } + + tg_pt_gp->tg_pt_gp_alua_access_status = new_status; + return count; +} + +SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR); + +/* + * alua_access_type + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_access_type(tg_pt_gp, page); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return core_alua_store_access_type(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); + +/* + * alua_write_metadata + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract alua_write_metadata\n"); + return -EINVAL; + } + + if ((tmp != 0) && (tmp != 1)) { + printk(KERN_ERR "Illegal value for alua_write_metadata:" + " %lu\n", tmp); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; + + return count; +} + +SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR); + + + +/* + * nonop_delay_msecs + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_nonop_delay_msecs(tg_pt_gp, page); + +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR); + +/* + * trans_delay_msecs + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_trans_delay_msecs(tg_pt_gp, page); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return 
core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); + +/* + * preferred + */ + +static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_preferred_bit(tg_pt_gp, page); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return core_alua_store_preferred_bit(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR); + +/* + * tg_pt_gp_id + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + if (!(tg_pt_gp->tg_pt_gp_valid_id)) + return 0; + + return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; + unsigned long tg_pt_gp_id; + int ret; + + ret = strict_strtoul(page, 0, &tg_pt_gp_id); + if (ret < 0) { + printk(KERN_ERR "strict_strtoul() returned %d for" + " tg_pt_gp_id\n", ret); + return -EINVAL; + } + if (tg_pt_gp_id > 0x0000ffff) { + printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" + " 0x0000ffff\n", tg_pt_gp_id); + return -EINVAL; + } + + ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); + if (ret < 0) + return -EINVAL; + + printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " + "core/alua/tg_pt_gps/%s to ID: %hu\n", + config_item_name(&alua_tg_pt_gp_cg->cg_item), + tg_pt_gp->tg_pt_gp_id); + + return count; +} + +SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR); + +/* + * members + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_members( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + struct se_port *port; + struct se_portal_group *tpg; + struct se_lun *lun; + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; + ssize_t len = 0, cur_len; + unsigned char buf[TG_PT_GROUP_NAME_BUF]; + + memset(buf, 0, TG_PT_GROUP_NAME_BUF); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, + tg_pt_gp_mem_list) { + port = tg_pt_gp_mem->tg_pt; + tpg = port->sep_tpg; + lun = port->sep_lun; + + cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" + "/%s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_wwn(tpg), + TPG_TFO(tpg)->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item)); + cur_len++; /* Extra byte for NULL terminator */ + + if ((cur_len + len) > PAGE_SIZE) { + printk(KERN_WARNING "Ran out of lu_gp_show_attr" + "_members buffer\n"); + break; + } + memcpy(page+len, buf, cur_len); + len += cur_len; + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + return len; +} + +SE_DEV_ALUA_TG_PT_ATTR_RO(members); + +CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp, + tg_pt_gp_group); + +static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { + &target_core_alua_tg_pt_gp_alua_access_state.attr, + &target_core_alua_tg_pt_gp_alua_access_status.attr, + &target_core_alua_tg_pt_gp_alua_access_type.attr, + &target_core_alua_tg_pt_gp_alua_write_metadata.attr, + &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, + &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, + &target_core_alua_tg_pt_gp_preferred.attr, + &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, + &target_core_alua_tg_pt_gp_members.attr, + NULL, 
+}; + +static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { + .show_attribute = target_core_alua_tg_pt_gp_attr_show, + .store_attribute = target_core_alua_tg_pt_gp_attr_store, +}; + +static struct config_item_type target_core_alua_tg_pt_gp_cit = { + .ct_item_ops = &target_core_alua_tg_pt_gp_ops, + .ct_attrs = target_core_alua_tg_pt_gp_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ + +/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ + +static struct config_group *target_core_alua_create_tg_pt_gp( + struct config_group *group, + const char *name) +{ + struct t10_alua *alua = container_of(group, struct t10_alua, + alua_tg_pt_gps_group); + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct se_subsystem_dev *su_dev = alua->t10_sub_dev; + struct config_group *alua_tg_pt_gp_cg = NULL; + struct config_item *alua_tg_pt_gp_ci = NULL; + + tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); + if (!(tg_pt_gp)) + return NULL; + + alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; + alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; + + config_group_init_type_name(alua_tg_pt_gp_cg, name, + &target_core_alua_tg_pt_gp_cit); + + printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" + " Group: alua/tg_pt_gps/%s\n", + config_item_name(alua_tg_pt_gp_ci)); + + return alua_tg_pt_gp_cg; +} + +static void target_core_alua_drop_tg_pt_gp( + struct config_group *group, + struct config_item *item) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), + struct t10_alua_tg_pt_gp, tg_pt_gp_group); + + printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" + " Group: alua/tg_pt_gps/%s, ID: %hu\n", + config_item_name(item), tg_pt_gp->tg_pt_gp_id); + + config_item_put(item); + core_alua_free_tg_pt_gp(tg_pt_gp); +} + +static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { + .make_group = &target_core_alua_create_tg_pt_gp, + .drop_item = &target_core_alua_drop_tg_pt_gp, +}; + +static struct config_item_type target_core_alua_tg_pt_gps_cit = { + .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ + +/* Start functions for struct config_item_type target_core_alua_cit */ + +/* + * target_core_alua_cit is a ConfigFS group that lives under + * /sys/kernel/config/target/core/alua. There are default groups + * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to + * target_core_alua_cit in target_core_init_configfs() below. + */ +static struct config_item_type target_core_alua_cit = { + .ct_item_ops = NULL, + .ct_attrs = NULL, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_cit */ + +/* Start functions for struct config_item_type target_core_hba_cit */ + +static struct config_group *target_core_make_subdev( + struct config_group *group, + const char *name) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct se_subsystem_dev *se_dev; + struct se_subsystem_api *t; + struct config_item *hba_ci = &group->cg_item; + struct se_hba *hba = item_to_hba(hba_ci); + struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; + + if (mutex_lock_interruptible(&hba->hba_access_mutex)) + return NULL; + + /* + * Locate the struct se_subsystem_api from parent's struct se_hba. 
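+	 *
+	 * This ->make_group() handler runs when userspace creates a device
+	 * directory underneath an HBA group, e.g. (illustrative names):
+	 *
+	 *   mkdir $CONFIGFS/core/rd_mcp_0/ramdisk0
+	 *
+	 * The parent HBA was itself named $SUBSYSTEM_PLUGIN_$HOST_ID when it
+	 * was created, which is where hba->transport was bound to the
+	 * matching subsystem plugin.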
+ */ + t = hba->transport; + + se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); + if (!se_dev) { + printk(KERN_ERR "Unable to allocate memory for" + " struct se_subsystem_dev\n"); + goto unlock; + } + INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); + spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); + INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); + INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); + spin_lock_init(&se_dev->t10_reservation.registration_lock); + spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); + spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); + spin_lock_init(&se_dev->se_dev_lock); + se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_wwn.t10_sub_dev = se_dev; + se_dev->t10_alua.t10_sub_dev = se_dev; + se_dev->se_dev_attrib.da_sub_dev = se_dev; + + se_dev->se_dev_hba = hba; + dev_cg = &se_dev->se_dev_group; + + dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6, + GFP_KERNEL); + if (!(dev_cg->default_groups)) + goto out; + /* + * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr + * for ->allocate_virtdevice() + * + * se_dev->se_dev_ptr will be set after ->create_virtdev() + * has been called successfully in the next level up in the + * configfs tree for device object's struct config_group. + */ + se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); + if (!(se_dev->se_dev_su_ptr)) { + printk(KERN_ERR "Unable to locate subsystem dependent pointer" + " from allocate_virtdevice()\n"); + goto out; + } + spin_lock(&se_global->g_device_lock); + list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); + spin_unlock(&se_global->g_device_lock); + + config_group_init_type_name(&se_dev->se_dev_group, name, + &target_core_dev_cit); + config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib", + &target_core_dev_attrib_cit); + config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", + &target_core_dev_pr_cit); + config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", + &target_core_dev_wwn_cit); + config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, + "alua", &target_core_alua_tg_pt_gps_cit); + dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; + dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; + dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; + dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; + dev_cg->default_groups[4] = NULL; + /* + * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp + */ + tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); + if (!(tg_pt_gp)) + goto out; + + tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, + GFP_KERNEL); + if (!(tg_pt_gp_cg->default_groups)) { + printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" + "default_groups\n"); + goto out; + } + + config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, + "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); + tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; + tg_pt_gp_cg->default_groups[1] = NULL; + T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; + + printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" + " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); + + mutex_unlock(&hba->hba_access_mutex); + return &se_dev->se_dev_group; +out: + if (T10_ALUA(se_dev)->default_tg_pt_gp) { + 
core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); + T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + } + if (tg_pt_gp_cg) + kfree(tg_pt_gp_cg->default_groups); + if (dev_cg) + kfree(dev_cg->default_groups); + if (se_dev->se_dev_su_ptr) + t->free_device(se_dev->se_dev_su_ptr); + kfree(se_dev); +unlock: + mutex_unlock(&hba->hba_access_mutex); + return NULL; +} + +static void target_core_drop_subdev( + struct config_group *group, + struct config_item *item) +{ + struct se_subsystem_dev *se_dev = container_of(to_config_group(item), + struct se_subsystem_dev, se_dev_group); + struct se_hba *hba; + struct se_subsystem_api *t; + struct config_item *df_item; + struct config_group *dev_cg, *tg_pt_gp_cg; + int i, ret; + + hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); + + if (mutex_lock_interruptible(&hba->hba_access_mutex)) + goto out; + + t = hba->transport; + + spin_lock(&se_global->g_device_lock); + list_del(&se_dev->g_se_dev_list); + spin_unlock(&se_global->g_device_lock); + + tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { + df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; + tg_pt_gp_cg->default_groups[i] = NULL; + config_item_put(df_item); + } + kfree(tg_pt_gp_cg->default_groups); + core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); + T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + + dev_cg = &se_dev->se_dev_group; + for (i = 0; dev_cg->default_groups[i]; i++) { + df_item = &dev_cg->default_groups[i]->cg_item; + dev_cg->default_groups[i] = NULL; + config_item_put(df_item); + } + + config_item_put(item); + /* + * This pointer will set when the storage is enabled with: + * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` + */ + if (se_dev->se_dev_ptr) { + printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" + "virtual_device() for se_dev_ptr: %p\n", + se_dev->se_dev_ptr); + + ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); + if (ret < 0) + goto hba_out; + } else { + /* + * Release struct se_subsystem_dev->se_dev_su_ptr.. 
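+		 * i.e. the device directory was created but never enabled, so
+		 * only the backend allocation returned by ->allocate_virtdevice()
+		 * has to be handed back here via ->free_device().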
+ */ + printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" + "device() for se_dev_su_ptr: %p\n", + se_dev->se_dev_su_ptr); + + t->free_device(se_dev->se_dev_su_ptr); + } + + printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" + "_dev_t: %p\n", se_dev); + +hba_out: + mutex_unlock(&hba->hba_access_mutex); +out: + kfree(se_dev); +} + +static struct configfs_group_operations target_core_hba_group_ops = { + .make_group = target_core_make_subdev, + .drop_item = target_core_drop_subdev, +}; + +CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba); +#define SE_HBA_ATTR(_name, _mode) \ +static struct target_core_hba_attribute \ + target_core_hba_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_core_hba_show_attr_##_name, \ + target_core_hba_store_attr_##_name); + +#define SE_HBA_ATTR_RO(_name) \ +static struct target_core_hba_attribute \ + target_core_hba_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + target_core_hba_show_attr_##_name); + +static ssize_t target_core_hba_show_attr_hba_info( + struct se_hba *hba, + char *page) +{ + return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", + hba->hba_id, hba->transport->name, + TARGET_CORE_CONFIGFS_VERSION); +} + +SE_HBA_ATTR_RO(hba_info); + +static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, + char *page) +{ + int hba_mode = 0; + + if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) + hba_mode = 1; + + return sprintf(page, "%d\n", hba_mode); +} + +static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, + const char *page, size_t count) +{ + struct se_subsystem_api *transport = hba->transport; + unsigned long mode_flag; + int ret; + + if (transport->pmode_enable_hba == NULL) + return -EINVAL; + + ret = strict_strtoul(page, 0, &mode_flag); + if (ret < 0) { + printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); + return -EINVAL; + } + + spin_lock(&hba->device_lock); + if (!(list_empty(&hba->hba_dev_list))) { + printk(KERN_ERR "Unable to set hba_mode with active devices\n"); + spin_unlock(&hba->device_lock); + return -EINVAL; + } + spin_unlock(&hba->device_lock); + + ret = transport->pmode_enable_hba(hba, mode_flag); + if (ret < 0) + return -EINVAL; + if (ret > 0) + hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; + else if (ret == 0) + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + + return count; +} + +SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); + +CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); + +static struct configfs_attribute *target_core_hba_attrs[] = { + &target_core_hba_hba_info.attr, + &target_core_hba_hba_mode.attr, + NULL, +}; + +static struct configfs_item_operations target_core_hba_item_ops = { + .show_attribute = target_core_hba_attr_show, + .store_attribute = target_core_hba_attr_store, +}; + +static struct config_item_type target_core_hba_cit = { + .ct_item_ops = &target_core_hba_item_ops, + .ct_group_ops = &target_core_hba_group_ops, + .ct_attrs = target_core_hba_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *target_core_call_addhbatotarget( + struct config_group *group, + const char *name) +{ + char *se_plugin_str, *str, *str2; + struct se_hba *hba; + char buf[TARGET_CORE_NAME_MAX_LEN]; + unsigned long plugin_dep_id = 0; + int ret; + + memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); + if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) { + printk(KERN_ERR "Passed *name strlen(): %d exceeds" + " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), + TARGET_CORE_NAME_MAX_LEN); + return ERR_PTR(-ENAMETOOLONG); + } + snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); + + str = 
strstr(buf, "_"); + if (!(str)) { + printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); + return ERR_PTR(-EINVAL); + } + se_plugin_str = buf; + /* + * Special case for subsystem plugins that have "_" in their names. + * Namely rd_direct and rd_mcp.. + */ + str2 = strstr(str+1, "_"); + if ((str2)) { + *str2 = '\0'; /* Terminate for *se_plugin_str */ + str2++; /* Skip to start of plugin dependent ID */ + str = str2; + } else { + *str = '\0'; /* Terminate for *se_plugin_str */ + str++; /* Skip to start of plugin dependent ID */ + } + + ret = strict_strtoul(str, 0, &plugin_dep_id); + if (ret < 0) { + printk(KERN_ERR "strict_strtoul() returned %d for" + " plugin_dep_id\n", ret); + return ERR_PTR(-EINVAL); + } + /* + * Load up TCM subsystem plugins if they have not already been loaded. + */ + if (transport_subsystem_check_init() < 0) + return ERR_PTR(-EINVAL); + + hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); + if (IS_ERR(hba)) + return ERR_CAST(hba); + + config_group_init_type_name(&hba->hba_group, name, + &target_core_hba_cit); + + return &hba->hba_group; +} + +static void target_core_call_delhbafromtarget( + struct config_group *group, + struct config_item *item) +{ + struct se_hba *hba = item_to_hba(item); + + config_item_put(item); + core_delete_hba(hba); +} + +static struct configfs_group_operations target_core_group_ops = { + .make_group = target_core_call_addhbatotarget, + .drop_item = target_core_call_delhbafromtarget, +}; + +static struct config_item_type target_core_cit = { + .ct_item_ops = NULL, + .ct_group_ops = &target_core_group_ops, + .ct_attrs = NULL, + .ct_owner = THIS_MODULE, +}; + +/* Stop functions for struct config_item_type target_core_hba_cit */ + +static int target_core_init_configfs(void) +{ + struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; + struct config_group *lu_gp_cg = NULL; + struct configfs_subsystem *subsys; + struct proc_dir_entry *scsi_target_proc = NULL; + struct t10_alua_lu_gp *lu_gp; + int ret; + + printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" + " Engine: %s on %s/%s on "UTS_RELEASE"\n", + TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); + + subsys = target_core_subsystem[0]; + config_group_init(&subsys->su_group); + mutex_init(&subsys->su_mutex); + + INIT_LIST_HEAD(&g_tf_list); + mutex_init(&g_tf_lock); + init_scsi_index_table(); + ret = init_se_global(); + if (ret < 0) + return -1; + /* + * Create $CONFIGFS/target/core default group for HBA <-> Storage Object + * and ALUA Logical Unit Group and Target Port Group infrastructure. 
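+	 *
+	 * Sketch of the default layout built below (per-device groups are
+	 * added later under core/$HBA/$DEV by target_core_make_subdev()):
+	 *
+	 *   $CONFIGFS/target/
+	 *   `-- core/
+	 *       `-- alua/
+	 *           `-- lu_gps/
+	 *               `-- default_lu_gp/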
+ */ + target_cg = &subsys->su_group; + target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, + GFP_KERNEL); + if (!(target_cg->default_groups)) { + printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); + goto out_global; + } + + config_group_init_type_name(&se_global->target_core_hbagroup, + "core", &target_core_cit); + target_cg->default_groups[0] = &se_global->target_core_hbagroup; + target_cg->default_groups[1] = NULL; + /* + * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ + */ + hba_cg = &se_global->target_core_hbagroup; + hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, + GFP_KERNEL); + if (!(hba_cg->default_groups)) { + printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); + goto out_global; + } + config_group_init_type_name(&se_global->alua_group, + "alua", &target_core_alua_cit); + hba_cg->default_groups[0] = &se_global->alua_group; + hba_cg->default_groups[1] = NULL; + /* + * Add ALUA Logical Unit Group and Target Port Group ConfigFS + * groups under /sys/kernel/config/target/core/alua/ + */ + alua_cg = &se_global->alua_group; + alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, + GFP_KERNEL); + if (!(alua_cg->default_groups)) { + printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); + goto out_global; + } + + config_group_init_type_name(&se_global->alua_lu_gps_group, + "lu_gps", &target_core_alua_lu_gps_cit); + alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; + alua_cg->default_groups[1] = NULL; + /* + * Add core/alua/lu_gps/default_lu_gp + */ + lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); + if (IS_ERR(lu_gp)) + goto out_global; + + lu_gp_cg = &se_global->alua_lu_gps_group; + lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, + GFP_KERNEL); + if (!(lu_gp_cg->default_groups)) { + printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); + goto out_global; + } + + config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", + &target_core_alua_lu_gp_cit); + lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; + lu_gp_cg->default_groups[1] = NULL; + se_global->default_lu_gp = lu_gp; + /* + * Register the target_core_mod subsystem with configfs. 
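+	 *
+	 * Nothing becomes visible to userspace until this call succeeds, and
+	 * only once configfs itself is mounted, typically:
+	 *
+	 *   mount -t configfs configfs /sys/kernel/config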
+ */ + ret = configfs_register_subsystem(subsys); + if (ret < 0) { + printk(KERN_ERR "Error %d while registering subsystem %s\n", + ret, subsys->su_group.cg_item.ci_namebuf); + goto out_global; + } + printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" + " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" + " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); + /* + * Register built-in RAMDISK subsystem logic for virtual LUN 0 + */ + ret = rd_module_init(); + if (ret < 0) + goto out; + + if (core_dev_setup_virtual_lun0() < 0) + goto out; + + scsi_target_proc = proc_mkdir("scsi_target", 0); + if (!(scsi_target_proc)) { + printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); + goto out; + } + ret = init_scsi_target_mib(); + if (ret < 0) + goto out; + + return 0; + +out: + configfs_unregister_subsystem(subsys); + if (scsi_target_proc) + remove_proc_entry("scsi_target", 0); + core_dev_release_virtual_lun0(); + rd_module_exit(); +out_global: + if (se_global->default_lu_gp) { + core_alua_free_lu_gp(se_global->default_lu_gp); + se_global->default_lu_gp = NULL; + } + if (lu_gp_cg) + kfree(lu_gp_cg->default_groups); + if (alua_cg) + kfree(alua_cg->default_groups); + if (hba_cg) + kfree(hba_cg->default_groups); + kfree(target_cg->default_groups); + release_se_global(); + return -1; +} + +static void target_core_exit_configfs(void) +{ + struct configfs_subsystem *subsys; + struct config_group *hba_cg, *alua_cg, *lu_gp_cg; + struct config_item *item; + int i; + + se_global->in_shutdown = 1; + subsys = target_core_subsystem[0]; + + lu_gp_cg = &se_global->alua_lu_gps_group; + for (i = 0; lu_gp_cg->default_groups[i]; i++) { + item = &lu_gp_cg->default_groups[i]->cg_item; + lu_gp_cg->default_groups[i] = NULL; + config_item_put(item); + } + kfree(lu_gp_cg->default_groups); + core_alua_free_lu_gp(se_global->default_lu_gp); + se_global->default_lu_gp = NULL; + + alua_cg = &se_global->alua_group; + for (i = 0; alua_cg->default_groups[i]; i++) { + item = &alua_cg->default_groups[i]->cg_item; + alua_cg->default_groups[i] = NULL; + config_item_put(item); + } + kfree(alua_cg->default_groups); + + hba_cg = &se_global->target_core_hbagroup; + for (i = 0; hba_cg->default_groups[i]; i++) { + item = &hba_cg->default_groups[i]->cg_item; + hba_cg->default_groups[i] = NULL; + config_item_put(item); + } + kfree(hba_cg->default_groups); + + for (i = 0; subsys->su_group.default_groups[i]; i++) { + item = &subsys->su_group.default_groups[i]->cg_item; + subsys->su_group.default_groups[i] = NULL; + config_item_put(item); + } + kfree(subsys->su_group.default_groups); + + configfs_unregister_subsystem(subsys); + printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" + " Infrastructure\n"); + + remove_scsi_target_mib(); + remove_proc_entry("scsi_target", 0); + core_dev_release_virtual_lun0(); + rd_module_exit(); + release_se_global(); + + return; +} + +MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(target_core_init_configfs); +module_exit(target_core_exit_configfs); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c new file mode 100644 index 000000000000..317ce58d426d --- /dev/null +++ b/drivers/target/target_core_device.c @@ -0,0 +1,1694 @@ +/******************************************************************************* + * Filename: target_core_device.c (based on iscsi_target_device.c) + * + * This file contains the iSCSI Virtual Device and Disk Transport + * agnostic related 
functions. + * + * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +static void se_dev_start(struct se_device *dev); +static void se_dev_stop(struct se_device *dev); + +int transport_get_lun_for_cmd( + struct se_cmd *se_cmd, + unsigned char *cdb, + u32 unpacked_lun) +{ + struct se_dev_entry *deve; + struct se_lun *se_lun = NULL; + struct se_session *se_sess = SE_SESS(se_cmd); + unsigned long flags; + int read_only = 0; + + spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + deve = se_cmd->se_deve = + &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + if (se_cmd) { + deve->total_cmds++; + deve->total_bytes += se_cmd->data_length; + + if (se_cmd->data_direction == DMA_TO_DEVICE) { + if (deve->lun_flags & + TRANSPORT_LUNFLAGS_READ_ONLY) { + read_only = 1; + goto out; + } + deve->write_bytes += se_cmd->data_length; + } else if (se_cmd->data_direction == + DMA_FROM_DEVICE) { + deve->read_bytes += se_cmd->data_length; + } + } + deve->deve_cmds++; + + se_lun = se_cmd->se_lun = deve->se_lun; + se_cmd->pr_res_key = deve->pr_res_key; + se_cmd->orig_fe_lun = unpacked_lun; + se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + } +out: + spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + + if (!se_lun) { + if (read_only) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08x\n", + CMD_TFO(se_cmd)->get_fabric_name(), + unpacked_lun); + return -1; + } else { + /* + * Use the se_portal_group->tpg_virt_lun0 to allow for + * REPORT_LUNS, et al to be returned when no active + * MappedLUN=0 exists for this Initiator Port. 
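+			 *
+			 * e.g. an initiator with no explicit MappedLUN=0 can
+			 * still issue INQUIRY/REPORT LUNS to LUN 0 during
+			 * discovery; any other unmapped LUN, or a write to
+			 * this virtual LUN 0, is rejected just below.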
+ */ + if (unpacked_lun != 0) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + " Access for 0x%08x\n", + CMD_TFO(se_cmd)->get_fabric_name(), + unpacked_lun); + return -1; + } + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -1; + } +#if 0 + printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", + CMD_TFO(se_cmd)->get_fabric_name()); +#endif + se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->orig_fe_lun = 0; + se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + } + } + /* + * Determine if the struct se_lun is online. + */ +/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ + if (se_dev_check_online(se_lun->lun_se_dev) != 0) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -1; + } + + { + struct se_device *dev = se_lun->lun_se_dev; + spin_lock(&dev->stats_lock); + dev->num_cmds++; + if (se_cmd->data_direction == DMA_TO_DEVICE) + dev->write_bytes += se_cmd->data_length; + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + dev->read_bytes += se_cmd->data_length; + spin_unlock(&dev->stats_lock); + } + + /* + * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used + * for tracking state of struct se_cmds during LUN shutdown events. + */ + spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); + list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); + atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); +#if 0 + printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", + CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); +#endif + spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); + + return 0; +} +EXPORT_SYMBOL(transport_get_lun_for_cmd); + +int transport_get_lun_for_tmr( + struct se_cmd *se_cmd, + u32 unpacked_lun) +{ + struct se_device *dev = NULL; + struct se_dev_entry *deve; + struct se_lun *se_lun = NULL; + struct se_session *se_sess = SE_SESS(se_cmd); + struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; + + spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + deve = se_cmd->se_deve = + &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; + dev = se_tmr->tmr_dev = se_lun->lun_se_dev; + se_cmd->pr_res_key = deve->pr_res_key; + se_cmd->orig_fe_lun = unpacked_lun; + se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; +/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ + } + spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + + if (!se_lun) { + printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + " Access for 0x%08x\n", + CMD_TFO(se_cmd)->get_fabric_name(), + unpacked_lun); + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -1; + } + /* + * Determine if the struct se_lun is online. 
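+	 * An offline device fails here with TCM_NON_EXISTENT_LUN before any
+	 * of the per-device statistics below are updated.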
+ */ +/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ + if (se_dev_check_online(se_lun->lun_se_dev) != 0) { + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -1; + } + + spin_lock(&dev->se_tmr_lock); + list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); + spin_unlock(&dev->se_tmr_lock); + + return 0; +} +EXPORT_SYMBOL(transport_get_lun_for_tmr); + +/* + * This function is called from core_scsi3_emulate_pro_register_and_move() + * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count + * when a matching rtpi is found. + */ +struct se_dev_entry *core_get_se_deve_from_rtpi( + struct se_node_acl *nacl, + u16 rtpi) +{ + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_port *port; + struct se_portal_group *tpg = nacl->se_tpg; + u32 i; + + spin_lock_irq(&nacl->device_list_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &nacl->device_list[i]; + + if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) + continue; + + lun = deve->se_lun; + if (!(lun)) { + printk(KERN_ERR "%s device entries device pointer is" + " NULL, but Initiator has access.\n", + TPG_TFO(tpg)->get_fabric_name()); + continue; + } + port = lun->lun_sep; + if (!(port)) { + printk(KERN_ERR "%s device entries device pointer is" + " NULL, but Initiator has access.\n", + TPG_TFO(tpg)->get_fabric_name()); + continue; + } + if (port->sep_rtpi != rtpi) + continue; + + atomic_inc(&deve->pr_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock_irq(&nacl->device_list_lock); + + return deve; + } + spin_unlock_irq(&nacl->device_list_lock); + + return NULL; +} + +int core_free_device_list_for_node( + struct se_node_acl *nacl, + struct se_portal_group *tpg) +{ + struct se_dev_entry *deve; + struct se_lun *lun; + u32 i; + + if (!nacl->device_list) + return 0; + + spin_lock_irq(&nacl->device_list_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &nacl->device_list[i]; + + if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) + continue; + + if (!deve->se_lun) { + printk(KERN_ERR "%s device entries device pointer is" + " NULL, but Initiator has access.\n", + TPG_TFO(tpg)->get_fabric_name()); + continue; + } + lun = deve->se_lun; + + spin_unlock_irq(&nacl->device_list_lock); + core_update_device_list_for_node(lun, NULL, deve->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); + spin_lock_irq(&nacl->device_list_lock); + } + spin_unlock_irq(&nacl->device_list_lock); + + kfree(nacl->device_list); + nacl->device_list = NULL; + + return 0; +} + +void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) +{ + struct se_dev_entry *deve; + + spin_lock_irq(&se_nacl->device_list_lock); + deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; + deve->deve_cmds--; + spin_unlock_irq(&se_nacl->device_list_lock); + + return; +} + +void core_update_device_list_access( + u32 mapped_lun, + u32 lun_access, + struct se_node_acl *nacl) +{ + struct se_dev_entry *deve; + + spin_lock_irq(&nacl->device_list_lock); + deve = &nacl->device_list[mapped_lun]; + if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { + deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; + deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; + } else { + deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; + deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; + } + spin_unlock_irq(&nacl->device_list_lock); + + return; +} + +/* core_update_device_list_for_node(): + * + * + */ +int core_update_device_list_for_node( + struct se_lun *lun, + struct se_lun_acl *lun_acl, + u32 mapped_lun, 
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg,
+	int enable)
+{
+	struct se_port *port = lun->lun_sep;
+	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	int trans = 0;
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * port->sep_alua_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 */
+	if (!(enable)) {
+		/*
+		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+		 * struct se_lun_acl.
+		 */
+		if (!(deve->se_lun_acl))
+			return 0;
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_del(&deve->alua_port_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+	}
+
+	spin_lock_irq(&nacl->device_list_lock);
+	if (enable) {
+		/*
+		 * Check if the call is handling demo mode -> explicit LUN ACL
+		 * transition.  This transition must be for the same struct se_lun
+		 * + mapped_lun that was setup in demo mode.
+		 */
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			if (deve->se_lun_acl != NULL) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+					" LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -1;
+			}
+			if (deve->se_lun != lun) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" not match passed struct se_lun for demo mode"
+					" -> explicit LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -1;
+			}
+			deve->se_lun_acl = lun_acl;
+			trans = 1;
+		} else {
+			deve->se_lun = lun;
+			deve->se_lun_acl = lun_acl;
+			deve->mapped_lun = mapped_lun;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+		}
+
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+
+		if (trans) {
+			spin_unlock_irq(&nacl->device_list_lock);
+			return 0;
+		}
+		deve->creation_time = get_jiffies_64();
+		deve->attach_count++;
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		return 0;
+	}
+	/*
+	 * Wait for any in-process SPEC_I_PT=1 or REGISTER_AND_MOVE
+	 * PR operation to complete.
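+	 * The busy-wait that follows drops nacl->device_list_lock, spins
+	 * with cpu_relax() until deve->pr_ref_count returns to zero (the
+	 * reference taken e.g. in core_get_se_deve_from_rtpi()), and then
+	 * re-acquires the lock before tearing down the mapping.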
+ */ + spin_unlock_irq(&nacl->device_list_lock); + while (atomic_read(&deve->pr_ref_count) != 0) + cpu_relax(); + spin_lock_irq(&nacl->device_list_lock); + /* + * Disable struct se_dev_entry LUN ACL mapping + */ + core_scsi3_ua_release_all(deve); + deve->se_lun = NULL; + deve->se_lun_acl = NULL; + deve->lun_flags = 0; + deve->creation_time = 0; + deve->attach_count--; + spin_unlock_irq(&nacl->device_list_lock); + + core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); + return 0; +} + +/* core_clear_lun_from_tpg(): + * + * + */ +void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) +{ + struct se_node_acl *nacl; + struct se_dev_entry *deve; + u32 i; + + spin_lock_bh(&tpg->acl_node_lock); + list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { + spin_unlock_bh(&tpg->acl_node_lock); + + spin_lock_irq(&nacl->device_list_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &nacl->device_list[i]; + if (lun != deve->se_lun) + continue; + spin_unlock_irq(&nacl->device_list_lock); + + core_update_device_list_for_node(lun, NULL, + deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, + nacl, tpg, 0); + + spin_lock_irq(&nacl->device_list_lock); + } + spin_unlock_irq(&nacl->device_list_lock); + + spin_lock_bh(&tpg->acl_node_lock); + } + spin_unlock_bh(&tpg->acl_node_lock); + + return; +} + +static struct se_port *core_alloc_port(struct se_device *dev) +{ + struct se_port *port, *port_tmp; + + port = kzalloc(sizeof(struct se_port), GFP_KERNEL); + if (!(port)) { + printk(KERN_ERR "Unable to allocate struct se_port\n"); + return NULL; + } + INIT_LIST_HEAD(&port->sep_alua_list); + INIT_LIST_HEAD(&port->sep_list); + atomic_set(&port->sep_tg_pt_secondary_offline, 0); + spin_lock_init(&port->sep_alua_lock); + mutex_init(&port->sep_tg_pt_md_mutex); + + spin_lock(&dev->se_port_lock); + if (dev->dev_port_count == 0x0000ffff) { + printk(KERN_WARNING "Reached dev->dev_port_count ==" + " 0x0000ffff\n"); + spin_unlock(&dev->se_port_lock); + return NULL; + } +again: + /* + * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device + * Here is the table from spc4r17 section 7.7.3.8. + * + * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field + * + * Code Description + * 0h Reserved + * 1h Relative port 1, historically known as port A + * 2h Relative port 2, historically known as port B + * 3h to FFFFh Relative port 3 through 65 535 + */ + port->sep_rtpi = dev->dev_rpti_counter++; + if (!(port->sep_rtpi)) + goto again; + + list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { + /* + * Make sure RELATIVE TARGET PORT IDENTIFER is unique + * for 16-bit wrap.. 
+ */ + if (port->sep_rtpi == port_tmp->sep_rtpi) + goto again; + } + spin_unlock(&dev->se_port_lock); + + return port; +} + +static void core_export_port( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_port *port, + struct se_lun *lun) +{ + struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; + + spin_lock(&dev->se_port_lock); + spin_lock(&lun->lun_sep_lock); + port->sep_tpg = tpg; + port->sep_lun = lun; + lun->lun_sep = port; + spin_unlock(&lun->lun_sep_lock); + + list_add_tail(&port->sep_list, &dev->dev_sep_list); + spin_unlock(&dev->se_port_lock); + + if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { + tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); + if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { + printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" + "_gp_member_t\n"); + return; + } + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, + T10_ALUA(su_dev)->default_tg_pt_gp); + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" + " Group: alua/default_tg_pt_gp\n", + TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); + } + + dev->dev_port_count++; + port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ +} + +/* + * Called with struct se_device->se_port_lock spinlock held. + */ +static void core_release_port(struct se_device *dev, struct se_port *port) +{ + /* + * Wait for any port reference for PR ALL_TG_PT=1 operation + * to complete in __core_scsi3_alloc_registration() + */ + spin_unlock(&dev->se_port_lock); + if (atomic_read(&port->sep_tg_pt_ref_cnt)) + cpu_relax(); + spin_lock(&dev->se_port_lock); + + core_alua_free_tg_pt_gp_mem(port); + + list_del(&port->sep_list); + dev->dev_port_count--; + kfree(port); + + return; +} + +int core_dev_export( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun) +{ + struct se_port *port; + + port = core_alloc_port(dev); + if (!(port)) + return -1; + + lun->lun_se_dev = dev; + se_dev_start(dev); + + atomic_inc(&dev->dev_export_obj.obj_access_count); + core_export_port(dev, tpg, port, lun); + return 0; +} + +void core_dev_unexport( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun) +{ + struct se_port *port = lun->lun_sep; + + spin_lock(&lun->lun_sep_lock); + if (lun->lun_se_dev == NULL) { + spin_unlock(&lun->lun_sep_lock); + return; + } + spin_unlock(&lun->lun_sep_lock); + + spin_lock(&dev->se_port_lock); + atomic_dec(&dev->dev_export_obj.obj_access_count); + core_release_port(dev, port); + spin_unlock(&dev->se_port_lock); + + se_dev_stop(dev); + lun->lun_se_dev = NULL; +} + +int transport_core_report_lun_response(struct se_cmd *se_cmd) +{ + struct se_dev_entry *deve; + struct se_lun *se_lun; + struct se_session *se_sess = SE_SESS(se_cmd); + struct se_task *se_task; + unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; + u32 cdb_offset = 0, lun_count = 0, offset = 8; + u64 i, lun; + + list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) + break; + + if (!(se_task)) { + printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + + /* + * If no struct se_session pointer is present, this struct se_cmd is + * coming via a target_core_mod PASSTHROUGH op, and not through + * a $FABRIC_MOD. In that case, report LUN=0 only. 
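+ * Each entry in the REPORT LUNS parameter data built below is an 8-byte
+ * LUN field written most-significant byte first, so LUN 0 is encoded as
+ * eight 0x00 bytes starting at offset 8, after the 4-byte LUN LIST
+ * LENGTH header that is filled in at the "done" label.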
+ */ + if (!(se_sess)) { + lun = 0; + buf[offset++] = ((lun >> 56) & 0xff); + buf[offset++] = ((lun >> 48) & 0xff); + buf[offset++] = ((lun >> 40) & 0xff); + buf[offset++] = ((lun >> 32) & 0xff); + buf[offset++] = ((lun >> 24) & 0xff); + buf[offset++] = ((lun >> 16) & 0xff); + buf[offset++] = ((lun >> 8) & 0xff); + buf[offset++] = (lun & 0xff); + lun_count = 1; + goto done; + } + + spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &SE_NODE_ACL(se_sess)->device_list[i]; + if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) + continue; + se_lun = deve->se_lun; + /* + * We determine the correct LUN LIST LENGTH even once we + * have reached the initial allocation length. + * See SPC2-R20 7.19. + */ + lun_count++; + if ((cdb_offset + 8) >= se_cmd->data_length) + continue; + + lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); + buf[offset++] = ((lun >> 56) & 0xff); + buf[offset++] = ((lun >> 48) & 0xff); + buf[offset++] = ((lun >> 40) & 0xff); + buf[offset++] = ((lun >> 32) & 0xff); + buf[offset++] = ((lun >> 24) & 0xff); + buf[offset++] = ((lun >> 16) & 0xff); + buf[offset++] = ((lun >> 8) & 0xff); + buf[offset++] = (lun & 0xff); + cdb_offset += 8; + } + spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + + /* + * See SPC3 r07, page 159. + */ +done: + lun_count *= 8; + buf[0] = ((lun_count >> 24) & 0xff); + buf[1] = ((lun_count >> 16) & 0xff); + buf[2] = ((lun_count >> 8) & 0xff); + buf[3] = (lun_count & 0xff); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +/* se_release_device_for_hba(): + * + * + */ +void se_release_device_for_hba(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + + if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || + (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || + (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || + (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || + (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) + se_dev_stop(dev); + + if (dev->dev_ptr) { + kthread_stop(dev->process_thread); + if (dev->transport->free_device) + dev->transport->free_device(dev->dev_ptr); + } + + spin_lock(&hba->device_lock); + list_del(&dev->dev_list); + hba->dev_count--; + spin_unlock(&hba->device_lock); + + core_scsi3_free_all_registrations(dev); + se_release_vpd_for_dev(dev); + + kfree(dev->dev_status_queue_obj); + kfree(dev->dev_queue_obj); + kfree(dev); + + return; +} + +void se_release_vpd_for_dev(struct se_device *dev) +{ + struct t10_vpd *vpd, *vpd_tmp; + + spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); + list_for_each_entry_safe(vpd, vpd_tmp, + &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { + list_del(&vpd->vpd_list); + kfree(vpd); + } + spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); + + return; +} + +/* + * Called with struct se_hba->device_lock held. 
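+ * Note that se_clear_dev_ports() temporarily drops both hba->device_lock
+ * and dev->se_port_lock around each call to core_dev_del_lun(), and
+ * re-acquires them before continuing the dev_sep_list walk.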
+ */ +void se_clear_dev_ports(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + struct se_lun *lun; + struct se_portal_group *tpg; + struct se_port *sep, *sep_tmp; + + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { + spin_unlock(&dev->se_port_lock); + spin_unlock(&hba->device_lock); + + lun = sep->sep_lun; + tpg = sep->sep_tpg; + spin_lock(&lun->lun_sep_lock); + if (lun->lun_se_dev == NULL) { + spin_unlock(&lun->lun_sep_lock); + continue; + } + spin_unlock(&lun->lun_sep_lock); + + core_dev_del_lun(tpg, lun->unpacked_lun); + + spin_lock(&hba->device_lock); + spin_lock(&dev->se_port_lock); + } + spin_unlock(&dev->se_port_lock); + + return; +} + +/* se_free_virtual_device(): + * + * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. + */ +int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) +{ + spin_lock(&hba->device_lock); + se_clear_dev_ports(dev); + spin_unlock(&hba->device_lock); + + core_alua_free_lu_gp_mem(dev); + se_release_device_for_hba(dev); + + return 0; +} + +static void se_dev_start(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + + spin_lock(&hba->device_lock); + atomic_inc(&dev->dev_obj.obj_access_count); + if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { + if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { + dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; + dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; + } else if (dev->dev_status & + TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { + dev->dev_status &= + ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; + dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; + } + } + spin_unlock(&hba->device_lock); +} + +static void se_dev_stop(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + + spin_lock(&hba->device_lock); + atomic_dec(&dev->dev_obj.obj_access_count); + if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { + if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { + dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; + dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; + } else if (dev->dev_status & + TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { + dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; + dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; + } + } + spin_unlock(&hba->device_lock); + + while (atomic_read(&hba->dev_mib_access_count)) + cpu_relax(); +} + +int se_dev_check_online(struct se_device *dev) +{ + int ret; + + spin_lock_irq(&dev->dev_status_lock); + ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || + (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 
0 : 1; + spin_unlock_irq(&dev->dev_status_lock); + + return ret; +} + +int se_dev_check_shutdown(struct se_device *dev) +{ + int ret; + + spin_lock_irq(&dev->dev_status_lock); + ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); + spin_unlock_irq(&dev->dev_status_lock); + + return ret; +} + +void se_dev_set_default_attribs( + struct se_device *dev, + struct se_dev_limits *dev_limits) +{ + struct queue_limits *limits = &dev_limits->limits; + + DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; + DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; + DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; + DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; + DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; + DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; + DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; + DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; + DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; + DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; + DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + /* + * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK + * iblock_create_virtdevice() from struct queue_limits values + * if blk_queue_discard()==1 + */ + DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; + DEV_ATTRIB(dev)->max_unmap_block_desc_count = + DA_MAX_UNMAP_BLOCK_DESC_COUNT; + DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; + DEV_ATTRIB(dev)->unmap_granularity_alignment = + DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; + /* + * block_size is based on subsystem plugin dependent requirements. + */ + DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; + DEV_ATTRIB(dev)->block_size = limits->logical_block_size; + /* + * max_sectors is based on subsystem plugin dependent requirements. + */ + DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; + DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; + /* + * Set optimal_sectors from max_sectors, which can be lowered via + * configfs. + */ + DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; + /* + * queue_depth is based on subsystem plugin dependent requirements. 
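+ * Both hw_queue_depth and the initial queue_depth are taken from the
+ * struct se_dev_limits filled in by the backend subsystem plugin
+ * (e.g. IBLOCK, FILEIO, pSCSI, RAMDISK).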
+ */ + DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; + DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; +} + +int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) +{ + if (task_timeout > DA_TASK_TIMEOUT_MAX) { + printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" + " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); + return -1; + } else { + DEV_ATTRIB(dev)->task_timeout = task_timeout; + printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", + dev, task_timeout); + } + + return 0; +} + +int se_dev_set_max_unmap_lba_count( + struct se_device *dev, + u32 max_unmap_lba_count) +{ + DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; + printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", + dev, DEV_ATTRIB(dev)->max_unmap_lba_count); + return 0; +} + +int se_dev_set_max_unmap_block_desc_count( + struct se_device *dev, + u32 max_unmap_block_desc_count) +{ + DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; + printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", + dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); + return 0; +} + +int se_dev_set_unmap_granularity( + struct se_device *dev, + u32 unmap_granularity) +{ + DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; + printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", + dev, DEV_ATTRIB(dev)->unmap_granularity); + return 0; +} + +int se_dev_set_unmap_granularity_alignment( + struct se_device *dev, + u32 unmap_granularity_alignment) +{ + DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; + printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", + dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); + return 0; +} + +int se_dev_set_emulate_dpo(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + if (TRANSPORT(dev)->dpo_emulated == NULL) { + printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); + return -1; + } + if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { + printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); + return -1; + } + DEV_ATTRIB(dev)->emulate_dpo = flag; + printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" + " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); + return 0; +} + +int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + if (TRANSPORT(dev)->fua_write_emulated == NULL) { + printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); + return -1; + } + if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { + printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); + return -1; + } + DEV_ATTRIB(dev)->emulate_fua_write = flag; + printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", + dev, DEV_ATTRIB(dev)->emulate_fua_write); + return 0; +} + +int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + if (TRANSPORT(dev)->fua_read_emulated == NULL) { + printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); + return -1; + } + if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { + printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); + return -1; + } + DEV_ATTRIB(dev)->emulate_fua_read = flag; + printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", + dev, 
DEV_ATTRIB(dev)->emulate_fua_read); + return 0; +} + +int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + if (TRANSPORT(dev)->write_cache_emulated == NULL) { + printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); + return -1; + } + if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { + printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); + return -1; + } + DEV_ATTRIB(dev)->emulate_write_cache = flag; + printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", + dev, DEV_ATTRIB(dev)->emulate_write_cache); + return 0; +} + +int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1) && (flag != 2)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device" + " UA_INTRLCK_CTRL while dev_export_obj: %d count" + " exists\n", dev, + atomic_read(&dev->dev_export_obj.obj_access_count)); + return -1; + } + DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; + printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", + dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); + + return 0; +} + +int se_dev_set_emulate_tas(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" + " dev_export_obj: %d count exists\n", dev, + atomic_read(&dev->dev_export_obj.obj_access_count)); + return -1; + } + DEV_ATTRIB(dev)->emulate_tas = flag; + printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", + dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); + + return 0; +} + +int se_dev_set_emulate_tpu(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + /* + * We expect this value to be non-zero when generic Block Layer + * Discard supported is detected iblock_create_virtdevice(). + */ + if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + printk(KERN_ERR "Generic Block Discard not supported\n"); + return -ENOSYS; + } + + DEV_ATTRIB(dev)->emulate_tpu = flag; + printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", + dev, flag); + return 0; +} + +int se_dev_set_emulate_tpws(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + /* + * We expect this value to be non-zero when generic Block Layer + * Discard supported is detected iblock_create_virtdevice(). + */ + if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + printk(KERN_ERR "Generic Block Discard not supported\n"); + return -ENOSYS; + } + + DEV_ATTRIB(dev)->emulate_tpws = flag; + printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", + dev, flag); + return 0; +} + +int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -1; + } + DEV_ATTRIB(dev)->enforce_pr_isids = flag; + printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, + (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); + return 0; +} + +/* + * Note, this can only be called on unexported SE Device Object. 
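+ * This is enforced below by failing the request whenever
+ * dev->dev_export_obj.obj_access_count is non-zero, i.e. while the
+ * device is still exported through one or more LUNs.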
+ */ +int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) +{ + u32 orig_queue_depth = dev->queue_depth; + + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" + " dev_export_obj: %d count exists\n", dev, + atomic_read(&dev->dev_export_obj.obj_access_count)); + return -1; + } + if (!(queue_depth)) { + printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" + "_depth\n", dev); + return -1; + } + + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" + " exceeds TCM/SE_Device TCQ: %u\n", + dev, queue_depth, + DEV_ATTRIB(dev)->hw_queue_depth); + return -1; + } + } else { + if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { + if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + printk(KERN_ERR "dev[%p]: Passed queue_depth:" + " %u exceeds TCM/SE_Device MAX" + " TCQ: %u\n", dev, queue_depth, + DEV_ATTRIB(dev)->hw_queue_depth); + return -1; + } + } + } + + DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; + if (queue_depth > orig_queue_depth) + atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); + else if (queue_depth < orig_queue_depth) + atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); + + printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", + dev, queue_depth); + return 0; +} + +int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) +{ + int force = 0; /* Force setting for VDEVS */ + + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device" + " max_sectors while dev_export_obj: %d count exists\n", + dev, atomic_read(&dev->dev_export_obj.obj_access_count)); + return -1; + } + if (!(max_sectors)) { + printk(KERN_ERR "dev[%p]: Illegal ZERO value for" + " max_sectors\n", dev); + return -1; + } + if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { + printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" + " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, + DA_STATUS_MAX_SECTORS_MIN); + return -1; + } + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { + printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + " greater than TCM/SE_Device max_sectors:" + " %u\n", dev, max_sectors, + DEV_ATTRIB(dev)->hw_max_sectors); + return -1; + } + } else { + if (!(force) && (max_sectors > + DEV_ATTRIB(dev)->hw_max_sectors)) { + printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + " greater than TCM/SE_Device max_sectors" + ": %u, use force=1 to override.\n", dev, + max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); + return -1; + } + if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { + printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + " greater than DA_STATUS_MAX_SECTORS_MAX:" + " %u\n", dev, max_sectors, + DA_STATUS_MAX_SECTORS_MAX); + return -1; + } + } + + DEV_ATTRIB(dev)->max_sectors = max_sectors; + printk("dev[%p]: SE Device max_sectors changed to %u\n", + dev, max_sectors); + return 0; +} + +int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) +{ + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device" + " optimal_sectors while dev_export_obj: %d count exists\n", + dev, atomic_read(&dev->dev_export_obj.obj_access_count)); + return -EINVAL; + } + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + printk(KERN_ERR 
"dev[%p]: Passed optimal_sectors cannot be" + " changed for TCM/pSCSI\n", dev); + return -EINVAL; + } + if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { + printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" + " greater than max_sectors: %u\n", dev, + optimal_sectors, DEV_ATTRIB(dev)->max_sectors); + return -EINVAL; + } + + DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; + printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", + dev, optimal_sectors); + return 0; +} + +int se_dev_set_block_size(struct se_device *dev, u32 block_size) +{ + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { + printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" + " while dev_export_obj: %d count exists\n", dev, + atomic_read(&dev->dev_export_obj.obj_access_count)); + return -1; + } + + if ((block_size != 512) && + (block_size != 1024) && + (block_size != 2048) && + (block_size != 4096)) { + printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" + " for SE device, must be 512, 1024, 2048 or 4096\n", + dev, block_size); + return -1; + } + + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" + " Physical Device, use for Linux/SCSI to change" + " block_size for underlying hardware\n", dev); + return -1; + } + + DEV_ATTRIB(dev)->block_size = block_size; + printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", + dev, block_size); + return 0; +} + +struct se_lun *core_dev_add_lun( + struct se_portal_group *tpg, + struct se_hba *hba, + struct se_device *dev, + u32 lun) +{ + struct se_lun *lun_p; + u32 lun_access = 0; + + if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { + printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", + atomic_read(&dev->dev_access_obj.obj_access_count)); + return NULL; + } + + lun_p = core_tpg_pre_addlun(tpg, lun); + if ((IS_ERR(lun_p)) || !(lun_p)) + return NULL; + + if (dev->dev_flags & DF_READ_ONLY) + lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + else + lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; + + if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) + return NULL; + + printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" + " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, + TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); + /* + * Update LUN maps for dynamically added initiators when + * generate_node_acl is enabled. 
+ */ + if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { + struct se_node_acl *acl; + spin_lock_bh(&tpg->acl_node_lock); + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (acl->dynamic_node_acl) { + spin_unlock_bh(&tpg->acl_node_lock); + core_tpg_add_node_to_devs(acl, tpg); + spin_lock_bh(&tpg->acl_node_lock); + } + } + spin_unlock_bh(&tpg->acl_node_lock); + } + + return lun_p; +} + +/* core_dev_del_lun(): + * + * + */ +int core_dev_del_lun( + struct se_portal_group *tpg, + u32 unpacked_lun) +{ + struct se_lun *lun; + int ret = 0; + + lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); + if (!(lun)) + return ret; + + core_tpg_post_dellun(tpg, lun); + + printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" + " device object\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, + TPG_TFO(tpg)->get_fabric_name()); + + return 0; +} + +struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) +{ + struct se_lun *lun; + + spin_lock(&tpg->tpg_lun_lock); + if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" + "_PER_TPG-1: %u for Target Portal Group: %hu\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock(&tpg->tpg_lun_lock); + return NULL; + } + lun = &tpg->tpg_lun_list[unpacked_lun]; + + if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { + printk(KERN_ERR "%s Logical Unit Number: %u is not free on" + " Target Portal Group: %hu, ignoring request.\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock(&tpg->tpg_lun_lock); + return NULL; + } + spin_unlock(&tpg->tpg_lun_lock); + + return lun; +} + +/* core_dev_get_lun(): + * + * + */ +static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) +{ + struct se_lun *lun; + + spin_lock(&tpg->tpg_lun_lock); + if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" + "_TPG-1: %u for Target Portal Group: %hu\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock(&tpg->tpg_lun_lock); + return NULL; + } + lun = &tpg->tpg_lun_list[unpacked_lun]; + + if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { + printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + " Target Portal Group: %hu, ignoring request.\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock(&tpg->tpg_lun_lock); + return NULL; + } + spin_unlock(&tpg->tpg_lun_lock); + + return lun; +} + +struct se_lun_acl *core_dev_init_initiator_node_lun_acl( + struct se_portal_group *tpg, + u32 mapped_lun, + char *initiatorname, + int *ret) +{ + struct se_lun_acl *lacl; + struct se_node_acl *nacl; + + if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { + printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", + TPG_TFO(tpg)->get_fabric_name()); + *ret = -EOVERFLOW; + return NULL; + } + nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (!(nacl)) { + *ret = -EINVAL; + return NULL; + } + lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); + if (!(lacl)) { + printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); + *ret = -ENOMEM; + return NULL; + } + + INIT_LIST_HEAD(&lacl->lacl_list); + lacl->mapped_lun = mapped_lun; + lacl->se_lun_nacl = nacl; + snprintf(lacl->initiatorname, 
TRANSPORT_IQN_LEN, "%s", initiatorname); + + return lacl; +} + +int core_dev_add_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_lun_acl *lacl, + u32 unpacked_lun, + u32 lun_access) +{ + struct se_lun *lun; + struct se_node_acl *nacl; + + lun = core_dev_get_lun(tpg, unpacked_lun); + if (!(lun)) { + printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + " Target Portal Group: %hu, ignoring request.\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + return -EINVAL; + } + + nacl = lacl->se_lun_nacl; + if (!(nacl)) + return -EINVAL; + + if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && + (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) + lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + + lacl->se_lun = lun; + + if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, + lun_access, nacl, tpg, 1) < 0) + return -EINVAL; + + spin_lock(&lun->lun_acl_lock); + list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); + atomic_inc(&lun->lun_acl_count); + smp_mb__after_atomic_inc(); + spin_unlock(&lun->lun_acl_lock); + + printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " + " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, + (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", + lacl->initiatorname); + /* + * Check to see if there are any existing persistent reservation APTPL + * pre-registrations that need to be enabled for this LUN ACL.. + */ + core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); + return 0; +} + +/* core_dev_del_initiator_node_lun_acl(): + * + * + */ +int core_dev_del_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_lun *lun, + struct se_lun_acl *lacl) +{ + struct se_node_acl *nacl; + + nacl = lacl->se_lun_nacl; + if (!(nacl)) + return -EINVAL; + + spin_lock(&lun->lun_acl_lock); + list_del(&lacl->lacl_list); + atomic_dec(&lun->lun_acl_count); + smp_mb__after_atomic_dec(); + spin_unlock(&lun->lun_acl_lock); + + core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); + + lacl->se_lun = NULL; + + printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" + " InitiatorNode: %s Mapped LUN: %u\n", + TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + lacl->initiatorname, lacl->mapped_lun); + + return 0; +} + +void core_dev_free_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_lun_acl *lacl) +{ + printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" + " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), + TPG_TFO(tpg)->get_fabric_name(), + lacl->initiatorname, lacl->mapped_lun); + + kfree(lacl); +} + +int core_dev_setup_virtual_lun0(void) +{ + struct se_hba *hba; + struct se_device *dev; + struct se_subsystem_dev *se_dev = NULL; + struct se_subsystem_api *t; + char buf[16]; + int ret; + + hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); + if (IS_ERR(hba)) + return PTR_ERR(hba); + + se_global->g_lun0_hba = hba; + t = hba->transport; + + se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); + if (!(se_dev)) { + printk(KERN_ERR "Unable to allocate memory for" + " struct se_subsystem_dev\n"); + ret = -ENOMEM; + goto out; + } + INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); + spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); + INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); + 
INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); + spin_lock_init(&se_dev->t10_reservation.registration_lock); + spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); + spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); + spin_lock_init(&se_dev->se_dev_lock); + se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_wwn.t10_sub_dev = se_dev; + se_dev->t10_alua.t10_sub_dev = se_dev; + se_dev->se_dev_attrib.da_sub_dev = se_dev; + se_dev->se_dev_hba = hba; + + se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); + if (!(se_dev->se_dev_su_ptr)) { + printk(KERN_ERR "Unable to locate subsystem dependent pointer" + " from allocate_virtdevice()\n"); + ret = -ENOMEM; + goto out; + } + se_global->g_lun0_su_dev = se_dev; + + memset(buf, 0, 16); + sprintf(buf, "rd_pages=8"); + t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); + + dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); + if (!(dev) || IS_ERR(dev)) { + ret = -ENOMEM; + goto out; + } + se_dev->se_dev_ptr = dev; + se_global->g_lun0_dev = dev; + + return 0; +out: + se_global->g_lun0_su_dev = NULL; + kfree(se_dev); + if (se_global->g_lun0_hba) { + core_delete_hba(se_global->g_lun0_hba); + se_global->g_lun0_hba = NULL; + } + return ret; +} + + +void core_dev_release_virtual_lun0(void) +{ + struct se_hba *hba = se_global->g_lun0_hba; + struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; + + if (!(hba)) + return; + + if (se_global->g_lun0_dev) + se_free_virtual_device(se_global->g_lun0_dev, hba); + + kfree(su_dev); + core_delete_hba(hba); +} diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c new file mode 100644 index 000000000000..32b148d7e261 --- /dev/null +++ b/drivers/target/target_core_fabric_configfs.c @@ -0,0 +1,996 @@ +/******************************************************************************* +* Filename: target_core_fabric_configfs.c + * + * This file contains generic fabric module configfs infrastructure for + * TCM v4.x code + * + * Copyright (c) 2010 Rising Tide Systems + * Copyright (c) 2010 Linux-iSCSI.org + * + * Copyright (c) 2010 Nicholas A. Bellinger +* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_pr.h" + +#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ +static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ +{ \ + struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ + struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = _attrs; \ + cit->ct_owner = tf->tf_module; \ + printk("Setup generic %s\n", __stringify(_name)); \ +} + +/* Start of tfc_tpg_mappedlun_cit */ + +static int target_fabric_mappedlun_link( + struct config_item *lun_acl_ci, + struct config_item *lun_ci) +{ + struct se_dev_entry *deve; + struct se_lun *lun = container_of(to_config_group(lun_ci), + struct se_lun, lun_group); + struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg; + struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; + int ret = 0, lun_access; + /* + * Ensure that the source port exists + */ + if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { + printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" + "_tpg does not exist\n"); + return -EINVAL; + } + se_tpg = lun->lun_sep->sep_tpg; + + nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; + tpg_ci = &nacl_ci->ci_group->cg_item; + wwn_ci = &tpg_ci->ci_group->cg_item; + tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item; + wwn_ci_s = &tpg_ci_s->ci_group->cg_item; + /* + * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT + */ + if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { + printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", + config_item_name(wwn_ci)); + return -EINVAL; + } + if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { + printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" + " TPGT: %s\n", config_item_name(wwn_ci), + config_item_name(tpg_ci)); + return -EINVAL; + } + /* + * If this struct se_node_acl was dynamically generated with + * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags, + * which be will write protected (READ-ONLY) when + * tpg_1/attrib/demo_mode_write_protect=1 + */ + spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); + deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun]; + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) + lun_access = deve->lun_flags; + else + lun_access = + (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( + se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : + TRANSPORT_LUNFLAGS_READ_WRITE; + spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); + /* + * Determine the actual mapped LUN value user wants.. + * + * This value is what the SCSI Initiator actually sees the + * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. + */ + ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, + lun->unpacked_lun, lun_access); + + return (ret < 0) ? 
-EINVAL : 0; +} + +static int target_fabric_mappedlun_unlink( + struct config_item *lun_acl_ci, + struct config_item *lun_ci) +{ + struct se_lun *lun; + struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), + struct se_lun_acl, se_lun_group); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun]; + struct se_portal_group *se_tpg; + /* + * Determine if the underlying MappedLUN has already been released.. + */ + if (!(deve->se_lun)) + return 0; + + lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); + se_tpg = lun->lun_sep->sep_tpg; + + core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); + return 0; +} + +CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); +#define TCM_MAPPEDLUN_ATTR(_name, _mode) \ +static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_fabric_mappedlun_show_##_name, \ + target_fabric_mappedlun_store_##_name); + +static ssize_t target_fabric_mappedlun_show_write_protect( + struct se_lun_acl *lacl, + char *page) +{ + struct se_node_acl *se_nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t len; + + spin_lock_irq(&se_nacl->device_list_lock); + deve = &se_nacl->device_list[lacl->mapped_lun]; + len = sprintf(page, "%d\n", + (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? + 1 : 0); + spin_unlock_irq(&se_nacl->device_list_lock); + + return len; +} + +static ssize_t target_fabric_mappedlun_store_write_protect( + struct se_lun_acl *lacl, + const char *page, + size_t count) +{ + struct se_node_acl *se_nacl = lacl->se_lun_nacl; + struct se_portal_group *se_tpg = se_nacl->se_tpg; + unsigned long op; + + if (strict_strtoul(page, 0, &op)) + return -EINVAL; + + if ((op != 1) && (op != 0)) + return -EINVAL; + + core_update_device_list_access(lacl->mapped_lun, (op) ? + TRANSPORT_LUNFLAGS_READ_ONLY : + TRANSPORT_LUNFLAGS_READ_WRITE, + lacl->se_lun_nacl); + + printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" + " Mapped LUN: %u Write Protect bit to %s\n", + TPG_TFO(se_tpg)->get_fabric_name(), + lacl->initiatorname, lacl->mapped_lun, (op) ? 
"ON" : "OFF"); + + return count; + +} + +TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); + +CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); + +static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { + &target_fabric_mappedlun_write_protect.attr, + NULL, +}; + +static struct configfs_item_operations target_fabric_mappedlun_item_ops = { + .show_attribute = target_fabric_mappedlun_attr_show, + .store_attribute = target_fabric_mappedlun_attr_store, + .allow_link = target_fabric_mappedlun_link, + .drop_link = target_fabric_mappedlun_unlink, +}; + +TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, + target_fabric_mappedlun_attrs); + +/* End of tfc_tpg_mappedlun_cit */ + +/* Start of tfc_tpg_nacl_attrib_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); + +static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { + .show_attribute = target_fabric_nacl_attrib_attr_show, + .store_attribute = target_fabric_nacl_attrib_attr_store, +}; + +TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); + +/* End of tfc_tpg_nacl_attrib_cit */ + +/* Start of tfc_tpg_nacl_auth_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group); + +static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { + .show_attribute = target_fabric_nacl_auth_attr_show, + .store_attribute = target_fabric_nacl_auth_attr_store, +}; + +TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); + +/* End of tfc_tpg_nacl_auth_cit */ + +/* Start of tfc_tpg_nacl_param_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group); + +static struct configfs_item_operations target_fabric_nacl_param_item_ops = { + .show_attribute = target_fabric_nacl_param_attr_show, + .store_attribute = target_fabric_nacl_param_attr_store, +}; + +TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); + +/* End of tfc_tpg_nacl_param_cit */ + +/* Start of tfc_tpg_nacl_base_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group); + +static struct config_group *target_fabric_make_mappedlun( + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl = container_of(group, + struct se_node_acl, acl_group); + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_lun_acl *lacl; + struct config_item *acl_ci; + char *buf; + unsigned long mapped_lun; + int ret = 0; + + acl_ci = &group->cg_item; + if (!(acl_ci)) { + printk(KERN_ERR "Unable to locatel acl_ci\n"); + return NULL; + } + + buf = kzalloc(strlen(name) + 1, GFP_KERNEL); + if (!(buf)) { + printk(KERN_ERR "Unable to allocate memory for name buf\n"); + return ERR_PTR(-ENOMEM); + } + snprintf(buf, strlen(name) + 1, "%s", name); + /* + * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. + */ + if (strstr(buf, "lun_") != buf) { + printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" + " name: %s\n", buf, name); + ret = -EINVAL; + goto out; + } + /* + * Determine the Mapped LUN value. This is what the SCSI Initiator + * Port will actually see. 
+ */ + if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { + ret = -EINVAL; + goto out; + } + + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, + config_item_name(acl_ci), &ret); + if (!(lacl)) + goto out; + + config_group_init_type_name(&lacl->se_lun_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); + + kfree(buf); + return &lacl->se_lun_group; +out: + kfree(buf); + return ERR_PTR(ret); +} + +static void target_fabric_drop_mappedlun( + struct config_group *group, + struct config_item *item) +{ + struct se_lun_acl *lacl = container_of(to_config_group(item), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; + + config_item_put(item); + core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + +static struct configfs_item_operations target_fabric_nacl_base_item_ops = { + .show_attribute = target_fabric_nacl_base_attr_show, + .store_attribute = target_fabric_nacl_base_attr_store, +}; + +static struct configfs_group_operations target_fabric_nacl_base_group_ops = { + .make_group = target_fabric_make_mappedlun, + .drop_item = target_fabric_drop_mappedlun, +}; + +TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, + &target_fabric_nacl_base_group_ops, NULL); + +/* End of tfc_tpg_nacl_base_cit */ + +/* Start of tfc_tpg_nacl_cit */ + +static struct config_group *target_fabric_make_nodeacl( + struct config_group *group, + const char *name) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_acl_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_node_acl *se_nacl; + struct config_group *nacl_cg; + + if (!(tf->tf_ops.fabric_make_nodeacl)) { + printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); + if (IS_ERR(se_nacl)) + return ERR_PTR(PTR_ERR(se_nacl)); + + nacl_cg = &se_nacl->acl_group; + nacl_cg->default_groups = se_nacl->acl_default_groups; + nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; + nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; + nacl_cg->default_groups[2] = &se_nacl->acl_param_group; + nacl_cg->default_groups[3] = NULL; + + config_group_init_type_name(&se_nacl->acl_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); + config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); + config_group_init_type_name(&se_nacl->acl_auth_group, "auth", + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); + config_group_init_type_name(&se_nacl->acl_param_group, "param", + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); + + return &se_nacl->acl_group; +} + +static void target_fabric_drop_nodeacl( + struct config_group *group, + struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_acl_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_node_acl *se_nacl = container_of(to_config_group(item), + struct se_node_acl, acl_group); + struct config_item *df_item; + struct config_group *nacl_cg; + int i; + + nacl_cg = &se_nacl->acl_group; + for (i = 0; nacl_cg->default_groups[i]; i++) { + df_item = &nacl_cg->default_groups[i]->cg_item; + nacl_cg->default_groups[i] = NULL; + config_item_put(df_item); + } + + config_item_put(item); + tf->tf_ops.fabric_drop_nodeacl(se_nacl); +} + +static struct configfs_group_operations target_fabric_nacl_group_ops = { + .make_group = 
target_fabric_make_nodeacl, + .drop_item = target_fabric_drop_nodeacl, +}; + +TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); + +/* End of tfc_tpg_nacl_cit */ + +/* Start of tfc_tpg_np_base_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); + +static struct configfs_item_operations target_fabric_np_base_item_ops = { + .show_attribute = target_fabric_np_base_attr_show, + .store_attribute = target_fabric_np_base_attr_store, +}; + +TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); + +/* End of tfc_tpg_np_base_cit */ + +/* Start of tfc_tpg_np_cit */ + +static struct config_group *target_fabric_make_np( + struct config_group *group, + const char *name) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_np_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_tpg_np *se_tpg_np; + + if (!(tf->tf_ops.fabric_make_np)) { + printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); + if (!(se_tpg_np) || IS_ERR(se_tpg_np)) + return ERR_PTR(-EINVAL); + + config_group_init_type_name(&se_tpg_np->tpg_np_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); + + return &se_tpg_np->tpg_np_group; +} + +static void target_fabric_drop_np( + struct config_group *group, + struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_np_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), + struct se_tpg_np, tpg_np_group); + + config_item_put(item); + tf->tf_ops.fabric_drop_np(se_tpg_np); +} + +static struct configfs_group_operations target_fabric_np_group_ops = { + .make_group = &target_fabric_make_np, + .drop_item = &target_fabric_drop_np, +}; + +TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL); + +/* End of tfc_tpg_np_cit */ + +/* Start of tfc_tpg_port_cit */ + +CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun); +#define TCM_PORT_ATTR(_name, _mode) \ +static struct target_fabric_port_attribute target_fabric_port_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + target_fabric_port_show_attr_##_name, \ + target_fabric_port_store_attr_##_name); + +#define TCM_PORT_ATTOR_RO(_name) \ + __CONFIGFS_EATTR_RO(_name, \ + target_fabric_port_show_attr_##_name); + +/* + * alua_tg_pt_gp + */ +static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( + struct se_lun *lun, + char *page) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); +} + +static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( + struct se_lun *lun, + const char *page, + size_t count) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); +} + +TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); + +/* + * alua_tg_pt_offline + */ +static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( + struct se_lun *lun, + char *page) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_show_offline_bit(lun, page); +} + +static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( + struct se_lun *lun, + const char *page, + size_t count) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_store_offline_bit(lun, 
page, count); +} + +TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR); + +/* + * alua_tg_pt_status + */ +static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( + struct se_lun *lun, + char *page) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_show_secondary_status(lun, page); +} + +static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( + struct se_lun *lun, + const char *page, + size_t count) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_store_secondary_status(lun, page, count); +} + +TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR); + +/* + * alua_tg_pt_write_md + */ +static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( + struct se_lun *lun, + char *page) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_show_secondary_write_metadata(lun, page); +} + +static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( + struct se_lun *lun, + const char *page, + size_t count) +{ + if (!(lun)) + return -ENODEV; + + if (!(lun->lun_sep)) + return -ENODEV; + + return core_alua_store_secondary_write_metadata(lun, page, count); +} + +TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR); + + +static struct configfs_attribute *target_fabric_port_attrs[] = { + &target_fabric_port_alua_tg_pt_gp.attr, + &target_fabric_port_alua_tg_pt_offline.attr, + &target_fabric_port_alua_tg_pt_status.attr, + &target_fabric_port_alua_tg_pt_write_md.attr, + NULL, +}; + +CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group); + +static int target_fabric_port_link( + struct config_item *lun_ci, + struct config_item *se_dev_ci) +{ + struct config_item *tpg_ci; + struct se_device *dev; + struct se_lun *lun = container_of(to_config_group(lun_ci), + struct se_lun, lun_group); + struct se_lun *lun_p; + struct se_portal_group *se_tpg; + struct se_subsystem_dev *se_dev = container_of( + to_config_group(se_dev_ci), struct se_subsystem_dev, + se_dev_group); + struct target_fabric_configfs *tf; + int ret; + + tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; + se_tpg = container_of(to_config_group(tpg_ci), + struct se_portal_group, tpg_group); + tf = se_tpg->se_tpg_wwn->wwn_tf; + + if (lun->lun_se_dev != NULL) { + printk(KERN_ERR "Port Symlink already exists\n"); + return -EEXIST; + } + + dev = se_dev->se_dev_ptr; + if (!(dev)) { + printk(KERN_ERR "Unable to locate struct se_device pointer from" + " %s\n", config_item_name(se_dev_ci)); + ret = -ENODEV; + goto out; + } + + lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, + lun->unpacked_lun); + if ((IS_ERR(lun_p)) || !(lun_p)) { + printk(KERN_ERR "core_dev_add_lun() failed\n"); + ret = -EINVAL; + goto out; + } + + if (tf->tf_ops.fabric_post_link) { + /* + * Call the optional fabric_post_link() to allow a + * fabric module to setup any additional state once + * core_dev_add_lun() has been called.. + */ + tf->tf_ops.fabric_post_link(se_tpg, lun); + } + + return 0; +out: + return ret; +} + +static int target_fabric_port_unlink( + struct config_item *lun_ci, + struct config_item *se_dev_ci) +{ + struct se_lun *lun = container_of(to_config_group(lun_ci), + struct se_lun, lun_group); + struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + if (tf->tf_ops.fabric_pre_unlink) { + /* + * Call the optional fabric_pre_unlink() to allow a + * fabric module to release any additional stat before + * core_dev_del_lun() is called. 
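+ * This mirrors the optional fabric_post_link() callout made from
+ * target_fabric_port_link() after core_dev_add_lun() succeeds.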
+ */ + tf->tf_ops.fabric_pre_unlink(se_tpg, lun); + } + + core_dev_del_lun(se_tpg, lun->unpacked_lun); + return 0; +} + +static struct configfs_item_operations target_fabric_port_item_ops = { + .show_attribute = target_fabric_port_attr_show, + .store_attribute = target_fabric_port_attr_store, + .allow_link = target_fabric_port_link, + .drop_link = target_fabric_port_unlink, +}; + +TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs); + +/* End of tfc_tpg_port_cit */ + +/* Start of tfc_tpg_lun_cit */ + +static struct config_group *target_fabric_make_lun( + struct config_group *group, + const char *name) +{ + struct se_lun *lun; + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_lun_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + unsigned long unpacked_lun; + + if (strstr(name, "lun_") != name) { + printk(KERN_ERR "Unable to locate \'_\" in" + " \"lun_$LUN_NUMBER\"\n"); + return ERR_PTR(-EINVAL); + } + if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) + return ERR_PTR(-EINVAL); + + lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); + if (!(lun)) + return ERR_PTR(-EINVAL); + + config_group_init_type_name(&lun->lun_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); + + return &lun->lun_group; +} + +static void target_fabric_drop_lun( + struct config_group *group, + struct config_item *item) +{ + config_item_put(item); +} + +static struct configfs_group_operations target_fabric_lun_group_ops = { + .make_group = &target_fabric_make_lun, + .drop_item = &target_fabric_drop_lun, +}; + +TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL); + +/* End of tfc_tpg_lun_cit */ + +/* Start of tfc_tpg_attrib_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group); + +static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { + .show_attribute = target_fabric_tpg_attrib_attr_show, + .store_attribute = target_fabric_tpg_attrib_attr_store, +}; + +TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); + +/* End of tfc_tpg_attrib_cit */ + +/* Start of tfc_tpg_param_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group); + +static struct configfs_item_operations target_fabric_tpg_param_item_ops = { + .show_attribute = target_fabric_tpg_param_attr_show, + .store_attribute = target_fabric_tpg_param_attr_store, +}; + +TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); + +/* End of tfc_tpg_param_cit */ + +/* Start of tfc_tpg_base_cit */ +/* + * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO() + */ +CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); + +static struct configfs_item_operations target_fabric_tpg_base_item_ops = { + .show_attribute = target_fabric_tpg_attr_show, + .store_attribute = target_fabric_tpg_attr_store, +}; + +TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); + +/* End of tfc_tpg_base_cit */ + +/* Start of tfc_tpg_cit */ + +static struct config_group *target_fabric_make_tpg( + struct config_group *group, + const char *name) +{ + struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + struct se_portal_group *se_tpg; + + if (!(tf->tf_ops.fabric_make_tpg)) { + printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); + if (!(se_tpg) || IS_ERR(se_tpg)) + return 
ERR_PTR(-EINVAL); + /* + * Setup default groups from pre-allocated se_tpg->tpg_default_groups + */ + se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups; + se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group; + se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group; + se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group; + se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group; + se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group; + se_tpg->tpg_group.default_groups[5] = NULL; + + config_group_init_type_name(&se_tpg->tpg_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); + config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", + &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); + config_group_init_type_name(&se_tpg->tpg_np_group, "np", + &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); + config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); + config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", + &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); + config_group_init_type_name(&se_tpg->tpg_param_group, "param", + &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); + + return &se_tpg->tpg_group; +} + +static void target_fabric_drop_tpg( + struct config_group *group, + struct config_item *item) +{ + struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + struct config_group *tpg_cg = &se_tpg->tpg_group; + struct config_item *df_item; + int i; + /* + * Release default groups, but do not release tpg_cg->default_groups + * memory as it is statically allocated at se_tpg->tpg_default_groups. + */ + for (i = 0; tpg_cg->default_groups[i]; i++) { + df_item = &tpg_cg->default_groups[i]->cg_item; + tpg_cg->default_groups[i] = NULL; + config_item_put(df_item); + } + + config_item_put(item); + tf->tf_ops.fabric_drop_tpg(se_tpg); +} + +static struct configfs_group_operations target_fabric_tpg_group_ops = { + .make_group = target_fabric_make_tpg, + .drop_item = target_fabric_drop_tpg, +}; + +TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); + +/* End of tfc_tpg_cit */ + +/* Start of tfc_wwn_cit */ + +static struct config_group *target_fabric_make_wwn( + struct config_group *group, + const char *name) +{ + struct target_fabric_configfs *tf = container_of(group, + struct target_fabric_configfs, tf_group); + struct se_wwn *wwn; + + if (!(tf->tf_ops.fabric_make_wwn)) { + printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); + if (!(wwn) || IS_ERR(wwn)) + return ERR_PTR(-EINVAL); + + wwn->wwn_tf = tf; + config_group_init_type_name(&wwn->wwn_group, name, + &TF_CIT_TMPL(tf)->tfc_tpg_cit); + + return &wwn->wwn_group; +} + +static void target_fabric_drop_wwn( + struct config_group *group, + struct config_item *item) +{ + struct target_fabric_configfs *tf = container_of(group, + struct target_fabric_configfs, tf_group); + struct se_wwn *wwn = container_of(to_config_group(item), + struct se_wwn, wwn_group); + + config_item_put(item); + tf->tf_ops.fabric_drop_wwn(wwn); +} + +static struct configfs_group_operations target_fabric_wwn_group_ops = { + .make_group = target_fabric_make_wwn, + .drop_item = target_fabric_drop_wwn, +}; +/* + * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO() + */ +CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group); + +static struct 
configfs_item_operations target_fabric_wwn_item_ops = { + .show_attribute = target_fabric_wwn_attr_show, + .store_attribute = target_fabric_wwn_attr_store, +}; + +TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); + +/* End of tfc_wwn_cit */ + +/* Start of tfc_discovery_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs, + tf_disc_group); + +static struct configfs_item_operations target_fabric_discovery_item_ops = { + .show_attribute = target_fabric_discovery_attr_show, + .store_attribute = target_fabric_discovery_attr_store, +}; + +TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); + +/* End of tfc_discovery_cit */ + +int target_fabric_setup_cits(struct target_fabric_configfs *tf) +{ + target_fabric_setup_discovery_cit(tf); + target_fabric_setup_wwn_cit(tf); + target_fabric_setup_tpg_cit(tf); + target_fabric_setup_tpg_base_cit(tf); + target_fabric_setup_tpg_port_cit(tf); + target_fabric_setup_tpg_lun_cit(tf); + target_fabric_setup_tpg_np_cit(tf); + target_fabric_setup_tpg_np_base_cit(tf); + target_fabric_setup_tpg_attrib_cit(tf); + target_fabric_setup_tpg_param_cit(tf); + target_fabric_setup_tpg_nacl_cit(tf); + target_fabric_setup_tpg_nacl_base_cit(tf); + target_fabric_setup_tpg_nacl_attrib_cit(tf); + target_fabric_setup_tpg_nacl_auth_cit(tf); + target_fabric_setup_tpg_nacl_param_cit(tf); + target_fabric_setup_tpg_mappedlun_cit(tf); + + return 0; +} diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c new file mode 100644 index 000000000000..26285644e4de --- /dev/null +++ b/drivers/target/target_core_fabric_lib.c @@ -0,0 +1,451 @@ +/******************************************************************************* + * Filename: target_core_fabric_lib.c + * + * This file contains generic high level protocol identifier and PR + * handlers for TCM fabric modules + * + * Copyright (c) 2010 Rising Tide Systems, Inc. + * Copyright (c) 2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
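The helpers below are intended to be plugged into a fabric module's TransportID hooks rather than called ad hoc. As a rough usage sketch built around the SAS variants (se_tpg, se_nacl and pr_reg are assumed to come from the caller's context):

	int format_code;
	u32 len;
	unsigned char buf[24];

	/* fixed 24-byte SAS TransportID, format code 00b */
	len = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, &format_code);

	/* encode it: buf[0] = 0x06 (SAS), NAA WWN packed from offset 4 */
	len = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, &format_code, buf);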
+ * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_hba.h" +#include "target_core_pr.h" + +/* + * Handlers for Serial Attached SCSI (SAS) + */ +u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + /* + * Return a SAS Serial SCSI Protocol identifier for loopback operations + * This is defined in section 7.5.1 Table 362 in spc4r17 + */ + return 0x6; +} +EXPORT_SYMBOL(sas_get_fabric_proto_ident); + +u32 sas_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + unsigned char binary, *ptr; + int i; + u32 off = 4; + /* + * Set PROTOCOL IDENTIFIER to 6h for SAS + */ + buf[0] = 0x06; + /* + * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI + * over SAS Serial SCSI Protocol + */ + ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */ + + for (i = 0; i < 16; i += 2) { + binary = transport_asciihex_to_binaryhex(&ptr[i]); + buf[off++] = binary; + } + /* + * The SAS Transport ID is a hardcoded 24-byte length + */ + return 24; +} +EXPORT_SYMBOL(sas_get_pr_transport_id); + +u32 sas_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + *format_code = 0; + /* + * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI + * over SAS Serial SCSI Protocol + * + * The SAS Transport ID is a hardcoded 24-byte length + */ + return 24; +} +EXPORT_SYMBOL(sas_get_pr_transport_id_len); + +/* + * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above + * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. + */ +char *sas_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + /* + * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID + * for initiator ports using SCSI over SAS Serial SCSI Protocol + * + * The TransportID for a SAS Initiator Port is of fixed size of + * 24 bytes, and SAS does not contain a I_T nexus identifier, + * so we return the **port_nexus_ptr set to NULL. + */ + *port_nexus_ptr = NULL; + *out_tid_len = 24; + + return (char *)&buf[4]; +} +EXPORT_SYMBOL(sas_parse_pr_out_transport_id); + +/* + * Handlers for Fibre Channel Protocol (FCP) + */ +u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ +} +EXPORT_SYMBOL(fc_get_fabric_proto_ident); + +u32 fc_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + *format_code = 0; + /* + * The FC Transport ID is a hardcoded 24-byte length + */ + return 24; +} +EXPORT_SYMBOL(fc_get_pr_transport_id_len); + +u32 fc_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + unsigned char binary, *ptr; + int i; + u32 off = 8; + /* + * PROTOCOL IDENTIFIER is 0h for FCP-2 + * + * From spc4r17, 7.5.4.2 TransportID for initiator ports using + * SCSI over Fibre Channel + * + * We convert the ASCII formatted N Port name into a binary + * encoded TransportID. 
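For example, with an initiator N Port name of "21:00:00:e0:8b:05:05:04" (an illustrative WWPN), the loop that follows skips the colons and packs two hex digits per byte starting at offset 8 of the 24-byte TransportID:

	buf[8]  = 0x21;  buf[9]  = 0x00;  buf[10] = 0x00;  buf[11] = 0xe0;
	buf[12] = 0x8b;  buf[13] = 0x05;  buf[14] = 0x05;  buf[15] = 0x04;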
+ */ + ptr = &se_nacl->initiatorname[0]; + + for (i = 0; i < 24; ) { + if (!(strncmp(&ptr[i], ":", 1))) { + i++; + continue; + } + binary = transport_asciihex_to_binaryhex(&ptr[i]); + buf[off++] = binary; + i += 2; + } + /* + * The FC Transport ID is a hardcoded 24-byte length + */ + return 24; +} +EXPORT_SYMBOL(fc_get_pr_transport_id); + +char *fc_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + /* + * The TransportID for a FC N Port is of fixed size of + * 24 bytes, and FC does not contain a I_T nexus identifier, + * so we return the **port_nexus_ptr set to NULL. + */ + *port_nexus_ptr = NULL; + *out_tid_len = 24; + + return (char *)&buf[8]; +} +EXPORT_SYMBOL(fc_parse_pr_out_transport_id); + +/* + * Handlers for Internet Small Computer Systems Interface (iSCSI) + */ + +u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + /* + * This value is defined for "Internet SCSI (iSCSI)" + * in spc4r17 section 7.5.1 Table 362 + */ + return 0x5; +} +EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); + +u32 iscsi_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + u32 off = 4, padding = 0; + u16 len = 0; + + spin_lock_irq(&se_nacl->nacl_sess_lock); + /* + * Set PROTOCOL IDENTIFIER to 5h for iSCSI + */ + buf[0] = 0x05; + /* + * From spc4r17 Section 7.5.4.6: TransportID for initiator + * ports using SCSI over iSCSI. + * + * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field + * shall contain the iSCSI name of an iSCSI initiator node (see + * RFC 3720). The first ISCSI NAME field byte containing an ASCII + * null character terminates the ISCSI NAME field without regard for + * the specified length of the iSCSI TransportID or the contents of + * the ADDITIONAL LENGTH field. + */ + len = sprintf(&buf[off], "%s", se_nacl->initiatorname); + /* + * Add Extra byte for NULL terminator + */ + len++; + /* + * If there is ISID present with the registration and *format code == 1 + * 1, use iSCSI Initiator port TransportID format. + * + * Otherwise use iSCSI Initiator device TransportID format that + * does not contain the ASCII encoded iSCSI Initiator iSID value + * provied by the iSCSi Initiator during the iSCSI login process. + */ + if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { + /* + * Set FORMAT CODE 01b for iSCSI Initiator port TransportID + * format. + */ + buf[0] |= 0x40; + /* + * From spc4r17 Section 7.5.4.6: TransportID for initiator + * ports using SCSI over iSCSI. Table 390 + * + * The SEPARATOR field shall contain the five ASCII + * characters ",i,0x". + * + * The null-terminated, null-padded ISCSI INITIATOR SESSION ID + * field shall contain the iSCSI initiator session identifier + * (see RFC 3720) in the form of ASCII characters that are the + * hexadecimal digits converted from the binary iSCSI initiator + * session identifier value. 
The first ISCSI INITIATOR SESSION + * ID field byte containing an ASCII null character + */ + buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ + buf[off+len] = 0x69; off++; /* ASCII Character: "i" */ + buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ + buf[off+len] = 0x30; off++; /* ASCII Character: "0" */ + buf[off+len] = 0x78; off++; /* ASCII Character: "x" */ + len += 5; + buf[off+len] = pr_reg->pr_reg_isid[0]; off++; + buf[off+len] = pr_reg->pr_reg_isid[1]; off++; + buf[off+len] = pr_reg->pr_reg_isid[2]; off++; + buf[off+len] = pr_reg->pr_reg_isid[3]; off++; + buf[off+len] = pr_reg->pr_reg_isid[4]; off++; + buf[off+len] = pr_reg->pr_reg_isid[5]; off++; + buf[off+len] = '\0'; off++; + len += 7; + } + spin_unlock_irq(&se_nacl->nacl_sess_lock); + /* + * The ADDITIONAL LENGTH field specifies the number of bytes that follow + * in the TransportID. The additional length shall be at least 20 and + * shall be a multiple of four. + */ + padding = ((-len) & 3); + if (padding != 0) + len += padding; + + buf[2] = ((len >> 8) & 0xff); + buf[3] = (len & 0xff); + /* + * Increment value for total payload + header length for + * full status descriptor + */ + len += 4; + + return len; +} +EXPORT_SYMBOL(iscsi_get_pr_transport_id); + +u32 iscsi_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + u32 len = 0, padding = 0; + + spin_lock_irq(&se_nacl->nacl_sess_lock); + len = strlen(se_nacl->initiatorname); + /* + * Add extra byte for NULL terminator + */ + len++; + /* + * If there is ISID present with the registration, use format code: + * 01b: iSCSI Initiator port TransportID format + * + * If there is not an active iSCSI session, use format code: + * 00b: iSCSI Initiator device TransportID format + */ + if (pr_reg->isid_present_at_reg) { + len += 5; /* For ",i,0x" ASCII seperator */ + len += 7; /* For iSCSI Initiator Session ID + Null terminator */ + *format_code = 1; + } else + *format_code = 0; + spin_unlock_irq(&se_nacl->nacl_sess_lock); + /* + * The ADDITIONAL LENGTH field specifies the number of bytes that follow + * in the TransportID. The additional length shall be at least 20 and + * shall be a multiple of four. + */ + padding = ((-len) & 3); + if (padding != 0) + len += padding; + /* + * Increment value for total payload + header length for + * full status descriptor + */ + len += 4; + + return len; +} +EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); + +char *iscsi_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + char *p; + u32 tid_len, padding; + int i; + u16 add_len; + u8 format_code = (buf[0] & 0xc0); + /* + * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: + * + * TransportID for initiator ports using SCSI over iSCSI, + * from Table 388 -- iSCSI TransportID formats. + * + * 00b Initiator port is identified using the world wide unique + * SCSI device name of the iSCSI initiator + * device containing the initiator port (see table 389). + * 01b Initiator port is identified using the world wide unique + * initiator port identifier (see table 390).10b to 11b + * Reserved + */ + if ((format_code != 0x00) && (format_code != 0x40)) { + printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" + " Initiator Transport ID\n", format_code); + return NULL; + } + /* + * If the caller wants the TransportID Length, we set that value for the + * entire iSCSI Tarnsport ID now. 
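As a concrete illustration (the initiator name is invented for the example), a FORMAT CODE 00b TransportID for "iqn.1993-08.org.debian:01:abcdef" is laid out as:

	byte 0       0x05        (PROTOCOL IDENTIFIER 5h, FORMAT CODE 00b)
	bytes 2-3    0x00 0x24   (ADDITIONAL LENGTH = 36)
	bytes 4-39   "iqn.1993-08.org.debian:01:abcdef" + NULL + 3 pad bytes

since the 32-character name plus its NULL terminator gives len = 33, padding = ((-33) & 3) = 3, an ADDITIONAL LENGTH of 36, and a total TransportID of 36 + 4 = 40 bytes. With an active session ISID, FORMAT CODE 01b is used instead and ",i,0x" plus the ASCII ISID follows the name, which is exactly the separator this parse routine looks for.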
+ */ + if (out_tid_len != NULL) { + add_len = ((buf[2] >> 8) & 0xff); + add_len |= (buf[3] & 0xff); + + tid_len = strlen((char *)&buf[4]); + tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ + tid_len += 1; /* Add one byte for NULL terminator */ + padding = ((-tid_len) & 3); + if (padding != 0) + tid_len += padding; + + if ((add_len + 4) != tid_len) { + printk(KERN_INFO "LIO-Target Extracted add_len: %hu " + "does not match calculated tid_len: %u," + " using tid_len instead\n", add_len+4, tid_len); + *out_tid_len = tid_len; + } else + *out_tid_len = (add_len + 4); + } + /* + * Check for ',i,0x' seperator between iSCSI Name and iSCSI Initiator + * Session ID as defined in Table 390 - iSCSI initiator port TransportID + * format. + */ + if (format_code == 0x40) { + p = strstr((char *)&buf[4], ",i,0x"); + if (!(p)) { + printk(KERN_ERR "Unable to locate \",i,0x\" seperator" + " for Initiator port identifier: %s\n", + (char *)&buf[4]); + return NULL; + } + *p = '\0'; /* Terminate iSCSI Name */ + p += 5; /* Skip over ",i,0x" seperator */ + + *port_nexus_ptr = p; + /* + * Go ahead and do the lower case conversion of the received + * 12 ASCII characters representing the ISID in the TransportID + * for comparision against the running iSCSI session's ISID from + * iscsi_target.c:lio_sess_get_initiator_sid() + */ + for (i = 0; i < 12; i++) { + if (isdigit(*p)) { + p++; + continue; + } + *p = tolower(*p); + p++; + } + } + + return (char *)&buf[4]; +} +EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c new file mode 100644 index 000000000000..0aaca885668f --- /dev/null +++ b/drivers/target/target_core_file.c @@ -0,0 +1,688 @@ +/******************************************************************************* + * Filename: target_core_file.c + * + * This file contains the Storage Engine <-> FILEIO transport specific functions + * + * Copyright (c) 2005 PyX Technologies, Inc. + * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "target_core_file.h" + +#if 1 +#define DEBUG_FD_CACHE(x...) printk(x) +#else +#define DEBUG_FD_CACHE(x...) +#endif + +#if 1 +#define DEBUG_FD_FUA(x...) printk(x) +#else +#define DEBUG_FD_FUA(x...) 
+#endif + +static struct se_subsystem_api fileio_template; + +/* fd_attach_hba(): (Part of se_subsystem_api_t template) + * + * + */ +static int fd_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct fd_host *fd_host; + + fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); + if (!(fd_host)) { + printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); + return -1; + } + + fd_host->fd_host_id = host_id; + + atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); + atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); + hba->hba_ptr = (void *) fd_host; + + printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" + " Target Core Stack %s\n", hba->hba_id, FD_VERSION, + TARGET_CORE_MOD_VERSION); + printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" + " Target Core with TCQ Depth: %d MaxSectors: %u\n", + hba->hba_id, fd_host->fd_host_id, + atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); + + return 0; +} + +static void fd_detach_hba(struct se_hba *hba) +{ + struct fd_host *fd_host = hba->hba_ptr; + + printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" + " Target Core\n", hba->hba_id, fd_host->fd_host_id); + + kfree(fd_host); + hba->hba_ptr = NULL; +} + +static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) +{ + struct fd_dev *fd_dev; + struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; + + fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); + if (!(fd_dev)) { + printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); + return NULL; + } + + fd_dev->fd_host = fd_host; + + printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); + + return fd_dev; +} + +/* fd_create_virtdevice(): (Part of se_subsystem_api_t template) + * + * + */ +static struct se_device *fd_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p) +{ + char *dev_p = NULL; + struct se_device *dev; + struct se_dev_limits dev_limits; + struct queue_limits *limits; + struct fd_dev *fd_dev = (struct fd_dev *) p; + struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; + mm_segment_t old_fs; + struct file *file; + struct inode *inode = NULL; + int dev_flags = 0, flags; + + memset(&dev_limits, 0, sizeof(struct se_dev_limits)); + + old_fs = get_fs(); + set_fs(get_ds()); + dev_p = getname(fd_dev->fd_dev_name); + set_fs(old_fs); + + if (IS_ERR(dev_p)) { + printk(KERN_ERR "getname(%s) failed: %lu\n", + fd_dev->fd_dev_name, IS_ERR(dev_p)); + goto fail; + } +#if 0 + if (di->no_create_file) + flags = O_RDWR | O_LARGEFILE; + else + flags = O_RDWR | O_CREAT | O_LARGEFILE; +#else + flags = O_RDWR | O_CREAT | O_LARGEFILE; +#endif +/* flags |= O_DIRECT; */ + /* + * If fd_buffered_io=1 has not been set explictly (the default), + * use O_SYNC to force FILEIO writes to disk. + */ + if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) + flags |= O_SYNC; + + file = filp_open(dev_p, flags, 0600); + + if (IS_ERR(file) || !file || !file->f_dentry) { + printk(KERN_ERR "filp_open(%s) failed\n", dev_p); + goto fail; + } + fd_dev->fd_file = file; + /* + * If using a block backend with this struct file, we extract + * fd_dev->fd_[block,dev]_size from struct block_device. + * + * Otherwise, we use the passed fd_size= from configfs + */ + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + struct request_queue *q; + /* + * Setup the local scope queue_limits from struct request_queue->limits + * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 
+ */ + q = bdev_get_queue(inode->i_bdev); + limits = &dev_limits.limits; + limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); + limits->max_hw_sectors = queue_max_hw_sectors(q); + limits->max_sectors = queue_max_sectors(q); + /* + * Determine the number of bytes from i_size_read() minus + * one (1) logical sector from underlying struct block_device + */ + fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); + fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - + fd_dev->fd_block_size); + + printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" + " block_device blocks: %llu logical_block_size: %d\n", + fd_dev->fd_dev_size, + div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); + } else { + if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { + printk(KERN_ERR "FILEIO: Missing fd_dev_size=" + " parameter, and no backing struct" + " block_device\n"); + goto fail; + } + + limits = &dev_limits.limits; + limits->logical_block_size = FD_BLOCKSIZE; + limits->max_hw_sectors = FD_MAX_SECTORS; + limits->max_sectors = FD_MAX_SECTORS; + fd_dev->fd_block_size = FD_BLOCKSIZE; + } + + dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; + dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; + + dev = transport_add_device_to_core_hba(hba, &fileio_template, + se_dev, dev_flags, (void *)fd_dev, + &dev_limits, "FILEIO", FD_VERSION); + if (!(dev)) + goto fail; + + fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; + fd_dev->fd_queue_depth = dev->queue_depth; + + printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," + " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, + fd_dev->fd_dev_name, fd_dev->fd_dev_size); + + putname(dev_p); + return dev; +fail: + if (fd_dev->fd_file) { + filp_close(fd_dev->fd_file, NULL); + fd_dev->fd_file = NULL; + } + putname(dev_p); + return NULL; +} + +/* fd_free_device(): (Part of se_subsystem_api_t template) + * + * + */ +static void fd_free_device(void *p) +{ + struct fd_dev *fd_dev = (struct fd_dev *) p; + + if (fd_dev->fd_file) { + filp_close(fd_dev->fd_file, NULL); + fd_dev->fd_file = NULL; + } + + kfree(fd_dev); +} + +static inline struct fd_request *FILE_REQ(struct se_task *task) +{ + return container_of(task, struct fd_request, fd_task); +} + + +static struct se_task * +fd_alloc_task(struct se_cmd *cmd) +{ + struct fd_request *fd_req; + + fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); + if (!(fd_req)) { + printk(KERN_ERR "Unable to allocate struct fd_request\n"); + return NULL; + } + + fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; + + return &fd_req->fd_task; +} + +static int fd_do_readv(struct se_task *task) +{ + struct fd_request *req = FILE_REQ(task); + struct file *fd = req->fd_dev->fd_file; + struct scatterlist *sg = task->task_sg; + struct iovec *iov; + mm_segment_t old_fs; + loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + int ret = 0, i; + + iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); + if (!(iov)) { + printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); + return -1; + } + + for (i = 0; i < task->task_sg_num; i++) { + iov[i].iov_len = sg[i].length; + iov[i].iov_base = sg_virt(&sg[i]); + } + + old_fs = get_fs(); + set_fs(get_ds()); + ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); + set_fs(old_fs); + + kfree(iov); + /* + * Return zeros and GOOD status even if the READ did not return + * the expected virt_size for struct file w/o a backing struct + * block_device. 
+ */ + if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { + if (ret < 0 || ret != task->task_size) { + printk(KERN_ERR "vfs_readv() returned %d," + " expecting %d for S_ISBLK\n", ret, + (int)task->task_size); + return -1; + } + } else { + if (ret < 0) { + printk(KERN_ERR "vfs_readv() returned %d for non" + " S_ISBLK\n", ret); + return -1; + } + } + + return 1; +} + +static int fd_do_writev(struct se_task *task) +{ + struct fd_request *req = FILE_REQ(task); + struct file *fd = req->fd_dev->fd_file; + struct scatterlist *sg = task->task_sg; + struct iovec *iov; + mm_segment_t old_fs; + loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + int ret, i = 0; + + iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); + if (!(iov)) { + printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); + return -1; + } + + for (i = 0; i < task->task_sg_num; i++) { + iov[i].iov_len = sg[i].length; + iov[i].iov_base = sg_virt(&sg[i]); + } + + old_fs = get_fs(); + set_fs(get_ds()); + ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); + set_fs(old_fs); + + kfree(iov); + + if (ret < 0 || ret != task->task_size) { + printk(KERN_ERR "vfs_writev() returned %d\n", ret); + return -1; + } + + return 1; +} + +static void fd_emulate_sync_cache(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct se_device *dev = cmd->se_dev; + struct fd_dev *fd_dev = dev->dev_ptr; + int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + loff_t start, end; + int ret; + + /* + * If the Immediate bit is set, queue up the GOOD response + * for this SYNCHRONIZE_CACHE op + */ + if (immed) + transport_complete_sync_cache(cmd, 1); + + /* + * Determine if we will be flushing the entire device. + */ + if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { + start = 0; + end = LLONG_MAX; + } else { + start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; + if (cmd->data_length) + end = start + cmd->data_length; + else + end = LLONG_MAX; + } + + ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); + if (ret != 0) + printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); + + if (!immed) + transport_complete_sync_cache(cmd, ret == 0); +} + +/* + * Tell TCM Core that we are capable of WriteCache emulation for + * an underlying struct se_device. + */ +static int fd_emulated_write_cache(struct se_device *dev) +{ + return 1; +} + +static int fd_emulated_dpo(struct se_device *dev) +{ + return 0; +} +/* + * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs + * for TYPE_DISK. + */ +static int fd_emulated_fua_write(struct se_device *dev) +{ + return 1; +} + +static int fd_emulated_fua_read(struct se_device *dev) +{ + return 0; +} + +/* + * WRITE Force Unit Access (FUA) emulation on a per struct se_task + * LBA range basis.. 
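With a 512-byte block_size (illustrative numbers), a FUA WRITE of 8 blocks at LBA 16 maps onto the byte range that the routine below pushes out with vfs_fsync_range():

	start = 16 * 512          = 8192
	end   = 8192 + (8 * 512)  = 12288
	vfs_fsync_range(fd_dev->fd_file, 8192, 12288, 1);   /* datasync */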
+ */ +static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) +{ + struct se_device *dev = cmd->se_dev; + struct fd_dev *fd_dev = dev->dev_ptr; + loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; + loff_t end = start + task->task_size; + int ret; + + DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", + task->task_lba, task->task_size); + + ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); + if (ret != 0) + printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); +} + +static int fd_do_task(struct se_task *task) +{ + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_dev; + int ret = 0; + + /* + * Call vectorized fileio functions to map struct scatterlist + * physical memory addresses to struct iovec virtual memory. + */ + if (task->task_data_direction == DMA_FROM_DEVICE) { + ret = fd_do_readv(task); + } else { + ret = fd_do_writev(task); + + if (ret > 0 && + DEV_ATTRIB(dev)->emulate_write_cache > 0 && + DEV_ATTRIB(dev)->emulate_fua_write > 0 && + T_TASK(cmd)->t_tasks_fua) { + /* + * We might need to be a bit smarter here + * and return some sense data to let the initiator + * know the FUA WRITE cache sync failed..? + */ + fd_emulate_write_fua(cmd, task); + } + + } + + if (ret < 0) + return ret; + if (ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +/* fd_free_task(): (Part of se_subsystem_api_t template) + * + * + */ +static void fd_free_task(struct se_task *task) +{ + struct fd_request *req = FILE_REQ(task); + + kfree(req); +} + +enum { + Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err +}; + +static match_table_t tokens = { + {Opt_fd_dev_name, "fd_dev_name=%s"}, + {Opt_fd_dev_size, "fd_dev_size=%s"}, + {Opt_fd_buffered_io, "fd_buffered_id=%d"}, + {Opt_err, NULL} +}; + +static ssize_t fd_set_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + const char *page, ssize_t count) +{ + struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; + char *orig, *ptr, *arg_p, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_fd_dev_name: + snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, + "%s", match_strdup(&args[0])); + printk(KERN_INFO "FILEIO: Referencing Path: %s\n", + fd_dev->fd_dev_name); + fd_dev->fbd_flags |= FBDF_HAS_PATH; + break; + case Opt_fd_dev_size: + arg_p = match_strdup(&args[0]); + ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); + if (ret < 0) { + printk(KERN_ERR "strict_strtoull() failed for" + " fd_dev_size=\n"); + goto out; + } + printk(KERN_INFO "FILEIO: Referencing Size: %llu" + " bytes\n", fd_dev->fd_dev_size); + fd_dev->fbd_flags |= FBDF_HAS_SIZE; + break; + case Opt_fd_buffered_io: + match_int(args, &arg); + if (arg != 1) { + printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); + ret = -EINVAL; + goto out; + } + + printk(KERN_INFO "FILEIO: Using buffered I/O" + " operations for struct fd_dev\n"); + + fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? 
count : ret; +} + +static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) +{ + struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; + + if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { + printk(KERN_ERR "Missing fd_dev_name=\n"); + return -1; + } + + return 0; +} + +static ssize_t fd_show_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + char *b) +{ + struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; + ssize_t bl = 0; + + bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); + bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", + fd_dev->fd_dev_name, fd_dev->fd_dev_size, + (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? + "Buffered" : "Synchronous"); + return bl; +} + +/* fd_get_cdb(): (Part of se_subsystem_api_t template) + * + * + */ +static unsigned char *fd_get_cdb(struct se_task *task) +{ + struct fd_request *req = FILE_REQ(task); + + return req->fd_scsi_cdb; +} + +/* fd_get_device_rev(): (Part of se_subsystem_api_t template) + * + * + */ +static u32 fd_get_device_rev(struct se_device *dev) +{ + return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ +} + +/* fd_get_device_type(): (Part of se_subsystem_api_t template) + * + * + */ +static u32 fd_get_device_type(struct se_device *dev) +{ + return TYPE_DISK; +} + +static sector_t fd_get_blocks(struct se_device *dev) +{ + struct fd_dev *fd_dev = dev->dev_ptr; + unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, + DEV_ATTRIB(dev)->block_size); + + return blocks_long; +} + +static struct se_subsystem_api fileio_template = { + .name = "fileio", + .owner = THIS_MODULE, + .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .attach_hba = fd_attach_hba, + .detach_hba = fd_detach_hba, + .allocate_virtdevice = fd_allocate_virtdevice, + .create_virtdevice = fd_create_virtdevice, + .free_device = fd_free_device, + .dpo_emulated = fd_emulated_dpo, + .fua_write_emulated = fd_emulated_fua_write, + .fua_read_emulated = fd_emulated_fua_read, + .write_cache_emulated = fd_emulated_write_cache, + .alloc_task = fd_alloc_task, + .do_task = fd_do_task, + .do_sync_cache = fd_emulate_sync_cache, + .free_task = fd_free_task, + .check_configfs_dev_params = fd_check_configfs_dev_params, + .set_configfs_dev_params = fd_set_configfs_dev_params, + .show_configfs_dev_params = fd_show_configfs_dev_params, + .get_cdb = fd_get_cdb, + .get_device_rev = fd_get_device_rev, + .get_device_type = fd_get_device_type, + .get_blocks = fd_get_blocks, +}; + +static int __init fileio_module_init(void) +{ + return transport_subsystem_register(&fileio_template); +} + +static void fileio_module_exit(void) +{ + transport_subsystem_release(&fileio_template); +} + +MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(fileio_module_init); +module_exit(fileio_module_exit); diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h new file mode 100644 index 000000000000..ef4de2b4bd46 --- /dev/null +++ b/drivers/target/target_core_file.h @@ -0,0 +1,50 @@ +#ifndef TARGET_CORE_FILE_H +#define TARGET_CORE_FILE_H + +#define FD_VERSION "4.0" + +#define FD_MAX_DEV_NAME 256 +/* Maximum queuedepth for the FILEIO HBA */ +#define FD_HBA_QUEUE_DEPTH 256 +#define FD_DEVICE_QUEUE_DEPTH 32 +#define FD_MAX_DEVICE_QUEUE_DEPTH 128 +#define FD_BLOCKSIZE 512 +#define FD_MAX_SECTORS 1024 + +#define RRF_EMULATE_CDB 0x01 +#define RRF_GOT_LBA 0x02 + +struct fd_request { + struct se_task fd_task; + /* SCSI CDB from iSCSI 
Command PDU */ + unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; + /* FILEIO device */ + struct fd_dev *fd_dev; +} ____cacheline_aligned; + +#define FBDF_HAS_PATH 0x01 +#define FBDF_HAS_SIZE 0x02 +#define FDBD_USE_BUFFERED_IO 0x04 + +struct fd_dev { + u32 fbd_flags; + unsigned char fd_dev_name[FD_MAX_DEV_NAME]; + /* Unique Ramdisk Device ID in Ramdisk HBA */ + u32 fd_dev_id; + /* Number of SG tables in sg_table_array */ + u32 fd_table_count; + u32 fd_queue_depth; + u32 fd_block_size; + unsigned long long fd_dev_size; + struct file *fd_file; + /* FILEIO HBA device is connected to */ + struct fd_host *fd_host; +} ____cacheline_aligned; + +struct fd_host { + u32 fd_host_dev_id_count; + /* Unique FILEIO Host ID */ + u32 fd_host_id; +} ____cacheline_aligned; + +#endif /* TARGET_CORE_FILE_H */ diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c new file mode 100644 index 000000000000..4bbe8208b241 --- /dev/null +++ b/drivers/target/target_core_hba.c @@ -0,0 +1,185 @@ +/******************************************************************************* + * Filename: target_core_hba.c + * + * This file copntains the iSCSI HBA Transport related functions. + * + * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
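Besides the subsystem plugin registration hooks, this file provides core_alloc_hba() and core_delete_hba(), declared in target_core_hba.h further down. The configfs layer drives them roughly as follows (a sketch with error handling trimmed; the "fileio" plugin name is only an example and must match a registered subsystem plugin):

	struct se_hba *hba;

	/* plugin name, plugin_dep_id, hba_flags */
	hba = core_alloc_hba("fileio", 0, 0);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	/* ... attach devices to the HBA and export them through a fabric ... */

	core_delete_hba(hba);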
+ * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_hba.h" + +static LIST_HEAD(subsystem_list); +static DEFINE_MUTEX(subsystem_mutex); + +int transport_subsystem_register(struct se_subsystem_api *sub_api) +{ + struct se_subsystem_api *s; + + INIT_LIST_HEAD(&sub_api->sub_api_list); + + mutex_lock(&subsystem_mutex); + list_for_each_entry(s, &subsystem_list, sub_api_list) { + if (!(strcmp(s->name, sub_api->name))) { + printk(KERN_ERR "%p is already registered with" + " duplicate name %s, unable to process" + " request\n", s, s->name); + mutex_unlock(&subsystem_mutex); + return -EEXIST; + } + } + list_add_tail(&sub_api->sub_api_list, &subsystem_list); + mutex_unlock(&subsystem_mutex); + + printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" + " %p\n", sub_api->name, sub_api->owner); + return 0; +} +EXPORT_SYMBOL(transport_subsystem_register); + +void transport_subsystem_release(struct se_subsystem_api *sub_api) +{ + mutex_lock(&subsystem_mutex); + list_del(&sub_api->sub_api_list); + mutex_unlock(&subsystem_mutex); +} +EXPORT_SYMBOL(transport_subsystem_release); + +static struct se_subsystem_api *core_get_backend(const char *sub_name) +{ + struct se_subsystem_api *s; + + mutex_lock(&subsystem_mutex); + list_for_each_entry(s, &subsystem_list, sub_api_list) { + if (!strcmp(s->name, sub_name)) + goto found; + } + mutex_unlock(&subsystem_mutex); + return NULL; +found: + if (s->owner && !try_module_get(s->owner)) + s = NULL; + mutex_unlock(&subsystem_mutex); + return s; +} + +struct se_hba * +core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) +{ + struct se_hba *hba; + int ret = 0; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + printk(KERN_ERR "Unable to allocate struct se_hba\n"); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&hba->hba_dev_list); + spin_lock_init(&hba->device_lock); + spin_lock_init(&hba->hba_queue_lock); + mutex_init(&hba->hba_access_mutex); + + hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); + hba->hba_flags |= hba_flags; + + atomic_set(&hba->max_queue_depth, 0); + atomic_set(&hba->left_queue_depth, 0); + + hba->transport = core_get_backend(plugin_name); + if (!hba->transport) { + ret = -EINVAL; + goto out_free_hba; + } + + ret = hba->transport->attach_hba(hba, plugin_dep_id); + if (ret < 0) + goto out_module_put; + + spin_lock(&se_global->hba_lock); + hba->hba_id = se_global->g_hba_id_counter++; + list_add_tail(&hba->hba_list, &se_global->g_hba_list); + spin_unlock(&se_global->hba_lock); + + printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" + " Core\n", hba->hba_id); + + return hba; + +out_module_put: + if (hba->transport->owner) + module_put(hba->transport->owner); + hba->transport = NULL; +out_free_hba: + kfree(hba); + return ERR_PTR(ret); +} + +int +core_delete_hba(struct se_hba *hba) +{ + struct se_device *dev, *dev_tmp; + + spin_lock(&hba->device_lock); + list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { + + se_clear_dev_ports(dev); + spin_unlock(&hba->device_lock); + + se_release_device_for_hba(dev); + + spin_lock(&hba->device_lock); + } + spin_unlock(&hba->device_lock); + + hba->transport->detach_hba(hba); + + spin_lock(&se_global->hba_lock); + list_del(&hba->hba_list); + spin_unlock(&se_global->hba_lock); + + printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic 
Target" + " Core\n", hba->hba_id); + + if (hba->transport->owner) + module_put(hba->transport->owner); + + hba->transport = NULL; + kfree(hba); + return 0; +} diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h new file mode 100644 index 000000000000..bb0fea5f730c --- /dev/null +++ b/drivers/target/target_core_hba.h @@ -0,0 +1,7 @@ +#ifndef TARGET_CORE_HBA_H +#define TARGET_CORE_HBA_H + +extern struct se_hba *core_alloc_hba(const char *, u32, u32); +extern int core_delete_hba(struct se_hba *); + +#endif /* TARGET_CORE_HBA_H */ diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c new file mode 100644 index 000000000000..c6e0d757e76e --- /dev/null +++ b/drivers/target/target_core_iblock.c @@ -0,0 +1,808 @@ +/******************************************************************************* + * Filename: target_core_iblock.c + * + * This file contains the Storage Engine <-> Linux BlockIO transport + * specific functions. + * + * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "target_core_iblock.h" + +#if 0 +#define DEBUG_IBLOCK(x...) printk(x) +#else +#define DEBUG_IBLOCK(x...) 
+#endif + +static struct se_subsystem_api iblock_template; + +static void iblock_bio_done(struct bio *, int); + +/* iblock_attach_hba(): (Part of se_subsystem_api_t template) + * + * + */ +static int iblock_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct iblock_hba *ib_host; + + ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); + if (!(ib_host)) { + printk(KERN_ERR "Unable to allocate memory for" + " struct iblock_hba\n"); + return -ENOMEM; + } + + ib_host->iblock_host_id = host_id; + + atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); + atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); + hba->hba_ptr = (void *) ib_host; + + printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" + " Generic Target Core Stack %s\n", hba->hba_id, + IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); + + printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" + " Target Core TCQ Depth: %d\n", hba->hba_id, + ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); + + return 0; +} + +static void iblock_detach_hba(struct se_hba *hba) +{ + struct iblock_hba *ib_host = hba->hba_ptr; + + printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" + " Target Core\n", hba->hba_id, ib_host->iblock_host_id); + + kfree(ib_host); + hba->hba_ptr = NULL; +} + +static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) +{ + struct iblock_dev *ib_dev = NULL; + struct iblock_hba *ib_host = hba->hba_ptr; + + ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); + if (!(ib_dev)) { + printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); + return NULL; + } + ib_dev->ibd_host = ib_host; + + printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); + + return ib_dev; +} + +static struct se_device *iblock_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p) +{ + struct iblock_dev *ib_dev = p; + struct se_device *dev; + struct se_dev_limits dev_limits; + struct block_device *bd = NULL; + struct request_queue *q; + struct queue_limits *limits; + u32 dev_flags = 0; + + if (!(ib_dev)) { + printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); + return 0; + } + memset(&dev_limits, 0, sizeof(struct se_dev_limits)); + /* + * These settings need to be made tunable.. + */ + ib_dev->ibd_bio_set = bioset_create(32, 64); + if (!(ib_dev->ibd_bio_set)) { + printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); + return 0; + } + printk(KERN_INFO "IBLOCK: Created bio_set()\n"); + /* + * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path + * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. + */ + printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", + ib_dev->ibd_udev_path); + + bd = blkdev_get_by_path(ib_dev->ibd_udev_path, + FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); + if (!(bd)) + goto failed; + /* + * Setup the local scope queue_limits from struct request_queue->limits + * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 
+ */ + q = bdev_get_queue(bd); + limits = &dev_limits.limits; + limits->logical_block_size = bdev_logical_block_size(bd); + limits->max_hw_sectors = queue_max_hw_sectors(q); + limits->max_sectors = queue_max_sectors(q); + dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH; + dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH; + + ib_dev->ibd_major = MAJOR(bd->bd_dev); + ib_dev->ibd_minor = MINOR(bd->bd_dev); + ib_dev->ibd_bd = bd; + + dev = transport_add_device_to_core_hba(hba, + &iblock_template, se_dev, dev_flags, (void *)ib_dev, + &dev_limits, "IBLOCK", IBLOCK_VERSION); + if (!(dev)) + goto failed; + + ib_dev->ibd_depth = dev->queue_depth; + + /* + * Check if the underlying struct block_device request_queue supports + * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM + * in ATA and we need to set TPE=1 + */ + if (blk_queue_discard(bdev_get_queue(bd))) { + struct request_queue *q = bdev_get_queue(bd); + + DEV_ATTRIB(dev)->max_unmap_lba_count = + q->limits.max_discard_sectors; + /* + * Currently hardcoded to 1 in Linux/SCSI code.. + */ + DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; + DEV_ATTRIB(dev)->unmap_granularity = + q->limits.discard_granularity; + DEV_ATTRIB(dev)->unmap_granularity_alignment = + q->limits.discard_alignment; + + printk(KERN_INFO "IBLOCK: BLOCK Discard support available," + " disabled by default\n"); + } + + return dev; + +failed: + if (ib_dev->ibd_bio_set) { + bioset_free(ib_dev->ibd_bio_set); + ib_dev->ibd_bio_set = NULL; + } + ib_dev->ibd_bd = NULL; + ib_dev->ibd_major = 0; + ib_dev->ibd_minor = 0; + return NULL; +} + +static void iblock_free_device(void *p) +{ + struct iblock_dev *ib_dev = p; + + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + bioset_free(ib_dev->ibd_bio_set); + kfree(ib_dev); +} + +static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) +{ + return container_of(task, struct iblock_req, ib_task); +} + +static struct se_task * +iblock_alloc_task(struct se_cmd *cmd) +{ + struct iblock_req *ib_req; + + ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!(ib_req)) { + printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); + return NULL; + } + + ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; + atomic_set(&ib_req->ib_bio_cnt, 0); + return &ib_req->ib_task; +} + +static unsigned long long iblock_emulate_read_cap_with_block_size( + struct se_device *dev, + struct block_device *bd, + struct request_queue *q) +{ + unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), + bdev_logical_block_size(bd)) - 1); + u32 block_size = bdev_logical_block_size(bd); + + if (block_size == DEV_ATTRIB(dev)->block_size) + return blocks_long; + + switch (block_size) { + case 4096: + switch (DEV_ATTRIB(dev)->block_size) { + case 2048: + blocks_long <<= 1; + break; + case 1024: + blocks_long <<= 2; + break; + case 512: + blocks_long <<= 3; + default: + break; + } + break; + case 2048: + switch (DEV_ATTRIB(dev)->block_size) { + case 4096: + blocks_long >>= 1; + break; + case 1024: + blocks_long <<= 1; + break; + case 512: + blocks_long <<= 2; + break; + default: + break; + } + break; + case 1024: + switch (DEV_ATTRIB(dev)->block_size) { + case 4096: + blocks_long >>= 2; + break; + case 2048: + blocks_long >>= 1; + break; + case 512: + blocks_long <<= 1; + break; + default: + break; + } + break; + case 512: + switch (DEV_ATTRIB(dev)->block_size) { + case 4096: + blocks_long >>= 3; + break; + case 2048: + blocks_long >>= 2; + break; + case 1024: + blocks_long >>= 1; + break; + default: + break; + 
} + break; + default: + break; + } + + return blocks_long; +} + +/* + * Emulate SYCHRONIZE_CACHE_* + */ +static void iblock_emulate_sync_cache(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; + int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); + sector_t error_sector; + int ret; + + /* + * If the Immediate bit is set, queue up the GOOD response + * for this SYNCHRONIZE_CACHE op + */ + if (immed) + transport_complete_sync_cache(cmd, 1); + + /* + * blkdev_issue_flush() does not support a specifying a range, so + * we have to flush the entire cache. + */ + ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); + if (ret != 0) { + printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " + " error_sector: %llu\n", ret, + (unsigned long long)error_sector); + } + + if (!immed) + transport_complete_sync_cache(cmd, ret == 0); +} + +/* + * Tell TCM Core that we are capable of WriteCache emulation for + * an underlying struct se_device. + */ +static int iblock_emulated_write_cache(struct se_device *dev) +{ + return 1; +} + +static int iblock_emulated_dpo(struct se_device *dev) +{ + return 0; +} + +/* + * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs + * for TYPE_DISK. + */ +static int iblock_emulated_fua_write(struct se_device *dev) +{ + return 1; +} + +static int iblock_emulated_fua_read(struct se_device *dev) +{ + return 0; +} + +static int iblock_do_task(struct se_task *task) +{ + struct se_device *dev = task->task_se_cmd->se_dev; + struct iblock_req *req = IBLOCK_REQ(task); + struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev; + struct request_queue *q = bdev_get_queue(ibd->ibd_bd); + struct bio *bio = req->ib_bio, *nbio = NULL; + int rw; + + if (task->task_data_direction == DMA_TO_DEVICE) { + /* + * Force data to disk if we pretend to not have a volatile + * write cache, or the initiator set the Force Unit Access bit. + */ + if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || + (DEV_ATTRIB(dev)->emulate_fua_write > 0 && + T_TASK(task->task_se_cmd)->t_tasks_fua)) + rw = WRITE_FUA; + else + rw = WRITE; + } else { + rw = READ; + } + + while (bio) { + nbio = bio->bi_next; + bio->bi_next = NULL; + DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" + " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); + + submit_bio(rw, bio); + bio = nbio; + } + + if (q->unplug_fn) + q->unplug_fn(q); + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) +{ + struct iblock_dev *ibd = dev->dev_ptr; + struct block_device *bd = ibd->ibd_bd; + int barrier = 0; + + return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); +} + +static void iblock_free_task(struct se_task *task) +{ + struct iblock_req *req = IBLOCK_REQ(task); + struct bio *bio, *hbio = req->ib_bio; + /* + * We only release the bio(s) here if iblock_bio_done() has not called + * bio_put() -> iblock_bio_destructor(). 
+ */ + while (hbio != NULL) { + bio = hbio; + hbio = hbio->bi_next; + bio->bi_next = NULL; + bio_put(bio); + } + + kfree(req); +} + +enum { + Opt_udev_path, Opt_force, Opt_err +}; + +static match_table_t tokens = { + {Opt_udev_path, "udev_path=%s"}, + {Opt_force, "force=%d"}, + {Opt_err, NULL} +}; + +static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, + struct se_subsystem_dev *se_dev, + const char *page, ssize_t count) +{ + struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_udev_path: + if (ib_dev->ibd_bd) { + printk(KERN_ERR "Unable to set udev_path= while" + " ib_dev->ibd_bd exists\n"); + ret = -EEXIST; + goto out; + } + + ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, + "%s", match_strdup(&args[0])); + printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", + ib_dev->ibd_udev_path); + ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; + break; + case Opt_force: + match_int(args, &arg); + ib_dev->ibd_force = arg; + printk(KERN_INFO "IBLOCK: Set force=%d\n", + ib_dev->ibd_force); + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? count : ret; +} + +static ssize_t iblock_check_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev) +{ + struct iblock_dev *ibd = se_dev->se_dev_su_ptr; + + if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { + printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); + return -1; + } + + return 0; +} + +static ssize_t iblock_show_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + char *b) +{ + struct iblock_dev *ibd = se_dev->se_dev_su_ptr; + struct block_device *bd = ibd->ibd_bd; + char buf[BDEVNAME_SIZE]; + ssize_t bl = 0; + + if (bd) + bl += sprintf(b + bl, "iBlock device: %s", + bdevname(bd, buf)); + if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { + bl += sprintf(b + bl, " UDEV PATH: %s\n", + ibd->ibd_udev_path); + } else + bl += sprintf(b + bl, "\n"); + + bl += sprintf(b + bl, " "); + if (bd) { + bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", + ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ? + "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 
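Editor's note: iblock_set_configfs_dev_params() below tokenizes the configfs input with strsep() and the match_token() table declared just above it. The following userspace approximation, which trades <linux/parser.h> for strncmp()/sscanf(), shows what a string such as "udev_path=/dev/sdb,force=1" turns into; all names here are illustrative, not TCM's:

#define _DEFAULT_SOURCE		/* for strsep() with glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct iblock_params {
	char udev_path[256];
	int force;
	int has_udev_path;
};

/* Userspace approximation of the strsep() + match_token() loop. */
static void parse_iblock_params(const char *page, struct iblock_params *p)
{
	char *opts = strdup(page), *orig = opts, *ptr;

	if (!opts)
		return;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		if (strncmp(ptr, "udev_path=", 10) == 0) {
			snprintf(p->udev_path, sizeof(p->udev_path), "%s", ptr + 10);
			p->has_udev_path = 1;
		} else if (sscanf(ptr, "force=%d", &p->force) == 1) {
			/* force flag stored */
		} else {
			fprintf(stderr, "ignoring unknown option: %s\n", ptr);
		}
	}
	free(orig);
}

int main(void)
{
	struct iblock_params p = { {0}, 0, 0 };

	parse_iblock_params("udev_path=/dev/sdb,force=1", &p);
	printf("udev_path=%s force=%d has_udev_path=%d\n",
	       p.udev_path, p.force, p.has_udev_path);
	return 0;
}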
+ "CLAIMED: IBLOCK" : "CLAIMED: OS"); + } else { + bl += sprintf(b + bl, "Major: %d Minor: %d\n", + ibd->ibd_major, ibd->ibd_minor); + } + + return bl; +} + +static void iblock_bio_destructor(struct bio *bio) +{ + struct se_task *task = bio->bi_private; + struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + + bio_free(bio, ib_dev->ibd_bio_set); +} + +static struct bio *iblock_get_bio( + struct se_task *task, + struct iblock_req *ib_req, + struct iblock_dev *ib_dev, + int *ret, + sector_t lba, + u32 sg_num) +{ + struct bio *bio; + + bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); + if (!(bio)) { + printk(KERN_ERR "Unable to allocate memory for bio\n"); + *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return NULL; + } + + DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" + " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); + DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); + + bio->bi_bdev = ib_dev->ibd_bd; + bio->bi_private = (void *) task; + bio->bi_destructor = iblock_bio_destructor; + bio->bi_end_io = &iblock_bio_done; + bio->bi_sector = lba; + atomic_inc(&ib_req->ib_bio_cnt); + + DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); + DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", + atomic_read(&ib_req->ib_bio_cnt)); + return bio; +} + +static int iblock_map_task_SG(struct se_task *task) +{ + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = SE_DEV(cmd); + struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + struct iblock_req *ib_req = IBLOCK_REQ(task); + struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + struct scatterlist *sg; + int ret = 0; + u32 i, sg_num = task->task_sg_num; + sector_t block_lba; + /* + * Do starting conversion up from non 512-byte blocksize with + * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. + */ + if (DEV_ATTRIB(dev)->block_size == 4096) + block_lba = (task->task_lba << 3); + else if (DEV_ATTRIB(dev)->block_size == 2048) + block_lba = (task->task_lba << 2); + else if (DEV_ATTRIB(dev)->block_size == 1024) + block_lba = (task->task_lba << 1); + else if (DEV_ATTRIB(dev)->block_size == 512) + block_lba = task->task_lba; + else { + printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" + " %u\n", DEV_ATTRIB(dev)->block_size); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + + bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); + if (!(bio)) + return ret; + + ib_req->ib_bio = bio; + hbio = tbio = bio; + /* + * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist + * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. 
+ */ + for_each_sg(task->task_sg, sg, task->task_sg_num, i) { + DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" + " %p len: %u offset: %u\n", task, bio, sg_page(sg), + sg->length, sg->offset); +again: + ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); + if (ret != sg->length) { + + DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", + bio->bi_sector); + DEBUG_IBLOCK("** task->task_size: %u\n", + task->task_size); + DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", + bio->bi_max_vecs); + DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", + bio->bi_vcnt); + + bio = iblock_get_bio(task, ib_req, ib_dev, &ret, + block_lba, sg_num); + if (!(bio)) + goto fail; + + tbio = tbio->bi_next = bio; + DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" + " list, Going to again\n", bio); + goto again; + } + /* Always in 512 byte units for Linux/Block */ + block_lba += sg->length >> IBLOCK_LBA_SHIFT; + sg_num--; + DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" + " sg_num to %u\n", task, sg_num); + DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" + " to %llu\n", task, block_lba); + DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" + " %u\n", task, bio->bi_vcnt); + } + + return 0; +fail: + while (hbio) { + bio = hbio; + hbio = hbio->bi_next; + bio->bi_next = NULL; + bio_put(bio); + } + return ret; +} + +static unsigned char *iblock_get_cdb(struct se_task *task) +{ + return IBLOCK_REQ(task)->ib_scsi_cdb; +} + +static u32 iblock_get_device_rev(struct se_device *dev) +{ + return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ +} + +static u32 iblock_get_device_type(struct se_device *dev) +{ + return TYPE_DISK; +} + +static sector_t iblock_get_blocks(struct se_device *dev) +{ + struct iblock_dev *ibd = dev->dev_ptr; + struct block_device *bd = ibd->ibd_bd; + struct request_queue *q = bdev_get_queue(bd); + + return iblock_emulate_read_cap_with_block_size(dev, bd, q); +} + +static void iblock_bio_done(struct bio *bio, int err) +{ + struct se_task *task = bio->bi_private; + struct iblock_req *ibr = IBLOCK_REQ(task); + /* + * Set -EIO if !BIO_UPTODATE and the passed is still err=0 + */ + if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) + err = -EIO; + + if (err != 0) { + printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," + " err: %d\n", bio, err); + /* + * Bump the ib_bio_err_cnt and release bio. + */ + atomic_inc(&ibr->ib_bio_err_cnt); + smp_mb__after_atomic_inc(); + bio_put(bio); + /* + * Wait to complete the task until the last bio as completed. + */ + if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + return; + + ibr->ib_bio = NULL; + transport_complete_task(task, 0); + return; + } + DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", + task, bio, task->task_lba, bio->bi_sector, err); + /* + * bio_put() will call iblock_bio_destructor() to release the bio back + * to ibr->ib_bio_set. + */ + bio_put(bio); + /* + * Wait to complete the task until the last bio as completed. + */ + if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + return; + /* + * Return GOOD status for task if zero ib_bio_err_cnt exists. 
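Editor's note: iblock_map_task_SG() keeps allocating bios until bio_add_page() has accepted every scatterlist segment, and iblock_bio_done() completes the task only when the last of those bios has finished — a plain submit-N, complete-once refcount. A userspace sketch of that completion pattern using C11 atomics (types and names invented):

#include <stdio.h>
#include <stdatomic.h>

struct task_sketch {
	atomic_int bio_cnt;	/* like ib_req->ib_bio_cnt */
	atomic_int bio_err_cnt;	/* like ib_req->ib_bio_err_cnt */
};

/* One call per sub-I/O completion, in the spirit of iblock_bio_done(). */
static void bio_done(struct task_sketch *t, int err)
{
	if (err)
		atomic_fetch_add(&t->bio_err_cnt, 1);

	/* Only the completion that drops the count to zero ends the task. */
	if (atomic_fetch_sub(&t->bio_cnt, 1) != 1)
		return;

	printf("task complete, status: %s\n",
	       atomic_load(&t->bio_err_cnt) ? "error" : "good");
}

int main(void)
{
	struct task_sketch t;

	atomic_init(&t.bio_cnt, 3);	/* three bios were submitted */
	atomic_init(&t.bio_err_cnt, 0);

	bio_done(&t, 0);
	bio_done(&t, -5);		/* one sub-I/O failed */
	bio_done(&t, 0);		/* last completion reports the task */
	return 0;
}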
+ */ + ibr->ib_bio = NULL; + transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); +} + +static struct se_subsystem_api iblock_template = { + .name = "iblock", + .owner = THIS_MODULE, + .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .map_task_SG = iblock_map_task_SG, + .attach_hba = iblock_attach_hba, + .detach_hba = iblock_detach_hba, + .allocate_virtdevice = iblock_allocate_virtdevice, + .create_virtdevice = iblock_create_virtdevice, + .free_device = iblock_free_device, + .dpo_emulated = iblock_emulated_dpo, + .fua_write_emulated = iblock_emulated_fua_write, + .fua_read_emulated = iblock_emulated_fua_read, + .write_cache_emulated = iblock_emulated_write_cache, + .alloc_task = iblock_alloc_task, + .do_task = iblock_do_task, + .do_discard = iblock_do_discard, + .do_sync_cache = iblock_emulate_sync_cache, + .free_task = iblock_free_task, + .check_configfs_dev_params = iblock_check_configfs_dev_params, + .set_configfs_dev_params = iblock_set_configfs_dev_params, + .show_configfs_dev_params = iblock_show_configfs_dev_params, + .get_cdb = iblock_get_cdb, + .get_device_rev = iblock_get_device_rev, + .get_device_type = iblock_get_device_type, + .get_blocks = iblock_get_blocks, +}; + +static int __init iblock_module_init(void) +{ + return transport_subsystem_register(&iblock_template); +} + +static void iblock_module_exit(void) +{ + transport_subsystem_release(&iblock_template); +} + +MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(iblock_module_init); +module_exit(iblock_module_exit); diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h new file mode 100644 index 000000000000..64c1f4d69f76 --- /dev/null +++ b/drivers/target/target_core_iblock.h @@ -0,0 +1,40 @@ +#ifndef TARGET_CORE_IBLOCK_H +#define TARGET_CORE_IBLOCK_H + +#define IBLOCK_VERSION "4.0" + +#define IBLOCK_HBA_QUEUE_DEPTH 512 +#define IBLOCK_DEVICE_QUEUE_DEPTH 32 +#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 +#define IBLOCK_MAX_CDBS 16 +#define IBLOCK_LBA_SHIFT 9 + +struct iblock_req { + struct se_task ib_task; + unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; + atomic_t ib_bio_cnt; + atomic_t ib_bio_err_cnt; + struct bio *ib_bio; + struct iblock_dev *ib_dev; +} ____cacheline_aligned; + +#define IBDF_HAS_UDEV_PATH 0x01 +#define IBDF_HAS_FORCE 0x02 + +struct iblock_dev { + unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; + int ibd_force; + int ibd_major; + int ibd_minor; + u32 ibd_depth; + u32 ibd_flags; + struct bio_set *ibd_bio_set; + struct block_device *ibd_bd; + struct iblock_hba *ibd_host; +} ____cacheline_aligned; + +struct iblock_hba { + int iblock_host_id; +} ____cacheline_aligned; + +#endif /* TARGET_CORE_IBLOCK_H */ diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c new file mode 100644 index 000000000000..d5a48aa0d2d1 --- /dev/null +++ b/drivers/target/target_core_mib.c @@ -0,0 +1,1078 @@ +/******************************************************************************* + * Filename: target_core_mib.c + * + * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
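Editor's note: everything the core needs from this backend is the iblock_template ops table plus a single transport_subsystem_register() call at module init. A toy version of that plugin pattern in plain C — the struct, registry, and function names below are invented for illustration and are far smaller than the real struct se_subsystem_api:

#include <stdio.h>

/* Invented mini "ops" table standing in for struct se_subsystem_api. */
struct backend_ops {
	const char *name;
	int  (*attach)(void);
	void (*detach)(void);
};

static int my_attach(void)  { printf("iblock-like backend attached\n"); return 0; }
static void my_detach(void) { printf("iblock-like backend detached\n"); }

static struct backend_ops iblock_like_template = {
	.name   = "iblock",
	.attach = my_attach,
	.detach = my_detach,
};

/* Toy registry standing in for transport_subsystem_register/release(). */
static struct backend_ops *registered;

static int subsystem_register(struct backend_ops *ops)
{
	registered = ops;
	return registered->attach();
}

static void subsystem_release(struct backend_ops *ops)
{
	if (registered == ops) {
		ops->detach();
		registered = NULL;
	}
}

int main(void)
{
	subsystem_register(&iblock_like_template);
	subsystem_release(&iblock_like_template);
	return 0;
}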
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "target_core_hba.h" +#include "target_core_mib.h" + +/* SCSI mib table index */ +static struct scsi_index_table scsi_index_table; + +#ifndef INITIAL_JIFFIES +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) +#endif + +/* SCSI Instance Table */ +#define SCSI_INST_SW_INDEX 1 +#define SCSI_TRANSPORT_INDEX 1 + +#define NONE "None" +#define ISPRINT(a) ((a >= ' ') && (a <= '~')) + +static inline int list_is_first(const struct list_head *list, + const struct list_head *head) +{ + return list->prev == head; +} + +static void *locate_hba_start( + struct seq_file *seq, + loff_t *pos) +{ + spin_lock(&se_global->g_device_lock); + return seq_list_start(&se_global->g_se_dev_list, *pos); +} + +static void *locate_hba_next( + struct seq_file *seq, + void *v, + loff_t *pos) +{ + return seq_list_next(v, &se_global->g_se_dev_list, pos); +} + +static void locate_hba_stop(struct seq_file *seq, void *v) +{ + spin_unlock(&se_global->g_device_lock); +} + +/**************************************************************************** + * SCSI MIB Tables + ****************************************************************************/ + +/* + * SCSI Instance Table + */ +static void *scsi_inst_seq_start( + struct seq_file *seq, + loff_t *pos) +{ + spin_lock(&se_global->hba_lock); + return seq_list_start(&se_global->g_hba_list, *pos); +} + +static void *scsi_inst_seq_next( + struct seq_file *seq, + void *v, + loff_t *pos) +{ + return seq_list_next(v, &se_global->g_hba_list, pos); +} + +static void scsi_inst_seq_stop(struct seq_file *seq, void *v) +{ + spin_unlock(&se_global->hba_lock); +} + +static int scsi_inst_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba = list_entry(v, struct se_hba, hba_list); + + if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) + seq_puts(seq, "inst sw_indx\n"); + + seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); + seq_printf(seq, "plugin: %s version: %s\n", + hba->transport->name, TARGET_CORE_VERSION); + + return 0; +} + +static const struct seq_operations scsi_inst_seq_ops = { + .start = scsi_inst_seq_start, + .next = scsi_inst_seq_next, + .stop = scsi_inst_seq_stop, + .show = scsi_inst_seq_show +}; + +static int scsi_inst_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_inst_seq_ops); +} + +static const struct file_operations scsi_inst_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_inst_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Device Table + */ +static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void 
scsi_dev_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + +static int scsi_dev_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + char str[28]; + int k; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst indx role ports\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? */ + return 0; + } + + seq_printf(seq, "%u %u %s %u\n", hba->hba_index, + dev->dev_index, "Target", dev->dev_port_count); + + memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); + + /* vendor */ + for (k = 0; k < 8; k++) + str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? + DEV_T10_WWN(dev)->vendor[k] : 0x20; + str[k] = 0x20; + + /* model */ + for (k = 0; k < 16; k++) + str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? + DEV_T10_WWN(dev)->model[k] : 0x20; + str[k + 9] = 0; + + seq_printf(seq, "dev_alias: %s\n", str); + + return 0; +} + +static const struct seq_operations scsi_dev_seq_ops = { + .start = scsi_dev_seq_start, + .next = scsi_dev_seq_next, + .stop = scsi_dev_seq_stop, + .show = scsi_dev_seq_show +}; + +static int scsi_dev_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_dev_seq_ops); +} + +static const struct file_operations scsi_dev_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_dev_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Port Table + */ +static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void scsi_port_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + +static int scsi_port_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + struct se_port *sep, *sep_tmp; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst device indx role busy_count\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? 
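Editor's note: scsi_dev_seq_show() above builds its dev_alias string by copying the T10 vendor and model bytes and replacing anything non-printable with a space (the ISPRINT() macro defined earlier in the file). The same sanitizer as a standalone program, using the standard INQUIRY field widths of 8 bytes for vendor and 16 for model:

#include <stdio.h>

#define ISPRINT(a) ((a) >= ' ' && (a) <= '~')

/*
 * Build the "VENDOR MODEL" alias the way scsi_dev_seq_show() does,
 * replacing unprintable bytes with spaces.
 */
static void format_dev_alias(const char vendor[8], const char model[16],
			     char out[28])
{
	int k;

	for (k = 0; k < 8; k++)
		out[k] = ISPRINT(vendor[k]) ? vendor[k] : ' ';
	out[8] = ' ';

	for (k = 0; k < 16; k++)
		out[k + 9] = ISPRINT(model[k]) ? model[k] : ' ';
	out[25] = '\0';
}

int main(void)
{
	char vendor[8] = "LIO-ORG";	/* 7 characters + NUL padding */
	char model[16] = "IBLOCK";	/* remainder zero-filled */
	char alias[28];

	format_dev_alias(vendor, model, alias);
	printf("dev_alias: %s\n", alias);
	return 0;
}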
*/ + return 0; + } + + /* FIXME: scsiPortBusyStatuses count */ + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { + seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, + dev->dev_index, sep->sep_index, "Device", + dev->dev_index, 0); + } + spin_unlock(&dev->se_port_lock); + + return 0; +} + +static const struct seq_operations scsi_port_seq_ops = { + .start = scsi_port_seq_start, + .next = scsi_port_seq_next, + .stop = scsi_port_seq_stop, + .show = scsi_port_seq_show +}; + +static int scsi_port_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_port_seq_ops); +} + +static const struct file_operations scsi_port_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_port_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Transport Table + */ +static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void scsi_transport_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + +static int scsi_transport_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + struct se_port *se, *se_tmp; + struct se_portal_group *tpg; + struct t10_wwn *wwn; + char buf[64]; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst device indx dev_name\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? */ + return 0; + } + + wwn = DEV_T10_WWN(dev); + + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { + tpg = se->sep_tpg; + sprintf(buf, "scsiTransport%s", + TPG_TFO(tpg)->get_fabric_name()); + + seq_printf(seq, "%u %s %u %s+%s\n", + hba->hba_index, /* scsiTransportIndex */ + buf, /* scsiTransportType */ + (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? + TPG_TFO(tpg)->tpg_get_inst_index(tpg) : + 0, + TPG_TFO(tpg)->tpg_get_wwn(tpg), + (strlen(wwn->unit_serial)) ? 
+ /* scsiTransportDevName */ + wwn->unit_serial : wwn->vendor); + } + spin_unlock(&dev->se_port_lock); + + return 0; +} + +static const struct seq_operations scsi_transport_seq_ops = { + .start = scsi_transport_seq_start, + .next = scsi_transport_seq_next, + .stop = scsi_transport_seq_stop, + .show = scsi_transport_seq_show +}; + +static int scsi_transport_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_transport_seq_ops); +} + +static const struct file_operations scsi_transport_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_transport_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Target Device Table + */ +static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + + +#define LU_COUNT 1 /* for now */ +static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + int non_accessible_lus = 0; + char status[16]; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst indx num_LUs status non_access_LUs" + " resets\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? */ + return 0; + } + + switch (dev->dev_status) { + case TRANSPORT_DEVICE_ACTIVATED: + strcpy(status, "activated"); + break; + case TRANSPORT_DEVICE_DEACTIVATED: + strcpy(status, "deactivated"); + non_accessible_lus = 1; + break; + case TRANSPORT_DEVICE_SHUTDOWN: + strcpy(status, "shutdown"); + non_accessible_lus = 1; + break; + case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: + case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: + strcpy(status, "offline"); + non_accessible_lus = 1; + break; + default: + sprintf(status, "unknown(%d)", dev->dev_status); + non_accessible_lus = 1; + } + + seq_printf(seq, "%u %u %u %s %u %u\n", + hba->hba_index, dev->dev_index, LU_COUNT, + status, non_accessible_lus, dev->num_resets); + + return 0; +} + +static const struct seq_operations scsi_tgt_dev_seq_ops = { + .start = scsi_tgt_dev_seq_start, + .next = scsi_tgt_dev_seq_next, + .stop = scsi_tgt_dev_seq_stop, + .show = scsi_tgt_dev_seq_show +}; + +static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_tgt_dev_seq_ops); +} + +static const struct file_operations scsi_tgt_dev_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_tgt_dev_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Target Port Table + */ +static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + +static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + struct se_port *sep, *sep_tmp; + struct se_portal_group *tpg; + u32 rx_mbytes, tx_mbytes; + 
unsigned long long num_cmds; + char buf[64]; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst device indx name port_index in_cmds" + " write_mbytes read_mbytes hs_in_cmds\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? */ + return 0; + } + + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { + tpg = sep->sep_tpg; + sprintf(buf, "%sPort#", + TPG_TFO(tpg)->get_fabric_name()); + + seq_printf(seq, "%u %u %u %s%d %s%s%d ", + hba->hba_index, + dev->dev_index, + sep->sep_index, + buf, sep->sep_index, + TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", + TPG_TFO(tpg)->tpg_get_tag(tpg)); + + spin_lock(&sep->sep_lun->lun_sep_lock); + num_cmds = sep->sep_stats.cmd_pdus; + rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); + tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); + spin_unlock(&sep->sep_lun->lun_sep_lock); + + seq_printf(seq, "%llu %u %u %u\n", num_cmds, + rx_mbytes, tx_mbytes, 0); + } + spin_unlock(&dev->se_port_lock); + + return 0; +} + +static const struct seq_operations scsi_tgt_port_seq_ops = { + .start = scsi_tgt_port_seq_start, + .next = scsi_tgt_port_seq_next, + .stop = scsi_tgt_port_seq_stop, + .show = scsi_tgt_port_seq_show +}; + +static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_tgt_port_seq_ops); +} + +static const struct file_operations scsi_tgt_port_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_tgt_port_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Authorized Initiator Table: + * It contains the SCSI Initiators authorized to be attached to one of the + * local Target ports. + * Iterates through all active TPGs and extracts the info from the ACLs + */ +static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) +{ + spin_lock_bh(&se_global->se_tpg_lock); + return seq_list_start(&se_global->g_se_tpg_list, *pos); +} + +static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + return seq_list_next(v, &se_global->g_se_tpg_list, pos); +} + +static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) +{ + spin_unlock_bh(&se_global->se_tpg_lock); +} + +static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) +{ + struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, + se_tpg_list); + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_node_acl *se_nacl; + int j; + + if (list_is_first(&se_tpg->se_tpg_list, + &se_global->g_se_tpg_list)) + seq_puts(seq, "inst dev port indx dev_or_port intr_name " + "map_indx att_count num_cmds read_mbytes " + "write_mbytes hs_num_cmds creation_time row_status\n"); + + if (!(se_tpg)) + return 0; + + spin_lock(&se_tpg->acl_node_lock); + list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { + + atomic_inc(&se_nacl->mib_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock(&se_tpg->acl_node_lock); + + spin_lock_irq(&se_nacl->device_list_lock); + for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { + deve = &se_nacl->device_list[j]; + if (!(deve->lun_flags & + TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || + (!deve->se_lun)) + continue; + lun = deve->se_lun; + if (!lun->lun_se_dev) + continue; + + seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" + " %u %s\n", + /* scsiInstIndex */ + (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? 
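Editor's note: the port and LU tables export byte counters as whole megabytes by shifting right 20 bits, and creation times as hundredths of a second since boot via (jiffies - INITIAL_JIFFIES) * 100 / HZ. Both conversions in isolation (HZ is build-configuration dependent; 250 is only an example value):

#include <stdio.h>
#include <stdint.h>

#define HZ_EXAMPLE		250
#define INITIAL_JIFFIES_EX	((unsigned long)(unsigned int)(-300 * HZ_EXAMPLE))

int main(void)
{
	uint64_t rx_data_octets = 5368709120ULL;	/* 5 GiB received */
	unsigned long creation_time = INITIAL_JIFFIES_EX + 45 * HZ_EXAMPLE;

	/* Octet counter -> whole MiB, as in "rx_data_octets >> 20" above. */
	printf("read_mbytes: %u\n", (unsigned int)(rx_data_octets >> 20));

	/* Jiffies since boot -> hundredths of a second, as in the
	 * creation_time fields of the auth_intr and lu tables. */
	printf("creation_time: %lu\n",
	       ((unsigned long)(uint32_t)creation_time - INITIAL_JIFFIES_EX)
	       * 100 / HZ_EXAMPLE);
	return 0;
}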
+ TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : + 0, + /* scsiDeviceIndex */ + lun->lun_se_dev->dev_index, + /* scsiAuthIntrTgtPortIndex */ + TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), + /* scsiAuthIntrIndex */ + se_nacl->acl_index, + /* scsiAuthIntrDevOrPort */ + 1, + /* scsiAuthIntrName */ + se_nacl->initiatorname[0] ? + se_nacl->initiatorname : NONE, + /* FIXME: scsiAuthIntrLunMapIndex */ + 0, + /* scsiAuthIntrAttachedTimes */ + deve->attach_count, + /* scsiAuthIntrOutCommands */ + deve->total_cmds, + /* scsiAuthIntrReadMegaBytes */ + (u32)(deve->read_bytes >> 20), + /* scsiAuthIntrWrittenMegaBytes */ + (u32)(deve->write_bytes >> 20), + /* FIXME: scsiAuthIntrHSOutCommands */ + 0, + /* scsiAuthIntrLastCreation */ + (u32)(((u32)deve->creation_time - + INITIAL_JIFFIES) * 100 / HZ), + /* FIXME: scsiAuthIntrRowStatus */ + "Ready"); + } + spin_unlock_irq(&se_nacl->device_list_lock); + + spin_lock(&se_tpg->acl_node_lock); + atomic_dec(&se_nacl->mib_ref_count); + smp_mb__after_atomic_dec(); + } + spin_unlock(&se_tpg->acl_node_lock); + + return 0; +} + +static const struct seq_operations scsi_auth_intr_seq_ops = { + .start = scsi_auth_intr_seq_start, + .next = scsi_auth_intr_seq_next, + .stop = scsi_auth_intr_seq_stop, + .show = scsi_auth_intr_seq_show +}; + +static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_auth_intr_seq_ops); +} + +static const struct file_operations scsi_auth_intr_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_auth_intr_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Attached Initiator Port Table: + * It lists the SCSI Initiators attached to one of the local Target ports. + * Iterates through all active TPGs and use active sessions from each TPG + * to list the info fo this table. 
+ */ +static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) +{ + spin_lock_bh(&se_global->se_tpg_lock); + return seq_list_start(&se_global->g_se_tpg_list, *pos); +} + +static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + return seq_list_next(v, &se_global->g_se_tpg_list, pos); +} + +static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) +{ + spin_unlock_bh(&se_global->se_tpg_lock); +} + +static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) +{ + struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, + se_tpg_list); + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_node_acl *se_nacl; + struct se_session *se_sess; + unsigned char buf[64]; + int j; + + if (list_is_first(&se_tpg->se_tpg_list, + &se_global->g_se_tpg_list)) + seq_puts(seq, "inst dev port indx port_auth_indx port_name" + " port_ident\n"); + + if (!(se_tpg)) + return 0; + + spin_lock(&se_tpg->session_lock); + list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { + if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || + (!se_sess->se_node_acl) || + (!se_sess->se_node_acl->device_list)) + continue; + + atomic_inc(&se_sess->mib_ref_count); + smp_mb__after_atomic_inc(); + se_nacl = se_sess->se_node_acl; + atomic_inc(&se_nacl->mib_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock(&se_tpg->session_lock); + + spin_lock_irq(&se_nacl->device_list_lock); + for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { + deve = &se_nacl->device_list[j]; + if (!(deve->lun_flags & + TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || + (!deve->se_lun)) + continue; + + lun = deve->se_lun; + if (!lun->lun_se_dev) + continue; + + memset(buf, 0, 64); + if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) + TPG_TFO(se_tpg)->sess_get_initiator_sid( + se_sess, (unsigned char *)&buf[0], 64); + + seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", + /* scsiInstIndex */ + (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? + TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : + 0, + /* scsiDeviceIndex */ + lun->lun_se_dev->dev_index, + /* scsiPortIndex */ + TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), + /* scsiAttIntrPortIndex */ + (TPG_TFO(se_tpg)->sess_get_index != NULL) ? + TPG_TFO(se_tpg)->sess_get_index(se_sess) : + 0, + /* scsiAttIntrPortAuthIntrIdx */ + se_nacl->acl_index, + /* scsiAttIntrPortName */ + se_nacl->initiatorname[0] ? 
+ se_nacl->initiatorname : NONE, + /* scsiAttIntrPortIdentifier */ + buf); + } + spin_unlock_irq(&se_nacl->device_list_lock); + + spin_lock(&se_tpg->session_lock); + atomic_dec(&se_nacl->mib_ref_count); + smp_mb__after_atomic_dec(); + atomic_dec(&se_sess->mib_ref_count); + smp_mb__after_atomic_dec(); + } + spin_unlock(&se_tpg->session_lock); + + return 0; +} + +static const struct seq_operations scsi_att_intr_port_seq_ops = { + .start = scsi_att_intr_port_seq_start, + .next = scsi_att_intr_port_seq_next, + .stop = scsi_att_intr_port_seq_stop, + .show = scsi_att_intr_port_seq_show +}; + +static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_att_intr_port_seq_ops); +} + +static const struct file_operations scsi_att_intr_port_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_att_intr_port_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * SCSI Logical Unit Table + */ +static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) +{ + return locate_hba_start(seq, pos); +} + +static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return locate_hba_next(seq, v, pos); +} + +static void scsi_lu_seq_stop(struct seq_file *seq, void *v) +{ + locate_hba_stop(seq, v); +} + +#define SCSI_LU_INDEX 1 +static int scsi_lu_seq_show(struct seq_file *seq, void *v) +{ + struct se_hba *hba; + struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, + g_se_dev_list); + struct se_device *dev = se_dev->se_dev_ptr; + int j; + char str[28]; + + if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) + seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" + " dev_type status state-bit num_cmds read_mbytes" + " write_mbytes resets full_stat hs_num_cmds creation_time\n"); + + if (!(dev)) + return 0; + + hba = dev->se_hba; + if (!(hba)) { + /* Log error ? */ + return 0; + } + + /* Fix LU state, if we can read it from the device */ + seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, + dev->dev_index, SCSI_LU_INDEX, + (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ + (strlen(DEV_T10_WWN(dev)->unit_serial)) ? + /* scsiLuWwnName */ + (char *)&DEV_T10_WWN(dev)->unit_serial[0] : + "None"); + + memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); + /* scsiLuVendorId */ + for (j = 0; j < 8; j++) + str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? + DEV_T10_WWN(dev)->vendor[j] : 0x20; + str[8] = 0; + seq_printf(seq, " %s", str); + + /* scsiLuProductId */ + for (j = 0; j < 16; j++) + str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? + DEV_T10_WWN(dev)->model[j] : 0x20; + str[16] = 0; + seq_printf(seq, " %s", str); + + /* scsiLuRevisionId */ + for (j = 0; j < 4; j++) + str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? + DEV_T10_WWN(dev)->revision[j] : 0x20; + str[4] = 0; + seq_printf(seq, " %s", str); + + seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", + /* scsiLuPeripheralType */ + TRANSPORT(dev)->get_device_type(dev), + (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 
+ "available" : "notavailable", /* scsiLuStatus */ + "exposed", /* scsiLuState */ + (unsigned long long)dev->num_cmds, + /* scsiLuReadMegaBytes */ + (u32)(dev->read_bytes >> 20), + /* scsiLuWrittenMegaBytes */ + (u32)(dev->write_bytes >> 20), + dev->num_resets, /* scsiLuInResets */ + 0, /* scsiLuOutTaskSetFullStatus */ + 0, /* scsiLuHSInCommands */ + (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * + 100 / HZ)); + + return 0; +} + +static const struct seq_operations scsi_lu_seq_ops = { + .start = scsi_lu_seq_start, + .next = scsi_lu_seq_next, + .stop = scsi_lu_seq_stop, + .show = scsi_lu_seq_show +}; + +static int scsi_lu_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_lu_seq_ops); +} + +static const struct file_operations scsi_lu_seq_fops = { + .owner = THIS_MODULE, + .open = scsi_lu_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/****************************************************************************/ + +/* + * Remove proc fs entries + */ +void remove_scsi_target_mib(void) +{ + remove_proc_entry("scsi_target/mib/scsi_inst", NULL); + remove_proc_entry("scsi_target/mib/scsi_dev", NULL); + remove_proc_entry("scsi_target/mib/scsi_port", NULL); + remove_proc_entry("scsi_target/mib/scsi_transport", NULL); + remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); + remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); + remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); + remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); + remove_proc_entry("scsi_target/mib/scsi_lu", NULL); + remove_proc_entry("scsi_target/mib", NULL); +} + +/* + * Create proc fs entries for the mib tables + */ +int init_scsi_target_mib(void) +{ + struct proc_dir_entry *dir_entry; + struct proc_dir_entry *scsi_inst_entry; + struct proc_dir_entry *scsi_dev_entry; + struct proc_dir_entry *scsi_port_entry; + struct proc_dir_entry *scsi_transport_entry; + struct proc_dir_entry *scsi_tgt_dev_entry; + struct proc_dir_entry *scsi_tgt_port_entry; + struct proc_dir_entry *scsi_auth_intr_entry; + struct proc_dir_entry *scsi_att_intr_port_entry; + struct proc_dir_entry *scsi_lu_entry; + + dir_entry = proc_mkdir("scsi_target/mib", NULL); + if (!(dir_entry)) { + printk(KERN_ERR "proc_mkdir() failed.\n"); + return -1; + } + + scsi_inst_entry = + create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); + if (scsi_inst_entry) + scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; + else + goto error; + + scsi_dev_entry = + create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); + if (scsi_dev_entry) + scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; + else + goto error; + + scsi_port_entry = + create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); + if (scsi_port_entry) + scsi_port_entry->proc_fops = &scsi_port_seq_fops; + else + goto error; + + scsi_transport_entry = + create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); + if (scsi_transport_entry) + scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; + else + goto error; + + scsi_tgt_dev_entry = + create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); + if (scsi_tgt_dev_entry) + scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; + else + goto error; + + scsi_tgt_port_entry = + create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); + if (scsi_tgt_port_entry) + scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; + else + goto error; + + scsi_auth_intr_entry = + create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); + if (scsi_auth_intr_entry) + 
scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; + else + goto error; + + scsi_att_intr_port_entry = + create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); + if (scsi_att_intr_port_entry) + scsi_att_intr_port_entry->proc_fops = + &scsi_att_intr_port_seq_fops; + else + goto error; + + scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); + if (scsi_lu_entry) + scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; + else + goto error; + + return 0; + +error: + printk(KERN_ERR "create_proc_entry() failed.\n"); + remove_scsi_target_mib(); + return -1; +} + +/* + * Initialize the index table for allocating unique row indexes to various mib + * tables + */ +void init_scsi_index_table(void) +{ + memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); + spin_lock_init(&scsi_index_table.lock); +} + +/* + * Allocate a new row index for the entry type specified + */ +u32 scsi_get_new_index(scsi_index_t type) +{ + u32 new_index; + + if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { + printk(KERN_ERR "Invalid index type %d\n", type); + return -1; + } + + spin_lock(&scsi_index_table.lock); + new_index = ++scsi_index_table.scsi_mib_index[type]; + if (new_index == 0) + new_index = ++scsi_index_table.scsi_mib_index[type]; + spin_unlock(&scsi_index_table.lock); + + return new_index; +} +EXPORT_SYMBOL(scsi_get_new_index); diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h new file mode 100644 index 000000000000..277204633850 --- /dev/null +++ b/drivers/target/target_core_mib.h @@ -0,0 +1,28 @@ +#ifndef TARGET_CORE_MIB_H +#define TARGET_CORE_MIB_H + +typedef enum { + SCSI_INST_INDEX, + SCSI_DEVICE_INDEX, + SCSI_AUTH_INTR_INDEX, + SCSI_INDEX_TYPE_MAX +} scsi_index_t; + +struct scsi_index_table { + spinlock_t lock; + u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; +} ____cacheline_aligned; + +/* SCSI Port stats */ +struct scsi_port_stats { + u64 cmd_pdus; + u64 tx_data_octets; + u64 rx_data_octets; +} ____cacheline_aligned; + +extern int init_scsi_target_mib(void); +extern void remove_scsi_target_mib(void); +extern void init_scsi_index_table(void); +extern u32 scsi_get_new_index(scsi_index_t); + +#endif /*** TARGET_CORE_MIB_H ***/ diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c new file mode 100644 index 000000000000..2521f75362c3 --- /dev/null +++ b/drivers/target/target_core_pr.c @@ -0,0 +1,4252 @@ +/******************************************************************************* + * Filename: target_core_pr.c + * + * This file contains SPC-3 compliant persistent reservations and + * legacy SPC-2 reservations with compatible reservation handling (CRH=1) + * + * Copyright (c) 2009, 2010 Rising Tide Systems + * Copyright (c) 2009, 2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
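Editor's note: scsi_get_new_index() at the end of target_core_mib.c hands out unique non-zero row indexes from a lock-protected array of wrapping 32-bit counters, incrementing a second time when a counter wraps so that 0 is never returned. The same idea in standalone C, with a pthread mutex standing in for the spinlock:

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

enum index_type { INST_INDEX, DEVICE_INDEX, AUTH_INTR_INDEX, INDEX_TYPE_MAX };

static struct {
	pthread_mutex_t lock;
	uint32_t counter[INDEX_TYPE_MAX];
} index_table = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Mirror of scsi_get_new_index(): increment under the lock and never
 * return 0, so 0 can mean "no index assigned". */
static uint32_t get_new_index(enum index_type type)
{
	uint32_t new_index;

	pthread_mutex_lock(&index_table.lock);
	new_index = ++index_table.counter[type];
	if (new_index == 0)
		new_index = ++index_table.counter[type];
	pthread_mutex_unlock(&index_table.lock);

	return new_index;
}

int main(void)
{
	printf("first device index:  %u\n", get_new_index(DEVICE_INDEX));
	printf("second device index: %u\n", get_new_index(DEVICE_INDEX));

	/* Demonstrate the wrap: force the counter to UINT32_MAX. */
	index_table.counter[INST_INDEX] = UINT32_MAX;
	printf("index after wrap:    %u\n", get_new_index(INST_INDEX));
	return 0;
}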
+ * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "target_core_hba.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +/* + * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) + */ +struct pr_transport_id_holder { + int dest_local_nexus; + struct t10_pr_registration *dest_pr_reg; + struct se_portal_group *dest_tpg; + struct se_node_acl *dest_node_acl; + struct se_dev_entry *dest_se_deve; + struct list_head dest_list; +}; + +int core_pr_dump_initiator_port( + struct t10_pr_registration *pr_reg, + char *buf, + u32 size) +{ + if (!(pr_reg->isid_present_at_reg)) + return 0; + + snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); + return 1; +} + +static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, + struct t10_pr_registration *, int); + +static int core_scsi2_reservation_seq_non_holder( + struct se_cmd *cmd, + unsigned char *cdb, + u32 pr_reg_type) +{ + switch (cdb[0]) { + case INQUIRY: + case RELEASE: + case RELEASE_10: + return 0; + default: + return 1; + } + + return 1; +} + +static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + int ret; + + if (!(sess)) + return 0; + + spin_lock(&dev->dev_reservation_lock); + if (!dev->dev_reserved_node_acl || !sess) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + if (dev->dev_reserved_node_acl != sess->se_node_acl) { + spin_unlock(&dev->dev_reservation_lock); + return -1; + } + if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 
0 : -1; + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +static int core_scsi2_reservation_release(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg = sess->se_tpg; + + if (!(sess) || !(tpg)) + return 0; + + spin_lock(&dev->dev_reservation_lock); + if (!dev->dev_reserved_node_acl || !sess) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + + if (dev->dev_reserved_node_acl != sess->se_node_acl) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + dev->dev_reserved_node_acl = NULL; + dev->dev_flags &= ~DF_SPC2_RESERVATIONS; + if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { + dev->dev_res_bin_isid = 0; + dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; + } + printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" + " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), + SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + spin_unlock(&dev->dev_reservation_lock); + + return 0; +} + +static int core_scsi2_reservation_reserve(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg = sess->se_tpg; + + if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && + (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { + printk(KERN_ERR "LongIO and Obselete Bits set, returning" + " ILLEGAL_REQUEST\n"); + return PYX_TRANSPORT_ILLEGAL_REQUEST; + } + /* + * This is currently the case for target_core_mod passthrough struct se_cmd + * ops + */ + if (!(sess) || !(tpg)) + return 0; + + spin_lock(&dev->dev_reservation_lock); + if (dev->dev_reserved_node_acl && + (dev->dev_reserved_node_acl != sess->se_node_acl)) { + printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", + TPG_TFO(tpg)->get_fabric_name()); + printk(KERN_ERR "Original reserver LUN: %u %s\n", + SE_LUN(cmd)->unpacked_lun, + dev->dev_reserved_node_acl->initiatorname); + printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" + " from %s \n", SE_LUN(cmd)->unpacked_lun, + cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + spin_unlock(&dev->dev_reservation_lock); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + + dev->dev_reserved_node_acl = sess->se_node_acl; + dev->dev_flags |= DF_SPC2_RESERVATIONS; + if (sess->sess_bin_isid != 0) { + dev->dev_res_bin_isid = sess->sess_bin_isid; + dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; + } + printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" + " for %s\n", TPG_TFO(tpg)->get_fabric_name(), + SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + spin_unlock(&dev->dev_reservation_lock); + + return 0; +} + +static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, + struct se_node_acl *, struct se_session *); +static void core_scsi3_put_pr_reg(struct t10_pr_registration *); + +/* + * Setup in target_core_transport.c:transport_generic_cmd_sequencer() + * and called via struct se_cmd->transport_emulate_cdb() in TCM processing + * thread context. 
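Editor's note: the SCSI-2 emulation above keys a reservation to the initiator's node ACL and, when the fabric supplies one, to the session's binary ISID (the DF_SPC2_RESERVATIONS_WITH_ISID case). A compact model of core_scsi2_reservation_check()'s decision, with all types invented for the sketch:

#include <stdio.h>
#include <stdint.h>

struct nacl { const char *initiatorname; };

struct device_resv {
	struct nacl *reserved_nacl;	/* like dev->dev_reserved_node_acl */
	uint64_t res_bin_isid;		/* like dev->dev_res_bin_isid */
	int with_isid;			/* like DF_SPC2_RESERVATIONS_WITH_ISID */
};

/* 0 = allowed, -1 = conflict; mirrors core_scsi2_reservation_check(). */
static int scsi2_reservation_check(const struct device_resv *dev,
				   const struct nacl *sess_nacl,
				   uint64_t sess_bin_isid)
{
	if (!dev->reserved_nacl)
		return 0;		/* nothing reserved */
	if (dev->reserved_nacl != sess_nacl)
		return -1;		/* held by another initiator */
	if (!dev->with_isid)
		return 0;		/* same initiator, no ISID check */
	return (dev->res_bin_isid == sess_bin_isid) ? 0 : -1;
}

int main(void)
{
	struct nacl a = { "iqn.1994-05.com.example:a" };
	struct nacl b = { "iqn.1994-05.com.example:b" };
	struct device_resv dev = { &a, 0x10001ULL, 1 };

	printf("holder, matching ISID:  %d\n",
	       scsi2_reservation_check(&dev, &a, 0x10001ULL));
	printf("holder, different ISID: %d\n",
	       scsi2_reservation_check(&dev, &a, 0x20002ULL));
	printf("other initiator:        %d\n",
	       scsi2_reservation_check(&dev, &b, 0x10001ULL));
	return 0;
}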
+ */ +int core_scsi2_emulate_crh(struct se_cmd *cmd) +{ + struct se_session *se_sess = cmd->se_sess; + struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; + struct t10_pr_registration *pr_reg; + struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; + unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; + int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); + int conflict = 0; + + if (!(se_sess)) + return 0; + + if (!(crh)) + goto after_crh; + + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, + se_sess); + if (pr_reg) { + /* + * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE + * behavior + * + * A RESERVE(6) or RESERVE(10) command shall complete with GOOD + * status, but no reservation shall be established and the + * persistent reservation shall not be changed, if the command + * is received from a) and b) below. + * + * A RELEASE(6) or RELEASE(10) command shall complete with GOOD + * status, but the persistent reservation shall not be released, + * if the command is received from a) and b) + * + * a) An I_T nexus that is a persistent reservation holder; or + * b) An I_T nexus that is registered if a registrants only or + * all registrants type persistent reservation is present. + * + * In all other cases, a RESERVE(6) command, RESERVE(10) command, + * RELEASE(6) command, or RELEASE(10) command shall be processed + * as defined in SPC-2. + */ + if (pr_reg->pr_res_holder) { + core_scsi3_put_pr_reg(pr_reg); + return 0; + } + if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || + (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || + (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { + core_scsi3_put_pr_reg(pr_reg); + return 0; + } + core_scsi3_put_pr_reg(pr_reg); + conflict = 1; + } else { + /* + * Following spc2r20 5.5.1 Reservations overview: + * + * If a logical unit has executed a PERSISTENT RESERVE OUT + * command with the REGISTER or the REGISTER AND IGNORE + * EXISTING KEY service action and is still registered by any + * initiator, all RESERVE commands and all RELEASE commands + * regardless of initiator shall conflict and shall terminate + * with a RESERVATION CONFLICT status. + */ + spin_lock(&pr_tmpl->registration_lock); + conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1; + spin_unlock(&pr_tmpl->registration_lock); + } + + if (conflict) { + printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" + " while active SPC-3 registrations exist," + " returning RESERVATION_CONFLICT\n"); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + +after_crh: + if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10)) + return core_scsi2_reservation_reserve(cmd); + else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10)) + return core_scsi2_reservation_release(cmd); + else + return PYX_TRANSPORT_INVALID_CDB_FIELD; +} + +/* + * Begin SPC-3/SPC-4 Persistent Reservations emulation support + * + * This function is called by those initiator ports who are *NOT* + * the active PR reservation holder when a reservation is present. 
+ */ +static int core_scsi3_pr_seq_non_holder( + struct se_cmd *cmd, + unsigned char *cdb, + u32 pr_reg_type) +{ + struct se_dev_entry *se_deve; + struct se_session *se_sess = SE_SESS(cmd); + int other_cdb = 0, ignore_reg; + int registered_nexus = 0, ret = 1; /* Conflict by default */ + int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ + int we = 0; /* Write Exclusive */ + int legacy = 0; /* Act like a legacy device and return + * RESERVATION CONFLICT on some CDBs */ + /* + * A legacy SPC-2 reservation is being held. + */ + if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) + return core_scsi2_reservation_seq_non_holder(cmd, + cdb, pr_reg_type); + + se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + /* + * Determine if the registration should be ignored due to + * non-matching ISIDs in core_scsi3_pr_reservation_check(). + */ + ignore_reg = (pr_reg_type & 0x80000000); + if (ignore_reg) + pr_reg_type &= ~0x80000000; + + switch (pr_reg_type) { + case PR_TYPE_WRITE_EXCLUSIVE: + we = 1; + case PR_TYPE_EXCLUSIVE_ACCESS: + /* + * Some commands are only allowed for the persistent reservation + * holder. + */ + if ((se_deve->def_pr_registered) && !(ignore_reg)) + registered_nexus = 1; + break; + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + we = 1; + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + /* + * Some commands are only allowed for registered I_T Nexuses. + */ + reg_only = 1; + if ((se_deve->def_pr_registered) && !(ignore_reg)) + registered_nexus = 1; + break; + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + we = 1; + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + /* + * Each registered I_T Nexus is a reservation holder. + */ + all_reg = 1; + if ((se_deve->def_pr_registered) && !(ignore_reg)) + registered_nexus = 1; + break; + default: + return -1; + } + /* + * Referenced from spc4r17 table 45 for *NON* PR holder access + */ + switch (cdb[0]) { + case SECURITY_PROTOCOL_IN: + if (registered_nexus) + return 0; + ret = (we) ? 0 : 1; + break; + case MODE_SENSE: + case MODE_SENSE_10: + case READ_ATTRIBUTE: + case READ_BUFFER: + case RECEIVE_DIAGNOSTIC: + if (legacy) { + ret = 1; + break; + } + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ + break; + case PERSISTENT_RESERVE_OUT: + /* + * This follows PERSISTENT_RESERVE_OUT service actions that + * are allowed in the presence of various reservations. + * See spc4r17, table 46 + */ + switch (cdb[1] & 0x1f) { + case PRO_CLEAR: + case PRO_PREEMPT: + case PRO_PREEMPT_AND_ABORT: + ret = (registered_nexus) ? 0 : 1; + break; + case PRO_REGISTER: + case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: + ret = 0; + break; + case PRO_REGISTER_AND_MOVE: + case PRO_RESERVE: + ret = 1; + break; + case PRO_RELEASE: + ret = (registered_nexus) ? 0 : 1; + break; + default: + printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + " action: 0x%02x\n", cdb[1] & 0x1f); + return -1; + } + break; + case RELEASE: + case RELEASE_10: + /* Handled by CRH=1 in core_scsi2_emulate_crh() */ + ret = 0; + break; + case RESERVE: + case RESERVE_10: + /* Handled by CRH=1 in core_scsi2_emulate_crh() */ + ret = 0; + break; + case TEST_UNIT_READY: + ret = (legacy) ? 1 : 0; /* Conflict for legacy */ + break; + case MAINTENANCE_IN: + switch (cdb[1] & 0x1f) { + case MI_MANAGEMENT_PROTOCOL_IN: + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 
0 : 1; /* Allowed Write Exclusive */ + break; + case MI_REPORT_SUPPORTED_OPERATION_CODES: + case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: + if (legacy) { + ret = 1; + break; + } + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ + break; + case MI_REPORT_ALIASES: + case MI_REPORT_IDENTIFYING_INFORMATION: + case MI_REPORT_PRIORITY: + case MI_REPORT_TARGET_PGS: + case MI_REPORT_TIMESTAMP: + ret = 0; /* Allowed */ + break; + default: + printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", + (cdb[1] & 0x1f)); + return -1; + } + break; + case ACCESS_CONTROL_IN: + case ACCESS_CONTROL_OUT: + case INQUIRY: + case LOG_SENSE: + case READ_MEDIA_SERIAL_NUMBER: + case REPORT_LUNS: + case REQUEST_SENSE: + ret = 0; /*/ Allowed CDBs */ + break; + default: + other_cdb = 1; + break; + } + /* + * Case where the CDB is explictly allowed in the above switch + * statement. + */ + if (!(ret) && !(other_cdb)) { +#if 0 + printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s" + " reservation holder\n", cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); +#endif + return ret; + } + /* + * Check if write exclusive initiator ports *NOT* holding the + * WRITE_EXCLUSIVE_* reservation. + */ + if ((we) && !(registered_nexus)) { + if (cmd->data_direction == DMA_TO_DEVICE) { + /* + * Conflict for write exclusive + */ + printk(KERN_INFO "%s Conflict for unregistered nexus" + " %s CDB: 0x%02x to %s reservation\n", + transport_dump_cmd_direction(cmd), + se_sess->se_node_acl->initiatorname, cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + return 1; + } else { + /* + * Allow non WRITE CDBs for all Write Exclusive + * PR TYPEs to pass for registered and + * non-registered_nexuxes NOT holding the reservation. + * + * We only make noise for the unregisterd nexuses, + * as we expect registered non-reservation holding + * nexuses to issue CDBs. + */ +#if 0 + if (!(registered_nexus)) { + printk(KERN_INFO "Allowing implict CDB: 0x%02x" + " for %s reservation on unregistered" + " nexus\n", cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + } +#endif + return 0; + } + } else if ((reg_only) || (all_reg)) { + if (registered_nexus) { + /* + * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, + * allow commands from registered nexuses. + */ +#if 0 + printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s" + " reservation\n", cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); +#endif + return 0; + } + } + printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x" + " for %s reservation\n", transport_dump_cmd_direction(cmd), + (registered_nexus) ? "" : "un", + se_sess->se_node_acl->initiatorname, cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + + return 1; /* Conflict by default */ +} + +static u32 core_scsi3_pr_generation(struct se_device *dev) +{ + struct se_subsystem_dev *su_dev = SU_DEV(dev); + u32 prg; + /* + * PRGeneration field shall contain the value of a 32-bit wrapping + * counter mainted by the device server. + * + * Note that this is done regardless of Active Persist across + * Target PowerLoss (APTPL) + * + * See spc4r17 section 6.3.12 READ_KEYS service action + */ + spin_lock(&dev->dev_reservation_lock); + prg = T10_RES(su_dev)->pr_generation++; + spin_unlock(&dev->dev_reservation_lock); + + return prg; +} + +static int core_scsi3_pr_reservation_check( + struct se_cmd *cmd, + u32 *pr_reg_type) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + int ret; + + if (!(sess)) + return 0; + /* + * A legacy SPC-2 reservation is being held. 
+ */ + if (dev->dev_flags & DF_SPC2_RESERVATIONS) + return core_scsi2_reservation_check(cmd, pr_reg_type); + + spin_lock(&dev->dev_reservation_lock); + if (!(dev->dev_pr_res_holder)) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + *pr_reg_type = dev->dev_pr_res_holder->pr_res_type; + cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; + if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { + spin_unlock(&dev->dev_reservation_lock); + return -1; + } + if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { + spin_unlock(&dev->dev_reservation_lock); + return 0; + } + ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == + sess->sess_bin_isid) ? 0 : -1; + /* + * Use bit in *pr_reg_type to notify ISID mismatch in + * core_scsi3_pr_seq_non_holder(). + */ + if (ret != 0) + *pr_reg_type |= 0x80000000; + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +static struct t10_pr_registration *__core_scsi3_do_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_dev_entry *deve, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct t10_pr_registration *pr_reg; + + pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); + if (!(pr_reg)) { + printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); + return NULL; + } + + pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, + GFP_ATOMIC); + if (!(pr_reg->pr_aptpl_buf)) { + printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return NULL; + } + + INIT_LIST_HEAD(&pr_reg->pr_reg_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); + atomic_set(&pr_reg->pr_res_holders, 0); + pr_reg->pr_reg_nacl = nacl; + pr_reg->pr_reg_deve = deve; + pr_reg->pr_res_mapped_lun = deve->mapped_lun; + pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun; + pr_reg->pr_res_key = sa_res_key; + pr_reg->pr_reg_all_tg_pt = all_tg_pt; + pr_reg->pr_reg_aptpl = aptpl; + pr_reg->pr_reg_tg_pt_lun = deve->se_lun; + /* + * If an ISID value for this SCSI Initiator Port exists, + * save it to the registration now. + */ + if (isid != NULL) { + pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); + snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); + pr_reg->isid_present_at_reg = 1; + } + + return pr_reg; +} + +static int core_scsi3_lunacl_depend_item(struct se_dev_entry *); +static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *); + +/* + * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0 + * modes. + */ +static struct t10_pr_registration *__core_scsi3_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_dev_entry *deve, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct se_dev_entry *deve_tmp; + struct se_node_acl *nacl_tmp; + struct se_port *port, *port_tmp; + struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; + int ret; + /* + * Create a registration for the I_T Nexus upon which the + * PROUT REGISTER was received. 
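Editor's note: core_scsi3_pr_reservation_check() above signals an ISID mismatch to core_scsi3_pr_seq_non_holder() in-band, by setting bit 31 of *pr_reg_type; the sequencer reads it back as ignore_reg and masks it off before switching on the PR type. The encoding in isolation (the type value below is an arbitrary stand-in, not a real PR type code):

#include <stdio.h>
#include <stdint.h>

#define PR_TYPE_MASK		0x7fffffffu
#define PR_ISID_MISMATCH	0x80000000u	/* the "ignore_reg" bit used above */

int main(void)
{
	uint32_t pr_reg_type = 0x05;	/* arbitrary stand-in for a PR type */

	/* Producer side: the reservation check flags an ISID mismatch. */
	pr_reg_type |= PR_ISID_MISMATCH;

	/* Consumer side: the non-holder sequencer strips and remembers it. */
	int ignore_reg = (pr_reg_type & PR_ISID_MISMATCH) != 0;
	pr_reg_type &= PR_TYPE_MASK;

	printf("type=0x%02x ignore_reg=%d\n", pr_reg_type, ignore_reg);
	return 0;
}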
+ */ + pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, + sa_res_key, all_tg_pt, aptpl); + if (!(pr_reg)) + return NULL; + /* + * Return pointer to pr_reg for ALL_TG_PT=0 + */ + if (!(all_tg_pt)) + return pr_reg; + /* + * Create list of matching SCSI Initiator Port registrations + * for ALL_TG_PT=1 + */ + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { + atomic_inc(&port->sep_tg_pt_ref_cnt); + smp_mb__after_atomic_inc(); + spin_unlock(&dev->se_port_lock); + + spin_lock_bh(&port->sep_alua_lock); + list_for_each_entry(deve_tmp, &port->sep_alua_list, + alua_port_list) { + /* + * This pointer will be NULL for demo mode MappedLUNs + * that have not been make explict via a ConfigFS + * MappedLUN group for the SCSI Initiator Node ACL. + */ + if (!(deve_tmp->se_lun_acl)) + continue; + + nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; + /* + * Skip the matching struct se_node_acl that is allocated + * above.. + */ + if (nacl == nacl_tmp) + continue; + /* + * Only perform PR registrations for target ports on + * the same fabric module as the REGISTER w/ ALL_TG_PT=1 + * arrived. + */ + if (tfo != nacl_tmp->se_tpg->se_tpg_tfo) + continue; + /* + * Look for a matching Initiator Node ACL in ASCII format + */ + if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) + continue; + + atomic_inc(&deve_tmp->pr_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock_bh(&port->sep_alua_lock); + /* + * Grab a configfs group dependency that is released + * for the exception path at label out: below, or upon + * completion of adding ALL_TG_PT=1 registrations in + * __core_scsi3_add_registration() + */ + ret = core_scsi3_lunacl_depend_item(deve_tmp); + if (ret < 0) { + printk(KERN_ERR "core_scsi3_lunacl_depend" + "_item() failed\n"); + atomic_dec(&port->sep_tg_pt_ref_cnt); + smp_mb__after_atomic_dec(); + atomic_dec(&deve_tmp->pr_ref_count); + smp_mb__after_atomic_dec(); + goto out; + } + /* + * Located a matching SCSI Initiator Port on a different + * port, allocate the pr_reg_atp and attach it to the + * pr_reg->pr_reg_atp_list that will be processed once + * the original *pr_reg is processed in + * __core_scsi3_add_registration() + */ + pr_reg_atp = __core_scsi3_do_alloc_registration(dev, + nacl_tmp, deve_tmp, NULL, + sa_res_key, all_tg_pt, aptpl); + if (!(pr_reg_atp)) { + atomic_dec(&port->sep_tg_pt_ref_cnt); + smp_mb__after_atomic_dec(); + atomic_dec(&deve_tmp->pr_ref_count); + smp_mb__after_atomic_dec(); + core_scsi3_lunacl_undepend_item(deve_tmp); + goto out; + } + + list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, + &pr_reg->pr_reg_atp_list); + spin_lock_bh(&port->sep_alua_lock); + } + spin_unlock_bh(&port->sep_alua_lock); + + spin_lock(&dev->se_port_lock); + atomic_dec(&port->sep_tg_pt_ref_cnt); + smp_mb__after_atomic_dec(); + } + spin_unlock(&dev->se_port_lock); + + return pr_reg; +out: + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); + kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); + } + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return NULL; +} + +int core_scsi3_alloc_aptpl_registration( + struct t10_reservation_template *pr_tmpl, + u64 sa_res_key, + unsigned char *i_port, + unsigned char *isid, + u32 mapped_lun, + unsigned char *t_port, + u16 tpgt, + u32 target_lun, + int res_holder, + int all_tg_pt, + u8 type) +{ + struct t10_pr_registration *pr_reg; + + if 
(!(i_port) || !(t_port) || !(sa_res_key)) { + printk(KERN_ERR "Illegal parameters for APTPL registration\n"); + return -1; + } + + pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); + if (!(pr_reg)) { + printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); + return -1; + } + pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); + + INIT_LIST_HEAD(&pr_reg->pr_reg_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); + atomic_set(&pr_reg->pr_res_holders, 0); + pr_reg->pr_reg_nacl = NULL; + pr_reg->pr_reg_deve = NULL; + pr_reg->pr_res_mapped_lun = mapped_lun; + pr_reg->pr_aptpl_target_lun = target_lun; + pr_reg->pr_res_key = sa_res_key; + pr_reg->pr_reg_all_tg_pt = all_tg_pt; + pr_reg->pr_reg_aptpl = 1; + pr_reg->pr_reg_tg_pt_lun = NULL; + pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ + pr_reg->pr_res_type = type; + /* + * If an ISID value had been saved in APTPL metadata for this + * SCSI Initiator Port, restore it now. + */ + if (isid != NULL) { + pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); + snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); + pr_reg->isid_present_at_reg = 1; + } + /* + * Copy the i_port and t_port information from caller. + */ + snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port); + snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port); + pr_reg->pr_reg_tpgt = tpgt; + /* + * Set pr_res_holder from caller, the pr_reg who is the reservation + * holder will get it's pointer set in core_scsi3_aptpl_reserve() once + * the Initiator Node LUN ACL from the fabric module is created for + * this registration. + */ + pr_reg->pr_res_holder = res_holder; + + list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); + printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" + " metadata\n", (res_holder) ? "+reservation" : ""); + return 0; +} + +static void core_scsi3_aptpl_reserve( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_node_acl *node_acl, + struct t10_pr_registration *pr_reg) +{ + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + spin_lock(&dev->dev_reservation_lock); + dev->dev_pr_res_holder = pr_reg; + spin_unlock(&dev->dev_reservation_lock); + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" + " new reservation holder TYPE: %s ALL_TG_PT: %d\n", + TPG_TFO(tpg)->get_fabric_name(), + core_scsi3_pr_dump_type(pr_reg->pr_res_type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", + TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, + (prf_isid) ? 
&i_buf[0] : ""); +} + +static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *, + struct t10_pr_registration *, int, int); + +static int __core_scsi3_check_aptpl_registration( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun, + u32 target_lun, + struct se_node_acl *nacl, + struct se_dev_entry *deve) +{ + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; + unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; + u16 tpgt; + + memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN); + memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN); + /* + * Copy Initiator Port information from struct se_node_acl + */ + snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); + snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", + TPG_TFO(tpg)->tpg_get_wwn(tpg)); + tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + /* + * Look for the matching registrations+reservation from those + * created from APTPL metadata. Note that multiple registrations + * may exist for fabrics that use ISIDs in their SCSI Initiator Port + * TransportIDs. + */ + spin_lock(&pr_tmpl->aptpl_reg_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, + pr_reg_aptpl_list) { + if (!(strcmp(pr_reg->pr_iport, i_port)) && + (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && + !(strcmp(pr_reg->pr_tport, t_port)) && + (pr_reg->pr_reg_tpgt == tpgt) && + (pr_reg->pr_aptpl_target_lun == target_lun)) { + + pr_reg->pr_reg_nacl = nacl; + pr_reg->pr_reg_deve = deve; + pr_reg->pr_reg_tg_pt_lun = lun; + + list_del(&pr_reg->pr_reg_aptpl_list); + spin_unlock(&pr_tmpl->aptpl_reg_lock); + /* + * At this point all of the pointers in *pr_reg will + * be setup, so go ahead and add the registration. + */ + + __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); + /* + * If this registration is the reservation holder, + * make that happen now.. + */ + if (pr_reg->pr_res_holder) + core_scsi3_aptpl_reserve(dev, tpg, + nacl, pr_reg); + /* + * Reenable pr_aptpl_active to accept new metadata + * updates once the SCSI device is active again.. + */ + spin_lock(&pr_tmpl->aptpl_reg_lock); + pr_tmpl->pr_aptpl_active = 1; + } + } + spin_unlock(&pr_tmpl->aptpl_reg_lock); + + return 0; +} + +int core_scsi3_check_aptpl_registration( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun, + struct se_lun_acl *lun_acl) +{ + struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_node_acl *nacl = lun_acl->se_lun_nacl; + struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; + + if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + return 0; + + return __core_scsi3_check_aptpl_registration(dev, tpg, lun, + lun->unpacked_lun, nacl, deve); +} + +static void __core_scsi3_dump_registration( + struct target_core_fabric_ops *tfo, + struct se_device *dev, + struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, + int register_type) +{ + struct se_portal_group *se_tpg = nacl->se_tpg; + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" + " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? + "_AND_MOVE" : (register_type == 1) ? + "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, + (prf_isid) ? 
i_buf : ""); + printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", + tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), + tfo->tpg_get_tag(se_tpg)); + printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + " Port(s)\n", tfo->get_fabric_name(), + (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", + TRANSPORT(dev)->name); + printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), + pr_reg->pr_res_key, pr_reg->pr_res_generation, + pr_reg->pr_reg_aptpl); +} + +/* + * this function can be called with struct se_device->dev_reservation_lock + * when register_move = 1 + */ +static void __core_scsi3_add_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, + int register_type, + int register_move) +{ + struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + + /* + * Increment PRgeneration counter for struct se_device upon a successful + * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action + * + * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service + * action, the struct se_device->dev_reservation_lock will already be held, + * so we do not call core_scsi3_pr_generation() which grabs the lock + * for the REGISTER. + */ + pr_reg->pr_res_generation = (register_move) ? + T10_RES(su_dev)->pr_generation++ : + core_scsi3_pr_generation(dev); + + spin_lock(&pr_tmpl->registration_lock); + list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); + pr_reg->pr_reg_deve->def_pr_registered = 1; + + __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); + spin_unlock(&pr_tmpl->registration_lock); + /* + * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 
+ */ + if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) + return; + /* + * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 + * allocated in __core_scsi3_alloc_registration() + */ + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + + pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); + + spin_lock(&pr_tmpl->registration_lock); + list_add_tail(&pr_reg_tmp->pr_reg_list, + &pr_tmpl->registration_list); + pr_reg_tmp->pr_reg_deve->def_pr_registered = 1; + + __core_scsi3_dump_registration(tfo, dev, + pr_reg_tmp->pr_reg_nacl, pr_reg_tmp, + register_type); + spin_unlock(&pr_tmpl->registration_lock); + /* + * Drop configfs group dependency reference from + * __core_scsi3_alloc_registration() + */ + core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); + } +} + +static int core_scsi3_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_dev_entry *deve, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl, + int register_type, + int register_move) +{ + struct t10_pr_registration *pr_reg; + + pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, + sa_res_key, all_tg_pt, aptpl); + if (!(pr_reg)) + return -1; + + __core_scsi3_add_registration(dev, nacl, pr_reg, + register_type, register_move); + return 0; +} + +static struct t10_pr_registration *__core_scsi3_locate_pr_reg( + struct se_device *dev, + struct se_node_acl *nacl, + unsigned char *isid) +{ + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + struct se_portal_group *tpg; + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + /* + * First look for a matching struct se_node_acl + */ + if (pr_reg->pr_reg_nacl != nacl) + continue; + + tpg = pr_reg->pr_reg_nacl->se_tpg; + /* + * If this registration does NOT contain a fabric provided + * ISID, then we have found a match. + */ + if (!(pr_reg->isid_present_at_reg)) { + /* + * Determine if this SCSI device server requires that + * SCSI Intiatior TransportID w/ ISIDs is enforced + * for fabric modules (iSCSI) requiring them. + */ + if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { + if (DEV_ATTRIB(dev)->enforce_pr_isids) + continue; + } + atomic_inc(&pr_reg->pr_res_holders); + smp_mb__after_atomic_inc(); + spin_unlock(&pr_tmpl->registration_lock); + return pr_reg; + } + /* + * If the *pr_reg contains a fabric defined ISID for multi-value + * SCSI Initiator Port TransportIDs, then we expect a valid + * matching ISID to be provided by the local SCSI Initiator Port. 
+ */ + if (!(isid)) + continue; + if (strcmp(isid, pr_reg->pr_reg_isid)) + continue; + + atomic_inc(&pr_reg->pr_res_holders); + smp_mb__after_atomic_inc(); + spin_unlock(&pr_tmpl->registration_lock); + return pr_reg; + } + spin_unlock(&pr_tmpl->registration_lock); + + return NULL; +} + +static struct t10_pr_registration *core_scsi3_locate_pr_reg( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_session *sess) +{ + struct se_portal_group *tpg = nacl->se_tpg; + unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; + + if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { + memset(&buf[0], 0, PR_REG_ISID_LEN); + TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], + PR_REG_ISID_LEN); + isid_ptr = &buf[0]; + } + + return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr); +} + +static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) +{ + atomic_dec(&pr_reg->pr_res_holders); + smp_mb__after_atomic_dec(); +} + +static int core_scsi3_check_implict_release( + struct se_device *dev, + struct t10_pr_registration *pr_reg) +{ + struct se_node_acl *nacl = pr_reg->pr_reg_nacl; + struct t10_pr_registration *pr_res_holder; + int ret = 0; + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (!(pr_res_holder)) { + spin_unlock(&dev->dev_reservation_lock); + return ret; + } + if (pr_res_holder == pr_reg) { + /* + * Perform an implict RELEASE if the registration that + * is being released is holding the reservation. + * + * From spc4r17, section 5.7.11.1: + * + * e) If the I_T nexus is the persistent reservation holder + * and the persistent reservation is not an all registrants + * type, then a PERSISTENT RESERVE OUT command with REGISTER + * service action or REGISTER AND IGNORE EXISTING KEY + * service action with the SERVICE ACTION RESERVATION KEY + * field set to zero (see 5.7.11.3). + */ + __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); + ret = 1; + /* + * For 'All Registrants' reservation types, all existing + * registrations are still processed as reservation holders + * in core_scsi3_pr_seq_non_holder() after the initial + * reservation holder is implictly released here. + */ + } else if (pr_reg->pr_reg_all_tg_pt && + (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, + pr_reg->pr_reg_nacl->initiatorname)) && + (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { + printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" + " UNREGISTER while existing reservation with matching" + " key 0x%016Lx is present from another SCSI Initiator" + " Port\n", pr_reg->pr_res_key); + ret = -1; + } + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +/* + * Called with struct t10_reservation_template->registration_lock held. + */ +static void __core_scsi3_free_registration( + struct se_device *dev, + struct t10_pr_registration *pr_reg, + struct list_head *preempt_and_abort_list, + int dec_holders) +{ + struct target_core_fabric_ops *tfo = + pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + pr_reg->pr_reg_deve->def_pr_registered = 0; + pr_reg->pr_reg_deve->pr_res_key = 0; + list_del(&pr_reg->pr_reg_list); + /* + * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), + * so call core_scsi3_put_pr_reg() to decrement our reference. 
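The comment above spells out the reference discipline used throughout this code: core_scsi3_locate_pr_reg() returns with pr_res_holders elevated, and every caller is expected to drop that reference with core_scsi3_put_pr_reg(). A minimal userspace model of the same get/use/put contract, with all names invented, looks like this.

#include <stdio.h>

struct reg {
        int res_holders;        /* stands in for the atomic_t pr_res_holders */
};

/* "Locate" hands the object back with the hold count already raised. */
static struct reg *reg_get(struct reg *r)
{
        r->res_holders++;
        return r;
}

/* Every successful get must be paired with exactly one put. */
static void reg_put(struct reg *r)
{
        r->res_holders--;
}

int main(void)
{
        struct reg r = { 0 };
        struct reg *p = reg_get(&r);

        printf("in use: %d\n", p->res_holders); /* 1 */
        reg_put(p);
        printf("idle:   %d\n", r.res_holders);  /* 0, safe to tear down */
        return 0;
}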
+ */ + if (dec_holders) + core_scsi3_put_pr_reg(pr_reg); + /* + * Wait until all reference from any other I_T nexuses for this + * *pr_reg have been released. Because list_del() is called above, + * the last core_scsi3_put_pr_reg(pr_reg) will release this reference + * count back to zero, and we release *pr_reg. + */ + while (atomic_read(&pr_reg->pr_res_holders) != 0) { + spin_unlock(&pr_tmpl->registration_lock); + printk("SPC-3 PR [%s] waiting for pr_res_holders\n", + tfo->get_fabric_name()); + cpu_relax(); + spin_lock(&pr_tmpl->registration_lock); + } + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" + " Node: %s%s\n", tfo->get_fabric_name(), + pr_reg->pr_reg_nacl->initiatorname, + (prf_isid) ? &i_buf[0] : ""); + printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + " Port(s)\n", tfo->get_fabric_name(), + (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", + TRANSPORT(dev)->name); + printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, + pr_reg->pr_res_generation); + + if (!(preempt_and_abort_list)) { + pr_reg->pr_reg_deve = NULL; + pr_reg->pr_reg_nacl = NULL; + kfree(pr_reg->pr_aptpl_buf); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return; + } + /* + * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list + * are released once the ABORT_TASK_SET has completed.. + */ + list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list); +} + +void core_scsi3_free_pr_reg_from_nacl( + struct se_device *dev, + struct se_node_acl *nacl) +{ + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; + /* + * If the passed se_node_acl matches the reservation holder, + * release the reservation. + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if ((pr_res_holder != NULL) && + (pr_res_holder->pr_reg_nacl == nacl)) + __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); + spin_unlock(&dev->dev_reservation_lock); + /* + * Release any registration associated with the struct se_node_acl. 
+ */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + if (pr_reg->pr_reg_nacl != nacl) + continue; + + __core_scsi3_free_registration(dev, pr_reg, NULL, 0); + } + spin_unlock(&pr_tmpl->registration_lock); +} + +void core_scsi3_free_all_registrations( + struct se_device *dev) +{ + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder != NULL) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + __core_scsi3_complete_pro_release(dev, pr_res_nacl, + pr_res_holder, 0); + } + spin_unlock(&dev->dev_reservation_lock); + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + __core_scsi3_free_registration(dev, pr_reg, NULL, 0); + } + spin_unlock(&pr_tmpl->registration_lock); + + spin_lock(&pr_tmpl->aptpl_reg_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, + pr_reg_aptpl_list) { + list_del(&pr_reg->pr_reg_aptpl_list); + kfree(pr_reg->pr_aptpl_buf); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + } + spin_unlock(&pr_tmpl->aptpl_reg_lock); +} + +static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) +{ + return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + &tpg->tpg_group.cg_item); +} + +static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) +{ + configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + &tpg->tpg_group.cg_item); + + atomic_dec(&tpg->tpg_pr_ref_count); + smp_mb__after_atomic_dec(); +} + +static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) +{ + struct se_portal_group *tpg = nacl->se_tpg; + + if (nacl->dynamic_node_acl) + return 0; + + return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + &nacl->acl_group.cg_item); +} + +static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) +{ + struct se_portal_group *tpg = nacl->se_tpg; + + if (nacl->dynamic_node_acl) { + atomic_dec(&nacl->acl_pr_ref_count); + smp_mb__after_atomic_dec(); + return; + } + + configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + &nacl->acl_group.cg_item); + + atomic_dec(&nacl->acl_pr_ref_count); + smp_mb__after_atomic_dec(); +} + +static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) +{ + struct se_lun_acl *lun_acl = se_deve->se_lun_acl; + struct se_node_acl *nacl; + struct se_portal_group *tpg; + /* + * For nacl->dynamic_node_acl=1 + */ + if (!(lun_acl)) + return 0; + + nacl = lun_acl->se_lun_nacl; + tpg = nacl->se_tpg; + + return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + &lun_acl->se_lun_group.cg_item); +} + +static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) +{ + struct se_lun_acl *lun_acl = se_deve->se_lun_acl; + struct se_node_acl *nacl; + struct se_portal_group *tpg; + /* + * For nacl->dynamic_node_acl=1 + */ + if (!(lun_acl)) { + atomic_dec(&se_deve->pr_ref_count); + smp_mb__after_atomic_dec(); + return; + } + nacl = lun_acl->se_lun_nacl; + tpg = nacl->se_tpg; + + configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + &lun_acl->se_lun_group.cg_item); + + atomic_dec(&se_deve->pr_ref_count); + smp_mb__after_atomic_dec(); +} + +static int core_scsi3_decode_spec_i_port( + struct se_cmd *cmd, + struct se_portal_group *tpg, + unsigned char *l_isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct se_device *dev = 
SE_DEV(cmd); + struct se_port *tmp_port; + struct se_portal_group *dest_tpg = NULL, *tmp_tpg; + struct se_session *se_sess = SE_SESS(cmd); + struct se_node_acl *dest_node_acl = NULL; + struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; + struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; + struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; + struct list_head tid_dest_list; + struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; + struct target_core_fabric_ops *tmp_tf_ops; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; + char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; + u32 tpdl, tid_len = 0; + int ret, dest_local_nexus, prf_isid; + u32 dest_rtpi = 0; + + memset(dest_iport, 0, 64); + INIT_LIST_HEAD(&tid_dest_list); + + local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + /* + * Allocate a struct pr_transport_id_holder and setup the + * local_node_acl and local_se_deve pointers and add to + * struct list_head tid_dest_list for add registration + * processing in the loop of tid_dest_list below. + */ + tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); + if (!(tidh_new)) { + printk(KERN_ERR "Unable to allocate tidh_new\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + INIT_LIST_HEAD(&tidh_new->dest_list); + tidh_new->dest_tpg = tpg; + tidh_new->dest_node_acl = se_sess->se_node_acl; + tidh_new->dest_se_deve = local_se_deve; + + local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), + se_sess->se_node_acl, local_se_deve, l_isid, + sa_res_key, all_tg_pt, aptpl); + if (!(local_pr_reg)) { + kfree(tidh_new); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + tidh_new->dest_pr_reg = local_pr_reg; + /* + * The local I_T nexus does not hold any configfs dependances, + * so we set tid_h->dest_local_nexus=1 to prevent the + * configfs_undepend_item() calls in the tid_dest_list loops below. + */ + tidh_new->dest_local_nexus = 1; + list_add_tail(&tidh_new->dest_list, &tid_dest_list); + /* + * For a PERSISTENT RESERVE OUT specify initiator ports payload, + * first extract TransportID Parameter Data Length, and make sure + * the value matches up to the SCSI expected data transfer length. + */ + tpdl = (buf[24] & 0xff) << 24; + tpdl |= (buf[25] & 0xff) << 16; + tpdl |= (buf[26] & 0xff) << 8; + tpdl |= buf[27] & 0xff; + + if ((tpdl + 28) != cmd->data_length) { + printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" + " does not equal CDB data_length: %u\n", tpdl, + cmd->data_length); + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } + /* + * Start processing the received transport IDs using the + * receiving I_T Nexus portal's fabric dependent methods to + * obtain the SCSI Initiator Port/Device Identifiers. 
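The SPEC_I_PT parameter list carries a four-byte, big-endian TransportID Parameter Data Length at offset 24, and the code above requires tpdl plus the 28-byte fixed header to match the overall transfer length exactly. The self-contained sketch below reproduces just that check with an invented buffer; it is an illustration of the arithmetic, not the kernel routine itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Validate a SPEC_I_PT payload: bytes 24-27 hold the TransportID
 * Parameter Data Length, 28 bytes of header precede the TransportIDs. */
static int check_spec_i_pt_len(const unsigned char *buf, uint32_t data_length)
{
        uint32_t tpdl = ((uint32_t)buf[24] << 24) |
                        ((uint32_t)buf[25] << 16) |
                        ((uint32_t)buf[26] << 8)  |
                         (uint32_t)buf[27];

        if (tpdl + 28 != data_length) {
                fprintf(stderr, "illegal tpdl %u for data_length %u\n",
                        tpdl, data_length);
                return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char buf[64];

        memset(buf, 0, sizeof(buf));
        buf[27] = 36;                                   /* tpdl = 36 */
        printf("%d\n", check_spec_i_pt_len(buf, 64));   /* 0: 36 + 28 == 64 */
        printf("%d\n", check_spec_i_pt_len(buf, 60));   /* -1: mismatch */
        return 0;
}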
+ */ + ptr = &buf[28]; + + while (tpdl > 0) { + proto_ident = (ptr[0] & 0x0f); + dest_tpg = NULL; + + spin_lock(&dev->se_port_lock); + list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { + tmp_tpg = tmp_port->sep_tpg; + if (!(tmp_tpg)) + continue; + tmp_tf_ops = TPG_TFO(tmp_tpg); + if (!(tmp_tf_ops)) + continue; + if (!(tmp_tf_ops->get_fabric_proto_ident) || + !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) + continue; + /* + * Look for the matching proto_ident provided by + * the received TransportID + */ + tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg); + if (tmp_proto_ident != proto_ident) + continue; + dest_rtpi = tmp_port->sep_rtpi; + + i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( + tmp_tpg, (const char *)ptr, &tid_len, + &iport_ptr); + if (!(i_str)) + continue; + + atomic_inc(&tmp_tpg->tpg_pr_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock(&dev->se_port_lock); + + ret = core_scsi3_tpg_depend_item(tmp_tpg); + if (ret != 0) { + printk(KERN_ERR " core_scsi3_tpg_depend_item()" + " for tmp_tpg\n"); + atomic_dec(&tmp_tpg->tpg_pr_ref_count); + smp_mb__after_atomic_dec(); + ret = PYX_TRANSPORT_LU_COMM_FAILURE; + goto out; + } + /* + * Locate the desination initiator ACL to be registered + * from the decoded fabric module specific TransportID + * at *i_str. + */ + spin_lock_bh(&tmp_tpg->acl_node_lock); + dest_node_acl = __core_tpg_get_initiator_node_acl( + tmp_tpg, i_str); + if (dest_node_acl) { + atomic_inc(&dest_node_acl->acl_pr_ref_count); + smp_mb__after_atomic_inc(); + } + spin_unlock_bh(&tmp_tpg->acl_node_lock); + + if (!(dest_node_acl)) { + core_scsi3_tpg_undepend_item(tmp_tpg); + spin_lock(&dev->se_port_lock); + continue; + } + + ret = core_scsi3_nodeacl_depend_item(dest_node_acl); + if (ret != 0) { + printk(KERN_ERR "configfs_depend_item() failed" + " for dest_node_acl->acl_group\n"); + atomic_dec(&dest_node_acl->acl_pr_ref_count); + smp_mb__after_atomic_dec(); + core_scsi3_tpg_undepend_item(tmp_tpg); + ret = PYX_TRANSPORT_LU_COMM_FAILURE; + goto out; + } + + dest_tpg = tmp_tpg; + printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" + " %s Port RTPI: %hu\n", + TPG_TFO(dest_tpg)->get_fabric_name(), + dest_node_acl->initiatorname, dest_rtpi); + + spin_lock(&dev->se_port_lock); + break; + } + spin_unlock(&dev->se_port_lock); + + if (!(dest_tpg)) { + printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" + " dest_tpg\n"); + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } +#if 0 + printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" + " tid_len: %d for %s + %s\n", + TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, + tpdl, tid_len, i_str, iport_ptr); +#endif + if (tid_len > tpdl) { + printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" + " %u for Transport ID: %s\n", tid_len, ptr); + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_tpg); + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } + /* + * Locate the desintation struct se_dev_entry pointer for matching + * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus + * Target Port. 
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+ dest_rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ " from destination RTPI: %hu\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_rtpi);
+
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ " failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ " dest_se_deve mapped_lun: %u\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+ /*
+ * Skip any TransportIDs that already have a registration for
+ * this target port.
+ */
+ pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (pr_reg_e) {
+ core_scsi3_put_pr_reg(pr_reg_e);
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+ continue;
+ }
+ /*
+ * Allocate a struct pr_transport_id_holder and setup
+ * the dest_node_acl and dest_se_deve pointers for the
+ * loop below.
+ */
+ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+ GFP_KERNEL);
+ if (!(tidh_new)) {
+ printk(KERN_ERR "Unable to allocate tidh_new\n");
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ INIT_LIST_HEAD(&tidh_new->dest_list);
+ tidh_new->dest_tpg = dest_tpg;
+ tidh_new->dest_node_acl = dest_node_acl;
+ tidh_new->dest_se_deve = dest_se_deve;
+
+ /*
+ * Allocate, but do NOT add the registration for the
+ * TransportID referenced SCSI Initiator port. This is
+ * done because of the following from spc4r17 in section
+ * 6.14.3 wrt SPEC_I_PT:
+ *
+ * "If a registration fails for any initiator port (e.g., if the
+ * logical unit does not have enough resources available to
+ * hold the registration information), no registrations shall be
+ * made, and the command shall be terminated with
+ * CHECK CONDITION status."
+ *
+ * That means we call __core_scsi3_alloc_registration() here,
+ * and then call __core_scsi3_add_registration() in the
+ * 2nd loop which will never fail.
+ */
+ dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(dest_pr_reg)) {
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ kfree(tidh_new);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ tidh_new->dest_pr_reg = dest_pr_reg;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+
+ }
+ /*
+ * Go ahead and create registrations from tid_dest_list for the
+ * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+ * and dest_se_deve.
+ *
+ * The SA Reservation Key from the PROUT is set for the
+ * registration, and ALL_TG_PT is also passed.
ALL_TG_PT=1 + * means that the TransportID Initiator port will be + * registered on all of the target ports in the SCSI target device + * ALL_TG_PT=0 means the registration will only be for the + * SCSI target port the PROUT REGISTER with SPEC_I_PT=1 + * was received. + */ + list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { + dest_tpg = tidh->dest_tpg; + dest_node_acl = tidh->dest_node_acl; + dest_se_deve = tidh->dest_se_deve; + dest_pr_reg = tidh->dest_pr_reg; + dest_local_nexus = tidh->dest_local_nexus; + + list_del(&tidh->dest_list); + kfree(tidh); + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, + dest_pr_reg, 0, 0); + + printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" + " registered Transport ID for Node: %s%s Mapped LUN:" + " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), + dest_node_acl->initiatorname, (prf_isid) ? + &i_buf[0] : "", dest_se_deve->mapped_lun); + + if (dest_local_nexus) + continue; + + core_scsi3_lunacl_undepend_item(dest_se_deve); + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_tpg); + } + + return 0; +out: + /* + * For the failure case, release everything from tid_dest_list + * including *dest_pr_reg and the configfs dependances.. + */ + list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { + dest_tpg = tidh->dest_tpg; + dest_node_acl = tidh->dest_node_acl; + dest_se_deve = tidh->dest_se_deve; + dest_pr_reg = tidh->dest_pr_reg; + dest_local_nexus = tidh->dest_local_nexus; + + list_del(&tidh->dest_list); + kfree(tidh); + /* + * Release any extra ALL_TG_PT=1 registrations for + * the SPEC_I_PT=1 case. + */ + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &dest_pr_reg->pr_reg_atp_list, + pr_reg_atp_mem_list) { + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); + kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); + } + + kfree(dest_pr_reg->pr_aptpl_buf); + kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); + + if (dest_local_nexus) + continue; + + core_scsi3_lunacl_undepend_item(dest_se_deve); + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_tpg); + } + return ret; +} + +/* + * Called with struct se_device->dev_reservation_lock held + */ +static int __core_scsi3_update_aptpl_buf( + struct se_device *dev, + unsigned char *buf, + u32 pr_aptpl_buf_len, + int clear_aptpl_metadata) +{ + struct se_lun *lun; + struct se_portal_group *tpg; + struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct t10_pr_registration *pr_reg; + unsigned char tmp[512], isid_buf[32]; + ssize_t len = 0; + int reg_count = 0; + + memset(buf, 0, pr_aptpl_buf_len); + /* + * Called to clear metadata once APTPL has been deactivated. + */ + if (clear_aptpl_metadata) { + snprintf(buf, pr_aptpl_buf_len, + "No Registrations or Reservations\n"); + return 0; + } + /* + * Walk the registration list.. + */ + spin_lock(&T10_RES(su_dev)->registration_lock); + list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + pr_reg_list) { + + tmp[0] = '\0'; + isid_buf[0] = '\0'; + tpg = pr_reg->pr_reg_nacl->se_tpg; + lun = pr_reg->pr_reg_tg_pt_lun; + /* + * Write out any ISID value to APTPL metadata that was included + * in the original registration. 
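The APTPL buffer being built here is plain text: every registration becomes a small key=value record bracketed by PR_REG_START/PR_REG_END markers. The sketch below prints one such record with entirely made-up values, purely to show the shape of the metadata; the field names mirror the format strings in the code, nothing else is taken from a real system.

#include <stdio.h>

int main(void)
{
        char buf[512];

        /* Invented example values; only the field layout matters. */
        snprintf(buf, sizeof(buf),
                 "PR_REG_START: %d\n"
                 "initiator_fabric=%s\ninitiator_node=%s\n"
                 "sa_res_key=%llu\nres_holder=%d\n"
                 "res_all_tg_pt=%d\nmapped_lun=%u\n"
                 "target_fabric=%s\ntarget_node=%s\n"
                 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\n"
                 "PR_REG_END: %d\n",
                 0, "iscsi", "iqn.1994-05.com.example:initiator",
                 0x1234ULL, 0,
                 0, 0u,
                 "iscsi", "iqn.2003-01.com.example:target",
                 (unsigned short)1, (unsigned short)1, 0u,
                 0);
        fputs(buf, stdout);
        return 0;
}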
+ */ + if (pr_reg->isid_present_at_reg) + snprintf(isid_buf, 32, "initiator_sid=%s\n", + pr_reg->pr_reg_isid); + /* + * Include special metadata if the pr_reg matches the + * reservation holder. + */ + if (dev->dev_pr_res_holder == pr_reg) { + snprintf(tmp, 512, "PR_REG_START: %d" + "\ninitiator_fabric=%s\n" + "initiator_node=%s\n%s" + "sa_res_key=%llu\n" + "res_holder=1\nres_type=%02x\n" + "res_scope=%02x\nres_all_tg_pt=%d\n" + "mapped_lun=%u\n", reg_count, + TPG_TFO(tpg)->get_fabric_name(), + pr_reg->pr_reg_nacl->initiatorname, isid_buf, + pr_reg->pr_res_key, pr_reg->pr_res_type, + pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, + pr_reg->pr_res_mapped_lun); + } else { + snprintf(tmp, 512, "PR_REG_START: %d\n" + "initiator_fabric=%s\ninitiator_node=%s\n%s" + "sa_res_key=%llu\nres_holder=0\n" + "res_all_tg_pt=%d\nmapped_lun=%u\n", + reg_count, TPG_TFO(tpg)->get_fabric_name(), + pr_reg->pr_reg_nacl->initiatorname, isid_buf, + pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, + pr_reg->pr_res_mapped_lun); + } + + if ((len + strlen(tmp) > pr_aptpl_buf_len)) { + printk(KERN_ERR "Unable to update renaming" + " APTPL metadata\n"); + spin_unlock(&T10_RES(su_dev)->registration_lock); + return -1; + } + len += sprintf(buf+len, "%s", tmp); + + /* + * Include information about the associated SCSI target port. + */ + snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" + "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" + " %d\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_wwn(tpg), + TPG_TFO(tpg)->tpg_get_tag(tpg), + lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); + + if ((len + strlen(tmp) > pr_aptpl_buf_len)) { + printk(KERN_ERR "Unable to update renaming" + " APTPL metadata\n"); + spin_unlock(&T10_RES(su_dev)->registration_lock); + return -1; + } + len += sprintf(buf+len, "%s", tmp); + reg_count++; + } + spin_unlock(&T10_RES(su_dev)->registration_lock); + + if (!(reg_count)) + len += sprintf(buf+len, "No Registrations or Reservations"); + + return 0; +} + +static int core_scsi3_update_aptpl_buf( + struct se_device *dev, + unsigned char *buf, + u32 pr_aptpl_buf_len, + int clear_aptpl_metadata) +{ + int ret; + + spin_lock(&dev->dev_reservation_lock); + ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, + clear_aptpl_metadata); + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +/* + * Called with struct se_device->aptpl_file_mutex held + */ +static int __core_scsi3_write_aptpl_to_file( + struct se_device *dev, + unsigned char *buf, + u32 pr_aptpl_buf_len) +{ + struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; + struct file *file; + struct iovec iov[1]; + mm_segment_t old_fs; + int flags = O_RDWR | O_CREAT | O_TRUNC; + char path[512]; + int ret; + + memset(iov, 0, sizeof(struct iovec)); + memset(path, 0, 512); + + if (strlen(&wwn->unit_serial[0]) > 512) { + printk(KERN_ERR "WWN value for struct se_device does not fit" + " into path buffer\n"); + return -1; + } + + snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); + file = filp_open(path, flags, 0600); + if (IS_ERR(file) || !file || !file->f_dentry) { + printk(KERN_ERR "filp_open(%s) for APTPL metadata" + " failed\n", path); + return -1; + } + + iov[0].iov_base = &buf[0]; + if (!(pr_aptpl_buf_len)) + iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ + else + iov[0].iov_len = pr_aptpl_buf_len; + + old_fs = get_fs(); + set_fs(get_ds()); + ret = vfs_writev(file, &iov[0], 1, &file->f_pos); + set_fs(old_fs); + + if (ret < 0) { + printk("Error writing APTPL metadata file: 
%s\n", path); + filp_close(file, NULL); + return -1; + } + filp_close(file, NULL); + + return 0; +} + +static int core_scsi3_update_and_write_aptpl( + struct se_device *dev, + unsigned char *in_buf, + u32 in_pr_aptpl_buf_len) +{ + unsigned char null_buf[64], *buf; + u32 pr_aptpl_buf_len; + int ret, clear_aptpl_metadata = 0; + /* + * Can be called with a NULL pointer from PROUT service action CLEAR + */ + if (!(in_buf)) { + memset(null_buf, 0, 64); + buf = &null_buf[0]; + /* + * This will clear the APTPL metadata to: + * "No Registrations or Reservations" status + */ + pr_aptpl_buf_len = 64; + clear_aptpl_metadata = 1; + } else { + buf = in_buf; + pr_aptpl_buf_len = in_pr_aptpl_buf_len; + } + + ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, + clear_aptpl_metadata); + if (ret != 0) + return -1; + /* + * __core_scsi3_write_aptpl_to_file() will call strlen() + * on the passed buf to determine pr_aptpl_buf_len. + */ + ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); + if (ret != 0) + return -1; + + return ret; +} + +static int core_scsi3_emulate_pro_register( + struct se_cmd *cmd, + u64 res_key, + u64 sa_res_key, + int aptpl, + int all_tg_pt, + int spec_i_pt, + int ignore_key) +{ + struct se_session *se_sess = SE_SESS(cmd); + struct se_device *dev = SE_DEV(cmd); + struct se_dev_entry *se_deve; + struct se_lun *se_lun = SE_LUN(cmd); + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + /* Used for APTPL metadata w/ UNREGISTER */ + unsigned char *pr_aptpl_buf = NULL; + unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; + int pr_holder = 0, ret = 0, type; + + if (!(se_sess) || !(se_lun)) { + printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + se_tpg = se_sess->se_tpg; + se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + + if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + memset(&isid_buf[0], 0, PR_REG_ISID_LEN); + TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], + PR_REG_ISID_LEN); + isid_ptr = &isid_buf[0]; + } + /* + * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 + */ + pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); + if (!(pr_reg_e)) { + if (res_key) { + printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" + " for SA REGISTER, returning CONFLICT\n"); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * Do nothing but return GOOD status. + */ + if (!(sa_res_key)) + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + + if (!(spec_i_pt)) { + /* + * Perform the Service Action REGISTER on the Initiator + * Port Endpoint that the PRO was received from on the + * Logical Unit of the SCSI device server. 
+ */ + ret = core_scsi3_alloc_registration(SE_DEV(cmd), + se_sess->se_node_acl, se_deve, isid_ptr, + sa_res_key, all_tg_pt, aptpl, + ignore_key, 0); + if (ret != 0) { + printk(KERN_ERR "Unable to allocate" + " struct t10_pr_registration\n"); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + } else { + /* + * Register both the Initiator port that received + * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI + * TransportID from Parameter list and loop through + * fabric dependent parameter list while calling + * logic from of core_scsi3_alloc_registration() for + * each TransportID provided SCSI Initiator Port/Device + */ + ret = core_scsi3_decode_spec_i_port(cmd, se_tpg, + isid_ptr, sa_res_key, all_tg_pt, aptpl); + if (ret != 0) + return ret; + } + /* + * Nothing left to do for the APTPL=0 case. + */ + if (!(aptpl)) { + pr_tmpl->pr_aptpl_active = 0; + core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + printk("SPC-3 PR: Set APTPL Bit Deactivated for" + " REGISTER\n"); + return 0; + } + /* + * Locate the newly allocated local I_T Nexus *pr_reg, and + * update the APTPL metadata information using its + * preallocated *pr_reg->pr_aptpl_buf. + */ + pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), + se_sess->se_node_acl, se_sess); + + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &pr_reg->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) { + pr_tmpl->pr_aptpl_active = 1; + printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); + } + + core_scsi3_put_pr_reg(pr_reg); + return ret; + } else { + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = pr_reg_e; + type = pr_reg->pr_res_type; + + if (!(ignore_key)) { + if (res_key != pr_reg->pr_res_key) { + printk(KERN_ERR "SPC-3 PR REGISTER: Received" + " res_key: 0x%016Lx does not match" + " existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, + pr_reg->pr_res_key); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + } + if (spec_i_pt) { + printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" + " set while sa_res_key=0\n"); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + /* + * An existing ALL_TG_PT=1 registration being released + * must also set ALL_TG_PT=1 in the incoming PROUT. + */ + if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { + printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" + " registration exists, but ALL_TG_PT=1 bit not" + " present in received PROUT\n"); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + /* + * Allocate APTPL metadata buffer used for UNREGISTER ops + */ + if (aptpl) { + pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, + GFP_KERNEL); + if (!(pr_aptpl_buf)) { + printk(KERN_ERR "Unable to allocate" + " pr_aptpl_buf\n"); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + } + /* + * sa_res_key=0 Unregister Reservation Key for registered I_T + * Nexus sa_res_key=1 Change Reservation Key for registered I_T + * Nexus. + */ + if (!(sa_res_key)) { + pr_holder = core_scsi3_check_implict_release( + SE_DEV(cmd), pr_reg); + if (pr_holder < 0) { + kfree(pr_aptpl_buf); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + + spin_lock(&pr_tmpl->registration_lock); + /* + * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port + * and matching pr_res_key. 
+ */ + if (pr_reg->pr_reg_all_tg_pt) { + list_for_each_entry_safe(pr_reg_p, pr_reg_tmp, + &pr_tmpl->registration_list, + pr_reg_list) { + + if (!(pr_reg_p->pr_reg_all_tg_pt)) + continue; + + if (pr_reg_p->pr_res_key != res_key) + continue; + + if (pr_reg == pr_reg_p) + continue; + + if (strcmp(pr_reg->pr_reg_nacl->initiatorname, + pr_reg_p->pr_reg_nacl->initiatorname)) + continue; + + __core_scsi3_free_registration(dev, + pr_reg_p, NULL, 0); + } + } + /* + * Release the calling I_T Nexus registration now.. + */ + __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, + NULL, 1); + /* + * From spc4r17, section 5.7.11.3 Unregistering + * + * If the persistent reservation is a registrants only + * type, the device server shall establish a unit + * attention condition for the initiator port associated + * with every registered I_T nexus except for the I_T + * nexus on which the PERSISTENT RESERVE OUT command was + * received, with the additional sense code set to + * RESERVATIONS RELEASED. + */ + if (pr_holder && + ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || + (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) { + list_for_each_entry(pr_reg_p, + &pr_tmpl->registration_list, + pr_reg_list) { + + core_scsi3_ua_allocate( + pr_reg_p->pr_reg_nacl, + pr_reg_p->pr_res_mapped_lun, + 0x2A, + ASCQ_2AH_RESERVATIONS_RELEASED); + } + } + spin_unlock(&pr_tmpl->registration_lock); + + if (!(aptpl)) { + pr_tmpl->pr_aptpl_active = 0; + core_scsi3_update_and_write_aptpl(dev, NULL, 0); + printk("SPC-3 PR: Set APTPL Bit Deactivated" + " for UNREGISTER\n"); + return 0; + } + + ret = core_scsi3_update_and_write_aptpl(dev, + &pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) { + pr_tmpl->pr_aptpl_active = 1; + printk("SPC-3 PR: Set APTPL Bit Activated" + " for UNREGISTER\n"); + } + + kfree(pr_aptpl_buf); + return ret; + } else { + /* + * Increment PRgeneration counter for struct se_device" + * upon a successful REGISTER, see spc4r17 section 6.3.2 + * READ_KEYS service action. + */ + pr_reg->pr_res_generation = core_scsi3_pr_generation( + SE_DEV(cmd)); + pr_reg->pr_res_key = sa_res_key; + printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" + " Key for %s to: 0x%016Lx PRgeneration:" + " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), + (ignore_key) ? 
"_AND_IGNORE_EXISTING_KEY" : "", + pr_reg->pr_reg_nacl->initiatorname, + pr_reg->pr_res_key, pr_reg->pr_res_generation); + + if (!(aptpl)) { + pr_tmpl->pr_aptpl_active = 0; + core_scsi3_update_and_write_aptpl(dev, NULL, 0); + core_scsi3_put_pr_reg(pr_reg); + printk("SPC-3 PR: Set APTPL Bit Deactivated" + " for REGISTER\n"); + return 0; + } + + ret = core_scsi3_update_and_write_aptpl(dev, + &pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) { + pr_tmpl->pr_aptpl_active = 1; + printk("SPC-3 PR: Set APTPL Bit Activated" + " for REGISTER\n"); + } + + kfree(pr_aptpl_buf); + core_scsi3_put_pr_reg(pr_reg); + } + } + return 0; +} + +unsigned char *core_scsi3_pr_dump_type(int type) +{ + switch (type) { + case PR_TYPE_WRITE_EXCLUSIVE: + return "Write Exclusive Access"; + case PR_TYPE_EXCLUSIVE_ACCESS: + return "Exclusive Access"; + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + return "Write Exclusive Access, Registrants Only"; + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + return "Exclusive Access, Registrants Only"; + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + return "Write Exclusive Access, All Registrants"; + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + return "Exclusive Access, All Registrants"; + default: + break; + } + + return "Unknown SPC-3 PR Type"; +} + +static int core_scsi3_pro_reserve( + struct se_cmd *cmd, + struct se_device *dev, + int type, + int scope, + u64 res_key) +{ + struct se_session *se_sess = SE_SESS(cmd); + struct se_dev_entry *se_deve; + struct se_lun *se_lun = SE_LUN(cmd); + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg, *pr_res_holder; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + char i_buf[PR_REG_ISID_ID_LEN]; + int ret, prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + + if (!(se_sess) || !(se_lun)) { + printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + se_tpg = se_sess->se_tpg; + se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + se_sess); + if (!(pr_reg)) { + printk(KERN_ERR "SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for RESERVE\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * An application client creates a persistent reservation by issuing + * a PERSISTENT RESERVE OUT command with RESERVE service action through + * a registered I_T nexus with the following parameters: + * a) RESERVATION KEY set to the value of the reservation key that is + * registered with the logical unit for the I_T nexus; and + */ + if (res_key != pr_reg->pr_res_key) { + printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" + " does not match existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, pr_reg->pr_res_key); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * From above: + * b) TYPE field and SCOPE field set to the persistent reservation + * being created. + * + * Only one persistent reservation is allowed at a time per logical unit + * and that persistent reservation has a scope of LU_SCOPE. 
+ */ + if (scope != PR_SCOPE_LU_SCOPE) { + printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + /* + * See if we have an existing PR reservation holder pointer at + * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration + * *pr_res_holder. + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if ((pr_res_holder)) { + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * If the device server receives a PERSISTENT RESERVE OUT + * command from an I_T nexus other than a persistent reservation + * holder (see 5.7.10) that attempts to create a persistent + * reservation when a persistent reservation already exists for + * the logical unit, then the command shall be completed with + * RESERVATION CONFLICT status. + */ + if (pr_res_holder != pr_reg) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + " [%s]: %s while reservation already held by" + " [%s]: %s, returning RESERVATION_CONFLICT\n", + CMD_TFO(cmd)->get_fabric_name(), + se_sess->se_node_acl->initiatorname, + TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_holder->pr_reg_nacl->initiatorname); + + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * If a persistent reservation holder attempts to modify the + * type or scope of an existing persistent reservation, the + * command shall be completed with RESERVATION CONFLICT status. + */ + if ((pr_res_holder->pr_res_type != type) || + (pr_res_holder->pr_res_scope != scope)) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + " [%s]: %s trying to change TYPE and/or SCOPE," + " while reservation already held by [%s]: %s," + " returning RESERVATION_CONFLICT\n", + CMD_TFO(cmd)->get_fabric_name(), + se_sess->se_node_acl->initiatorname, + TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_holder->pr_reg_nacl->initiatorname); + + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * If the device server receives a PERSISTENT RESERVE OUT + * command with RESERVE service action where the TYPE field and + * the SCOPE field contain the same values as the existing type + * and scope from a persistent reservation holder, it shall not + * make any change to the existing persistent reservation and + * shall completethe command with GOOD status. + */ + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + } + /* + * Otherwise, our *pr_reg becomes the PR reservation holder for said + * TYPE/SCOPE. Also set the received scope and type in *pr_reg. + */ + pr_reg->pr_res_scope = scope; + pr_reg->pr_res_type = type; + pr_reg->pr_res_holder = 1; + dev->dev_pr_res_holder = pr_reg; + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" + " reservation holder TYPE: %s ALL_TG_PT: %d\n", + CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), + (pr_reg->pr_reg_all_tg_pt) ? 
1 : 0); + printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", + CMD_TFO(cmd)->get_fabric_name(), + se_sess->se_node_acl->initiatorname, + (prf_isid) ? &i_buf[0] : ""); + spin_unlock(&dev->dev_reservation_lock); + + if (pr_tmpl->pr_aptpl_active) { + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &pr_reg->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) + printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + " for RESERVE\n"); + } + + core_scsi3_put_pr_reg(pr_reg); + return 0; +} + +static int core_scsi3_emulate_pro_reserve( + struct se_cmd *cmd, + int type, + int scope, + u64 res_key) +{ + struct se_device *dev = cmd->se_dev; + int ret = 0; + + switch (type) { + case PR_TYPE_WRITE_EXCLUSIVE: + case PR_TYPE_EXCLUSIVE_ACCESS: + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); + break; + default: + printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" + " 0x%02x\n", type); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + + return ret; +} + +/* + * Called with struct se_device->dev_reservation_lock held. + */ +static void __core_scsi3_complete_pro_release( + struct se_device *dev, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int explict) +{ + struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + /* + * Go ahead and release the current PR reservation holder. + */ + dev->dev_pr_res_holder = NULL; + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" + " reservation holder TYPE: %s ALL_TG_PT: %d\n", + tfo->get_fabric_name(), (explict) ? "explict" : "implict", + core_scsi3_pr_dump_type(pr_reg->pr_res_type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", + tfo->get_fabric_name(), se_nacl->initiatorname, + (prf_isid) ? 
&i_buf[0] : ""); + /* + * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE + */ + pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0; +} + +static int core_scsi3_emulate_pro_release( + struct se_cmd *cmd, + int type, + int scope, + u64 res_key) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *se_sess = SE_SESS(cmd); + struct se_lun *se_lun = SE_LUN(cmd); + struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + int ret, all_reg = 0; + + if (!(se_sess) || !(se_lun)) { + printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); + if (!(pr_reg)) { + printk(KERN_ERR "SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for RELEASE\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * From spc4r17 Section 5.7.11.2 Releasing: + * + * If there is no persistent reservation or in response to a persistent + * reservation release request from a registered I_T nexus that is not a + * persistent reservation holder (see 5.7.10), the device server shall + * do the following: + * + * a) Not release the persistent reservation, if any; + * b) Not remove any registrations; and + * c) Complete the command with GOOD status. + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (!(pr_res_holder)) { + /* + * No persistent reservation, return GOOD status. + */ + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + } + if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) + all_reg = 1; + + if ((all_reg == 0) && (pr_res_holder != pr_reg)) { + /* + * Non 'All Registrants' PR Type cases.. + * Release request from a registered I_T nexus that is not a + * persistent reservation holder. return GOOD status. + */ + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + } + /* + * From spc4r17 Section 5.7.11.2 Releasing: + * + * Only the persistent reservation holder (see 5.7.10) is allowed to + * release a persistent reservation. + * + * An application client releases the persistent reservation by issuing + * a PERSISTENT RESERVE OUT command with RELEASE service action through + * an I_T nexus that is a persistent reservation holder with the + * following parameters: + * + * a) RESERVATION KEY field set to the value of the reservation key + * that is registered with the logical unit for the I_T nexus; + */ + if (res_key != pr_reg->pr_res_key) { + printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" + " does not match existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, pr_reg->pr_res_key); + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * From spc4r17 Section 5.7.11.2 Releasing and above: + * + * b) TYPE field and SCOPE field set to match the persistent + * reservation being released. 
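+ *
+ * As an illustrative example (values chosen for illustration, not
+ * spec text): releasing a Write Exclusive, LU-scope reservation
+ * requires the PROUT RELEASE to carry type 0x01
+ * (PR_TYPE_WRITE_EXCLUSIVE) and scope 0x00 (PR_SCOPE_LU_SCOPE)
+ * together with the holder's registered reservation key; a TYPE or
+ * SCOPE mismatch is rejected below with RESERVATION CONFLICT, just
+ * like the reservation key mismatch handled above.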
+ */ + if ((pr_res_holder->pr_res_type != type) || + (pr_res_holder->pr_res_scope != scope)) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" + " reservation from [%s]: %s with different TYPE " + "and/or SCOPE while reservation already held by" + " [%s]: %s, returning RESERVATION_CONFLICT\n", + CMD_TFO(cmd)->get_fabric_name(), + se_sess->se_node_acl->initiatorname, + TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_holder->pr_reg_nacl->initiatorname); + + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * In response to a persistent reservation release request from the + * persistent reservation holder the device server shall perform a + * release by doing the following as an uninterrupted series of actions: + * a) Release the persistent reservation; + * b) Not remove any registration(s); + * c) If the released persistent reservation is a registrants only type + * or all registrants type persistent reservation, + * the device server shall establish a unit attention condition for + * the initiator port associated with every regis- + * tered I_T nexus other than I_T nexus on which the PERSISTENT + * RESERVE OUT command with RELEASE service action was received, + * with the additional sense code set to RESERVATIONS RELEASED; and + * d) If the persistent reservation is of any other type, the device + * server shall not establish a unit attention condition. + */ + __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, + pr_reg, 1); + + spin_unlock(&dev->dev_reservation_lock); + + if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) && + (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) && + (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && + (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { + /* + * If no UNIT ATTENTION conditions will be established for + * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS + * go ahead and check for APTPL=1 update+write below + */ + goto write_aptpl; + } + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, + pr_reg_list) { + /* + * Do not establish a UNIT ATTENTION condition + * for the calling I_T Nexus + */ + if (pr_reg_p == pr_reg) + continue; + + core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl, + pr_reg_p->pr_res_mapped_lun, + 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); + } + spin_unlock(&pr_tmpl->registration_lock); + +write_aptpl: + if (pr_tmpl->pr_aptpl_active) { + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &pr_reg->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) + printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); + } + + core_scsi3_put_pr_reg(pr_reg); + return 0; +} + +static int core_scsi3_emulate_pro_clear( + struct se_cmd *cmd, + u64 res_key) +{ + struct se_device *dev = cmd->se_dev; + struct se_node_acl *pr_reg_nacl; + struct se_session *se_sess = SE_SESS(cmd); + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; + u32 pr_res_mapped_lun = 0; + int calling_it_nexus = 0; + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), + se_sess->se_node_acl, se_sess); + if (!(pr_reg_n)) { + printk(KERN_ERR "SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for CLEAR\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * From spc4r17 section 5.7.11.6, Clearing: + * 
+ * Any application client may release the persistent reservation and + * remove all registrations from a device server by issuing a + * PERSISTENT RESERVE OUT command with CLEAR service action through a + * registered I_T nexus with the following parameter: + * + * a) RESERVATION KEY field set to the value of the reservation key + * that is registered with the logical unit for the I_T nexus. + */ + if (res_key != pr_reg_n->pr_res_key) { + printk(KERN_ERR "SPC-3 PR REGISTER: Received" + " res_key: 0x%016Lx does not match" + " existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); + core_scsi3_put_pr_reg(pr_reg_n); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * a) Release the persistent reservation, if any; + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + __core_scsi3_complete_pro_release(dev, pr_res_nacl, + pr_res_holder, 0); + } + spin_unlock(&dev->dev_reservation_lock); + /* + * b) Remove all registration(s) (see spc4r17 5.7.7); + */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + pr_reg_nacl = pr_reg->pr_reg_nacl; + pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; + __core_scsi3_free_registration(dev, pr_reg, NULL, + calling_it_nexus); + /* + * e) Establish a unit attention condition for the initiator + * port associated with every registered I_T nexus other + * than the I_T nexus on which the PERSISTENT RESERVE OUT + * command with CLEAR service action was received, with the + * additional sense code set to RESERVATIONS PREEMPTED. + */ + if (!(calling_it_nexus)) + core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, + 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); + } + spin_unlock(&pr_tmpl->registration_lock); + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", + CMD_TFO(cmd)->get_fabric_name()); + + if (pr_tmpl->pr_aptpl_active) { + core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + " for CLEAR\n"); + } + + core_scsi3_pr_generation(dev); + return 0; +} + +/* + * Called with struct se_device->dev_reservation_lock held. + */ +static void __core_scsi3_complete_pro_preempt( + struct se_device *dev, + struct t10_pr_registration *pr_reg, + struct list_head *preempt_and_abort_list, + int type, + int scope, + int abort) +{ + struct se_node_acl *nacl = pr_reg->pr_reg_nacl; + struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + char i_buf[PR_REG_ISID_ID_LEN]; + int prf_isid; + + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + PR_REG_ISID_ID_LEN); + /* + * Do an implict RELEASE of the existing reservation. + */ + if (dev->dev_pr_res_holder) + __core_scsi3_complete_pro_release(dev, nacl, + dev->dev_pr_res_holder, 0); + + dev->dev_pr_res_holder = pr_reg; + pr_reg->pr_res_holder = 1; + pr_reg->pr_res_type = type; + pr_reg->pr_res_scope = scope; + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" + " reservation holder TYPE: %s ALL_TG_PT: %d\n", + tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", + core_scsi3_pr_dump_type(type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", + tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", + nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); + /* + * For PREEMPT_AND_ABORT, add the preempting reservation's + * struct t10_pr_registration to the list that will be compared + * against received CDBs.. + */ + if (preempt_and_abort_list) + list_add_tail(&pr_reg->pr_reg_abort_list, + preempt_and_abort_list); +} + +static void core_scsi3_release_preempt_and_abort( + struct list_head *preempt_and_abort_list, + struct t10_pr_registration *pr_reg_holder) +{ + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + + list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, + pr_reg_abort_list) { + + list_del(&pr_reg->pr_reg_abort_list); + if (pr_reg_holder == pr_reg) + continue; + if (pr_reg->pr_res_holder) { + printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); + continue; + } + + pr_reg->pr_reg_deve = NULL; + pr_reg->pr_reg_nacl = NULL; + kfree(pr_reg->pr_aptpl_buf); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + } +} + +int core_scsi3_check_cdb_abort_and_preempt( + struct list_head *preempt_and_abort_list, + struct se_cmd *cmd) +{ + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + + list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, + pr_reg_abort_list) { + if (pr_reg->pr_res_key == cmd->pr_res_key) + return 0; + } + + return 1; +} + +static int core_scsi3_pro_preempt( + struct se_cmd *cmd, + int type, + int scope, + u64 res_key, + u64 sa_res_key, + int abort) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_dev_entry *se_deve; + struct se_node_acl *pr_reg_nacl; + struct se_session *se_sess = SE_SESS(cmd); + struct list_head preempt_and_abort_list; + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + u32 pr_res_mapped_lun = 0; + int all_reg = 0, calling_it_nexus = 0, released_regs = 0; + int prh_type = 0, prh_scope = 0, ret; + + if (!(se_sess)) + return PYX_TRANSPORT_LU_COMM_FAILURE; + + se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + se_sess); + if (!(pr_reg_n)) { + printk(KERN_ERR "SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for PREEMPT%s\n", + (abort) ? "_AND_ABORT" : ""); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + if (pr_reg_n->pr_res_key != res_key) { + core_scsi3_put_pr_reg(pr_reg_n); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + if (scope != PR_SCOPE_LU_SCOPE) { + printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + core_scsi3_put_pr_reg(pr_reg_n); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + INIT_LIST_HEAD(&preempt_and_abort_list); + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder && + ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) + all_reg = 1; + + if (!(all_reg) && !(sa_res_key)) { + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg_n); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + /* + * From spc4r17, section 5.7.11.4.4 Removing Registrations: + * + * If the SERVICE ACTION RESERVATION KEY field does not identify a + * persistent reservation holder or there is no persistent reservation + * holder (i.e., there is no persistent reservation), then the device + * server shall perform a preempt by doing the following in an + * uninterrupted series of actions. (See below..) 
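+ *
+ * A worked example for illustration only (not from the spec): with
+ * registrations holding keys 0xA, 0xB and 0xC, no reservation held,
+ * and the 0xA nexus sending PREEMPT with a SERVICE ACTION RESERVATION
+ * KEY of 0xB, only the 0xB registration is removed below and its
+ * initiator port receives a REGISTRATIONS PREEMPTED unit attention;
+ * the 0xA and 0xC registrations are left untouched.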
+ */ + if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { + /* + * No existing or SA Reservation Key matching reservations.. + * + * PROUT SA PREEMPT with All Registrant type reservations are + * allowed to be processed without a matching SA Reservation Key + */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + /* + * Removing of registrations in non all registrants + * type reservations without a matching SA reservation + * key. + * + * a) Remove the registrations for all I_T nexuses + * specified by the SERVICE ACTION RESERVATION KEY + * field; + * b) Ignore the contents of the SCOPE and TYPE fields; + * c) Process tasks as defined in 5.7.1; and + * d) Establish a unit attention condition for the + * initiator port associated with every I_T nexus + * that lost its registration other than the I_T + * nexus on which the PERSISTENT RESERVE OUT command + * was received, with the additional sense code set + * to REGISTRATIONS PREEMPTED. + */ + if (!(all_reg)) { + if (pr_reg->pr_res_key != sa_res_key) + continue; + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + pr_reg_nacl = pr_reg->pr_reg_nacl; + pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; + __core_scsi3_free_registration(dev, pr_reg, + (abort) ? &preempt_and_abort_list : + NULL, calling_it_nexus); + released_regs++; + } else { + /* + * Case for any existing all registrants type + * reservation, follow logic in spc4r17 section + * 5.7.11.4 Preempting, Table 52 and Figure 7. + * + * For a ZERO SA Reservation key, release + * all other registrations and do an implict + * release of active persistent reservation. + * + * For a non-ZERO SA Reservation key, only + * release the matching reservation key from + * registrations. + */ + if ((sa_res_key) && + (pr_reg->pr_res_key != sa_res_key)) + continue; + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + if (calling_it_nexus) + continue; + + pr_reg_nacl = pr_reg->pr_reg_nacl; + pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; + __core_scsi3_free_registration(dev, pr_reg, + (abort) ? &preempt_and_abort_list : + NULL, 0); + released_regs++; + } + if (!(calling_it_nexus)) + core_scsi3_ua_allocate(pr_reg_nacl, + pr_res_mapped_lun, 0x2A, + ASCQ_2AH_RESERVATIONS_PREEMPTED); + } + spin_unlock(&pr_tmpl->registration_lock); + /* + * If a PERSISTENT RESERVE OUT with a PREEMPT service action or + * a PREEMPT AND ABORT service action sets the SERVICE ACTION + * RESERVATION KEY field to a value that does not match any + * registered reservation key, then the device server shall + * complete the command with RESERVATION CONFLICT status. + */ + if (!(released_regs)) { + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg_n); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * For an existing all registrants type reservation + * with a zero SA rservation key, preempt the existing + * reservation with the new PR type and scope. + */ + if (pr_res_holder && all_reg && !(sa_res_key)) { + __core_scsi3_complete_pro_preempt(dev, pr_reg_n, + (abort) ? 
&preempt_and_abort_list : NULL, + type, scope, abort); + + if (abort) + core_scsi3_release_preempt_and_abort( + &preempt_and_abort_list, pr_reg_n); + } + spin_unlock(&dev->dev_reservation_lock); + + if (pr_tmpl->pr_aptpl_active) { + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &pr_reg_n->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) + printk(KERN_INFO "SPC-3 PR: Updated APTPL" + " metadata for PREEMPT%s\n", (abort) ? + "_AND_ABORT" : ""); + } + + core_scsi3_put_pr_reg(pr_reg_n); + core_scsi3_pr_generation(SE_DEV(cmd)); + return 0; + } + /* + * The PREEMPTing SA reservation key matches that of the + * existing persistent reservation, first, we check if + * we are preempting our own reservation. + * From spc4r17, section 5.7.11.4.3 Preempting + * persistent reservations and registration handling + * + * If an all registrants persistent reservation is not + * present, it is not an error for the persistent + * reservation holder to preempt itself (i.e., a + * PERSISTENT RESERVE OUT with a PREEMPT service action + * or a PREEMPT AND ABORT service action with the + * SERVICE ACTION RESERVATION KEY value equal to the + * persistent reservation holder's reservation key that + * is received from the persistent reservation holder). + * In that case, the device server shall establish the + * new persistent reservation and maintain the + * registration. + */ + prh_type = pr_res_holder->pr_res_type; + prh_scope = pr_res_holder->pr_res_scope; + /* + * If the SERVICE ACTION RESERVATION KEY field identifies a + * persistent reservation holder (see 5.7.10), the device + * server shall perform a preempt by doing the following as + * an uninterrupted series of actions: + * + * a) Release the persistent reservation for the holder + * identified by the SERVICE ACTION RESERVATION KEY field; + */ + if (pr_reg_n != pr_res_holder) + __core_scsi3_complete_pro_release(dev, + pr_res_holder->pr_reg_nacl, + dev->dev_pr_res_holder, 0); + /* + * b) Remove the registrations for all I_T nexuses identified + * by the SERVICE ACTION RESERVATION KEY field, except the + * I_T nexus that is being used for the PERSISTENT RESERVE + * OUT command. If an all registrants persistent reservation + * is present and the SERVICE ACTION RESERVATION KEY field + * is set to zero, then all registrations shall be removed + * except for that of the I_T nexus that is being used for + * the PERSISTENT RESERVE OUT command; + */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + if (calling_it_nexus) + continue; + + if (pr_reg->pr_res_key != sa_res_key) + continue; + + pr_reg_nacl = pr_reg->pr_reg_nacl; + pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; + __core_scsi3_free_registration(dev, pr_reg, + (abort) ? &preempt_and_abort_list : NULL, + calling_it_nexus); + /* + * e) Establish a unit attention condition for the initiator + * port associated with every I_T nexus that lost its + * persistent reservation and/or registration, with the + * additional sense code set to REGISTRATIONS PREEMPTED; + */ + core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, + ASCQ_2AH_RESERVATIONS_PREEMPTED); + } + spin_unlock(&pr_tmpl->registration_lock); + /* + * c) Establish a persistent reservation for the preempting + * I_T nexus using the contents of the SCOPE and TYPE fields; + */ + __core_scsi3_complete_pro_preempt(dev, pr_reg_n, + (abort) ? 
&preempt_and_abort_list : NULL, + type, scope, abort); + /* + * d) Process tasks as defined in 5.7.1; + * e) See above.. + * f) If the type or scope has changed, then for every I_T nexus + * whose reservation key was not removed, except for the I_T + * nexus on which the PERSISTENT RESERVE OUT command was + * received, the device server shall establish a unit + * attention condition for the initiator port associated with + * that I_T nexus, with the additional sense code set to + * RESERVATIONS RELEASED. If the type or scope have not + * changed, then no unit attention condition(s) shall be + * established for this reason. + */ + if ((prh_type != type) || (prh_scope != scope)) { + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + if (calling_it_nexus) + continue; + + core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, + pr_reg->pr_res_mapped_lun, 0x2A, + ASCQ_2AH_RESERVATIONS_RELEASED); + } + spin_unlock(&pr_tmpl->registration_lock); + } + spin_unlock(&dev->dev_reservation_lock); + /* + * Call LUN_RESET logic upon list of struct t10_pr_registration, + * All received CDBs for the matching existing reservation and + * registrations undergo ABORT_TASK logic. + * + * From there, core_scsi3_release_preempt_and_abort() will + * release every registration in the list (which have already + * been removed from the primary pr_reg list), except the + * new persistent reservation holder, the calling Initiator Port. + */ + if (abort) { + core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd); + core_scsi3_release_preempt_and_abort(&preempt_and_abort_list, + pr_reg_n); + } + + if (pr_tmpl->pr_aptpl_active) { + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &pr_reg_n->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) + printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" + "%s\n", (abort) ? "_AND_ABORT" : ""); + } + + core_scsi3_put_pr_reg(pr_reg_n); + core_scsi3_pr_generation(SE_DEV(cmd)); + return 0; +} + +static int core_scsi3_emulate_pro_preempt( + struct se_cmd *cmd, + int type, + int scope, + u64 res_key, + u64 sa_res_key, + int abort) +{ + int ret = 0; + + switch (type) { + case PR_TYPE_WRITE_EXCLUSIVE: + case PR_TYPE_EXCLUSIVE_ACCESS: + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + ret = core_scsi3_pro_preempt(cmd, type, scope, + res_key, sa_res_key, abort); + break; + default: + printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" + " Type: 0x%02x\n", (abort) ? 
"_AND_ABORT" : "", type); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + + return ret; +} + + +static int core_scsi3_emulate_pro_register_and_move( + struct se_cmd *cmd, + u64 res_key, + u64 sa_res_key, + int aptpl, + int unreg) +{ + struct se_session *se_sess = SE_SESS(cmd); + struct se_device *dev = SE_DEV(cmd); + struct se_dev_entry *se_deve, *dest_se_deve = NULL; + struct se_lun *se_lun = SE_LUN(cmd); + struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; + struct se_port *se_port; + struct se_portal_group *se_tpg, *dest_se_tpg = NULL; + struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; + struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; + struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *initiator_str; + char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; + u32 tid_len, tmp_tid_len; + int new_reg = 0, type, scope, ret, matching_iname, prf_isid; + unsigned short rtpi; + unsigned char proto_ident; + + if (!(se_sess) || !(se_lun)) { + printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + memset(dest_iport, 0, 64); + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + se_tpg = se_sess->se_tpg; + tf_ops = TPG_TFO(se_tpg); + se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + /* + * Follow logic from spc4r17 Section 5.7.8, Table 50 -- + * Register behaviors for a REGISTER AND MOVE service action + * + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + se_sess); + if (!(pr_reg)) { + printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" + " *pr_reg for REGISTER_AND_MOVE\n"); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * The provided reservation key much match the existing reservation key + * provided during this initiator's I_T nexus registration. + */ + if (res_key != pr_reg->pr_res_key) { + printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" + " res_key: 0x%016Lx does not match existing SA REGISTER" + " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + /* + * The service active reservation key needs to be non zero + */ + if (!(sa_res_key)) { + printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" + " sa_res_key\n"); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + /* + * Determine the Relative Target Port Identifier where the reservation + * will be moved to for the TransportID containing SCSI initiator WWN + * information. 
+ */ + rtpi = (buf[18] & 0xff) << 8; + rtpi |= buf[19] & 0xff; + tid_len = (buf[20] & 0xff) << 24; + tid_len |= (buf[21] & 0xff) << 16; + tid_len |= (buf[22] & 0xff) << 8; + tid_len |= buf[23] & 0xff; + + if ((tid_len + 24) != cmd->data_length) { + printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" + " does not equal CDB data_length: %u\n", tid_len, + cmd->data_length); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + + spin_lock(&dev->se_port_lock); + list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) { + if (se_port->sep_rtpi != rtpi) + continue; + dest_se_tpg = se_port->sep_tpg; + if (!(dest_se_tpg)) + continue; + dest_tf_ops = TPG_TFO(dest_se_tpg); + if (!(dest_tf_ops)) + continue; + + atomic_inc(&dest_se_tpg->tpg_pr_ref_count); + smp_mb__after_atomic_inc(); + spin_unlock(&dev->se_port_lock); + + ret = core_scsi3_tpg_depend_item(dest_se_tpg); + if (ret != 0) { + printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" + " for dest_se_tpg\n"); + atomic_dec(&dest_se_tpg->tpg_pr_ref_count); + smp_mb__after_atomic_dec(); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + + spin_lock(&dev->se_port_lock); + break; + } + spin_unlock(&dev->se_port_lock); + + if (!(dest_se_tpg) || (!dest_tf_ops)) { + printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + " fabric ops from Relative Target Port Identifier:" + " %hu\n", rtpi); + core_scsi3_put_pr_reg(pr_reg); + return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + } + proto_ident = (buf[24] & 0x0f); +#if 0 + printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" + " 0x%02x\n", proto_ident); +#endif + if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { + printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" + " proto_ident: 0x%02x does not match ident: 0x%02x" + " from fabric: %s\n", proto_ident, + dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), + dest_tf_ops->get_fabric_name()); + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } + if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { + printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" + " containg a valid tpg_parse_pr_out_transport_id" + " function pointer\n"); + ret = PYX_TRANSPORT_LU_COMM_FAILURE; + goto out; + } + initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, + (const char *)&buf[24], &tmp_tid_len, &iport_ptr); + if (!(initiator_str)) { + printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + " initiator_str from Transport ID\n"); + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } + + printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" + " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? + "port" : "device", initiator_str, (iport_ptr != NULL) ? + iport_ptr : ""); + /* + * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service + * action specifies a TransportID that is the same as the initiator port + * of the I_T nexus for the command received, then the command shall + * be terminated with CHECK CONDITION status, with the sense key set to + * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD + * IN PARAMETER LIST. + */ + pr_reg_nacl = pr_reg->pr_reg_nacl; + matching_iname = (!strcmp(initiator_str, + pr_reg_nacl->initiatorname)) ? 
1 : 0;
+ if (!(matching_iname))
+ goto after_iport_check;
+
+ if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ " matches: %s on received I_T Nexus\n", initiator_str,
+ pr_reg_nacl->initiatorname);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ " matches: %s %s on received I_T Nexus\n",
+ initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_isid);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+after_iport_check:
+ /*
+ * Locate the destination struct se_node_acl from the received Transport ID
+ */
+ spin_lock_bh(&dest_se_tpg->acl_node_lock);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+ initiator_str);
+ if (dest_node_acl) {
+ atomic_inc(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ }
+ spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+ if (!(dest_node_acl)) {
+ printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ " TransportID%s\n", dest_tf_ops->get_fabric_name(),
+ initiator_str);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+ if (ret != 0) {
+ printk(KERN_ERR "core_scsi3_nodeacl_depend_item() failed"
+ " for dest_node_acl\n");
+ atomic_dec(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_node_acl = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+ dest_node_acl->initiatorname);
+#endif
+ /*
+ * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+ * PORT IDENTIFIER.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_se_deve = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ " ACL for dest_se_deve->mapped_lun: %u\n",
+ dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+ dest_se_deve->mapped_lun);
+#endif
+ /*
+ * A persistent reservation needs to already exist in order to
+ * successfully complete the REGISTER_AND_MOVE service action..
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ " currently held\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ goto out;
+ }
+ /*
+ * The I_T nexus on which this command was received must be the
+ * reservation holder.
+ *
+ * From spc4r17 section 5.7.8 Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ */
+ if (pr_res_holder != pr_reg) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ " Nexus is not reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ /*
+ * From spc4r17 section 5.7.8: registering and moving reservation
+ *
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action is received and the established persistent reservation is a
+ * Write Exclusive - All Registrants type or Exclusive Access -
+ * All Registrants type reservation, then the command shall be completed
+ * with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ " reservation for type: %s\n",
+ core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ /*
+ * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+ */
+ type = pr_res_holder->pr_res_type;
+ scope = pr_res_holder->pr_res_scope;
+ /*
+ * c) Associate the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field with the I_T nexus specified as the
+ * destination of the register and move, where:
+ * A) The I_T nexus is specified by the TransportID and the
+ * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+ * B) Regardless of the TransportID format used, the association for
+ * the initiator port is based on either the initiator port name
+ * (see 3.1.71) on SCSI transport protocols where port names are
+ * required or the initiator port identifier (see 3.1.70) on SCSI
+ * transport protocols where port names are not required;
+ * d) Register the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field;
+ * e) Retain the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field and associated information;
+ *
+ * Also, it is not an error for a REGISTER AND MOVE service action to
+ * register an I_T nexus that is already registered with the same
+ * reservation key or a different reservation key.
+ */
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (!(dest_pr_reg)) {
+ ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, 0, aptpl, 2, 1);
+ if (ret != 0) {
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ new_reg = 1;
+ }
+ /*
+ * f) Release the persistent reservation for the persistent reservation
+ * holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+ * command was received);
+ */
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ dev->dev_pr_res_holder, 0);
+ /*
+ * g) Move the persistent reservation to the specified I_T nexus using
+ * the same scope and type as the persistent reservation released in
+ * item f); and
+ */
+ dev->dev_pr_res_holder = dest_pr_reg;
+ dest_pr_reg->pr_res_holder = 1;
+ dest_pr_reg->pr_res_type = type;
+ dest_pr_reg->pr_res_scope = scope;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Increment PRGeneration for existing registrations..
+ */ + if (!(new_reg)) + dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; + spin_unlock(&dev->dev_reservation_lock); + + printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" + " created new reservation holder TYPE: %s on object RTPI:" + " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), + core_scsi3_pr_dump_type(type), rtpi, + dest_pr_reg->pr_res_generation); + printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" + " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", + tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, + (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), + dest_node_acl->initiatorname, (iport_ptr != NULL) ? + iport_ptr : ""); + /* + * It is now safe to release configfs group dependencies for destination + * of Transport ID Initiator Device/Port Identifier + */ + core_scsi3_lunacl_undepend_item(dest_se_deve); + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_se_tpg); + /* + * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T + * nexus on which PERSISTENT RESERVE OUT command was received. + */ + if (unreg) { + spin_lock(&pr_tmpl->registration_lock); + __core_scsi3_free_registration(dev, pr_reg, NULL, 1); + spin_unlock(&pr_tmpl->registration_lock); + } else + core_scsi3_put_pr_reg(pr_reg); + + /* + * Clear the APTPL metadata if APTPL has been disabled, otherwise + * write out the updated metadata to struct file for this SCSI device. + */ + if (!(aptpl)) { + pr_tmpl->pr_aptpl_active = 0; + core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + printk("SPC-3 PR: Set APTPL Bit Deactivated for" + " REGISTER_AND_MOVE\n"); + } else { + pr_tmpl->pr_aptpl_active = 1; + ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + &dest_pr_reg->pr_aptpl_buf[0], + pr_tmpl->pr_aptpl_buf_len); + if (!(ret)) + printk("SPC-3 PR: Set APTPL Bit Activated for" + " REGISTER_AND_MOVE\n"); + } + + core_scsi3_put_pr_reg(dest_pr_reg); + return 0; +out: + if (dest_se_deve) + core_scsi3_lunacl_undepend_item(dest_se_deve); + if (dest_node_acl) + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_se_tpg); + core_scsi3_put_pr_reg(pr_reg); + return ret; +} + +static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) +{ + unsigned int __v1, __v2; + + __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3]; + __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7]; + + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; +} + +/* + * See spc4r17 section 6.14 Table 170 + */ +static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) +{ + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + u64 res_key, sa_res_key; + int sa, scope, type, aptpl; + int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; + /* + * FIXME: A NULL struct se_session pointer means an this is not coming from + * a $FABRIC_MOD's nexus, but from internal passthrough ops. 
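+ *
+ * An illustration with arbitrarily chosen values (not from the code
+ * or the spec): a RESERVATION KEY of 0x0123456789abcdef arrives
+ * big-endian in bytes 0-7 of the parameter list as
+ * 01 23 45 67 89 ab cd ef, and core_scsi3_extract_reservation_key()
+ * above folds those bytes back into that 64-bit value before it is
+ * compared against the registered pr_reg->pr_res_key.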
+ */
+ if (!(SE_SESS(cmd)))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ if (cmd->data_length < 24) {
+ printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+ " length too small: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+ */
+ sa = (cdb[1] & 0x1f);
+ scope = (cdb[2] & 0xf0);
+ type = (cdb[2] & 0x0f);
+ /*
+ * From PERSISTENT_RESERVE_OUT parameter list (payload)
+ */
+ res_key = core_scsi3_extract_reservation_key(&buf[0]);
+ sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+ /*
+ * REGISTER_AND_MOVE uses a different SA parameter list containing
+ * SCSI TransportIDs.
+ */
+ if (sa != PRO_REGISTER_AND_MOVE) {
+ spec_i_pt = (buf[20] & 0x08);
+ all_tg_pt = (buf[20] & 0x04);
+ aptpl = (buf[20] & 0x01);
+ } else {
+ aptpl = (buf[17] & 0x01);
+ unreg = (buf[17] & 0x02);
+ }
+ /*
+ * SPEC_I_PT=1 is only valid for Service action: REGISTER
+ */
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ /*
+ * From spc4r17 section 6.14:
+ *
+ * If the SPEC_I_PT bit is set to zero, the service action is not
+ * REGISTER AND MOVE, and the parameter list length is not 24, then
+ * the command shall be terminated with CHECK CONDITION status, with
+ * the sense key set to ILLEGAL REQUEST, and the additional sense
+ * code set to PARAMETER LIST LENGTH ERROR.
+ */
+ if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ (cmd->data_length != 24)) {
+ printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+ " list length: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * core_scsi3_emulate_pro_* function parameters
+ * are defined by spc4r17 Table 174:
+ * PERSISTENT_RESERVE_OUT service actions and valid parameters.
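+ *
+ * As an illustrative example (values chosen for illustration only):
+ * a PROUT CDB with cdb[1] = 0x01 and cdb[2] = 0x01 decodes above to
+ * sa = PRO_RESERVE, scope = 0x00 (PR_SCOPE_LU_SCOPE) and type = 0x01
+ * (PR_TYPE_WRITE_EXCLUSIVE), and is dispatched below to
+ * core_scsi3_emulate_pro_reserve().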
+ */ + switch (sa) { + case PRO_REGISTER: + return core_scsi3_emulate_pro_register(cmd, + res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); + case PRO_RESERVE: + return core_scsi3_emulate_pro_reserve(cmd, + type, scope, res_key); + case PRO_RELEASE: + return core_scsi3_emulate_pro_release(cmd, + type, scope, res_key); + case PRO_CLEAR: + return core_scsi3_emulate_pro_clear(cmd, res_key); + case PRO_PREEMPT: + return core_scsi3_emulate_pro_preempt(cmd, type, scope, + res_key, sa_res_key, 0); + case PRO_PREEMPT_AND_ABORT: + return core_scsi3_emulate_pro_preempt(cmd, type, scope, + res_key, sa_res_key, 1); + case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: + return core_scsi3_emulate_pro_register(cmd, + 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); + case PRO_REGISTER_AND_MOVE: + return core_scsi3_emulate_pro_register_and_move(cmd, res_key, + sa_res_key, aptpl, unreg); + default: + printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + " action: 0x%02x\n", cdb[1] & 0x1f); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + + return PYX_TRANSPORT_INVALID_CDB_FIELD; +} + +/* + * PERSISTENT_RESERVE_IN Service Action READ_KEYS + * + * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160 + */ +static int core_scsi3_pri_read_keys(struct se_cmd *cmd) +{ + struct se_device *se_dev = SE_DEV(cmd); + struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct t10_pr_registration *pr_reg; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + u32 add_len = 0, off = 8; + + if (cmd->data_length < 8) { + printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" + " too small\n", cmd->data_length); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + + buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); + buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); + buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); + buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + + spin_lock(&T10_RES(su_dev)->registration_lock); + list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + pr_reg_list) { + /* + * Check for overflow of 8byte PRI READ_KEYS payload and + * next reservation key list descriptor. + */ + if ((add_len + 8) > (cmd->data_length - 8)) + break; + + buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); + buf[off++] = (pr_reg->pr_res_key & 0xff); + + add_len += 8; + } + spin_unlock(&T10_RES(su_dev)->registration_lock); + + buf[4] = ((add_len >> 24) & 0xff); + buf[5] = ((add_len >> 16) & 0xff); + buf[6] = ((add_len >> 8) & 0xff); + buf[7] = (add_len & 0xff); + + return 0; +} + +/* + * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION + * + * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162 + */ +static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) +{ + struct se_device *se_dev = SE_DEV(cmd); + struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct t10_pr_registration *pr_reg; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + u64 pr_res_key; + u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. 
*/
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_RESERVATION SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&se_dev->dev_reservation_lock);
+ pr_reg = se_dev->dev_pr_res_holder;
+ if ((pr_reg)) {
+ /*
+ * Set the hardcoded Additional Length
+ */
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ if (cmd->data_length < 22) {
+ spin_unlock(&se_dev->dev_reservation_lock);
+ return 0;
+ }
+ /*
+ * Set the Reservation key.
+ *
+ * From spc4r17, section 5.7.10:
+ * A persistent reservation holder has its reservation key
+ * returned in the parameter data from a PERSISTENT
+ * RESERVE IN command with READ RESERVATION service action as
+ * follows:
+ * a) For a persistent reservation of the type Write Exclusive
+ * - All Registrants or Exclusive Access - All Registrants,
+ * the reservation key shall be set to zero; or
+ * b) For all other persistent reservation types, the
+ * reservation key shall be set to the registered
+ * reservation key for the I_T nexus that holds the
+ * persistent reservation.
+ */
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ pr_res_key = 0;
+ else
+ pr_res_key = pr_reg->pr_res_key;
+
+ buf[8] = ((pr_res_key >> 56) & 0xff);
+ buf[9] = ((pr_res_key >> 48) & 0xff);
+ buf[10] = ((pr_res_key >> 40) & 0xff);
+ buf[11] = ((pr_res_key >> 32) & 0xff);
+ buf[12] = ((pr_res_key >> 24) & 0xff);
+ buf[13] = ((pr_res_key >> 16) & 0xff);
+ buf[14] = ((pr_res_key >> 8) & 0xff);
+ buf[15] = (pr_res_key & 0xff);
+ /*
+ * Set the SCOPE and TYPE
+ */
+ buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ }
+ spin_unlock(&se_dev->dev_reservation_lock);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u16 add_len = 8; /* Hardcoded to 8. */
+
+ if (cmd->data_length < 6) {
+ printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ " %u too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((add_len >> 8) & 0xff);
+ buf[1] = (add_len & 0xff);
+ buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+ buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
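+ *
+ * For illustration only (assuming a zeroed payload buffer and APTPL
+ * not active): the capability bits set above and the type mask set
+ * below leave the response with buf[2] = 0x1d, buf[3] = 0x90,
+ * buf[4] = 0xea and buf[5] = 0x01; spc4r17 Table 165 remains the
+ * authoritative layout.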
+ */ + buf[3] |= 0x80; + /* + * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 + */ + buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */ + /* + * PTPL_A: Persistence across Target Power Loss Active bit + */ + if (pr_tmpl->pr_aptpl_active) + buf[3] |= 0x01; + /* + * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167 + */ + buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ + buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ + buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ + buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ + buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + + return 0; +} + +/* + * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS + * + * See spc4r17 section 6.13.5 Table 168 and 169 + */ +static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) +{ + struct se_device *se_dev = SE_DEV(cmd); + struct se_node_acl *se_nacl; + struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; + unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; + u32 off = 8; /* off into first Full Status descriptor */ + int format_code = 0; + + if (cmd->data_length < 8) { + printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" + " too small\n", cmd->data_length); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + + buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); + buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); + buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); + buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + se_nacl = pr_reg->pr_reg_nacl; + se_tpg = pr_reg->pr_reg_nacl->se_tpg; + add_desc_len = 0; + + atomic_inc(&pr_reg->pr_res_holders); + smp_mb__after_atomic_inc(); + spin_unlock(&pr_tmpl->registration_lock); + /* + * Determine expected length of $FABRIC_MOD specific + * TransportID full status descriptor.. + */ + exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( + se_tpg, se_nacl, pr_reg, &format_code); + + if ((exp_desc_len + add_len) > cmd->data_length) { + printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" + " out of buffer: %d\n", cmd->data_length); + spin_lock(&pr_tmpl->registration_lock); + atomic_dec(&pr_reg->pr_res_holders); + smp_mb__after_atomic_dec(); + break; + } + /* + * Set RESERVATION KEY + */ + buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); + buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); + buf[off++] = (pr_reg->pr_res_key & 0xff); + off += 4; /* Skip Over Reserved area */ + + /* + * Set ALL_TG_PT bit if PROUT SA REGISTER had this set. + */ + if (pr_reg->pr_reg_all_tg_pt) + buf[off] = 0x02; + /* + * The struct se_lun pointer will be present for the + * reservation holder for PR_HOLDER bit. + * + * Also, if this registration is the reservation + * holder, fill in SCOPE and TYPE in the next byte. 
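+ *
+ * Illustrative example (assumed values, not spec text): for the
+ * holder of a Write Exclusive, LU-scope reservation registered
+ * through a single target port, descriptor byte 12 below becomes
+ * 0x01 (PR_HOLDER set, ALL_TG_PT clear) and byte 13 becomes 0x01
+ * (scope 0x00, type 0x01).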
+ */ + if (pr_reg->pr_res_holder) { + buf[off++] |= 0x01; + buf[off++] = (pr_reg->pr_res_scope & 0xf0) | + (pr_reg->pr_res_type & 0x0f); + } else + off += 2; + + off += 4; /* Skip over reserved area */ + /* + * From spc4r17 6.3.15: + * + * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT + * IDENTIFIER field contains the relative port identifier (see + * 3.1.120) of the target port that is part of the I_T nexus + * described by this full status descriptor. If the ALL_TG_PT + * bit is set to one, the contents of the RELATIVE TARGET PORT + * IDENTIFIER field are not defined by this standard. + */ + if (!(pr_reg->pr_reg_all_tg_pt)) { + struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; + + buf[off++] = ((port->sep_rtpi >> 8) & 0xff); + buf[off++] = (port->sep_rtpi & 0xff); + } else + off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */ + + /* + * Now, have the $FABRIC_MOD fill in the protocol identifier + */ + desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, + se_nacl, pr_reg, &format_code, &buf[off+4]); + + spin_lock(&pr_tmpl->registration_lock); + atomic_dec(&pr_reg->pr_res_holders); + smp_mb__after_atomic_dec(); + /* + * Set the ADDITIONAL DESCRIPTOR LENGTH + */ + buf[off++] = ((desc_len >> 24) & 0xff); + buf[off++] = ((desc_len >> 16) & 0xff); + buf[off++] = ((desc_len >> 8) & 0xff); + buf[off++] = (desc_len & 0xff); + /* + * Size of full desctipor header minus TransportID + * containing $FABRIC_MOD specific) initiator device/port + * WWN information. + * + * See spc4r17 Section 6.13.5 Table 169 + */ + add_desc_len = (24 + desc_len); + + off += desc_len; + add_len += add_desc_len; + } + spin_unlock(&pr_tmpl->registration_lock); + /* + * Set ADDITIONAL_LENGTH + */ + buf[4] = ((add_len >> 24) & 0xff); + buf[5] = ((add_len >> 16) & 0xff); + buf[6] = ((add_len >> 8) & 0xff); + buf[7] = (add_len & 0xff); + + return 0; +} + +static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) +{ + switch (cdb[1] & 0x1f) { + case PRI_READ_KEYS: + return core_scsi3_pri_read_keys(cmd); + case PRI_READ_RESERVATION: + return core_scsi3_pri_read_reservation(cmd); + case PRI_REPORT_CAPABILITIES: + return core_scsi3_pri_report_capabilities(cmd); + case PRI_READ_FULL_STATUS: + return core_scsi3_pri_read_full_status(cmd); + default: + printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" + " action: 0x%02x\n", cdb[1] & 0x1f); + return PYX_TRANSPORT_INVALID_CDB_FIELD; + } + +} + +int core_scsi3_emulate_pr(struct se_cmd *cmd) +{ + unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; + struct se_device *dev = cmd->se_dev; + /* + * Following spc2r20 5.5.1 Reservations overview: + * + * If a logical unit has been reserved by any RESERVE command and is + * still reserved by any initiator, all PERSISTENT RESERVE IN and all + * PERSISTENT RESERVE OUT commands shall conflict regardless of + * initiator or service action and shall terminate with a RESERVATION + * CONFLICT status. + */ + if (dev->dev_flags & DF_SPC2_RESERVATIONS) { + printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" + " SPC-2 reservation is held, returning" + " RESERVATION_CONFLICT\n"); + return PYX_TRANSPORT_RESERVATION_CONFLICT; + } + + return (cdb[0] == PERSISTENT_RESERVE_OUT) ? 
+ core_scsi3_emulate_pr_out(cmd, cdb) : + core_scsi3_emulate_pr_in(cmd, cdb); +} + +static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type) +{ + return 0; +} + +static int core_pt_seq_non_holder( + struct se_cmd *cmd, + unsigned char *cdb, + u32 pr_reg_type) +{ + return 0; +} + +int core_setup_reservations(struct se_device *dev, int force_pt) +{ + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + struct t10_reservation_template *rest = &su_dev->t10_reservation; + /* + * If this device is from Target_Core_Mod/pSCSI, use the reservations + * of the Underlying SCSI hardware. In Linux/SCSI terms, this can + * cause a problem because libata and some SATA RAID HBAs appear + * under Linux/SCSI, but to emulate reservations themselves. + */ + if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && + !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { + rest->res_type = SPC_PASSTHROUGH; + rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; + rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; + printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" + " emulation\n", TRANSPORT(dev)->name); + return 0; + } + /* + * If SPC-3 or above is reported by real or emulated struct se_device, + * use emulated Persistent Reservations. + */ + if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { + rest->res_type = SPC3_PERSISTENT_RESERVATIONS; + rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; + rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; + printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" + " emulation\n", TRANSPORT(dev)->name); + } else { + rest->res_type = SPC2_RESERVATIONS; + rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; + rest->pr_ops.t10_seq_non_holder = + &core_scsi2_reservation_seq_non_holder; + printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", + TRANSPORT(dev)->name); + } + + return 0; +} diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h new file mode 100644 index 000000000000..5603bcfd86d3 --- /dev/null +++ b/drivers/target/target_core_pr.h @@ -0,0 +1,67 @@ +#ifndef TARGET_CORE_PR_H +#define TARGET_CORE_PR_H +/* + * PERSISTENT_RESERVE_OUT service action codes + * + * spc4r17 section 6.14.2 Table 171 + */ +#define PRO_REGISTER 0x00 +#define PRO_RESERVE 0x01 +#define PRO_RELEASE 0x02 +#define PRO_CLEAR 0x03 +#define PRO_PREEMPT 0x04 +#define PRO_PREEMPT_AND_ABORT 0x05 +#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06 +#define PRO_REGISTER_AND_MOVE 0x07 +/* + * PERSISTENT_RESERVE_IN service action codes + * + * spc4r17 section 6.13.1 Table 159 + */ +#define PRI_READ_KEYS 0x00 +#define PRI_READ_RESERVATION 0x01 +#define PRI_REPORT_CAPABILITIES 0x02 +#define PRI_READ_FULL_STATUS 0x03 +/* + * PERSISTENT_RESERVE_ SCOPE field + * + * spc4r17 section 6.13.3.3 Table 163 + */ +#define PR_SCOPE_LU_SCOPE 0x00 +/* + * PERSISTENT_RESERVE_* TYPE field + * + * spc4r17 section 6.13.3.4 Table 164 + */ +#define PR_TYPE_WRITE_EXCLUSIVE 0x01 +#define PR_TYPE_EXCLUSIVE_ACCESS 0x03 +#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05 +#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06 +#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07 +#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08 + +#define PR_APTPL_MAX_IPORT_LEN 256 +#define PR_APTPL_MAX_TPORT_LEN 256 + +extern struct kmem_cache *t10_pr_reg_cache; + +extern int core_pr_dump_initiator_port(struct t10_pr_registration *, + char *, u32); +extern int core_scsi2_emulate_crh(struct se_cmd *); +extern int 
core_scsi3_alloc_aptpl_registration( + struct t10_reservation_template *, u64, + unsigned char *, unsigned char *, u32, + unsigned char *, u16, u32, int, int, u8); +extern int core_scsi3_check_aptpl_registration(struct se_device *, + struct se_portal_group *, struct se_lun *, + struct se_lun_acl *); +extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, + struct se_node_acl *); +extern void core_scsi3_free_all_registrations(struct se_device *); +extern unsigned char *core_scsi3_pr_dump_type(int); +extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *, + struct se_cmd *); +extern int core_scsi3_emulate_pr(struct se_cmd *); +extern int core_setup_reservations(struct se_device *, int); + +#endif /* TARGET_CORE_PR_H */ diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c new file mode 100644 index 000000000000..742d24609a9b --- /dev/null +++ b/drivers/target/target_core_pscsi.c @@ -0,0 +1,1470 @@ +/******************************************************************************* + * Filename: target_core_pscsi.c + * + * This file contains the generic target mode <-> Linux SCSI subsystem plugin. + * + * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For TASK_ATTR_* */ + +#include +#include +#include + +#include "target_core_pscsi.h" + +#define ISPRINT(a) ((a >= ' ') && (a <= '~')) + +static struct se_subsystem_api pscsi_template; + +static void pscsi_req_done(struct request *, int); + +/* pscsi_get_sh(): + * + * + */ +static struct Scsi_Host *pscsi_get_sh(u32 host_no) +{ + struct Scsi_Host *sh = NULL; + + sh = scsi_host_lookup(host_no); + if (IS_ERR(sh)) { + printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" + " %u\n", host_no); + return NULL; + } + + return sh; +} + +/* pscsi_attach_hba(): + * + * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. + * from the passed SCSI Host ID. 
+ */ +static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) +{ + int hba_depth; + struct pscsi_hba_virt *phv; + + phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); + if (!(phv)) { + printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); + return -1; + } + phv->phv_host_id = host_id; + phv->phv_mode = PHV_VIRUTAL_HOST_ID; + hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; + atomic_set(&hba->left_queue_depth, hba_depth); + atomic_set(&hba->max_queue_depth, hba_depth); + + hba->hba_ptr = (void *)phv; + + printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" + " Generic Target Core Stack %s\n", hba->hba_id, + PSCSI_VERSION, TARGET_CORE_MOD_VERSION); + printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" + " Target Core with TCQ Depth: %d\n", hba->hba_id, + atomic_read(&hba->max_queue_depth)); + + return 0; +} + +static void pscsi_detach_hba(struct se_hba *hba) +{ + struct pscsi_hba_virt *phv = hba->hba_ptr; + struct Scsi_Host *scsi_host = phv->phv_lld_host; + + if (scsi_host) { + scsi_host_put(scsi_host); + + printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" + " Generic Target Core\n", hba->hba_id, + (scsi_host->hostt->name) ? (scsi_host->hostt->name) : + "Unknown"); + } else + printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" + " from Generic Target Core\n", hba->hba_id); + + kfree(phv); + hba->hba_ptr = NULL; +} + +static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) +{ + struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; + struct Scsi_Host *sh = phv->phv_lld_host; + int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; + /* + * Release the struct Scsi_Host + */ + if (!(mode_flag)) { + if (!(sh)) + return 0; + + phv->phv_lld_host = NULL; + phv->phv_mode = PHV_VIRUTAL_HOST_ID; + atomic_set(&hba->left_queue_depth, hba_depth); + atomic_set(&hba->max_queue_depth, hba_depth); + + printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" + " %s\n", hba->hba_id, (sh->hostt->name) ? + (sh->hostt->name) : "Unknown"); + + scsi_host_put(sh); + return 0; + } + /* + * Otherwise, locate struct Scsi_Host from the original passed + * pSCSI Host ID and enable for phba mode + */ + sh = pscsi_get_sh(phv->phv_host_id); + if (!(sh)) { + printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" + " phv_host_id: %d\n", phv->phv_host_id); + return -1; + } + /* + * Usually the SCSI LLD will use the hostt->can_queue value to define + * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set + * this at all and set sh->can_queue at runtime. + */ + hba_depth = (sh->hostt->can_queue > sh->can_queue) ? + sh->hostt->can_queue : sh->can_queue; + + atomic_set(&hba->left_queue_depth, hba_depth); + atomic_set(&hba->max_queue_depth, hba_depth); + + phv->phv_lld_host = sh; + phv->phv_mode = PHV_LLD_SCSI_HOST_NO; + + printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", + hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); + + return 1; +} + +static void pscsi_tape_read_blocksize(struct se_device *dev, + struct scsi_device *sdev) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf; + int ret; + + buf = kzalloc(12, GFP_KERNEL); + if (!buf) + return; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = MODE_SENSE; + cdb[4] = 0x0c; /* 12 bytes */ + + ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, + HZ, 1, NULL); + if (ret) + goto out_free; + + /* + * If MODE_SENSE still returns zero, set the default value to 1024. 
+ */ + sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); + if (!sdev->sector_size) + sdev->sector_size = 1024; +out_free: + kfree(buf); +} + +static void +pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) +{ + unsigned char *buf; + + if (sdev->inquiry_len < INQUIRY_LEN) + return; + + buf = sdev->inquiry; + if (!buf) + return; + /* + * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() + */ + memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); + memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); + memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); +} + +static int +pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf; + int ret; + + buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); + if (!buf) + return -1; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = INQUIRY; + cdb[1] = 0x01; /* Query VPD */ + cdb[2] = 0x80; /* Unit Serial Number */ + cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; + cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); + + ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, + INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); + if (ret) + goto out_free; + + snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); + + wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; + + kfree(buf); + return 0; + +out_free: + kfree(buf); + return -1; +} + +static void +pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, + struct t10_wwn *wwn) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; + int ident_len, page_len, off = 4, ret; + struct t10_vpd *vpd; + + buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); + if (!buf) + return; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = INQUIRY; + cdb[1] = 0x01; /* Query VPD */ + cdb[2] = 0x83; /* Device Identifier */ + cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; + cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); + + ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, + INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, + NULL, HZ, 1, NULL); + if (ret) + goto out; + + page_len = (buf[2] << 8) | buf[3]; + while (page_len > 0) { + /* Grab a pointer to the Identification descriptor */ + page_83 = &buf[off]; + ident_len = page_83[3]; + if (!ident_len) { + printk(KERN_ERR "page_83[3]: identifier" + " length zero!\n"); + break; + } + printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); + + vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); + if (!vpd) { + printk(KERN_ERR "Unable to allocate memory for" + " struct t10_vpd\n"); + goto out; + } + INIT_LIST_HEAD(&vpd->vpd_list); + + transport_set_vpd_proto_id(vpd, page_83); + transport_set_vpd_assoc(vpd, page_83); + + if (transport_set_vpd_ident_type(vpd, page_83) < 0) { + off += (ident_len + 4); + page_len -= (ident_len + 4); + kfree(vpd); + continue; + } + if (transport_set_vpd_ident(vpd, page_83) < 0) { + off += (ident_len + 4); + page_len -= (ident_len + 4); + kfree(vpd); + continue; + } + + list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); + off += (ident_len + 4); + page_len -= (ident_len + 4); + } + +out: + kfree(buf); +} + +/* pscsi_add_device_to_list(): + * + * + */ +static struct se_device *pscsi_add_device_to_list( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + struct pscsi_dev_virt *pdv, + struct scsi_device *sd, + int dev_flags) +{ + struct se_device *dev; + struct se_dev_limits dev_limits; + struct request_queue *q; + struct queue_limits *limits; + + memset(&dev_limits, 0, sizeof(struct 
se_dev_limits)); + + if (!sd->queue_depth) { + sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; + + printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" + " queue_depth to %d\n", sd->channel, sd->id, + sd->lun, sd->queue_depth); + } + /* + * Setup the local scope queue_limits from struct request_queue->limits + * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. + */ + q = sd->request_queue; + limits = &dev_limits.limits; + limits->logical_block_size = sd->sector_size; + limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? + queue_max_hw_sectors(q) : sd->host->max_sectors; + limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? + queue_max_sectors(q) : sd->host->max_sectors; + dev_limits.hw_queue_depth = sd->queue_depth; + dev_limits.queue_depth = sd->queue_depth; + /* + * Setup our standard INQUIRY info into se_dev->t10_wwn + */ + pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); + + /* + * Set the pointer pdv->pdv_sd from the passed struct scsi_device, + * which has already been referenced with Linux SCSI code with + * scsi_device_get() in this file's pscsi_create_virtdevice(). + * + * The passthrough operations called by the transport_add_device_* + * function below will require this pointer to be set for passthrough + * ops. + * + * For the shutdown case in pscsi_free_device(), this struct + * scsi_device reference is released with Linux SCSI code + * scsi_device_put() and the pdv->pdv_sd cleared. + */ + pdv->pdv_sd = sd; + + dev = transport_add_device_to_core_hba(hba, &pscsi_template, + se_dev, dev_flags, (void *)pdv, + &dev_limits, NULL, NULL); + if (!(dev)) { + pdv->pdv_sd = NULL; + return NULL; + } + + /* + * Locate VPD WWN Information used for various purposes within + * the Storage Engine. + */ + if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { + /* + * If VPD Unit Serial returned GOOD status, try + * VPD Device Identification page (0x83). + */ + pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); + } + + /* + * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. + */ + if (sd->type == TYPE_TAPE) + pscsi_tape_read_blocksize(dev, sd); + return dev; +} + +static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) +{ + struct pscsi_dev_virt *pdv; + + pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); + if (!(pdv)) { + printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); + return NULL; + } + pdv->pdv_se_hba = hba; + + printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); + return (void *)pdv; +} + +/* + * Called with struct Scsi_Host->host_lock held.
+ */ +static struct se_device *pscsi_create_type_disk( + struct scsi_device *sd, + struct pscsi_dev_virt *pdv, + struct se_subsystem_dev *se_dev, + struct se_hba *hba) +{ + struct se_device *dev; + struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct Scsi_Host *sh = sd->host; + struct block_device *bd; + u32 dev_flags = 0; + + if (scsi_device_get(sd)) { + printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + sh->host_no, sd->channel, sd->id, sd->lun); + spin_unlock_irq(sh->host_lock); + return NULL; + } + spin_unlock_irq(sh->host_lock); + /* + * Claim exclusive struct block_device access to struct scsi_device + * for TYPE_DISK using supplied udev_path + */ + bd = blkdev_get_by_path(se_dev->se_dev_udev_path, + FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); + if (!(bd)) { + printk("pSCSI: blkdev_get_by_path() failed\n"); + scsi_device_put(sd); + return NULL; + } + pdv->pdv_bd = bd; + + dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); + if (!(dev)) { + blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + scsi_device_put(sd); + return NULL; + } + printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", + phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); + + return dev; +} + +/* + * Called with struct Scsi_Host->host_lock held. + */ +static struct se_device *pscsi_create_type_rom( + struct scsi_device *sd, + struct pscsi_dev_virt *pdv, + struct se_subsystem_dev *se_dev, + struct se_hba *hba) +{ + struct se_device *dev; + struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct Scsi_Host *sh = sd->host; + u32 dev_flags = 0; + + if (scsi_device_get(sd)) { + printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + sh->host_no, sd->channel, sd->id, sd->lun); + spin_unlock_irq(sh->host_lock); + return NULL; + } + spin_unlock_irq(sh->host_lock); + + dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); + if (!(dev)) { + scsi_device_put(sd); + return NULL; + } + printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, + sd->channel, sd->id, sd->lun); + + return dev; +} + +/* + * Called with struct Scsi_Host->host_lock held.
+ */ +static struct se_device *pscsi_create_type_other( + struct scsi_device *sd, + struct pscsi_dev_virt *pdv, + struct se_subsystem_dev *se_dev, + struct se_hba *hba) +{ + struct se_device *dev; + struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct Scsi_Host *sh = sd->host; + u32 dev_flags = 0; + + spin_unlock_irq(sh->host_lock); + dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); + if (!(dev)) + return NULL; + + printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, + sd->channel, sd->id, sd->lun); + + return dev; +} + +static struct se_device *pscsi_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p) +{ + struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; + struct se_device *dev; + struct scsi_device *sd; + struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; + struct Scsi_Host *sh = phv->phv_lld_host; + int legacy_mode_enable = 0; + + if (!(pdv)) { + printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" + " parameter\n"); + return NULL; + } + /* + * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the + * struct Scsi_Host we will need to bring the TCM/pSCSI object online + */ + if (!(sh)) { + if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { + printk(KERN_ERR "pSCSI: Unable to locate struct" + " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); + return NULL; + } + /* + * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device + * reference, we enforce that udev_path has been set + */ + if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { + printk(KERN_ERR "pSCSI: udev_path attribute has not" + " been set before ENABLE=1\n"); + return NULL; + } + /* + * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, + * use the original TCM hba ID to reference Linux/SCSI Host No + * and enable for PHV_LLD_SCSI_HOST_NO mode. + */ + if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { + spin_lock(&hba->device_lock); + if (!(list_empty(&hba->hba_dev_list))) { + printk(KERN_ERR "pSCSI: Unable to set hba_mode" + " with active devices\n"); + spin_unlock(&hba->device_lock); + return NULL; + } + spin_unlock(&hba->device_lock); + + if (pscsi_pmode_enable_hba(hba, 1) != 1) + return NULL; + + legacy_mode_enable = 1; + hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; + sh = phv->phv_lld_host; + } else { + sh = pscsi_get_sh(pdv->pdv_host_id); + if (!(sh)) { + printk(KERN_ERR "pSCSI: Unable to locate" + " pdv_host_id: %d\n", pdv->pdv_host_id); + return NULL; + } + } + } else { + if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { + printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" + " struct Scsi_Host exists\n"); + return NULL; + } + } + + spin_lock_irq(sh->host_lock); + list_for_each_entry(sd, &sh->__devices, siblings) { + if ((pdv->pdv_channel_id != sd->channel) || + (pdv->pdv_target_id != sd->id) || + (pdv->pdv_lun_id != sd->lun)) + continue; + /* + * Functions will release the held struct scsi_host->host_lock + * before calling calling pscsi_add_device_to_list() to register + * struct scsi_device with target_core_mod. 
+ */ + switch (sd->type) { + case TYPE_DISK: + dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); + break; + case TYPE_ROM: + dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); + break; + default: + dev = pscsi_create_type_other(sd, pdv, se_dev, hba); + break; + } + + if (!(dev)) { + if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) + scsi_host_put(sh); + else if (legacy_mode_enable) { + pscsi_pmode_enable_hba(hba, 0); + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + } + pdv->pdv_sd = NULL; + return NULL; + } + return dev; + } + spin_unlock_irq(sh->host_lock); + + printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, + pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); + + if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) + scsi_host_put(sh); + else if (legacy_mode_enable) { + pscsi_pmode_enable_hba(hba, 0); + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + } + + return NULL; +} + +/* pscsi_free_device(): (Part of se_subsystem_api_t template) + * + * + */ +static void pscsi_free_device(void *p) +{ + struct pscsi_dev_virt *pdv = p; + struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; + struct scsi_device *sd = pdv->pdv_sd; + + if (sd) { + /* + * Release exclusive pSCSI internal struct block_device claim for + * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() + */ + if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { + blkdev_put(pdv->pdv_bd, + FMODE_WRITE|FMODE_READ|FMODE_EXCL); + pdv->pdv_bd = NULL; + } + /* + * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference + * to struct Scsi_Host now. + */ + if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && + (phv->phv_lld_host != NULL)) + scsi_host_put(phv->phv_lld_host); + + if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) + scsi_device_put(sd); + + pdv->pdv_sd = NULL; + } + + kfree(pdv); +} + +static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) +{ + return container_of(task, struct pscsi_plugin_task, pscsi_task); +} + + +/* pscsi_transport_complete(): + * + * + */ +static int pscsi_transport_complete(struct se_task *task) +{ + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct scsi_device *sd = pdv->pdv_sd; + int result; + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + unsigned char *cdb = &pt->pscsi_cdb[0]; + + result = pt->pscsi_result; + /* + * Hack to make sure that Write-Protect modepage is set if R/O mode is + * forced. + */ + if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && + (status_byte(result) << 1) == SAM_STAT_GOOD) { + if (!TASK_CMD(task)->se_deve) + goto after_mode_sense; + + if (TASK_CMD(task)->se_deve->lun_flags & + TRANSPORT_LUNFLAGS_READ_ONLY) { + unsigned char *buf = (unsigned char *) + T_TASK(task->task_se_cmd)->t_task_buf; + + if (cdb[0] == MODE_SENSE_10) { + if (!(buf[3] & 0x80)) + buf[3] |= 0x80; + } else { + if (!(buf[2] & 0x80)) + buf[2] |= 0x80; + } + } + } +after_mode_sense: + + if (sd->type != TYPE_TAPE) + goto after_mode_select; + + /* + * Hack to correctly obtain the initiator requested blocksize for + * TYPE_TAPE. Since this value is dependent upon each tape media, + * struct scsi_device->sector_size will not contain the correct value + * by default, so we go ahead and set it so + * TRANSPORT(dev)->get_blockdev() returns the correct value to the + * storage engine. 
+ */ + if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && + (status_byte(result) << 1) == SAM_STAT_GOOD) { + unsigned char *buf; + struct scatterlist *sg = task->task_sg; + u16 bdl; + u32 blocksize; + + buf = sg_virt(&sg[0]); + if (!(buf)) { + printk(KERN_ERR "Unable to get buf for scatterlist\n"); + goto after_mode_select; + } + + if (cdb[0] == MODE_SELECT) + bdl = (buf[3]); + else + bdl = (buf[6] << 8) | (buf[7]); + + if (!bdl) + goto after_mode_select; + + if (cdb[0] == MODE_SELECT) + blocksize = (buf[9] << 16) | (buf[10] << 8) | + (buf[11]); + else + blocksize = (buf[13] << 16) | (buf[14] << 8) | + (buf[15]); + + sd->sector_size = blocksize; + } +after_mode_select: + + if (status_byte(result) & CHECK_CONDITION) + return 1; + + return 0; +} + +static struct se_task * +pscsi_alloc_task(struct se_cmd *cmd) +{ + struct pscsi_plugin_task *pt; + unsigned char *cdb = T_TASK(cmd)->t_task_cdb; + + pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); + if (!pt) { + printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); + return NULL; + } + + /* + * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, + * allocate the extended CDB buffer for per struct se_task context + * pt->pscsi_cdb now. + */ + if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { + + pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); + if (!(pt->pscsi_cdb)) { + printk(KERN_ERR "pSCSI: Unable to allocate extended" + " pt->pscsi_cdb\n"); + return NULL; + } + } else + pt->pscsi_cdb = &pt->__pscsi_cdb[0]; + + return &pt->pscsi_task; +} + +static inline void pscsi_blk_init_request( + struct se_task *task, + struct pscsi_plugin_task *pt, + struct request *req, + int bidi_read) +{ + /* + * Defined as "scsi command" in include/linux/blkdev.h. + */ + req->cmd_type = REQ_TYPE_BLOCK_PC; + /* + * For the extra BIDI-COMMAND READ struct request we do not + * need to setup the remaining structure members + */ + if (bidi_read) + return; + /* + * Setup the done function pointer for struct request, + * also set the end_io_data pointer.to struct se_task. + */ + req->end_io = pscsi_req_done; + req->end_io_data = (void *)task; + /* + * Load the referenced struct se_task's SCSI CDB into + * include/linux/blkdev.h:struct request->cmd + */ + req->cmd_len = scsi_command_size(pt->pscsi_cdb); + req->cmd = &pt->pscsi_cdb[0]; + /* + * Setup pointer for outgoing sense data. + */ + req->sense = (void *)&pt->pscsi_sense[0]; + req->sense_len = 0; +} + +/* + * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB +*/ +static int pscsi_blk_get_request(struct se_task *task) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + + pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, + (task->task_data_direction == DMA_TO_DEVICE), + GFP_KERNEL); + if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { + printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", + IS_ERR(pt->pscsi_req)); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + /* + * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, + * and setup rq callback, CDB and sense. + */ + pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); + return 0; +} + +/* pscsi_do_task(): (Part of se_subsystem_api_t template) + * + * + */ +static int pscsi_do_task(struct se_task *task) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + /* + * Set the struct request->timeout value based on peripheral + * device type from SCSI. 
+ */ + if (pdv->pdv_sd->type == TYPE_DISK) + pt->pscsi_req->timeout = PS_TIMEOUT_DISK; + else + pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; + + pt->pscsi_req->retries = PS_RETRY; + /* + * Queue the struct request into the struct scsi_device->request_queue. + * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd + * descriptor + */ + blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, + (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ), + pscsi_req_done); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +static void pscsi_free_task(struct se_task *task) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct se_cmd *cmd = task->task_se_cmd; + + /* + * Release the extended CDB allocation from pscsi_alloc_task() + * if one exists. + */ + if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) + kfree(pt->pscsi_cdb); + /* + * We do not release the bio(s) here associated with this task, as + * this is handled by bio_put() and pscsi_bi_endio(). + */ + kfree(pt); +} + +enum { + Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, + Opt_scsi_lun_id, Opt_err +}; + +static match_table_t tokens = { + {Opt_scsi_host_id, "scsi_host_id=%d"}, + {Opt_scsi_channel_id, "scsi_channel_id=%d"}, + {Opt_scsi_target_id, "scsi_target_id=%d"}, + {Opt_scsi_lun_id, "scsi_lun_id=%d"}, + {Opt_err, NULL} +}; + +static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, + struct se_subsystem_dev *se_dev, + const char *page, + ssize_t count) +{ + struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; + struct pscsi_hba_virt *phv = hba->hba_ptr; + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_scsi_host_id: + if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { + printk(KERN_ERR "PSCSI[%d]: Unable to accept" + " scsi_host_id while phv_mode ==" + " PHV_LLD_SCSI_HOST_NO\n", + phv->phv_host_id); + ret = -EINVAL; + goto out; + } + match_int(args, &arg); + pdv->pdv_host_id = arg; + printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" + " %d\n", phv->phv_host_id, pdv->pdv_host_id); + pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; + break; + case Opt_scsi_channel_id: + match_int(args, &arg); + pdv->pdv_channel_id = arg; + printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" + " ID: %d\n", phv->phv_host_id, + pdv->pdv_channel_id); + pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; + break; + case Opt_scsi_target_id: + match_int(args, &arg); + pdv->pdv_target_id = arg; + printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" + " ID: %d\n", phv->phv_host_id, + pdv->pdv_target_id); + pdv->pdv_flags |= PDF_HAS_TARGET_ID; + break; + case Opt_scsi_lun_id: + match_int(args, &arg); + pdv->pdv_lun_id = arg; + printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" + " %d\n", phv->phv_host_id, pdv->pdv_lun_id); + pdv->pdv_flags |= PDF_HAS_LUN_ID; + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? 
count : ret; +} + +static ssize_t pscsi_check_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev) +{ + struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; + + if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || + !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || + !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { + printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" + " scsi_lun_id= parameters\n"); + return -1; + } + + return 0; +} + +static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, + struct se_subsystem_dev *se_dev, + char *b) +{ + struct pscsi_hba_virt *phv = hba->hba_ptr; + struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; + struct scsi_device *sd = pdv->pdv_sd; + unsigned char host_id[16]; + ssize_t bl; + int i; + + if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) + snprintf(host_id, 16, "%d", pdv->pdv_host_id); + else + snprintf(host_id, 16, "PHBA Mode"); + + bl = sprintf(b, "SCSI Device Bus Location:" + " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", + pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, + host_id); + + if (sd) { + bl += sprintf(b + bl, " "); + bl += sprintf(b + bl, "Vendor: "); + for (i = 0; i < 8; i++) { + if (ISPRINT(sd->vendor[i])) /* printable character? */ + bl += sprintf(b + bl, "%c", sd->vendor[i]); + else + bl += sprintf(b + bl, " "); + } + bl += sprintf(b + bl, " Model: "); + for (i = 0; i < 16; i++) { + if (ISPRINT(sd->model[i])) /* printable character ? */ + bl += sprintf(b + bl, "%c", sd->model[i]); + else + bl += sprintf(b + bl, " "); + } + bl += sprintf(b + bl, " Rev: "); + for (i = 0; i < 4; i++) { + if (ISPRINT(sd->rev[i])) /* printable character ? */ + bl += sprintf(b + bl, "%c", sd->rev[i]); + else + bl += sprintf(b + bl, " "); + } + bl += sprintf(b + bl, "\n"); + } + return bl; +} + +static void pscsi_bi_endio(struct bio *bio, int error) +{ + bio_put(bio); +} + +static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) +{ + struct bio *bio; + /* + * Use bio_malloc() following the comment in for bio -> struct request + * in block/blk-core.c:blk_make_request() + */ + bio = bio_kmalloc(GFP_KERNEL, sg_num); + if (!(bio)) { + printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); + return NULL; + } + bio->bi_end_io = pscsi_bi_endio; + + return bio; +} + +#if 0 +#define DEBUG_PSCSI(x...) printk(x) +#else +#define DEBUG_PSCSI(x...) +#endif + +static int __pscsi_map_task_SG( + struct se_task *task, + struct scatterlist *task_sg, + u32 task_sg_num, + int bidi_read) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + struct page *page; + struct scatterlist *sg; + u32 data_len = task->task_size, i, len, bytes, off; + int nr_pages = (task->task_size + task_sg[0].offset + + PAGE_SIZE - 1) >> PAGE_SHIFT; + int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + int rw = (task->task_data_direction == DMA_TO_DEVICE); + + if (!task->task_size) + return 0; + /* + * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup + * the bio_vec maplist from TC< struct se_mem -> task->task_sg -> + * struct scatterlist memory. The struct se_task->task_sg[] currently needs + * to be attached to struct bios for submission to Linux/SCSI using + * struct request to struct scsi_device->request_queue. 
+ * + * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI + * is ported to upstream SCSI passthrough functionality that accepts + * struct scatterlist->page_link or struct page as a paraemeter. + */ + DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); + + for_each_sg(task_sg, sg, task_sg_num, i) { + page = sg_page(sg); + off = sg->offset; + len = sg->length; + + DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, + page, len, off); + + while (len > 0 && data_len > 0) { + bytes = min_t(unsigned int, len, PAGE_SIZE - off); + bytes = min(bytes, data_len); + + if (!(bio)) { + nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); + nr_pages -= nr_vecs; + /* + * Calls bio_kmalloc() and sets bio->bi_end_io() + */ + bio = pscsi_get_bio(pdv, nr_vecs); + if (!(bio)) + goto fail; + + if (rw) + bio->bi_rw |= REQ_WRITE; + + DEBUG_PSCSI("PSCSI: Allocated bio: %p," + " dir: %s nr_vecs: %d\n", bio, + (rw) ? "rw" : "r", nr_vecs); + /* + * Set *hbio pointer to handle the case: + * nr_pages > BIO_MAX_PAGES, where additional + * bios need to be added to complete a given + * struct se_task + */ + if (!hbio) + hbio = tbio = bio; + else + tbio = tbio->bi_next = bio; + } + + DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" + " bio: %p page: %p len: %d off: %d\n", i, bio, + page, len, off); + + rc = bio_add_pc_page(pdv->pdv_sd->request_queue, + bio, page, bytes, off); + if (rc != bytes) + goto fail; + + DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", + bio->bi_vcnt, nr_vecs); + + if (bio->bi_vcnt > nr_vecs) { + DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" + " %d i: %d bio: %p, allocating another" + " bio\n", bio->bi_vcnt, i, bio); + /* + * Clear the pointer so that another bio will + * be allocated with pscsi_get_bio() above, the + * current bio has already been set *tbio and + * bio->bi_next. + */ + bio = NULL; + } + + page++; + len -= bytes; + data_len -= bytes; + off = 0; + } + } + /* + * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND + * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] + */ + if (!(bidi_read)) { + /* + * Starting with v2.6.31, call blk_make_request() passing in *hbio to + * allocate the pSCSI task a struct request. + */ + pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, + hbio, GFP_KERNEL); + if (!(pt->pscsi_req)) { + printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); + goto fail; + } + /* + * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, + * and setup rq callback, CDB and sense. + */ + pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); + + return task->task_sg_num; + } + /* + * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND + * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[] + */ + pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, + hbio, GFP_KERNEL); + if (!(pt->pscsi_req->next_rq)) { + printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); + goto fail; + } + pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); + + return task->task_sg_num; +fail: + while (hbio) { + bio = hbio; + hbio = hbio->bi_next; + bio->bi_next = NULL; + bio_endio(bio, 0); + } + return ret; +} + +static int pscsi_map_task_SG(struct se_task *task) +{ + int ret; + + /* + * Setup the main struct request for the task->task_sg[] payload + */ + + ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); + if (ret >= 0 && task->task_sg_bidi) { + /* + * If present, set up the extra BIDI-COMMAND SCSI READ + * struct request and payload. 
+ */ + ret = __pscsi_map_task_SG(task, task->task_sg_bidi, + task->task_sg_num, 1); + } + + if (ret < 0) + return PYX_TRANSPORT_LU_COMM_FAILURE; + return 0; +} + +/* pscsi_map_task_non_SG(): + * + * + */ +static int pscsi_map_task_non_SG(struct se_task *task) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + int ret = 0; + + if (pscsi_blk_get_request(task) < 0) + return PYX_TRANSPORT_LU_COMM_FAILURE; + + if (!task->task_size) + return 0; + + ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, + pt->pscsi_req, T_TASK(cmd)->t_task_buf, + task->task_size, GFP_KERNEL); + if (ret < 0) { + printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + return 0; +} + +static int pscsi_CDB_none(struct se_task *task) +{ + return pscsi_blk_get_request(task); +} + +/* pscsi_get_cdb(): + * + * + */ +static unsigned char *pscsi_get_cdb(struct se_task *task) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + + return pt->pscsi_cdb; +} + +/* pscsi_get_sense_buffer(): + * + * + */ +static unsigned char *pscsi_get_sense_buffer(struct se_task *task) +{ + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + + return (unsigned char *)&pt->pscsi_sense[0]; +} + +/* pscsi_get_device_rev(): + * + * + */ +static u32 pscsi_get_device_rev(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = dev->dev_ptr; + struct scsi_device *sd = pdv->pdv_sd; + + return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; +} + +/* pscsi_get_device_type(): + * + * + */ +static u32 pscsi_get_device_type(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = dev->dev_ptr; + struct scsi_device *sd = pdv->pdv_sd; + + return sd->type; +} + +static sector_t pscsi_get_blocks(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = dev->dev_ptr; + + if (pdv->pdv_bd && pdv->pdv_bd->bd_part) + return pdv->pdv_bd->bd_part->nr_sects; + + dump_stack(); + return 0; +} + +/* pscsi_handle_SAM_STATUS_failures(): + * + * + */ +static inline void pscsi_process_SAM_status( + struct se_task *task, + struct pscsi_plugin_task *pt) +{ + task->task_scsi_status = status_byte(pt->pscsi_result); + if ((task->task_scsi_status)) { + task->task_scsi_status <<= 1; + printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" + " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + pt->pscsi_result); + } + + switch (host_byte(pt->pscsi_result)) { + case DID_OK: + transport_complete_task(task, (!task->task_scsi_status)); + break; + default: + printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" + " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + pt->pscsi_result); + task->task_scsi_status = SAM_STAT_CHECK_CONDITION; + task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + TASK_CMD(task)->transport_error_status = + PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + transport_complete_task(task, 0); + break; + } + + return; +} + +static void pscsi_req_done(struct request *req, int uptodate) +{ + struct se_task *task = req->end_io_data; + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + + pt->pscsi_result = req->errors; + pt->pscsi_resid = req->resid_len; + + pscsi_process_SAM_status(task, pt); + /* + * Release BIDI-READ if present + */ + if (req->next_rq != NULL) + __blk_put_request(req->q, req->next_rq); + + __blk_put_request(req->q, req); + pt->pscsi_req = NULL; +} + +static struct se_subsystem_api pscsi_template = { + .name = "pscsi", + .owner = THIS_MODULE, + .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, + 
.cdb_none = pscsi_CDB_none, + .map_task_non_SG = pscsi_map_task_non_SG, + .map_task_SG = pscsi_map_task_SG, + .attach_hba = pscsi_attach_hba, + .detach_hba = pscsi_detach_hba, + .pmode_enable_hba = pscsi_pmode_enable_hba, + .allocate_virtdevice = pscsi_allocate_virtdevice, + .create_virtdevice = pscsi_create_virtdevice, + .free_device = pscsi_free_device, + .transport_complete = pscsi_transport_complete, + .alloc_task = pscsi_alloc_task, + .do_task = pscsi_do_task, + .free_task = pscsi_free_task, + .check_configfs_dev_params = pscsi_check_configfs_dev_params, + .set_configfs_dev_params = pscsi_set_configfs_dev_params, + .show_configfs_dev_params = pscsi_show_configfs_dev_params, + .get_cdb = pscsi_get_cdb, + .get_sense_buffer = pscsi_get_sense_buffer, + .get_device_rev = pscsi_get_device_rev, + .get_device_type = pscsi_get_device_type, + .get_blocks = pscsi_get_blocks, +}; + +static int __init pscsi_module_init(void) +{ + return transport_subsystem_register(&pscsi_template); +} + +static void pscsi_module_exit(void) +{ + transport_subsystem_release(&pscsi_template); +} + +MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(pscsi_module_init); +module_exit(pscsi_module_exit); diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h new file mode 100644 index 000000000000..a4cd5d352c3a --- /dev/null +++ b/drivers/target/target_core_pscsi.h @@ -0,0 +1,65 @@ +#ifndef TARGET_CORE_PSCSI_H +#define TARGET_CORE_PSCSI_H + +#define PSCSI_VERSION "v4.0" +#define PSCSI_VIRTUAL_HBA_DEPTH 2048 + +/* used in pscsi_find_alloc_len() */ +#ifndef INQUIRY_DATA_SIZE +#define INQUIRY_DATA_SIZE 0x24 +#endif + +/* used in pscsi_add_device_to_list() */ +#define PSCSI_DEFAULT_QUEUEDEPTH 1 + +#define PS_RETRY 5 +#define PS_TIMEOUT_DISK (15*HZ) +#define PS_TIMEOUT_OTHER (500*HZ) + +#include +#include +#include +#include +#include + +struct pscsi_plugin_task { + struct se_task pscsi_task; + unsigned char *pscsi_cdb; + unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; + unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; + int pscsi_direction; + int pscsi_result; + u32 pscsi_resid; + struct request *pscsi_req; +} ____cacheline_aligned; + +#define PDF_HAS_CHANNEL_ID 0x01 +#define PDF_HAS_TARGET_ID 0x02 +#define PDF_HAS_LUN_ID 0x04 +#define PDF_HAS_VPD_UNIT_SERIAL 0x08 +#define PDF_HAS_VPD_DEV_IDENT 0x10 +#define PDF_HAS_VIRT_HOST_ID 0x20 + +struct pscsi_dev_virt { + int pdv_flags; + int pdv_host_id; + int pdv_channel_id; + int pdv_target_id; + int pdv_lun_id; + struct block_device *pdv_bd; + struct scsi_device *pdv_sd; + struct se_hba *pdv_se_hba; +} ____cacheline_aligned; + +typedef enum phv_modes { + PHV_VIRUTAL_HOST_ID, + PHV_LLD_SCSI_HOST_NO +} phv_modes_t; + +struct pscsi_hba_virt { + int phv_host_id; + phv_modes_t phv_mode; + struct Scsi_Host *phv_lld_host; +} ____cacheline_aligned; + +#endif /*** TARGET_CORE_PSCSI_H ***/ diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c new file mode 100644 index 000000000000..979aebf20019 --- /dev/null +++ b/drivers/target/target_core_rd.c @@ -0,0 +1,1091 @@ +/******************************************************************************* + * Filename: target_core_rd.c + * + * This file contains the Storage Engine <-> Ramdisk transport + * specific functions. + * + * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. 
+ * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "target_core_rd.h" + +static struct se_subsystem_api rd_dr_template; +static struct se_subsystem_api rd_mcp_template; + +/* #define DEBUG_RAMDISK_MCP */ +/* #define DEBUG_RAMDISK_DR */ + +/* rd_attach_hba(): (Part of se_subsystem_api_t template) + * + * + */ +static int rd_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct rd_host *rd_host; + + rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); + if (!(rd_host)) { + printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); + return -ENOMEM; + } + + rd_host->rd_host_id = host_id; + + atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); + atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); + hba->hba_ptr = (void *) rd_host; + + printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" + " Generic Target Core Stack %s\n", hba->hba_id, + RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); + printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" + " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, + rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), + RD_MAX_SECTORS); + + return 0; +} + +static void rd_detach_hba(struct se_hba *hba) +{ + struct rd_host *rd_host = hba->hba_ptr; + + printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" + " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); + + kfree(rd_host); + hba->hba_ptr = NULL; +} + +/* rd_release_device_space(): + * + * + */ +static void rd_release_device_space(struct rd_dev *rd_dev) +{ + u32 i, j, page_count = 0, sg_per_table; + struct rd_dev_sg_table *sg_table; + struct page *pg; + struct scatterlist *sg; + + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) + return; + + sg_table = rd_dev->sg_table_array; + + for (i = 0; i < rd_dev->sg_table_count; i++) { + sg = sg_table[i].sg_table; + sg_per_table = sg_table[i].rd_sg_count; + + for (j = 0; j < sg_per_table; j++) { + pg = sg_page(&sg[j]); + if ((pg)) { + __free_page(pg); + page_count++; + } + } + + kfree(sg); + } + + printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + kfree(sg_table); + rd_dev->sg_table_array = NULL; + rd_dev->sg_table_count = 0; +} + + +/* rd_build_device_space(): + * + * + */ +static int rd_build_device_space(struct rd_dev *rd_dev) +{ + u32 i = 0, j, page_offset = 0, 
sg_per_table, sg_tables, total_sg_needed; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + struct rd_dev_sg_table *sg_table; + struct page *pg; + struct scatterlist *sg; + + if (rd_dev->rd_page_count <= 0) { + printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", + rd_dev->rd_page_count); + return -1; + } + total_sg_needed = rd_dev->rd_page_count; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!(sg_table)) { + printk(KERN_ERR "Unable to allocate memory for Ramdisk" + " scatterlist tables\n"); + return -1; + } + + rd_dev->sg_table_array = sg_table; + rd_dev->sg_table_count = sg_tables; + + while (total_sg_needed) { + sg_per_table = (total_sg_needed > max_sg_per_table) ? + max_sg_per_table : total_sg_needed; + + sg = kzalloc(sg_per_table * sizeof(struct scatterlist), + GFP_KERNEL); + if (!(sg)) { + printk(KERN_ERR "Unable to allocate scatterlist array" + " for struct rd_dev\n"); + return -1; + } + + sg_init_table((struct scatterlist *)&sg[0], sg_per_table); + + sg_table[i].sg_table = sg; + sg_table[i].rd_sg_count = sg_per_table; + sg_table[i].page_start_offset = page_offset; + sg_table[i++].page_end_offset = (page_offset + sg_per_table) + - 1; + + for (j = 0; j < sg_per_table; j++) { + pg = alloc_pages(GFP_KERNEL, 0); + if (!(pg)) { + printk(KERN_ERR "Unable to allocate scatterlist" + " pages for struct rd_dev_sg_table\n"); + return -1; + } + sg_assign_page(&sg[j], pg); + sg[j].length = PAGE_SIZE; + } + + page_offset += sg_per_table; + total_sg_needed -= sg_per_table; + } + + printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count); + + return 0; +} + +static void *rd_allocate_virtdevice( + struct se_hba *hba, + const char *name, + int rd_direct) +{ + struct rd_dev *rd_dev; + struct rd_host *rd_host = hba->hba_ptr; + + rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); + if (!(rd_dev)) { + printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); + return NULL; + } + + rd_dev->rd_host = rd_host; + rd_dev->rd_direct = rd_direct; + + return rd_dev; +} + +static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) +{ + return rd_allocate_virtdevice(hba, name, 1); +} + +static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) +{ + return rd_allocate_virtdevice(hba, name, 0); +} + +/* rd_create_virtdevice(): + * + * + */ +static struct se_device *rd_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p, + int rd_direct) +{ + struct se_device *dev; + struct se_dev_limits dev_limits; + struct rd_dev *rd_dev = p; + struct rd_host *rd_host = hba->hba_ptr; + int dev_flags = 0; + char prod[16], rev[4]; + + memset(&dev_limits, 0, sizeof(struct se_dev_limits)); + + if (rd_build_device_space(rd_dev) < 0) + goto fail; + + snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); + snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION : + RD_MCP_VERSION); + + dev_limits.limits.logical_block_size = RD_BLOCKSIZE; + dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; + dev_limits.limits.max_sectors = RD_MAX_SECTORS; + dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; + dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; + + dev = transport_add_device_to_core_hba(hba, + (rd_dev->rd_direct) ? 
&rd_dr_template : + &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, + &dev_limits, prod, rev); + if (!(dev)) + goto fail; + + rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; + rd_dev->rd_queue_depth = dev->queue_depth; + + printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" + " %u pages in %u tables, %lu total bytes\n", + rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : + "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count, + (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); + + return dev; + +fail: + rd_release_device_space(rd_dev); + return NULL; +} + +static struct se_device *rd_DIRECT_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p) +{ + return rd_create_virtdevice(hba, se_dev, p, 1); +} + +static struct se_device *rd_MEMCPY_create_virtdevice( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + void *p) +{ + return rd_create_virtdevice(hba, se_dev, p, 0); +} + +/* rd_free_device(): (Part of se_subsystem_api_t template) + * + * + */ +static void rd_free_device(void *p) +{ + struct rd_dev *rd_dev = p; + + rd_release_device_space(rd_dev); + kfree(rd_dev); +} + +static inline struct rd_request *RD_REQ(struct se_task *task) +{ + return container_of(task, struct rd_request, rd_task); +} + +static struct se_task * +rd_alloc_task(struct se_cmd *cmd) +{ + struct rd_request *rd_req; + + rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); + if (!rd_req) { + printk(KERN_ERR "Unable to allocate struct rd_request\n"); + return NULL; + } + rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; + + return &rd_req->rd_task; +} + +/* rd_get_sg_table(): + * + * + */ +static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) +{ + u32 i; + struct rd_dev_sg_table *sg_table; + + for (i = 0; i < rd_dev->sg_table_count; i++) { + sg_table = &rd_dev->sg_table_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + +/* rd_MEMCPY_read(): + * + * + */ +static int rd_MEMCPY_read(struct rd_request *req) +{ + struct se_task *task = &req->rd_task; + struct rd_dev *dev = req->rd_dev; + struct rd_dev_sg_table *table; + struct scatterlist *sg_d, *sg_s; + void *dst, *src; + u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; + u32 length, page_end = 0, table_sg_end; + u32 rd_offset = req->rd_offset; + + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + table_sg_end = (table->page_end_offset - req->rd_page); + sg_d = task->task_sg; + sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" + " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, + req->rd_page, req->rd_offset); +#endif + src_offset = rd_offset; + + while (req->rd_size) { + if ((sg_d[i].length - dst_offset) < + (sg_s[j].length - src_offset)) { + length = (sg_d[i].length - dst_offset); +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" + " offset: %u sg_s[%d].length: %u\n", i, + &sg_d[i], sg_d[i].length, sg_d[i].offset, j, + sg_s[j].length); + printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" + " src_offset: %u\n", length, dst_offset, + src_offset); +#endif + if (length > req->rd_size) + length = req->rd_size; + + dst = sg_virt(&sg_d[i++]) + dst_offset; + if (!dst) + BUG(); + + src = sg_virt(&sg_s[j]) + 
src_offset; + if (!src) + BUG(); + + dst_offset = 0; + src_offset = length; + page_end = 0; + } else { + length = (sg_s[j].length - src_offset); +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" + " offset: %u sg_s[%d].length: %u\n", i, + &sg_d[i], sg_d[i].length, sg_d[i].offset, + j, sg_s[j].length); + printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" + " src_offset: %u\n", length, dst_offset, + src_offset); +#endif + if (length > req->rd_size) + length = req->rd_size; + + dst = sg_virt(&sg_d[i]) + dst_offset; + if (!dst) + BUG(); + + if (sg_d[i].length == length) { + i++; + dst_offset = 0; + } else + dst_offset = length; + + src = sg_virt(&sg_s[j++]) + src_offset; + if (!src) + BUG(); + + src_offset = 0; + page_end = 1; + } + + memcpy(dst, src, length); + +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + " i: %u, j: %u\n", req->rd_page, + (req->rd_size - length), length, i, j); +#endif + req->rd_size -= length; + if (!(req->rd_size)) + return 0; + + if (!page_end) + continue; + + if (++req->rd_page <= table->page_end_offset) { +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "page: %u in same page table\n", + req->rd_page); +#endif + continue; + } +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "getting new page table for page: %u\n", + req->rd_page); +#endif + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + sg_s = &table->sg_table[j = 0]; + } + + return 0; +} + +/* rd_MEMCPY_write(): + * + * + */ +static int rd_MEMCPY_write(struct rd_request *req) +{ + struct se_task *task = &req->rd_task; + struct rd_dev *dev = req->rd_dev; + struct rd_dev_sg_table *table; + struct scatterlist *sg_d, *sg_s; + void *dst, *src; + u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; + u32 length, page_end = 0, table_sg_end; + u32 rd_offset = req->rd_offset; + + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + table_sg_end = (table->page_end_offset - req->rd_page); + sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; + sg_s = task->task_sg; +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," + " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, + req->rd_page, req->rd_offset); +#endif + dst_offset = rd_offset; + + while (req->rd_size) { + if ((sg_s[i].length - src_offset) < + (sg_d[j].length - dst_offset)) { + length = (sg_s[i].length - src_offset); +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" + " offset: %d sg_d[%d].length: %u\n", i, + &sg_s[i], sg_s[i].length, sg_s[i].offset, + j, sg_d[j].length); + printk(KERN_INFO "Step 1 - length: %u src_offset: %u" + " dst_offset: %u\n", length, src_offset, + dst_offset); +#endif + if (length > req->rd_size) + length = req->rd_size; + + src = sg_virt(&sg_s[i++]) + src_offset; + if (!src) + BUG(); + + dst = sg_virt(&sg_d[j]) + dst_offset; + if (!dst) + BUG(); + + src_offset = 0; + dst_offset = length; + page_end = 0; + } else { + length = (sg_d[j].length - dst_offset); +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" + " offset: %d sg_d[%d].length: %u\n", i, + &sg_s[i], sg_s[i].length, sg_s[i].offset, + j, sg_d[j].length); + printk(KERN_INFO "Step 2 - length: %u src_offset: %u" + " dst_offset: %u\n", length, src_offset, + dst_offset); +#endif + if (length > req->rd_size) + length = req->rd_size; + + src = sg_virt(&sg_s[i]) + src_offset; + if (!src) + BUG(); + + if (sg_s[i].length == length) { + i++; + src_offset = 0; + 
} else + src_offset = length; + + dst = sg_virt(&sg_d[j++]) + dst_offset; + if (!dst) + BUG(); + + dst_offset = 0; + page_end = 1; + } + + memcpy(dst, src, length); + +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + " i: %u, j: %u\n", req->rd_page, + (req->rd_size - length), length, i, j); +#endif + req->rd_size -= length; + if (!(req->rd_size)) + return 0; + + if (!page_end) + continue; + + if (++req->rd_page <= table->page_end_offset) { +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "page: %u in same page table\n", + req->rd_page); +#endif + continue; + } +#ifdef DEBUG_RAMDISK_MCP + printk(KERN_INFO "getting new page table for page: %u\n", + req->rd_page); +#endif + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + sg_d = &table->sg_table[j = 0]; + } + + return 0; +} + +/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) + * + * + */ +static int rd_MEMCPY_do_task(struct se_task *task) +{ + struct se_device *dev = task->se_dev; + struct rd_request *req = RD_REQ(task); + unsigned long long lba; + int ret; + + req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; + lba = task->task_lba; + req->rd_offset = (do_div(lba, + (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * + DEV_ATTRIB(dev)->block_size; + req->rd_size = task->task_size; + + if (task->task_data_direction == DMA_FROM_DEVICE) + ret = rd_MEMCPY_read(req); + else + ret = rd_MEMCPY_write(req); + + if (ret != 0) + return ret; + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +/* rd_DIRECT_with_offset(): + * + * + */ +static int rd_DIRECT_with_offset( + struct se_task *task, + struct list_head *se_mem_list, + u32 *se_mem_cnt, + u32 *task_offset) +{ + struct rd_request *req = RD_REQ(task); + struct rd_dev *dev = req->rd_dev; + struct rd_dev_sg_table *table; + struct se_mem *se_mem; + struct scatterlist *sg_s; + u32 j = 0, set_offset = 1; + u32 get_next_table = 0, offset_length, table_sg_end; + + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + table_sg_end = (table->page_end_offset - req->rd_page); + sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", + (task->task_data_direction == DMA_TO_DEVICE) ? + "Write" : "Read", + task->task_lba, req->rd_size, req->rd_page, req->rd_offset); +#endif + while (req->rd_size) { + se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); + if (!(se_mem)) { + printk(KERN_ERR "Unable to allocate struct se_mem\n"); + return -1; + } + INIT_LIST_HEAD(&se_mem->se_list); + + if (set_offset) { + offset_length = sg_s[j].length - req->rd_offset; + if (offset_length > req->rd_size) + offset_length = req->rd_size; + + se_mem->se_page = sg_page(&sg_s[j++]); + se_mem->se_off = req->rd_offset; + se_mem->se_len = offset_length; + + set_offset = 0; + get_next_table = (j > table_sg_end); + goto check_eot; + } + + offset_length = (req->rd_size < req->rd_offset) ? 
+ req->rd_size : req->rd_offset; + + se_mem->se_page = sg_page(&sg_s[j]); + se_mem->se_len = offset_length; + + set_offset = 1; + +check_eot: +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" + " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", + req->rd_page, req->rd_size, offset_length, j, se_mem, + se_mem->se_page, se_mem->se_off, se_mem->se_len); +#endif + list_add_tail(&se_mem->se_list, se_mem_list); + (*se_mem_cnt)++; + + req->rd_size -= offset_length; + if (!(req->rd_size)) + goto out; + + if (!set_offset && !get_next_table) + continue; + + if (++req->rd_page <= table->page_end_offset) { +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "page: %u in same page table\n", + req->rd_page); +#endif + continue; + } +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "getting new page table for page: %u\n", + req->rd_page); +#endif + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + sg_s = &table->sg_table[j = 0]; + } + +out: + T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", + *se_mem_cnt); +#endif + return 0; +} + +/* rd_DIRECT_without_offset(): + * + * + */ +static int rd_DIRECT_without_offset( + struct se_task *task, + struct list_head *se_mem_list, + u32 *se_mem_cnt, + u32 *task_offset) +{ + struct rd_request *req = RD_REQ(task); + struct rd_dev *dev = req->rd_dev; + struct rd_dev_sg_table *table; + struct se_mem *se_mem; + struct scatterlist *sg_s; + u32 length, j = 0; + + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", + (task->task_data_direction == DMA_TO_DEVICE) ? + "Write" : "Read", + task->task_lba, req->rd_size, req->rd_page); +#endif + while (req->rd_size) { + se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); + if (!(se_mem)) { + printk(KERN_ERR "Unable to allocate struct se_mem\n"); + return -1; + } + INIT_LIST_HEAD(&se_mem->se_list); + + length = (req->rd_size < sg_s[j].length) ? 
+ req->rd_size : sg_s[j].length; + + se_mem->se_page = sg_page(&sg_s[j++]); + se_mem->se_len = length; + +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," + " se_page: %p se_off: %u se_len: %u\n", req->rd_page, + req->rd_size, j, se_mem, se_mem->se_page, + se_mem->se_off, se_mem->se_len); +#endif + list_add_tail(&se_mem->se_list, se_mem_list); + (*se_mem_cnt)++; + + req->rd_size -= length; + if (!(req->rd_size)) + goto out; + + if (++req->rd_page <= table->page_end_offset) { +#ifdef DEBUG_RAMDISK_DR + printk("page: %u in same page table\n", + req->rd_page); +#endif + continue; + } +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "getting new page table for page: %u\n", + req->rd_page); +#endif + table = rd_get_sg_table(dev, req->rd_page); + if (!(table)) + return -1; + + sg_s = &table->sg_table[j = 0]; + } + +out: + T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; +#ifdef DEBUG_RAMDISK_DR + printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", + *se_mem_cnt); +#endif + return 0; +} + +/* rd_DIRECT_do_se_mem_map(): + * + * + */ +static int rd_DIRECT_do_se_mem_map( + struct se_task *task, + struct list_head *se_mem_list, + void *in_mem, + struct se_mem *in_se_mem, + struct se_mem **out_se_mem, + u32 *se_mem_cnt, + u32 *task_offset_in) +{ + struct se_cmd *cmd = task->task_se_cmd; + struct rd_request *req = RD_REQ(task); + u32 task_offset = *task_offset_in; + unsigned long long lba; + int ret; + + req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / + PAGE_SIZE); + lba = task->task_lba; + req->rd_offset = (do_div(lba, + (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * + DEV_ATTRIB(task->se_dev)->block_size; + req->rd_size = task->task_size; + + if (req->rd_offset) + ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, + task_offset_in); + else + ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, + task_offset_in); + + if (ret < 0) + return ret; + + if (CMD_TFO(cmd)->task_sg_chaining == 0) + return 0; + /* + * Currently prevent writers from multiple HW fabrics doing + * pci_map_sg() to RD_DR's internal scatterlist memory. + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + printk(KERN_ERR "DMA_TO_DEVICE not supported for" + " RAMDISK_DR with task_sg_chaining=1\n"); + return -1; + } + /* + * Special case: if task_sg_chaining is enabled, then + * we set up struct se_task->task_sg[], as it will be used by + * transport_do_task_sg_chain() for creating chained SGLs + * across multiple struct se_task->task_sg[]. + */ + if (!(transport_calc_sg_num(task, + list_entry(T_TASK(cmd)->t_mem_list->next, + struct se_mem, se_list), + task_offset))) + return -1; + + return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, + list_entry(T_TASK(cmd)->t_mem_list->next, + struct se_mem, se_list), + out_se_mem, se_mem_cnt, task_offset_in); +} + +/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) + * + * + */ +static int rd_DIRECT_do_task(struct se_task *task) +{ + /* + * At this point the locally allocated RD tables have been mapped + * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+ */ + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; +} + +/* rd_free_task(): (Part of se_subsystem_api_t template) + * + * + */ +static void rd_free_task(struct se_task *task) +{ + kfree(RD_REQ(task)); +} + +enum { + Opt_rd_pages, Opt_err +}; + +static match_table_t tokens = { + {Opt_rd_pages, "rd_pages=%d"}, + {Opt_err, NULL} +}; + +static ssize_t rd_set_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + const char *page, + ssize_t count) +{ + struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_rd_pages: + match_int(args, &arg); + rd_dev->rd_page_count = arg; + printk(KERN_INFO "RAMDISK: Referencing Page" + " Count: %u\n", rd_dev->rd_page_count); + rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; + break; + default: + break; + } + } + + kfree(orig); + return (!ret) ? count : ret; +} + +static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) +{ + struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; + + if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { + printk(KERN_INFO "Missing rd_pages= parameter\n"); + return -1; + } + + return 0; +} + +static ssize_t rd_show_configfs_dev_params( + struct se_hba *hba, + struct se_subsystem_dev *se_dev, + char *b) +{ + struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; + ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", + rd_dev->rd_dev_id, (rd_dev->rd_direct) ? + "rd_direct" : "rd_mcp"); + bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" + " SG_table_count: %u\n", rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count); + return bl; +} + +/* rd_get_cdb(): (Part of se_subsystem_api_t template) + * + * + */ +static unsigned char *rd_get_cdb(struct se_task *task) +{ + struct rd_request *req = RD_REQ(task); + + return req->rd_scsi_cdb; +} + +static u32 rd_get_device_rev(struct se_device *dev) +{ + return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ +} + +static u32 rd_get_device_type(struct se_device *dev) +{ + return TYPE_DISK; +} + +static sector_t rd_get_blocks(struct se_device *dev) +{ + struct rd_dev *rd_dev = dev->dev_ptr; + unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / + DEV_ATTRIB(dev)->block_size) - 1; + + return blocks_long; +} + +static struct se_subsystem_api rd_dr_template = { + .name = "rd_dr", + .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, + .attach_hba = rd_attach_hba, + .detach_hba = rd_detach_hba, + .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, + .create_virtdevice = rd_DIRECT_create_virtdevice, + .free_device = rd_free_device, + .alloc_task = rd_alloc_task, + .do_task = rd_DIRECT_do_task, + .free_task = rd_free_task, + .check_configfs_dev_params = rd_check_configfs_dev_params, + .set_configfs_dev_params = rd_set_configfs_dev_params, + .show_configfs_dev_params = rd_show_configfs_dev_params, + .get_cdb = rd_get_cdb, + .get_device_rev = rd_get_device_rev, + .get_device_type = rd_get_device_type, + .get_blocks = rd_get_blocks, + .do_se_mem_map = rd_DIRECT_do_se_mem_map, +}; + +static struct se_subsystem_api rd_mcp_template = { + .name = "rd_mcp", + .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, + .attach_hba = rd_attach_hba, + .detach_hba = rd_detach_hba, + 
.allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, + .create_virtdevice = rd_MEMCPY_create_virtdevice, + .free_device = rd_free_device, + .alloc_task = rd_alloc_task, + .do_task = rd_MEMCPY_do_task, + .free_task = rd_free_task, + .check_configfs_dev_params = rd_check_configfs_dev_params, + .set_configfs_dev_params = rd_set_configfs_dev_params, + .show_configfs_dev_params = rd_show_configfs_dev_params, + .get_cdb = rd_get_cdb, + .get_device_rev = rd_get_device_rev, + .get_device_type = rd_get_device_type, + .get_blocks = rd_get_blocks, +}; + +int __init rd_module_init(void) +{ + int ret; + + ret = transport_subsystem_register(&rd_dr_template); + if (ret < 0) + return ret; + + ret = transport_subsystem_register(&rd_mcp_template); + if (ret < 0) { + transport_subsystem_release(&rd_dr_template); + return ret; + } + + return 0; +} + +void rd_module_exit(void) +{ + transport_subsystem_release(&rd_dr_template); + transport_subsystem_release(&rd_mcp_template); +} diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h new file mode 100644 index 000000000000..13badfbaf9c0 --- /dev/null +++ b/drivers/target/target_core_rd.h @@ -0,0 +1,73 @@ +#ifndef TARGET_CORE_RD_H +#define TARGET_CORE_RD_H + +#define RD_HBA_VERSION "v4.0" +#define RD_DR_VERSION "4.0" +#define RD_MCP_VERSION "4.0" + +/* Largest piece of memory kmalloc can allocate */ +#define RD_MAX_ALLOCATION_SIZE 65536 +/* Maximum queuedepth for the Ramdisk HBA */ +#define RD_HBA_QUEUE_DEPTH 256 +#define RD_DEVICE_QUEUE_DEPTH 32 +#define RD_MAX_DEVICE_QUEUE_DEPTH 128 +#define RD_BLOCKSIZE 512 +#define RD_MAX_SECTORS 1024 + +extern struct kmem_cache *se_mem_cache; + +/* Used in target_core_init_configfs() for virtual LUN 0 access */ +int __init rd_module_init(void); +void rd_module_exit(void); + +#define RRF_EMULATE_CDB 0x01 +#define RRF_GOT_LBA 0x02 + +struct rd_request { + struct se_task rd_task; + + /* SCSI CDB from iSCSI Command PDU */ + unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; + /* Offset from start of page */ + u32 rd_offset; + /* Starting page in Ramdisk for request */ + u32 rd_page; + /* Total number of pages needed for request */ + u32 rd_page_count; + /* Scatterlist count */ + u32 rd_size; + /* Ramdisk device */ + struct rd_dev *rd_dev; +} ____cacheline_aligned; + +struct rd_dev_sg_table { + u32 page_start_offset; + u32 page_end_offset; + u32 rd_sg_count; + struct scatterlist *sg_table; +} ____cacheline_aligned; + +#define RDF_HAS_PAGE_COUNT 0x01 + +struct rd_dev { + int rd_direct; + u32 rd_flags; + /* Unique Ramdisk Device ID in Ramdisk HBA */ + u32 rd_dev_id; + /* Total page count for ramdisk device */ + u32 rd_page_count; + /* Number of SG tables in sg_table_array */ + u32 sg_table_count; + u32 rd_queue_depth; + /* Array of rd_dev_sg_table_t containing scatterlists */ + struct rd_dev_sg_table *sg_table_array; + /* Ramdisk HBA device is connected to */ + struct rd_host *rd_host; +} ____cacheline_aligned; + +struct rd_host { + u32 rd_host_dev_id_count; + u32 rd_host_id; /* Unique Ramdisk Host ID */ +} ____cacheline_aligned; + +#endif /* TARGET_CORE_RD_H */ diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c new file mode 100644 index 000000000000..dc6fed037ab3 --- /dev/null +++ b/drivers/target/target_core_scdb.c @@ -0,0 +1,105 @@ +/******************************************************************************* + * Filename: target_core_scdb.c + * + * This file contains the generic target engine Split CDB related functions. 
+ * + * Copyright (c) 2004-2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include + +#include +#include + +#include "target_core_scdb.h" + +/* split_cdb_XX_6(): + * + * 21-bit LBA w/ 8-bit SECTORS + */ +void split_cdb_XX_6( + unsigned long long lba, + u32 *sectors, + unsigned char *cdb) +{ + cdb[1] = (lba >> 16) & 0x1f; + cdb[2] = (lba >> 8) & 0xff; + cdb[3] = lba & 0xff; + cdb[4] = *sectors & 0xff; +} + +/* split_cdb_XX_10(): + * + * 32-bit LBA w/ 16-bit SECTORS + */ +void split_cdb_XX_10( + unsigned long long lba, + u32 *sectors, + unsigned char *cdb) +{ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be16(*sectors, &cdb[7]); +} + +/* split_cdb_XX_12(): + * + * 32-bit LBA w/ 32-bit SECTORS + */ +void split_cdb_XX_12( + unsigned long long lba, + u32 *sectors, + unsigned char *cdb) +{ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be32(*sectors, &cdb[6]); +} + +/* split_cdb_XX_16(): + * + * 64-bit LBA w/ 32-bit SECTORS + */ +void split_cdb_XX_16( + unsigned long long lba, + u32 *sectors, + unsigned char *cdb) +{ + put_unaligned_be64(lba, &cdb[2]); + put_unaligned_be32(*sectors, &cdb[10]); +} + +/* + * split_cdb_XX_32(): + * + * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 + */ +void split_cdb_XX_32( + unsigned long long lba, + u32 *sectors, + unsigned char *cdb) +{ + put_unaligned_be64(lba, &cdb[12]); + put_unaligned_be32(*sectors, &cdb[28]); +} diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h new file mode 100644 index 000000000000..98cd1c01ed83 --- /dev/null +++ b/drivers/target/target_core_scdb.h @@ -0,0 +1,10 @@ +#ifndef TARGET_CORE_SCDB_H +#define TARGET_CORE_SCDB_H + +extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *); + +#endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c new file mode 100644 index 000000000000..158cecbec718 --- /dev/null +++ b/drivers/target/target_core_tmr.c @@ -0,0 +1,404 @@ +/******************************************************************************* + * Filename: target_core_tmr.c + * + * This file contains SPC-3 task management infrastructure + * + * Copyright (c) 2009,2010 Rising Tide Systems + * Copyright (c) 2009,2010 
Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_pr.h" + +#define DEBUG_LUN_RESET +#ifdef DEBUG_LUN_RESET +#define DEBUG_LR(x...) printk(KERN_INFO x) +#else +#define DEBUG_LR(x...) +#endif + +struct se_tmr_req *core_tmr_alloc_req( + struct se_cmd *se_cmd, + void *fabric_tmr_ptr, + u8 function) +{ + struct se_tmr_req *tmr; + + tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL); + if (!(tmr)) { + printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); + return ERR_PTR(-ENOMEM); + } + tmr->task_cmd = se_cmd; + tmr->fabric_tmr_ptr = fabric_tmr_ptr; + tmr->function = function; + INIT_LIST_HEAD(&tmr->tmr_list); + + return tmr; +} +EXPORT_SYMBOL(core_tmr_alloc_req); + +void core_tmr_release_req( + struct se_tmr_req *tmr) +{ + struct se_device *dev = tmr->tmr_dev; + + spin_lock(&dev->se_tmr_lock); + list_del(&tmr->tmr_list); + kmem_cache_free(se_tmr_req_cache, tmr); + spin_unlock(&dev->se_tmr_lock); +} + +static void core_tmr_handle_tas_abort( + struct se_node_acl *tmr_nacl, + struct se_cmd *cmd, + int tas, + int fe_count) +{ + if (!(fe_count)) { + transport_cmd_finish_abort(cmd, 1); + return; + } + /* + * TASK ABORTED status (TAS) bit support + */ + if (((tmr_nacl != NULL) && + (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) + transport_send_task_abort(cmd); + + transport_cmd_finish_abort(cmd, 0); +} + +int core_tmr_lun_reset( + struct se_device *dev, + struct se_tmr_req *tmr, + struct list_head *preempt_and_abort_list, + struct se_cmd *prout_cmd) +{ + struct se_cmd *cmd; + struct se_queue_req *qr, *qr_tmp; + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; + struct se_queue_obj *qobj = dev->dev_queue_obj; + struct se_tmr_req *tmr_p, *tmr_pp; + struct se_task *task, *task_tmp; + unsigned long flags; + int fe_count, state, tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + tas = DEV_ATTRIB(dev)->emulate_tas; + /* + * Determine if this se_tmr is coming from a $FABRIC_MOD + * or struct se_device passthrough.. 
+ */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { + tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; + tmr_tpg = tmr->task_cmd->se_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + DEBUG_LR("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", + TPG_TFO(tmr_tpg)->get_fabric_name(), + tmr_nacl->initiatorname); + } + } + DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", + (preempt_and_abort_list) ? "Preempt" : "TMR", + TRANSPORT(dev)->name, tas); + /* + * Release all pending and outgoing TMRs aside from the received + * LUN_RESET tmr.. + */ + spin_lock(&dev->se_tmr_lock); + list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { + /* + * Allow the received TMR to return with FUNCTION_COMPLETE. + */ + if (tmr && (tmr_p == tmr)) + continue; + + cmd = tmr_p->task_cmd; + if (!(cmd)) { + printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); + continue; + } + /* + * If this function was called with a valid pr_res_key + * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action), + * skip TMRs that do not match a registration key. + */ + if ((preempt_and_abort_list != NULL) && + (core_scsi3_check_cdb_abort_and_preempt( + preempt_and_abort_list, cmd) != 0)) + continue; + spin_unlock(&dev->se_tmr_lock); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock(&dev->se_tmr_lock); + continue; + } + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock(&dev->se_tmr_lock); + continue; + } + DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," + " Response: 0x%02x, t_state: %d\n", + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_cmd_finish_abort_tmr(cmd); + spin_lock(&dev->se_tmr_lock); + } + spin_unlock(&dev->se_tmr_lock); + /* + * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. + * This is following sam4r17, section 5.6 Aborting commands, Table 38 + * for TMR LUN_RESET: + * + * a) "Yes" indicates that each command that is aborted on an I_T nexus + * other than the one that caused the SCSI device condition is + * completed with TASK ABORTED status, if the TAS bit is set to one in + * the Control mode page (see SPC-4). "No" indicates that no status is + * returned for aborted commands. + * + * d) If the logical unit reset is caused by a particular I_T nexus + * (e.g., by a LOGICAL UNIT RESET task management function), then "yes" + * (TASK_ABORTED status) applies. + * + * Otherwise (e.g., if triggered by a hard reset), "no" + * (no TASK_ABORTED SAM status) applies. + * + * Note that this seems to be independent of TAS (Task Aborted Status) + * in the Control Mode Page. + */ + spin_lock_irqsave(&dev->execute_task_lock, flags); + list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, + t_state_list) { + if (!(TASK_CMD(task))) { + printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + continue; + } + cmd = TASK_CMD(task); + + if (!T_TASK(cmd)) { + printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" + " %p ITT: 0x%08x\n", task, cmd, + CMD_TFO(cmd)->get_task_tag(cmd)); + continue; + } + /* + * For PREEMPT_AND_ABORT usage, only process commands + * with a matching reservation key.
+ */ + if ((preempt_and_abort_list != NULL) && + (core_scsi3_check_cdb_abort_and_preempt( + preempt_and_abort_list, cmd) != 0)) + continue; + /* + * Not aborting PROUT PREEMPT_AND_ABORT CDB.. + */ + if (prout_cmd == cmd) + continue; + + list_del(&task->t_state_list); + atomic_set(&task->task_state_active, 0); + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" + "def_t_state: %d/%d cdb: 0x%02x\n", + (preempt_and_abort_list) ? "Preempt" : "", cmd, task, + CMD_TFO(cmd)->get_task_tag(cmd), 0, + CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, + cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); + DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" + " t_task_cdbs: %d t_task_cdbs_left: %d" + " t_task_cdbs_sent: %d -- t_transport_active: %d" + " t_transport_stop: %d t_transport_sent: %d\n", + CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, + T_TASK(cmd)->t_task_cdbs, + atomic_read(&T_TASK(cmd)->t_task_cdbs_left), + atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), + atomic_read(&T_TASK(cmd)->t_transport_active), + atomic_read(&T_TASK(cmd)->t_transport_stop), + atomic_read(&T_TASK(cmd)->t_transport_sent)); + + if (atomic_read(&task->task_active)) { + atomic_set(&task->task_stop, 1); + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" + " for dev: %p\n", task, dev); + wait_for_completion(&task->task_stop_comp); + DEBUG_LR("LUN_RESET Completed task: %p shutdown for" + " dev: %p\n", task, dev); + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + + atomic_set(&task->task_active, 0); + atomic_set(&task->task_stop, 0); + } + __transport_stop_task_timer(task, &flags); + + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" + " t_task_cdbs_ex_left: %d\n", task, dev, + atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + + spin_lock_irqsave(&dev->execute_task_lock, flags); + continue; + } + fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); + + if (atomic_read(&T_TASK(cmd)->t_transport_active)) { + DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" + " task: %p, t_fe_count: %d dev: %p\n", task, + fe_count, dev); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); + + spin_lock_irqsave(&dev->execute_task_lock, flags); + continue; + } + DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," + " t_fe_count: %d dev: %p\n", task, fe_count, dev); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); + + spin_lock_irqsave(&dev->execute_task_lock, flags); + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + /* + * Release all commands remaining in the struct se_device cmd queue. + * + * This follows the same logic as above for the struct se_device + * struct se_task state list, where commands are returned with + * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD + * reference, otherwise the struct se_cmd is released. + */ + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { + cmd = (struct se_cmd *)qr->cmd; + if (!(cmd)) { + /* + * Skip these for non PREEMPT_AND_ABORT usage.. 
+ */ + if (preempt_and_abort_list != NULL) + continue; + + atomic_dec(&qobj->queue_cnt); + list_del(&qr->qr_list); + kfree(qr); + continue; + } + /* + * For PREEMPT_AND_ABORT usage, only process commands + * with a matching reservation key. + */ + if ((preempt_and_abort_list != NULL) && + (core_scsi3_check_cdb_abort_and_preempt( + preempt_and_abort_list, cmd) != 0)) + continue; + /* + * Not aborting PROUT PREEMPT_AND_ABORT CDB.. + */ + if (prout_cmd == cmd) + continue; + + atomic_dec(&T_TASK(cmd)->t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&qr->qr_list); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + state = qr->state; + kfree(qr); + + DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" + " %d t_fe_count: %d\n", (preempt_and_abort_list) ? + "Preempt" : "", cmd, state, + atomic_read(&T_TASK(cmd)->t_fe_count)); + /* + * Signal that the command has failed via cmd->se_cmd_flags, + * and call TFO->new_cmd_failure() to wakeup any fabric + * dependent code used to wait for unsolicited data out + * allocation to complete. The fabric module is expected + * to dump any remaining unsolicited data out for the aborted + * command at this point. + */ + transport_new_cmd_failure(cmd); + + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, + atomic_read(&T_TASK(cmd)->t_fe_count)); + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + } + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + /* + * Clear any legacy SPC-2 reservation when called during + * LOGICAL UNIT RESET + */ + if (!(preempt_and_abort_list) && + (dev->dev_flags & DF_SPC2_RESERVATIONS)) { + spin_lock(&dev->dev_reservation_lock); + dev->dev_reserved_node_acl = NULL; + dev->dev_flags &= ~DF_SPC2_RESERVATIONS; + spin_unlock(&dev->dev_reservation_lock); + printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); + } + + spin_lock(&dev->stats_lock); + dev->num_resets++; + spin_unlock(&dev->stats_lock); + + DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", + (preempt_and_abort_list) ? "Preempt" : "TMR", + TRANSPORT(dev)->name); + return 0; +} diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c new file mode 100644 index 000000000000..abfa81a57115 --- /dev/null +++ b/drivers/target/target_core_tpg.c @@ -0,0 +1,826 @@ +/******************************************************************************* + * Filename: target_core_tpg.c + * + * This file contains generic Target Portal Group related functions. + * + * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_hba.h" + +/* core_clear_initiator_node_from_tpg(): + * + * + */ +static void core_clear_initiator_node_from_tpg( + struct se_node_acl *nacl, + struct se_portal_group *tpg) +{ + int i; + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_lun_acl *acl, *acl_tmp; + + spin_lock_irq(&nacl->device_list_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &nacl->device_list[i]; + + if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) + continue; + + if (!deve->se_lun) { + printk(KERN_ERR "%s device entries device pointer is" + " NULL, but Initiator has access.\n", + TPG_TFO(tpg)->get_fabric_name()); + continue; + } + + lun = deve->se_lun; + spin_unlock_irq(&nacl->device_list_lock); + core_update_device_list_for_node(lun, NULL, deve->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); + + spin_lock(&lun->lun_acl_lock); + list_for_each_entry_safe(acl, acl_tmp, + &lun->lun_acl_list, lacl_list) { + if (!(strcmp(acl->initiatorname, + nacl->initiatorname)) && + (acl->mapped_lun == deve->mapped_lun)) + break; + } + + if (!acl) { + printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," + " mapped_lun: %u\n", nacl->initiatorname, + deve->mapped_lun); + spin_unlock(&lun->lun_acl_lock); + spin_lock_irq(&nacl->device_list_lock); + continue; + } + + list_del(&acl->lacl_list); + spin_unlock(&lun->lun_acl_lock); + + spin_lock_irq(&nacl->device_list_lock); + kfree(acl); + } + spin_unlock_irq(&nacl->device_list_lock); +} + +/* __core_tpg_get_initiator_node_acl(): + * + * spin_lock_bh(&tpg->acl_node_lock); must be held when calling + */ +struct se_node_acl *__core_tpg_get_initiator_node_acl( + struct se_portal_group *tpg, + const char *initiatorname) +{ + struct se_node_acl *acl; + + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (!(strcmp(acl->initiatorname, initiatorname))) + return acl; + } + + return NULL; +} + +/* core_tpg_get_initiator_node_acl(): + * + * + */ +struct se_node_acl *core_tpg_get_initiator_node_acl( + struct se_portal_group *tpg, + unsigned char *initiatorname) +{ + struct se_node_acl *acl; + + spin_lock_bh(&tpg->acl_node_lock); + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (!(strcmp(acl->initiatorname, initiatorname)) && + (!(acl->dynamic_node_acl))) { + spin_unlock_bh(&tpg->acl_node_lock); + return acl; + } + } + spin_unlock_bh(&tpg->acl_node_lock); + + return NULL; +} + +/* core_tpg_add_node_to_devs(): + * + * + */ +void core_tpg_add_node_to_devs( + struct se_node_acl *acl, + struct se_portal_group *tpg) +{ + int i = 0; + u32 lun_access = 0; + struct se_lun *lun; + struct se_device *dev; + + spin_lock(&tpg->tpg_lun_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + lun = &tpg->tpg_lun_list[i]; + if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) + continue; + + spin_unlock(&tpg->tpg_lun_lock); + + dev = lun->lun_se_dev; + /* + * By default in LIO-Target $FABRIC_MOD, + * demo_mode_write_protect is ON, or READ_ONLY; + */ + if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { + if (dev->dev_flags & DF_READ_ONLY) + lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + else + lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; + } else { + /* + * Allow only optical drives to issue R/W in default RO + * demo mode. 
+ */ + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) + lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + else + lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; + } + + printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" + " access for LUN in Demo Mode\n", + TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? + "READ-WRITE" : "READ-ONLY"); + + core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, + lun_access, acl, tpg, 1); + spin_lock(&tpg->tpg_lun_lock); + } + spin_unlock(&tpg->tpg_lun_lock); +} + +/* core_set_queue_depth_for_node(): + * + * + */ +static int core_set_queue_depth_for_node( + struct se_portal_group *tpg, + struct se_node_acl *acl) +{ + if (!acl->queue_depth) { + printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," + "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), + acl->initiatorname); + acl->queue_depth = 1; + } + + return 0; +} + +/* core_create_device_list_for_node(): + * + * + */ +static int core_create_device_list_for_node(struct se_node_acl *nacl) +{ + struct se_dev_entry *deve; + int i; + + nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * + TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); + if (!(nacl->device_list)) { + printk(KERN_ERR "Unable to allocate memory for" + " struct se_node_acl->device_list\n"); + return -1; + } + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + deve = &nacl->device_list[i]; + + atomic_set(&deve->ua_count, 0); + atomic_set(&deve->pr_ref_count, 0); + spin_lock_init(&deve->ua_lock); + INIT_LIST_HEAD(&deve->alua_port_list); + INIT_LIST_HEAD(&deve->ua_list); + } + + return 0; +} + +/* core_tpg_check_initiator_node_acl() + * + * + */ +struct se_node_acl *core_tpg_check_initiator_node_acl( + struct se_portal_group *tpg, + unsigned char *initiatorname) +{ + struct se_node_acl *acl; + + acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); + if ((acl)) + return acl; + + if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) + return NULL; + + acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); + if (!(acl)) + return NULL; + + INIT_LIST_HEAD(&acl->acl_list); + INIT_LIST_HEAD(&acl->acl_sess_list); + spin_lock_init(&acl->device_list_lock); + spin_lock_init(&acl->nacl_sess_lock); + atomic_set(&acl->acl_pr_ref_count, 0); + atomic_set(&acl->mib_ref_count, 0); + acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); + snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + acl->se_tpg = tpg; + acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); + spin_lock_init(&acl->stats_lock); + acl->dynamic_node_acl = 1; + + TPG_TFO(tpg)->set_default_node_attributes(acl); + + if (core_create_device_list_for_node(acl) < 0) { + TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + return NULL; + } + + if (core_set_queue_depth_for_node(tpg, acl) < 0) { + core_free_device_list_for_node(acl, tpg); + TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + return NULL; + } + + core_tpg_add_node_to_devs(acl, tpg); + + spin_lock_bh(&tpg->acl_node_lock); + list_add_tail(&acl->acl_list, &tpg->acl_node_list); + tpg->num_node_acls++; + spin_unlock_bh(&tpg->acl_node_lock); + + printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, + TPG_TFO(tpg)->get_fabric_name(), initiatorname); + + return acl; +} +EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); + +void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) +{ + while 
(atomic_read(&nacl->acl_pr_ref_count) != 0) + cpu_relax(); +} + +void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) +{ + while (atomic_read(&nacl->mib_ref_count) != 0) + cpu_relax(); +} + +void core_tpg_clear_object_luns(struct se_portal_group *tpg) +{ + int i, ret; + struct se_lun *lun; + + spin_lock(&tpg->tpg_lun_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + lun = &tpg->tpg_lun_list[i]; + + if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || + (lun->lun_se_dev == NULL)) + continue; + + spin_unlock(&tpg->tpg_lun_lock); + ret = core_dev_del_lun(tpg, lun->unpacked_lun); + spin_lock(&tpg->tpg_lun_lock); + } + spin_unlock(&tpg->tpg_lun_lock); +} +EXPORT_SYMBOL(core_tpg_clear_object_luns); + +/* core_tpg_add_initiator_node_acl(): + * + * + */ +struct se_node_acl *core_tpg_add_initiator_node_acl( + struct se_portal_group *tpg, + struct se_node_acl *se_nacl, + const char *initiatorname, + u32 queue_depth) +{ + struct se_node_acl *acl = NULL; + + spin_lock_bh(&tpg->acl_node_lock); + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); + if ((acl)) { + if (acl->dynamic_node_acl) { + acl->dynamic_node_acl = 0; + printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" + " for %s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); + spin_unlock_bh(&tpg->acl_node_lock); + /* + * Release the locally allocated struct se_node_acl + * because * core_tpg_add_initiator_node_acl() returned + * a pointer to an existing demo mode node ACL. + */ + if (se_nacl) + TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, + se_nacl); + goto done; + } + + printk(KERN_ERR "ACL entry for %s Initiator" + " Node %s already exists for TPG %u, ignoring" + " request.\n", TPG_TFO(tpg)->get_fabric_name(), + initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock_bh(&tpg->acl_node_lock); + return ERR_PTR(-EEXIST); + } + spin_unlock_bh(&tpg->acl_node_lock); + + if (!(se_nacl)) { + printk("struct se_node_acl pointer is NULL\n"); + return ERR_PTR(-EINVAL); + } + /* + * For v4.x logic the se_node_acl_s is hanging off a fabric + * dependent structure allocated via + * struct target_core_fabric_ops->fabric_make_nodeacl() + */ + acl = se_nacl; + + INIT_LIST_HEAD(&acl->acl_list); + INIT_LIST_HEAD(&acl->acl_sess_list); + spin_lock_init(&acl->device_list_lock); + spin_lock_init(&acl->nacl_sess_lock); + atomic_set(&acl->acl_pr_ref_count, 0); + acl->queue_depth = queue_depth; + snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + acl->se_tpg = tpg; + acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); + spin_lock_init(&acl->stats_lock); + + TPG_TFO(tpg)->set_default_node_attributes(acl); + + if (core_create_device_list_for_node(acl) < 0) { + TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + return ERR_PTR(-ENOMEM); + } + + if (core_set_queue_depth_for_node(tpg, acl) < 0) { + core_free_device_list_for_node(acl, tpg); + TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + return ERR_PTR(-EINVAL); + } + + spin_lock_bh(&tpg->acl_node_lock); + list_add_tail(&acl->acl_list, &tpg->acl_node_list); + tpg->num_node_acls++; + spin_unlock_bh(&tpg->acl_node_lock); + +done: + printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, + TPG_TFO(tpg)->get_fabric_name(), initiatorname); + + return acl; +} +EXPORT_SYMBOL(core_tpg_add_initiator_node_acl); + +/* core_tpg_del_initiator_node_acl(): + * + * + */ +int core_tpg_del_initiator_node_acl( + struct 
se_portal_group *tpg, + struct se_node_acl *acl, + int force) +{ + struct se_session *sess, *sess_tmp; + int dynamic_acl = 0; + + spin_lock_bh(&tpg->acl_node_lock); + if (acl->dynamic_node_acl) { + acl->dynamic_node_acl = 0; + dynamic_acl = 1; + } + list_del(&acl->acl_list); + tpg->num_node_acls--; + spin_unlock_bh(&tpg->acl_node_lock); + + spin_lock_bh(&tpg->session_lock); + list_for_each_entry_safe(sess, sess_tmp, + &tpg->tpg_sess_list, sess_list) { + if (sess->se_node_acl != acl) + continue; + /* + * Determine if the session needs to be closed by our context. + */ + if (!(TPG_TFO(tpg)->shutdown_session(sess))) + continue; + + spin_unlock_bh(&tpg->session_lock); + /* + * If the $FABRIC_MOD session for the Initiator Node ACL exists, + * forcefully shutdown the $FABRIC_MOD session/nexus. + */ + TPG_TFO(tpg)->close_session(sess); + + spin_lock_bh(&tpg->session_lock); + } + spin_unlock_bh(&tpg->session_lock); + + core_tpg_wait_for_nacl_pr_ref(acl); + core_tpg_wait_for_mib_ref(acl); + core_clear_initiator_node_from_tpg(acl, tpg); + core_free_device_list_for_node(acl, tpg); + + printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, + TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); + + return 0; +} +EXPORT_SYMBOL(core_tpg_del_initiator_node_acl); + +/* core_tpg_set_initiator_node_queue_depth(): + * + * + */ +int core_tpg_set_initiator_node_queue_depth( + struct se_portal_group *tpg, + unsigned char *initiatorname, + u32 queue_depth, + int force) +{ + struct se_session *sess, *init_sess = NULL; + struct se_node_acl *acl; + int dynamic_acl = 0; + + spin_lock_bh(&tpg->acl_node_lock); + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (!(acl)) { + printk(KERN_ERR "Access Control List entry for %s Initiator" + " Node %s does not exist for TPG %hu, ignoring" + " request.\n", TPG_TFO(tpg)->get_fabric_name(), + initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock_bh(&tpg->acl_node_lock); + return -ENODEV; + } + if (acl->dynamic_node_acl) { + acl->dynamic_node_acl = 0; + dynamic_acl = 1; + } + spin_unlock_bh(&tpg->acl_node_lock); + + spin_lock_bh(&tpg->session_lock); + list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { + if (sess->se_node_acl != acl) + continue; + + if (!force) { + printk(KERN_ERR "Unable to change queue depth for %s" + " Initiator Node: %s while session is" + " operational. To forcefully change the queue" + " depth and force session reinstatement," + " use the \"force=1\" parameter.\n", + TPG_TFO(tpg)->get_fabric_name(), initiatorname); + spin_unlock_bh(&tpg->session_lock); + + spin_lock_bh(&tpg->acl_node_lock); + if (dynamic_acl) + acl->dynamic_node_acl = 1; + spin_unlock_bh(&tpg->acl_node_lock); + return -EEXIST; + } + /* + * Determine if the session needs to be closed by our context. + */ + if (!(TPG_TFO(tpg)->shutdown_session(sess))) + continue; + + init_sess = sess; + break; + } + + /* + * User has requested to change the queue depth for an Initiator Node. + * Change the value in the Node's struct se_node_acl, and call + * core_set_queue_depth_for_node() to add the requested queue depth. + * + * Finally call TPG_TFO(tpg)->close_session() to force session + * reinstatement to occur if there is an active session for the + * $FABRIC_MOD Initiator Node in question.
+ */ + acl->queue_depth = queue_depth; + + if (core_set_queue_depth_for_node(tpg, acl) < 0) { + spin_unlock_bh(&tpg->session_lock); + /* + * Force session reinstatement if + * core_set_queue_depth_for_node() failed, because we assume + * the $FABRIC_MOD has already set the session reinstatement + * bit from TPG_TFO(tpg)->shutdown_session() called above. + */ + if (init_sess) + TPG_TFO(tpg)->close_session(init_sess); + + spin_lock_bh(&tpg->acl_node_lock); + if (dynamic_acl) + acl->dynamic_node_acl = 1; + spin_unlock_bh(&tpg->acl_node_lock); + return -EINVAL; + } + spin_unlock_bh(&tpg->session_lock); + /* + * If the $FABRIC_MOD session for the Initiator Node ACL exists, + * forcefully shutdown the $FABRIC_MOD session/nexus. + */ + if (init_sess) + TPG_TFO(tpg)->close_session(init_sess); + + printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator" + " Node: %s on %s Target Portal Group: %u\n", queue_depth, + initiatorname, TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg)); + + spin_lock_bh(&tpg->acl_node_lock); + if (dynamic_acl) + acl->dynamic_node_acl = 1; + spin_unlock_bh(&tpg->acl_node_lock); + + return 0; +} +EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); + +static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) +{ + /* Set in core_dev_setup_virtual_lun0() */ + struct se_device *dev = se_global->g_lun0_dev; + struct se_lun *lun = &se_tpg->tpg_virt_lun0; + u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + int ret; + + lun->unpacked_lun = 0; + lun->lun_status = TRANSPORT_LUN_STATUS_FREE; + atomic_set(&lun->lun_acl_count, 0); + init_completion(&lun->lun_shutdown_comp); + INIT_LIST_HEAD(&lun->lun_acl_list); + INIT_LIST_HEAD(&lun->lun_cmd_list); + spin_lock_init(&lun->lun_acl_lock); + spin_lock_init(&lun->lun_cmd_lock); + spin_lock_init(&lun->lun_sep_lock); + + ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + if (ret < 0) + return -1; + + return 0; +} + +static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg) +{ + struct se_lun *lun = &se_tpg->tpg_virt_lun0; + + core_tpg_post_dellun(se_tpg, lun); +} + +int core_tpg_register( + struct target_core_fabric_ops *tfo, + struct se_wwn *se_wwn, + struct se_portal_group *se_tpg, + void *tpg_fabric_ptr, + int se_tpg_type) +{ + struct se_lun *lun; + u32 i; + + se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * + TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); + if (!(se_tpg->tpg_lun_list)) { + printk(KERN_ERR "Unable to allocate struct se_portal_group->" + "tpg_lun_list\n"); + return -ENOMEM; + } + + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + lun = &se_tpg->tpg_lun_list[i]; + lun->unpacked_lun = i; + lun->lun_status = TRANSPORT_LUN_STATUS_FREE; + atomic_set(&lun->lun_acl_count, 0); + init_completion(&lun->lun_shutdown_comp); + INIT_LIST_HEAD(&lun->lun_acl_list); + INIT_LIST_HEAD(&lun->lun_cmd_list); + spin_lock_init(&lun->lun_acl_lock); + spin_lock_init(&lun->lun_cmd_lock); + spin_lock_init(&lun->lun_sep_lock); + } + + se_tpg->se_tpg_type = se_tpg_type; + se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr; + se_tpg->se_tpg_tfo = tfo; + se_tpg->se_tpg_wwn = se_wwn; + atomic_set(&se_tpg->tpg_pr_ref_count, 0); + INIT_LIST_HEAD(&se_tpg->acl_node_list); + INIT_LIST_HEAD(&se_tpg->se_tpg_list); + INIT_LIST_HEAD(&se_tpg->tpg_sess_list); + spin_lock_init(&se_tpg->acl_node_lock); + spin_lock_init(&se_tpg->session_lock); + spin_lock_init(&se_tpg->tpg_lun_lock); + + if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { + if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { +
kfree(se_tpg); + return -ENOMEM; + } + } + + spin_lock_bh(&se_global->se_tpg_lock); + list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); + spin_unlock_bh(&se_global->se_tpg_lock); + + printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" + " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), + (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? + "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? + "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); + + return 0; +} +EXPORT_SYMBOL(core_tpg_register); + +int core_tpg_deregister(struct se_portal_group *se_tpg) +{ + printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" + " for endpoint: %s Portal Tag %u\n", + (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? + "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), + TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), + TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); + + spin_lock_bh(&se_global->se_tpg_lock); + list_del(&se_tpg->se_tpg_list); + spin_unlock_bh(&se_global->se_tpg_lock); + + while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) + cpu_relax(); + + if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) + core_tpg_release_virtual_lun0(se_tpg); + + se_tpg->se_tpg_fabric_ptr = NULL; + kfree(se_tpg->tpg_lun_list); + return 0; +} +EXPORT_SYMBOL(core_tpg_deregister); + +struct se_lun *core_tpg_pre_addlun( + struct se_portal_group *tpg, + u32 unpacked_lun) +{ + struct se_lun *lun; + + if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + "-1: %u for Target Portal Group: %u\n", + TPG_TFO(tpg)->get_fabric_name(), + unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + return ERR_PTR(-EOVERFLOW); + } + + spin_lock(&tpg->tpg_lun_lock); + lun = &tpg->tpg_lun_list[unpacked_lun]; + if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { + printk(KERN_ERR "TPG Logical Unit Number: %u is already active" + " on %s Target Portal Group: %u, ignoring request.\n", + unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), + TPG_TFO(tpg)->tpg_get_tag(tpg)); + spin_unlock(&tpg->tpg_lun_lock); + return ERR_PTR(-EINVAL); + } + spin_unlock(&tpg->tpg_lun_lock); + + return lun; +} + +int core_tpg_post_addlun( + struct se_portal_group *tpg, + struct se_lun *lun, + u32 lun_access, + void *lun_ptr) +{ + if (core_dev_export(lun_ptr, tpg, lun) < 0) + return -1; + + spin_lock(&tpg->tpg_lun_lock); + lun->lun_access = lun_access; + lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; + spin_unlock(&tpg->tpg_lun_lock); + + return 0; +} + +static void core_tpg_shutdown_lun( + struct se_portal_group *tpg, + struct se_lun *lun) +{ + core_clear_lun_from_tpg(lun, tpg); + transport_clear_lun_from_sessions(lun); +} + +struct se_lun *core_tpg_pre_dellun( + struct se_portal_group *tpg, + u32 unpacked_lun, + int *ret) +{ + struct se_lun *lun; + + if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + "-1: %u for Target Portal Group: %u\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + return ERR_PTR(-EOVERFLOW); + } + + spin_lock(&tpg->tpg_lun_lock); + lun = &tpg->tpg_lun_list[unpacked_lun]; + if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { + printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + " Target Portal Group: %u, ignoring request.\n", + TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + TPG_TFO(tpg)->tpg_get_tag(tpg)); + 
spin_unlock(&tpg->tpg_lun_lock); + return ERR_PTR(-ENODEV); + } + spin_unlock(&tpg->tpg_lun_lock); + + return lun; +} + +int core_tpg_post_dellun( + struct se_portal_group *tpg, + struct se_lun *lun) +{ + core_tpg_shutdown_lun(tpg, lun); + + core_dev_unexport(lun->lun_se_dev, tpg, lun); + + spin_lock(&tpg->tpg_lun_lock); + lun->lun_status = TRANSPORT_LUN_STATUS_FREE; + spin_unlock(&tpg->tpg_lun_lock); + + return 0; +} diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c new file mode 100644 index 000000000000..28b6292ff298 --- /dev/null +++ b/drivers/target/target_core_transport.c @@ -0,0 +1,6134 @@ +/******************************************************************************* + * Filename: target_core_transport.c + * + * This file contains the Generic Target Engine Core. + * + * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. + * Copyright (c) 2005, 2006, 2007 SBE, Inc. + * Copyright (c) 2007-2010 Rising Tide Systems + * Copyright (c) 2008-2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For TASK_ATTR_* */ + +#include +#include +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_pr.h" +#include "target_core_scdb.h" +#include "target_core_ua.h" + +/* #define DEBUG_CDB_HANDLER */ +#ifdef DEBUG_CDB_HANDLER +#define DEBUG_CDB_H(x...) printk(KERN_INFO x) +#else +#define DEBUG_CDB_H(x...) +#endif + +/* #define DEBUG_CMD_MAP */ +#ifdef DEBUG_CMD_MAP +#define DEBUG_CMD_M(x...) printk(KERN_INFO x) +#else +#define DEBUG_CMD_M(x...) +#endif + +/* #define DEBUG_MEM_ALLOC */ +#ifdef DEBUG_MEM_ALLOC +#define DEBUG_MEM(x...) printk(KERN_INFO x) +#else +#define DEBUG_MEM(x...) +#endif + +/* #define DEBUG_MEM2_ALLOC */ +#ifdef DEBUG_MEM2_ALLOC +#define DEBUG_MEM2(x...) printk(KERN_INFO x) +#else +#define DEBUG_MEM2(x...) +#endif + +/* #define DEBUG_SG_CALC */ +#ifdef DEBUG_SG_CALC +#define DEBUG_SC(x...) printk(KERN_INFO x) +#else +#define DEBUG_SC(x...) +#endif + +/* #define DEBUG_SE_OBJ */ +#ifdef DEBUG_SE_OBJ +#define DEBUG_SO(x...) printk(KERN_INFO x) +#else +#define DEBUG_SO(x...) +#endif + +/* #define DEBUG_CMD_VOL */ +#ifdef DEBUG_CMD_VOL +#define DEBUG_VOL(x...) printk(KERN_INFO x) +#else +#define DEBUG_VOL(x...) +#endif + +/* #define DEBUG_CMD_STOP */ +#ifdef DEBUG_CMD_STOP +#define DEBUG_CS(x...) printk(KERN_INFO x) +#else +#define DEBUG_CS(x...) +#endif + +/* #define DEBUG_PASSTHROUGH */ +#ifdef DEBUG_PASSTHROUGH +#define DEBUG_PT(x...) 
printk(KERN_INFO x) +#else +#define DEBUG_PT(x...) +#endif + +/* #define DEBUG_TASK_STOP */ +#ifdef DEBUG_TASK_STOP +#define DEBUG_TS(x...) printk(KERN_INFO x) +#else +#define DEBUG_TS(x...) +#endif + +/* #define DEBUG_TRANSPORT_STOP */ +#ifdef DEBUG_TRANSPORT_STOP +#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) +#else +#define DEBUG_TRANSPORT_S(x...) +#endif + +/* #define DEBUG_TASK_FAILURE */ +#ifdef DEBUG_TASK_FAILURE +#define DEBUG_TF(x...) printk(KERN_INFO x) +#else +#define DEBUG_TF(x...) +#endif + +/* #define DEBUG_DEV_OFFLINE */ +#ifdef DEBUG_DEV_OFFLINE +#define DEBUG_DO(x...) printk(KERN_INFO x) +#else +#define DEBUG_DO(x...) +#endif + +/* #define DEBUG_TASK_STATE */ +#ifdef DEBUG_TASK_STATE +#define DEBUG_TSTATE(x...) printk(KERN_INFO x) +#else +#define DEBUG_TSTATE(x...) +#endif + +/* #define DEBUG_STATUS_THR */ +#ifdef DEBUG_STATUS_THR +#define DEBUG_ST(x...) printk(KERN_INFO x) +#else +#define DEBUG_ST(x...) +#endif + +/* #define DEBUG_TASK_TIMEOUT */ +#ifdef DEBUG_TASK_TIMEOUT +#define DEBUG_TT(x...) printk(KERN_INFO x) +#else +#define DEBUG_TT(x...) +#endif + +/* #define DEBUG_GENERIC_REQUEST_FAILURE */ +#ifdef DEBUG_GENERIC_REQUEST_FAILURE +#define DEBUG_GRF(x...) printk(KERN_INFO x) +#else +#define DEBUG_GRF(x...) +#endif + +/* #define DEBUG_SAM_TASK_ATTRS */ +#ifdef DEBUG_SAM_TASK_ATTRS +#define DEBUG_STA(x...) printk(KERN_INFO x) +#else +#define DEBUG_STA(x...) +#endif + +struct se_global *se_global; + +static struct kmem_cache *se_cmd_cache; +static struct kmem_cache *se_sess_cache; +struct kmem_cache *se_tmr_req_cache; +struct kmem_cache *se_ua_cache; +struct kmem_cache *se_mem_cache; +struct kmem_cache *t10_pr_reg_cache; +struct kmem_cache *t10_alua_lu_gp_cache; +struct kmem_cache *t10_alua_lu_gp_mem_cache; +struct kmem_cache *t10_alua_tg_pt_gp_cache; +struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; + +/* Used for transport_dev_get_map_*() */ +typedef int (*map_func_t)(struct se_task *, u32); + +static int transport_generic_write_pending(struct se_cmd *); +static int transport_processing_thread(void *); +static int __transport_execute_tasks(struct se_device *dev); +static void transport_complete_task_attr(struct se_cmd *cmd); +static void transport_direct_request_timeout(struct se_cmd *cmd); +static void transport_free_dev_tasks(struct se_cmd *cmd); +static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, + unsigned long long starting_lba, u32 sectors, + enum dma_data_direction data_direction, + struct list_head *mem_list, int set_counts); +static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, + u32 dma_size); +static int transport_generic_remove(struct se_cmd *cmd, + int release_to_pool, int session_reinstatement); +static int transport_get_sectors(struct se_cmd *cmd); +static struct list_head *transport_init_se_mem_list(void); +static int transport_map_sg_to_mem(struct se_cmd *cmd, + struct list_head *se_mem_list, void *in_mem, + u32 *se_mem_cnt); +static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, + unsigned char *dst, struct list_head *se_mem_list); +static void transport_release_fe_cmd(struct se_cmd *cmd); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd, + struct se_queue_obj *qobj); +static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); +static void transport_stop_all_task_timers(struct se_cmd *cmd); + +int transport_emulate_control_cdb(struct se_task *task); + +int init_se_global(void) +{ + struct se_global *global; + + global = kzalloc(sizeof(struct se_global), GFP_KERNEL); + if 
(!(global)) { + printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); + return -1; + } + + INIT_LIST_HEAD(&global->g_lu_gps_list); + INIT_LIST_HEAD(&global->g_se_tpg_list); + INIT_LIST_HEAD(&global->g_hba_list); + INIT_LIST_HEAD(&global->g_se_dev_list); + spin_lock_init(&global->g_device_lock); + spin_lock_init(&global->hba_lock); + spin_lock_init(&global->se_tpg_lock); + spin_lock_init(&global->lu_gps_lock); + spin_lock_init(&global->plugin_class_lock); + + se_cmd_cache = kmem_cache_create("se_cmd_cache", + sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); + if (!(se_cmd_cache)) { + printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); + goto out; + } + se_tmr_req_cache = kmem_cache_create("se_tmr_cache", + sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), + 0, NULL); + if (!(se_tmr_req_cache)) { + printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" + " failed\n"); + goto out; + } + se_sess_cache = kmem_cache_create("se_sess_cache", + sizeof(struct se_session), __alignof__(struct se_session), + 0, NULL); + if (!(se_sess_cache)) { + printk(KERN_ERR "kmem_cache_create() for struct se_session" + " failed\n"); + goto out; + } + se_ua_cache = kmem_cache_create("se_ua_cache", + sizeof(struct se_ua), __alignof__(struct se_ua), + 0, NULL); + if (!(se_ua_cache)) { + printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); + goto out; + } + se_mem_cache = kmem_cache_create("se_mem_cache", + sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); + if (!(se_mem_cache)) { + printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); + goto out; + } + t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", + sizeof(struct t10_pr_registration), + __alignof__(struct t10_pr_registration), 0, NULL); + if (!(t10_pr_reg_cache)) { + printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" + " failed\n"); + goto out; + } + t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", + sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), + 0, NULL); + if (!(t10_alua_lu_gp_cache)) { + printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" + " failed\n"); + goto out; + } + t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", + sizeof(struct t10_alua_lu_gp_member), + __alignof__(struct t10_alua_lu_gp_member), 0, NULL); + if (!(t10_alua_lu_gp_mem_cache)) { + printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" + "cache failed\n"); + goto out; + } + t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", + sizeof(struct t10_alua_tg_pt_gp), + __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); + if (!(t10_alua_tg_pt_gp_cache)) { + printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + "cache failed\n"); + goto out; + } + t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( + "t10_alua_tg_pt_gp_mem_cache", + sizeof(struct t10_alua_tg_pt_gp_member), + __alignof__(struct t10_alua_tg_pt_gp_member), + 0, NULL); + if (!(t10_alua_tg_pt_gp_mem_cache)) { + printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + "mem_t failed\n"); + goto out; + } + + se_global = global; + + return 0; +out: + if (se_cmd_cache) + kmem_cache_destroy(se_cmd_cache); + if (se_tmr_req_cache) + kmem_cache_destroy(se_tmr_req_cache); + if (se_sess_cache) + kmem_cache_destroy(se_sess_cache); + if (se_ua_cache) + kmem_cache_destroy(se_ua_cache); + if (se_mem_cache) + kmem_cache_destroy(se_mem_cache); + if (t10_pr_reg_cache) + kmem_cache_destroy(t10_pr_reg_cache); + if (t10_alua_lu_gp_cache) 
+ kmem_cache_destroy(t10_alua_lu_gp_cache); + if (t10_alua_lu_gp_mem_cache) + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); + if (t10_alua_tg_pt_gp_cache) + kmem_cache_destroy(t10_alua_tg_pt_gp_cache); + if (t10_alua_tg_pt_gp_mem_cache) + kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kfree(global); + return -1; +} + +void release_se_global(void) +{ + struct se_global *global; + + global = se_global; + if (!(global)) + return; + + kmem_cache_destroy(se_cmd_cache); + kmem_cache_destroy(se_tmr_req_cache); + kmem_cache_destroy(se_sess_cache); + kmem_cache_destroy(se_ua_cache); + kmem_cache_destroy(se_mem_cache); + kmem_cache_destroy(t10_pr_reg_cache); + kmem_cache_destroy(t10_alua_lu_gp_cache); + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); + kmem_cache_destroy(t10_alua_tg_pt_gp_cache); + kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kfree(global); + + se_global = NULL; +} + +void transport_init_queue_obj(struct se_queue_obj *qobj) +{ + atomic_set(&qobj->queue_cnt, 0); + INIT_LIST_HEAD(&qobj->qobj_list); + init_waitqueue_head(&qobj->thread_wq); + spin_lock_init(&qobj->cmd_queue_lock); +} +EXPORT_SYMBOL(transport_init_queue_obj); + +static int transport_subsystem_reqmods(void) +{ + int ret; + + ret = request_module("target_core_iblock"); + if (ret != 0) + printk(KERN_ERR "Unable to load target_core_iblock\n"); + + ret = request_module("target_core_file"); + if (ret != 0) + printk(KERN_ERR "Unable to load target_core_file\n"); + + ret = request_module("target_core_pscsi"); + if (ret != 0) + printk(KERN_ERR "Unable to load target_core_pscsi\n"); + + ret = request_module("target_core_stgt"); + if (ret != 0) + printk(KERN_ERR "Unable to load target_core_stgt\n"); + + return 0; +} + +int transport_subsystem_check_init(void) +{ + if (se_global->g_sub_api_initialized) + return 0; + /* + * Request the loading of known TCM subsystem plugins.. + */ + if (transport_subsystem_reqmods() < 0) + return -1; + + se_global->g_sub_api_initialized = 1; + return 0; +} + +struct se_session *transport_init_session(void) +{ + struct se_session *se_sess; + + se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); + if (!(se_sess)) { + printk(KERN_ERR "Unable to allocate struct se_session from" + " se_sess_cache\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&se_sess->sess_list); + INIT_LIST_HEAD(&se_sess->sess_acl_list); + atomic_set(&se_sess->mib_ref_count, 0); + + return se_sess; +} +EXPORT_SYMBOL(transport_init_session); + +/* + * Called with spin_lock_bh(&struct se_portal_group->session_lock called. + */ +void __transport_register_session( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct se_session *se_sess, + void *fabric_sess_ptr) +{ + unsigned char buf[PR_REG_ISID_LEN]; + + se_sess->se_tpg = se_tpg; + se_sess->fabric_sess_ptr = fabric_sess_ptr; + /* + * Used by struct se_node_acl's under ConfigFS to locate active se_session-t + * + * Only set for struct se_session's that will actually be moving I/O. + * eg: *NOT* discovery sessions. + */ + if (se_nacl) { + /* + * If the fabric module supports an ISID based TransportID, + * save this value in binary from the fabric I_T Nexus now. + */ + if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + memset(&buf[0], 0, PR_REG_ISID_LEN); + TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, + &buf[0], PR_REG_ISID_LEN); + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); + } + spin_lock_irq(&se_nacl->nacl_sess_lock); + /* + * The se_nacl->nacl_sess pointer will be set to the + * last active I_T Nexus for each struct se_node_acl. 
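+		 * When more than one session exists for the same
+		 * initiator (for example an iSCSI multi-session setup),
+		 * the most recently registered session wins here; the
+		 * older ones stay reachable via acl_sess_list.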
+ */ + se_nacl->nacl_sess = se_sess; + + list_add_tail(&se_sess->sess_acl_list, + &se_nacl->acl_sess_list); + spin_unlock_irq(&se_nacl->nacl_sess_lock); + } + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); + + printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", + TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); +} +EXPORT_SYMBOL(__transport_register_session); + +void transport_register_session( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct se_session *se_sess, + void *fabric_sess_ptr) +{ + spin_lock_bh(&se_tpg->session_lock); + __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); + spin_unlock_bh(&se_tpg->session_lock); +} +EXPORT_SYMBOL(transport_register_session); + +void transport_deregister_session_configfs(struct se_session *se_sess) +{ + struct se_node_acl *se_nacl; + + /* + * Used by struct se_node_acl's under ConfigFS to locate active struct se_session + */ + se_nacl = se_sess->se_node_acl; + if ((se_nacl)) { + spin_lock_irq(&se_nacl->nacl_sess_lock); + list_del(&se_sess->sess_acl_list); + /* + * If the session list is empty, then clear the pointer. + * Otherwise, set the struct se_session pointer from the tail + * element of the per struct se_node_acl active session list. + */ + if (list_empty(&se_nacl->acl_sess_list)) + se_nacl->nacl_sess = NULL; + else { + se_nacl->nacl_sess = container_of( + se_nacl->acl_sess_list.prev, + struct se_session, sess_acl_list); + } + spin_unlock_irq(&se_nacl->nacl_sess_lock); + } +} +EXPORT_SYMBOL(transport_deregister_session_configfs); + +void transport_free_session(struct se_session *se_sess) +{ + kmem_cache_free(se_sess_cache, se_sess); +} +EXPORT_SYMBOL(transport_free_session); + +void transport_deregister_session(struct se_session *se_sess) +{ + struct se_portal_group *se_tpg = se_sess->se_tpg; + struct se_node_acl *se_nacl; + + if (!(se_tpg)) { + transport_free_session(se_sess); + return; + } + /* + * Wait for possible reference in drivers/target/target_core_mib.c: + * scsi_att_intr_port_seq_show() + */ + while (atomic_read(&se_sess->mib_ref_count) != 0) + cpu_relax(); + + spin_lock_bh(&se_tpg->session_lock); + list_del(&se_sess->sess_list); + se_sess->se_tpg = NULL; + se_sess->fabric_sess_ptr = NULL; + spin_unlock_bh(&se_tpg->session_lock); + + /* + * Determine if we need to do extra work for this initiator node's + * struct se_node_acl if it had been previously dynamically generated. + */ + se_nacl = se_sess->se_node_acl; + if ((se_nacl)) { + spin_lock_bh(&se_tpg->acl_node_lock); + if (se_nacl->dynamic_node_acl) { + if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( + se_tpg))) { + list_del(&se_nacl->acl_list); + se_tpg->num_node_acls--; + spin_unlock_bh(&se_tpg->acl_node_lock); + + core_tpg_wait_for_nacl_pr_ref(se_nacl); + core_tpg_wait_for_mib_ref(se_nacl); + core_free_device_list_for_node(se_nacl, se_tpg); + TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, + se_nacl); + spin_lock_bh(&se_tpg->acl_node_lock); + } + } + spin_unlock_bh(&se_tpg->acl_node_lock); + } + + transport_free_session(se_sess); + + printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", + TPG_TFO(se_tpg)->get_fabric_name()); +} +EXPORT_SYMBOL(transport_deregister_session); + +/* + * Called with T_TASK(cmd)->t_state_lock held. 
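+ * Walks the command's task list and unlinks every inactive task from its
+ * device state list.  dev->execute_task_lock is taken per task below, so
+ * the caller must not already hold it.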
+ */ +static void transport_all_task_dev_remove_state(struct se_cmd *cmd) +{ + struct se_device *dev; + struct se_task *task; + unsigned long flags; + + if (!T_TASK(cmd)) + return; + + list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + dev = task->se_dev; + if (!(dev)) + continue; + + if (atomic_read(&task->task_active)) + continue; + + if (!(atomic_read(&task->task_state_active))) + continue; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + list_del(&task->t_state_list); + DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", + CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + atomic_set(&task->task_state_active, 0); + atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); + } +} + +/* transport_cmd_check_stop(): + * + * 'transport_off = 1' determines if t_transport_active should be cleared. + * 'transport_off = 2' determines if task_dev_state should be removed. + * + * A non-zero u8 t_state sets cmd->t_state. + * Returns 1 when command is stopped, else 0. + */ +static int transport_cmd_check_stop( + struct se_cmd *cmd, + int transport_off, + u8 t_state) +{ + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + /* + * Determine if IOCTL context caller in requesting the stopping of this + * command for LUN shutdown purposes. + */ + if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { + DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" + " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, + CMD_TFO(cmd)->get_task_tag(cmd)); + + cmd->deferred_t_state = cmd->t_state; + cmd->t_state = TRANSPORT_DEFERRED_CMD; + atomic_set(&T_TASK(cmd)->t_transport_active, 0); + if (transport_off == 2) + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + complete(&T_TASK(cmd)->transport_lun_stop_comp); + return 1; + } + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend excpections. + */ + if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { + DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" + " TRUE for ITT: 0x%08x\n", __func__, __LINE__, + CMD_TFO(cmd)->get_task_tag(cmd)); + + cmd->deferred_t_state = cmd->t_state; + cmd->t_state = TRANSPORT_DEFERRED_CMD; + if (transport_off == 2) + transport_all_task_dev_remove_state(cmd); + + /* + * Clear struct se_cmd->se_lun before the transport_off == 2 handoff + * to FE. + */ + if (transport_off == 2) + cmd->se_lun = NULL; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + complete(&T_TASK(cmd)->t_transport_stop_comp); + return 1; + } + if (transport_off) { + atomic_set(&T_TASK(cmd)->t_transport_active, 0); + if (transport_off == 2) { + transport_all_task_dev_remove_state(cmd); + /* + * Clear struct se_cmd->se_lun before the transport_off == 2 + * handoff to fabric module. + */ + cmd->se_lun = NULL; + /* + * Some fabric modules like tcm_loop can release + * their internally allocated I/O refrence now and + * struct se_cmd now. 
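+			 * Note that t_state_lock is dropped before the
+			 * fabric ->check_stop_free() callback is invoked
+			 * below.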
+ */ + if (CMD_TFO(cmd)->check_stop_free != NULL) { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + CMD_TFO(cmd)->check_stop_free(cmd); + return 1; + } + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return 0; + } else if (t_state) + cmd->t_state = t_state; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return 0; +} + +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) +{ + return transport_cmd_check_stop(cmd, 2, 0); +} + +static void transport_lun_remove_cmd(struct se_cmd *cmd) +{ + struct se_lun *lun = SE_LUN(cmd); + unsigned long flags; + + if (!lun) + return; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + goto check_lun; + } + atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_free_dev_tasks(cmd); + +check_lun: + spin_lock_irqsave(&lun->lun_cmd_lock, flags); + if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { + list_del(&cmd->se_lun_list); + atomic_set(&T_TASK(cmd)->transport_lun_active, 0); +#if 0 + printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" + CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); +#endif + } + spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); +} + +void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) +{ + transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_lun_remove_cmd(cmd); + + if (transport_cmd_check_stop_to_fabric(cmd)) + return; + if (remove) + transport_generic_remove(cmd, 0, 0); +} + +void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) +{ + transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + + if (transport_cmd_check_stop_to_fabric(cmd)) + return; + + transport_generic_remove(cmd, 0, 0); +} + +static int transport_add_cmd_to_queue( + struct se_cmd *cmd, + int t_state) +{ + struct se_device *dev = cmd->se_dev; + struct se_queue_obj *qobj = dev->dev_queue_obj; + struct se_queue_req *qr; + unsigned long flags; + + qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); + if (!(qr)) { + printk(KERN_ERR "Unable to allocate memory for" + " struct se_queue_req\n"); + return -1; + } + INIT_LIST_HEAD(&qr->qr_list); + + qr->cmd = (void *)cmd; + qr->state = t_state; + + if (t_state) { + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + cmd->t_state = t_state; + atomic_set(&T_TASK(cmd)->t_transport_active, 1); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + } + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + list_add_tail(&qr->qr_list, &qobj->qobj_list); + atomic_inc(&T_TASK(cmd)->t_transport_queue_active); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + atomic_inc(&qobj->queue_cnt); + wake_up_interruptible(&qobj->thread_wq); + return 0; +} + +/* + * Called with struct se_queue_obj->cmd_queue_lock held. 
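+ * Returns the oldest struct se_queue_req on the queue (the head of
+ * qobj_list), or NULL if the queue is empty.  The variant below,
+ * transport_get_qr_from_queue(), takes cmd_queue_lock itself.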
+ */ +static struct se_queue_req * +__transport_get_qr_from_queue(struct se_queue_obj *qobj) +{ + struct se_cmd *cmd; + struct se_queue_req *qr = NULL; + + if (list_empty(&qobj->qobj_list)) + return NULL; + + list_for_each_entry(qr, &qobj->qobj_list, qr_list) + break; + + if (qr->cmd) { + cmd = (struct se_cmd *)qr->cmd; + atomic_dec(&T_TASK(cmd)->t_transport_queue_active); + } + list_del(&qr->qr_list); + atomic_dec(&qobj->queue_cnt); + + return qr; +} + +static struct se_queue_req * +transport_get_qr_from_queue(struct se_queue_obj *qobj) +{ + struct se_cmd *cmd; + struct se_queue_req *qr; + unsigned long flags; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (list_empty(&qobj->qobj_list)) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return NULL; + } + + list_for_each_entry(qr, &qobj->qobj_list, qr_list) + break; + + if (qr->cmd) { + cmd = (struct se_cmd *)qr->cmd; + atomic_dec(&T_TASK(cmd)->t_transport_queue_active); + } + list_del(&qr->qr_list); + atomic_dec(&qobj->queue_cnt); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + return qr; +} + +static void transport_remove_cmd_from_queue(struct se_cmd *cmd, + struct se_queue_obj *qobj) +{ + struct se_cmd *q_cmd; + struct se_queue_req *qr = NULL, *qr_p = NULL; + unsigned long flags; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return; + } + + list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { + q_cmd = (struct se_cmd *)qr->cmd; + if (q_cmd != cmd) + continue; + + atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&qr->qr_list); + kfree(qr); + } + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { + printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", + CMD_TFO(cmd)->get_task_tag(cmd), + atomic_read(&T_TASK(cmd)->t_transport_queue_active)); + } +} + +/* + * Completion function used by TCM subsystem plugins (such as FILEIO) + * for queueing up response from struct se_subsystem_api->do_task() + */ +void transport_complete_sync_cache(struct se_cmd *cmd, int good) +{ + struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, + struct se_task, t_list); + + if (good) { + cmd->scsi_status = SAM_STAT_GOOD; + task->task_scsi_status = GOOD; + } else { + task->task_scsi_status = SAM_STAT_CHECK_CONDITION; + task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; + TASK_CMD(task)->transport_error_status = + PYX_TRANSPORT_ILLEGAL_REQUEST; + } + + transport_complete_task(task, good); +} +EXPORT_SYMBOL(transport_complete_sync_cache); + +/* transport_complete_task(): + * + * Called from interrupt and non interrupt context depending + * on the transport plugin. + */ +void transport_complete_task(struct se_task *task, int success) +{ + struct se_cmd *cmd = TASK_CMD(task); + struct se_device *dev = task->se_dev; + int t_state; + unsigned long flags; +#if 0 + printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, + T_TASK(cmd)->t_task_cdb[0], dev); +#endif + if (dev) { + spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); + atomic_inc(&dev->depth_left); + atomic_inc(&SE_HBA(dev)->left_queue_depth); + spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + } + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&task->task_active, 0); + + /* + * See if any sense data exists, if so set the TASK_SENSE flag. 
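+	 * A non-zero return from ->transport_complete() means the backend
+	 * has sense data to report, so the task still completes with
+	 * success = 1 and the sense payload is collected later.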
+ * Also check for any other post completion work that needs to be + * done by the plugins. + */ + if (dev && dev->transport->transport_complete) { + if (dev->transport->transport_complete(task) != 0) { + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + task->task_sense = 1; + success = 1; + } + } + + /* + * See if we are waiting for outstanding struct se_task + * to complete for an exception condition + */ + if (atomic_read(&task->task_stop)) { + /* + * Decrement T_TASK(cmd)->t_se_count if this task had + * previously thrown its timeout exception handler. + */ + if (atomic_read(&task->task_timeout)) { + atomic_dec(&T_TASK(cmd)->t_se_count); + atomic_set(&task->task_timeout, 0); + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + complete(&task->task_stop_comp); + return; + } + /* + * If the task's timeout handler has fired, use the t_task_cdbs_timeout + * left counter to determine when the struct se_cmd is ready to be queued to + * the processing thread. + */ + if (atomic_read(&task->task_timeout)) { + if (!(atomic_dec_and_test( + &T_TASK(cmd)->t_task_cdbs_timeout_left))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + return; + } + t_state = TRANSPORT_COMPLETE_TIMEOUT; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_add_cmd_to_queue(cmd, t_state); + return; + } + atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); + + /* + * Decrement the outstanding t_task_cdbs_left count. The last + * struct se_task from struct se_cmd will complete itself into the + * device queue depending upon int success. + */ + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { + if (!success) + T_TASK(cmd)->t_tasks_failed = 1; + + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + + if (!success || T_TASK(cmd)->t_tasks_failed) { + t_state = TRANSPORT_COMPLETE_FAILURE; + if (!task->task_error_status) { + task->task_error_status = + PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->transport_error_status = + PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + } else { + atomic_set(&T_TASK(cmd)->t_transport_complete, 1); + t_state = TRANSPORT_COMPLETE_OK; + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_add_cmd_to_queue(cmd, t_state); +} +EXPORT_SYMBOL(transport_complete_task); + +/* + * Called by transport_add_tasks_from_cmd() once a struct se_cmd's + * struct se_task list are ready to be added to the active execution list + * struct se_device + + * Called with se_dev_t->execute_task_lock called. + */ +static inline int transport_add_task_check_sam_attr( + struct se_task *task, + struct se_task *task_prev, + struct se_device *dev) +{ + /* + * No SAM Task attribute emulation enabled, add to tail of + * execution queue + */ + if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { + list_add_tail(&task->t_execute_list, &dev->execute_task_list); + return 0; + } + /* + * HEAD_OF_QUEUE attribute for received CDB, which means + * the first task that is associated with a struct se_cmd goes to + * head of the struct se_device->execute_task_list, and task_prev + * after that for each subsequent task + */ + if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) { + list_add(&task->t_execute_list, + (task_prev != NULL) ? 
+ &task_prev->t_execute_list : + &dev->execute_task_list); + + DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" + " in execution queue\n", + T_TASK(task->task_se_cmd)->t_task_cdb[0]); + return 1; + } + /* + * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been + * transitioned from Dermant -> Active state, and are added to the end + * of the struct se_device->execute_task_list + */ + list_add_tail(&task->t_execute_list, &dev->execute_task_list); + return 0; +} + +/* __transport_add_task_to_execute_queue(): + * + * Called with se_dev_t->execute_task_lock called. + */ +static void __transport_add_task_to_execute_queue( + struct se_task *task, + struct se_task *task_prev, + struct se_device *dev) +{ + int head_of_queue; + + head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); + atomic_inc(&dev->execute_tasks); + + if (atomic_read(&task->task_state_active)) + return; + /* + * Determine if this task needs to go to HEAD_OF_QUEUE for the + * state list as well. Running with SAM Task Attribute emulation + * will always return head_of_queue == 0 here + */ + if (head_of_queue) + list_add(&task->t_state_list, (task_prev) ? + &task_prev->t_state_list : + &dev->state_task_list); + else + list_add_tail(&task->t_state_list, &dev->state_task_list); + + atomic_set(&task->task_state_active, 1); + + DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", + CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), + task, dev); +} + +static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) +{ + struct se_device *dev; + struct se_task *task; + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + dev = task->se_dev; + + if (atomic_read(&task->task_state_active)) + continue; + + spin_lock(&dev->execute_task_lock); + list_add_tail(&task->t_state_list, &dev->state_task_list); + atomic_set(&task->task_state_active, 1); + + DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", + CMD_TFO(task->task_se_cmd)->get_task_tag( + task->task_se_cmd), task, dev); + + spin_unlock(&dev->execute_task_lock); + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); +} + +static void transport_add_tasks_from_cmd(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_task *task, *task_prev = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + if (atomic_read(&task->task_execute_queue)) + continue; + /* + * __transport_add_task_to_execute_queue() handles the + * SAM Task Attribute emulation if enabled + */ + __transport_add_task_to_execute_queue(task, task_prev, dev); + atomic_set(&task->task_execute_queue, 1); + task_prev = task; + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + return; +} + +/* transport_get_task_from_execute_queue(): + * + * Called with dev->execute_task_lock held. 
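+ * Unlinks and returns the task at the head of the device execute list,
+ * or NULL if no tasks are currently queued for execution.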
+ */ +static struct se_task * +transport_get_task_from_execute_queue(struct se_device *dev) +{ + struct se_task *task; + + if (list_empty(&dev->execute_task_list)) + return NULL; + + list_for_each_entry(task, &dev->execute_task_list, t_execute_list) + break; + + list_del(&task->t_execute_list); + atomic_dec(&dev->execute_tasks); + + return task; +} + +/* transport_remove_task_from_execute_queue(): + * + * + */ +static void transport_remove_task_from_execute_queue( + struct se_task *task, + struct se_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + list_del(&task->t_execute_list); + atomic_dec(&dev->execute_tasks); + spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) +{ + switch (cmd->data_direction) { + case DMA_NONE: + return "NONE"; + case DMA_FROM_DEVICE: + return "READ"; + case DMA_TO_DEVICE: + return "WRITE"; + case DMA_BIDIRECTIONAL: + return "BIDI"; + default: + break; + } + + return "UNKNOWN"; +} + +void transport_dump_dev_state( + struct se_device *dev, + char *b, + int *bl) +{ + *bl += sprintf(b + *bl, "Status: "); + switch (dev->dev_status) { + case TRANSPORT_DEVICE_ACTIVATED: + *bl += sprintf(b + *bl, "ACTIVATED"); + break; + case TRANSPORT_DEVICE_DEACTIVATED: + *bl += sprintf(b + *bl, "DEACTIVATED"); + break; + case TRANSPORT_DEVICE_SHUTDOWN: + *bl += sprintf(b + *bl, "SHUTDOWN"); + break; + case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: + case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: + *bl += sprintf(b + *bl, "OFFLINE"); + break; + default: + *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); + break; + } + + *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", + atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), + dev->queue_depth); + *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", + DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); + *bl += sprintf(b + *bl, " "); +} + +/* transport_release_all_cmds(): + * + * + */ +static void transport_release_all_cmds(struct se_device *dev) +{ + struct se_cmd *cmd = NULL; + struct se_queue_req *qr = NULL, *qr_p = NULL; + int bug_out = 0, t_state; + unsigned long flags; + + spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, + qr_list) { + + cmd = (struct se_cmd *)qr->cmd; + t_state = qr->state; + list_del(&qr->qr_list); + kfree(qr); + spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, + flags); + + printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," + " t_state: %u directly\n", + CMD_TFO(cmd)->get_task_tag(cmd), + CMD_TFO(cmd)->get_cmd_state(cmd), t_state); + + transport_release_fe_cmd(cmd); + bug_out = 1; + + spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + } + spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); +#if 0 + if (bug_out) + BUG(); +#endif +} + +void transport_dump_vpd_proto_id( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Protocol Identifier: "); + + switch (vpd->protocol_identifier) { + case 0x00: + sprintf(buf+len, "Fibre Channel\n"); + break; + case 0x10: + sprintf(buf+len, "Parallel SCSI\n"); + break; + case 0x20: + sprintf(buf+len, "SSA\n"); + break; + case 0x30: + sprintf(buf+len, "IEEE 1394\n"); + break; + case 0x40: + sprintf(buf+len, "SCSI Remote Direct Memory Access" + " Protocol\n"); + break; 
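+	/*
+	 * Note: transport_set_vpd_proto_id() stores the value un-shifted
+	 * (the upper nibble of byte 0), which is why the case values here
+	 * are 0x10, 0x20, ... rather than 1, 2, ...
+	 */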
+ case 0x50: + sprintf(buf+len, "Internet SCSI (iSCSI)\n"); + break; + case 0x60: + sprintf(buf+len, "SAS Serial SCSI Protocol\n"); + break; + case 0x70: + sprintf(buf+len, "Automation/Drive Interface Transport" + " Protocol\n"); + break; + case 0x80: + sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); + break; + default: + sprintf(buf+len, "Unknown 0x%02x\n", + vpd->protocol_identifier); + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + printk(KERN_INFO "%s", buf); +} + +void +transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * Check if the Protocol Identifier Valid (PIV) bit is set.. + * + * from spc3r23.pdf section 7.5.1 + */ + if (page_83[1] & 0x80) { + vpd->protocol_identifier = (page_83[0] & 0xf0); + vpd->protocol_identifier_set = 1; + transport_dump_vpd_proto_id(vpd, NULL, 0); + } +} +EXPORT_SYMBOL(transport_set_vpd_proto_id); + +int transport_dump_vpd_assoc( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0, len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Identifier Association: "); + + switch (vpd->association) { + case 0x00: + sprintf(buf+len, "addressed logical unit\n"); + break; + case 0x10: + sprintf(buf+len, "target port\n"); + break; + case 0x20: + sprintf(buf+len, "SCSI target device\n"); + break; + default: + sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); + ret = -1; + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + printk("%s", buf); + + return ret; +} + +int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * The VPD identification association.. + * + * from spc3r23.pdf Section 7.6.3.1 Table 297 + */ + vpd->association = (page_83[1] & 0x30); + return transport_dump_vpd_assoc(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_assoc); + +int transport_dump_vpd_ident_type( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0, len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Identifier Type: "); + + switch (vpd->device_identifier_type) { + case 0x00: + sprintf(buf+len, "Vendor specific\n"); + break; + case 0x01: + sprintf(buf+len, "T10 Vendor ID based\n"); + break; + case 0x02: + sprintf(buf+len, "EUI-64 based\n"); + break; + case 0x03: + sprintf(buf+len, "NAA\n"); + break; + case 0x04: + sprintf(buf+len, "Relative target port identifier\n"); + break; + case 0x08: + sprintf(buf+len, "SCSI name string\n"); + break; + default: + sprintf(buf+len, "Unsupported: 0x%02x\n", + vpd->device_identifier_type); + ret = -1; + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + printk("%s", buf); + + return ret; +} + +int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * The VPD identifier type.. 
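+	 * is taken from the low nibble of byte 1 of the designation
+	 * descriptor; an NAA based designator, for example, carries 0x3
+	 * here, matching the cases in transport_dump_vpd_ident_type().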
+ * + * from spc3r23.pdf Section 7.6.3.1 Table 298 + */ + vpd->device_identifier_type = (page_83[1] & 0x0f); + return transport_dump_vpd_ident_type(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_ident_type); + +int transport_dump_vpd_ident( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + + switch (vpd->device_identifier_code_set) { + case 0x01: /* Binary */ + sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + case 0x02: /* ASCII */ + sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + case 0x03: /* UTF-8 */ + sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + default: + sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" + " 0x%02x", vpd->device_identifier_code_set); + ret = -1; + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + printk("%s", buf); + + return ret; +} + +int +transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) +{ + static const char hex_str[] = "0123456789abcdef"; + int j = 0, i = 4; /* offset to start of the identifer */ + + /* + * The VPD Code Set (encoding) + * + * from spc3r23.pdf Section 7.6.3.1 Table 296 + */ + vpd->device_identifier_code_set = (page_83[0] & 0x0f); + switch (vpd->device_identifier_code_set) { + case 0x01: /* Binary */ + vpd->device_identifier[j++] = + hex_str[vpd->device_identifier_type]; + while (i < (4 + page_83[3])) { + vpd->device_identifier[j++] = + hex_str[(page_83[i] & 0xf0) >> 4]; + vpd->device_identifier[j++] = + hex_str[page_83[i] & 0x0f]; + i++; + } + break; + case 0x02: /* ASCII */ + case 0x03: /* UTF-8 */ + while (i < (4 + page_83[3])) + vpd->device_identifier[j++] = page_83[i++]; + break; + default: + break; + } + + return transport_dump_vpd_ident(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_ident); + +static void core_setup_task_attr_emulation(struct se_device *dev) +{ + /* + * If this device is from Target_Core_Mod/pSCSI, disable the + * SAM Task Attribute emulation. + * + * This is currently not available in upsream Linux/SCSI Target + * mode code, and is assumed to be disabled while using TCM/pSCSI. 
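+	 * For pSCSI the underlying SCSI device and LLD are expected to
+	 * handle the task attributes themselves, so the device is simply
+	 * marked SAM_TASK_ATTR_PASSTHROUGH below.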
+ */ + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; + return; + } + + dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; + DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" + " device\n", TRANSPORT(dev)->name, + TRANSPORT(dev)->get_device_rev(dev)); +} + +static void scsi_dump_inquiry(struct se_device *dev) +{ + struct t10_wwn *wwn = DEV_T10_WWN(dev); + int i, device_type; + /* + * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer + */ + printk(" Vendor: "); + for (i = 0; i < 8; i++) + if (wwn->vendor[i] >= 0x20) + printk("%c", wwn->vendor[i]); + else + printk(" "); + + printk(" Model: "); + for (i = 0; i < 16; i++) + if (wwn->model[i] >= 0x20) + printk("%c", wwn->model[i]); + else + printk(" "); + + printk(" Revision: "); + for (i = 0; i < 4; i++) + if (wwn->revision[i] >= 0x20) + printk("%c", wwn->revision[i]); + else + printk(" "); + + printk("\n"); + + device_type = TRANSPORT(dev)->get_device_type(dev); + printk(" Type: %s ", scsi_device_type(device_type)); + printk(" ANSI SCSI revision: %02x\n", + TRANSPORT(dev)->get_device_rev(dev)); +} + +struct se_device *transport_add_device_to_core_hba( + struct se_hba *hba, + struct se_subsystem_api *transport, + struct se_subsystem_dev *se_dev, + u32 device_flags, + void *transport_dev, + struct se_dev_limits *dev_limits, + const char *inquiry_prod, + const char *inquiry_rev) +{ + int ret = 0, force_pt; + struct se_device *dev; + + dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); + if (!(dev)) { + printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); + return NULL; + } + dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); + if (!(dev->dev_queue_obj)) { + printk(KERN_ERR "Unable to allocate memory for" + " dev->dev_queue_obj\n"); + kfree(dev); + return NULL; + } + transport_init_queue_obj(dev->dev_queue_obj); + + dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), + GFP_KERNEL); + if (!(dev->dev_status_queue_obj)) { + printk(KERN_ERR "Unable to allocate memory for" + " dev->dev_status_queue_obj\n"); + kfree(dev->dev_queue_obj); + kfree(dev); + return NULL; + } + transport_init_queue_obj(dev->dev_status_queue_obj); + + dev->dev_flags = device_flags; + dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; + dev->dev_ptr = (void *) transport_dev; + dev->se_hba = hba; + dev->se_sub_dev = se_dev; + dev->transport = transport; + atomic_set(&dev->active_cmds, 0); + INIT_LIST_HEAD(&dev->dev_list); + INIT_LIST_HEAD(&dev->dev_sep_list); + INIT_LIST_HEAD(&dev->dev_tmr_list); + INIT_LIST_HEAD(&dev->execute_task_list); + INIT_LIST_HEAD(&dev->delayed_cmd_list); + INIT_LIST_HEAD(&dev->ordered_cmd_list); + INIT_LIST_HEAD(&dev->state_task_list); + spin_lock_init(&dev->execute_task_lock); + spin_lock_init(&dev->delayed_cmd_lock); + spin_lock_init(&dev->ordered_cmd_lock); + spin_lock_init(&dev->state_task_lock); + spin_lock_init(&dev->dev_alua_lock); + spin_lock_init(&dev->dev_reservation_lock); + spin_lock_init(&dev->dev_status_lock); + spin_lock_init(&dev->dev_status_thr_lock); + spin_lock_init(&dev->se_port_lock); + spin_lock_init(&dev->se_tmr_lock); + + dev->queue_depth = dev_limits->queue_depth; + atomic_set(&dev->depth_left, dev->queue_depth); + atomic_set(&dev->dev_ordered_id, 0); + + se_dev_set_default_attribs(dev, dev_limits); + + dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); + dev->creation_time = get_jiffies_64(); + spin_lock_init(&dev->stats_lock); + + spin_lock(&hba->device_lock); + 
list_add_tail(&dev->dev_list, &hba->hba_dev_list); + hba->dev_count++; + spin_unlock(&hba->device_lock); + /* + * Setup the SAM Task Attribute emulation for struct se_device + */ + core_setup_task_attr_emulation(dev); + /* + * Force PR and ALUA passthrough emulation with internal object use. + */ + force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); + /* + * Setup the Reservations infrastructure for struct se_device + */ + core_setup_reservations(dev, force_pt); + /* + * Setup the Asymmetric Logical Unit Assignment for struct se_device + */ + if (core_setup_alua(dev, force_pt) < 0) + goto out; + + /* + * Startup the struct se_device processing thread + */ + dev->process_thread = kthread_run(transport_processing_thread, dev, + "LIO_%s", TRANSPORT(dev)->name); + if (IS_ERR(dev->process_thread)) { + printk(KERN_ERR "Unable to create kthread: LIO_%s\n", + TRANSPORT(dev)->name); + goto out; + } + + /* + * Preload the initial INQUIRY const values if we are doing + * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI + * passthrough because this is being provided by the backend LLD. + * This is required so that transport_get_inquiry() copies these + * originals once back into DEV_T10_WWN(dev) for the virtual device + * setup. + */ + if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + if (!(inquiry_prod) || !(inquiry_prod)) { + printk(KERN_ERR "All non TCM/pSCSI plugins require" + " INQUIRY consts\n"); + goto out; + } + + strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); + strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); + strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); + } + scsi_dump_inquiry(dev); + +out: + if (!ret) + return dev; + kthread_stop(dev->process_thread); + + spin_lock(&hba->device_lock); + list_del(&dev->dev_list); + hba->dev_count--; + spin_unlock(&hba->device_lock); + + se_release_vpd_for_dev(dev); + + kfree(dev->dev_status_queue_obj); + kfree(dev->dev_queue_obj); + kfree(dev); + + return NULL; +} +EXPORT_SYMBOL(transport_add_device_to_core_hba); + +/* transport_generic_prepare_cdb(): + * + * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will + * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. + * The point of this is since we are mapping iSCSI LUNs to + * SCSI Target IDs having a non-zero LUN in the CDB will throw the + * devices and HBAs for a loop. 
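+ *
+ * As an illustration (made-up byte values): a READ_10 CDB is left alone
+ * because its byte 1 carries protection/DPO/FUA bits, while an opcode
+ * falling into the default case with byte 1 == 0x42 is masked down to
+ * 0x42 & 0x1f == 0x02 before the CDB is handed further down.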
+ */ +static inline void transport_generic_prepare_cdb( + unsigned char *cdb) +{ + switch (cdb[0]) { + case READ_10: /* SBC - RDProtect */ + case READ_12: /* SBC - RDProtect */ + case READ_16: /* SBC - RDProtect */ + case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ + case VERIFY: /* SBC - VRProtect */ + case VERIFY_16: /* SBC - VRProtect */ + case WRITE_VERIFY: /* SBC - VRProtect */ + case WRITE_VERIFY_12: /* SBC - VRProtect */ + break; + default: + cdb[1] &= 0x1f; /* clear logical unit number */ + break; + } +} + +static struct se_task * +transport_generic_get_task(struct se_cmd *cmd, + enum dma_data_direction data_direction) +{ + struct se_task *task; + struct se_device *dev = SE_DEV(cmd); + unsigned long flags; + + task = dev->transport->alloc_task(cmd); + if (!task) { + printk(KERN_ERR "Unable to allocate struct se_task\n"); + return NULL; + } + + INIT_LIST_HEAD(&task->t_list); + INIT_LIST_HEAD(&task->t_execute_list); + INIT_LIST_HEAD(&task->t_state_list); + init_completion(&task->task_stop_comp); + task->task_no = T_TASK(cmd)->t_tasks_no++; + task->task_se_cmd = cmd; + task->se_dev = dev; + task->task_data_direction = data_direction; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return task; +} + +static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); + +void transport_device_setup_cmd(struct se_cmd *cmd) +{ + cmd->se_dev = SE_LUN(cmd)->lun_se_dev; +} +EXPORT_SYMBOL(transport_device_setup_cmd); + +/* + * Used by fabric modules containing a local struct se_cmd within their + * fabric dependent per I/O descriptor. + */ +void transport_init_se_cmd( + struct se_cmd *cmd, + struct target_core_fabric_ops *tfo, + struct se_session *se_sess, + u32 data_length, + int data_direction, + int task_attr, + unsigned char *sense_buffer) +{ + INIT_LIST_HEAD(&cmd->se_lun_list); + INIT_LIST_HEAD(&cmd->se_delayed_list); + INIT_LIST_HEAD(&cmd->se_ordered_list); + /* + * Setup t_task pointer to t_task_backstore + */ + cmd->t_task = &cmd->t_task_backstore; + + INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); + init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); + init_completion(&T_TASK(cmd)->transport_lun_stop_comp); + init_completion(&T_TASK(cmd)->t_transport_stop_comp); + spin_lock_init(&T_TASK(cmd)->t_state_lock); + atomic_set(&T_TASK(cmd)->transport_dev_active, 1); + + cmd->se_tfo = tfo; + cmd->se_sess = se_sess; + cmd->data_length = data_length; + cmd->data_direction = data_direction; + cmd->sam_task_attr = task_attr; + cmd->sense_buffer = sense_buffer; +} +EXPORT_SYMBOL(transport_init_se_cmd); + +static int transport_check_alloc_task_attr(struct se_cmd *cmd) +{ + /* + * Check if SAM Task Attribute emulation is enabled for this + * struct se_device storage object + */ + if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + return 0; + + if (cmd->sam_task_attr == TASK_ATTR_ACA) { + DEBUG_STA("SAM Task Attribute ACA" + " emulation is not supported\n"); + return -1; + } + /* + * Used to determine when ORDERED commands should go from + * Dormant to Active status. 
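+	 * se_ordered_id is a per device counter assigned with
+	 * atomic_inc_return() below and reported in the DEBUG_STA output.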
+ */ + cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); + smp_mb__after_atomic_inc(); + DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + cmd->se_ordered_id, cmd->sam_task_attr, + TRANSPORT(cmd->se_dev)->name); + return 0; +} + +void transport_free_se_cmd( + struct se_cmd *se_cmd) +{ + if (se_cmd->se_tmr_req) + core_tmr_release_req(se_cmd->se_tmr_req); + /* + * Check and free any extended CDB buffer that was allocated + */ + if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) + kfree(T_TASK(se_cmd)->t_task_cdb); +} +EXPORT_SYMBOL(transport_free_se_cmd); + +static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); + +/* transport_generic_allocate_tasks(): + * + * Called from fabric RX Thread. + */ +int transport_generic_allocate_tasks( + struct se_cmd *cmd, + unsigned char *cdb) +{ + int ret; + + transport_generic_prepare_cdb(cdb); + + /* + * This is needed for early exceptions. + */ + cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; + + transport_device_setup_cmd(cmd); + /* + * Ensure that the received CDB is less than the max (252 + 8) bytes + * for VARIABLE_LENGTH_CMD + */ + if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { + printk(KERN_ERR "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); + return -1; + } + /* + * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, + * allocate the additional extended CDB buffer now.. Otherwise + * setup the pointer from __t_task_cdb to t_task_cdb. + */ + if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { + T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), + GFP_KERNEL); + if (!(T_TASK(cmd)->t_task_cdb)) { + printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" + " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", + scsi_command_size(cdb), + (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); + return -1; + } + } else + T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; + /* + * Copy the original CDB into T_TASK(cmd). + */ + memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); + /* + * Setup the received CDB based on SCSI defined opcodes and + * perform unit attention, persistent reservations and ALUA + * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb + * pointer is expected to be setup before we reach this point. 
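+	 * A CDB larger than sizeof(__t_task_cdb) was kzalloc()ed above and
+	 * is released again in transport_free_se_cmd().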
+ */ + ret = transport_generic_cmd_sequencer(cmd, cdb); + if (ret < 0) + return ret; + /* + * Check for SAM Task Attribute Emulation + */ + if (transport_check_alloc_task_attr(cmd) < 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -2; + } + spin_lock(&cmd->se_lun->lun_sep_lock); + if (cmd->se_lun->lun_sep) + cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; + spin_unlock(&cmd->se_lun->lun_sep_lock); + return 0; +} +EXPORT_SYMBOL(transport_generic_allocate_tasks); + +/* + * Used by fabric module frontends not defining a TFO->new_cmd_map() + * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis + */ +int transport_generic_handle_cdb( + struct se_cmd *cmd) +{ + if (!SE_LUN(cmd)) { + dump_stack(); + printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); + return -1; + } + + transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_cdb); + +/* + * Used by fabric module frontends defining a TFO->new_cmd_map() caller + * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to + * complete setup in TCM process context w/ TFO->new_cmd_map(). + */ +int transport_generic_handle_cdb_map( + struct se_cmd *cmd) +{ + if (!SE_LUN(cmd)) { + dump_stack(); + printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); + return -1; + } + + transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_cdb_map); + +/* transport_generic_handle_data(): + * + * + */ +int transport_generic_handle_data( + struct se_cmd *cmd) +{ + /* + * For the software fabric case, then we assume the nexus is being + * failed/shutdown when signals are pending from the kthread context + * caller, so we return a failure. For the HW target mode case running + * in interrupt code, the signal_pending() check is skipped. + */ + if (!in_interrupt() && signal_pending(current)) + return -1; + /* + * If the received CDB has aleady been ABORTED by the generic + * target engine, we now call transport_check_aborted_status() + * to queue any delated TASK_ABORTED status for the received CDB to the + * fabric module as we are expecting no futher incoming DATA OUT + * sequences at this point. + */ + if (transport_check_aborted_status(cmd, 1) != 0) + return 0; + + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_data); + +/* transport_generic_handle_tmr(): + * + * + */ +int transport_generic_handle_tmr( + struct se_cmd *cmd) +{ + /* + * This is needed for early exceptions. + */ + cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; + transport_device_setup_cmd(cmd); + + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_tmr); + +static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) +{ + struct se_task *task, *task_tmp; + unsigned long flags; + int ret = 0; + + DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", + CMD_TFO(cmd)->get_task_tag(cmd)); + + /* + * No tasks remain in the execution queue + */ + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + list_for_each_entry_safe(task, task_tmp, + &T_TASK(cmd)->t_task_list, t_list) { + DEBUG_TS("task_no[%d] - Processing task %p\n", + task->task_no, task); + /* + * If the struct se_task has not been sent and is not active, + * remove the struct se_task from the execution queue. 
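+		 * Tasks that were already sent and are no longer active
+		 * need no action here; they are only counted in the
+		 * return value.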
+ */ + if (!atomic_read(&task->task_sent) && + !atomic_read(&task->task_active)) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + transport_remove_task_from_execute_queue(task, + task->se_dev); + + DEBUG_TS("task_no[%d] - Removed from execute queue\n", + task->task_no); + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + continue; + } + + /* + * If the struct se_task is active, sleep until it is returned + * from the plugin. + */ + if (atomic_read(&task->task_active)) { + atomic_set(&task->task_stop, 1); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + + DEBUG_TS("task_no[%d] - Waiting to complete\n", + task->task_no); + wait_for_completion(&task->task_stop_comp); + DEBUG_TS("task_no[%d] - Stopped successfully\n", + task->task_no); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + + atomic_set(&task->task_active, 0); + atomic_set(&task->task_stop, 0); + } else { + DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); + ret++; + } + + __transport_stop_task_timer(task, &flags); + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return ret; +} + +static void transport_failure_reset_queue_depth(struct se_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);; + atomic_inc(&dev->depth_left); + atomic_inc(&SE_HBA(dev)->left_queue_depth); + spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); +} + +/* + * Handle SAM-esque emulation for generic transport request failures. + */ +static void transport_generic_request_failure( + struct se_cmd *cmd, + struct se_device *dev, + int complete, + int sc) +{ + DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), + T_TASK(cmd)->t_task_cdb[0]); + DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" + " %d/%d transport_error_status: %d\n", + CMD_TFO(cmd)->get_cmd_state(cmd), + cmd->t_state, cmd->deferred_t_state, + cmd->transport_error_status); + DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" + " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" + " t_transport_active: %d t_transport_stop: %d" + " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, + atomic_read(&T_TASK(cmd)->t_task_cdbs_left), + atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), + atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), + atomic_read(&T_TASK(cmd)->t_transport_active), + atomic_read(&T_TASK(cmd)->t_transport_stop), + atomic_read(&T_TASK(cmd)->t_transport_sent)); + + transport_stop_all_task_timers(cmd); + + if (dev) + transport_failure_reset_queue_depth(dev); + /* + * For SAM Task Attribute emulation for failed struct se_cmd + */ + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + transport_complete_task_attr(cmd); + + if (complete) { + transport_direct_request_timeout(cmd); + cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; + } + + switch (cmd->transport_error_status) { + case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + break; + case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: + cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; + break; + case PYX_TRANSPORT_INVALID_CDB_FIELD: + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + break; + case PYX_TRANSPORT_INVALID_PARAMETER_LIST: + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + break; + case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: + if (!sc) + transport_new_cmd_failure(cmd); + /* + * Currently for 
PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, + * we force this session to fall back to session + * recovery. + */ + CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); + CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); + + goto check_stop; + case PYX_TRANSPORT_LU_COMM_FAILURE: + case PYX_TRANSPORT_ILLEGAL_REQUEST: + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + break; + case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: + cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; + break; + case PYX_TRANSPORT_WRITE_PROTECTED: + cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + break; + case PYX_TRANSPORT_RESERVATION_CONFLICT: + /* + * No SENSE Data payload for this case, set SCSI Status + * and queue the response to $FABRIC_MOD. + * + * Uses linux/include/scsi/scsi.h SAM status codes defs + */ + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + /* + * For UA Interlock Code 11b, a RESERVATION CONFLICT will + * establish a UNIT ATTENTION with PREVIOUS RESERVATION + * CONFLICT STATUS. + * + * See spc4r17, section 7.4.6 Control Mode Page, Table 349 + */ + if (SE_SESS(cmd) && + DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + cmd->orig_fe_lun, 0x2C, + ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); + + CMD_TFO(cmd)->queue_status(cmd); + goto check_stop; + case PYX_TRANSPORT_USE_SENSE_REASON: + /* + * struct se_cmd->scsi_sense_reason already set + */ + break; + default: + printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", + T_TASK(cmd)->t_task_cdb[0], + cmd->transport_error_status); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + break; + } + + if (!sc) + transport_new_cmd_failure(cmd); + else + transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); +check_stop: + transport_lun_remove_cmd(cmd); + if (!(transport_cmd_check_stop_to_fabric(cmd))) + ; +} + +static void transport_direct_request_timeout(struct se_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + + atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), + &T_TASK(cmd)->t_se_count); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); +} + +static void transport_generic_request_timeout(struct se_cmd *cmd) +{ + unsigned long flags; + + /* + * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() + * to allow last call to free memory resources. 
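+	 * Each timed out task took an extra t_se_count reference in
+	 * transport_task_timeout_handler(); all but one of those are
+	 * dropped here so the final transport_generic_remove() below can
+	 * release the descriptor.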
+ */ + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { + int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); + + atomic_sub(tmp, &T_TASK(cmd)->t_se_count); + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_generic_remove(cmd, 0, 0); +} + +static int +transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) +{ + unsigned char *buf; + + buf = kzalloc(data_length, GFP_KERNEL); + if (!(buf)) { + printk(KERN_ERR "Unable to allocate memory for buffer\n"); + return -1; + } + + T_TASK(cmd)->t_tasks_se_num = 0; + T_TASK(cmd)->t_task_buf = buf; + + return 0; +} + +static inline u32 transport_lba_21(unsigned char *cdb) +{ + return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; +} + +static inline u32 transport_lba_32(unsigned char *cdb) +{ + return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; +} + +static inline unsigned long long transport_lba_64(unsigned char *cdb) +{ + unsigned int __v1, __v2; + + __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; + __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; +} + +/* + * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs + */ +static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) +{ + unsigned int __v1, __v2; + + __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; + __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; + + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; +} + +static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; + spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); +} + +/* + * Called from interrupt context. + */ +static void transport_task_timeout_handler(unsigned long data) +{ + struct se_task *task = (struct se_task *)data; + struct se_cmd *cmd = TASK_CMD(task); + unsigned long flags; + + DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (task->task_flags & TF_STOP) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + task->task_flags &= ~TF_RUNNING; + + /* + * Determine if transport_complete_task() has already been called. 
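+	 * If task_active has already been cleared the task completed
+	 * normally before this timer fired, so there is nothing left to
+	 * time out.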
+ */ + if (!(atomic_read(&task->task_active))) { + DEBUG_TT("transport task: %p cmd: %p timeout task_active" + " == 0\n", task, cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + + atomic_inc(&T_TASK(cmd)->t_se_count); + atomic_inc(&T_TASK(cmd)->t_transport_timeout); + T_TASK(cmd)->t_tasks_failed = 1; + + atomic_set(&task->task_timeout, 1); + task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; + task->task_scsi_status = 1; + + if (atomic_read(&task->task_stop)) { + DEBUG_TT("transport task: %p cmd: %p timeout task_stop" + " == 1\n", task, cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + complete(&task->task_stop_comp); + return; + } + + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { + DEBUG_TT("transport task: %p cmd: %p timeout non zero" + " t_task_cdbs_left\n", task, cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return; + } + DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", + task, cmd); + + cmd->t_state = TRANSPORT_COMPLETE_FAILURE; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); +} + +/* + * Called with T_TASK(cmd)->t_state_lock held. + */ +static void transport_start_task_timer(struct se_task *task) +{ + struct se_device *dev = task->se_dev; + int timeout; + + if (task->task_flags & TF_RUNNING) + return; + /* + * If the task_timeout is disabled, exit now. + */ + timeout = DEV_ATTRIB(dev)->task_timeout; + if (!(timeout)) + return; + + init_timer(&task->task_timer); + task->task_timer.expires = (get_jiffies_64() + timeout * HZ); + task->task_timer.data = (unsigned long) task; + task->task_timer.function = transport_task_timeout_handler; + + task->task_flags |= TF_RUNNING; + add_timer(&task->task_timer); +#if 0 + printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" + " %d\n", task->task_se_cmd, task, timeout); +#endif +} + +/* + * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. + */ +void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) +{ + struct se_cmd *cmd = TASK_CMD(task); + + if (!(task->task_flags & TF_RUNNING)) + return; + + task->task_flags |= TF_STOP; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); + + del_timer_sync(&task->task_timer); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); + task->task_flags &= ~TF_RUNNING; + task->task_flags &= ~TF_STOP; +} + +static void transport_stop_all_task_timers(struct se_cmd *cmd) +{ + struct se_task *task = NULL, *task_tmp; + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + list_for_each_entry_safe(task, task_tmp, + &T_TASK(cmd)->t_task_list, t_list) + __transport_stop_task_timer(task, &flags); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); +} + +static inline int transport_tcq_window_closed(struct se_device *dev) +{ + if (dev->dev_tcq_window_closed++ < + PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { + msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); + } else + msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); + + wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + return 0; +} + +/* + * Called from Fabric Module context from transport_execute_tasks() + * + * The return of this function determins if the tasks from struct se_cmd + * get added to the execution queue in transport_execute_tasks(), + * or are added to the delayed or ordered lists here. 
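+ * A return of 1 means the command's tasks may be added to the execution + * queue right away; a return of 0 means they were parked on the delayed + * list and are drained later by transport_complete_task_attr().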
+ */ +static inline int transport_execute_task_attr(struct se_cmd *cmd) +{ + if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + return 1; + /* + * Check for the existance of HEAD_OF_QUEUE, and if true return 1 + * to allow the passed struct se_cmd list of tasks to the front of the list. + */ + if (cmd->sam_task_attr == TASK_ATTR_HOQ) { + atomic_inc(&SE_DEV(cmd)->dev_hoq_count); + smp_mb__after_atomic_inc(); + DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" + " 0x%02x, se_ordered_id: %u\n", + T_TASK(cmd)->t_task_cdb[0], + cmd->se_ordered_id); + return 1; + } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { + spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); + list_add_tail(&cmd->se_ordered_list, + &SE_DEV(cmd)->ordered_cmd_list); + spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); + + atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); + smp_mb__after_atomic_inc(); + + DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" + " list, se_ordered_id: %u\n", + T_TASK(cmd)->t_task_cdb[0], + cmd->se_ordered_id); + /* + * Add ORDERED command to tail of execution queue if + * no other older commands exist that need to be + * completed first. + */ + if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) + return 1; + } else { + /* + * For SIMPLE and UNTAGGED Task Attribute commands + */ + atomic_inc(&SE_DEV(cmd)->simple_cmds); + smp_mb__after_atomic_inc(); + } + /* + * Otherwise if one or more outstanding ORDERED task attribute exist, + * add the dormant task(s) built for the passed struct se_cmd to the + * execution queue and become in Active state for this struct se_device. + */ + if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { + /* + * Otherwise, add cmd w/ tasks to delayed cmd queue that + * will be drained upon competion of HEAD_OF_QUEUE task. + */ + spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); + cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; + list_add_tail(&cmd->se_delayed_list, + &SE_DEV(cmd)->delayed_cmd_list); + spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); + + DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" + " delayed CMD list, se_ordered_id: %u\n", + T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, + cmd->se_ordered_id); + /* + * Return zero to let transport_execute_tasks() know + * not to add the delayed tasks to the execution list. + */ + return 0; + } + /* + * Otherwise, no ORDERED task attributes exist.. + */ + return 1; +} + +/* + * Called from fabric module context in transport_generic_new_cmd() and + * transport_generic_process_write() + */ +static int transport_execute_tasks(struct se_cmd *cmd) +{ + int add_tasks; + + if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { + if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { + cmd->transport_error_status = + PYX_TRANSPORT_LU_COMM_FAILURE; + transport_generic_request_failure(cmd, NULL, 0, 1); + return 0; + } + } + /* + * Call transport_cmd_check_stop() to see if a fabric exception + * has occured that prevents execution. + */ + if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { + /* + * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE + * attribute for the tasks of the received struct se_cmd CDB + */ + add_tasks = transport_execute_task_attr(cmd); + if (add_tasks == 0) + goto execute_tasks; + /* + * This calls transport_add_tasks_from_cmd() to handle + * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation + * (if enabled) in __transport_add_task_to_execute_queue() and + * transport_add_task_check_sam_attr(). 
+ */ + transport_add_tasks_from_cmd(cmd); + } + /* + * Kick the execution queue for the cmd associated struct se_device + * storage object. + */ +execute_tasks: + __transport_execute_tasks(SE_DEV(cmd)); + return 0; +} + +/* + * Called to check struct se_device tcq depth window, and once open pull struct se_task + * from struct se_device->execute_task_list and + * + * Called from transport_processing_thread() + */ +static int __transport_execute_tasks(struct se_device *dev) +{ + int error; + struct se_cmd *cmd = NULL; + struct se_task *task; + unsigned long flags; + + /* + * Check if there is enough room in the device and HBA queue to send + * struct se_transport_task's to the selected transport. + */ +check_depth: + spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); + if (!(atomic_read(&dev->depth_left)) || + !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { + spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + return transport_tcq_window_closed(dev); + } + dev->dev_tcq_window_closed = 0; + + spin_lock(&dev->execute_task_lock); + task = transport_get_task_from_execute_queue(dev); + spin_unlock(&dev->execute_task_lock); + + if (!task) { + spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + return 0; + } + + atomic_dec(&dev->depth_left); + atomic_dec(&SE_HBA(dev)->left_queue_depth); + spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + + cmd = TASK_CMD(task); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&task->task_active, 1); + atomic_set(&task->task_sent, 1); + atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); + + if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == + T_TASK(cmd)->t_task_cdbs) + atomic_set(&cmd->transport_sent, 1); + + transport_start_task_timer(task); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + /* + * The struct se_cmd->transport_emulate_cdb() function pointer is used + * to grab REPORT_LUNS CDBs before they hit the + * struct se_subsystem_api->do_task() caller below. + */ + if (cmd->transport_emulate_cdb) { + error = cmd->transport_emulate_cdb(cmd); + if (error != 0) { + cmd->transport_error_status = error; + atomic_set(&task->task_active, 0); + atomic_set(&cmd->transport_sent, 0); + transport_stop_tasks_for_cmd(cmd); + transport_generic_request_failure(cmd, dev, 0, 1); + goto check_depth; + } + /* + * Handle the successful completion for transport_emulate_cdb() + * for synchronous operation, following SCF_EMULATE_CDB_ASYNC + * Otherwise the caller is expected to complete the task with + * proper status. + */ + if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { + cmd->scsi_status = SAM_STAT_GOOD; + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + } else { + /* + * Currently for all virtual TCM plugins including IBLOCK, FILEIO and + * RAMDISK we use the internal transport_emulate_control_cdb() logic + * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK + * LUN emulation code. + * + * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we + * call ->do_task() directly and let the underlying TCM subsystem plugin + * code handle the CDB emulation. 
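+ * + * For example, an INQUIRY queued to an IBLOCK or FILEIO backstore takes + * the transport_emulate_control_cdb() path below, while a READ_10 + * (SCF_SCSI_DATA_SG_IO_CDB) or any CDB on a pSCSI passthrough device is + * handed directly to ->do_task().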
+ */ + if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && + (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + error = transport_emulate_control_cdb(task); + else + error = TRANSPORT(dev)->do_task(task); + + if (error != 0) { + cmd->transport_error_status = error; + atomic_set(&task->task_active, 0); + atomic_set(&cmd->transport_sent, 0); + transport_stop_tasks_for_cmd(cmd); + transport_generic_request_failure(cmd, dev, 0, 1); + } + } + + goto check_depth; + + return 0; +} + +void transport_new_cmd_failure(struct se_cmd *se_cmd) +{ + unsigned long flags; + /* + * Any unsolicited data will get dumped for failed command inside of + * the fabric plugin + */ + spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); + + CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); +} + +static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); + +static inline u32 transport_get_sectors_6( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 8-bit sector value. + */ + if (!dev) + goto type_disk; + + /* + * Use 24-bit allocation length for TYPE_TAPE. + */ + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; + + /* + * Everything else assume TYPE_DISK Sector CDB location. + * Use 8-bit sector value. + */ +type_disk: + return (u32)cdb[4]; +} + +static inline u32 transport_get_sectors_10( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 16-bit sector value. + */ + if (!dev) + goto type_disk; + + /* + * XXX_10 is not defined in SSC, throw an exception + */ + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { + *ret = -1; + return 0; + } + + /* + * Everything else assume TYPE_DISK Sector CDB location. + * Use 16-bit sector value. + */ +type_disk: + return (u32)(cdb[7] << 8) + cdb[8]; +} + +static inline u32 transport_get_sectors_12( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. + */ + if (!dev) + goto type_disk; + + /* + * XXX_12 is not defined in SSC, throw an exception + */ + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { + *ret = -1; + return 0; + } + + /* + * Everything else assume TYPE_DISK Sector CDB location. + * Use 32-bit sector value. + */ +type_disk: + return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; +} + +static inline u32 transport_get_sectors_16( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. + */ + if (!dev) + goto type_disk; + + /* + * Use 24-bit allocation length for TYPE_TAPE. 
+ */ + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; + +type_disk: + return (u32)(cdb[10] << 24) + (cdb[11] << 16) + + (cdb[12] << 8) + cdb[13]; +} + +/* + * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants + */ +static inline u32 transport_get_sectors_32( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. + */ + return (u32)(cdb[28] << 24) + (cdb[29] << 16) + + (cdb[30] << 8) + cdb[31]; + +} + +static inline u32 transport_get_size( + u32 sectors, + unsigned char *cdb, + struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + + if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { + if (cdb[1] & 1) { /* sectors */ + return DEV_ATTRIB(dev)->block_size * sectors; + } else /* bytes */ + return sectors; + } +#if 0 + printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" + " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, + DEV_ATTRIB(dev)->block_size * sectors, + TRANSPORT(dev)->name); +#endif + return DEV_ATTRIB(dev)->block_size * sectors; +} + +unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) +{ + unsigned char result = 0; + /* + * MSB + */ + if ((val[0] >= 'a') && (val[0] <= 'f')) + result = ((val[0] - 'a' + 10) & 0xf) << 4; + else + if ((val[0] >= 'A') && (val[0] <= 'F')) + result = ((val[0] - 'A' + 10) & 0xf) << 4; + else /* digit */ + result = ((val[0] - '0') & 0xf) << 4; + /* + * LSB + */ + if ((val[1] >= 'a') && (val[1] <= 'f')) + result |= ((val[1] - 'a' + 10) & 0xf); + else + if ((val[1] >= 'A') && (val[1] <= 'F')) + result |= ((val[1] - 'A' + 10) & 0xf); + else /* digit */ + result |= ((val[1] - '0') & 0xf); + + return result; +} +EXPORT_SYMBOL(transport_asciihex_to_binaryhex); + +static void transport_xor_callback(struct se_cmd *cmd) +{ + unsigned char *buf, *addr; + struct se_mem *se_mem; + unsigned int offset; + int i; + /* + * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command + * + * 1) read the specified logical block(s); + * 2) transfer logical blocks from the data-out buffer; + * 3) XOR the logical blocks transferred from the data-out buffer with + * the logical blocks read, storing the resulting XOR data in a buffer; + * 4) if the DISABLE WRITE bit is set to zero, then write the logical + * blocks transferred from the data-out buffer; and + * 5) transfer the resulting XOR data to the data-in buffer. 
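+ * + * For example, a byte that reads back as 0xF0 and is XORed with 0x0F from + * the data-out buffer is returned in the data-in buffer as 0xFF.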
+ */ + buf = kmalloc(cmd->data_length, GFP_KERNEL); + if (!(buf)) { + printk(KERN_ERR "Unable to allocate xor_callback buf\n"); + return; + } + /* + * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list + * into the locally allocated *buf + */ + transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); + /* + * Now perform the XOR against the BIDI read memory located at + * T_TASK(cmd)->t_mem_bidi_list + */ + + offset = 0; + list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { + addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); + if (!(addr)) + goto out; + + for (i = 0; i < se_mem->se_len; i++) + *(addr + se_mem->se_off + i) ^= *(buf + offset + i); + + offset += se_mem->se_len; + kunmap_atomic(addr, KM_USER0); + } +out: + kfree(buf); +} + +/* + * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd + */ +static int transport_get_sense_data(struct se_cmd *cmd) +{ + unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; + struct se_device *dev; + struct se_task *task = NULL, *task_tmp; + unsigned long flags; + u32 offset = 0; + + if (!SE_LUN(cmd)) { + printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); + return -1; + } + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return 0; + } + + list_for_each_entry_safe(task, task_tmp, + &T_TASK(cmd)->t_task_list, t_list) { + + if (!task->task_sense) + continue; + + dev = task->se_dev; + if (!(dev)) + continue; + + if (!TRANSPORT(dev)->get_sense_buffer) { + printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" + " is NULL\n"); + continue; + } + + sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); + if (!(sense_buffer)) { + printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" + " sense buffer for task with sense\n", + CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); + continue; + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + TRANSPORT_SENSE_BUFFER); + + memcpy((void *)&buffer[offset], (void *)sense_buffer, + TRANSPORT_SENSE_BUFFER); + cmd->scsi_status = task->task_scsi_status; + /* Automatically padded */ + cmd->scsi_sense_length = + (TRANSPORT_SENSE_BUFFER + offset); + + printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" + " and sense\n", + dev->se_hba->hba_id, TRANSPORT(dev)->name, + cmd->scsi_status); + return 0; + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return -1; +} + +static int transport_allocate_resources(struct se_cmd *cmd) +{ + u32 length = cmd->data_length; + + if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || + (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) + return transport_generic_get_mem(cmd, length, PAGE_SIZE); + else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) + return transport_generic_allocate_buf(cmd, length); + else + return 0; +} + +static int +transport_handle_reservation_conflict(struct se_cmd *cmd) +{ + cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + /* + * For UA Interlock Code 11b, a RESERVATION CONFLICT will + * establish a UNIT ATTENTION with PREVIOUS RESERVATION + * CONFLICT STATUS. 
+ * + * See spc4r17, section 7.4.6 Control Mode Page, Table 349 + */ + if (SE_SESS(cmd) && + DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + cmd->orig_fe_lun, 0x2C, + ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); + return -2; +} + +/* transport_generic_cmd_sequencer(): + * + * Generic Command Sequencer that should work for most DAS transport + * drivers. + * + * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD + * RX Thread. + * + * FIXME: Need to support other SCSI OPCODES where as well. + */ +static int transport_generic_cmd_sequencer( + struct se_cmd *cmd, + unsigned char *cdb) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + int ret = 0, sector_ret = 0, passthrough; + u32 sectors = 0, size = 0, pr_reg_type = 0; + u16 service_action; + u8 alua_ascq = 0; + /* + * Check for an existing UNIT ATTENTION condition + */ + if (core_scsi3_ua_check(cmd, cdb) < 0) { + cmd->transport_wait_for_tasks = + &transport_nop_wait_for_tasks; + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; + return -2; + } + /* + * Check status of Asymmetric Logical Unit Assignment port + */ + ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); + if (ret != 0) { + cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; + /* + * Set SCSI additional sense code (ASC) to 'LUN Not Accessable'; + * The ALUA additional sense code qualifier (ASCQ) is determined + * by the ALUA primary or secondary access state.. + */ + if (ret > 0) { +#if 0 + printk(KERN_INFO "[%s]: ALUA TG Port not available," + " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", + CMD_TFO(cmd)->get_fabric_name(), alua_ascq); +#endif + transport_set_sense_codes(cmd, 0x04, alua_ascq); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; + return -2; + } + goto out_invalid_cdb_field; + } + /* + * Check status for SPC-3 Persistent Reservations + */ + if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { + if (T10_PR_OPS(su_dev)->t10_seq_non_holder( + cmd, cdb, pr_reg_type) != 0) + return transport_handle_reservation_conflict(cmd); + /* + * This means the CDB is allowed for the SCSI Initiator port + * when said port is *NOT* holding the legacy SPC-2 or + * SPC-3 Persistent Reservation. 
+ */ + } + + switch (cdb[0]) { + case READ_6: + sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_6; + T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_10: + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_10; + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_12: + sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_12; + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_16: + sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_16; + T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_6: + sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_6; + T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_10: + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_10; + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_12: + sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_12; + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_16: + sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_16; + T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case XDWRITEREAD_10: + if ((cmd->data_direction != DMA_TO_DEVICE) || + !(T_TASK(cmd)->t_tasks_bidi)) + goto out_invalid_cdb_field; + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->transport_split_cdb = &split_cdb_XX_10; + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + passthrough = (TRANSPORT(dev)->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + /* + * Skip the remaining assignments for TCM/PSCSI passthrough + */ + if (passthrough) + break; + /* + * Setup BIDI XOR callback to be run during transport_generic_complete_ok() + */ + cmd->transport_complete_callback = &transport_xor_callback; + T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + break;
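+ /* + * Worked example for the READ/WRITE decode above, assuming a 512-byte + * block size: a READ_10 CDB of 28 00 00 00 12 34 00 00 08 00 gives + * transport_lba_32(cdb) = 0x1234, transport_get_sectors_10() = 8 and + * transport_get_size() = 8 * 512 = 4096 bytes. + */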
+ case VARIABLE_LENGTH_CMD: + service_action = get_unaligned_be16(&cdb[8]); + /* + * Determine if this is a TCM/PSCSI device and we should disable + * internal emulation for this CDB. + */ + passthrough = (TRANSPORT(dev)->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + + switch (service_action) { + case XDWRITEREAD_32: + sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + /* + * Use WRITE_32 and READ_32 opcodes for the emulated + * XDWRITE_READ_32 logic. + */ + cmd->transport_split_cdb = &split_cdb_XX_32; + T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + + /* + * Skip the remaining assignments for TCM/PSCSI passthrough + */ + if (passthrough) + break; + + /* + * Setup BIDI XOR callback to be run during + * transport_generic_complete_ok() + */ + cmd->transport_complete_callback = &transport_xor_callback; + T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); + break; + case WRITE_SAME_32: + sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + + /* + * Skip the remaining assignments for TCM/PSCSI passthrough + */ + if (passthrough) + break; + + if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { + printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); + goto out_invalid_cdb_field; + } + /* + * Currently for the emulated case we only accept + * tpws with the UNMAP=1 bit set. + */ + if (!(cdb[10] & 0x08)) { + printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" + " supported for Block Discard Emulation\n"); + goto out_invalid_cdb_field; + } + break; + default: + printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" + " 0x%04x not supported\n", service_action); + goto out_unsupported_cdb; + } + break; + case 0xa3: + if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + /* MAINTENANCE_IN from SCC-2 */ + /* + * Check for emulated MI_REPORT_TARGET_PGS. + */ + if (cdb[1] == MI_REPORT_TARGET_PGS) { + cmd->transport_emulate_cdb = + (T10_ALUA(su_dev)->alua_type == + SPC3_ALUA_EMULATED) ?
+ &core_emulate_report_target_port_groups : + NULL; + } + size = (cdb[6] << 24) | (cdb[7] << 16) | + (cdb[8] << 8) | cdb[9]; + } else { + /* GPCMD_SEND_KEY from multi media commands */ + size = (cdb[8] << 8) + cdb[9]; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case MODE_SELECT: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MODE_SELECT_10: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MODE_SENSE: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case MODE_SENSE_10: + case GPCMD_READ_BUFFER_CAPACITY: + case GPCMD_SEND_OPC: + case LOG_SELECT: + case LOG_SENSE: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case READ_BLOCK_LIMITS: + size = READ_BLOCK_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case GPCMD_GET_CONFIGURATION: + case GPCMD_READ_FORMAT_CAPACITIES: + case GPCMD_READ_DISC_INFO: + case GPCMD_READ_TRACK_RZONE_INFO: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case PERSISTENT_RESERVE_IN: + case PERSISTENT_RESERVE_OUT: + cmd->transport_emulate_cdb = + (T10_RES(su_dev)->res_type == + SPC3_PERSISTENT_RESERVATIONS) ? + &core_scsi3_emulate_pr : NULL; + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case GPCMD_MECHANISM_STATUS: + case GPCMD_READ_DVD_STRUCTURE: + size = (cdb[8] << 8) + cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case READ_POSITION: + size = READ_POSITION_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case 0xa4: + if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + /* MAINTENANCE_OUT from SCC-2 + * + * Check for emulated MO_SET_TARGET_PGS. + */ + if (cdb[1] == MO_SET_TARGET_PGS) { + cmd->transport_emulate_cdb = + (T10_ALUA(su_dev)->alua_type == + SPC3_ALUA_EMULATED) ? + &core_emulate_set_target_port_groups : + NULL; + } + + size = (cdb[6] << 24) | (cdb[7] << 16) | + (cdb[8] << 8) | cdb[9]; + } else { + /* GPCMD_REPORT_KEY from multi media commands */ + size = (cdb[8] << 8) + cdb[9]; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case INQUIRY: + size = (cdb[3] << 8) + cdb[4]; + /* + * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
+ * See spc4r17 section 5.3 + */ + if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + cmd->sam_task_attr = TASK_ATTR_HOQ; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case READ_BUFFER: + size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case READ_CAPACITY: + size = READ_CAP_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case READ_MEDIA_SERIAL_NUMBER: + case SECURITY_PROTOCOL_IN: + case SECURITY_PROTOCOL_OUT: + size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case SERVICE_ACTION_IN: + case ACCESS_CONTROL_IN: + case ACCESS_CONTROL_OUT: + case EXTENDED_COPY: + case READ_ATTRIBUTE: + case RECEIVE_COPY_RESULTS: + case WRITE_ATTRIBUTE: + size = (cdb[10] << 24) | (cdb[11] << 16) | + (cdb[12] << 8) | cdb[13]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + size = (cdb[3] << 8) | cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; +/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ +#if 0 + case GPCMD_READ_CD: + sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + size = (2336 * sectors); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; +#endif + case READ_TOC: + size = cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case REQUEST_SENSE: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case READ_ELEMENT_STATUS: + size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case WRITE_BUFFER: + size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case RESERVE: + case RESERVE_10: + /* + * The SPC-2 RESERVE does not contain a size in the SCSI CDB. + * Assume the passthrough or $FABRIC_MOD will tell us about it. + */ + if (cdb[0] == RESERVE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + + /* + * Setup the legacy emulated handler for SPC-2 and + * >= SPC-3 compatible reservation handling (CRH=1) + * Otherwise, we assume the underlying SCSI logic is + * is running in SPC_PASSTHROUGH, and wants reservations + * emulation disabled. + */ + cmd->transport_emulate_cdb = + (T10_RES(su_dev)->res_type != + SPC_PASSTHROUGH) ? + &core_scsi2_emulate_crh : NULL; + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case RELEASE: + case RELEASE_10: + /* + * The SPC-2 RELEASE does not contain a size in the SCSI CDB. + * Assume the passthrough or $FABRIC_MOD will tell us about it. + */ + if (cdb[0] == RELEASE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + + cmd->transport_emulate_cdb = + (T10_RES(su_dev)->res_type != + SPC_PASSTHROUGH) ? 
+ &core_scsi2_emulate_crh : NULL; + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case SYNCHRONIZE_CACHE: + case 0x91: /* SYNCHRONIZE_CACHE_16: */ + /* + * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE + */ + if (cdb[0] == SYNCHRONIZE_CACHE) { + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); + T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); + T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + } + if (sector_ret) + goto out_unsupported_cdb; + + size = transport_get_size(sectors, cdb, cmd); + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + + /* + * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() + */ + if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + break; + /* + * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation + * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() + */ + cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; + /* + * Check to ensure that LBA + Range does not extend past the end of + * the device. + */ + if (transport_get_sectors(cmd) < 0) + goto out_invalid_cdb_field; + break; + case UNMAP: + size = get_unaligned_be16(&cdb[7]); + passthrough = (TRANSPORT(dev)->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + /* + * Determine if the received UNMAP is used for direct passthrough + * into Linux/SCSI with struct request via TCM/pSCSI or we are + * signaling the use of internal transport_generic_unmap() emulation + * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO + * subsystem plugin backstores. + */ + if (!(passthrough)) + cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; + + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + case WRITE_SAME_16: + sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]); + passthrough = (TRANSPORT(dev)->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + /* + * Determine if the received WRITE_SAME_16 is used for direct + * passthrough into Linux/SCSI with struct request via TCM/pSCSI + * or we are signaling the use of internal WRITE_SAME + UNMAP=1 + * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and + * TCM/FILEIO subsystem plugin backstores. + */ + if (!(passthrough)) { + if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { + printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); + goto out_invalid_cdb_field; + } + /* + * Currently for the emulated case we only accept + * tpws with the UNMAP=1 bit set.
+ */ + if (!(cdb[1] & 0x08)) { + printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " + " supported for Block Discard Emulation\n"); + goto out_invalid_cdb_field; + } + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case ALLOW_MEDIUM_REMOVAL: + case GPCMD_CLOSE_TRACK: + case ERASE: + case INITIALIZE_ELEMENT_STATUS: + case GPCMD_LOAD_UNLOAD: + case REZERO_UNIT: + case SEEK_10: + case GPCMD_SET_SPEED: + case SPACE: + case START_STOP: + case TEST_UNIT_READY: + case VERIFY: + case WRITE_FILEMARKS: + case MOVE_MEDIUM: + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case REPORT_LUNS: + cmd->transport_emulate_cdb = + &transport_core_report_lun_response; + size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + /* + * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS + * See spc4r17 section 5.3 + */ + if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + cmd->sam_task_attr = TASK_ATTR_HOQ; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + break; + default: + printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" + " 0x%02x, sending CHECK_CONDITION.\n", + CMD_TFO(cmd)->get_fabric_name(), cdb[0]); + cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; + goto out_unsupported_cdb; + } + + if (size != cmd->data_length) { + printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" + " %u does not match SCSI CDB Length: %u for SAM Opcode:" + " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), + cmd->data_length, size, cdb[0]); + + cmd->cmd_spdtl = size; + + if (cmd->data_direction == DMA_TO_DEVICE) { + printk(KERN_ERR "Rejecting underflow/overflow" + " WRITE data\n"); + goto out_invalid_cdb_field; + } + /* + * Reject READ_* or WRITE_* with overflow/underflow for + * type SCF_SCSI_DATA_SG_IO_CDB. + */ + if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { + printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" + " CDB on non 512-byte sector setup subsystem" + " plugin: %s\n", TRANSPORT(dev)->name); + /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ + goto out_invalid_cdb_field; + } + + if (size > cmd->data_length) { + cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; + cmd->residual_count = (size - cmd->data_length); + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = (cmd->data_length - size); + } + cmd->data_length = size; + } + + transport_set_supported_SAM_opcode(cmd); + return ret; + +out_unsupported_cdb: + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -2; +out_invalid_cdb_field: + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -2; +} + +static inline void transport_release_tasks(struct se_cmd *); + +/* + * This function will copy a contiguous *src buffer into a destination + * struct scatterlist array. + */ +static void transport_memcpy_write_contig( + struct se_cmd *cmd, + struct scatterlist *sg_d, + unsigned char *src) +{ + u32 i = 0, length = 0, total_length = cmd->data_length; + void *dst; + + while (total_length) { + length = sg_d[i].length; + + if (length > total_length) + length = total_length; + + dst = sg_virt(&sg_d[i]); + + memcpy(dst, src, length); + + if (!(total_length -= length)) + return; + + src += length; + i++; + } +} + +/* + * This function will copy a struct scatterlist array *sg_s into a destination + * contiguous *dst buffer. 
+ */ +static void transport_memcpy_read_contig( + struct se_cmd *cmd, + unsigned char *dst, + struct scatterlist *sg_s) +{ + u32 i = 0, length = 0, total_length = cmd->data_length; + void *src; + + while (total_length) { + length = sg_s[i].length; + + if (length > total_length) + length = total_length; + + src = sg_virt(&sg_s[i]); + + memcpy(dst, src, length); + + if (!(total_length -= length)) + return; + + dst += length; + i++; + } +} + +static void transport_memcpy_se_mem_read_contig( + struct se_cmd *cmd, + unsigned char *dst, + struct list_head *se_mem_list) +{ + struct se_mem *se_mem; + void *src; + u32 length = 0, total_length = cmd->data_length; + + list_for_each_entry(se_mem, se_mem_list, se_list) { + length = se_mem->se_len; + + if (length > total_length) + length = total_length; + + src = page_address(se_mem->se_page) + se_mem->se_off; + + memcpy(dst, src, length); + + if (!(total_length -= length)) + return; + + dst += length; + } +} + +/* + * Called from transport_generic_complete_ok() and + * transport_generic_request_failure() to determine which dormant/delayed + * and ordered cmds need to have their tasks added to the execution queue. + */ +static void transport_complete_task_attr(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_cmd *cmd_p, *cmd_tmp; + int new_active_tasks = 0; + + if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { + atomic_dec(&dev->simple_cmds); + smp_mb__after_atomic_dec(); + dev->dev_cur_ordered_id++; + DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" + " SIMPLE: %u\n", dev->dev_cur_ordered_id, + cmd->se_ordered_id); + } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { + atomic_dec(&dev->dev_hoq_count); + smp_mb__after_atomic_dec(); + dev->dev_cur_ordered_id++; + DEBUG_STA("Incremented dev_cur_ordered_id: %u for" + " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, + cmd->se_ordered_id); + } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { + spin_lock(&dev->ordered_cmd_lock); + list_del(&cmd->se_ordered_list); + atomic_dec(&dev->dev_ordered_sync); + smp_mb__after_atomic_dec(); + spin_unlock(&dev->ordered_cmd_lock); + + dev->dev_cur_ordered_id++; + DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" + " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); + } + /* + * Process all commands up to the last received + * ORDERED task attribute which requires another blocking + * boundary + */ + spin_lock(&dev->delayed_cmd_lock); + list_for_each_entry_safe(cmd_p, cmd_tmp, + &dev->delayed_cmd_list, se_delayed_list) { + + list_del(&cmd_p->se_delayed_list); + spin_unlock(&dev->delayed_cmd_lock); + + DEBUG_STA("Calling add_tasks() for" + " cmd_p: 0x%02x Task Attr: 0x%02x" + " Dormant -> Active, se_ordered_id: %u\n", + T_TASK(cmd_p)->t_task_cdb[0], + cmd_p->sam_task_attr, cmd_p->se_ordered_id); + + transport_add_tasks_from_cmd(cmd_p); + new_active_tasks++; + + spin_lock(&dev->delayed_cmd_lock); + if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) + break; + } + spin_unlock(&dev->delayed_cmd_lock); + /* + * If new tasks have become active, wake up the transport thread + * to do the processing of the Active tasks. + */ + if (new_active_tasks != 0) + wake_up_interruptible(&dev->dev_queue_obj->thread_wq); +} + +static void transport_generic_complete_ok(struct se_cmd *cmd) +{ + int reason = 0; + /* + * Check if we need to move delayed/dormant tasks from cmds on the + * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task + * Attribute. 
+ */ + if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + transport_complete_task_attr(cmd); + /* + * Check if we need to retrieve a sense buffer from + * the struct se_cmd in question. + */ + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + if (transport_get_sense_data(cmd) < 0) + reason = TCM_NON_EXISTENT_LUN; + + /* + * Only set when an struct se_task->task_scsi_status returned + * a non GOOD status. + */ + if (cmd->scsi_status) { + transport_send_check_condition_and_sense( + cmd, reason, 1); + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + } + /* + * Check for a callback, used by amoungst other things + * XDWRITE_READ_10 emulation. + */ + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd); + + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + spin_lock(&cmd->se_lun->lun_sep_lock); + if (SE_LUN(cmd)->lun_sep) { + SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + cmd->data_length; + } + spin_unlock(&cmd->se_lun->lun_sep_lock); + /* + * If enabled by TCM fabirc module pre-registered SGL + * memory, perform the memcpy() from the TCM internal + * contigious buffer back to the original SGL. + */ + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) + transport_memcpy_write_contig(cmd, + T_TASK(cmd)->t_task_pt_sgl, + T_TASK(cmd)->t_task_buf); + + CMD_TFO(cmd)->queue_data_in(cmd); + break; + case DMA_TO_DEVICE: + spin_lock(&cmd->se_lun->lun_sep_lock); + if (SE_LUN(cmd)->lun_sep) { + SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += + cmd->data_length; + } + spin_unlock(&cmd->se_lun->lun_sep_lock); + /* + * Check if we need to send READ payload for BIDI-COMMAND + */ + if (T_TASK(cmd)->t_mem_bidi_list != NULL) { + spin_lock(&cmd->se_lun->lun_sep_lock); + if (SE_LUN(cmd)->lun_sep) { + SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + cmd->data_length; + } + spin_unlock(&cmd->se_lun->lun_sep_lock); + CMD_TFO(cmd)->queue_data_in(cmd); + break; + } + /* Fall through for DMA_TO_DEVICE */ + case DMA_NONE: + CMD_TFO(cmd)->queue_status(cmd); + break; + default: + break; + } + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); +} + +static void transport_free_dev_tasks(struct se_cmd *cmd) +{ + struct se_task *task, *task_tmp; + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + list_for_each_entry_safe(task, task_tmp, + &T_TASK(cmd)->t_task_list, t_list) { + if (atomic_read(&task->task_active)) + continue; + + kfree(task->task_sg_bidi); + kfree(task->task_sg); + + list_del(&task->t_list); + + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + if (task->se_dev) + TRANSPORT(task->se_dev)->free_task(task); + else + printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", + task->task_no); + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); +} + +static inline void transport_free_pages(struct se_cmd *cmd) +{ + struct se_mem *se_mem, *se_mem_tmp; + int free_page = 1; + + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) + free_page = 0; + if (cmd->se_dev->transport->do_se_mem_map) + free_page = 0; + + if (T_TASK(cmd)->t_task_buf) { + kfree(T_TASK(cmd)->t_task_buf); + T_TASK(cmd)->t_task_buf = NULL; + return; + } + + /* + * Caller will handle releasing of struct se_mem. 
+ */ + if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) + return; + + if (!(T_TASK(cmd)->t_tasks_se_num)) + return; + + list_for_each_entry_safe(se_mem, se_mem_tmp, + T_TASK(cmd)->t_mem_list, se_list) { + /* + * We only release call __free_page(struct se_mem->se_page) when + * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, + */ + if (free_page) + __free_page(se_mem->se_page); + + list_del(&se_mem->se_list); + kmem_cache_free(se_mem_cache, se_mem); + } + + if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { + list_for_each_entry_safe(se_mem, se_mem_tmp, + T_TASK(cmd)->t_mem_bidi_list, se_list) { + /* + * We only release call __free_page(struct se_mem->se_page) when + * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, + */ + if (free_page) + __free_page(se_mem->se_page); + + list_del(&se_mem->se_list); + kmem_cache_free(se_mem_cache, se_mem); + } + } + + kfree(T_TASK(cmd)->t_mem_bidi_list); + T_TASK(cmd)->t_mem_bidi_list = NULL; + kfree(T_TASK(cmd)->t_mem_list); + T_TASK(cmd)->t_mem_list = NULL; + T_TASK(cmd)->t_tasks_se_num = 0; +} + +static inline void transport_release_tasks(struct se_cmd *cmd) +{ + transport_free_dev_tasks(cmd); +} + +static inline int transport_dec_and_check(struct se_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + return 1; + } + } + + if (atomic_read(&T_TASK(cmd)->t_se_count)) { + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + return 1; + } + } + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + return 0; +} + +static void transport_release_fe_cmd(struct se_cmd *cmd) +{ + unsigned long flags; + + if (transport_dec_and_check(cmd)) + return; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + goto free_pages; + } + atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_release_tasks(cmd); +free_pages: + transport_free_pages(cmd); + transport_free_se_cmd(cmd); + CMD_TFO(cmd)->release_cmd_direct(cmd); +} + +static int transport_generic_remove( + struct se_cmd *cmd, + int release_to_pool, + int session_reinstatement) +{ + unsigned long flags; + + if (!(T_TASK(cmd))) + goto release_cmd; + + if (transport_dec_and_check(cmd)) { + if (session_reinstatement) { + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + flags); + } + return 1; + } + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + goto free_pages; + } + atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + transport_release_tasks(cmd); +free_pages: + transport_free_pages(cmd); + +release_cmd: + if (release_to_pool) { + transport_release_cmd_to_pool(cmd); + } else { + transport_free_se_cmd(cmd); + CMD_TFO(cmd)->release_cmd_direct(cmd); + } + + return 0; +} + +/* + * transport_generic_map_mem_to_cmd - Perform SGL -> struct 
se_mem map + * @cmd: Associated se_cmd descriptor + * @mem: SGL style memory for TCM WRITE / READ + * @sg_mem_num: Number of SGL elements + * @mem_bidi_in: SGL style memory for TCM BIDI READ + * @sg_mem_bidi_num: Number of BIDI READ SGL elements + * + * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage + * of parameters. + */ +int transport_generic_map_mem_to_cmd( + struct se_cmd *cmd, + struct scatterlist *mem, + u32 sg_mem_num, + struct scatterlist *mem_bidi_in, + u32 sg_mem_bidi_num) +{ + u32 se_mem_cnt_out = 0; + int ret; + + if (!(mem) || !(sg_mem_num)) + return 0; + /* + * Passed *mem will contain a list_head containing preformatted + * struct se_mem elements... + */ + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { + if ((mem_bidi_in) || (sg_mem_bidi_num)) { + printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" + " with BIDI-COMMAND\n"); + return -ENOSYS; + } + + T_TASK(cmd)->t_mem_list = (struct list_head *)mem; + T_TASK(cmd)->t_tasks_se_num = sg_mem_num; + cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; + return 0; + } + /* + * Otherwise, assume the caller is passing a struct scatterlist + * array from include/linux/scatterlist.h + */ + if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || + (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { + /* + * For CDB using TCM struct se_mem linked list scatterlist memory + * processed into a TCM struct se_subsystem_dev, we do the mapping + * from the passed physical memory to struct se_mem->se_page here. + */ + T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); + if (!(T_TASK(cmd)->t_mem_list)) + return -ENOMEM; + + ret = transport_map_sg_to_mem(cmd, + T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); + if (ret < 0) + return -ENOMEM; + + T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; + /* + * Setup BIDI READ list of struct se_mem elements + */ + if ((mem_bidi_in) && (sg_mem_bidi_num)) { + T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); + if (!(T_TASK(cmd)->t_mem_bidi_list)) { + kfree(T_TASK(cmd)->t_mem_list); + return -ENOMEM; + } + se_mem_cnt_out = 0; + + ret = transport_map_sg_to_mem(cmd, + T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, + &se_mem_cnt_out); + if (ret < 0) { + kfree(T_TASK(cmd)->t_mem_list); + return -ENOMEM; + } + + T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; + } + cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + + } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { + if (mem_bidi_in || sg_mem_bidi_num) { + printk(KERN_ERR "BIDI-Commands not supported using " + "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); + return -ENOSYS; + } + /* + * For incoming CDBs using a contiguous buffer internall with TCM, + * save the passed struct scatterlist memory. After TCM storage object + * processing has completed for this struct se_cmd, TCM core will call + * transport_memcpy_[write,read]_contig() as necessary from + * transport_generic_complete_ok() and transport_write_pending() in order + * to copy the TCM buffer to/from the original passed *mem in SGL -> + * struct scatterlist format. 
+ */ + cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; + T_TASK(cmd)->t_task_pt_sgl = mem; + } + + return 0; +} +EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); + + +static inline long long transport_dev_end_lba(struct se_device *dev) +{ + return dev->transport->get_blocks(dev) + 1; +} + +static int transport_get_sectors(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + + T_TASK(cmd)->t_tasks_sectors = + (cmd->data_length / DEV_ATTRIB(dev)->block_size); + if (!(T_TASK(cmd)->t_tasks_sectors)) + T_TASK(cmd)->t_tasks_sectors = 1; + + if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) + return 0; + + if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > + transport_dev_end_lba(dev)) { + printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" + " transport_dev_end_lba(): %llu\n", + T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, + transport_dev_end_lba(dev)); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; + return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; + } + + return 0; +} + +static int transport_new_cmd_obj(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + u32 task_cdbs = 0, rc; + + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { + task_cdbs++; + T_TASK(cmd)->t_task_cdbs++; + } else { + int set_counts = 1; + + /* + * Setup any BIDI READ tasks and memory from + * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks + * are queued first for the non pSCSI passthrough case. + */ + if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && + (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + rc = transport_generic_get_cdb_count(cmd, + T_TASK(cmd)->t_task_lba, + T_TASK(cmd)->t_tasks_sectors, + DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, + set_counts); + if (!(rc)) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + set_counts = 0; + } + /* + * Setup the tasks and memory from T_TASK(cmd)->t_mem_list + * Note for BIDI transfers this will contain the WRITE payload + */ + task_cdbs = transport_generic_get_cdb_count(cmd, + T_TASK(cmd)->t_task_lba, + T_TASK(cmd)->t_tasks_sectors, + cmd->data_direction, T_TASK(cmd)->t_mem_list, + set_counts); + if (!(task_cdbs)) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + T_TASK(cmd)->t_task_cdbs += task_cdbs; + +#if 0 + printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" + " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, + T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, + T_TASK(cmd)->t_task_cdbs); +#endif + } + + atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); + atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); + return 0; +} + +static struct list_head *transport_init_se_mem_list(void) +{ + struct list_head *se_mem_list; + + se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); + if (!(se_mem_list)) { + printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); + return NULL; + } + INIT_LIST_HEAD(se_mem_list); + + return se_mem_list; +} + +static int +transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) +{ + unsigned char *buf; + struct se_mem *se_mem; + + T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); + if (!(T_TASK(cmd)->t_mem_list)) + return -ENOMEM; + + /* + * If the device uses memory 
mapping this is enough. + */ + if (cmd->se_dev->transport->do_se_mem_map) + return 0; + + /* + * Setup BIDI-COMMAND READ list of struct se_mem elements + */ + if (T_TASK(cmd)->t_tasks_bidi) { + T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); + if (!(T_TASK(cmd)->t_mem_bidi_list)) { + kfree(T_TASK(cmd)->t_mem_list); + return -ENOMEM; + } + } + + while (length) { + se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); + if (!(se_mem)) { + printk(KERN_ERR "Unable to allocate struct se_mem\n"); + goto out; + } + INIT_LIST_HEAD(&se_mem->se_list); + se_mem->se_len = (length > dma_size) ? dma_size : length; + +/* #warning FIXME Allocate contigous pages for struct se_mem elements */ + se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); + if (!(se_mem->se_page)) { + printk(KERN_ERR "alloc_pages() failed\n"); + goto out; + } + + buf = kmap_atomic(se_mem->se_page, KM_IRQ0); + if (!(buf)) { + printk(KERN_ERR "kmap_atomic() failed\n"); + goto out; + } + memset(buf, 0, se_mem->se_len); + kunmap_atomic(buf, KM_IRQ0); + + list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); + T_TASK(cmd)->t_tasks_se_num++; + + DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" + " Offset(%u)\n", se_mem->se_page, se_mem->se_len, + se_mem->se_off); + + length -= se_mem->se_len; + } + + DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", + T_TASK(cmd)->t_tasks_se_num); + + return 0; +out: + return -1; +} + +extern u32 transport_calc_sg_num( + struct se_task *task, + struct se_mem *in_se_mem, + u32 task_offset) +{ + struct se_cmd *se_cmd = task->task_se_cmd; + struct se_device *se_dev = SE_DEV(se_cmd); + struct se_mem *se_mem = in_se_mem; + struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); + u32 sg_length, task_size = task->task_size, task_sg_num_padded; + + while (task_size != 0) { + DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" + " se_mem->se_off(%u) task_offset(%u)\n", + se_mem->se_page, se_mem->se_len, + se_mem->se_off, task_offset); + + if (task_offset == 0) { + if (task_size >= se_mem->se_len) { + sg_length = se_mem->se_len; + + if (!(list_is_last(&se_mem->se_list, + T_TASK(se_cmd)->t_mem_list))) + se_mem = list_entry(se_mem->se_list.next, + struct se_mem, se_list); + } else { + sg_length = task_size; + task_size -= sg_length; + goto next; + } + + DEBUG_SC("sg_length(%u) task_size(%u)\n", + sg_length, task_size); + } else { + if ((se_mem->se_len - task_offset) > task_size) { + sg_length = task_size; + task_size -= sg_length; + goto next; + } else { + sg_length = (se_mem->se_len - task_offset); + + if (!(list_is_last(&se_mem->se_list, + T_TASK(se_cmd)->t_mem_list))) + se_mem = list_entry(se_mem->se_list.next, + struct se_mem, se_list); + } + + DEBUG_SC("sg_length(%u) task_size(%u)\n", + sg_length, task_size); + + task_offset = 0; + } + task_size -= sg_length; +next: + DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", + task->task_no, task_size); + + task->task_sg_num++; + } + /* + * Check if the fabric module driver is requesting that all + * struct se_task->task_sg[] be chained together.. If so, + * then allocate an extra padding SG entry for linking and + * marking the end of the chained SGL. 
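+ * For example, a task that mapped three struct se_mem segments ends up + * with task_sg_num = 3; with tfo->task_sg_chaining set, the allocation + * below uses task_sg_num_padded = 4.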
+ */ + if (tfo->task_sg_chaining) { + task_sg_num_padded = (task->task_sg_num + 1); + task->task_padded_sg = 1; + } else + task_sg_num_padded = task->task_sg_num; + + task->task_sg = kzalloc(task_sg_num_padded * + sizeof(struct scatterlist), GFP_KERNEL); + if (!(task->task_sg)) { + printk(KERN_ERR "Unable to allocate memory for" + " task->task_sg\n"); + return 0; + } + sg_init_table(&task->task_sg[0], task_sg_num_padded); + /* + * Setup task->task_sg_bidi for SCSI READ payload for + * TCM/pSCSI passthrough if present for BIDI-COMMAND + */ + if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && + (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { + task->task_sg_bidi = kzalloc(task_sg_num_padded * + sizeof(struct scatterlist), GFP_KERNEL); + if (!(task->task_sg_bidi)) { + printk(KERN_ERR "Unable to allocate memory for" + " task->task_sg_bidi\n"); + return 0; + } + sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); + } + /* + * For the chaining case, setup the proper end of SGL for the + * initial submission struct task into struct se_subsystem_api. + * This will be cleared later by transport_do_task_sg_chain() + */ + if (task->task_padded_sg) { + sg_mark_end(&task->task_sg[task->task_sg_num - 1]); + /* + * Added the 'if' check before marking end of bi-directional + * scatterlist (which gets created only in case of request + * (RD + WR). + */ + if (task->task_sg_bidi) + sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); + } + + DEBUG_SC("Successfully allocated task->task_sg_num(%u)," + " task_sg_num_padded(%u)\n", task->task_sg_num, + task_sg_num_padded); + + return task->task_sg_num; +} + +static inline int transport_set_tasks_sectors_disk( + struct se_task *task, + struct se_device *dev, + unsigned long long lba, + u32 sectors, + int *max_sectors_set) +{ + if ((lba + sectors) > transport_dev_end_lba(dev)) { + task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); + + if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { + task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + *max_sectors_set = 1; + } + } else { + if (sectors > DEV_ATTRIB(dev)->max_sectors) { + task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + *max_sectors_set = 1; + } else + task->task_sectors = sectors; + } + + return 0; +} + +static inline int transport_set_tasks_sectors_non_disk( + struct se_task *task, + struct se_device *dev, + unsigned long long lba, + u32 sectors, + int *max_sectors_set) +{ + if (sectors > DEV_ATTRIB(dev)->max_sectors) { + task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + *max_sectors_set = 1; + } else + task->task_sectors = sectors; + + return 0; +} + +static inline int transport_set_tasks_sectors( + struct se_task *task, + struct se_device *dev, + unsigned long long lba, + u32 sectors, + int *max_sectors_set) +{ + return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? 
+ transport_set_tasks_sectors_disk(task, dev, lba, sectors, + max_sectors_set) : + transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, + max_sectors_set); +} + +static int transport_map_sg_to_mem( + struct se_cmd *cmd, + struct list_head *se_mem_list, + void *in_mem, + u32 *se_mem_cnt) +{ + struct se_mem *se_mem; + struct scatterlist *sg; + u32 sg_count = 1, cmd_size = cmd->data_length; + + if (!in_mem) { + printk(KERN_ERR "No source scatterlist\n"); + return -1; + } + sg = (struct scatterlist *)in_mem; + + while (cmd_size) { + se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); + if (!(se_mem)) { + printk(KERN_ERR "Unable to allocate struct se_mem\n"); + return -1; + } + INIT_LIST_HEAD(&se_mem->se_list); + DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" + " sg_page: %p offset: %d length: %d\n", cmd_size, + sg_page(sg), sg->offset, sg->length); + + se_mem->se_page = sg_page(sg); + se_mem->se_off = sg->offset; + + if (cmd_size > sg->length) { + se_mem->se_len = sg->length; + sg = sg_next(sg); + sg_count++; + } else + se_mem->se_len = cmd_size; + + cmd_size -= se_mem->se_len; + + DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", + *se_mem_cnt, cmd_size); + DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", + se_mem->se_page, se_mem->se_off, se_mem->se_len); + + list_add_tail(&se_mem->se_list, se_mem_list); + (*se_mem_cnt)++; + } + + DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" + " struct se_mem\n", sg_count, *se_mem_cnt); + + if (sg_count != *se_mem_cnt) + BUG(); + + return 0; +} + +/* transport_map_mem_to_sg(): + * + * + */ +int transport_map_mem_to_sg( + struct se_task *task, + struct list_head *se_mem_list, + void *in_mem, + struct se_mem *in_se_mem, + struct se_mem **out_se_mem, + u32 *se_mem_cnt, + u32 *task_offset) +{ + struct se_cmd *se_cmd = task->task_se_cmd; + struct se_mem *se_mem = in_se_mem; + struct scatterlist *sg = (struct scatterlist *)in_mem; + u32 task_size = task->task_size, sg_no = 0; + + if (!sg) { + printk(KERN_ERR "Unable to locate valid struct" + " scatterlist pointer\n"); + return -1; + } + + while (task_size != 0) { + /* + * Setup the contigious array of scatterlists for + * this struct se_task. + */ + sg_assign_page(sg, se_mem->se_page); + + if (*task_offset == 0) { + sg->offset = se_mem->se_off; + + if (task_size >= se_mem->se_len) { + sg->length = se_mem->se_len; + + if (!(list_is_last(&se_mem->se_list, + T_TASK(se_cmd)->t_mem_list))) { + se_mem = list_entry(se_mem->se_list.next, + struct se_mem, se_list); + (*se_mem_cnt)++; + } + } else { + sg->length = task_size; + /* + * Determine if we need to calculate an offset + * into the struct se_mem on the next go around.. + */ + task_size -= sg->length; + if (!(task_size)) + *task_offset = sg->length; + + goto next; + } + + } else { + sg->offset = (*task_offset + se_mem->se_off); + + if ((se_mem->se_len - *task_offset) > task_size) { + sg->length = task_size; + /* + * Determine if we need to calculate an offset + * into the struct se_mem on the next go around.. 
+ */ + task_size -= sg->length; + if (!(task_size)) + *task_offset += sg->length; + + goto next; + } else { + sg->length = (se_mem->se_len - *task_offset); + + if (!(list_is_last(&se_mem->se_list, + T_TASK(se_cmd)->t_mem_list))) { + se_mem = list_entry(se_mem->se_list.next, + struct se_mem, se_list); + (*se_mem_cnt)++; + } + } + + *task_offset = 0; + } + task_size -= sg->length; +next: + DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" + " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, + sg_page(sg), sg->length, sg->offset, task_size, *task_offset); + + sg_no++; + if (!(task_size)) + break; + + sg = sg_next(sg); + + if (task_size > se_cmd->data_length) + BUG(); + } + *out_se_mem = se_mem; + + DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" + " SGs\n", task->task_no, *se_mem_cnt, sg_no); + + return 0; +} + +/* + * This function can be used by HW target mode drivers to create a linked + * scatterlist from all contiguously allocated struct se_task->task_sg[]. + * This is intended to be called during the completion path by TCM Core + * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. + */ +void transport_do_task_sg_chain(struct se_cmd *cmd) +{ + struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; + struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; + struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; + struct se_task *task; + struct target_core_fabric_ops *tfo = CMD_TFO(cmd); + u32 task_sg_num = 0, sg_count = 0; + int i; + + if (tfo->task_sg_chaining == 0) { + printk(KERN_ERR "task_sg_chaining is disabled for fabric module:" + " %s\n", tfo->get_fabric_name()); + dump_stack(); + return; + } + /* + * Walk the struct se_task list and setup scatterlist chains + * for each contiguously allocated struct se_task->task_sg[]. + */ + list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + if (!(task->task_sg) || !(task->task_padded_sg)) + continue; + + if (sg_head && sg_link) { + sg_head_cur = &task->task_sg[0]; + sg_link_cur = &task->task_sg[task->task_sg_num]; + /* + * Either add chain or mark end of scatterlist + */ + if (!(list_is_last(&task->t_list, + &T_TASK(cmd)->t_task_list))) { + /* + * Clear existing SGL termination bit set in + * transport_calc_sg_num(), see sg_mark_end() + */ + sg_end_cur = &task->task_sg[task->task_sg_num - 1]; + sg_end_cur->page_link &= ~0x02; + + sg_chain(sg_head, task_sg_num, sg_head_cur); + sg_count += (task->task_sg_num + 1); + } else + sg_count += task->task_sg_num; + + sg_head = sg_head_cur; + sg_link = sg_link_cur; + task_sg_num = task->task_sg_num; + continue; + } + sg_head = sg_first = &task->task_sg[0]; + sg_link = &task->task_sg[task->task_sg_num]; + task_sg_num = task->task_sg_num; + /* + * Check for single task.. + */ + if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { + /* + * Clear existing SGL termination bit set in + * transport_calc_sg_num(), see sg_mark_end() + */ + sg_end = &task->task_sg[task->task_sg_num - 1]; + sg_end->page_link &= ~0x02; + sg_count += (task->task_sg_num + 1); + } else + sg_count += task->task_sg_num; + } + /* + * Setup the starting pointer and total t_tasks_sg_linked_no including + * padding SGs for linking and to mark the end. 
+ */ + T_TASK(cmd)->t_tasks_sg_chained = sg_first; + T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; + + DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, + T_TASK(cmd)->t_tasks_sg_chained_no); + + for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, + T_TASK(cmd)->t_tasks_sg_chained_no, i) { + + DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", + sg, sg_page(sg), sg->length, sg->offset); + if (sg_is_chain(sg)) + DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); + if (sg_is_last(sg)) + DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); + } + +} +EXPORT_SYMBOL(transport_do_task_sg_chain); + +static int transport_do_se_mem_map( + struct se_device *dev, + struct se_task *task, + struct list_head *se_mem_list, + void *in_mem, + struct se_mem *in_se_mem, + struct se_mem **out_se_mem, + u32 *se_mem_cnt, + u32 *task_offset_in) +{ + u32 task_offset = *task_offset_in; + int ret = 0; + /* + * se_subsystem_api_t->do_se_mem_map is used when internal allocation + * has been done by the transport plugin. + */ + if (TRANSPORT(dev)->do_se_mem_map) { + ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, + in_mem, in_se_mem, out_se_mem, se_mem_cnt, + task_offset_in); + if (ret == 0) + T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; + + return ret; + } + /* + * This is the normal path for all normal non BIDI and BIDI-COMMAND + * WRITE payloads.. If we need to do BIDI READ passthrough for + * TCM/pSCSI the first call to transport_do_se_mem_map -> + * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the + * allocation for task->task_sg_bidi, and the subsequent call to + * transport_do_se_mem_map() from transport_generic_get_cdb_count() + */ + if (!(task->task_sg_bidi)) { + /* + * Assume default that transport plugin speaks preallocated + * scatterlists. + */ + if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) + return -1; + /* + * struct se_task->task_sg now contains the struct scatterlist array. + */ + return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, + in_se_mem, out_se_mem, se_mem_cnt, + task_offset_in); + } + /* + * Handle the se_mem_list -> struct task->task_sg_bidi + * memory map for the extra BIDI READ payload + */ + return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, + in_se_mem, out_se_mem, se_mem_cnt, + task_offset_in); +} + +static u32 transport_generic_get_cdb_count( + struct se_cmd *cmd, + unsigned long long lba, + u32 sectors, + enum dma_data_direction data_direction, + struct list_head *mem_list, + int set_counts) +{ + unsigned char *cdb = NULL; + struct se_task *task; + struct se_mem *se_mem = NULL, *se_mem_lout = NULL; + struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; + struct se_device *dev = SE_DEV(cmd); + int max_sectors_set = 0, ret; + u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; + + if (!mem_list) { + printk(KERN_ERR "mem_list is NULL in transport_generic_get" + "_cdb_count()\n"); + return 0; + } + /* + * While using RAMDISK_DR backstores is the only case where + * mem_list will ever be empty at this point. 
+ */ + if (!(list_empty(mem_list))) + se_mem = list_entry(mem_list->next, struct se_mem, se_list); + /* + * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to + * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation + */ + if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && + !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && + (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) + se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, + struct se_mem, se_list); + + while (sectors) { + DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", + CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, + transport_dev_end_lba(dev)); + + task = transport_generic_get_task(cmd, data_direction); + if (!(task)) + goto out; + + transport_set_tasks_sectors(task, dev, lba, sectors, + &max_sectors_set); + + task->task_lba = lba; + lba += task->task_sectors; + sectors -= task->task_sectors; + task->task_size = (task->task_sectors * + DEV_ATTRIB(dev)->block_size); + + cdb = TRANSPORT(dev)->get_cdb(task); + if ((cdb)) { + memcpy(cdb, T_TASK(cmd)->t_task_cdb, + scsi_command_size(T_TASK(cmd)->t_task_cdb)); + cmd->transport_split_cdb(task->task_lba, + &task->task_sectors, cdb); + } + + /* + * Perform the SE OBJ plugin and/or Transport plugin specific + * mapping for T_TASK(cmd)->t_mem_list. And setup the + * task->task_sg and if necessary task->task_sg_bidi + */ + ret = transport_do_se_mem_map(dev, task, mem_list, + NULL, se_mem, &se_mem_lout, &se_mem_cnt, + &task_offset_in); + if (ret < 0) + goto out; + + se_mem = se_mem_lout; + /* + * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi + * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI + * + * Note that the first call to transport_do_se_mem_map() above will + * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() + * -> transport_calc_sg_num(), and the second here will do the + * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. + */ + if (task->task_sg_bidi != NULL) { + ret = transport_do_se_mem_map(dev, task, + T_TASK(cmd)->t_mem_bidi_list, NULL, + se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, + &task_offset_in); + if (ret < 0) + goto out; + + se_mem_bidi = se_mem_bidi_lout; + } + task_cdbs++; + + DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", + task_cdbs, task->task_sg_num); + + if (max_sectors_set) { + max_sectors_set = 0; + continue; + } + + if (!sectors) + break; + } + + if (set_counts) { + atomic_inc(&T_TASK(cmd)->t_fe_count); + atomic_inc(&T_TASK(cmd)->t_se_count); + } + + DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", + CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) + ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); + + return task_cdbs; +out: + return 0; +} + +static int +transport_map_control_cmd_to_task(struct se_cmd *cmd) +{ + struct se_device *dev = SE_DEV(cmd); + unsigned char *cdb; + struct se_task *task; + int ret; + + task = transport_generic_get_task(cmd, cmd->data_direction); + if (!task) + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + + cdb = TRANSPORT(dev)->get_cdb(task); + if (cdb) + memcpy(cdb, cmd->t_task->t_task_cdb, + scsi_command_size(cmd->t_task->t_task_cdb)); + + task->task_size = cmd->data_length; + task->task_sg_num = + (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 
1 : 0; + + atomic_inc(&cmd->t_task->t_fe_count); + atomic_inc(&cmd->t_task->t_se_count); + + if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { + struct se_mem *se_mem = NULL, *se_mem_lout = NULL; + u32 se_mem_cnt = 0, task_offset = 0; + + BUG_ON(list_empty(cmd->t_task->t_mem_list)); + + ret = transport_do_se_mem_map(dev, task, + cmd->t_task->t_mem_list, NULL, se_mem, + &se_mem_lout, &se_mem_cnt, &task_offset); + if (ret < 0) + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + + if (dev->transport->map_task_SG) + return dev->transport->map_task_SG(task); + return 0; + } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { + if (dev->transport->map_task_non_SG) + return dev->transport->map_task_non_SG(task); + return 0; + } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { + if (dev->transport->cdb_none) + return dev->transport->cdb_none(task); + return 0; + } else { + BUG(); + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + } +} + +/* transport_generic_new_cmd(): Called from transport_processing_thread() + * + * Allocate storage transport resources from a set of values predefined + * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. + * Any non-zero return here is treated as an "out of resource" op. + */ + /* + * Generate struct se_task(s) and/or their payloads for this CDB. + */ +static int transport_generic_new_cmd(struct se_cmd *cmd) +{ + struct se_portal_group *se_tpg; + struct se_task *task; + struct se_device *dev = SE_DEV(cmd); + int ret = 0; + + /* + * Determine if the TCM fabric module has already allocated physical + * memory, and is directly calling transport_generic_map_mem_to_cmd() + * to setup beforehand the linked list of physical memory at + * T_TASK(cmd)->t_mem_list of struct se_mem->se_page + */ + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { + ret = transport_allocate_resources(cmd); + if (ret < 0) + return ret; + } + + ret = transport_get_sectors(cmd); + if (ret < 0) + return ret; + + ret = transport_new_cmd_obj(cmd); + if (ret < 0) + return ret; + + /* + * Determine if the calling TCM fabric module is talking to + * Linux/NET via kernel sockets and needs to allocate a + * struct iovec array to complete the struct se_cmd + */ + se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; + if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { + ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); + if (ret < 0) + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + } + + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + if (atomic_read(&task->task_sent)) + continue; + if (!dev->transport->map_task_SG) + continue; + + ret = dev->transport->map_task_SG(task); + if (ret < 0) + return ret; + } + } else { + ret = transport_map_control_cmd_to_task(cmd); + if (ret < 0) + return ret; + } + + /* + * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. + * This WRITE struct se_cmd (and all of its associated struct se_task's) + * will be added to the struct se_device execution queue after its WRITE + * data has arrived. (ie: It gets handled by the transport processing + * thread a second time) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + transport_add_tasks_to_state_queue(cmd); + return transport_generic_write_pending(cmd); + } + /* + * Everything else but a WRITE, add the struct se_cmd's struct se_task's + * to the execution queue. 
+ */ + transport_execute_tasks(cmd); + return 0; +} + +/* transport_generic_process_write(): + * + * + */ +void transport_generic_process_write(struct se_cmd *cmd) +{ +#if 0 + /* + * Copy SCSI Presented DTL sector(s) from received buffers allocated to + * original EDTL + */ + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + if (!T_TASK(cmd)->t_tasks_se_num) { + unsigned char *dst, *buf = + (unsigned char *)T_TASK(cmd)->t_task_buf; + + dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); + if (!(dst)) { + printk(KERN_ERR "Unable to allocate memory for" + " WRITE underflow\n"); + transport_generic_request_failure(cmd, NULL, + PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); + return; + } + memcpy(dst, buf, cmd->cmd_spdtl); + + kfree(T_TASK(cmd)->t_task_buf); + T_TASK(cmd)->t_task_buf = dst; + } else { + struct scatterlist *sg = + (struct scatterlist *sg)T_TASK(cmd)->t_task_buf; + struct scatterlist *orig_sg; + + orig_sg = kzalloc(sizeof(struct scatterlist) * + T_TASK(cmd)->t_tasks_se_num, + GFP_KERNEL))) { + if (!(orig_sg)) { + printk(KERN_ERR "Unable to allocate memory" + " for WRITE underflow\n"); + transport_generic_request_failure(cmd, NULL, + PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); + return; + } + + memcpy(orig_sg, T_TASK(cmd)->t_task_buf, + sizeof(struct scatterlist) * + T_TASK(cmd)->t_tasks_se_num); + + cmd->data_length = cmd->cmd_spdtl; + /* + * FIXME, clear out original struct se_task and state + * information. + */ + if (transport_generic_new_cmd(cmd) < 0) { + transport_generic_request_failure(cmd, NULL, + PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); + kfree(orig_sg); + return; + } + + transport_memcpy_write_sg(cmd, orig_sg); + } + } +#endif + transport_execute_tasks(cmd); +} +EXPORT_SYMBOL(transport_generic_process_write); + +/* transport_generic_write_pending(): + * + * + */ +static int transport_generic_write_pending(struct se_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + /* + * For the TCM control CDBs using a contiguous buffer, do the memcpy + * from the passed Linux/SCSI struct scatterlist located at + * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at + * T_TASK(se_cmd)->t_task_buf. + */ + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) + transport_memcpy_read_contig(cmd, + T_TASK(cmd)->t_task_buf, + T_TASK(cmd)->t_task_pt_sgl); + /* + * Clear the se_cmd for WRITE_PENDING status in order to set + * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data + * can be called from HW target mode interrupt code. This is safe + * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending + * because the se_cmd->se_lun pointer is not being cleared. + */ + transport_cmd_check_stop(cmd, 1, 0); + + /* + * Call the fabric write_pending function here to let the + * frontend know that WRITE buffers are ready. 
+ */ + ret = CMD_TFO(cmd)->write_pending(cmd); + if (ret < 0) + return ret; + + return PYX_TRANSPORT_WRITE_PENDING; +} + +/* transport_release_cmd_to_pool(): + * + * + */ +void transport_release_cmd_to_pool(struct se_cmd *cmd) +{ + BUG_ON(!T_TASK(cmd)); + BUG_ON(!CMD_TFO(cmd)); + + transport_free_se_cmd(cmd); + CMD_TFO(cmd)->release_cmd_to_pool(cmd); +} +EXPORT_SYMBOL(transport_release_cmd_to_pool); + +/* transport_generic_free_cmd(): + * + * Called from processing frontend to release storage engine resources + */ +void transport_generic_free_cmd( + struct se_cmd *cmd, + int wait_for_tasks, + int release_to_pool, + int session_reinstatement) +{ + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) + transport_release_cmd_to_pool(cmd); + else { + core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); + + if (SE_LUN(cmd)) { +#if 0 + printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" + " SE_LUN(cmd)\n", cmd, + CMD_TFO(cmd)->get_task_tag(cmd)); +#endif + transport_lun_remove_cmd(cmd); + } + + if (wait_for_tasks && cmd->transport_wait_for_tasks) + cmd->transport_wait_for_tasks(cmd, 0, 0); + + transport_generic_remove(cmd, release_to_pool, + session_reinstatement); + } +} +EXPORT_SYMBOL(transport_generic_free_cmd); + +static void transport_nop_wait_for_tasks( + struct se_cmd *cmd, + int remove_cmd, + int session_reinstatement) +{ + return; +} + +/* transport_lun_wait_for_tasks(): + * + * Called from ConfigFS context to stop the passed struct se_cmd to allow + * an struct se_lun to be successfully shutdown. + */ +static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) +{ + unsigned long flags; + int ret; + /* + * If the frontend has already requested this struct se_cmd to + * be stopped, we can safely ignore this struct se_cmd. + */ + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { + atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" + " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + transport_cmd_check_stop(cmd, 1, 0); + return -1; + } + atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + + ret = transport_stop_tasks_for_cmd(cmd); + + DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" + " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); + if (!ret) { + DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", + CMD_TFO(cmd)->get_task_tag(cmd)); + wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); + DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", + CMD_TFO(cmd)->get_task_tag(cmd)); + } + transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + + return 0; +} + +/* #define DEBUG_CLEAR_LUN */ +#ifdef DEBUG_CLEAR_LUN +#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) +#else +#define DEBUG_CLEAR_L(x...) +#endif + +static void __transport_clear_lun_from_sessions(struct se_lun *lun) +{ + struct se_cmd *cmd = NULL; + unsigned long lun_flags, cmd_flags; + /* + * Do exception processing and return CHECK_CONDITION status to the + * Initiator Port. 
+ */ + spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); + while (!list_empty_careful(&lun->lun_cmd_list)) { + cmd = list_entry(lun->lun_cmd_list.next, + struct se_cmd, se_lun_list); + list_del(&cmd->se_lun_list); + + if (!(T_TASK(cmd))) { + printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" + " [i,t]_state: %u/%u\n", + CMD_TFO(cmd)->get_task_tag(cmd), + CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + BUG(); + } + atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + /* + * This will notify iscsi_target_transport.c: + * transport_cmd_check_stop() that a LUN shutdown is in + * progress for the iscsi_cmd_t. + */ + spin_lock(&T_TASK(cmd)->t_state_lock); + DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" + "_lun_stop for ITT: 0x%08x\n", + SE_LUN(cmd)->unpacked_lun, + CMD_TFO(cmd)->get_task_tag(cmd)); + atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); + spin_unlock(&T_TASK(cmd)->t_state_lock); + + spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); + + if (!(SE_LUN(cmd))) { + printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", + CMD_TFO(cmd)->get_task_tag(cmd), + CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + BUG(); + } + /* + * If the Storage engine still owns the iscsi_cmd_t, determine + * and/or stop its context. + */ + DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" + "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, + CMD_TFO(cmd)->get_task_tag(cmd)); + + if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { + spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); + continue; + } + + DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" + "_wait_for_tasks(): SUCCESS\n", + SE_LUN(cmd)->unpacked_lun, + CMD_TFO(cmd)->get_task_tag(cmd)); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); + if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + goto check_cond; + } + atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + + transport_free_dev_tasks(cmd); + /* + * The Storage engine stopped this struct se_cmd before it was + * sent to the fabric frontend for delivery back to the + * Initiator Node. Return this SCSI CDB back with a + * CHECK_CONDITION status. + */ +check_cond: + transport_send_check_condition_and_sense(cmd, + TCM_NON_EXISTENT_LUN, 0); + /* + * If the fabric frontend is waiting for this iscsi_cmd_t to + * be released, notify the waiting thread now that LU has + * finished accessing it. 
+ */ + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); + if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { + DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" + " struct se_cmd: %p ITT: 0x%08x\n", + lun->unpacked_lun, + cmd, CMD_TFO(cmd)->get_task_tag(cmd)); + + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + cmd_flags); + transport_cmd_check_stop(cmd, 1, 0); + complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); + spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); + continue; + } + DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", + lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); + + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); + } + spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); +} + +static int transport_clear_lun_thread(void *p) +{ + struct se_lun *lun = (struct se_lun *)p; + + __transport_clear_lun_from_sessions(lun); + complete(&lun->lun_shutdown_comp); + + return 0; +} + +int transport_clear_lun_from_sessions(struct se_lun *lun) +{ + struct task_struct *kt; + + kt = kthread_run(transport_clear_lun_thread, (void *)lun, + "tcm_cl_%u", lun->unpacked_lun); + if (IS_ERR(kt)) { + printk(KERN_ERR "Unable to start clear_lun thread\n"); + return -1; + } + wait_for_completion(&lun->lun_shutdown_comp); + + return 0; +} + +/* transport_generic_wait_for_tasks(): + * + * Called from frontend or passthrough context to wait for storage engine + * to pause and/or release frontend generated struct se_cmd. + */ +static void transport_generic_wait_for_tasks( + struct se_cmd *cmd, + int remove_cmd, + int session_reinstatement) +{ + unsigned long flags; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) + return; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + /* + * If we are already stopped due to an external event (ie: LUN shutdown) + * sleep until the connection can have the passed struct se_cmd back. + * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by + * transport_clear_lun_from_sessions() once the ConfigFS context caller + * has completed its operation on the struct se_cmd. + */ + if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { + + DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" + " wait_for_completion(&T_TASK(cmd)transport_lun_fe" + "_stop_comp); for ITT: 0x%08x\n", + CMD_TFO(cmd)->get_task_tag(cmd)); + /* + * There is a special case for WRITES where a FE exception + + * LUN shutdown means ConfigFS context is still sleeping on + * transport_lun_stop_comp in transport_lun_wait_for_tasks(). + * We go ahead and up transport_lun_stop_comp just to be sure + * here. + */ + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + complete(&T_TASK(cmd)->transport_lun_stop_comp); + wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + + transport_all_task_dev_remove_state(cmd); + /* + * At this point, the frontend who was the originator of this + * struct se_cmd, now owns the structure and can be released through + * normal means below. 
+ */ + DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" + " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" + "stop_comp); for ITT: 0x%08x\n", + CMD_TFO(cmd)->get_task_tag(cmd)); + + atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + } + if (!atomic_read(&T_TASK(cmd)->t_transport_active)) + goto remove; + + atomic_set(&T_TASK(cmd)->t_transport_stop, 1); + + DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" + " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" + " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), + CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, + cmd->deferred_t_state); + + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + + wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&T_TASK(cmd)->t_transport_stop, 0); + + DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" + "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", + CMD_TFO(cmd)->get_task_tag(cmd)); +remove: + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + if (!remove_cmd) + return; + + transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); +} + +static int transport_get_sense_codes( + struct se_cmd *cmd, + u8 *asc, + u8 *ascq) +{ + *asc = cmd->scsi_asc; + *ascq = cmd->scsi_ascq; + + return 0; +} + +static int transport_set_sense_codes( + struct se_cmd *cmd, + u8 asc, + u8 ascq) +{ + cmd->scsi_asc = asc; + cmd->scsi_ascq = ascq; + + return 0; +} + +int transport_send_check_condition_and_sense( + struct se_cmd *cmd, + u8 reason, + int from_transport) +{ + unsigned char *buffer = cmd->sense_buffer; + unsigned long flags; + int offset; + u8 asc = 0, ascq = 0; + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + return 0; + } + cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; + spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + + if (!reason && from_transport) + goto after_reason; + + if (!from_transport) + cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + /* + * Data Segment and SenseLength of the fabric response PDU. 
+ * + * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE + * from include/scsi/scsi_cmnd.h + */ + offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + TRANSPORT_SENSE_BUFFER); + /* + * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses + * SENSE KEY values from include/scsi/scsi.h + */ + switch (reason) { + case TCM_NON_EXISTENT_LUN: + case TCM_UNSUPPORTED_SCSI_OPCODE: + case TCM_SECTOR_COUNT_TOO_MANY: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* INVALID COMMAND OPERATION CODE */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; + break; + case TCM_UNKNOWN_MODE_PAGE: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* INVALID FIELD IN CDB */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; + break; + case TCM_CHECK_CONDITION_ABORT_CMD: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* BUS DEVICE RESET FUNCTION OCCURRED */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; + buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; + break; + case TCM_INCORRECT_AMOUNT_OF_DATA: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* WRITE ERROR */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; + /* NOT ENOUGH UNSOLICITED DATA */ + buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; + break; + case TCM_INVALID_CDB_FIELD: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* INVALID FIELD IN CDB */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; + break; + case TCM_INVALID_PARAMETER_LIST: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* INVALID FIELD IN PARAMETER LIST */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; + break; + case TCM_UNEXPECTED_UNSOLICITED_DATA: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* WRITE ERROR */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; + /* UNEXPECTED_UNSOLICITED_DATA */ + buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; + break; + case TCM_SERVICE_CRC_ERROR: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* PROTOCOL SERVICE CRC ERROR */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; + /* N/A */ + buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; + break; + case TCM_SNACK_REJECTED: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ABORTED COMMAND */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + /* READ ERROR */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; + /* FAILED RETRANSMISSION REQUEST */ + buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; + break; + case TCM_WRITE_PROTECTED: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* DATA PROTECT */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; + /* WRITE PROTECTED */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; + break; + case TCM_CHECK_CONDITION_UNIT_ATTENTION: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* UNIT ATTENTION */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; + core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); + buffer[offset+SPC_ASC_KEY_OFFSET] = asc; + buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; + break; + case TCM_CHECK_CONDITION_NOT_READY: + /* CURRENT ERROR */ + 
buffer[offset] = 0x70; + /* Not Ready */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; + transport_get_sense_codes(cmd, &asc, &ascq); + buffer[offset+SPC_ASC_KEY_OFFSET] = asc; + buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; + break; + case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: + default: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL UNIT COMMUNICATION FAILURE */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; + break; + } + /* + * This code uses linux/include/scsi/scsi.h SAM status codes! + */ + cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + /* + * Automatically padded, this value is encoded in the fabric's + * data_length response PDU containing the SCSI defined sense data. + */ + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + +after_reason: + CMD_TFO(cmd)->queue_status(cmd); + return 0; +} +EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +int transport_check_aborted_status(struct se_cmd *cmd, int send_status) +{ + int ret = 0; + + if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { + if (!(send_status) || + (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) + return 1; +#if 0 + printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" + " status for CDB: 0x%02x ITT: 0x%08x\n", + T_TASK(cmd)->t_task_cdb[0], + CMD_TFO(cmd)->get_task_tag(cmd)); +#endif + cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; + CMD_TFO(cmd)->queue_status(cmd); + ret = 1; + } + return ret; +} +EXPORT_SYMBOL(transport_check_aborted_status); + +void transport_send_task_abort(struct se_cmd *cmd) +{ + /* + * If there are still expected incoming fabric WRITEs, we wait + * until until they have completed before sending a TASK_ABORTED + * response. This response with TASK_ABORTED status will be + * queued back to fabric module by transport_check_aborted_status(). + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { + atomic_inc(&T_TASK(cmd)->t_transport_aborted); + smp_mb__after_atomic_inc(); + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + transport_new_cmd_failure(cmd); + return; + } + } + cmd->scsi_status = SAM_STAT_TASK_ABORTED; +#if 0 + printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," + " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], + CMD_TFO(cmd)->get_task_tag(cmd)); +#endif + CMD_TFO(cmd)->queue_status(cmd); +} + +/* transport_generic_do_tmr(): + * + * + */ +int transport_generic_do_tmr(struct se_cmd *cmd) +{ + struct se_cmd *ref_cmd; + struct se_device *dev = SE_DEV(cmd); + struct se_tmr_req *tmr = cmd->se_tmr_req; + int ret; + + switch (tmr->function) { + case ABORT_TASK: + ref_cmd = tmr->ref_cmd; + tmr->response = TMR_FUNCTION_REJECTED; + break; + case ABORT_TASK_SET: + case CLEAR_ACA: + case CLEAR_TASK_SET: + tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; + break; + case LUN_RESET: + ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); + tmr->response = (!ret) ? 
TMR_FUNCTION_COMPLETE : + TMR_FUNCTION_REJECTED; + break; +#if 0 + case TARGET_WARM_RESET: + transport_generic_host_reset(dev->se_hba); + tmr->response = TMR_FUNCTION_REJECTED; + break; + case TARGET_COLD_RESET: + transport_generic_host_reset(dev->se_hba); + transport_generic_cold_reset(dev->se_hba); + tmr->response = TMR_FUNCTION_REJECTED; + break; +#endif + default: + printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", + tmr->function); + tmr->response = TMR_FUNCTION_REJECTED; + break; + } + + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + CMD_TFO(cmd)->queue_tm_rsp(cmd); + + transport_cmd_check_stop(cmd, 2, 0); + return 0; +} + +/* + * Called with spin_lock_irq(&dev->execute_task_lock); held + * + */ +static struct se_task * +transport_get_task_from_state_list(struct se_device *dev) +{ + struct se_task *task; + + if (list_empty(&dev->state_task_list)) + return NULL; + + list_for_each_entry(task, &dev->state_task_list, t_state_list) + break; + + list_del(&task->t_state_list); + atomic_set(&task->task_state_active, 0); + + return task; +} + +static void transport_processing_shutdown(struct se_device *dev) +{ + struct se_cmd *cmd; + struct se_queue_req *qr; + struct se_task *task; + u8 state; + unsigned long flags; + /* + * Empty the struct se_device's struct se_task state list. + */ + spin_lock_irqsave(&dev->execute_task_lock, flags); + while ((task = transport_get_task_from_state_list(dev))) { + if (!(TASK_CMD(task))) { + printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + continue; + } + cmd = TASK_CMD(task); + + if (!T_TASK(cmd)) { + printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" + " %p ITT: 0x%08x\n", task, cmd, + CMD_TFO(cmd)->get_task_tag(cmd)); + continue; + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + + DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," + " i_state/def_i_state: %d/%d, t_state/def_t_state:" + " %d/%d cdb: 0x%02x\n", cmd, task, + CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, + CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, + cmd->t_state, cmd->deferred_t_state, + T_TASK(cmd)->t_task_cdb[0]); + DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" + " %d t_task_cdbs_sent: %d -- t_transport_active: %d" + " t_transport_stop: %d t_transport_sent: %d\n", + CMD_TFO(cmd)->get_task_tag(cmd), + T_TASK(cmd)->t_task_cdbs, + atomic_read(&T_TASK(cmd)->t_task_cdbs_left), + atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), + atomic_read(&T_TASK(cmd)->t_transport_active), + atomic_read(&T_TASK(cmd)->t_transport_stop), + atomic_read(&T_TASK(cmd)->t_transport_sent)); + + if (atomic_read(&task->task_active)) { + atomic_set(&task->task_stop, 1); + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + DEBUG_DO("Waiting for task: %p to shutdown for dev:" + " %p\n", task, dev); + wait_for_completion(&task->task_stop_comp); + DEBUG_DO("Completed task: %p shutdown for dev: %p\n", + task, dev); + + spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + + atomic_set(&task->task_active, 0); + atomic_set(&task->task_stop, 0); + } + __transport_stop_task_timer(task, &flags); + + if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + DEBUG_DO("Skipping task: %p, dev: %p for" + " t_task_cdbs_ex_left: %d\n", task, dev, + atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + + spin_lock_irqsave(&dev->execute_task_lock, flags); + continue; + } + + if 
(atomic_read(&T_TASK(cmd)->t_transport_active)) { + DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" + " %p\n", task, dev); + + if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + transport_send_check_condition_and_sense( + cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, + 0); + transport_remove_cmd_from_queue(cmd, + SE_DEV(cmd)->dev_queue_obj); + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop(cmd, 1, 0); + } else { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + transport_remove_cmd_from_queue(cmd, + SE_DEV(cmd)->dev_queue_obj); + + transport_lun_remove_cmd(cmd); + + if (transport_cmd_check_stop(cmd, 1, 0)) + transport_generic_remove(cmd, 0, 0); + } + + spin_lock_irqsave(&dev->execute_task_lock, flags); + continue; + } + DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", + task, dev); + + if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + transport_send_check_condition_and_sense(cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_remove_cmd_from_queue(cmd, + SE_DEV(cmd)->dev_queue_obj); + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop(cmd, 1, 0); + } else { + spin_unlock_irqrestore( + &T_TASK(cmd)->t_state_lock, flags); + + transport_remove_cmd_from_queue(cmd, + SE_DEV(cmd)->dev_queue_obj); + transport_lun_remove_cmd(cmd); + + if (transport_cmd_check_stop(cmd, 1, 0)) + transport_generic_remove(cmd, 0, 0); + } + + spin_lock_irqsave(&dev->execute_task_lock, flags); + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + /* + * Empty the struct se_device's struct se_cmd list. + */ + spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { + spin_unlock_irqrestore( + &dev->dev_queue_obj->cmd_queue_lock, flags); + cmd = (struct se_cmd *)qr->cmd; + state = qr->state; + kfree(qr); + + DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", + cmd, state); + + if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + transport_send_check_condition_and_sense(cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop(cmd, 1, 0); + } else { + transport_lun_remove_cmd(cmd); + if (transport_cmd_check_stop(cmd, 1, 0)) + transport_generic_remove(cmd, 0, 0); + } + spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + } + spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); +} + +/* transport_processing_thread(): + * + * + */ +static int transport_processing_thread(void *param) +{ + int ret, t_state; + struct se_cmd *cmd; + struct se_device *dev = (struct se_device *) param; + struct se_queue_req *qr; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, + atomic_read(&dev->dev_queue_obj->queue_cnt) || + kthread_should_stop()); + if (ret < 0) + goto out; + + spin_lock_irq(&dev->dev_status_lock); + if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { + spin_unlock_irq(&dev->dev_status_lock); + transport_processing_shutdown(dev); + continue; + } + spin_unlock_irq(&dev->dev_status_lock); + +get_cmd: + __transport_execute_tasks(dev); + + qr = transport_get_qr_from_queue(dev->dev_queue_obj); + if (!(qr)) + continue; + + cmd = (struct se_cmd *)qr->cmd; + t_state = qr->state; + kfree(qr); + + switch (t_state) { + case TRANSPORT_NEW_CMD_MAP: + if (!(CMD_TFO(cmd)->new_cmd_map)) { + printk(KERN_ERR 
"CMD_TFO(cmd)->new_cmd_map is" + " NULL for TRANSPORT_NEW_CMD_MAP\n"); + BUG(); + } + ret = CMD_TFO(cmd)->new_cmd_map(cmd); + if (ret < 0) { + cmd->transport_error_status = ret; + transport_generic_request_failure(cmd, NULL, + 0, (cmd->data_direction != + DMA_TO_DEVICE)); + break; + } + /* Fall through */ + case TRANSPORT_NEW_CMD: + ret = transport_generic_new_cmd(cmd); + if (ret < 0) { + cmd->transport_error_status = ret; + transport_generic_request_failure(cmd, NULL, + 0, (cmd->data_direction != + DMA_TO_DEVICE)); + } + break; + case TRANSPORT_PROCESS_WRITE: + transport_generic_process_write(cmd); + break; + case TRANSPORT_COMPLETE_OK: + transport_stop_all_task_timers(cmd); + transport_generic_complete_ok(cmd); + break; + case TRANSPORT_REMOVE: + transport_generic_remove(cmd, 1, 0); + break; + case TRANSPORT_PROCESS_TMR: + transport_generic_do_tmr(cmd); + break; + case TRANSPORT_COMPLETE_FAILURE: + transport_generic_request_failure(cmd, NULL, 1, 1); + break; + case TRANSPORT_COMPLETE_TIMEOUT: + transport_stop_all_task_timers(cmd); + transport_generic_request_timeout(cmd); + break; + default: + printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" + " %d for ITT: 0x%08x i_state: %d on SE LUN:" + " %u\n", t_state, cmd->deferred_t_state, + CMD_TFO(cmd)->get_task_tag(cmd), + CMD_TFO(cmd)->get_cmd_state(cmd), + SE_LUN(cmd)->unpacked_lun); + BUG(); + } + + goto get_cmd; + } + +out: + transport_release_all_cmds(dev); + dev->process_thread = NULL; + return 0; +} diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c new file mode 100644 index 000000000000..a2ef346087e8 --- /dev/null +++ b/drivers/target/target_core_ua.c @@ -0,0 +1,332 @@ +/******************************************************************************* + * Filename: target_core_ua.c + * + * This file contains logic for SPC-3 Unit Attention emulation + * + * Copyright (c) 2009,2010 Rising Tide Systems + * Copyright (c) 2009,2010 Linux-iSCSI.org + * + * Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + ******************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "target_core_alua.h" +#include "target_core_hba.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +int core_scsi3_ua_check( + struct se_cmd *cmd, + unsigned char *cdb) +{ + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + + if (!(sess)) + return 0; + + nacl = sess->se_node_acl; + if (!(nacl)) + return 0; + + deve = &nacl->device_list[cmd->orig_fe_lun]; + if (!(atomic_read(&deve->ua_count))) + return 0; + /* + * From sam4r14, section 5.14 Unit attention condition: + * + * a) if an INQUIRY command enters the enabled command state, the + * device server shall process the INQUIRY command and shall neither + * report nor clear any unit attention condition; + * b) if a REPORT LUNS command enters the enabled command state, the + * device server shall process the REPORT LUNS command and shall not + * report any unit attention condition; + * e) if a REQUEST SENSE command enters the enabled command state while + * a unit attention condition exists for the SCSI initiator port + * associated with the I_T nexus on which the REQUEST SENSE command + * was received, then the device server shall process the command + * and either: + */ + switch (cdb[0]) { + case INQUIRY: + case REPORT_LUNS: + case REQUEST_SENSE: + return 0; + default: + return -1; + } + + return -1; +} + +int core_scsi3_ua_allocate( + struct se_node_acl *nacl, + u32 unpacked_lun, + u8 asc, + u8 ascq) +{ + struct se_dev_entry *deve; + struct se_ua *ua, *ua_p, *ua_tmp; + /* + * PASSTHROUGH OPS + */ + if (!(nacl)) + return -1; + + ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); + if (!(ua)) { + printk(KERN_ERR "Unable to allocate struct se_ua\n"); + return -1; + } + INIT_LIST_HEAD(&ua->ua_dev_list); + INIT_LIST_HEAD(&ua->ua_nacl_list); + + ua->ua_nacl = nacl; + ua->ua_asc = asc; + ua->ua_ascq = ascq; + + spin_lock_irq(&nacl->device_list_lock); + deve = &nacl->device_list[unpacked_lun]; + + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { + /* + * Do not report the same UNIT ATTENTION twice.. + */ + if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { + spin_unlock(&deve->ua_lock); + spin_unlock_irq(&nacl->device_list_lock); + kmem_cache_free(se_ua_cache, ua); + return 0; + } + /* + * Attach the highest priority Unit Attention to + * the head of the list following sam4r14, + * Section 5.14 Unit Attention Condition: + * + * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest + * POWER ON OCCURRED or + * DEVICE INTERNAL RESET + * SCSI BUS RESET OCCURRED or + * MICROCODE HAS BEEN CHANGED or + * protocol specific + * BUS DEVICE RESET FUNCTION OCCURRED + * I_T NEXUS LOSS OCCURRED + * COMMANDS CLEARED BY POWER LOSS NOTIFICATION + * all others Lowest + * + * Each of the ASCQ codes listed above are defined in + * the 29h ASC family, see spc4r17 Table D.1 + */ + if (ua_p->ua_asc == 0x29) { + if ((asc == 0x29) && (ascq > ua_p->ua_ascq)) + list_add(&ua->ua_nacl_list, + &deve->ua_list); + else + list_add_tail(&ua->ua_nacl_list, + &deve->ua_list); + } else if (ua_p->ua_asc == 0x2a) { + /* + * Incoming Family 29h ASCQ codes will override + * Family 2AHh ASCQ codes for Unit Attention condition. 
+ */ + if ((asc == 0x29) || (ascq > ua_p->ua_asc)) + list_add(&ua->ua_nacl_list, + &deve->ua_list); + else + list_add_tail(&ua->ua_nacl_list, + &deve->ua_list); + } else + list_add_tail(&ua->ua_nacl_list, + &deve->ua_list); + spin_unlock(&deve->ua_lock); + spin_unlock_irq(&nacl->device_list_lock); + + atomic_inc(&deve->ua_count); + smp_mb__after_atomic_inc(); + return 0; + } + list_add_tail(&ua->ua_nacl_list, &deve->ua_list); + spin_unlock(&deve->ua_lock); + spin_unlock_irq(&nacl->device_list_lock); + + printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" + " 0x%02x, ASCQ: 0x%02x\n", + TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, + asc, ascq); + + atomic_inc(&deve->ua_count); + smp_mb__after_atomic_inc(); + return 0; +} + +void core_scsi3_ua_release_all( + struct se_dev_entry *deve) +{ + struct se_ua *ua, *ua_p; + + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + + atomic_dec(&deve->ua_count); + smp_mb__after_atomic_dec(); + } + spin_unlock(&deve->ua_lock); +} + +void core_scsi3_ua_for_check_condition( + struct se_cmd *cmd, + u8 *asc, + u8 *ascq) +{ + struct se_device *dev = SE_DEV(cmd); + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + struct se_ua *ua = NULL, *ua_p; + int head = 1; + + if (!(sess)) + return; + + nacl = sess->se_node_acl; + if (!(nacl)) + return; + + spin_lock_irq(&nacl->device_list_lock); + deve = &nacl->device_list[cmd->orig_fe_lun]; + if (!(atomic_read(&deve->ua_count))) { + spin_unlock_irq(&nacl->device_list_lock); + return; + } + /* + * The highest priority Unit Attentions are placed at the head of the + * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + + * sense data for the received CDB. + */ + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + /* + * For ua_intlck_ctrl code not equal to 00b, only report the + * highest priority UNIT_ATTENTION and ASC/ASCQ without + * clearing it. + */ + if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + break; + } + /* + * Otherwise for the default 00b, release the UNIT ATTENTION + * condition. Return the ASC/ASCQ of the highest priority UA + * (head of the list) in the outgoing CHECK_CONDITION + sense. + */ + if (head) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + head = 0; + } + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + + atomic_dec(&deve->ua_count); + smp_mb__after_atomic_dec(); + } + spin_unlock(&deve->ua_lock); + spin_unlock_irq(&nacl->device_list_lock); + + printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" + " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" + " reported ASC: 0x%02x, ASCQ: 0x%02x\n", + TPG_TFO(nacl->se_tpg)->get_fabric_name(), + (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? 
"Reporting" : + "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, + cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); +} + +int core_scsi3_ua_clear_for_request_sense( + struct se_cmd *cmd, + u8 *asc, + u8 *ascq) +{ + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + struct se_ua *ua = NULL, *ua_p; + int head = 1; + + if (!(sess)) + return -1; + + nacl = sess->se_node_acl; + if (!(nacl)) + return -1; + + spin_lock_irq(&nacl->device_list_lock); + deve = &nacl->device_list[cmd->orig_fe_lun]; + if (!(atomic_read(&deve->ua_count))) { + spin_unlock_irq(&nacl->device_list_lock); + return -1; + } + /* + * The highest priority Unit Attentions are placed at the head of the + * struct se_dev_entry->ua_list. The first (and hence highest priority) + * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the + * matching struct se_lun. + * + * Once the returning ASC/ASCQ values are set, we go ahead and + * release all of the Unit Attention conditions for the associated + * struct se_lun. + */ + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + if (head) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + head = 0; + } + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + + atomic_dec(&deve->ua_count); + smp_mb__after_atomic_dec(); + } + spin_unlock(&deve->ua_lock); + spin_unlock_irq(&nacl->device_list_lock); + + printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" + " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," + " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), + cmd->orig_fe_lun, *asc, *ascq); + + return (head) ? -1 : 0; +} diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h new file mode 100644 index 000000000000..6e6b03460a1a --- /dev/null +++ b/drivers/target/target_core_ua.h @@ -0,0 +1,36 @@ +#ifndef TARGET_CORE_UA_H + +/* + * From spc4r17, Table D.1: ASC and ASCQ Assignment + */ +#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00 +#define ASCQ_29H_POWER_ON_OCCURRED 0x01 +#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02 +#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03 +#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04 +#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05 +#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06 +#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07 + +#define ASCQ_2AH_PARAMETERS_CHANGED 0x00 +#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01 +#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02 +#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03 +#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 +#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 +#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 +#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 +#define ASCQ_2AH_PRIORITY_CHANGED 0x08 + +#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 + +extern struct kmem_cache *se_ua_cache; + +extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *); +extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8); +extern void core_scsi3_ua_release_all(struct se_dev_entry *); +extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); +extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, + u8 *, u8 *); + +#endif /* TARGET_CORE_UA_H */ diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index 0d236f4bb8c2..b00101972f20 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -284,12 +284,11 @@ static int 
samplerate = 100; module_param(ixjdebug, int, 0); -static struct pci_device_id ixj_pci_tbl[] __devinitdata = { +static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = { { PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } }; - MODULE_DEVICE_TABLE(pci, ixj_pci_tbl); /************************************************************************ @@ -6581,7 +6580,8 @@ static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long ar case IXJCTL_SET_FILTER: if (copy_from_user(&jf, argp, sizeof(jf))) retval = -EFAULT; - retval = ixj_init_filter(j, &jf); + else + retval = ixj_init_filter(j, &jf); break; case IXJCTL_SET_FILTER_RAW: if (copy_from_user(&jfr, argp, sizeof(jfr))) diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index bf7c687519ef..f7a5dba3ca23 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -4,6 +4,7 @@ menuconfig THERMAL tristate "Generic Thermal sysfs driver" + depends on NET help Generic Thermal Sysfs driver offers a generic mechanism for thermal management. Usually it's made up of one or more thermal diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 13c72c629329..7d0e63c79280 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs support"); @@ -58,6 +60,22 @@ static LIST_HEAD(thermal_tz_list); static LIST_HEAD(thermal_cdev_list); static DEFINE_MUTEX(thermal_list_lock); +static unsigned int thermal_event_seqnum; + +static struct genl_family thermal_event_genl_family = { + .id = GENL_ID_GENERATE, + .name = THERMAL_GENL_FAMILY_NAME, + .version = THERMAL_GENL_VERSION, + .maxattr = THERMAL_GENL_ATTR_MAX, +}; + +static struct genl_multicast_group thermal_event_mcgrp = { + .name = THERMAL_GENL_MCAST_GROUP_NAME, +}; + +static int genetlink_init(void); +static void genetlink_exit(void); + static int get_idr(struct idr *idr, struct mutex *lock, int *id) { int err; @@ -823,11 +841,8 @@ static struct class thermal_class = { * @devdata: device private data. * @ops: standard thermal cooling devices callbacks. */ -struct thermal_cooling_device *thermal_cooling_device_register(char *type, - void *devdata, - struct - thermal_cooling_device_ops - *ops) +struct thermal_cooling_device *thermal_cooling_device_register( + char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos; @@ -1048,13 +1063,9 @@ EXPORT_SYMBOL(thermal_zone_device_update); * section 11.1.5.1 of the ACPI specification 3.0. 
*/ struct thermal_zone_device *thermal_zone_device_register(char *type, - int trips, - void *devdata, struct - thermal_zone_device_ops - *ops, int tc1, int - tc2, - int passive_delay, - int polling_delay) + int trips, void *devdata, + const struct thermal_zone_device_ops *ops, + int tc1, int tc2, int passive_delay, int polling_delay) { struct thermal_zone_device *tz; struct thermal_cooling_device *pos; @@ -1214,6 +1225,82 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) EXPORT_SYMBOL(thermal_zone_device_unregister); +int generate_netlink_event(u32 orig, enum events event) +{ + struct sk_buff *skb; + struct nlattr *attr; + struct thermal_genl_event *thermal_event; + void *msg_header; + int size; + int result; + + /* allocate memory */ + size = nla_total_size(sizeof(struct thermal_genl_event)) + \ + nla_total_size(0); + + skb = genlmsg_new(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + /* add the genetlink message header */ + msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, + &thermal_event_genl_family, 0, + THERMAL_GENL_CMD_EVENT); + if (!msg_header) { + nlmsg_free(skb); + return -ENOMEM; + } + + /* fill the data */ + attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \ + sizeof(struct thermal_genl_event)); + + if (!attr) { + nlmsg_free(skb); + return -EINVAL; + } + + thermal_event = nla_data(attr); + if (!thermal_event) { + nlmsg_free(skb); + return -EINVAL; + } + + memset(thermal_event, 0, sizeof(struct thermal_genl_event)); + + thermal_event->orig = orig; + thermal_event->event = event; + + /* send multicast genetlink message */ + result = genlmsg_end(skb, msg_header); + if (result < 0) { + nlmsg_free(skb); + return result; + } + + result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); + if (result) + printk(KERN_INFO "failed to send netlink event:%d", result); + + return result; +} +EXPORT_SYMBOL(generate_netlink_event); + +static int genetlink_init(void) +{ + int result; + + result = genl_register_family(&thermal_event_genl_family); + if (result) + return result; + + result = genl_register_mc_group(&thermal_event_genl_family, + &thermal_event_mcgrp); + if (result) + genl_unregister_family(&thermal_event_genl_family); + return result; +} + static int __init thermal_init(void) { int result = 0; @@ -1225,9 +1312,15 @@ static int __init thermal_init(void) mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); } + result = genetlink_init(); return result; } +static void genetlink_exit(void) +{ + genl_unregister_family(&thermal_event_genl_family); +} + static void __exit thermal_exit(void) { class_unregister(&thermal_class); @@ -1235,7 +1328,8 @@ static void __exit thermal_exit(void) idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); + genetlink_exit(); } -subsys_initcall(thermal_init); +fs_initcall(thermal_init); module_exit(thermal_exit); diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c index 1210534822d6..5408186afc35 100644 --- a/drivers/usb/gadget/imx_udc.c +++ b/drivers/usb/gadget/imx_udc.c @@ -1320,7 +1320,7 @@ static struct imx_udc_struct controller = { }; /******************************************************************************* - * USB gadged driver functions + * USB gadget driver functions ******************************************************************************* */ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c index 
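/*
 * Editorial sketch, not part of the patch: with generate_netlink_event()
 * defined and exported above, a thermal zone driver can push an event to
 * user space in a single call. The "enum events" values live in the thermal
 * header, which is not shown in this diff, and the tz->id field is assumed
 * to be the zone id assigned at registration time.
 */
static void example_notify_thermal_event(struct thermal_zone_device *tz,
					 enum events event)
{
	/* orig identifies the originating zone, event the condition seen */
	generate_netlink_event(tz->id, event);
}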
777972454e3e..1eca8b47ce3c 100644 --- a/drivers/usb/gadget/langwell_udc.c +++ b/drivers/usb/gadget/langwell_udc.c @@ -3086,7 +3086,7 @@ static void langwell_udc_remove(struct pci_dev *pdev) kfree(dev->ep); - /* diable IRQ handler */ + /* disable IRQ handler */ if (dev->got_irq) free_irq(pdev->irq, dev); @@ -3406,7 +3406,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state) /* disable interrupt and set controller to stop state */ langwell_udc_stop(dev); - /* diable IRQ handler */ + /* disable IRQ handler */ if (dev->got_irq) free_irq(pdev->irq, dev); dev->got_irq = 0; diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index 3b513bafaf2a..b015561fd602 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -543,7 +543,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) ro = curlun->initially_ro; if (!ro) { filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); - if (-EROFS == PTR_ERR(filp)) + if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES) ro = 1; } if (ro) @@ -558,10 +558,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (filp->f_path.dentry) inode = filp->f_path.dentry->d_inode; - if (inode && S_ISBLK(inode->i_mode)) { - if (bdev_read_only(inode->i_bdev)) - ro = 1; - } else if (!inode || !S_ISREG(inode->i_mode)) { + if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 20092a27a1e8..12fd184226f2 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -98,13 +98,13 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb) usb->intr_nesting_cnt--; } -/* diable the usb interrupt */ +/* disable the usb interrupt */ void fhci_usb_disable_interrupt(struct fhci_usb *usb) { struct fhci_hcd *fhci = usb->fhci; if (usb->intr_nesting_cnt == 0) { - /* diable the timer interrupt */ + /* disable the timer interrupt */ disable_irq_nosync(fhci->timer->irq); /* disable the usb interrupt */ diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index 7be548ca2183..38fe058fbe61 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c @@ -271,8 +271,8 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep, /* * Collect the submitted frames and inform the application about them - * It is also prepearing the TDs for new frames. If the Tx interrupts - * are diabled, the application should call that routine to get + * It is also preparing the TDs for new frames. If the Tx interrupts + * are disabled, the application should call that routine to get * confirmation about the submitted frames. Otherwise, the routine is * called frome the interrupt service routine during the Tx interrupt. 
* In that case the application is informed by calling the application diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c index e49b75a78000..f90d003f2302 100644 --- a/drivers/usb/host/imx21-hcd.c +++ b/drivers/usb/host/imx21-hcd.c @@ -1658,7 +1658,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd) spin_lock_irqsave(&imx21->lock, flags); - /* Reset the Host controler modules */ + /* Reset the Host controller modules */ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH | USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC, imx21->regs + USBOTG_RST_CTRL); diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 32149be4ad8e..e0cb12b573f9 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -3094,7 +3094,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) /* Some boards (mostly VIA?) report bogus overcurrent indications, * causing massive log spam unless we completely ignore them. It - * may be relevant that VIA VT8235 controlers, where PORT_POWER is + * may be relevant that VIA VT8235 controllers, where PORT_POWER is * always set, seem to clear PORT_OCC and PORT_CSC when writing to * PORT_POWER; that's surprising, but maybe within-spec. */ diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 44f8b9225054..a6afd15f6a46 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -717,7 +717,7 @@ static int adu_probe(struct usb_interface *interface, goto exit; } - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index c9078e4e1f4d..e573e4704015 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -769,7 +769,7 @@ static int iowarrior_probe(struct usb_interface *interface, int i; int retval = -ENOMEM; - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index edffef642337..eefb8275bb7e 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -642,7 +642,7 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id * int i; int retval = -ENOMEM; - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 9b162dfaa4fb..ed58c6c8f15c 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1684,7 +1684,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb) struct musb_hw_ep *hw_ep; unsigned count = 0; - /* intialize endpoint list just once */ + /* initialize endpoint list just once */ INIT_LIST_HEAD(&(musb->g.ep_list)); for (epnum = 0, hw_ep = musb->endpoints; @@ -1765,7 +1765,7 @@ void musb_gadget_cleanup(struct musb *musb) * * -EINVAL something went wrong (not driver) * -EBUSY another gadget is already using the controller - * -ENOMEM no memeory to perform the operation + * -ENOMEM no memory to perform the operation * * @param driver the gadget driver * @param bind 
the driver's bind function diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c index c7b1d8108de9..8cb9d80207fa 100644 --- a/drivers/usb/wusbcore/wa-rpipe.c +++ b/drivers/usb/wusbcore/wa-rpipe.c @@ -49,7 +49,7 @@ * * USB Stack port number 4 (1 based) * WUSB code port index 3 (0 based) - * USB Addresss 5 (2 based -- 0 is for default, 1 for root hub) + * USB Address 5 (2 based -- 0 is for default, 1 for root hub) * * Now, because we don't use the concept as default address exactly * like the (wired) USB code does, we need to kind of skip it. So we diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 38244f59cdd9..ade0568c07a4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll) remove_wait_queue(poll->wqh, &poll->wait); } +static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, + unsigned seq) +{ + int left; + spin_lock_irq(&dev->work_lock); + left = seq - work->done_seq; + spin_unlock_irq(&dev->work_lock); + return left <= 0; +} + static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { unsigned seq; - int left; int flushing; spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; spin_unlock_irq(&dev->work_lock); - wait_event(work->done, ({ - spin_lock_irq(&dev->work_lock); - left = seq - work->done_seq <= 0; - spin_unlock_irq(&dev->work_lock); - left; - })); + wait_event(work->done, vhost_work_seq_done(dev, work, seq)); spin_lock_irq(&dev->work_lock); flushing = --work->flushing; spin_unlock_irq(&dev->work_lock); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 55dc6fb6e909..d916ac04abab 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -11,6 +11,13 @@ config HAVE_FB_ATMEL config HAVE_FB_IMX bool +config SH_MIPI_DSI + tristate + depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK + +config SH_LCD_MIPI_DSI + bool + source "drivers/char/agp/Kconfig" source "drivers/gpu/vga/Kconfig" @@ -414,7 +421,7 @@ config FB_SA1100 Y here. config FB_IMX - tristate "Motorola i.MX LCD support" + tristate "Freescale i.MX LCD support" depends on FB && (HAVE_FB_IMX || ARCH_MX1 || ARCH_MX2) select FB_CFB_FILLRECT select FB_CFB_COPYAREA @@ -1273,7 +1280,7 @@ config FB_MATROX module will be called matroxfb. You can pass several parameters to the driver at boot time or at - module load time. The parameters look like "video=matrox:XXX", and + module load time. The parameters look like "video=matroxfb:XXX", and are described in . config FB_MATROX_MILLENIUM @@ -1990,13 +1997,6 @@ config FB_W100 If unsure, say N. 
-config SH_MIPI_DSI - tristate - depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK - -config SH_LCD_MIPI_DSI - bool - config FB_SH_MOBILE_LCDC tristate "SuperH Mobile LCDC framebuffer support" depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 8dce25126330..bac163450216 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl) return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL); } -static struct backlight_ops atmel_lcdc_bl_ops = { +static const struct backlight_ops atmel_lcdc_bl_ops = { .update_status = atmel_bl_update_status, .get_brightness = atmel_bl_get_brightness, }; diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 34a0851bcbfa..dd9de2e80580 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops aty128_bl_data = { +static const struct backlight_ops aty128_bl_data = { .get_brightness = aty128_bl_get_brightness, .update_status = aty128_bl_update_status, }; diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 5a3ce3ad1ec8..767ab4fb1a05 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -2221,7 +2221,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops aty_bl_data = { +static const struct backlight_ops aty_bl_data = { .get_brightness = aty_bl_get_brightness, .update_status = aty_bl_update_status, }; diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c index 256966e9667d..9b811ddbce83 100644 --- a/drivers/video/aty/radeon_backlight.c +++ b/drivers/video/aty/radeon_backlight.c @@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops radeon_bl_data = { +static const struct backlight_ops radeon_bl_data = { .get_brightness = radeon_bl_get_brightness, .update_status = radeon_bl_update_status, }; diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 38ffc3fbcbe4..c789c46e38af 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -155,7 +155,7 @@ out: return -EINVAL; } -static struct backlight_ops pm860x_backlight_ops = { +static const struct backlight_ops pm860x_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = pm860x_backlight_update_status, .get_brightness = pm860x_backlight_get_brightness, diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index c67801e57aaf..98ad3e5f7c85 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -25,7 +25,7 @@ struct l4f00242t03_priv { struct spi_device *spi; struct lcd_device *ld; - int lcd_on:1; + int lcd_state; struct regulator *io_reg; struct regulator *core_reg; }; @@ -62,11 +62,36 @@ static void l4f00242t03_lcd_init(struct spi_device *spi) regulator_enable(priv->core_reg); } + l4f00242t03_reset(pdata->reset_gpio); + gpio_set_value(pdata->data_enable_gpio, 1); msleep(60); spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16)); } +static void l4f00242t03_lcd_powerdown(struct spi_device *spi) +{ + struct l4f00242t03_pdata *pdata = 
spi->dev.platform_data; + struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev); + + dev_dbg(&spi->dev, "Powering down LCD\n"); + + gpio_set_value(pdata->data_enable_gpio, 0); + + if (priv->io_reg) + regulator_disable(priv->io_reg); + + if (priv->core_reg) + regulator_disable(priv->core_reg); +} + +static int l4f00242t03_lcd_power_get(struct lcd_device *ld) +{ + struct l4f00242t03_priv *priv = lcd_get_data(ld); + + return priv->lcd_state; +} + static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) { struct l4f00242t03_priv *priv = lcd_get_data(ld); @@ -79,35 +104,54 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) const u16 disoff = 0x28; if (power <= FB_BLANK_NORMAL) { - if (priv->lcd_on) - return 0; - - dev_dbg(&spi->dev, "turning on LCD\n"); - - spi_write(spi, (const u8 *)&slpout, sizeof(u16)); - msleep(60); - spi_write(spi, (const u8 *)&dison, sizeof(u16)); - - priv->lcd_on = 1; + if (priv->lcd_state <= FB_BLANK_NORMAL) { + /* Do nothing, the LCD is running */ + } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { + dev_dbg(&spi->dev, "Resuming LCD\n"); + + spi_write(spi, (const u8 *)&slpout, sizeof(u16)); + msleep(60); + spi_write(spi, (const u8 *)&dison, sizeof(u16)); + } else { + /* priv->lcd_state == FB_BLANK_POWERDOWN */ + l4f00242t03_lcd_init(spi); + priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; + l4f00242t03_lcd_power_set(priv->ld, power); + } + } else if (power < FB_BLANK_POWERDOWN) { + if (priv->lcd_state <= FB_BLANK_NORMAL) { + /* Send the display in standby */ + dev_dbg(&spi->dev, "Standby the LCD\n"); + + spi_write(spi, (const u8 *)&disoff, sizeof(u16)); + msleep(60); + spi_write(spi, (const u8 *)&slpin, sizeof(u16)); + } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { + /* Do nothing, the LCD is already in standby */ + } else { + /* priv->lcd_state == FB_BLANK_POWERDOWN */ + l4f00242t03_lcd_init(spi); + priv->lcd_state = FB_BLANK_UNBLANK; + l4f00242t03_lcd_power_set(ld, power); + } } else { - if (!priv->lcd_on) - return 0; - - dev_dbg(&spi->dev, "turning off LCD\n"); - - spi_write(spi, (const u8 *)&disoff, sizeof(u16)); - msleep(60); - spi_write(spi, (const u8 *)&slpin, sizeof(u16)); - - priv->lcd_on = 0; + /* power == FB_BLANK_POWERDOWN */ + if (priv->lcd_state != FB_BLANK_POWERDOWN) { + /* Clear the screen before shutting down */ + spi_write(spi, (const u8 *)&disoff, sizeof(u16)); + msleep(60); + l4f00242t03_lcd_powerdown(spi); + } } + priv->lcd_state = power; + return 0; } static struct lcd_ops l4f_ops = { .set_power = l4f00242t03_lcd_power_set, - .get_power = NULL, + .get_power = l4f00242t03_lcd_power_get, }; static int __devinit l4f00242t03_probe(struct spi_device *spi) @@ -185,9 +229,9 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) } /* Init the LCD */ - l4f00242t03_reset(pdata->reset_gpio); l4f00242t03_lcd_init(spi); - l4f00242t03_lcd_power_set(priv->ld, 1); + priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; + l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK); dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n"); @@ -214,9 +258,11 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi) struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev); struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data; - l4f00242t03_lcd_power_set(priv->ld, 0); + l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); lcd_device_unregister(priv->ld); + dev_set_drvdata(&spi->dev, NULL); + gpio_free(pdata->data_enable_gpio); gpio_free(pdata->reset_gpio); @@ -230,6 +276,15 @@ static int __devexit 
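/*
 * Editorial note, not part of the patch: the comparisons above rely on the
 * standard fbdev blanking order from linux/fb.h, where FB_BLANK_UNBLANK (0)
 * < FB_BLANK_NORMAL < FB_BLANK_VSYNC_SUSPEND < FB_BLANK_HSYNC_SUSPEND <
 * FB_BLANK_POWERDOWN (4). "power <= FB_BLANK_NORMAL" therefore means "run",
 * the two suspend levels map to panel standby, and only FB_BLANK_POWERDOWN
 * takes the full l4f00242t03_lcd_powerdown() path.
 */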
l4f00242t03_remove(struct spi_device *spi) return 0; } +static void l4f00242t03_shutdown(struct spi_device *spi) +{ + struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev); + + if (priv) + l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); + +} + static struct spi_driver l4f00242t03_driver = { .driver = { .name = "l4f00242t03", @@ -237,6 +292,7 @@ static struct spi_driver l4f00242t03_driver = { }, .probe = l4f00242t03_probe, .remove = __devexit_p(l4f00242t03_remove), + .shutdown = l4f00242t03_shutdown, }; static __init int l4f00242t03_init(void) diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index b2b2c7ba1f63..209acc105cbc 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -92,7 +92,7 @@ static int max8925_backlight_get_brightness(struct backlight_device *bl) return ret; } -static struct backlight_ops max8925_backlight_ops = { +static const struct backlight_ops max8925_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = max8925_backlight_update_status, .get_brightness = max8925_backlight_get_brightness, diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 915448ec75bf..c97491b8b39b 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -375,7 +375,8 @@ static const char *vgacon_startup(void) u16 saved1, saved2; volatile u16 *p; - if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) { + if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || + screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index 0c99de0562ca..b358d045f130 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c @@ -483,7 +483,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info) info->screen_base, info->fix.smem_start); } -static int __init ep93xxfb_probe(struct platform_device *pdev) +static int __devinit ep93xxfb_probe(struct platform_device *pdev) { struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data; struct fb_info *info; @@ -598,7 +598,7 @@ failed: return err; } -static int ep93xxfb_remove(struct platform_device *pdev) +static int __devexit ep93xxfb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); struct ep93xx_fbi *fbi = info->par; @@ -622,7 +622,7 @@ static int ep93xxfb_remove(struct platform_device *pdev) static struct platform_driver ep93xxfb_driver = { .probe = ep93xxfb_probe, - .remove = ep93xxfb_remove, + .remove = __devexit_p(ep93xxfb_remove), .driver = { .name = "ep93xx-fb", .owner = THIS_MODULE, diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 1ab2c2588675..69bd4a581d4a 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -974,6 +974,6 @@ static void __exit imxfb_cleanup(void) module_init(imxfb_init); module_exit(imxfb_cleanup); -MODULE_DESCRIPTION("Motorola i.MX framebuffer driver"); +MODULE_DESCRIPTION("Freescale i.MX framebuffer driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c index 052dd9f0b760..a082debe824b 100644 --- a/drivers/video/matrox/matroxfb_base.c +++ b/drivers/video/matrox/matroxfb_base.c @@ -1247,46 +1247,46 @@ static struct { struct fb_bitfield red, green, blue, transp; int bits_per_pixel; }; /* initialized by setup, see explanation at end of file (search for 
MODULE_PARM_DESC) */ -static unsigned int mem; /* "matrox:mem:xxxxxM" */ +static unsigned int mem; /* "matroxfb:mem:xxxxxM" */ static int option_precise_width = 1; /* cannot be changed, option_precise_width==0 must imply noaccel */ -static int inv24; /* "matrox:inv24" */ -static int cross4MB = -1; /* "matrox:cross4MB" */ -static int disabled; /* "matrox:disabled" */ -static int noaccel; /* "matrox:noaccel" */ -static int nopan; /* "matrox:nopan" */ -static int no_pci_retry; /* "matrox:nopciretry" */ -static int novga; /* "matrox:novga" */ -static int nobios; /* "matrox:nobios" */ -static int noinit = 1; /* "matrox:init" */ -static int inverse; /* "matrox:inverse" */ -static int sgram; /* "matrox:sgram" */ +static int inv24; /* "matroxfb:inv24" */ +static int cross4MB = -1; /* "matroxfb:cross4MB" */ +static int disabled; /* "matroxfb:disabled" */ +static int noaccel; /* "matroxfb:noaccel" */ +static int nopan; /* "matroxfb:nopan" */ +static int no_pci_retry; /* "matroxfb:nopciretry" */ +static int novga; /* "matroxfb:novga" */ +static int nobios; /* "matroxfb:nobios" */ +static int noinit = 1; /* "matroxfb:init" */ +static int inverse; /* "matroxfb:inverse" */ +static int sgram; /* "matroxfb:sgram" */ #ifdef CONFIG_MTRR -static int mtrr = 1; /* "matrox:nomtrr" */ +static int mtrr = 1; /* "matroxfb:nomtrr" */ #endif -static int grayscale; /* "matrox:grayscale" */ -static int dev = -1; /* "matrox:dev:xxxxx" */ -static unsigned int vesa = ~0; /* "matrox:vesa:xxxxx" */ -static int depth = -1; /* "matrox:depth:xxxxx" */ -static unsigned int xres; /* "matrox:xres:xxxxx" */ -static unsigned int yres; /* "matrox:yres:xxxxx" */ -static unsigned int upper = ~0; /* "matrox:upper:xxxxx" */ -static unsigned int lower = ~0; /* "matrox:lower:xxxxx" */ -static unsigned int vslen; /* "matrox:vslen:xxxxx" */ -static unsigned int left = ~0; /* "matrox:left:xxxxx" */ -static unsigned int right = ~0; /* "matrox:right:xxxxx" */ -static unsigned int hslen; /* "matrox:hslen:xxxxx" */ -static unsigned int pixclock; /* "matrox:pixclock:xxxxx" */ -static int sync = -1; /* "matrox:sync:xxxxx" */ -static unsigned int fv; /* "matrox:fv:xxxxx" */ -static unsigned int fh; /* "matrox:fh:xxxxxk" */ -static unsigned int maxclk; /* "matrox:maxclk:xxxxM" */ -static int dfp; /* "matrox:dfp */ -static int dfp_type = -1; /* "matrox:dfp:xxx */ -static int memtype = -1; /* "matrox:memtype:xxx" */ -static char outputs[8]; /* "matrox:outputs:xxx" */ +static int grayscale; /* "matroxfb:grayscale" */ +static int dev = -1; /* "matroxfb:dev:xxxxx" */ +static unsigned int vesa = ~0; /* "matroxfb:vesa:xxxxx" */ +static int depth = -1; /* "matroxfb:depth:xxxxx" */ +static unsigned int xres; /* "matroxfb:xres:xxxxx" */ +static unsigned int yres; /* "matroxfb:yres:xxxxx" */ +static unsigned int upper = ~0; /* "matroxfb:upper:xxxxx" */ +static unsigned int lower = ~0; /* "matroxfb:lower:xxxxx" */ +static unsigned int vslen; /* "matroxfb:vslen:xxxxx" */ +static unsigned int left = ~0; /* "matroxfb:left:xxxxx" */ +static unsigned int right = ~0; /* "matroxfb:right:xxxxx" */ +static unsigned int hslen; /* "matroxfb:hslen:xxxxx" */ +static unsigned int pixclock; /* "matroxfb:pixclock:xxxxx" */ +static int sync = -1; /* "matroxfb:sync:xxxxx" */ +static unsigned int fv; /* "matroxfb:fv:xxxxx" */ +static unsigned int fh; /* "matroxfb:fh:xxxxxk" */ +static unsigned int maxclk; /* "matroxfb:maxclk:xxxxM" */ +static int dfp; /* "matroxfb:dfp */ +static int dfp_type = -1; /* "matroxfb:dfp:xxx */ +static int memtype = -1; /* "matroxfb:memtype:xxx" */ 
+static char outputs[8]; /* "matroxfb:outputs:xxx" */ #ifndef MODULE -static char videomode[64]; /* "matrox:mode:xxxxx" or "matrox:xxxxx" */ +static char videomode[64]; /* "matroxfb:mode:xxxxx" or "matroxfb:xxxxx" */ #endif static int matroxfb_getmemory(struct matrox_fb_info *minfo, diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c index d2bb365f09b3..48c3ea8652b6 100644 --- a/drivers/video/modedb.c +++ b/drivers/video/modedb.c @@ -32,300 +32,320 @@ const char *fb_mode_option; EXPORT_SYMBOL_GPL(fb_mode_option); - /* - * Standard video mode definitions (taken from XFree86) - */ +/* + * Standard video mode definitions (taken from XFree86) + */ static const struct fb_videomode modedb[] = { - { + /* 640x400 @ 70 Hz, 31.5 kHz hsync */ - NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0, + FB_VMODE_NONINTERLACED }, + /* 640x480 @ 60 Hz, 31.5 kHz hsync */ - NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, + FB_VMODE_NONINTERLACED }, + /* 800x600 @ 56 Hz, 35.15 kHz hsync */ - NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */ - NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, - 0, FB_VMODE_INTERLACED - }, { + { NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0, + FB_VMODE_INTERLACED }, + /* 640x400 @ 85 Hz, 37.86 kHz hsync */ - NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, - FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, + /* 640x480 @ 72 Hz, 36.5 kHz hsync */ - NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 640x480 @ 75 Hz, 37.50 kHz hsync */ - NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 800x600 @ 60 Hz, 37.8 kHz hsync */ - NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 640x480 @ 85 Hz, 43.27 kHz hsync */ - NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */ - NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, - 0, FB_VMODE_INTERLACED - }, { + { NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0, + FB_VMODE_INTERLACED }, /* 800x600 @ 72 Hz, 48.0 kHz hsync */ - NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 60 Hz, 48.4 kHz hsync */ - NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0, + FB_VMODE_NONINTERLACED }, + /* 640x480 @ 100 Hz, 53.01 kHz hsync */ - NULL, 100, 640, 480, 
21834, 96, 32, 36, 8, 96, 6, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 60 Hz, 53.5 kHz hsync */ - NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0, + FB_VMODE_NONINTERLACED }, + /* 800x600 @ 85 Hz, 55.84 kHz hsync */ - NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 70 Hz, 56.5 kHz hsync */ - NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */ - NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, - 0, FB_VMODE_INTERLACED - }, { + { NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0, + FB_VMODE_INTERLACED }, + /* 800x600 @ 100 Hz, 64.02 kHz hsync */ - NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 76 Hz, 62.5 kHz hsync */ - NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 70 Hz, 62.4 kHz hsync */ - NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */ - NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1400x1050 @ 60Hz, 63.9 kHz hsync */ - NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/ - NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1400x1050 @ 60 Hz, ? 
kHz +hsync +vsync*/ - NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 85 Hz, 70.24 kHz hsync */ - NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 78 Hz, 70.8 kHz hsync */ - NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */ - NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0, + FB_VMODE_NONINTERLACED }, + /* 1600x1200 @ 60Hz, 75.00 kHz hsync */ - NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 84 Hz, 76.0 kHz hsync */ - NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */ - NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1024x768 @ 100Hz, 80.21 kHz hsync */ - NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */ - NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */ - NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 1152x864 @ 100 Hz, 89.62 kHz hsync */ - NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */ - NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */ - NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ - NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ - NULL, 85, 1600, 1200, 4545, 272, 
16, 37, 4, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */ - NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0, + FB_VMODE_NONINTERLACED }, + /* 1800x1440 @ 64Hz, 96.15 kHz hsync */ - NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1800x1440 @ 70Hz, 104.52 kHz hsync */ - NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 512x384 @ 78 Hz, 31.50 kHz hsync */ - NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 512x384 @ 85 Hz, 34.38 kHz hsync */ - NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0, + FB_VMODE_NONINTERLACED }, + /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */ - NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0, + FB_VMODE_DOUBLE }, + /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */ - NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0, + FB_VMODE_DOUBLE }, + /* 320x240 @ 72 Hz, 36.5 kHz hsync */ - NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0, + FB_VMODE_DOUBLE }, + /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */ - NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0, + FB_VMODE_DOUBLE }, + /* 400x300 @ 60 Hz, 37.8 kHz hsync */ - NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0, + FB_VMODE_DOUBLE }, + /* 400x300 @ 72 Hz, 48.0 kHz hsync */ - NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0, + FB_VMODE_DOUBLE }, + /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */ - NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0, + FB_VMODE_DOUBLE }, + /* 480x300 @ 60 Hz, 37.8 kHz hsync */ - NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0, + FB_VMODE_DOUBLE }, + /* 480x300 @ 63 Hz, 39.6 kHz hsync */ - NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0, + FB_VMODE_DOUBLE }, + /* 480x300 @ 72 Hz, 48.0 kHz hsync */ - NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, - 0, FB_VMODE_DOUBLE - }, { + { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0, + FB_VMODE_DOUBLE }, + /* 1920x1200 @ 60 Hz, 74.5 Khz 
hsync */ - NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ - NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */ - NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, - 0, FB_VMODE_NONINTERLACED - }, { + { NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0, + FB_VMODE_NONINTERLACED }, + /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ - NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, - 0, FB_VMODE_NONINTERLACED - }, { - /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ - NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, - 0, FB_VMODE_INTERLACED - }, { - /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ - NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, - 0, FB_VMODE_INTERLACED - }, { + { NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0, + FB_VMODE_NONINTERLACED }, + + /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ + { NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0, + FB_VMODE_INTERLACED }, + + /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ + { NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0, + FB_VMODE_INTERLACED }, + /* 864x480 @ 60 Hz, 35.15 kHz hsync */ - NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0, - 0, FB_VMODE_NONINTERLACED - }, + { NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0, + 0, FB_VMODE_NONINTERLACED }, }; #ifdef CONFIG_FB_MODE_HELPERS const struct fb_videomode cea_modes[64] = { /* #1: 640x480p@59.94/60Hz */ [1] = { - NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, 0, + NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, + FB_VMODE_NONINTERLACED, 0, }, /* #3: 720x480p@59.94/60Hz */ [3] = { - NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, FB_VMODE_NONINTERLACED, 0, + NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, + FB_VMODE_NONINTERLACED, 0, }, /* #5: 1920x1080i@59.94/60Hz */ [5] = { NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_INTERLACED, 0, }, /* #7: 720(1440)x480iH@59.94/60Hz */ [7] = { - NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, FB_VMODE_INTERLACED, 0, + NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, + FB_VMODE_INTERLACED, 0, }, /* #9: 720(1440)x240pH@59.94/60Hz */ [9] = { - NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, FB_VMODE_NONINTERLACED, 0, + NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, + FB_VMODE_NONINTERLACED, 0, }, /* #18: 720x576pH@50Hz */ [18] = { - NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, FB_VMODE_NONINTERLACED, 0, + NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, + FB_VMODE_NONINTERLACED, 0, }, /* #19: 1280x720p@50Hz */ [19] = { NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, 0, }, /* #20: 1920x1080i@50Hz */ [20] = { NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5, - 
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_INTERLACED, 0, }, /* #32: 1920x1080p@23.98/24Hz */ [32] = { NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, 0, }, /* #35: (2880)x480p4x@59.94/60Hz */ [35] = { - NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, FB_VMODE_NONINTERLACED, 0, + NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, + FB_VMODE_NONINTERLACED, 0, }, }; @@ -340,10 +360,10 @@ const struct fb_videomode vesa_modes[] = { { NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 3 640x480-60 VESA */ - { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, + { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 4 640x480-72 VESA */ - { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, + { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 5 640x480-75 VESA */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3, @@ -426,7 +446,7 @@ const struct fb_videomode vesa_modes[] = { FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 26 1600x1200-75 VESA */ - { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, + { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 27 1600x1200-85 VESA */ diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c index 81687ed26ba9..62498bd662fc 100644 --- a/drivers/video/nuc900fb.c +++ b/drivers/video/nuc900fb.c @@ -15,6 +15,7 @@ */ #include #include +#include #include #include #include @@ -597,9 +598,9 @@ static int __devinit nuc900fb_probe(struct platform_device *pdev) } fbi->clk = clk_get(&pdev->dev, NULL); - if (!fbi->clk || IS_ERR(fbi->clk)) { + if (IS_ERR(fbi->clk)) { printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n"); - ret = -ENOENT; + ret = PTR_ERR(fbi->clk); goto release_irq; } diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c index 2fb552a6f32c..6aac6d1b937b 100644 --- a/drivers/video/nvidia/nv_backlight.c +++ b/drivers/video/nvidia/nv_backlight.c @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops nvidia_bl_ops = { +static const struct backlight_ops nvidia_bl_ops = { .get_brightness = nvidia_bl_get_brightness, .update_status = nvidia_bl_update_status, }; diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig index 12327bbfdbbb..940cab394c2e 100644 --- a/drivers/video/omap2/displays/Kconfig +++ b/drivers/video/omap2/displays/Kconfig @@ -1,11 +1,13 @@ menu "OMAP2/3 Display Device Drivers" depends on OMAP2_DSS -config PANEL_GENERIC - tristate "Generic Panel" +config PANEL_GENERIC_DPI + tristate "Generic DPI Panel" help - Generic panel driver. - Used for DVI output for Beagle and OMAP3 SDP. + Generic DPI panel driver. + Supports DVI output for Beagle and OMAP3 SDP. + Supports LCD Panel used in TI SDP3430 and EVM boards, + OMAP3517 EVM boards and CM-T35. 
config PANEL_SHARP_LS037V7DW01 tristate "Sharp LS037V7DW01 LCD Panel" @@ -14,11 +16,12 @@ config PANEL_SHARP_LS037V7DW01 help LCD Panel used in TI's SDP3430 and EVM boards -config PANEL_SHARP_LQ043T1DG01 - tristate "Sharp LQ043T1DG01 LCD Panel" - depends on OMAP2_DSS - help - LCD Panel used in TI's OMAP3517 EVM boards +config PANEL_NEC_NL8048HL11_01B + tristate "NEC NL8048HL11-01B Panel" + depends on OMAP2_DSS + help + This NEC NL8048HL11-01B panel is TFT LCD + used in the Zoom2/3/3630 sdp boards. config PANEL_TAAL tristate "Taal DSI Panel" @@ -26,12 +29,6 @@ config PANEL_TAAL help Taal DSI command mode panel from TPO. -config PANEL_TOPPOLY_TDO35S - tristate "Toppoly TDO35S LCD Panel support" - depends on OMAP2_DSS - help - LCD Panel used in CM-T35 - config PANEL_TPO_TD043MTEA1 tristate "TPO TD043MTEA1 LCD Panel" depends on OMAP2_DSS && SPI diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile index aa386095d7c4..861f0255ec6b 100644 --- a/drivers/video/omap2/displays/Makefile +++ b/drivers/video/omap2/displays/Makefile @@ -1,8 +1,7 @@ -obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o +obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o -obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o +obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o obj-$(CONFIG_PANEL_TAAL) += panel-taal.o -obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c new file mode 100644 index 000000000000..07eb30ee59c8 --- /dev/null +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -0,0 +1,365 @@ +/* + * Generic DPI Panels support + * + * Copyright (C) 2010 Canonical Ltd. + * Author: Bryan Wu + * + * LCD panel driver for Sharp LQ043T1DG01 + * + * Copyright (C) 2009 Texas Instruments Inc + * Author: Vaibhav Hiremath + * + * LCD panel driver for Toppoly TDO35S + * + * Copyright (C) 2009 CompuLab, Ltd. + * Author: Mike Rapoport + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +struct panel_config { + struct omap_video_timings timings; + + int acbi; /* ac-bias pin transitions per interrupt */ + /* Unit: line clocks */ + int acb; /* ac-bias pin frequency */ + + enum omap_panel_config config; + + int power_on_delay; + int power_off_delay; + + /* + * Used to match device to panel configuration + * when use generic panel driver + */ + const char *name; +}; + +/* Panel configurations */ +static struct panel_config generic_dpi_panels[] = { + /* Generic Panel */ + { + { + .x_res = 640, + .y_res = 480, + + .pixel_clock = 23500, + + .hfp = 48, + .hsw = 32, + .hbp = 80, + + .vfp = 3, + .vsw = 4, + .vbp = 7, + }, + .acbi = 0x0, + .acb = 0x0, + .config = OMAP_DSS_LCD_TFT, + .power_on_delay = 0, + .power_off_delay = 0, + .name = "generic", + }, + + /* Sharp LQ043T1DG01 */ + { + { + .x_res = 480, + .y_res = 272, + + .pixel_clock = 9000, + + .hsw = 42, + .hfp = 3, + .hbp = 2, + + .vsw = 11, + .vfp = 3, + .vbp = 2, + }, + .acbi = 0x0, + .acb = 0x0, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, + .power_on_delay = 50, + .power_off_delay = 100, + .name = "sharp_lq", + }, + + /* Sharp LS037V7DW01 */ + { + { + .x_res = 480, + .y_res = 640, + + .pixel_clock = 19200, + + .hsw = 2, + .hfp = 1, + .hbp = 28, + + .vsw = 1, + .vfp = 1, + .vbp = 1, + }, + .acbi = 0x0, + .acb = 0x28, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS, + .power_on_delay = 50, + .power_off_delay = 100, + .name = "sharp_ls", + }, + + /* Toppoly TDO35S */ + { + { + .x_res = 480, + .y_res = 640, + + .pixel_clock = 26000, + + .hfp = 104, + .hsw = 8, + .hbp = 8, + + .vfp = 4, + .vsw = 2, + .vbp = 2, + }, + .acbi = 0x0, + .acb = 0x0, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC | + OMAP_DSS_LCD_ONOFF, + .power_on_delay = 0, + .power_off_delay = 0, + .name = "toppoly_tdo35s", + }, +}; + +struct panel_drv_data { + + struct omap_dss_device *dssdev; + + struct panel_config *panel_config; +}; + +static inline struct panel_generic_dpi_data +*get_panel_data(const struct omap_dss_device *dssdev) +{ + return (struct panel_generic_dpi_data *) dssdev->data; +} + +static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev) +{ + int r; + struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev); + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + struct panel_config *panel_config = drv_data->panel_config; + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + return 0; + + r = omapdss_dpi_display_enable(dssdev); + if (r) + goto err0; + + /* wait couple of vsyncs until enabling the LCD */ + if (panel_config->power_on_delay) + msleep(panel_config->power_on_delay); + + if (panel_data->platform_enable) { + r = panel_data->platform_enable(dssdev); + if (r) + goto err1; + } + + return 0; +err1: + omapdss_dpi_display_disable(dssdev); +err0: + return r; +} + +static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev) +{ + struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev); + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + struct panel_config *panel_config = drv_data->panel_config; + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) + return; + + if (panel_data->platform_disable) + panel_data->platform_disable(dssdev); + + /* wait couple of vsyncs after disabling the LCD */ + if (panel_config->power_off_delay) + msleep(panel_config->power_off_delay); + + omapdss_dpi_display_disable(dssdev); +} + +static int 
generic_dpi_panel_probe(struct omap_dss_device *dssdev) +{ + struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev); + struct panel_config *panel_config = NULL; + struct panel_drv_data *drv_data = NULL; + int i; + + dev_dbg(&dssdev->dev, "probe\n"); + + if (!panel_data || !panel_data->name) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(generic_dpi_panels); i++) { + if (strcmp(panel_data->name, generic_dpi_panels[i].name) == 0) { + panel_config = &generic_dpi_panels[i]; + break; + } + } + + if (!panel_config) + return -EINVAL; + + dssdev->panel.config = panel_config->config; + dssdev->panel.timings = panel_config->timings; + dssdev->panel.acb = panel_config->acb; + dssdev->panel.acbi = panel_config->acbi; + + drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) + return -ENOMEM; + + drv_data->dssdev = dssdev; + drv_data->panel_config = panel_config; + + dev_set_drvdata(&dssdev->dev, drv_data); + + return 0; +} + +static void generic_dpi_panel_remove(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + + dev_dbg(&dssdev->dev, "remove\n"); + + kfree(drv_data); + + dev_set_drvdata(&dssdev->dev, NULL); +} + +static int generic_dpi_panel_enable(struct omap_dss_device *dssdev) +{ + int r = 0; + + r = generic_dpi_panel_power_on(dssdev); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void generic_dpi_panel_disable(struct omap_dss_device *dssdev) +{ + generic_dpi_panel_power_off(dssdev); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev) +{ + generic_dpi_panel_power_off(dssdev); + + dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; + + return 0; +} + +static int generic_dpi_panel_resume(struct omap_dss_device *dssdev) +{ + int r = 0; + + r = generic_dpi_panel_power_on(dssdev); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + dpi_set_timings(dssdev, timings); +} + +static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = dssdev->panel.timings; +} + +static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + return dpi_check_timings(dssdev, timings); +} + +static struct omap_dss_driver dpi_driver = { + .probe = generic_dpi_panel_probe, + .remove = generic_dpi_panel_remove, + + .enable = generic_dpi_panel_enable, + .disable = generic_dpi_panel_disable, + .suspend = generic_dpi_panel_suspend, + .resume = generic_dpi_panel_resume, + + .set_timings = generic_dpi_panel_set_timings, + .get_timings = generic_dpi_panel_get_timings, + .check_timings = generic_dpi_panel_check_timings, + + .driver = { + .name = "generic_dpi_panel", + .owner = THIS_MODULE, + }, +}; + +static int __init generic_dpi_panel_drv_init(void) +{ + return omap_dss_register_driver(&dpi_driver); +} + +static void __exit generic_dpi_panel_drv_exit(void) +{ + omap_dss_unregister_driver(&dpi_driver); +} + +module_init(generic_dpi_panel_drv_init); +module_exit(generic_dpi_panel_drv_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c deleted file mode 100644 index 395a68de3990..000000000000 --- a/drivers/video/omap2/displays/panel-generic.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Generic panel 
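/*
 * Editorial sketch, not part of the patch: a board file selects one of the
 * panel_config entries above by name through the panel_generic_dpi_data
 * platform data that generic_dpi_panel_probe() reads from dssdev->data.
 * The board_* helper names below are hypothetical, and the platform-data
 * header added by this series is assumed to be included.
 */
static int board_panel_enable_lcd(struct omap_dss_device *dssdev)
{
	/* e.g. raise a panel/backlight enable GPIO here */
	return 0;
}

static void board_panel_disable_lcd(struct omap_dss_device *dssdev)
{
	/* e.g. drop the panel/backlight enable GPIO here */
}

static struct panel_generic_dpi_data board_panel_data = {
	.name			= "sharp_lq",
	.platform_enable	= board_panel_enable_lcd,
	.platform_disable	= board_panel_disable_lcd,
};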
support - * - * Copyright (C) 2008 Nokia Corporation - * Author: Tomi Valkeinen - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include - -static struct omap_video_timings generic_panel_timings = { - /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */ - .x_res = 640, - .y_res = 480, - .pixel_clock = 23500, - .hfp = 48, - .hsw = 32, - .hbp = 80, - .vfp = 3, - .vsw = 4, - .vbp = 7, -}; - -static int generic_panel_power_on(struct omap_dss_device *dssdev) -{ - int r; - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - return 0; - - r = omapdss_dpi_display_enable(dssdev); - if (r) - goto err0; - - if (dssdev->platform_enable) { - r = dssdev->platform_enable(dssdev); - if (r) - goto err1; - } - - return 0; -err1: - omapdss_dpi_display_disable(dssdev); -err0: - return r; -} - -static void generic_panel_power_off(struct omap_dss_device *dssdev) -{ - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) - return; - - if (dssdev->platform_disable) - dssdev->platform_disable(dssdev); - - omapdss_dpi_display_disable(dssdev); -} - -static int generic_panel_probe(struct omap_dss_device *dssdev) -{ - dssdev->panel.config = OMAP_DSS_LCD_TFT; - dssdev->panel.timings = generic_panel_timings; - - return 0; -} - -static void generic_panel_remove(struct omap_dss_device *dssdev) -{ -} - -static int generic_panel_enable(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = generic_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void generic_panel_disable(struct omap_dss_device *dssdev) -{ - generic_panel_power_off(dssdev); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static int generic_panel_suspend(struct omap_dss_device *dssdev) -{ - generic_panel_power_off(dssdev); - dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; - return 0; -} - -static int generic_panel_resume(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = generic_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void generic_panel_set_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - dpi_set_timings(dssdev, timings); -} - -static void generic_panel_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - -static int generic_panel_check_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - return dpi_check_timings(dssdev, timings); -} - -static struct omap_dss_driver generic_driver = { - .probe = generic_panel_probe, - .remove = generic_panel_remove, - - .enable = generic_panel_enable, - .disable = generic_panel_disable, - .suspend = generic_panel_suspend, - .resume = generic_panel_resume, - - .set_timings = generic_panel_set_timings, - .get_timings = generic_panel_get_timings, - .check_timings = generic_panel_check_timings, - - .driver = { - .name = "generic_panel", - .owner = THIS_MODULE, - }, -}; - -static int __init 
generic_panel_drv_init(void) -{ - return omap_dss_register_driver(&generic_driver); -} - -static void __exit generic_panel_drv_exit(void) -{ - omap_dss_unregister_driver(&generic_driver); -} - -module_init(generic_panel_drv_init); -module_exit(generic_panel_drv_exit); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c new file mode 100644 index 000000000000..925e0fadff54 --- /dev/null +++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c @@ -0,0 +1,325 @@ +/* + * Support for NEC-nl8048hl11-01b panel driver + * + * Copyright (C) 2010 Texas Instruments Inc. + * Author: Erik Gilling + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include + +#define LCD_XRES 800 +#define LCD_YRES 480 +/* + * NEC PIX Clock Ratings + * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz + */ +#define LCD_PIXEL_CLOCK 23800 + +struct nec_8048_data { + struct backlight_device *bl; +}; + +static const struct { + unsigned char addr; + unsigned char dat; +} nec_8048_init_seq[] = { + { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 }, + { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 }, + { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 }, + { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F }, + { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F }, + { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F }, + { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F }, + { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 }, + { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 }, + { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C }, + { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, + { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 }, + { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 }, + { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 }, + { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC }, + { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, + { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 }, + { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 }, +}; + +/* + * NEC NL8048HL11-01B Manual + * defines HFB, HSW, HBP, VFP, VSW, VBP as shown below + */ + +static struct omap_video_timings nec_8048_panel_timings = { + /* 800 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */ + .x_res = LCD_XRES, + .y_res = LCD_YRES, + .pixel_clock = LCD_PIXEL_CLOCK, + .hfp = 6, + .hsw = 1, + .hbp = 4, + .vfp = 3, + .vsw = 1, + .vbp = 4, +}; + +static int nec_8048_bl_update_status(struct backlight_device *bl) +{ + struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev); + int level; + + if (!dssdev->set_backlight) + return -EINVAL; + + if (bl->props.fb_blank == FB_BLANK_UNBLANK && + bl->props.power == FB_BLANK_UNBLANK) + level = bl->props.brightness; + else + level = 0; + + 
return dssdev->set_backlight(dssdev, level); +} + +static int nec_8048_bl_get_brightness(struct backlight_device *bl) +{ + if (bl->props.fb_blank == FB_BLANK_UNBLANK && + bl->props.power == FB_BLANK_UNBLANK) + return bl->props.brightness; + + return 0; +} + +static const struct backlight_ops nec_8048_bl_ops = { + .get_brightness = nec_8048_bl_get_brightness, + .update_status = nec_8048_bl_update_status, +}; + +static int nec_8048_panel_probe(struct omap_dss_device *dssdev) +{ + struct backlight_device *bl; + struct nec_8048_data *necd; + struct backlight_properties props; + int r; + + dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF | + OMAP_DSS_LCD_ONOFF; + dssdev->panel.timings = nec_8048_panel_timings; + + necd = kzalloc(sizeof(*necd), GFP_KERNEL); + if (!necd) + return -ENOMEM; + + dev_set_drvdata(&dssdev->dev, necd); + + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 255; + + bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev, + &nec_8048_bl_ops, &props); + if (IS_ERR(bl)) { + r = PTR_ERR(bl); + kfree(necd); + return r; + } + necd->bl = bl; + + bl->props.fb_blank = FB_BLANK_UNBLANK; + bl->props.power = FB_BLANK_UNBLANK; + bl->props.max_brightness = dssdev->max_backlight_level; + bl->props.brightness = dssdev->max_backlight_level; + + r = nec_8048_bl_update_status(bl); + if (r < 0) + dev_err(&dssdev->dev, "failed to set lcd brightness\n"); + + return 0; +} + +static void nec_8048_panel_remove(struct omap_dss_device *dssdev) +{ + struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev); + struct backlight_device *bl = necd->bl; + + bl->props.power = FB_BLANK_POWERDOWN; + nec_8048_bl_update_status(bl); + backlight_device_unregister(bl); + + kfree(necd); +} + +static int nec_8048_panel_enable(struct omap_dss_device *dssdev) +{ + int r = 0; + struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev); + struct backlight_device *bl = necd->bl; + + if (dssdev->platform_enable) { + r = dssdev->platform_enable(dssdev); + if (r) + return r; + } + + r = nec_8048_bl_update_status(bl); + if (r < 0) + dev_err(&dssdev->dev, "failed to set lcd brightness\n"); + + r = omapdss_dpi_display_enable(dssdev); + + return r; +} + +static void nec_8048_panel_disable(struct omap_dss_device *dssdev) +{ + struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev); + struct backlight_device *bl = necd->bl; + + omapdss_dpi_display_disable(dssdev); + + bl->props.brightness = 0; + nec_8048_bl_update_status(bl); + + if (dssdev->platform_disable) + dssdev->platform_disable(dssdev); +} + +static int nec_8048_panel_suspend(struct omap_dss_device *dssdev) +{ + nec_8048_panel_disable(dssdev); + return 0; +} + +static int nec_8048_panel_resume(struct omap_dss_device *dssdev) +{ + return nec_8048_panel_enable(dssdev); +} + +static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev) +{ + return 16; +} + +static struct omap_dss_driver nec_8048_driver = { + .probe = nec_8048_panel_probe, + .remove = nec_8048_panel_remove, + .enable = nec_8048_panel_enable, + .disable = nec_8048_panel_disable, + .suspend = nec_8048_panel_suspend, + .resume = nec_8048_panel_resume, + .get_recommended_bpp = nec_8048_recommended_bpp, + + .driver = { + .name = "NEC_8048_panel", + .owner = THIS_MODULE, + }, +}; + +static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr, + unsigned char reg_data) +{ + int ret = 0; + unsigned int cmd = 0, data = 0; + + cmd = 0x0000 | reg_addr; /* register address write */ + data = 0x0100 | 
reg_data ; /* register data write */ + data = (cmd << 16) | data; + + ret = spi_write(spi, (unsigned char *)&data, 4); + if (ret) + pr_err("error in spi_write %x\n", data); + + return ret; +} + +static int init_nec_8048_wvga_lcd(struct spi_device *spi) +{ + unsigned int i; + /* Initialization Sequence */ + /* nec_8048_spi_send(spi, REG, VAL) */ + for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++) + nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, + nec_8048_init_seq[i].dat); + udelay(20); + nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, + nec_8048_init_seq[i].dat); + return 0; +} + +static int nec_8048_spi_probe(struct spi_device *spi) +{ + spi->mode = SPI_MODE_0; + spi->bits_per_word = 32; + spi_setup(spi); + + init_nec_8048_wvga_lcd(spi); + + return omap_dss_register_driver(&nec_8048_driver); +} + +static int nec_8048_spi_remove(struct spi_device *spi) +{ + omap_dss_unregister_driver(&nec_8048_driver); + + return 0; +} + +static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg) +{ + nec_8048_spi_send(spi, 2, 0x01); + mdelay(40); + + return 0; +} + +static int nec_8048_spi_resume(struct spi_device *spi) +{ + /* reinitialize the panel */ + spi_setup(spi); + nec_8048_spi_send(spi, 2, 0x00); + init_nec_8048_wvga_lcd(spi); + + return 0; +} + +static struct spi_driver nec_8048_spi_driver = { + .probe = nec_8048_spi_probe, + .remove = __devexit_p(nec_8048_spi_remove), + .suspend = nec_8048_spi_suspend, + .resume = nec_8048_spi_resume, + .driver = { + .name = "nec_8048_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, +}; + +static int __init nec_8048_lcd_init(void) +{ + return spi_register_driver(&nec_8048_spi_driver); +} + +static void __exit nec_8048_lcd_exit(void) +{ + return spi_unregister_driver(&nec_8048_spi_driver); +} + +module_init(nec_8048_lcd_init); +module_exit(nec_8048_lcd_exit); +MODULE_AUTHOR("Erik Gilling "); +MODULE_DESCRIPTION("NEC-nl8048hl11-01b Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c deleted file mode 100644 index 0c6896cea2d0..000000000000 --- a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * LCD panel driver for Sharp LQ043T1DG01 - * - * Copyright (C) 2009 Texas Instruments Inc - * Author: Vaibhav Hiremath - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#include -#include -#include -#include - -#include - -static struct omap_video_timings sharp_lq_timings = { - .x_res = 480, - .y_res = 272, - - .pixel_clock = 9000, - - .hsw = 42, - .hfp = 3, - .hbp = 2, - - .vsw = 11, - .vfp = 3, - .vbp = 2, -}; - -static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev) -{ - int r; - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - return 0; - - r = omapdss_dpi_display_enable(dssdev); - if (r) - goto err0; - - /* wait couple of vsyncs until enabling the LCD */ - msleep(50); - - if (dssdev->platform_enable) { - r = dssdev->platform_enable(dssdev); - if (r) - goto err1; - } - - return 0; -err1: - omapdss_dpi_display_disable(dssdev); -err0: - return r; -} - -static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev) -{ - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) - return; - - if (dssdev->platform_disable) - dssdev->platform_disable(dssdev); - - /* wait at least 5 vsyncs after disabling the LCD */ - msleep(100); - - omapdss_dpi_display_disable(dssdev); -} - -static int sharp_lq_panel_probe(struct omap_dss_device *dssdev) -{ - - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO; - dssdev->panel.acb = 0x0; - dssdev->panel.timings = sharp_lq_timings; - - return 0; -} - -static void sharp_lq_panel_remove(struct omap_dss_device *dssdev) -{ -} - -static int sharp_lq_panel_enable(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = sharp_lq_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void sharp_lq_panel_disable(struct omap_dss_device *dssdev) -{ - sharp_lq_panel_power_off(dssdev); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static int sharp_lq_panel_suspend(struct omap_dss_device *dssdev) -{ - sharp_lq_panel_power_off(dssdev); - dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; - return 0; -} - -static int sharp_lq_panel_resume(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = sharp_lq_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static struct omap_dss_driver sharp_lq_driver = { - .probe = sharp_lq_panel_probe, - .remove = sharp_lq_panel_remove, - - .enable = sharp_lq_panel_enable, - .disable = sharp_lq_panel_disable, - .suspend = sharp_lq_panel_suspend, - .resume = sharp_lq_panel_resume, - - .driver = { - .name = "sharp_lq_panel", - .owner = THIS_MODULE, - }, -}; - -static int __init sharp_lq_panel_drv_init(void) -{ - return omap_dss_register_driver(&sharp_lq_driver); -} - -static void __exit sharp_lq_panel_drv_exit(void) -{ - omap_dss_unregister_driver(&sharp_lq_driver); -} - -module_init(sharp_lq_panel_drv_init); -module_exit(sharp_lq_panel_drv_exit); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index e1c765d11419..61026f96ad20 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -465,7 +465,7 @@ static int taal_bl_get_intensity(struct backlight_device *dev) return 0; } -static struct backlight_ops taal_bl_ops = { +static const struct backlight_ops taal_bl_ops = { .get_brightness = taal_bl_get_intensity, .update_status = taal_bl_update_status, }; diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c deleted file mode 100644 index 526e906c8a6c..000000000000 --- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c +++ /dev/null @@ -1,164 +0,0 
@@ -/* - * LCD panel driver for Toppoly TDO35S - * - * Copyright (C) 2009 CompuLab, Ltd. - * Author: Mike Rapoport - * - * Based on generic panel support - * Copyright (C) 2008 Nokia Corporation - * Author: Tomi Valkeinen - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include - -static struct omap_video_timings toppoly_tdo_panel_timings = { - /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */ - .x_res = 480, - .y_res = 640, - - .pixel_clock = 26000, - - .hfp = 104, - .hsw = 8, - .hbp = 8, - - .vfp = 4, - .vsw = 2, - .vbp = 2, -}; - -static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev) -{ - int r; - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - return 0; - - r = omapdss_dpi_display_enable(dssdev); - if (r) - goto err0; - - if (dssdev->platform_enable) { - r = dssdev->platform_enable(dssdev); - if (r) - goto err1; - } - - return 0; -err1: - omapdss_dpi_display_disable(dssdev); -err0: - return r; -} - -static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev) -{ - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) - return; - - if (dssdev->platform_disable) - dssdev->platform_disable(dssdev); - - omapdss_dpi_display_disable(dssdev); -} - -static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev) -{ - dssdev->panel.config = OMAP_DSS_LCD_TFT | - OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | - OMAP_DSS_LCD_IPC | - OMAP_DSS_LCD_ONOFF; - - dssdev->panel.timings = toppoly_tdo_panel_timings; - - return 0; -} - -static void toppoly_tdo_panel_remove(struct omap_dss_device *dssdev) -{ -} - -static int toppoly_tdo_panel_enable(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = toppoly_tdo_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void toppoly_tdo_panel_disable(struct omap_dss_device *dssdev) -{ - toppoly_tdo_panel_power_off(dssdev); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static int toppoly_tdo_panel_suspend(struct omap_dss_device *dssdev) -{ - toppoly_tdo_panel_power_off(dssdev); - dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; - return 0; -} - -static int toppoly_tdo_panel_resume(struct omap_dss_device *dssdev) -{ - int r = 0; - - r = toppoly_tdo_panel_power_on(dssdev); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static struct omap_dss_driver generic_driver = { - .probe = toppoly_tdo_panel_probe, - .remove = toppoly_tdo_panel_remove, - - .enable = toppoly_tdo_panel_enable, - .disable = toppoly_tdo_panel_disable, - .suspend = toppoly_tdo_panel_suspend, - .resume = toppoly_tdo_panel_resume, - - .driver = { - .name = "toppoly_tdo35s_panel", - .owner = THIS_MODULE, - }, -}; - -static int __init toppoly_tdo_panel_drv_init(void) -{ - return omap_dss_register_driver(&generic_driver); -} - -static void __exit toppoly_tdo_panel_drv_exit(void) -{ - omap_dss_unregister_driver(&generic_driver); -} - -module_init(toppoly_tdo_panel_drv_init); -module_exit(toppoly_tdo_panel_drv_exit); -MODULE_LICENSE("GPL"); 
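The generic, Sharp LQ043T1DG01 and Toppoly TDO35S panel drivers removed above are superseded by the new panel-generic-dpi driver added earlier in this patch: generic_dpi_panel_probe() matches the board-supplied panel name against the generic_dpi_panels[] table and copies that entry's timings, config and ac-bias settings into the dssdev. The sketch below shows, under stated assumptions, how a board file would now select one of these consolidated panels; it is not part of this patch, and board_panel_enable/board_panel_disable as well as the exact omap_dss_device field values are illustrative placeholders following the usual omapdss DPI conventions.

	/*
	 * Illustrative board-file sketch (not part of this patch): select the
	 * "sharp_lq" entry of generic_dpi_panels[] via panel_generic_dpi_data.
	 * board_panel_enable()/board_panel_disable() are hypothetical platform
	 * callbacks invoked from generic_dpi_panel_power_on()/_power_off().
	 */
	static struct panel_generic_dpi_data board_panel_data = {
		.name			= "sharp_lq",
		.platform_enable	= board_panel_enable,
		.platform_disable	= board_panel_disable,
	};

	static struct omap_dss_device board_lcd_device = {
		.name			= "lcd",
		.type			= OMAP_DISPLAY_TYPE_DPI,
		.driver_name		= "generic_dpi_panel",
		.data			= &board_panel_data,
		.phy.dpi.data_lines	= 24,
	};

The driver name must be "generic_dpi_panel" so the dssdev binds to the omap_dss_driver registered at the end of the new file; the .data pointer is what get_panel_data() casts back to struct panel_generic_dpi_data in probe and power_on/power_off.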
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index fa40fa59a9ac..9f8c69f16e61 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -44,34 +44,40 @@ /* DISPC */ #define DISPC_BASE 0x48050400 -#define DISPC_SZ_REGS SZ_1K +#define DISPC_SZ_REGS SZ_4K struct dispc_reg { u16 idx; }; #define DISPC_REG(idx) ((const struct dispc_reg) { idx }) -/* DISPC common */ +/* + * DISPC common registers and + * DISPC channel registers , ch = 0 for LCD, ch = 1 for + * DIGIT, and ch = 2 for LCD2 + */ #define DISPC_REVISION DISPC_REG(0x0000) #define DISPC_SYSCONFIG DISPC_REG(0x0010) #define DISPC_SYSSTATUS DISPC_REG(0x0014) #define DISPC_IRQSTATUS DISPC_REG(0x0018) #define DISPC_IRQENABLE DISPC_REG(0x001C) #define DISPC_CONTROL DISPC_REG(0x0040) +#define DISPC_CONTROL2 DISPC_REG(0x0238) #define DISPC_CONFIG DISPC_REG(0x0044) +#define DISPC_CONFIG2 DISPC_REG(0x0620) #define DISPC_CAPABLE DISPC_REG(0x0048) -#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C) -#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050) -#define DISPC_TRANS_COLOR0 DISPC_REG(0x0054) -#define DISPC_TRANS_COLOR1 DISPC_REG(0x0058) +#define DISPC_DEFAULT_COLOR(ch) DISPC_REG(ch == 0 ? 0x004C : \ + (ch == 1 ? 0x0050 : 0x03AC)) +#define DISPC_TRANS_COLOR(ch) DISPC_REG(ch == 0 ? 0x0054 : \ + (ch == 1 ? 0x0058 : 0x03B0)) #define DISPC_LINE_STATUS DISPC_REG(0x005C) #define DISPC_LINE_NUMBER DISPC_REG(0x0060) -#define DISPC_TIMING_H DISPC_REG(0x0064) -#define DISPC_TIMING_V DISPC_REG(0x0068) -#define DISPC_POL_FREQ DISPC_REG(0x006C) -#define DISPC_DIVISOR DISPC_REG(0x0070) +#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400) +#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404) +#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408) +#define DISPC_DIVISOR(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C) #define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074) #define DISPC_SIZE_DIG DISPC_REG(0x0078) -#define DISPC_SIZE_LCD DISPC_REG(0x007C) +#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC) /* DISPC GFX plane */ #define DISPC_GFX_BA0 DISPC_REG(0x0080) @@ -86,13 +92,12 @@ struct dispc_reg { u16 idx; }; #define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4) #define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8) -#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4) -#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8) -#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC) - -#define DISPC_CPR_COEF_R DISPC_REG(0x0220) -#define DISPC_CPR_COEF_G DISPC_REG(0x0224) -#define DISPC_CPR_COEF_B DISPC_REG(0x0228) +#define DISPC_DATA_CYCLE1(ch) DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0) +#define DISPC_DATA_CYCLE2(ch) DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4) +#define DISPC_DATA_CYCLE3(ch) DISPC_REG(ch != 2 ? 0x01DC : 0x03C8) +#define DISPC_CPR_COEF_R(ch) DISPC_REG(ch != 2 ? 0x0220 : 0x03BC) +#define DISPC_CPR_COEF_G(ch) DISPC_REG(ch != 2 ? 0x0224 : 0x03B8) +#define DISPC_CPR_COEF_B(ch) DISPC_REG(ch != 2 ? 
0x0228 : 0x03B4) #define DISPC_GFX_PRELOAD DISPC_REG(0x022C) @@ -217,18 +222,29 @@ void dispc_save_context(void) SR(IRQENABLE); SR(CONTROL); SR(CONFIG); - SR(DEFAULT_COLOR0); - SR(DEFAULT_COLOR1); - SR(TRANS_COLOR0); - SR(TRANS_COLOR1); + SR(DEFAULT_COLOR(0)); + SR(DEFAULT_COLOR(1)); + SR(TRANS_COLOR(0)); + SR(TRANS_COLOR(1)); SR(LINE_NUMBER); - SR(TIMING_H); - SR(TIMING_V); - SR(POL_FREQ); - SR(DIVISOR); + SR(TIMING_H(0)); + SR(TIMING_V(0)); + SR(POL_FREQ(0)); + SR(DIVISOR(0)); SR(GLOBAL_ALPHA); SR(SIZE_DIG); - SR(SIZE_LCD); + SR(SIZE_LCD(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + SR(CONTROL2); + SR(DEFAULT_COLOR(2)); + SR(TRANS_COLOR(2)); + SR(SIZE_LCD(2)); + SR(TIMING_H(2)); + SR(TIMING_V(2)); + SR(POL_FREQ(2)); + SR(DIVISOR(2)); + SR(CONFIG2); + } SR(GFX_BA0); SR(GFX_BA1); @@ -241,13 +257,22 @@ void dispc_save_context(void) SR(GFX_WINDOW_SKIP); SR(GFX_TABLE_BA); - SR(DATA_CYCLE1); - SR(DATA_CYCLE2); - SR(DATA_CYCLE3); - - SR(CPR_COEF_R); - SR(CPR_COEF_G); - SR(CPR_COEF_B); + SR(DATA_CYCLE1(0)); + SR(DATA_CYCLE2(0)); + SR(DATA_CYCLE3(0)); + + SR(CPR_COEF_R(0)); + SR(CPR_COEF_G(0)); + SR(CPR_COEF_B(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + SR(CPR_COEF_B(2)); + SR(CPR_COEF_G(2)); + SR(CPR_COEF_R(2)); + + SR(DATA_CYCLE1(2)); + SR(DATA_CYCLE2(2)); + SR(DATA_CYCLE3(2)); + } SR(GFX_PRELOAD); @@ -356,18 +381,28 @@ void dispc_restore_context(void) /*RR(IRQENABLE);*/ /*RR(CONTROL);*/ RR(CONFIG); - RR(DEFAULT_COLOR0); - RR(DEFAULT_COLOR1); - RR(TRANS_COLOR0); - RR(TRANS_COLOR1); + RR(DEFAULT_COLOR(0)); + RR(DEFAULT_COLOR(1)); + RR(TRANS_COLOR(0)); + RR(TRANS_COLOR(1)); RR(LINE_NUMBER); - RR(TIMING_H); - RR(TIMING_V); - RR(POL_FREQ); - RR(DIVISOR); + RR(TIMING_H(0)); + RR(TIMING_V(0)); + RR(POL_FREQ(0)); + RR(DIVISOR(0)); RR(GLOBAL_ALPHA); RR(SIZE_DIG); - RR(SIZE_LCD); + RR(SIZE_LCD(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + RR(DEFAULT_COLOR(2)); + RR(TRANS_COLOR(2)); + RR(SIZE_LCD(2)); + RR(TIMING_H(2)); + RR(TIMING_V(2)); + RR(POL_FREQ(2)); + RR(DIVISOR(2)); + RR(CONFIG2); + } RR(GFX_BA0); RR(GFX_BA1); @@ -380,13 +415,22 @@ void dispc_restore_context(void) RR(GFX_WINDOW_SKIP); RR(GFX_TABLE_BA); - RR(DATA_CYCLE1); - RR(DATA_CYCLE2); - RR(DATA_CYCLE3); - - RR(CPR_COEF_R); - RR(CPR_COEF_G); - RR(CPR_COEF_B); + RR(DATA_CYCLE1(0)); + RR(DATA_CYCLE2(0)); + RR(DATA_CYCLE3(0)); + + RR(CPR_COEF_R(0)); + RR(CPR_COEF_G(0)); + RR(CPR_COEF_B(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + RR(DATA_CYCLE1(2)); + RR(DATA_CYCLE2(2)); + RR(DATA_CYCLE3(2)); + + RR(CPR_COEF_B(2)); + RR(CPR_COEF_G(2)); + RR(CPR_COEF_R(2)); + } RR(GFX_PRELOAD); @@ -490,7 +534,8 @@ void dispc_restore_context(void) /* enable last, because LCD & DIGIT enable are here */ RR(CONTROL); - + if (dss_has_feature(FEAT_MGR_LCD2)) + RR(CONTROL2); /* clear spurious SYNC_LOST_DIGIT interrupts */ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); @@ -516,42 +561,63 @@ bool dispc_go_busy(enum omap_channel channel) { int bit; - if (channel == OMAP_DSS_CHANNEL_LCD) + if (channel == OMAP_DSS_CHANNEL_LCD || + channel == OMAP_DSS_CHANNEL_LCD2) bit = 5; /* GOLCD */ else bit = 6; /* GODIGIT */ - return REG_GET(DISPC_CONTROL, bit, bit) == 1; + if (channel == OMAP_DSS_CHANNEL_LCD2) + return REG_GET(DISPC_CONTROL2, bit, bit) == 1; + else + return REG_GET(DISPC_CONTROL, bit, bit) == 1; } void dispc_go(enum omap_channel channel) { int bit; + bool enable_bit, go_bit; enable_clocks(1); - if (channel == OMAP_DSS_CHANNEL_LCD) + if (channel == OMAP_DSS_CHANNEL_LCD || + channel == OMAP_DSS_CHANNEL_LCD2) bit = 0; /* LCDENABLE */ else bit = 1; /* 
DIGITALENABLE */ /* if the channel is not enabled, we don't need GO */ - if (REG_GET(DISPC_CONTROL, bit, bit) == 0) + if (channel == OMAP_DSS_CHANNEL_LCD2) + enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; + else + enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + + if (!enable_bit) goto end; - if (channel == OMAP_DSS_CHANNEL_LCD) + if (channel == OMAP_DSS_CHANNEL_LCD || + channel == OMAP_DSS_CHANNEL_LCD2) bit = 5; /* GOLCD */ else bit = 6; /* GODIGIT */ - if (REG_GET(DISPC_CONTROL, bit, bit) == 1) { + if (channel == OMAP_DSS_CHANNEL_LCD2) + go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; + else + go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + + if (go_bit) { DSSERR("GO bit not down for channel %d\n", channel); goto end; } - DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT"); + DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : + (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT")); - REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); + if (channel == OMAP_DSS_CHANNEL_LCD2) + REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); + else + REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); end: enable_clocks(0); } @@ -773,13 +839,26 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height) dispc_write_reg(vsi_reg[plane-1], val); } +static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable) +{ + if (!dss_has_feature(FEAT_PRE_MULT_ALPHA)) + return; + + if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && + plane == OMAP_DSS_VIDEO1) + return; + + REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28); +} + static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) { if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) return; - BUG_ON(!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && - plane == OMAP_DSS_VIDEO1); + if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && + plane == OMAP_DSS_VIDEO1) + return; if (plane == OMAP_DSS_GFX) REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0); @@ -851,6 +930,7 @@ static void _dispc_set_channel_out(enum omap_plane plane, { int shift; u32 val; + int chan = 0, chan2 = 0; switch (plane) { case OMAP_DSS_GFX: @@ -866,7 +946,29 @@ static void _dispc_set_channel_out(enum omap_plane plane, } val = dispc_read_reg(dispc_reg_att[plane]); - val = FLD_MOD(val, channel, shift, shift); + if (dss_has_feature(FEAT_MGR_LCD2)) { + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + chan = 0; + chan2 = 0; + break; + case OMAP_DSS_CHANNEL_DIGIT: + chan = 1; + chan2 = 0; + break; + case OMAP_DSS_CHANNEL_LCD2: + chan = 0; + chan2 = 1; + break; + default: + BUG(); + } + + val = FLD_MOD(val, chan, shift, shift); + val = FLD_MOD(val, chan2, 31, 30); + } else { + val = FLD_MOD(val, channel, shift, shift); + } dispc_write_reg(dispc_reg_att[plane], val); } @@ -923,13 +1025,13 @@ void dispc_enable_replication(enum omap_plane plane, bool enable) enable_clocks(0); } -void dispc_set_lcd_size(u16 width, u16 height) +void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) { u32 val; BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); enable_clocks(1); - dispc_write_reg(DISPC_SIZE_LCD, val); + dispc_write_reg(DISPC_SIZE_LCD(channel), val); enable_clocks(0); } @@ -1426,12 +1528,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, } } -static unsigned long calc_fclk_five_taps(u16 width, u16 height, - u16 out_width, u16 out_height, enum omap_color_mode color_mode) +static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width, + u16 height, u16 
out_width, u16 out_height, + enum omap_color_mode color_mode) { u32 fclk = 0; /* FIXME venc pclk? */ - u64 tmp, pclk = dispc_pclk_rate(); + u64 tmp, pclk = dispc_pclk_rate(channel); if (height > out_height) { /* FIXME get real display PPL */ @@ -1463,8 +1566,8 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height, return fclk; } -static unsigned long calc_fclk(u16 width, u16 height, - u16 out_width, u16 out_height) +static unsigned long calc_fclk(enum omap_channel channel, u16 width, + u16 height, u16 out_width, u16 out_height) { unsigned int hf, vf; @@ -1488,7 +1591,7 @@ static unsigned long calc_fclk(u16 width, u16 height, vf = 1; /* FIXME venc pclk? */ - return dispc_pclk_rate() * vf * hf; + return dispc_pclk_rate(channel) * vf * hf; } void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out) @@ -1507,7 +1610,8 @@ static int _dispc_setup_plane(enum omap_plane plane, bool ilace, enum omap_dss_rotation_type rotation_type, u8 rotation, int mirror, - u8 global_alpha) + u8 global_alpha, u8 pre_mult_alpha, + enum omap_channel channel) { const int maxdownscale = cpu_is_omap34xx() ? 4 : 2; bool five_taps = 0; @@ -1536,29 +1640,12 @@ static int _dispc_setup_plane(enum omap_plane plane, height, pos_y, out_height); } + if (!dss_feat_color_mode_supported(plane, color_mode)) + return -EINVAL; + if (plane == OMAP_DSS_GFX) { if (width != out_width || height != out_height) return -EINVAL; - - switch (color_mode) { - case OMAP_DSS_COLOR_ARGB16: - case OMAP_DSS_COLOR_ARGB32: - case OMAP_DSS_COLOR_RGBA32: - if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) - return -EINVAL; - case OMAP_DSS_COLOR_RGBX32: - if (cpu_is_omap24xx()) - return -EINVAL; - /* fall through */ - case OMAP_DSS_COLOR_RGB12U: - case OMAP_DSS_COLOR_RGB16: - case OMAP_DSS_COLOR_RGB24P: - case OMAP_DSS_COLOR_RGB24U: - break; - - default: - return -EINVAL; - } } else { /* video plane */ @@ -1572,42 +1659,16 @@ static int _dispc_setup_plane(enum omap_plane plane, out_height > height * 8) return -EINVAL; - switch (color_mode) { - case OMAP_DSS_COLOR_RGBX32: - case OMAP_DSS_COLOR_RGB12U: - if (cpu_is_omap24xx()) - return -EINVAL; - /* fall through */ - case OMAP_DSS_COLOR_RGB16: - case OMAP_DSS_COLOR_RGB24P: - case OMAP_DSS_COLOR_RGB24U: - break; - - case OMAP_DSS_COLOR_ARGB16: - case OMAP_DSS_COLOR_ARGB32: - case OMAP_DSS_COLOR_RGBA32: - if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) - return -EINVAL; - if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && - plane == OMAP_DSS_VIDEO1) - return -EINVAL; - break; - - case OMAP_DSS_COLOR_YUV2: - case OMAP_DSS_COLOR_UYVY: + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) cconv = 1; - break; - - default: - return -EINVAL; - } /* Must use 5-tap filter? 
*/ five_taps = height > out_height * 2; if (!five_taps) { - fclk = calc_fclk(width, height, - out_width, out_height); + fclk = calc_fclk(channel, width, height, out_width, + out_height); /* Try 5-tap filter if 3-tap fclk is too high */ if (cpu_is_omap34xx() && height > out_height && @@ -1621,7 +1682,7 @@ static int _dispc_setup_plane(enum omap_plane plane, } if (five_taps) - fclk = calc_fclk_five_taps(width, height, + fclk = calc_fclk_five_taps(channel, width, height, out_width, out_height, color_mode); DSSDBG("required fclk rate = %lu Hz\n", fclk); @@ -1693,8 +1754,8 @@ static int _dispc_setup_plane(enum omap_plane plane, _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode); - if (plane != OMAP_DSS_VIDEO1) - _dispc_setup_global_alpha(plane, global_alpha); + _dispc_set_pre_mult_alpha(plane, pre_mult_alpha); + _dispc_setup_global_alpha(plane, global_alpha); return 0; } @@ -1710,36 +1771,44 @@ static void dispc_disable_isr(void *data, u32 mask) complete(compl); } -static void _enable_lcd_out(bool enable) +static void _enable_lcd_out(enum omap_channel channel, bool enable) { - REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); + if (channel == OMAP_DSS_CHANNEL_LCD2) + REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0); + else + REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); } -static void dispc_enable_lcd_out(bool enable) +static void dispc_enable_lcd_out(enum omap_channel channel, bool enable) { struct completion frame_done_completion; bool is_on; int r; + u32 irq; enable_clocks(1); /* When we disable LCD output, we need to wait until frame is done. * Otherwise the DSS is still working, and turning off the clocks * prevents DSS from going to OFF mode */ - is_on = REG_GET(DISPC_CONTROL, 0, 0); + is_on = channel == OMAP_DSS_CHANNEL_LCD2 ? + REG_GET(DISPC_CONTROL2, 0, 0) : + REG_GET(DISPC_CONTROL, 0, 0); + + irq = channel == OMAP_DSS_CHANNEL_LCD2 ? 
DISPC_IRQ_FRAMEDONE2 : + DISPC_IRQ_FRAMEDONE; if (!enable && is_on) { init_completion(&frame_done_completion); r = omap_dispc_register_isr(dispc_disable_isr, - &frame_done_completion, - DISPC_IRQ_FRAMEDONE); + &frame_done_completion, irq); if (r) DSSERR("failed to register FRAMEDONE isr\n"); } - _enable_lcd_out(enable); + _enable_lcd_out(channel, enable); if (!enable && is_on) { if (!wait_for_completion_timeout(&frame_done_completion, @@ -1747,8 +1816,7 @@ static void dispc_enable_lcd_out(bool enable) DSSERR("timeout waiting for FRAME DONE\n"); r = omap_dispc_unregister_isr(dispc_disable_isr, - &frame_done_completion, - DISPC_IRQ_FRAMEDONE); + &frame_done_completion, irq); if (r) DSSERR("failed to unregister FRAMEDONE isr\n"); @@ -1818,6 +1886,8 @@ static void dispc_enable_digit_out(bool enable) unsigned long flags; spin_lock_irqsave(&dispc.irq_lock, flags); dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); _omap_dispc_set_irqs(); spin_unlock_irqrestore(&dispc.irq_lock, flags); @@ -1832,14 +1902,17 @@ bool dispc_is_channel_enabled(enum omap_channel channel) return !!REG_GET(DISPC_CONTROL, 0, 0); else if (channel == OMAP_DSS_CHANNEL_DIGIT) return !!REG_GET(DISPC_CONTROL, 1, 1); + else if (channel == OMAP_DSS_CHANNEL_LCD2) + return !!REG_GET(DISPC_CONTROL2, 0, 0); else BUG(); } void dispc_enable_channel(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD) - dispc_enable_lcd_out(enable); + if (channel == OMAP_DSS_CHANNEL_LCD || + channel == OMAP_DSS_CHANNEL_LCD2) + dispc_enable_lcd_out(channel, enable); else if (channel == OMAP_DSS_CHANNEL_DIGIT) dispc_enable_digit_out(enable); else @@ -1848,6 +1921,9 @@ void dispc_enable_channel(enum omap_channel channel, bool enable) void dispc_lcd_enable_signal_polarity(bool act_high) { + if (!dss_has_feature(FEAT_LCDENABLEPOL)) + return; + enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); enable_clocks(0); @@ -1855,6 +1931,9 @@ void dispc_lcd_enable_signal_polarity(bool act_high) void dispc_lcd_enable_signal(bool enable) { + if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) + return; + enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); enable_clocks(0); @@ -1862,20 +1941,27 @@ void dispc_lcd_enable_signal(bool enable) void dispc_pck_free_enable(bool enable) { + if (!dss_has_feature(FEAT_PCKFREEENABLE)) + return; + enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); enable_clocks(0); } -void dispc_enable_fifohandcheck(bool enable) +void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable) { enable_clocks(1); - REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16); + if (channel == OMAP_DSS_CHANNEL_LCD2) + REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); + else + REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 16, 16); enable_clocks(0); } -void dispc_set_lcd_display_type(enum omap_lcd_display_type type) +void dispc_set_lcd_display_type(enum omap_channel channel, + enum omap_lcd_display_type type) { int mode; @@ -1894,7 +1980,10 @@ void dispc_set_lcd_display_type(enum omap_lcd_display_type type) } enable_clocks(1); - REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); + if (channel == OMAP_DSS_CHANNEL_LCD2) + REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); + else + REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); enable_clocks(0); } @@ -1908,25 +1997,21 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode) void dispc_set_default_color(enum omap_channel channel, u32 color) { - const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0, - DISPC_DEFAULT_COLOR1 }; - enable_clocks(1); - dispc_write_reg(def_reg[channel], color); + dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); enable_clocks(0); } u32 dispc_get_default_color(enum omap_channel channel) { - const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0, - DISPC_DEFAULT_COLOR1 }; u32 l; BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT && - channel != OMAP_DSS_CHANNEL_LCD); + channel != OMAP_DSS_CHANNEL_LCD && + channel != OMAP_DSS_CHANNEL_LCD2); enable_clocks(1); - l = dispc_read_reg(def_reg[channel]); + l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel)); enable_clocks(0); return l; @@ -1936,16 +2021,15 @@ void dispc_set_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type type, u32 trans_key) { - const struct dispc_reg tr_reg[] = { - DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 }; - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); - else /* OMAP_DSS_CHANNEL_DIGIT */ + else if (ch == OMAP_DSS_CHANNEL_DIGIT) REG_FLD_MOD(DISPC_CONFIG, type, 13, 13); + else /* OMAP_DSS_CHANNEL_LCD2 */ + REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); - dispc_write_reg(tr_reg[ch], trans_key); + dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); enable_clocks(0); } @@ -1953,21 +2037,20 @@ void dispc_get_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type *type, u32 *trans_key) { - const struct dispc_reg tr_reg[] = { - DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 }; - enable_clocks(1); if (type) { if (ch == OMAP_DSS_CHANNEL_LCD) *type = REG_GET(DISPC_CONFIG, 11, 11); else if (ch == OMAP_DSS_CHANNEL_DIGIT) *type = REG_GET(DISPC_CONFIG, 13, 13); + else if (ch == OMAP_DSS_CHANNEL_LCD2) + *type = REG_GET(DISPC_CONFIG2, 11, 11); else BUG(); } if (trans_key) - *trans_key = dispc_read_reg(tr_reg[ch]); + *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch)); enable_clocks(0); } @@ -1976,8 +2059,10 @@ void dispc_enable_trans_key(enum omap_channel ch, bool enable) enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); - else /* OMAP_DSS_CHANNEL_DIGIT */ + else if (ch == OMAP_DSS_CHANNEL_DIGIT) REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); + else /* OMAP_DSS_CHANNEL_LCD2 */ + REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); enable_clocks(0); } void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) @@ -1988,8 +2073,10 @@ void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); - else /* OMAP_DSS_CHANNEL_DIGIT */ + else if (ch == OMAP_DSS_CHANNEL_DIGIT) REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); + else /* OMAP_DSS_CHANNEL_LCD2 */ + REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18); enable_clocks(0); } bool dispc_alpha_blending_enabled(enum omap_channel ch) @@ -2003,13 +2090,14 @@ bool dispc_alpha_blending_enabled(enum 
omap_channel ch) if (ch == OMAP_DSS_CHANNEL_LCD) enabled = REG_GET(DISPC_CONFIG, 18, 18); else if (ch == OMAP_DSS_CHANNEL_DIGIT) - enabled = REG_GET(DISPC_CONFIG, 18, 18); + enabled = REG_GET(DISPC_CONFIG, 19, 19); + else if (ch == OMAP_DSS_CHANNEL_LCD2) + enabled = REG_GET(DISPC_CONFIG2, 18, 18); else BUG(); enable_clocks(0); return enabled; - } @@ -2022,6 +2110,8 @@ bool dispc_trans_key_enabled(enum omap_channel ch) enabled = REG_GET(DISPC_CONFIG, 10, 10); else if (ch == OMAP_DSS_CHANNEL_DIGIT) enabled = REG_GET(DISPC_CONFIG, 12, 12); + else if (ch == OMAP_DSS_CHANNEL_LCD2) + enabled = REG_GET(DISPC_CONFIG2, 10, 10); else BUG(); enable_clocks(0); @@ -2030,7 +2120,7 @@ bool dispc_trans_key_enabled(enum omap_channel ch) } -void dispc_set_tft_data_lines(u8 data_lines) +void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines) { int code; @@ -2053,11 +2143,15 @@ void dispc_set_tft_data_lines(u8 data_lines) } enable_clocks(1); - REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); + if (channel == OMAP_DSS_CHANNEL_LCD2) + REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); + else + REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); enable_clocks(0); } -void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode) +void dispc_set_parallel_interface_mode(enum omap_channel channel, + enum omap_parallel_interface_mode mode) { u32 l; int stallmode; @@ -2087,13 +2181,17 @@ void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode) enable_clocks(1); - l = dispc_read_reg(DISPC_CONTROL); - - l = FLD_MOD(l, stallmode, 11, 11); - l = FLD_MOD(l, gpout0, 15, 15); - l = FLD_MOD(l, gpout1, 16, 16); - - dispc_write_reg(DISPC_CONTROL, l); + if (channel == OMAP_DSS_CHANNEL_LCD2) { + l = dispc_read_reg(DISPC_CONTROL2); + l = FLD_MOD(l, stallmode, 11, 11); + dispc_write_reg(DISPC_CONTROL2, l); + } else { + l = dispc_read_reg(DISPC_CONTROL); + l = FLD_MOD(l, stallmode, 11, 11); + l = FLD_MOD(l, gpout0, 15, 15); + l = FLD_MOD(l, gpout1, 16, 16); + dispc_write_reg(DISPC_CONTROL, l); + } enable_clocks(0); } @@ -2129,8 +2227,8 @@ bool dispc_lcd_timings_ok(struct omap_video_timings *timings) timings->vfp, timings->vbp); } -static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp, - int vsw, int vfp, int vbp) +static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw, + int hfp, int hbp, int vsw, int vfp, int vbp) { u32 timing_h, timing_v; @@ -2149,13 +2247,14 @@ static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp, } enable_clocks(1); - dispc_write_reg(DISPC_TIMING_H, timing_h); - dispc_write_reg(DISPC_TIMING_V, timing_v); + dispc_write_reg(DISPC_TIMING_H(channel), timing_h); + dispc_write_reg(DISPC_TIMING_V(channel), timing_v); enable_clocks(0); } /* change name to mode? 
*/ -void dispc_set_lcd_timings(struct omap_video_timings *timings) +void dispc_set_lcd_timings(enum omap_channel channel, + struct omap_video_timings *timings) { unsigned xtot, ytot; unsigned long ht, vt; @@ -2165,10 +2264,11 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings) timings->vfp, timings->vbp)) BUG(); - _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp, - timings->vsw, timings->vfp, timings->vbp); + _dispc_set_lcd_timings(channel, timings->hsw, timings->hfp, + timings->hbp, timings->vsw, timings->vfp, + timings->vbp); - dispc_set_lcd_size(timings->x_res, timings->y_res); + dispc_set_lcd_size(channel, timings->x_res, timings->y_res); xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp; ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp; @@ -2176,7 +2276,8 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings) ht = (timings->pixel_clock * 1000) / xtot; vt = (timings->pixel_clock * 1000) / xtot / ytot; - DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res); + DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res, + timings->y_res); DSSDBG("pck %u\n", timings->pixel_clock); DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", timings->hsw, timings->hfp, timings->hbp, @@ -2185,21 +2286,23 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings) DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); } -static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div) +static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div, + u16 pck_div) { BUG_ON(lck_div < 1); BUG_ON(pck_div < 2); enable_clocks(1); - dispc_write_reg(DISPC_DIVISOR, + dispc_write_reg(DISPC_DIVISOR(channel), FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); enable_clocks(0); } -static void dispc_get_lcd_divisor(int *lck_div, int *pck_div) +static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, + int *pck_div) { u32 l; - l = dispc_read_reg(DISPC_DIVISOR); + l = dispc_read_reg(DISPC_DIVISOR(channel)); *lck_div = FLD_GET(l, 23, 16); *pck_div = FLD_GET(l, 7, 0); } @@ -2219,13 +2322,13 @@ unsigned long dispc_fclk_rate(void) return r; } -unsigned long dispc_lclk_rate(void) +unsigned long dispc_lclk_rate(enum omap_channel channel) { int lcd; unsigned long r; u32 l; - l = dispc_read_reg(DISPC_DIVISOR); + l = dispc_read_reg(DISPC_DIVISOR(channel)); lcd = FLD_GET(l, 23, 16); @@ -2234,13 +2337,13 @@ unsigned long dispc_lclk_rate(void) return r / lcd; } -unsigned long dispc_pclk_rate(void) +unsigned long dispc_pclk_rate(enum omap_channel channel) { int lcd, pcd; unsigned long r; u32 l; - l = dispc_read_reg(DISPC_DIVISOR); + l = dispc_read_reg(DISPC_DIVISOR(channel)); lcd = FLD_GET(l, 23, 16); pcd = FLD_GET(l, 7, 0); @@ -2256,8 +2359,6 @@ void dispc_dump_clocks(struct seq_file *s) enable_clocks(1); - dispc_get_lcd_divisor(&lcd, &pcd); - seq_printf(s, "- DISPC -\n"); seq_printf(s, "dispc fclk source = %s\n", @@ -2265,9 +2366,25 @@ void dispc_dump_clocks(struct seq_file *s) "dss1_alwon_fclk" : "dsi1_pll_fclk"); seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd); + seq_printf(s, "- LCD1 -\n"); + + dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd); + if (dss_has_feature(FEAT_MGR_LCD2)) { 
+ seq_printf(s, "- LCD2 -\n"); + + dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); + } enable_clocks(0); } @@ -2309,6 +2426,12 @@ void dispc_dump_irqs(struct seq_file *s) PIS(SYNC_LOST); PIS(SYNC_LOST_DIGIT); PIS(WAKEUP); + if (dss_has_feature(FEAT_MGR_LCD2)) { + PIS(FRAMEDONE2); + PIS(VSYNC2); + PIS(ACBIAS_COUNT_STAT2); + PIS(SYNC_LOST2); + } #undef PIS } #endif @@ -2327,19 +2450,30 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_CONTROL); DUMPREG(DISPC_CONFIG); DUMPREG(DISPC_CAPABLE); - DUMPREG(DISPC_DEFAULT_COLOR0); - DUMPREG(DISPC_DEFAULT_COLOR1); - DUMPREG(DISPC_TRANS_COLOR0); - DUMPREG(DISPC_TRANS_COLOR1); + DUMPREG(DISPC_DEFAULT_COLOR(0)); + DUMPREG(DISPC_DEFAULT_COLOR(1)); + DUMPREG(DISPC_TRANS_COLOR(0)); + DUMPREG(DISPC_TRANS_COLOR(1)); DUMPREG(DISPC_LINE_STATUS); DUMPREG(DISPC_LINE_NUMBER); - DUMPREG(DISPC_TIMING_H); - DUMPREG(DISPC_TIMING_V); - DUMPREG(DISPC_POL_FREQ); - DUMPREG(DISPC_DIVISOR); + DUMPREG(DISPC_TIMING_H(0)); + DUMPREG(DISPC_TIMING_V(0)); + DUMPREG(DISPC_POL_FREQ(0)); + DUMPREG(DISPC_DIVISOR(0)); DUMPREG(DISPC_GLOBAL_ALPHA); DUMPREG(DISPC_SIZE_DIG); - DUMPREG(DISPC_SIZE_LCD); + DUMPREG(DISPC_SIZE_LCD(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + DUMPREG(DISPC_CONTROL2); + DUMPREG(DISPC_CONFIG2); + DUMPREG(DISPC_DEFAULT_COLOR(2)); + DUMPREG(DISPC_TRANS_COLOR(2)); + DUMPREG(DISPC_TIMING_H(2)); + DUMPREG(DISPC_TIMING_V(2)); + DUMPREG(DISPC_POL_FREQ(2)); + DUMPREG(DISPC_DIVISOR(2)); + DUMPREG(DISPC_SIZE_LCD(2)); + } DUMPREG(DISPC_GFX_BA0); DUMPREG(DISPC_GFX_BA1); @@ -2353,13 +2487,22 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_GFX_WINDOW_SKIP); DUMPREG(DISPC_GFX_TABLE_BA); - DUMPREG(DISPC_DATA_CYCLE1); - DUMPREG(DISPC_DATA_CYCLE2); - DUMPREG(DISPC_DATA_CYCLE3); - - DUMPREG(DISPC_CPR_COEF_R); - DUMPREG(DISPC_CPR_COEF_G); - DUMPREG(DISPC_CPR_COEF_B); + DUMPREG(DISPC_DATA_CYCLE1(0)); + DUMPREG(DISPC_DATA_CYCLE2(0)); + DUMPREG(DISPC_DATA_CYCLE3(0)); + + DUMPREG(DISPC_CPR_COEF_R(0)); + DUMPREG(DISPC_CPR_COEF_G(0)); + DUMPREG(DISPC_CPR_COEF_B(0)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + DUMPREG(DISPC_DATA_CYCLE1(2)); + DUMPREG(DISPC_DATA_CYCLE2(2)); + DUMPREG(DISPC_DATA_CYCLE3(2)); + + DUMPREG(DISPC_CPR_COEF_R(2)); + DUMPREG(DISPC_CPR_COEF_G(2)); + DUMPREG(DISPC_CPR_COEF_B(2)); + } DUMPREG(DISPC_GFX_PRELOAD); @@ -2458,8 +2601,8 @@ void dispc_dump_regs(struct seq_file *s) #undef DUMPREG } -static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc, - bool ihs, bool ivs, u8 acbi, u8 acb) +static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf, + bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, u8 acb) { u32 l = 0; @@ -2476,13 +2619,14 @@ static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc, l |= FLD_VAL(acb, 7, 0); enable_clocks(1); - dispc_write_reg(DISPC_POL_FREQ, l); + dispc_write_reg(DISPC_POL_FREQ(channel), l); enable_clocks(0); } -void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb) +void dispc_set_pol_freq(enum omap_channel channel, + enum omap_panel_config config, u8 acbi, u8 acb) { - _dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0, + _dispc_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0, (config & OMAP_DSS_LCD_RF) != 0, (config & OMAP_DSS_LCD_IEO) != 0, (config & OMAP_DSS_LCD_IPC) != 0, @@ -2551,24 +2695,26 @@ int dispc_calc_clock_rates(unsigned long 
dispc_fclk_rate, return 0; } -int dispc_set_clock_div(struct dispc_clock_info *cinfo) +int dispc_set_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo) { DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div); DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div); - dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div); + dispc_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); return 0; } -int dispc_get_clock_div(struct dispc_clock_info *cinfo) +int dispc_get_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo) { unsigned long fck; fck = dispc_fclk_rate(); - cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16); - cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0); + cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16); + cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0); cinfo->lck = fck / cinfo->lck_div; cinfo->pck = cinfo->lck / cinfo->pck_div; @@ -2708,6 +2854,8 @@ static void print_irq_status(u32 status) PIS(VID2_FIFO_UNDERFLOW); PIS(SYNC_LOST); PIS(SYNC_LOST_DIGIT); + if (dss_has_feature(FEAT_MGR_LCD2)) + PIS(SYNC_LOST2); #undef PIS printk("\n"); @@ -2926,6 +3074,45 @@ static void dispc_error_worker(struct work_struct *work) } } + if (errors & DISPC_IRQ_SYNC_LOST2) { + struct omap_overlay_manager *manager = NULL; + bool enable = false; + + DSSERR("SYNC_LOST for LCD2, disabling LCD2\n"); + + for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { + struct omap_overlay_manager *mgr; + mgr = omap_dss_get_overlay_manager(i); + + if (mgr->id == OMAP_DSS_CHANNEL_LCD2) { + manager = mgr; + enable = mgr->device->state == + OMAP_DSS_DISPLAY_ACTIVE; + mgr->device->driver->disable(mgr->device); + break; + } + } + + if (manager) { + struct omap_dss_device *dssdev = manager->device; + for (i = 0; i < omap_dss_get_num_overlays(); ++i) { + struct omap_overlay *ovl; + ovl = omap_dss_get_overlay(i); + + if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC)) + continue; + + if (ovl->id != 0 && ovl->manager == manager) + dispc_enable_plane(ovl->id, 0); + } + + dispc_go(manager->id); + mdelay(50); + if (enable) + dssdev->driver->enable(dssdev); + } + } + if (errors & DISPC_IRQ_OCP_ERR) { DSSERR("OCP_ERR\n"); for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { @@ -3033,6 +3220,8 @@ static void _omap_dispc_initialize_irq(void) memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr)); dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; /* there's SYNC_LOST_DIGIT waiting after enabling the DSS, * so clear it */ @@ -3065,7 +3254,8 @@ static void _omap_dispc_initial_config(void) dispc_write_reg(DISPC_SYSCONFIG, l); /* FUNCGATED */ - REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); + if (dss_has_feature(FEAT_FUNCGATED)) + REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); /* L3 firewall setting: enable access to OCM RAM */ /* XXX this should be somewhere in plat-omap */ @@ -3139,17 +3329,18 @@ int dispc_setup_plane(enum omap_plane plane, enum omap_color_mode color_mode, bool ilace, enum omap_dss_rotation_type rotation_type, - u8 rotation, bool mirror, u8 global_alpha) + u8 rotation, bool mirror, u8 global_alpha, + u8 pre_mult_alpha, enum omap_channel channel) { int r = 0; DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " - "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n", + "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", plane, paddr, screen_width, pos_x, pos_y, width, height, out_width, out_height, ilace, color_mode, - rotation, mirror); + rotation, mirror, channel); enable_clocks(1); @@ 
-3161,7 +3352,8 @@ int dispc_setup_plane(enum omap_plane plane, color_mode, ilace, rotation_type, rotation, mirror, - global_alpha); + global_alpha, + pre_mult_alpha, channel); enable_clocks(0); diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 960e977a8bf0..75fb0a515430 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -40,8 +40,9 @@ static struct { } dpi; #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req, - unsigned long *fck, int *lck_div, int *pck_div) +static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, + unsigned long pck_req, unsigned long *fck, int *lck_div, + int *pck_div) { struct dsi_clock_info dsi_cinfo; struct dispc_clock_info dispc_cinfo; @@ -58,7 +59,7 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req, dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK); - r = dispc_set_clock_div(&dispc_cinfo); + r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) return r; @@ -69,8 +70,9 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req, return 0; } #else -static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req, - unsigned long *fck, int *lck_div, int *pck_div) +static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, + unsigned long pck_req, unsigned long *fck, int *lck_div, + int *pck_div) { struct dss_clock_info dss_cinfo; struct dispc_clock_info dispc_cinfo; @@ -84,7 +86,7 @@ static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req, if (r) return r; - r = dispc_set_clock_div(&dispc_cinfo); + r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) return r; @@ -107,17 +109,17 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); - dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi, - dssdev->panel.acb); + dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, + dssdev->panel.acbi, dssdev->panel.acb); is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); #else - r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); #endif if (r) goto err0; @@ -132,7 +134,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) t->pixel_clock = pck; } - dispc_set_lcd_timings(t); + dispc_set_lcd_timings(dssdev->manager->id, t); err0: dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); @@ -145,10 +147,12 @@ static int dpi_basic_init(struct omap_dss_device *dssdev) is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS); - dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT : - OMAP_DSS_LCD_DISPLAY_STN); - dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines); + dispc_set_parallel_interface_mode(dssdev->manager->id, + OMAP_DSS_PARALLELMODE_BYPASS); + dispc_set_lcd_display_type(dssdev->manager->id, is_tft ? 
+ OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); + dispc_set_tft_data_lines(dssdev->manager->id, + dssdev->phy.dpi.data_lines); return 0; } @@ -234,7 +238,7 @@ void dpi_set_timings(struct omap_dss_device *dssdev, dssdev->panel.timings = *timings; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { dpi_set_mode(dssdev); - dispc_go(OMAP_DSS_CHANNEL_LCD); + dispc_go(dssdev->manager->id); } } EXPORT_SYMBOL(dpi_set_timings); diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index aa4f7a5fae29..ddf3a0560822 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -792,7 +792,8 @@ static int dsi_pll_power(enum dsi_pll_power_state state) } /* calculate clock rates using dividers in cinfo */ -static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo) +static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, + struct dsi_clock_info *cinfo) { if (cinfo->regn == 0 || cinfo->regn > REGN_MAX) return -EINVAL; @@ -812,7 +813,7 @@ static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo) * with DSS2_FCK source also */ cinfo->highfreq = 0; } else { - cinfo->clkin = dispc_pclk_rate(); + cinfo->clkin = dispc_pclk_rate(dssdev->manager->id); if (cinfo->clkin < 32000000) cinfo->highfreq = 0; @@ -1206,8 +1207,8 @@ void dsi_dump_clocks(struct seq_file *s) seq_printf(s, "VP_CLK\t\t%lu\n" "VP_PCLK\t\t%lu\n", - dispc_lclk_rate(), - dispc_pclk_rate()); + dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), + dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD)); enable_clocks(0); } @@ -2888,7 +2889,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev, if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { dss_setup_partial_planes(dssdev, x, y, w, h, enlarge_update_area); - dispc_set_lcd_size(*w, *h); + dispc_set_lcd_size(dssdev->manager->id, *w, *h); } return 0; @@ -2947,12 +2948,14 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) return r; } - dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); + dispc_set_lcd_display_type(dssdev->manager->id, + OMAP_DSS_LCD_DISPLAY_TFT); - dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI); - dispc_enable_fifohandcheck(1); + dispc_set_parallel_interface_mode(dssdev->manager->id, + OMAP_DSS_PARALLELMODE_DSI); + dispc_enable_fifohandcheck(dssdev->manager->id, 1); - dispc_set_tft_data_lines(dssdev->ctrl.pixel_size); + dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); { struct omap_video_timings timings = { @@ -2964,7 +2967,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) .vbp = 0, }; - dispc_set_lcd_timings(&timings); + dispc_set_lcd_timings(dssdev->manager->id, &timings); } return 0; @@ -2987,7 +2990,7 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) cinfo.regm = dssdev->phy.dsi.div.regm; cinfo.regm3 = dssdev->phy.dsi.div.regm3; cinfo.regm4 = dssdev->phy.dsi.div.regm4; - r = dsi_calc_clock_rates(&cinfo); + r = dsi_calc_clock_rates(dssdev, &cinfo); if (r) { DSSERR("Failed to calc dsi clocks\n"); return r; @@ -3019,7 +3022,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) return r; } - r = dispc_set_clock_div(&dispc_cinfo); + r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) { DSSERR("Failed to set dispc clocks\n"); return r; diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 5c7940d5f282..b394951120ac 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -333,9 +333,9 @@ void dispc_disable_sidle(void); void 
dispc_lcd_enable_signal_polarity(bool act_high); void dispc_lcd_enable_signal(bool enable); void dispc_pck_free_enable(bool enable); -void dispc_enable_fifohandcheck(bool enable); +void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable); -void dispc_set_lcd_size(u16 width, u16 height); +void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height); void dispc_set_digit_size(u16 width, u16 height); u32 dispc_get_plane_fifo_size(enum omap_plane plane); void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high); @@ -359,7 +359,8 @@ int dispc_setup_plane(enum omap_plane plane, bool ilace, enum omap_dss_rotation_type rotation_type, u8 rotation, bool mirror, - u8 global_alpha); + u8 global_alpha, u8 pre_mult_alpha, + enum omap_channel channel); bool dispc_go_busy(enum omap_channel channel); void dispc_go(enum omap_channel channel); @@ -368,9 +369,11 @@ bool dispc_is_channel_enabled(enum omap_channel channel); int dispc_enable_plane(enum omap_plane plane, bool enable); void dispc_enable_replication(enum omap_plane plane, bool enable); -void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode); -void dispc_set_tft_data_lines(u8 data_lines); -void dispc_set_lcd_display_type(enum omap_lcd_display_type type); +void dispc_set_parallel_interface_mode(enum omap_channel channel, + enum omap_parallel_interface_mode mode); +void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines); +void dispc_set_lcd_display_type(enum omap_channel channel, + enum omap_lcd_display_type type); void dispc_set_loadmode(enum omap_dss_load_mode mode); void dispc_set_default_color(enum omap_channel channel, u32 color); @@ -387,17 +390,21 @@ bool dispc_trans_key_enabled(enum omap_channel ch); bool dispc_alpha_blending_enabled(enum omap_channel ch); bool dispc_lcd_timings_ok(struct omap_video_timings *timings); -void dispc_set_lcd_timings(struct omap_video_timings *timings); +void dispc_set_lcd_timings(enum omap_channel channel, + struct omap_video_timings *timings); unsigned long dispc_fclk_rate(void); -unsigned long dispc_lclk_rate(void); -unsigned long dispc_pclk_rate(void); -void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb); +unsigned long dispc_lclk_rate(enum omap_channel channel); +unsigned long dispc_pclk_rate(enum omap_channel channel); +void dispc_set_pol_freq(enum omap_channel channel, + enum omap_panel_config config, u8 acbi, u8 acb); void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo); int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); -int dispc_set_clock_div(struct dispc_clock_info *cinfo); -int dispc_get_clock_div(struct dispc_clock_info *cinfo); +int dispc_set_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo); +int dispc_get_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo); /* VENC */ @@ -424,8 +431,8 @@ void rfbi_dump_regs(struct seq_file *s); int rfbi_configure(int rfbi_module, int bpp, int lines); void rfbi_enable_rfbi(bool enable); -void rfbi_transfer_area(u16 width, u16 height, - void (callback)(void *data), void *data); +void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, + u16 height, void (callback)(void *data), void *data); void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t); unsigned long rfbi_get_max_tx_rate(void); int rfbi_init_display(struct omap_dss_device *display); diff --git a/drivers/video/omap2/dss/dss_features.c 
b/drivers/video/omap2/dss/dss_features.c index 867f68de125f..cf3ef696e141 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -82,6 +82,18 @@ static const enum omap_display_type omap3_dss_supported_displays[] = { OMAP_DISPLAY_TYPE_VENC, }; +static const enum omap_display_type omap4_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_VENC, + + /* OMAP_DSS_CHANNEL_LCD2 */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_DSI, +}; + static const enum omap_color_mode omap2_dss_supported_color_modes[] = { /* OMAP_DSS_GFX */ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | @@ -127,6 +139,10 @@ static struct omap_dss_features omap2_dss_features = { .reg_fields = omap2_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields), + .has_feature = + FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL | + FEAT_PCKFREEENABLE | FEAT_FUNCGATED, + .num_mgrs = 2, .num_ovls = 3, .supported_displays = omap2_dss_supported_displays, @@ -134,11 +150,29 @@ static struct omap_dss_features omap2_dss_features = { }; /* OMAP3 DSS Features */ -static struct omap_dss_features omap3_dss_features = { +static struct omap_dss_features omap3430_dss_features = { + .reg_fields = omap3_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), + + .has_feature = + FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL | + FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | + FEAT_FUNCGATED, + + .num_mgrs = 2, + .num_ovls = 3, + .supported_displays = omap3_dss_supported_displays, + .supported_color_modes = omap3_dss_supported_color_modes, +}; + +static struct omap_dss_features omap3630_dss_features = { .reg_fields = omap3_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), - .has_feature = FEAT_GLOBAL_ALPHA, + .has_feature = + FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL | + FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | + FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED, .num_mgrs = 2, .num_ovls = 3, @@ -146,6 +180,21 @@ static struct omap_dss_features omap3_dss_features = { .supported_color_modes = omap3_dss_supported_color_modes, }; +/* OMAP4 DSS Features */ +static struct omap_dss_features omap4_dss_features = { + .reg_fields = omap3_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), + + .has_feature = + FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | + FEAT_MGR_LCD2, + + .num_mgrs = 3, + .num_ovls = 3, + .supported_displays = omap4_dss_supported_displays, + .supported_color_modes = omap3_dss_supported_color_modes, +}; + /* Functions returning values related to a DSS feature */ int dss_feat_get_num_mgrs(void) { @@ -167,6 +216,13 @@ enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane) return omap_current_dss_features->supported_color_modes[plane]; } +bool dss_feat_color_mode_supported(enum omap_plane plane, + enum omap_color_mode color_mode) +{ + return omap_current_dss_features->supported_color_modes[plane] & + color_mode; +} + /* DSS has_feature check */ bool dss_has_feature(enum dss_feat_id id) { @@ -186,6 +242,10 @@ void dss_features_init(void) { if (cpu_is_omap24xx()) omap_current_dss_features = &omap2_dss_features; + else if (cpu_is_omap3630()) + omap_current_dss_features = &omap3630_dss_features; + else if (cpu_is_omap34xx()) + omap_current_dss_features = &omap3430_dss_features; else - omap_current_dss_features = &omap3_dss_features; + omap_current_dss_features = &omap4_dss_features; } diff --git 
a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index cb231eaa9b31..b9c70be92588 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -20,13 +20,19 @@ #ifndef __OMAP2_DSS_FEATURES_H #define __OMAP2_DSS_FEATURES_H -#define MAX_DSS_MANAGERS 2 +#define MAX_DSS_MANAGERS 3 #define MAX_DSS_OVERLAYS 3 /* DSS has feature id */ enum dss_feat_id { FEAT_GLOBAL_ALPHA = 1 << 0, FEAT_GLOBAL_ALPHA_VID1 = 1 << 1, + FEAT_PRE_MULT_ALPHA = 1 << 2, + FEAT_LCDENABLEPOL = 1 << 3, + FEAT_LCDENABLESIGNAL = 1 << 4, + FEAT_PCKFREEENABLE = 1 << 5, + FEAT_FUNCGATED = 1 << 6, + FEAT_MGR_LCD2 = 1 << 7, }; /* DSS register field id */ @@ -43,6 +49,8 @@ int dss_feat_get_num_mgrs(void); int dss_feat_get_num_ovls(void); enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel); enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); +bool dss_feat_color_mode_supported(enum omap_plane plane, + enum omap_color_mode color_mode); bool dss_has_feature(enum dss_feat_id id); void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 545e9b9a4d92..172d4e697309 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -406,6 +406,7 @@ struct overlay_cache_data { u16 out_width; /* if 0, out_width == width */ u16 out_height; /* if 0, out_height == height */ u8 global_alpha; + u8 pre_mult_alpha; enum omap_channel channel; bool replication; @@ -512,11 +513,14 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) unsigned long timeout = msecs_to_jiffies(500); u32 irq; - if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) + if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) { irq = DISPC_IRQ_EVSYNC_ODD; - else - irq = DISPC_IRQ_VSYNC; - + } else { + if (mgr->id == OMAP_DSS_CHANNEL_LCD) + irq = DISPC_IRQ_VSYNC; + else + irq = DISPC_IRQ_VSYNC2; + } return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); } @@ -524,7 +528,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) { unsigned long timeout = msecs_to_jiffies(500); struct manager_cache_data *mc; - enum omap_channel channel; u32 irq; int r; int i; @@ -535,7 +538,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) { irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; - channel = OMAP_DSS_CHANNEL_DIGIT; } else { if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { enum omap_dss_update_mode mode; @@ -543,11 +545,14 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) if (mode != OMAP_DSS_UPDATE_AUTO) return 0; - irq = DISPC_IRQ_FRAMEDONE; + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? + DISPC_IRQ_FRAMEDONE + : DISPC_IRQ_FRAMEDONE2; } else { - irq = DISPC_IRQ_VSYNC; + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? 
+ DISPC_IRQ_VSYNC + : DISPC_IRQ_VSYNC2; } - channel = OMAP_DSS_CHANNEL_LCD; } mc = &dss_cache.manager_cache[mgr->id]; @@ -594,7 +599,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) { unsigned long timeout = msecs_to_jiffies(500); - enum omap_channel channel; struct overlay_cache_data *oc; struct omap_dss_device *dssdev; u32 irq; @@ -611,7 +615,6 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) { irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; - channel = OMAP_DSS_CHANNEL_DIGIT; } else { if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { enum omap_dss_update_mode mode; @@ -619,11 +622,14 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) if (mode != OMAP_DSS_UPDATE_AUTO) return 0; - irq = DISPC_IRQ_FRAMEDONE; + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? + DISPC_IRQ_FRAMEDONE + : DISPC_IRQ_FRAMEDONE2; } else { - irq = DISPC_IRQ_VSYNC; + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? + DISPC_IRQ_VSYNC + : DISPC_IRQ_VSYNC2; } - channel = OMAP_DSS_CHANNEL_LCD; } oc = &dss_cache.overlay_cache[ovl->id]; @@ -842,7 +848,9 @@ static int configure_overlay(enum omap_plane plane) c->rotation_type, c->rotation, c->mirror, - c->global_alpha); + c->global_alpha, + c->pre_mult_alpha, + c->channel); if (r) { /* this shouldn't happen */ @@ -894,10 +902,10 @@ static int configure_dispc(void) r = 0; busy = false; - mgr_busy[0] = dispc_go_busy(0); - mgr_busy[1] = dispc_go_busy(1); - mgr_go[0] = false; - mgr_go[1] = false; + for (i = 0; i < num_mgrs; i++) { + mgr_busy[i] = dispc_go_busy(i); + mgr_go[i] = false; + } /* Commit overlay settings */ for (i = 0; i < num_ovls; ++i) { @@ -1156,9 +1164,10 @@ static void dss_apply_irq_handler(void *data, u32 mask) const int num_mgrs = dss_feat_get_num_mgrs(); int i, r; bool mgr_busy[MAX_DSS_MANAGERS]; + u32 irq_mask; - mgr_busy[0] = dispc_go_busy(0); - mgr_busy[1] = dispc_go_busy(1); + for (i = 0; i < num_mgrs; i++) + mgr_busy[i] = dispc_go_busy(i); spin_lock(&dss_cache.lock); @@ -1179,8 +1188,8 @@ static void dss_apply_irq_handler(void *data, u32 mask) goto end; /* re-read busy flags */ - mgr_busy[0] = dispc_go_busy(0); - mgr_busy[1] = dispc_go_busy(1); + for (i = 0; i < num_mgrs; i++) + mgr_busy[i] = dispc_go_busy(i); /* keep running as long as there are busy managers, so that * we can collect overlay-applied information */ @@ -1189,9 +1198,12 @@ static void dss_apply_irq_handler(void *data, u32 mask) goto end; } - omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, - DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD | - DISPC_IRQ_EVSYNC_EVEN); + irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD | + DISPC_IRQ_EVSYNC_EVEN; + if (dss_has_feature(FEAT_MGR_LCD2)) + irq_mask |= DISPC_IRQ_VSYNC2; + + omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask); dss_cache.irq_enabled = false; end: @@ -1265,6 +1277,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) oc->out_width = ovl->info.out_width; oc->out_height = ovl->info.out_height; oc->global_alpha = ovl->info.global_alpha; + oc->pre_mult_alpha = ovl->info.pre_mult_alpha; oc->replication = dss_use_replication(dssdev, ovl->info.color_mode); @@ -1383,9 +1396,14 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) r = 0; dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); if (!dss_cache.irq_enabled) { - r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, - DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD | - DISPC_IRQ_EVSYNC_EVEN); + u32 mask; + + 
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD | + DISPC_IRQ_EVSYNC_EVEN; + if (dss_has_feature(FEAT_MGR_LCD2)) + mask |= DISPC_IRQ_VSYNC2; + + r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask); dss_cache.irq_enabled = true; } configure_dispc(); @@ -1477,6 +1495,10 @@ int dss_init_overlay_managers(struct platform_device *pdev) mgr->name = "tv"; mgr->id = OMAP_DSS_CHANNEL_DIGIT; break; + case 2: + mgr->name = "lcd2"; + mgr->id = OMAP_DSS_CHANNEL_LCD2; + break; } mgr->set_device = &omap_dss_set_device; diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index 75642c22cac7..456efef03c20 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -257,6 +257,43 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, return size; } +static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + ovl->info.pre_mult_alpha); +} + +static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + /* only GFX and Video2 plane support pre alpha multiplied + * set zero for Video1 plane + */ + if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && + ovl->id == OMAP_DSS_VIDEO1) + info.pre_mult_alpha = 0; + else + info.pre_mult_alpha = simple_strtoul(buf, NULL, 10); + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + struct overlay_attribute { struct attribute attr; ssize_t (*show)(struct omap_overlay *, char *); @@ -280,6 +317,9 @@ static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR, overlay_enabled_show, overlay_enabled_store); static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR, overlay_global_alpha_show, overlay_global_alpha_store); +static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR, + overlay_pre_mult_alpha_show, + overlay_pre_mult_alpha_store); static struct attribute *overlay_sysfs_attrs[] = { &overlay_attr_name.attr, @@ -290,6 +330,7 @@ static struct attribute *overlay_sysfs_attrs[] = { &overlay_attr_output_size.attr, &overlay_attr_enabled.attr, &overlay_attr_global_alpha.attr, + &overlay_attr_pre_mult_alpha.attr, NULL }; @@ -623,12 +664,22 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) int i; struct omap_overlay_manager *lcd_mgr; struct omap_overlay_manager *tv_mgr; + struct omap_overlay_manager *lcd2_mgr = NULL; struct omap_overlay_manager *mgr = NULL; lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); - - if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) { + if (dss_has_feature(FEAT_MGR_LCD2)) + lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2); + + if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { + if (!lcd2_mgr->device || force) { + if (lcd2_mgr->device) + lcd2_mgr->unset_device(lcd2_mgr); + lcd2_mgr->set_device(lcd2_mgr, dssdev); + mgr = lcd2_mgr; + } + } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) { if (!lcd_mgr->device || force) { if (lcd_mgr->device) lcd_mgr->unset_device(lcd_mgr); diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index bbe62464e92d..10a2ffe02882 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -301,8 +301,8 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, } EXPORT_SYMBOL(omap_rfbi_write_pixels); 
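
The overlay sysfs code above adds a pre_mult_alpha file alongside global_alpha. A minimal user-space sketch of how it could be exercised follows; the overlay path is an assumed example (omapdss creates overlayN directories under its platform device, but the exact index depends on the board), and the attribute simply parses a decimal value, as overlay_pre_mult_alpha_store() shows.

/*
 * Assumed example: toggling the new pre_mult_alpha attribute from
 * user space.  The sysfs path is illustrative and not taken from
 * this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr =
		"/sys/devices/platform/omapdss/overlay1/pre_mult_alpha";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror(attr);
		return 1;
	}
	/* overlay_pre_mult_alpha_store() parses this as a decimal value */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

A non-zero value requests pre-multiplied alpha for the plane; as the store handler above shows, Video1 is forced to zero on chips without FEAT_GLOBAL_ALPHA_VID1.
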
-void rfbi_transfer_area(u16 width, u16 height, - void (callback)(void *data), void *data) +void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, + u16 height, void (*callback)(void *data), void *data) { u32 l; @@ -311,9 +311,9 @@ void rfbi_transfer_area(u16 width, u16 height, DSSDBG("rfbi_transfer_area %dx%d\n", width, height); - dispc_set_lcd_size(width, height); + dispc_set_lcd_size(dssdev->manager->id, width, height); - dispc_enable_channel(OMAP_DSS_CHANNEL_LCD, true); + dispc_enable_channel(dssdev->manager->id, true); rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; @@ -887,7 +887,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { dss_setup_partial_planes(dssdev, x, y, w, h, true); - dispc_set_lcd_size(*w, *h); + dispc_set_lcd_size(dssdev->manager->id, *w, *h); } return 0; @@ -899,7 +899,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *), void *data) { if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { - rfbi_transfer_area(w, h, callback, data); + rfbi_transfer_area(dssdev, w, h, callback, data); } else { struct omap_overlay *ovl; void __iomem *addr; @@ -1018,11 +1018,13 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) goto err1; } - dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); + dispc_set_lcd_display_type(dssdev->manager->id, + OMAP_DSS_LCD_DISPLAY_TFT); - dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI); + dispc_set_parallel_interface_mode(dssdev->manager->id, + OMAP_DSS_PARALLELMODE_RFBI); - dispc_set_tft_data_lines(dssdev->ctrl.pixel_size); + dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); rfbi_configure(dssdev->phy.rfbi.channel, dssdev->ctrl.pixel_size, diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index ee07a3cc22ef..b64adf7dfc88 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -35,12 +35,16 @@ static struct { struct regulator *vdds_sdi_reg; } sdi; -static void sdi_basic_init(void) +static void sdi_basic_init(struct omap_dss_device *dssdev) + { - dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS); + dispc_set_parallel_interface_mode(dssdev->manager->id, + OMAP_DSS_PARALLELMODE_BYPASS); + + dispc_set_lcd_display_type(dssdev->manager->id, + OMAP_DSS_LCD_DISPLAY_TFT); - dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); - dispc_set_tft_data_lines(24); + dispc_set_tft_data_lines(dssdev->manager->id, 24); dispc_lcd_enable_signal_polarity(1); } @@ -68,20 +72,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (!sdi.skip_init) dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); - sdi_basic_init(); + sdi_basic_init(dssdev); /* 15.5.9.1.2 */ dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF; - dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi, - dssdev->panel.acb); + dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, + dssdev->panel.acbi, dssdev->panel.acb); if (!sdi.skip_init) { r = dss_calc_clock_div(1, t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); } else { r = dss_get_clock_div(&dss_cinfo); - r = dispc_get_clock_div(&dispc_cinfo); + r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo); } if (r) @@ -102,13 +106,13 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) } - dispc_set_lcd_timings(t); + dispc_set_lcd_timings(dssdev->manager->id, t); r = dss_set_clock_div(&dss_cinfo); if (r) goto err2; - r = 
dispc_set_clock_div(&dispc_cinfo); + r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) goto err2; diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 6a704f176c22..4fdab8e9c496 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -2132,8 +2132,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) char *str, *options, *this_opt; int r = 0; - str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL); - strcpy(str, def_mode); + str = kstrdup(def_mode, GFP_KERNEL); + if (!str) + return -ENOMEM; options = str; while (!r && (this_opt = strsep(&options, ",")) != NULL) { diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index 618f36bec10d..da388186d617 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops riva_bl_ops = { +static const struct backlight_ops riva_bl_ops = { .get_brightness = riva_bl_get_brightness, .update_status = riva_bl_update_status, }; diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 46b430978bcc..61c819e35f7f 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -918,9 +919,9 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev, } info->clk = clk_get(NULL, "lcd"); - if (!info->clk || IS_ERR(info->clk)) { + if (IS_ERR(info->clk)) { printk(KERN_ERR "failed to get lcd clock source\n"); - ret = -ENOENT; + ret = PTR_ERR(info->clk); goto release_irq; } diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index 8c59cc8c5a9c..74d9f546a2e8 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -221,6 +222,7 @@ struct sh_hdmi { struct delayed_work edid_work; struct fb_var_screeninfo var; struct fb_monspecs monspec; + struct notifier_block notifier; }; static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) @@ -737,7 +739,7 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate, struct fb_modelist *modelist = NULL; unsigned int f_width = 0, f_height = 0, f_refresh = 0; unsigned long found_rate_error = ULONG_MAX; /* silly compiler... */ - bool exact_match = false; + bool scanning = false, preferred_bad = false; u8 edid[128]; char *forced; int i; @@ -800,6 +802,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate, if (i < 2) { f_width = 0; f_height = 0; + } else { + /* The user wants us to use the EDID data */ + scanning = true; } dev_dbg(hdmi->dev, "Forced mode %ux%u@%uHz\n", f_width, f_height, f_refresh); @@ -807,37 +812,56 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate, /* Walk monitor modes to find the best or the exact match */ for (i = 0, mode = hdmi->monspec.modedb; - f_width && f_height && i < hdmi->monspec.modedb_len && !exact_match; + i < hdmi->monspec.modedb_len && scanning; i++, mode++) { unsigned long rate_error; - /* No interest in unmatching modes */ - if (f_width != mode->xres || f_height != mode->yres) + if (!f_width && !f_height) { + /* + * A parameter string "video=sh_mobile_lcdc:0x0" means + * use the preferred EDID mode. If it is rejected by + * .fb_check_var(), keep looking, until an acceptable + * one is found. 
+ */ + if ((mode->flag & FB_MODE_IS_FIRST) || preferred_bad) + scanning = false; + else + continue; + } else if (f_width != mode->xres || f_height != mode->yres) { + /* No interest in unmatching modes */ continue; + } rate_error = sh_hdmi_rate_error(hdmi, mode, hdmi_rate, parent_rate); - if (f_refresh == mode->refresh || (!f_refresh && !rate_error)) - /* - * Exact match if either the refresh rate matches or it - * hasn't been specified and we've found a mode, for - * which we can configure the clock precisely - */ - exact_match = true; - else if (found && found_rate_error <= rate_error) - /* - * We otherwise search for the closest matching clock - * rate - either if no refresh rate has been specified - * or we cannot find an exactly matching one - */ - continue; + if (scanning) { + if (f_refresh == mode->refresh || (!f_refresh && !rate_error)) + /* + * Exact match if either the refresh rate + * matches or it hasn't been specified and we've + * found a mode, for which we can configure the + * clock precisely + */ + scanning = false; + else if (found && found_rate_error <= rate_error) + /* + * We otherwise search for the closest matching + * clock rate - either if no refresh rate has + * been specified or we cannot find an exactly + * matching one + */ + continue; + } /* Check if supported: sufficient fb memory, supported clock-rate */ fb_videomode_to_var(var, mode); + var->bits_per_pixel = info->var.bits_per_pixel; + if (info && info->fbops->fb_check_var && info->fbops->fb_check_var(var, info)) { - exact_match = false; + scanning = true; + preferred_bad = true; continue; } @@ -855,9 +879,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate, * driver, and passing ->info with HDMI platform data. */ if (info && !found) { - modelist = hdmi->info->modelist.next && - !list_empty(&hdmi->info->modelist) ? - list_entry(hdmi->info->modelist.next, + modelist = info->modelist.next && + !list_empty(&info->modelist) ? 
+ list_entry(info->modelist.next, struct fb_modelist, list) : NULL; @@ -1100,6 +1124,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) mutex_lock(&hdmi->mutex); if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) { + struct fb_info *info = hdmi->info; unsigned long parent_rate = 0, hdmi_rate; /* A device has been plugged in */ @@ -1121,22 +1146,21 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) /* Switched to another (d) power-save mode */ msleep(10); - if (!hdmi->info) + if (!info) goto out; - ch = hdmi->info->par; + ch = info->par; acquire_console_sem(); /* HDMI plug in */ if (!sh_hdmi_must_reconfigure(hdmi) && - hdmi->info->state == FBINFO_STATE_RUNNING) { + info->state == FBINFO_STATE_RUNNING) { /* * First activation with the default monitor - just turn * on, if we run a resume here, the logo disappears */ - if (lock_fb_info(hdmi->info)) { - struct fb_info *info = hdmi->info; + if (lock_fb_info(info)) { info->var.width = hdmi->var.width; info->var.height = hdmi->var.height; sh_hdmi_display_on(hdmi, info); @@ -1144,7 +1168,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) } } else { /* New monitor or have to wake up */ - fb_set_suspend(hdmi->info, 0); + fb_set_suspend(info, 0); } release_console_sem(); @@ -1174,13 +1198,6 @@ out: dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, pdata->lcd_dev); } -static int sh_hdmi_notify(struct notifier_block *nb, - unsigned long action, void *data); - -static struct notifier_block sh_hdmi_notifier = { - .notifier_call = sh_hdmi_notify, -}; - static int sh_hdmi_notify(struct notifier_block *nb, unsigned long action, void *data) { @@ -1190,7 +1207,7 @@ static int sh_hdmi_notify(struct notifier_block *nb, struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg; struct sh_hdmi *hdmi = board_cfg->board_data; - if (nb != &sh_hdmi_notifier || !hdmi || hdmi->info != info) + if (!hdmi || nb != &hdmi->notifier || hdmi->info != info) return NOTIFY_DONE; switch(action) { @@ -1209,11 +1226,11 @@ static int sh_hdmi_notify(struct notifier_block *nb, * temporarily, synchronise with the work queue and re-acquire * the info->lock. */ - unlock_fb_info(hdmi->info); + unlock_fb_info(info); mutex_lock(&hdmi->mutex); hdmi->info = NULL; mutex_unlock(&hdmi->mutex); - lock_fb_info(hdmi->info); + lock_fb_info(info); return NOTIFY_OK; } return NOTIFY_DONE; @@ -1311,6 +1328,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) goto ecodec; } + hdmi->notifier.notifier_call = sh_hdmi_notify; + fb_register_client(&hdmi->notifier); + return 0; ecodec: @@ -1341,6 +1361,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev) snd_soc_unregister_codec(&pdev->dev); + fb_unregister_client(&hdmi->notifier); + board_cfg->display_on = NULL; board_cfg->display_off = NULL; board_cfg->board_data = NULL; diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c index dee64c3b1e67..2ab704118c44 100644 --- a/drivers/video/sstfb.c +++ b/drivers/video/sstfb.c @@ -536,7 +536,7 @@ static int sstfb_set_par(struct fb_info *info) fbiinit2 = sst_read(FBIINIT2); fbiinit3 = sst_read(FBIINIT3); - /* everything is reset. we enable fbiinit2/3 remap : dac acces ok */ + /* everything is reset. 
we enable fbiinit2/3 remap : dac access ok */ pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, PCI_EN_INIT_WR | PCI_REMAP_DAC ); diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c index 7617f12e4fd7..0e120d67eb65 100644 --- a/drivers/video/vt8500lcdfb.c +++ b/drivers/video/vt8500lcdfb.c @@ -215,6 +215,33 @@ static int vt8500lcd_pan_display(struct fb_var_screeninfo *var, return 0; } +/* + * vt8500lcd_blank(): + * Blank the display by setting all palette values to zero. Note, + * True Color modes do not really use the palette, so this will not + * blank the display in all modes. + */ +static int vt8500lcd_blank(int blank, struct fb_info *info) +{ + int i; + + switch (blank) { + case FB_BLANK_POWERDOWN: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_NORMAL: + if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || + info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) + for (i = 0; i < 256; i++) + vt8500lcd_setcolreg(i, 0, 0, 0, 0, info); + case FB_BLANK_UNBLANK: + if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || + info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) + fb_set_cmap(&info->cmap, info); + } + return 0; +} + static struct fb_ops vt8500lcd_ops = { .owner = THIS_MODULE, .fb_set_par = vt8500lcd_set_par, @@ -225,6 +252,7 @@ static struct fb_ops vt8500lcd_ops = { .fb_sync = wmt_ge_sync, .fb_ioctl = vt8500lcd_ioctl, .fb_pan_display = vt8500lcd_pan_display, + .fb_blank = vt8500lcd_blank, }; static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id) diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig index 1f51366417b9..f0c909625bd1 100644 --- a/drivers/w1/slaves/Kconfig +++ b/drivers/w1/slaves/Kconfig @@ -16,6 +16,17 @@ config W1_SLAVE_SMEM Say Y here if you want to connect 1-wire simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. +config W1_SLAVE_DS2423 + tristate "Counter 1-wire device (DS2423)" + select CRC16 + help + If you enable this you can read the counter values available + in the DS2423 chipset from the w1_slave file under the + sys file system. + + Say Y here if you want to use a 1-wire + counter family device (DS2423). + config W1_SLAVE_DS2431 tristate "1kb EEPROM family support (DS2431)" help diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile index f1f51f19b129..3c76350a24f7 100644 --- a/drivers/w1/slaves/Makefile +++ b/drivers/w1/slaves/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o +obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c new file mode 100644 index 000000000000..7a7dbe5026f1 --- /dev/null +++ b/drivers/w1/slaves/w1_ds2423.c @@ -0,0 +1,166 @@ +/* + * w1_ds2423.c + * + * Copyright (c) 2010 Mika Laitio + * + * This driver will read and write the value of 4 counters to w1_slave file in + * sys filesystem. + * Inspired by the w1_therm and w1_ds2431 drivers. + * + * This program is free software; you can redistribute it and/or modify + * it under the therms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../w1.h" +#include "../w1_int.h" +#include "../w1_family.h" + +#define CRC16_VALID 0xb001 +#define CRC16_INIT 0 + +#define COUNTER_COUNT 4 +#define READ_BYTE_COUNT 42 + +static ssize_t w1_counter_read(struct device *device, + struct device_attribute *attr, char *buf); + +static struct device_attribute w1_counter_attr = + __ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL); + +static ssize_t w1_counter_read(struct device *device, + struct device_attribute *attr, char *out_buf) +{ + struct w1_slave *sl = dev_to_w1_slave(device); + struct w1_master *dev = sl->master; + u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT]; + u8 wrbuf[3]; + int rom_addr; + int read_byte_count; + int result; + ssize_t c; + int ii; + int p; + int crc; + + c = PAGE_SIZE; + rom_addr = (12 << 5) + 31; + wrbuf[0] = 0xA5; + wrbuf[1] = rom_addr & 0xFF; + wrbuf[2] = rom_addr >> 8; + mutex_lock(&dev->mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_block(dev, wrbuf, 3); + read_byte_count = 0; + for (p = 0; p < 4; p++) { + /* + * 1 byte for first bytes in ram page read + * 4 bytes for counter + * 4 bytes for zero bits + * 2 bytes for crc + * 31 remaining bytes from the ram page + */ + read_byte_count += w1_read_block(dev, + rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT); + for (ii = 0; ii < READ_BYTE_COUNT; ++ii) + c -= snprintf(out_buf + PAGE_SIZE - c, + c, "%02x ", + rbuf[(p * READ_BYTE_COUNT) + ii]); + if (read_byte_count != (p + 1) * READ_BYTE_COUNT) { + dev_warn(device, + "w1_counter_read() returned %u bytes " + "instead of %d bytes wanted.\n", + read_byte_count, + READ_BYTE_COUNT); + c -= snprintf(out_buf + PAGE_SIZE - c, + c, "crc=NO\n"); + } else { + if (p == 0) { + crc = crc16(CRC16_INIT, wrbuf, 3); + crc = crc16(crc, rbuf, 11); + } else { + /* + * DS2423 calculates crc from all bytes + * read after the previous crc bytes. 
+ */ + crc = crc16(CRC16_INIT, + (rbuf + 11) + + ((p - 1) * READ_BYTE_COUNT), + READ_BYTE_COUNT); + } + if (crc == CRC16_VALID) { + result = 0; + for (ii = 4; ii > 0; ii--) { + result <<= 8; + result |= rbuf[(p * + READ_BYTE_COUNT) + ii]; + } + c -= snprintf(out_buf + PAGE_SIZE - c, + c, "crc=YES c=%d\n", result); + } else { + c -= snprintf(out_buf + PAGE_SIZE - c, + c, "crc=NO\n"); + } + } + } + } else { + c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error"); + } + mutex_unlock(&dev->mutex); + return PAGE_SIZE - c; +} + +static int w1_f1d_add_slave(struct w1_slave *sl) +{ + return device_create_file(&sl->dev, &w1_counter_attr); +} + +static void w1_f1d_remove_slave(struct w1_slave *sl) +{ + device_remove_file(&sl->dev, &w1_counter_attr); +} + +static struct w1_family_ops w1_f1d_fops = { + .add_slave = w1_f1d_add_slave, + .remove_slave = w1_f1d_remove_slave, +}; + +static struct w1_family w1_family_1d = { + .fid = W1_COUNTER_DS2423, + .fops = &w1_f1d_fops, +}; + +static int __init w1_f1d_init(void) +{ + return w1_register_family(&w1_family_1d); +} + +static void __exit w1_f1d_exit(void) +{ + w1_unregister_family(&w1_family_1d); +} + +module_init(w1_f1d_init); +module_exit(w1_f1d_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mika Laitio "); +MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram"); diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index 3ca1b9298f21..f3b636d7cafe 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -30,6 +30,7 @@ #define W1_FAMILY_SMEM_01 0x01 #define W1_FAMILY_SMEM_81 0x81 #define W1_THERM_DS18S20 0x10 +#define W1_COUNTER_DS2423 0x1D #define W1_THERM_DS1822 0x22 #define W1_EEPROM_DS2433 0x23 #define W1_THERM_DS18B20 0x28 diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index a5ad77ef4266..2e2400e7322e 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -409,15 +409,26 @@ config ALIM7101_WDT Most people will say N. config F71808E_WDT - tristate "Fintek F71808E, F71882FG and F71889FG Watchdog" + tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog" depends on X86 && EXPERIMENTAL help This is the driver for the hardware watchdog on the Fintek - F71808E, F71882FG and F71889FG Super I/O controllers. + F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers. You can compile this driver directly into the kernel, or use it as a module. The module will be called f71808e_wdt. +config SP5100_TCO + tristate "AMD/ATI SP5100 TCO Timer/Watchdog" + depends on X86 && PCI + ---help--- + Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO + (Total Cost of Ownership) timer is a watchdog timer that will reboot + the machine after its expiration. The expiration time can be + configured with the "heartbeat" parameter. + + To compile this driver as a module, choose M here: the + module will be called sp5100_tco. config GEODE_WDT tristate "AMD Geode CS5535/CS5536 Watchdog" @@ -631,6 +642,24 @@ config PC87413_WDT Most people will say N. +config NV_TCO + tristate "nVidia TCO Timer/Watchdog" + depends on X86 && PCI + ---help--- + Hardware driver for the TCO timer built into the nVidia Hub family + (such as the MCP51). The TCO (Total Cost of Ownership) timer is a + watchdog timer that will reboot the machine after its second + expiration. The expiration time can be configured with the + "heartbeat" parameter. 
+ + On some motherboards the driver may fail to reset the chipset's + NO_REBOOT flag which prevents the watchdog from rebooting the + machine. If this is the case you will get a kernel message like + "failed to reset NO_REBOOT flag, reboot disabled by hardware". + + To compile this driver as a module, choose M here: the + module will be called nv_tco. + config RDC321X_WDT tristate "RDC R-321x SoC watchdog" depends on X86_RDC321X @@ -722,14 +751,15 @@ config SMSC37B787_WDT Most people will say N. config W83627HF_WDT - tristate "W83627HF Watchdog Timer" + tristate "W83627HF/W83627DHG Watchdog Timer" depends on X86 ---help--- This is the driver for the hardware watchdog on the W83627HF chipset as used in Advantech PC-9578 and Tyan S2721-533 motherboards - (and likely others). This watchdog simply watches your kernel to - make sure it doesn't freeze, and if it does, it reboots your computer - after a certain amount of time. + (and likely others). The driver also supports the W83627DHG chip. + This watchdog simply watches your kernel to make sure it doesn't + freeze, and if it does, it reboots your computer after a certain + amount of time. To compile this driver as a module, choose M here: the module will be called w83627hf_wdt. @@ -832,10 +862,22 @@ config SBC_EPX_C3_WATCHDOG # M68K Architecture -# M68KNOMMU Architecture +config M548x_WATCHDOG + tristate "MCF548x watchdog support" + depends on M548x + help + To compile this driver as a module, choose M here: the + module will be called m548x_wdt. # MIPS Architecture +config ATH79_WDT + tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog" + depends on ATH79 + help + Hardware driver for the built-in watchdog timer on the Atheros + AR71XX/AR724X/AR913X SoCs. + config BCM47XX_WDT tristate "Broadcom BCM47xx Watchdog Timer" depends on BCM47XX diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 4b0ef386229d..dd776651917c 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o +obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o obj-$(CONFIG_GEODE_WDT) += geodewdt.o obj-$(CONFIG_SC520_WDT) += sc520_wdt.o obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o @@ -86,6 +87,7 @@ obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o +obj-$(CONFIG_NV_TCO) += nv_tco.o obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o obj-$(CONFIG_SBC8360_WDT) += sbc8360.o @@ -104,10 +106,10 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o # M32R Architecture # M68K Architecture - -# M68KNOMMU Architecture +obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o # MIPS Architecture +obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c index 1e9caea8ff8a..fa4d36033552 100644 --- a/drivers/watchdog/alim1535_wdt.c +++ b/drivers/watchdog/alim1535_wdt.c @@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this, * want to register another driver on the same PCI id. 
*/ -static struct pci_device_id ali_pci_tbl[] = { +static struct pci_device_id ali_pci_tbl[] __used = { { PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,}, { 0, }, diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index d8d4da9a483d..4b7a2b4138ed 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -430,7 +430,7 @@ err_out: module_init(alim7101_wdt_init); module_exit(alim7101_wdt_unload); -static struct pci_device_id alim7101_pci_tbl[] __devinitdata = { +static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) }, { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { } diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c new file mode 100644 index 000000000000..725c84bfdd76 --- /dev/null +++ b/drivers/watchdog/ath79_wdt.c @@ -0,0 +1,305 @@ +/* + * Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer. + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * This driver was based on: drivers/watchdog/ixp4xx_wdt.c + * Author: Deepak Saxena + * Copyright 2004 (c) MontaVista, Software, Inc. + * + * which again was based on sa1100 driver, + * Copyright (C) 2000 Oleg Drokin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRIVER_NAME "ath79-wdt" + +#define WDT_TIMEOUT 15 /* seconds */ + +#define WDOG_CTRL_LAST_RESET BIT(31) +#define WDOG_CTRL_ACTION_MASK 3 +#define WDOG_CTRL_ACTION_NONE 0 /* no action */ +#define WDOG_CTRL_ACTION_GPI 1 /* general purpose interrupt */ +#define WDOG_CTRL_ACTION_NMI 2 /* NMI */ +#define WDOG_CTRL_ACTION_FCR 3 /* full chip reset */ + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int timeout = WDT_TIMEOUT; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds " + "(default=" __MODULE_STRING(WDT_TIMEOUT) "s)"); + +static unsigned long wdt_flags; + +#define WDT_FLAGS_BUSY 0 +#define WDT_FLAGS_EXPECT_CLOSE 1 + +static struct clk *wdt_clk; +static unsigned long wdt_freq; +static int boot_status; +static int max_timeout; + +static inline void ath79_wdt_keepalive(void) +{ + ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout); +} + +static inline void ath79_wdt_enable(void) +{ + ath79_wdt_keepalive(); + ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR); +} + +static inline void ath79_wdt_disable(void) +{ + ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE); +} + +static int ath79_wdt_set_timeout(int val) +{ + if (val < 1 || val > max_timeout) + return -EINVAL; + + timeout = val; + ath79_wdt_keepalive(); + + return 0; +} + +static int ath79_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags)) + return -EBUSY; + + clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags); + ath79_wdt_enable(); + + return nonseekable_open(inode, file); +} + +static int ath79_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_FLAGS_EXPECT_CLOSE, 
&wdt_flags)) + ath79_wdt_disable(); + else { + pr_crit(DRIVER_NAME ": device closed unexpectedly, " + "watchdog timer will not stop!\n"); + ath79_wdt_keepalive(); + } + + clear_bit(WDT_FLAGS_BUSY, &wdt_flags); + clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags); + + return 0; +} + +static ssize_t ath79_wdt_write(struct file *file, const char *data, + size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags); + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + + if (c == 'V') + set_bit(WDT_FLAGS_EXPECT_CLOSE, + &wdt_flags); + } + } + + ath79_wdt_keepalive(); + } + + return len; +} + +static const struct watchdog_info ath79_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE | WDIOF_CARDRESET, + .firmware_version = 0, + .identity = "ATH79 watchdog", +}; + +static long ath79_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int err; + int t; + + switch (cmd) { + case WDIOC_GETSUPPORT: + err = copy_to_user(argp, &ath79_wdt_info, + sizeof(ath79_wdt_info)) ? -EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + err = put_user(0, p); + break; + + case WDIOC_GETBOOTSTATUS: + err = put_user(boot_status, p); + break; + + case WDIOC_KEEPALIVE: + ath79_wdt_keepalive(); + err = 0; + break; + + case WDIOC_SETTIMEOUT: + err = get_user(t, p); + if (err) + break; + + err = ath79_wdt_set_timeout(t); + if (err) + break; + + /* fallthrough */ + case WDIOC_GETTIMEOUT: + err = put_user(timeout, p); + break; + + default: + err = -ENOTTY; + break; + } + + return err; +} + +static const struct file_operations ath79_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = ath79_wdt_write, + .unlocked_ioctl = ath79_wdt_ioctl, + .open = ath79_wdt_open, + .release = ath79_wdt_release, +}; + +static struct miscdevice ath79_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ath79_wdt_fops, +}; + +static int __devinit ath79_wdt_probe(struct platform_device *pdev) +{ + u32 ctrl; + int err; + + wdt_clk = clk_get(&pdev->dev, "wdt"); + if (IS_ERR(wdt_clk)) + return PTR_ERR(wdt_clk); + + err = clk_enable(wdt_clk); + if (err) + goto err_clk_put; + + wdt_freq = clk_get_rate(wdt_clk); + if (!wdt_freq) { + err = -EINVAL; + goto err_clk_disable; + } + + max_timeout = (0xfffffffful / wdt_freq); + if (timeout < 1 || timeout > max_timeout) { + timeout = max_timeout; + dev_info(&pdev->dev, + "timeout value must be 0 < timeout < %d, using %d\n", + max_timeout, timeout); + } + + ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL); + boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? 
WDIOF_CARDRESET : 0; + + err = misc_register(&ath79_wdt_miscdev); + if (err) { + dev_err(&pdev->dev, + "unable to register misc device, err=%d\n", err); + goto err_clk_disable; + } + + return 0; + +err_clk_disable: + clk_disable(wdt_clk); +err_clk_put: + clk_put(wdt_clk); + return err; +} + +static int __devexit ath79_wdt_remove(struct platform_device *pdev) +{ + misc_deregister(&ath79_wdt_miscdev); + clk_disable(wdt_clk); + clk_put(wdt_clk); + return 0; +} + +static void ath97_wdt_shutdown(struct platform_device *pdev) +{ + ath79_wdt_disable(); +} + +static struct platform_driver ath79_wdt_driver = { + .remove = __devexit_p(ath79_wdt_remove), + .shutdown = ath97_wdt_shutdown, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init ath79_wdt_init(void) +{ + return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe); +} +module_init(ath79_wdt_init); + +static void __exit ath79_wdt_exit(void) +{ + platform_driver_unregister(&ath79_wdt_driver); +} +module_exit(ath79_wdt_exit); + +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver"); +MODULE_AUTHOR("Gabor Juhos WDTRST# */ + superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4); break; case f71882fg: /* Set pin 56 to WDTRST# */ - superio_set_bit(watchdog.sioaddr, 0x29, 1); + superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1); break; case f71889fg: /* set pin 40 to WDTRST# */ - superio_outb(watchdog.sioaddr, 0x2b, - superio_inb(watchdog.sioaddr, 0x2b) & 0xcf); + superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3, + superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf); break; default: @@ -711,16 +756,19 @@ static int __init f71808e_find(int sioaddr) case SIO_F71808_ID: watchdog.type = f71808fg; break; + case SIO_F71862_ID: + watchdog.type = f71862fg; + err = f71862fg_pin_configure(0); /* validate module parameter */ + break; + case SIO_F71869_ID: + watchdog.type = f71869; + break; case SIO_F71882_ID: watchdog.type = f71882fg; break; case SIO_F71889_ID: watchdog.type = f71889fg; break; - case SIO_F71862_ID: - /* These have a watchdog, though it isn't implemented (yet). */ - err = -ENOSYS; - goto exit; case SIO_F71858_ID: /* Confirmed (by datasheet) not to have a watchdog. */ err = -ENODEV; diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index b8838d2c67a6..2c6c2b4ad8bf 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -1,7 +1,7 @@ /* * intel TCO Watchdog Driver * - * (c) Copyright 2006-2009 Wim Van Sebroeck . + * (c) Copyright 2006-2010 Wim Van Sebroeck . 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -26,13 +26,15 @@ * document number 301473-002, 301474-026: 82801F (ICH6) * document number 313082-001, 313075-006: 631xESB, 632xESB * document number 307013-003, 307014-024: 82801G (ICH7) + * document number 322896-001, 322897-001: NM10 * document number 313056-003, 313057-017: 82801H (ICH8) * document number 316972-004, 316973-012: 82801I (ICH9) * document number 319973-002, 319974-002: 82801J (ICH10) * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) * document number 320066-003, 320257-008: EP80597 (IICH) - * document number TBD : Cougar Point (CPT) + * document number 324645-001, 324646-001: Cougar Point (CPT) * document number TBD : Patsburg (PBG) + * document number TBD : DH89xxCC */ /* @@ -85,6 +87,7 @@ enum iTCO_chipsets { TCO_ICH7DH, /* ICH7DH */ TCO_ICH7M, /* ICH7-M & ICH7-U */ TCO_ICH7MDH, /* ICH7-M DH */ + TCO_NM10, /* NM10 */ TCO_ICH8, /* ICH8 & ICH8R */ TCO_ICH8DH, /* ICH8DH */ TCO_ICH8DO, /* ICH8DO */ @@ -149,6 +152,7 @@ enum iTCO_chipsets { TCO_CPT31, /* Cougar Point */ TCO_PBG1, /* Patsburg */ TCO_PBG2, /* Patsburg */ + TCO_DH89XXCC, /* DH89xxCC */ }; static struct { @@ -174,6 +178,7 @@ static struct { {"ICH7DH", 2}, {"ICH7-M or ICH7-U", 2}, {"ICH7-M DH", 2}, + {"NM10", 2}, {"ICH8 or ICH8R", 2}, {"ICH8DH", 2}, {"ICH8DO", 2}, @@ -238,6 +243,7 @@ static struct { {"Cougar Point", 2}, {"Patsburg", 2}, {"Patsburg", 2}, + {"DH89xxCC", 2}, {NULL, 0} }; @@ -291,6 +297,7 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)}, { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)}, { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)}, + { ITCO_PCI_DEVICE(0x27bc, TCO_NM10)}, { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)}, { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)}, { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)}, @@ -355,6 +362,7 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, + { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index 2852bb2e3fd9..811471903e8a 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #define WDT_DEFAULT_TIME 5 /* seconds */ diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m548x_wdt.c new file mode 100644 index 000000000000..cabbcfe1c847 --- /dev/null +++ b/drivers/watchdog/m548x_wdt.c @@ -0,0 +1,227 @@ +/* + * drivers/watchdog/m548x_wdt.c + * + * Watchdog driver for ColdFire MCF548x processors + * Copyright 2010 (c) Philippe De Muyter + * + * Adapted from the IXP4xx watchdog driver, which carries these notices: + * + * Author: Deepak Saxena + * + * Copyright 2004 (c) MontaVista, Software, Inc. + * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int nowayout = WATCHDOG_NOWAYOUT; +static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ +static unsigned long wdt_status; + +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +static void wdt_enable(void) +{ + unsigned int gms0; + + /* preserve GPIO usage, if any */ + gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0); + if (gms0 & MCF_GPT_GMS_TMS_GPIO) + gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK + | MCF_GPT_GMS_OD); + else + gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD; + __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); + __raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) | + MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0); + gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE; + __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); +} + +static void wdt_disable(void) +{ + unsigned int gms0; + + /* disable watchdog */ + gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0); + gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE); + __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); +} + +static void wdt_keepalive(void) +{ + unsigned int gms0; + + gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0); + gms0 |= MCF_GPT_GMS_OCPW(0xA5); + __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); +} + +static int m548x_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_IN_USE, &wdt_status)) + return -EBUSY; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + wdt_enable(); + return nonseekable_open(inode, file); +} + +static ssize_t m548x_wdt_write(struct file *file, const char *data, + size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(WDT_OK_TO_CLOSE, &wdt_status); + } + } + wdt_keepalive(); + } + return len; +} + +static const struct watchdog_info ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING, + .identity = "Coldfire M548x Watchdog", +}; + +static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOTTY; + int time; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info *)arg, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_GETBOOTSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_KEEPALIVE: + wdt_keepalive(); + ret = 0; + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, (int *)arg); + if (ret) + break; + + if (time <= 0 || time > 30) { + ret = -EINVAL; + break; + } + + heartbeat = time; + wdt_enable(); + /* Fall through */ + + case WDIOC_GETTIMEOUT: + ret = put_user(heartbeat, (int *)arg); + break; + } + return ret; +} + +static int m548x_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) + wdt_disable(); + else { + printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " + "timer will not stop\n"); + wdt_keepalive(); + } + clear_bit(WDT_IN_USE, &wdt_status); + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + return 0; +} + + +static const struct file_operations m548x_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = m548x_wdt_write, + .unlocked_ioctl = m548x_wdt_ioctl, + .open = m548x_wdt_open, + .release = m548x_wdt_release, +}; + +static struct miscdevice m548x_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &m548x_wdt_fops, +}; + +static int __init m548x_wdt_init(void) +{ + if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, + "Coldfire M548x Watchdog")) { + printk(KERN_WARNING + "Coldfire M548x Watchdog : I/O region busy\n"); + return -EBUSY; + } + printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); + + return misc_register(&m548x_wdt_miscdev); +} + +static void __exit m548x_wdt_exit(void) +{ + misc_deregister(&m548x_wdt_miscdev); + release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); +} + +module_init(m548x_wdt_init); +module_exit(m548x_wdt_exit); + +MODULE_AUTHOR("Philippe De Muyter "); +MODULE_DESCRIPTION("Coldfire M548x Watchdog"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c new file mode 100644 index 000000000000..1a50aa7079bf --- /dev/null +++ b/drivers/watchdog/nv_tco.c @@ -0,0 +1,512 @@ +/* + * nv_tco 0.01: TCO timer driver for NV chipsets + * + * (c) Copyright 2005 Google Inc., All Rights Reserved. + * + * Based off i8xx_tco.c: + * (c) Copyright 2000 kernel concepts , All Rights + * Reserved. + * http://www.kernelconcepts.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * TCO timer driver for NV chipsets + * based on softdog.c by Alan Cox + */ + +/* + * Includes, defines, variables, module parameters, ... 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nv_tco.h" + +/* Module and version information */ +#define TCO_VERSION "0.01" +#define TCO_MODULE_NAME "NV_TCO" +#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION +#define PFX TCO_MODULE_NAME ": " + +/* internal variables */ +static unsigned int tcobase; +static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ +static unsigned long timer_alive; +static char tco_expect_close; +static struct pci_dev *tco_pci; + +/* the watchdog platform device */ +static struct platform_device *nv_tco_platform_device; + +/* module parameters */ +#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (2 t, so if t > 0x3f, so is + * tmrval=seconds_to_ticks(t). Check that the count in seconds isn't + * out of range on it's own (to avoid overflow in tmrval). + */ + if (t < 0 || t > 0x3f) + return -EINVAL; + tmrval = seconds_to_ticks(t); + + /* "Values of 0h-3h are ignored and should not be attempted" */ + if (tmrval > 0x3f || tmrval < 0x04) + return -EINVAL; + + /* Write new heartbeat to watchdog */ + spin_lock_irqsave(&tco_lock, flags); + val = inb(TCO_TMR(tcobase)); + val &= 0xc0; + val |= tmrval; + outb(val, TCO_TMR(tcobase)); + val = inb(TCO_TMR(tcobase)); + + if ((val & 0x3f) != tmrval) + ret = -EINVAL; + spin_unlock_irqrestore(&tco_lock, flags); + + if (ret) + return ret; + + heartbeat = t; + return 0; +} + +/* + * /dev/watchdog handling + */ + +static int nv_tco_open(struct inode *inode, struct file *file) +{ + /* /dev/watchdog can only be opened once */ + if (test_and_set_bit(0, &timer_alive)) + return -EBUSY; + + /* Reload and activate timer */ + tco_timer_keepalive(); + tco_timer_start(); + return nonseekable_open(inode, file); +} + +static int nv_tco_release(struct inode *inode, struct file *file) +{ + /* Shut off the timer */ + if (tco_expect_close == 42) { + tco_timer_stop(); + } else { + printk(KERN_CRIT PFX "Unexpected close, not stopping " + "watchdog!\n"); + tco_timer_keepalive(); + } + clear_bit(0, &timer_alive); + tco_expect_close = 0; + return 0; +} + +static ssize_t nv_tco_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + /* See if we got the magic character 'V' and reload the timer */ + if (len) { + if (!nowayout) { + size_t i; + + /* + * note: just in case someone wrote the magic character + * five months ago... + */ + tco_expect_close = 0; + + /* + * scan to see whether or not we got the magic + * character + */ + for (i = 0; i != len; i++) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + tco_expect_close = 42; + } + } + + /* someone wrote to us, we should reload the timer */ + tco_timer_keepalive(); + } + return len; +} + +static long nv_tco_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int new_options, retval = -EINVAL; + int new_heartbeat; + void __user *argp = (void __user *)arg; + int __user *p = argp; + static const struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, + .firmware_version = 0, + .identity = TCO_MODULE_NAME, + }; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + case WDIOC_SETOPTIONS: + if (get_user(new_options, p)) + return -EFAULT; + if (new_options & WDIOS_DISABLECARD) { + tco_timer_stop(); + retval = 0; + } + if (new_options & WDIOS_ENABLECARD) { + tco_timer_keepalive(); + tco_timer_start(); + retval = 0; + } + return retval; + case WDIOC_KEEPALIVE: + tco_timer_keepalive(); + return 0; + case WDIOC_SETTIMEOUT: + if (get_user(new_heartbeat, p)) + return -EFAULT; + if (tco_timer_set_heartbeat(new_heartbeat)) + return -EINVAL; + tco_timer_keepalive(); + /* Fall through */ + case WDIOC_GETTIMEOUT: + return put_user(heartbeat, p); + default: + return -ENOTTY; + } +} + +/* + * Kernel Interfaces + */ + +static const struct file_operations nv_tco_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = nv_tco_write, + .unlocked_ioctl = nv_tco_ioctl, + .open = nv_tco_open, + .release = nv_tco_release, +}; + +static struct miscdevice nv_tco_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &nv_tco_fops, +}; + +/* + * Data for PCI driver interface + * + * This data only exists for exporting the supported + * PCI ids via MODULE_DEVICE_TABLE. We do not actually + * register a pci_driver, because someone else might one day + * want to register another driver on the same PCI id. + */ +static struct pci_device_id tco_pci_tbl[] = { + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, /* End of list */ +}; +MODULE_DEVICE_TABLE(pci, tco_pci_tbl); + +/* + * Init & exit routines + */ + +static unsigned char __init nv_tco_getdevice(void) +{ + struct pci_dev *dev = NULL; + u32 val; + + /* Find the PCI device */ + for_each_pci_dev(dev) { + if (pci_match_id(tco_pci_tbl, dev) != NULL) { + tco_pci = dev; + break; + } + } + + if (!tco_pci) + return 0; + + /* Find the base io port */ + pci_read_config_dword(tco_pci, 0x64, &val); + val &= 0xffff; + if (val == 0x0001 || val == 0x0000) { + /* Something is wrong here, bar isn't setup */ + printk(KERN_ERR PFX "failed to get tcobase address\n"); + return 0; + } + val &= 0xff00; + tcobase = val + 0x40; + + if (!request_region(tcobase, 0x10, "NV TCO")) { + printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", + tcobase); + return 0; + } + + /* Set a reasonable heartbeat before we stop the timer */ + tco_timer_set_heartbeat(30); + + /* + * Stop the TCO before we change anything so we don't race with + * a zeroed timer. 
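+ * (The keepalive issued below refreshes the count before the timer is
+ * halted, so it cannot expire while we are still reconfiguring it.)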
+ */ + tco_timer_keepalive(); + tco_timer_stop(); + + /* Disable SMI caused by TCO */ + if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) { + printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", + MCP51_SMI_EN(tcobase)); + goto out; + } + val = inl(MCP51_SMI_EN(tcobase)); + val &= ~MCP51_SMI_EN_TCO; + outl(val, MCP51_SMI_EN(tcobase)); + val = inl(MCP51_SMI_EN(tcobase)); + release_region(MCP51_SMI_EN(tcobase), 4); + if (val & MCP51_SMI_EN_TCO) { + printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n"); + goto out; + } + + /* Check chipset's NO_REBOOT bit */ + pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val); + val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT; + pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val); + pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val); + if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) { + printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot " + "disabled by hardware\n"); + goto out; + } + + return 1; +out: + release_region(tcobase, 0x10); + return 0; +} + +static int __devinit nv_tco_init(struct platform_device *dev) +{ + int ret; + + /* Check whether or not the hardware watchdog is there */ + if (!nv_tco_getdevice()) + return -ENODEV; + + /* Check to see if last reboot was due to watchdog timeout */ + printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n", + inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not "); + + /* Clear out the old status */ + outl(TCO_STS_RESET, TCO_STS(tcobase)); + + /* + * Check that the heartbeat value is within it's range. + * If not, reset to the default. + */ + if (tco_timer_set_heartbeat(heartbeat)) { + heartbeat = WATCHDOG_HEARTBEAT; + tco_timer_set_heartbeat(heartbeat); + printk(KERN_INFO PFX "heartbeat value must be 2, All Rights + * Reserved. + * http://www.kernelconcepts.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Neither kernel concepts nor Nils Faerber admit liability nor provide + * warranty for any of this software. This material is provided + * "AS-IS" and at no charge. + * + * (c) Copyright 2000 kernel concepts + * developed for + * Jentro AG, Haar/Munich (Germany) + * + * TCO timer driver for NV chipsets + * based on softdog.c by Alan Cox + */ + +/* + * Some address definitions for the TCO + */ + +#define TCO_RLD(base) ((base) + 0x00) /* TCO Timer Reload and Current Value */ +#define TCO_TMR(base) ((base) + 0x01) /* TCO Timer Initial Value */ + +#define TCO_STS(base) ((base) + 0x04) /* TCO Status Register */ +/* + * TCO Boot Status bit: set on TCO reset, reset by software or standby + * power-good (survives reboots), unfortunately this bit is never + * set. + */ +# define TCO_STS_BOOT_STS (1 << 9) +/* + * first and 2nd timeout status bits, these also survive a warm boot, + * and they work, so we use them. + */ +# define TCO_STS_TCO_INT_STS (1 << 1) +# define TCO_STS_TCO2TO_STS (1 << 10) +# define TCO_STS_RESET (TCO_STS_BOOT_STS | TCO_STS_TCO2TO_STS | \ + TCO_STS_TCO_INT_STS) + +#define TCO_CNT(base) ((base) + 0x08) /* TCO Control Register */ +# define TCO_CNT_TCOHALT (1 << 12) + +#define MCP51_SMBUS_SETUP_B 0xe8 +# define MCP51_SMBUS_SETUP_B_TCO_REBOOT (1 << 25) + +/* + * The SMI_EN register is at the base io address + 0x04, + * while TCOBASE is + 0x40. 
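+ * For example (values purely illustrative): a TCOBASE of 0x0c40 puts
+ * SMI_EN at 0x0c40 - 0x40 + 0x04 = 0x0c04, which is exactly what the
+ * MCP51_SMI_EN() macro below computes.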
+ */ +#define MCP51_SMI_EN(base) ((base) - 0x40 + 0x04) +# define MCP51_SMI_EN_TCO ((1 << 4) | (1 << 5)) diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c new file mode 100644 index 000000000000..808372883e88 --- /dev/null +++ b/drivers/watchdog/sp5100_tco.c @@ -0,0 +1,480 @@ +/* + * sp5100_tco : TCO timer driver for sp5100 chipsets + * + * (c) Copyright 2009 Google Inc., All Rights Reserved. + * + * Based on i8xx_tco.c: + * (c) Copyright 2000 kernel concepts , All Rights + * Reserved. + * http://www.kernelconcepts.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide" + */ + +/* + * Includes, defines, variables, module parameters, ... + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sp5100_tco.h" + +/* Module and version information */ +#define TCO_VERSION "0.01" +#define TCO_MODULE_NAME "SP5100 TCO timer" +#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION +#define PFX TCO_MODULE_NAME ": " + +/* internal variables */ +static void __iomem *tcobase; +static unsigned int pm_iobase; +static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ +static unsigned long timer_alive; +static char tco_expect_close; +static struct pci_dev *sp5100_tco_pci; + +/* the watchdog platform device */ +static struct platform_device *sp5100_tco_platform_device; + +/* module parameters */ + +#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat. */ +static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. 
(default=" + __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started" + " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +/* + * Some TCO specific functions + */ +static void tco_timer_start(void) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&tco_lock, flags); + val = readl(SP5100_WDT_CONTROL(tcobase)); + val |= SP5100_WDT_START_STOP_BIT; + writel(val, SP5100_WDT_CONTROL(tcobase)); + spin_unlock_irqrestore(&tco_lock, flags); +} + +static void tco_timer_stop(void) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&tco_lock, flags); + val = readl(SP5100_WDT_CONTROL(tcobase)); + val &= ~SP5100_WDT_START_STOP_BIT; + writel(val, SP5100_WDT_CONTROL(tcobase)); + spin_unlock_irqrestore(&tco_lock, flags); +} + +static void tco_timer_keepalive(void) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&tco_lock, flags); + val = readl(SP5100_WDT_CONTROL(tcobase)); + val |= SP5100_WDT_TRIGGER_BIT; + writel(val, SP5100_WDT_CONTROL(tcobase)); + spin_unlock_irqrestore(&tco_lock, flags); +} + +static int tco_timer_set_heartbeat(int t) +{ + unsigned long flags; + + if (t < 0 || t > 0xffff) + return -EINVAL; + + /* Write new heartbeat to watchdog */ + spin_lock_irqsave(&tco_lock, flags); + writel(t, SP5100_WDT_COUNT(tcobase)); + spin_unlock_irqrestore(&tco_lock, flags); + + heartbeat = t; + return 0; +} + +/* + * /dev/watchdog handling + */ + +static int sp5100_tco_open(struct inode *inode, struct file *file) +{ + /* /dev/watchdog can only be opened once */ + if (test_and_set_bit(0, &timer_alive)) + return -EBUSY; + + /* Reload and activate timer */ + tco_timer_start(); + tco_timer_keepalive(); + return nonseekable_open(inode, file); +} + +static int sp5100_tco_release(struct inode *inode, struct file *file) +{ + /* Shut off the timer. */ + if (tco_expect_close == 42) { + tco_timer_stop(); + } else { + printk(KERN_CRIT PFX + "Unexpected close, not stopping watchdog!\n"); + tco_timer_keepalive(); + } + clear_bit(0, &timer_alive); + tco_expect_close = 0; + return 0; +} + +static ssize_t sp5100_tco_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + /* See if we got the magic character 'V' and reload the timer */ + if (len) { + if (!nowayout) { + size_t i; + + /* note: just in case someone wrote the magic character + * five months ago... */ + tco_expect_close = 0; + + /* scan to see whether or not we got the magic character + */ + for (i = 0; i != len; i++) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + tco_expect_close = 42; + } + } + + /* someone wrote to us, we should reload the timer */ + tco_timer_keepalive(); + } + return len; +} + +static long sp5100_tco_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int new_options, retval = -EINVAL; + int new_heartbeat; + void __user *argp = (void __user *)arg; + int __user *p = argp; + static const struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, + .firmware_version = 0, + .identity = TCO_MODULE_NAME, + }; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + case WDIOC_SETOPTIONS: + if (get_user(new_options, p)) + return -EFAULT; + if (new_options & WDIOS_DISABLECARD) { + tco_timer_stop(); + retval = 0; + } + if (new_options & WDIOS_ENABLECARD) { + tco_timer_start(); + tco_timer_keepalive(); + retval = 0; + } + return retval; + case WDIOC_KEEPALIVE: + tco_timer_keepalive(); + return 0; + case WDIOC_SETTIMEOUT: + if (get_user(new_heartbeat, p)) + return -EFAULT; + if (tco_timer_set_heartbeat(new_heartbeat)) + return -EINVAL; + tco_timer_keepalive(); + /* Fall through */ + case WDIOC_GETTIMEOUT: + return put_user(heartbeat, p); + default: + return -ENOTTY; + } +} + +/* + * Kernel Interfaces + */ + +static const struct file_operations sp5100_tco_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = sp5100_tco_write, + .unlocked_ioctl = sp5100_tco_ioctl, + .open = sp5100_tco_open, + .release = sp5100_tco_release, +}; + +static struct miscdevice sp5100_tco_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &sp5100_tco_fops, +}; + +/* + * Data for PCI driver interface + * + * This data only exists for exporting the supported + * PCI ids via MODULE_DEVICE_TABLE. We do not actually + * register a pci_driver, because someone else might + * want to register another driver on the same PCI id. + */ +static struct pci_device_id sp5100_tco_pci_tbl[] = { + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID, + PCI_ANY_ID, }, + { 0, }, /* End of list */ +}; +MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl); + +/* + * Init & exit routines + */ + +static unsigned char __devinit sp5100_tco_setupdevice(void) +{ + struct pci_dev *dev = NULL; + u32 val; + + /* Match the PCI device */ + for_each_pci_dev(dev) { + if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) { + sp5100_tco_pci = dev; + break; + } + } + + if (!sp5100_tco_pci) + return 0; + + /* Request the IO ports used by this driver */ + pm_iobase = SP5100_IO_PM_INDEX_REG; + if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) { + printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", + pm_iobase); + goto exit; + } + + /* Find the watchdog base address. */ + outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG); + val = inb(SP5100_IO_PM_DATA_REG); + outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG); + val = val << 8 | inb(SP5100_IO_PM_DATA_REG); + outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG); + val = val << 8 | inb(SP5100_IO_PM_DATA_REG); + outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG); + /* Low three bits of BASE0 are reserved. */ + val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); + + tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); + if (tcobase == 0) { + printk(KERN_ERR PFX "failed to get tcobase address\n"); + goto unreg_region; + } + + /* Enable watchdog decode bit */ + pci_read_config_dword(sp5100_tco_pci, + SP5100_PCI_WATCHDOG_MISC_REG, + &val); + + val |= SP5100_PCI_WATCHDOG_DECODE_EN; + + pci_write_config_dword(sp5100_tco_pci, + SP5100_PCI_WATCHDOG_MISC_REG, + val); + + /* Enable Watchdog timer and set the resolution to 1 sec. */ + outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG); + val = inb(SP5100_IO_PM_DATA_REG); + val |= SP5100_PM_WATCHDOG_SECOND_RES; + val &= ~SP5100_PM_WATCHDOG_DISABLE; + outb(val, SP5100_IO_PM_DATA_REG); + + /* Check that the watchdog action is set to reset the system. 
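+ * (Clearing SP5100_PM_WATCHDOG_ACTION_RESET in the code below is what
+ * selects the reset action.)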
*/ + val = readl(SP5100_WDT_CONTROL(tcobase)); + val &= ~SP5100_PM_WATCHDOG_ACTION_RESET; + writel(val, SP5100_WDT_CONTROL(tcobase)); + + /* Set a reasonable heartbeat before we stop the timer */ + tco_timer_set_heartbeat(heartbeat); + + /* + * Stop the TCO before we change anything so we don't race with + * a zeroed timer. + */ + tco_timer_stop(); + + /* Done */ + return 1; + + iounmap(tcobase); +unreg_region: + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); +exit: + return 0; +} + +static int __devinit sp5100_tco_init(struct platform_device *dev) +{ + int ret; + u32 val; + + /* Check whether or not the hardware watchdog is there. If found, then + * set it up. + */ + if (!sp5100_tco_setupdevice()) + return -ENODEV; + + /* Check to see if last reboot was due to watchdog timeout */ + printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n", + readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ? + "" : "not "); + + /* Clear out the old status */ + val = readl(SP5100_WDT_CONTROL(tcobase)); + val &= ~SP5100_PM_WATCHDOG_FIRED; + writel(val, SP5100_WDT_CONTROL(tcobase)); + + /* + * Check that the heartbeat value is within it's range. + * If not, reset to the default. + */ + if (tco_timer_set_heartbeat(heartbeat)) { + heartbeat = WATCHDOG_HEARTBEAT; + tco_timer_set_heartbeat(heartbeat); + } + + ret = misc_register(&sp5100_tco_miscdev); + if (ret != 0) { + printk(KERN_ERR PFX "cannot register miscdev on minor=" + "%d (err=%d)\n", + WATCHDOG_MINOR, ret); + goto exit; + } + + clear_bit(0, &timer_alive); + + printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec" + " (nowayout=%d)\n", + tcobase, heartbeat, nowayout); + + return 0; + +exit: + iounmap(tcobase); + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); + return ret; +} + +static void __devexit sp5100_tco_cleanup(void) +{ + /* Stop the timer before we leave */ + if (!nowayout) + tco_timer_stop(); + + /* Deregister */ + misc_deregister(&sp5100_tco_miscdev); + iounmap(tcobase); + release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); +} + +static int __devexit sp5100_tco_remove(struct platform_device *dev) +{ + if (tcobase) + sp5100_tco_cleanup(); + return 0; +} + +static void sp5100_tco_shutdown(struct platform_device *dev) +{ + tco_timer_stop(); +} + +static struct platform_driver sp5100_tco_driver = { + .probe = sp5100_tco_init, + .remove = __devexit_p(sp5100_tco_remove), + .shutdown = sp5100_tco_shutdown, + .driver = { + .owner = THIS_MODULE, + .name = TCO_MODULE_NAME, + }, +}; + +static int __init sp5100_tco_init_module(void) +{ + int err; + + printk(KERN_INFO PFX "SP5100 TCO WatchDog Timer Driver v%s\n", + TCO_VERSION); + + err = platform_driver_register(&sp5100_tco_driver); + if (err) + return err; + + sp5100_tco_platform_device = platform_device_register_simple( + TCO_MODULE_NAME, -1, NULL, 0); + if (IS_ERR(sp5100_tco_platform_device)) { + err = PTR_ERR(sp5100_tco_platform_device); + goto unreg_platform_driver; + } + + return 0; + +unreg_platform_driver: + platform_driver_unregister(&sp5100_tco_driver); + return err; +} + +static void __exit sp5100_tco_cleanup_module(void) +{ + platform_device_unregister(sp5100_tco_platform_device); + platform_driver_unregister(&sp5100_tco_driver); + printk(KERN_INFO PFX "SP5100 TCO Watchdog Module Unloaded.\n"); +} + +module_init(sp5100_tco_init_module); +module_exit(sp5100_tco_cleanup_module); + +MODULE_AUTHOR("Priyanka Gupta"); +MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git 
a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h new file mode 100644 index 000000000000..a5a16cc90a34 --- /dev/null +++ b/drivers/watchdog/sp5100_tco.h @@ -0,0 +1,41 @@ +/* + * sp5100_tco: TCO timer driver for sp5100 chipsets. + * + * (c) Copyright 2009 Google Inc., All Rights Reserved. + * + * TCO timer driver for sp5100 chipsets + */ + +/* + * Some address definitions for the Watchdog + */ + +#define SP5100_WDT_MEM_MAP_SIZE 0x08 +#define SP5100_WDT_CONTROL(base) ((base) + 0x00) /* Watchdog Control */ +#define SP5100_WDT_COUNT(base) ((base) + 0x04) /* Watchdog Count */ + +#define SP5100_WDT_START_STOP_BIT 1 +#define SP5100_WDT_TRIGGER_BIT (1 << 7) + +#define SP5100_PCI_WATCHDOG_MISC_REG 0x41 +#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3) + +#define SP5100_PM_IOPORTS_SIZE 0x02 + +/* These two IO registers are hardcoded and there doesn't seem to be a way to + * read them from a register. + */ +#define SP5100_IO_PM_INDEX_REG 0xCD6 +#define SP5100_IO_PM_DATA_REG 0xCD7 + +#define SP5100_PM_WATCHDOG_CONTROL 0x69 +#define SP5100_PM_WATCHDOG_BASE0 0x6C +#define SP5100_PM_WATCHDOG_BASE1 0x6D +#define SP5100_PM_WATCHDOG_BASE2 0x6E +#define SP5100_PM_WATCHDOG_BASE3 0x6F + +#define SP5100_PM_WATCHDOG_FIRED (1 << 1) +#define SP5100_PM_WATCHDOG_ACTION_RESET (1 << 2) + +#define SP5100_PM_WATCHDOG_DISABLE 1 +#define SP5100_PM_WATCHDOG_SECOND_RES (3 << 1) diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 0f5288df0091..e5c91d4404ed 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -42,7 +42,7 @@ #include -#define WATCHDOG_NAME "w83627hf/thf/hg WDT" +#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT" #define PFX WATCHDOG_NAME ": " #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ @@ -89,7 +89,7 @@ static void w83627hf_select_wd_register(void) c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */ outb_p(0x2b, WDT_EFER); outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */ - } else if (c == 0x88) { /* W83627EHF */ + } else if (c == 0x88 || c == 0xa0) { /* W83627EHF / W83627DHG */ outb_p(0x2d, WDT_EFER); /* select GPIO5 */ c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */ outb_p(0x2d, WDT_EFER); @@ -129,6 +129,8 @@ static void w83627hf_init(void) t = inb_p(WDT_EFDR); /* read CRF5 */ t &= ~0x0C; /* set second mode & disable keyboard turning off watchdog */ + t |= 0x02; /* enable the WDTO# output low pulse + to the KBRST# pin (PIN60) */ outb_p(t, WDT_EFDR); /* Write back to CRF5 */ outb_p(0xF7, WDT_EFER); /* Select CRF7 */ @@ -321,7 +323,7 @@ static int __init wdt_init(void) { int ret; - printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG Super I/O chip initialising.\n"); + printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising.\n"); if (wdt_set_heartbeat(timeout)) { wdt_set_heartbeat(WATCHDOG_TIMEOUT); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5a48ce996dea..07bec09d1dad 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -71,11 +71,18 @@ config XEN_SYS_HYPERVISOR but will have no xen contents. config XEN_XENBUS_FRONTEND - tristate + tristate + +config XEN_GNTDEV + tristate "userspace grant access device driver" + depends on XEN + select MMU_NOTIFIER + help + Allows userspace processes to use grants. 
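As an aside on how this new device is meant to be consumed, here is a minimal user-space sketch, illustrative only: it assumes the ioctl layout from the public xen/gntdev.h header, a /dev/xen/gntdev node for the "xen/gntdev" misc device, and the map_one_grant() helper plus the domid/grant-reference values are invented for illustration. Grants are mapped with IOCTL_GNTDEV_MAP_GRANT_REF, the returned index is then used as the mmap() offset, and tear-down is munmap() followed by IOCTL_GNTDEV_UNMAP_GRANT_REF.

/*
 * Minimal gntdev usage sketch -- illustrative only.  Assumes the ioctl
 * ABI from <xen/gntdev.h> and a /dev/xen/gntdev device node; the domid
 * and grant reference passed in are placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

int map_one_grant(uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref map;
	struct ioctl_gntdev_unmap_grant_ref unmap;
	void *page;
	int fd;

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&map, 0, sizeof(map));
	map.count = 1;			/* one grant -> one page */
	map.refs[0].domid = domid;	/* granting domain (placeholder) */
	map.refs[0].ref = ref;		/* grant reference (placeholder) */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0)
		goto err_close;

	/* The driver hands back an offset; mmap() it to reach the page. */
	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.index);
	if (page == MAP_FAILED)
		goto err_close;

	/* ... read from / write to the granted page here ... */

	munmap(page, 4096);
	memset(&unmap, 0, sizeof(unmap));
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;

err_close:
	close(fd);
	return -1;
}

The mmap() offset must be exactly the index handed back by the map ioctl; further down in this patch, gntdev_mmap() uses that page offset (vm_pgoff) to find the matching grant_map via gntdev_find_map_index().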
config XEN_PLATFORM_PCI tristate "xen platform pci device driver" - depends on XEN_PVHVM + depends on XEN_PVHVM && PCI default m help Driver for the Xen PCI Platform device: it is responsible for diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 533a199e7a3f..5088cc2e6fe2 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -9,11 +9,14 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += balloon.o obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o +obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o -obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o +obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o obj-$(CONFIG_XEN_DOM0) += pci.o xen-evtchn-y := evtchn.o +xen-gntdev-y := gntdev.o +xen-platform-pci-y := platform-pci.o diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c new file mode 100644 index 000000000000..1e31cdcdae1e --- /dev/null +++ b/drivers/xen/gntdev.c @@ -0,0 +1,665 @@ +/****************************************************************************** + * gntdev.c + * + * Device for accessing (in user-space) pages that have been granted by other + * domains. + * + * Copyright (c) 2006-2007, D G Murray. + * (c) 2009 Gerd Hoffmann + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Derek G. Murray , " + "Gerd Hoffmann "); +MODULE_DESCRIPTION("User-space granted page access driver"); + +static int limit = 1024; +module_param(limit, int, 0644); +MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at " + "once by a gntdev instance"); + +struct gntdev_priv { + struct list_head maps; + uint32_t used; + uint32_t limit; + /* lock protects maps from concurrent changes */ + spinlock_t lock; + struct mm_struct *mm; + struct mmu_notifier mn; +}; + +struct grant_map { + struct list_head next; + struct gntdev_priv *priv; + struct vm_area_struct *vma; + int index; + int count; + int flags; + int is_mapped; + struct ioctl_gntdev_grant_ref *grants; + struct gnttab_map_grant_ref *map_ops; + struct gnttab_unmap_grant_ref *unmap_ops; + struct page **pages; +}; + +/* ------------------------------------------------------------------ */ + +static void gntdev_print_maps(struct gntdev_priv *priv, + char *text, int text_index) +{ +#ifdef DEBUG + struct grant_map *map; + + pr_debug("maps list (priv %p, usage %d/%d)\n", + priv, priv->used, priv->limit); + + list_for_each_entry(map, &priv->maps, next) + pr_debug(" index %2d, count %2d %s\n", + map->index, map->count, + map->index == text_index && text ? 
text : ""); +#endif +} + +static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) +{ + struct grant_map *add; + int i; + + add = kzalloc(sizeof(struct grant_map), GFP_KERNEL); + if (NULL == add) + return NULL; + + add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL); + add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL); + add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); + add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL); + if (NULL == add->grants || + NULL == add->map_ops || + NULL == add->unmap_ops || + NULL == add->pages) + goto err; + + for (i = 0; i < count; i++) { + add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + if (add->pages[i] == NULL) + goto err; + } + + add->index = 0; + add->count = count; + add->priv = priv; + + if (add->count + priv->used > priv->limit) + goto err; + + return add; + +err: + if (add->pages) + for (i = 0; i < count; i++) { + if (add->pages[i]) + __free_page(add->pages[i]); + } + kfree(add->pages); + kfree(add->grants); + kfree(add->map_ops); + kfree(add->unmap_ops); + kfree(add); + return NULL; +} + +static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add) +{ + struct grant_map *map; + + list_for_each_entry(map, &priv->maps, next) { + if (add->index + add->count < map->index) { + list_add_tail(&add->next, &map->next); + goto done; + } + add->index = map->index + map->count; + } + list_add_tail(&add->next, &priv->maps); + +done: + priv->used += add->count; + gntdev_print_maps(priv, "[new]", add->index); +} + +static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, + int index, int count) +{ + struct grant_map *map; + + list_for_each_entry(map, &priv->maps, next) { + if (map->index != index) + continue; + if (map->count != count) + continue; + return map; + } + return NULL; +} + +static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv, + unsigned long vaddr) +{ + struct grant_map *map; + + list_for_each_entry(map, &priv->maps, next) { + if (!map->vma) + continue; + if (vaddr < map->vma->vm_start) + continue; + if (vaddr >= map->vma->vm_end) + continue; + return map; + } + return NULL; +} + +static int gntdev_del_map(struct grant_map *map) +{ + int i; + + if (map->vma) + return -EBUSY; + for (i = 0; i < map->count; i++) + if (map->unmap_ops[i].handle) + return -EBUSY; + + map->priv->used -= map->count; + list_del(&map->next); + return 0; +} + +static void gntdev_free_map(struct grant_map *map) +{ + int i; + + if (!map) + return; + + if (map->pages) + for (i = 0; i < map->count; i++) { + if (map->pages[i]) + __free_page(map->pages[i]); + } + kfree(map->pages); + kfree(map->grants); + kfree(map->map_ops); + kfree(map->unmap_ops); + kfree(map); +} + +/* ------------------------------------------------------------------ */ + +static int find_grant_ptes(pte_t *pte, pgtable_t token, + unsigned long addr, void *data) +{ + struct grant_map *map = data; + unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; + u64 pte_maddr; + + BUG_ON(pgnr >= map->count); + pte_maddr = arbitrary_virt_to_machine(pte).maddr; + + gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, + GNTMAP_contains_pte | map->flags, + map->grants[pgnr].ref, + map->grants[pgnr].domid); + gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, + GNTMAP_contains_pte | map->flags, + 0 /* handle */); + return 0; +} + +static int map_grant_pages(struct grant_map *map) +{ + int i, err = 0; + + pr_debug("map %d+%d\n", map->index, map->count); + err = 
gnttab_map_refs(map->map_ops, map->pages, map->count); + if (err) + return err; + + for (i = 0; i < map->count; i++) { + if (map->map_ops[i].status) + err = -EINVAL; + map->unmap_ops[i].handle = map->map_ops[i].handle; + } + return err; +} + +static int unmap_grant_pages(struct grant_map *map, int offset, int pages) +{ + int i, err = 0; + + pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages); + err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages); + if (err) + return err; + + for (i = 0; i < pages; i++) { + if (map->unmap_ops[offset+i].status) + err = -EINVAL; + map->unmap_ops[offset+i].handle = 0; + } + return err; +} + +/* ------------------------------------------------------------------ */ + +static void gntdev_vma_close(struct vm_area_struct *vma) +{ + struct grant_map *map = vma->vm_private_data; + + pr_debug("close %p\n", vma); + map->is_mapped = 0; + map->vma = NULL; + vma->vm_private_data = NULL; +} + +static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n", + vmf->virtual_address, vmf->pgoff); + vmf->flags = VM_FAULT_ERROR; + return 0; +} + +static struct vm_operations_struct gntdev_vmops = { + .close = gntdev_vma_close, + .fault = gntdev_vma_fault, +}; + +/* ------------------------------------------------------------------ */ + +static void mn_invl_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); + struct grant_map *map; + unsigned long mstart, mend; + int err; + + spin_lock(&priv->lock); + list_for_each_entry(map, &priv->maps, next) { + if (!map->vma) + continue; + if (!map->is_mapped) + continue; + if (map->vma->vm_start >= end) + continue; + if (map->vma->vm_end <= start) + continue; + mstart = max(start, map->vma->vm_start); + mend = min(end, map->vma->vm_end); + pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", + map->index, map->count, + map->vma->vm_start, map->vma->vm_end, + start, end, mstart, mend); + err = unmap_grant_pages(map, + (mstart - map->vma->vm_start) >> PAGE_SHIFT, + (mend - mstart) >> PAGE_SHIFT); + WARN_ON(err); + } + spin_unlock(&priv->lock); +} + +static void mn_invl_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + mn_invl_range_start(mn, mm, address, address + PAGE_SIZE); +} + +static void mn_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); + struct grant_map *map; + int err; + + spin_lock(&priv->lock); + list_for_each_entry(map, &priv->maps, next) { + if (!map->vma) + continue; + pr_debug("map %d+%d (%lx %lx)\n", + map->index, map->count, + map->vma->vm_start, map->vma->vm_end); + err = unmap_grant_pages(map, /* offset */ 0, map->count); + WARN_ON(err); + } + spin_unlock(&priv->lock); +} + +struct mmu_notifier_ops gntdev_mmu_ops = { + .release = mn_release, + .invalidate_page = mn_invl_page, + .invalidate_range_start = mn_invl_range_start, +}; + +/* ------------------------------------------------------------------ */ + +static int gntdev_open(struct inode *inode, struct file *flip) +{ + struct gntdev_priv *priv; + int ret = 0; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_LIST_HEAD(&priv->maps); + spin_lock_init(&priv->lock); + priv->limit = limit; + + priv->mm = get_task_mm(current); + if (!priv->mm) { + kfree(priv); + return -ENOMEM; + } + 
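+ /*
+ * The MMU notifier registered below is what lets mn_invl_range_start()
+ * and mn_release() unmap granted pages when the opener's address space
+ * is invalidated or torn down, so grant mappings never outlive the mm.
+ */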
priv->mn.ops = &gntdev_mmu_ops; + ret = mmu_notifier_register(&priv->mn, priv->mm); + mmput(priv->mm); + + if (ret) { + kfree(priv); + return ret; + } + + flip->private_data = priv; + pr_debug("priv %p\n", priv); + + return 0; +} + +static int gntdev_release(struct inode *inode, struct file *flip) +{ + struct gntdev_priv *priv = flip->private_data; + struct grant_map *map; + int err; + + pr_debug("priv %p\n", priv); + + spin_lock(&priv->lock); + while (!list_empty(&priv->maps)) { + map = list_entry(priv->maps.next, struct grant_map, next); + err = gntdev_del_map(map); + if (WARN_ON(err)) + gntdev_free_map(map); + + } + spin_unlock(&priv->lock); + + mmu_notifier_unregister(&priv->mn, priv->mm); + kfree(priv); + return 0; +} + +static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, + struct ioctl_gntdev_map_grant_ref __user *u) +{ + struct ioctl_gntdev_map_grant_ref op; + struct grant_map *map; + int err; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + pr_debug("priv %p, add %d\n", priv, op.count); + if (unlikely(op.count <= 0)) + return -EINVAL; + if (unlikely(op.count > priv->limit)) + return -EINVAL; + + err = -ENOMEM; + map = gntdev_alloc_map(priv, op.count); + if (!map) + return err; + if (copy_from_user(map->grants, &u->refs, + sizeof(map->grants[0]) * op.count) != 0) { + gntdev_free_map(map); + return err; + } + + spin_lock(&priv->lock); + gntdev_add_map(priv, map); + op.index = map->index << PAGE_SHIFT; + spin_unlock(&priv->lock); + + if (copy_to_user(u, &op, sizeof(op)) != 0) { + spin_lock(&priv->lock); + gntdev_del_map(map); + spin_unlock(&priv->lock); + gntdev_free_map(map); + return err; + } + return 0; +} + +static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, + struct ioctl_gntdev_unmap_grant_ref __user *u) +{ + struct ioctl_gntdev_unmap_grant_ref op; + struct grant_map *map; + int err = -ENOENT; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); + + spin_lock(&priv->lock); + map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); + if (map) + err = gntdev_del_map(map); + spin_unlock(&priv->lock); + if (!err) + gntdev_free_map(map); + return err; +} + +static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, + struct ioctl_gntdev_get_offset_for_vaddr __user *u) +{ + struct ioctl_gntdev_get_offset_for_vaddr op; + struct grant_map *map; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); + + spin_lock(&priv->lock); + map = gntdev_find_map_vaddr(priv, op.vaddr); + if (map == NULL || + map->vma->vm_start != op.vaddr) { + spin_unlock(&priv->lock); + return -EINVAL; + } + op.offset = map->index << PAGE_SHIFT; + op.count = map->count; + spin_unlock(&priv->lock); + + if (copy_to_user(u, &op, sizeof(op)) != 0) + return -EFAULT; + return 0; +} + +static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv, + struct ioctl_gntdev_set_max_grants __user *u) +{ + struct ioctl_gntdev_set_max_grants op; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + pr_debug("priv %p, limit %d\n", priv, op.count); + if (op.count > limit) + return -E2BIG; + + spin_lock(&priv->lock); + priv->limit = op.count; + spin_unlock(&priv->lock); + return 0; +} + +static long gntdev_ioctl(struct file *flip, + unsigned int cmd, unsigned long arg) +{ + struct gntdev_priv *priv = flip->private_data; + void __user *ptr = (void __user *)arg; + + switch 
(cmd) { + case IOCTL_GNTDEV_MAP_GRANT_REF: + return gntdev_ioctl_map_grant_ref(priv, ptr); + + case IOCTL_GNTDEV_UNMAP_GRANT_REF: + return gntdev_ioctl_unmap_grant_ref(priv, ptr); + + case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: + return gntdev_ioctl_get_offset_for_vaddr(priv, ptr); + + case IOCTL_GNTDEV_SET_MAX_GRANTS: + return gntdev_ioctl_set_max_grants(priv, ptr); + + default: + pr_debug("priv %p, unknown cmd %x\n", priv, cmd); + return -ENOIOCTLCMD; + } + + return 0; +} + +static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) +{ + struct gntdev_priv *priv = flip->private_data; + int index = vma->vm_pgoff; + int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + struct grant_map *map; + int err = -EINVAL; + + if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + pr_debug("map %d+%d at %lx (pgoff %lx)\n", + index, count, vma->vm_start, vma->vm_pgoff); + + spin_lock(&priv->lock); + map = gntdev_find_map_index(priv, index, count); + if (!map) + goto unlock_out; + if (map->vma) + goto unlock_out; + if (priv->mm != vma->vm_mm) { + printk(KERN_WARNING "Huh? Other mm?\n"); + goto unlock_out; + } + + vma->vm_ops = &gntdev_vmops; + + vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; + + vma->vm_private_data = map; + map->vma = vma; + + map->flags = GNTMAP_host_map | GNTMAP_application_map; + if (!(vma->vm_flags & VM_WRITE)) + map->flags |= GNTMAP_readonly; + + spin_unlock(&priv->lock); + + err = apply_to_page_range(vma->vm_mm, vma->vm_start, + vma->vm_end - vma->vm_start, + find_grant_ptes, map); + if (err) { + printk(KERN_WARNING "find_grant_ptes() failure.\n"); + return err; + } + + err = map_grant_pages(map); + if (err) { + printk(KERN_WARNING "map_grant_pages() failure.\n"); + return err; + } + + map->is_mapped = 1; + + return 0; + +unlock_out: + spin_unlock(&priv->lock); + return err; +} + +static const struct file_operations gntdev_fops = { + .owner = THIS_MODULE, + .open = gntdev_open, + .release = gntdev_release, + .mmap = gntdev_mmap, + .unlocked_ioctl = gntdev_ioctl +}; + +static struct miscdevice gntdev_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xen/gntdev", + .fops = &gntdev_fops, +}; + +/* ------------------------------------------------------------------ */ + +static int __init gntdev_init(void) +{ + int err; + + if (!xen_domain()) + return -ENODEV; + + err = misc_register(&gntdev_miscdev); + if (err != 0) { + printk(KERN_ERR "Could not register gntdev device\n"); + return err; + } + return 0; +} + +static void __exit gntdev_exit(void) +{ + misc_deregister(&gntdev_miscdev); +} + +module_init(gntdev_init); +module_exit(gntdev_exit); + +/* ------------------------------------------------------------------ */ diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 6c4531816496..9ef54ebc1194 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -447,6 +447,52 @@ unsigned int gnttab_max_grant_frames(void) } EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); +int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, + struct page **pages, unsigned int count) +{ + int i, ret; + pte_t *pte; + unsigned long mfn; + + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + /* m2p override only supported for GNTMAP_contains_pte mappings */ + if (!(map_ops[i].flags & GNTMAP_contains_pte)) + continue; + pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + + (map_ops[i].host_addr & ~PAGE_MASK)); + mfn = 
pte_mfn(*pte); + ret = m2p_add_override(mfn, pages[i]); + if (ret) + return ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_map_refs); + +int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, + struct page **pages, unsigned int count) +{ + int i, ret; + + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + ret = m2p_remove_override(pages[i]); + if (ret) + return ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_unmap_refs); + static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index c01b5ddce529..afbe041f42c5 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -105,7 +105,7 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; - long ioaddr, iolen; + long ioaddr; long mmio_addr, mmio_len; unsigned int max_nr_gframes; @@ -114,7 +114,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, return i; ioaddr = pci_resource_start(pdev, 0); - iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); @@ -125,19 +124,13 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, goto pci_out; } - if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { - dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n", - mmio_addr, mmio_len); - ret = -EBUSY; + ret = pci_request_region(pdev, 1, DRV_NAME); + if (ret < 0) goto pci_out; - } - if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { - dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n", - iolen, ioaddr); - ret = -EBUSY; + ret = pci_request_region(pdev, 0, DRV_NAME); + if (ret < 0) goto mem_out; - } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; @@ -169,9 +162,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, return 0; out: - release_region(ioaddr, iolen); + pci_release_region(pdev, 0); mem_out: - release_mem_region(mmio_addr, mmio_len); + pci_release_region(pdev, 1); pci_out: pci_disable_device(pdev); return ret; diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c index ba0cf0b601bb..cf38e159131a 100644 --- a/firmware/ihex2fw.c +++ b/firmware/ihex2fw.c @@ -124,8 +124,7 @@ int main(int argc, char **argv) if (process_ihex(data, st.st_size)) return 1; - output_records(outfd); - return 0; + return output_records(outfd); } static int process_ihex(uint8_t *data, ssize_t size) @@ -269,11 +268,13 @@ static int output_records(int outfd) p->addr = htonl(p->addr); p->len = htons(p->len); - write(outfd, &p->addr, writelen); + if (write(outfd, &p->addr, writelen) != writelen) + return 1; p = p->next; } /* EOF record is zero length, since we don't bother to represent the type field in the binary version */ - write(outfd, zeroes, 6); + if (write(outfd, zeroes, 6) != 6) + return 1; return 0; } diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index bab0eac873f4..b789f8e597ec 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -59,7 +59,6 @@ void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); int v9fs_dir_release(struct inode *inode, struct file *filp); int v9fs_file_open(struct inode *inode, struct file *file); void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); -void v9fs_dentry_release(struct dentry *); int v9fs_uflags2omode(int uflags, int extended); ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, 
u64); diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index 466d2a4fc5cb..233b7d4ffe5e 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -86,7 +86,7 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry) * */ -void v9fs_dentry_release(struct dentry *dentry) +static void v9fs_dentry_release(struct dentry *dentry) { struct v9fs_dentry *dent; struct p9_fid *temp, *current_fid; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 5076eeb95502..b76a40bdf4c2 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -699,11 +699,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, goto error_iput; inst_out: - if (v9ses->cache) - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - else - d_set_d_op(dentry, &v9fs_dentry_operations); - d_add(dentry, inode); return NULL; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index c55c614500ad..dbaabe3b8131 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -141,6 +141,11 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, } v9fs_fill_super(sb, v9ses, flags, data); + if (v9ses->cache) + sb->s_d_op = &v9fs_cached_dentry_operations; + else + sb->s_d_op = &v9fs_dentry_operations; + inode = v9fs_get_inode(sb, S_IFDIR | mode); if (IS_ERR(inode)) { retval = PTR_ERR(inode); @@ -217,9 +222,6 @@ static void v9fs_kill_super(struct super_block *s) P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); - if (s->s_root) - v9fs_dentry_release(s->s_root); /* clunk root */ - kill_anon_super(s); v9fs_session_cancel(v9ses); diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index bf7693c384f9..3b4a764ed780 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -276,7 +276,6 @@ adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) struct object_info obj; int error; - d_set_d_op(dentry, &adfs_dentry_operations); lock_kernel(); error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj); if (error == 0) { diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a4041b52fbca..2d7954049fbe 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -473,6 +473,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) asb->s_namelen = ADFS_F_NAME_LEN; } + sb->s_d_op = &adfs_dentry_operations; root = adfs_iget(sb, &root_obj); sb->s_root = d_alloc_root(root); if (!sb->s_root) { @@ -483,8 +484,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) kfree(asb->s_map); adfs_error(sb, "get root inode failed\n"); goto error; - } else - d_set_d_op(sb->s_root, &adfs_dentry_operations); + } unlock_kernel(); return 0; diff --git a/fs/affs/affs.h b/fs/affs/affs.h index a8cbdeb34025..0e95f73a7023 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -201,6 +201,7 @@ extern const struct address_space_operations affs_aops; extern const struct address_space_operations affs_aops_ofs; extern const struct dentry_operations affs_dentry_operations; +extern const struct dentry_operations affs_intl_dentry_operations; static inline void affs_set_blocksize(struct super_block *sb, int size) diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 944a4042fb65..e3e9efc1fdd8 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -32,7 +32,7 @@ const struct dentry_operations affs_dentry_operations = { .d_compare = affs_compare_dentry, }; -static const struct dentry_operations affs_intl_dentry_operations = { +const struct dentry_operations affs_intl_dentry_operations = { .d_hash = affs_intl_hash_dentry, .d_compare = affs_intl_compare_dentry, }; @@ -240,7 +240,6 @@ affs_lookup(struct inode 
*dir, struct dentry *dentry, struct nameidata *nd) if (IS_ERR(inode)) return ERR_CAST(inode); } - d_set_d_op(dentry, AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations); d_add(dentry, inode); return NULL; } diff --git a/fs/affs/super.c b/fs/affs/super.c index d39081bbe7ce..b31507d0f9b9 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -477,12 +477,16 @@ got_root: goto out_error_noinode; } + if (AFFS_SB(sb)->s_flags & SF_INTL) + sb->s_d_op = &affs_intl_dentry_operations; + else + sb->s_d_op = &affs_dentry_operations; + sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) { printk(KERN_ERR "AFFS: Get root inode failed\n"); goto out_error; } - d_set_d_op(sb->s_root, &affs_dentry_operations); pr_debug("AFFS: s_flags=%lX\n",sb->s_flags); return 0; diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index a3bcec75c54a..1c8c6cc6de30 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -289,7 +289,7 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, call->server = server; INIT_WORK(&call->work, SRXAFSCB_CallBack); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -336,7 +336,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call, call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -367,7 +367,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call, call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -400,7 +400,7 @@ static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_Probe); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -496,7 +496,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -580,6 +580,6 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 34a3263d60a4..e6a4ab980e31 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = { .setattr = afs_setattr, }; -static const struct dentry_operations afs_fs_dentry_operations = { +const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, @@ -582,8 +582,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, } success: - d_set_d_op(dentry, &afs_fs_dentry_operations); - d_add(dentry, inode); _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", fid.vnode, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 6d4bc1c8ff60..58c633b80246 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -486,6 +486,7 @@ extern bool afs_cm_incoming_call(struct afs_call *); * dir.c */ extern const struct inode_operations afs_dir_inode_operations; +extern const struct dentry_operations afs_fs_dentry_operations; extern const struct file_operations afs_dir_file_operations; /* @@ -576,6 +577,7 @@ extern int 
afs_drop_inode(struct inode *); /* * main.c */ +extern struct workqueue_struct *afs_wq; extern struct afs_uuid afs_uuid; /* diff --git a/fs/afs/main.c b/fs/afs/main.c index cfd1cbe25b22..42dd2e499ed8 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -30,6 +30,7 @@ module_param(rootcell, charp, 0); MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); struct afs_uuid afs_uuid; +struct workqueue_struct *afs_wq; /* * get a client UUID @@ -87,10 +88,16 @@ static int __init afs_init(void) if (ret < 0) return ret; + /* create workqueue */ + ret = -ENOMEM; + afs_wq = alloc_workqueue("afs", 0, 0); + if (!afs_wq) + return ret; + /* register the /proc stuff */ ret = afs_proc_init(); if (ret < 0) - return ret; + goto error_proc; #ifdef CONFIG_AFS_FSCACHE /* we want to be able to cache */ @@ -140,6 +147,8 @@ error_cell_init: error_cache: #endif afs_proc_cleanup(); +error_proc: + destroy_workqueue(afs_wq); rcu_barrier(); printk(KERN_ERR "kAFS: failed to register: %d\n", ret); return ret; @@ -163,7 +172,7 @@ static void __exit afs_exit(void) afs_purge_servers(); afs_callback_update_kill(); afs_vlocation_purge(); - flush_scheduled_work(); + destroy_workqueue(afs_wq); afs_cell_purge(); #ifdef CONFIG_AFS_FSCACHE fscache_unregister_netfs(&afs_cache_netfs); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 6153417caf57..e83c0336e7b5 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -268,8 +268,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) path_put(&nd->path); nd->path.mnt = newmnt; nd->path.dentry = dget(newmnt->mnt_root); - schedule_delayed_work(&afs_mntpt_expiry_timer, - afs_mntpt_expiry_timeout * HZ); + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, + afs_mntpt_expiry_timeout * HZ); break; case -EBUSY: /* someone else made a mount here whilst we were busy */ @@ -295,8 +295,8 @@ static void afs_mntpt_expiry_timed_out(struct work_struct *work) if (!list_empty(&afs_vfsmounts)) { mark_mounts_for_expiry(&afs_vfsmounts); - schedule_delayed_work(&afs_mntpt_expiry_timer, - afs_mntpt_expiry_timeout * HZ); + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, + afs_mntpt_expiry_timeout * HZ); } _leave(""); @@ -310,6 +310,5 @@ void afs_mntpt_kill_timer(void) _enter(""); ASSERT(list_empty(&afs_vfsmounts)); - cancel_delayed_work(&afs_mntpt_expiry_timer); - flush_scheduled_work(); + cancel_delayed_work_sync(&afs_mntpt_expiry_timer); } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 654d8fdbf01f..e45a323aebb4 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -410,7 +410,7 @@ static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, if (!call) { /* its an incoming call for our callback service */ skb_queue_tail(&afs_incoming_calls, skb); - schedule_work(&afs_collect_incoming_call_work); + queue_work(afs_wq, &afs_collect_incoming_call_work); } else { /* route the messages directly to the appropriate call */ skb_queue_tail(&call->rx_queue, skb); diff --git a/fs/afs/server.c b/fs/afs/server.c index 9fdc7fe3a7bc..d59b7516e943 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -238,8 +238,8 @@ void afs_put_server(struct afs_server *server) if (atomic_read(&server->usage) == 0) { list_move_tail(&server->grave, &afs_server_graveyard); server->time_of_death = get_seconds(); - schedule_delayed_work(&afs_server_reaper, - afs_server_timeout * HZ); + queue_delayed_work(afs_wq, &afs_server_reaper, + afs_server_timeout * HZ); } spin_unlock(&afs_server_graveyard_lock); _leave(" [dead]"); @@ -285,10 +285,11 @@ static void 
afs_reap_server(struct work_struct *work) expiry = server->time_of_death + afs_server_timeout; if (expiry > now) { delay = (expiry - now) * HZ; - if (!schedule_delayed_work(&afs_server_reaper, delay)) { + if (!queue_delayed_work(afs_wq, &afs_server_reaper, + delay)) { cancel_delayed_work(&afs_server_reaper); - schedule_delayed_work(&afs_server_reaper, - delay); + queue_delayed_work(afs_wq, &afs_server_reaper, + delay); } break; } @@ -323,5 +324,5 @@ void __exit afs_purge_servers(void) { afs_server_timeout = 0; cancel_delayed_work(&afs_server_reaper); - schedule_delayed_work(&afs_server_reaper, 0); + queue_delayed_work(afs_wq, &afs_server_reaper, 0); } diff --git a/fs/afs/super.c b/fs/afs/super.c index f901a9d7c111..fb240e8766d6 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -336,6 +336,7 @@ static int afs_fill_super(struct super_block *sb, void *data) if (!root) goto error; + sb->s_d_op = &afs_fs_dentry_operations; sb->s_root = root; _leave(" = 0"); diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index 9ac260d1361d..431984d2e372 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c @@ -507,8 +507,8 @@ void afs_put_vlocation(struct afs_vlocation *vl) _debug("buried"); list_move_tail(&vl->grave, &afs_vlocation_graveyard); vl->time_of_death = get_seconds(); - schedule_delayed_work(&afs_vlocation_reap, - afs_vlocation_timeout * HZ); + queue_delayed_work(afs_wq, &afs_vlocation_reap, + afs_vlocation_timeout * HZ); /* suspend updates on this record */ if (!list_empty(&vl->update)) { @@ -561,11 +561,11 @@ static void afs_vlocation_reaper(struct work_struct *work) if (expiry > now) { delay = (expiry - now) * HZ; _debug("delay %lu", delay); - if (!schedule_delayed_work(&afs_vlocation_reap, - delay)) { + if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, + delay)) { cancel_delayed_work(&afs_vlocation_reap); - schedule_delayed_work(&afs_vlocation_reap, - delay); + queue_delayed_work(afs_wq, &afs_vlocation_reap, + delay); } break; } @@ -620,7 +620,7 @@ void afs_vlocation_purge(void) destroy_workqueue(afs_vlocation_update_worker); cancel_delayed_work(&afs_vlocation_reap); - schedule_delayed_work(&afs_vlocation_reap, 0); + queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); } /* diff --git a/fs/aio.c b/fs/aio.c index 8c8f6c5b6d79..5e00f15c54aa 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -798,29 +798,12 @@ static void aio_queue_work(struct kioctx * ctx) queue_delayed_work(aio_wq, &ctx->wq, timeout); } - -/* - * aio_run_iocbs: - * Process all pending retries queued on the ioctx - * run list. - * Assumes it is operating within the aio issuer's mm - * context. - */ -static inline void aio_run_iocbs(struct kioctx *ctx) -{ - int requeue; - - spin_lock_irq(&ctx->ctx_lock); - - requeue = __aio_run_iocbs(ctx); - spin_unlock_irq(&ctx->ctx_lock); - if (requeue) - aio_queue_work(ctx); -} - /* - * just like aio_run_iocbs, but keeps running them until - * the list stays empty + * aio_run_all_iocbs: + * Process all pending retries queued on the ioctx + * run list, and keep running them until the list + * stays empty. + * Assumes it is operating within the aio issuer's mm context. 
*/ static inline void aio_run_all_iocbs(struct kioctx *ctx) { @@ -1839,7 +1822,7 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, long ret = -EINVAL; if (likely(ioctx)) { - if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0)) + if (likely(min_nr <= nr && min_nr >= 0)) ret = read_events(ioctx, min_nr, nr, events, timeout); put_ioctx(ioctx); } diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 5fd38112a6ca..cbe57f3c4d89 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -26,12 +26,6 @@ static struct vfsmount *anon_inode_mnt __read_mostly; static struct inode *anon_inode_inode; static const struct file_operations anon_inode_fops; -static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC); -} - /* * anon_inodefs_dname() is called from d_path(). */ @@ -41,14 +35,22 @@ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) dentry->d_name.name); } +static const struct dentry_operations anon_inodefs_dentry_operations = { + .d_dname = anon_inodefs_dname, +}; + +static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, "anon_inode:", NULL, + &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); +} + static struct file_system_type anon_inode_fs_type = { .name = "anon_inodefs", .mount = anon_inodefs_mount, .kill_sb = kill_anon_super, }; -static const struct dentry_operations anon_inodefs_dentry_operations = { - .d_dname = anon_inodefs_dname, -}; /* * nop .set_page_dirty method so that people can use .page_mkwrite on @@ -64,9 +66,9 @@ static const struct address_space_operations anon_aops = { }; /** - * anon_inode_getfd - creates a new file instance by hooking it up to an - * anonymous inode, and a dentry that describe the "class" - * of the file + * anon_inode_getfile - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describe the "class" + * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file @@ -113,7 +115,6 @@ struct file *anon_inode_getfile(const char *name, */ ihold(anon_inode_inode); - d_set_d_op(path.dentry, &anon_inodefs_dentry_operations); d_instantiate(path.dentry, anon_inode_inode); error = -ENFILE; diff --git a/fs/befs/endian.h b/fs/befs/endian.h index 6cb84d896d05..27223878ba9f 100644 --- a/fs/befs/endian.h +++ b/fs/befs/endian.h @@ -102,22 +102,22 @@ cpu_to_fsrun(const struct super_block *sb, befs_block_run n) } static inline befs_data_stream -fsds_to_cpu(const struct super_block *sb, befs_disk_data_stream n) +fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n) { befs_data_stream data; int i; for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i) - data.direct[i] = fsrun_to_cpu(sb, n.direct[i]); + data.direct[i] = fsrun_to_cpu(sb, n->direct[i]); - data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range); - data.indirect = fsrun_to_cpu(sb, n.indirect); - data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range); - data.double_indirect = fsrun_to_cpu(sb, n.double_indirect); + data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range); + data.indirect = fsrun_to_cpu(sb, n->indirect); + data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range); + data.double_indirect = fsrun_to_cpu(sb, n->double_indirect); data.max_double_indirect_range = fs64_to_cpu(sb, - n. 
+ n-> max_double_indirect_range); - data.size = fs64_to_cpu(sb, n.size); + data.size = fs64_to_cpu(sb, n->size); return data; } diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index de93581b79a2..b1d0c794747b 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -390,7 +390,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) int num_blks; befs_ino->i_data.ds = - fsds_to_cpu(sb, raw_inode->data.datastream); + fsds_to_cpu(sb, &raw_inode->data.datastream); num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds); inode->i_blocks = diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 6884e198e0c7..d5b640ba6cb1 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -66,12 +66,11 @@ static int elf_core_dump(struct coredump_params *cprm); #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) static struct linux_binfmt elf_format = { - .module = THIS_MODULE, - .load_binary = load_elf_binary, - .load_shlib = load_elf_library, - .core_dump = elf_core_dump, - .min_coredump = ELF_EXEC_PAGESIZE, - .hasvdso = 1 + .module = THIS_MODULE, + .load_binary = load_elf_binary, + .load_shlib = load_elf_library, + .core_dump = elf_core_dump, + .min_coredump = ELF_EXEC_PAGESIZE, }; #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) @@ -316,8 +315,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, return 0; } -#ifndef elf_map - static unsigned long elf_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long total_size) @@ -354,8 +351,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, return(map_addr); } -#endif /* !elf_map */ - static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr) { int i, first_idx = -1, last_idx = -1; @@ -421,7 +416,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, goto out; retval = kernel_read(interpreter, interp_elf_ex->e_phoff, - (char *)elf_phdata,size); + (char *)elf_phdata, size); error = -EIO; if (retval != size) { if (retval < 0) @@ -601,7 +596,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto out; if (!elf_check_arch(&loc->elf_ex)) goto out; - if (!bprm->file->f_op||!bprm->file->f_op->mmap) + if (!bprm->file->f_op || !bprm->file->f_op->mmap) goto out; /* Now read in all of the header information */ @@ -761,8 +756,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* There was a PT_LOAD segment with p_memsz > p_filesz before this one. Map anonymous pages, if needed, and clear the area. */ - retval = set_brk (elf_bss + load_bias, - elf_brk + load_bias); + retval = set_brk(elf_bss + load_bias, + elf_brk + load_bias); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 4d0ff5ee27b8..e49cce234c65 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -782,7 +782,12 @@ void __init bio_integrity_init(void) { unsigned int i; - kintegrityd_wq = create_workqueue("kintegrityd"); + /* + * kintegrityd won't block much but may burn a lot of CPU cycles. + * Make it highpri CPU intensive wq with max concurrency of 1. 
+ */ + kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); if (!kintegrityd_wq) panic("Failed to create kintegrityd\n"); diff --git a/fs/block_dev.c b/fs/block_dev.c index 771f23527010..333a7bb4cb9c 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -433,7 +433,7 @@ static void init_once(void *foo) INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); #ifdef CONFIG_SYSFS - INIT_LIST_HEAD(&bdev->bd_holder_list); + INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. */ @@ -473,7 +473,7 @@ static const struct super_operations bdev_sops = { static struct dentry *bd_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576); + return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576); } static struct file_system_type bd_type = { @@ -669,7 +669,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, else if (bdev->bd_contains == bdev) return true; /* is a whole device which isn't held */ - else if (whole->bd_holder == bd_claim) + else if (whole->bd_holder == bd_may_claim) return true; /* is a partition of a device that is being partitioned */ else if (whole->bd_holder != NULL) return false; /* is a partition of a held device */ @@ -781,439 +781,142 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, } } -/* releases bdev_lock */ -static void __bd_abort_claiming(struct block_device *whole, void *holder) -{ - BUG_ON(whole->bd_claiming != holder); - whole->bd_claiming = NULL; - wake_up_bit(&whole->bd_claiming, 0); - - spin_unlock(&bdev_lock); - bdput(whole); -} - -/** - * bd_abort_claiming - abort claiming a block device - * @whole: whole block device returned by bd_start_claiming() - * @holder: holder trying to claim @bdev - * - * Abort a claiming block started by bd_start_claiming(). Note that - * @whole is not the block device to be claimed but the whole device - * returned by bd_start_claiming(). - * - * CONTEXT: - * Grabs and releases bdev_lock. - */ -static void bd_abort_claiming(struct block_device *whole, void *holder) -{ - spin_lock(&bdev_lock); - __bd_abort_claiming(whole, holder); /* releases bdev_lock */ -} - -/* increment holders when we have a legitimate claim. requires bdev_lock */ -static void __bd_claim(struct block_device *bdev, struct block_device *whole, - void *holder) -{ - /* note that for a whole device bd_holders - * will be incremented twice, and bd_holder will - * be set to bd_claim before being set to holder - */ - whole->bd_holders++; - whole->bd_holder = bd_claim; - bdev->bd_holders++; - bdev->bd_holder = holder; -} - -/** - * bd_finish_claiming - finish claiming a block device - * @bdev: block device of interest (passed to bd_start_claiming()) - * @whole: whole block device returned by bd_start_claiming() - * @holder: holder trying to claim @bdev - * - * Finish a claiming block started by bd_start_claiming(). - * - * CONTEXT: - * Grabs and releases bdev_lock. 
- */ -static void bd_finish_claiming(struct block_device *bdev, - struct block_device *whole, void *holder) -{ - spin_lock(&bdev_lock); - BUG_ON(!bd_may_claim(bdev, whole, holder)); - __bd_claim(bdev, whole, holder); - __bd_abort_claiming(whole, holder); /* not actually an abort */ -} +#ifdef CONFIG_SYSFS +struct bd_holder_disk { + struct list_head list; + struct gendisk *disk; + int refcnt; +}; -/** - * bd_claim - claim a block device - * @bdev: block device to claim - * @holder: holder trying to claim @bdev - * - * Try to claim @bdev which must have been opened successfully. - * - * CONTEXT: - * Might sleep. - * - * RETURNS: - * 0 if successful, -EBUSY if @bdev is already claimed. - */ -int bd_claim(struct block_device *bdev, void *holder) +static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, + struct gendisk *disk) { - struct block_device *whole = bdev->bd_contains; - int res; + struct bd_holder_disk *holder; - might_sleep(); - - spin_lock(&bdev_lock); - res = bd_prepare_to_claim(bdev, whole, holder); - if (res == 0) - __bd_claim(bdev, whole, holder); - spin_unlock(&bdev_lock); - - return res; -} -EXPORT_SYMBOL(bd_claim); - -void bd_release(struct block_device *bdev) -{ - spin_lock(&bdev_lock); - if (!--bdev->bd_contains->bd_holders) - bdev->bd_contains->bd_holder = NULL; - if (!--bdev->bd_holders) - bdev->bd_holder = NULL; - spin_unlock(&bdev_lock); + list_for_each_entry(holder, &bdev->bd_holder_disks, list) + if (holder->disk == disk) + return holder; + return NULL; } -EXPORT_SYMBOL(bd_release); - -#ifdef CONFIG_SYSFS -/* - * Functions for bd_claim_by_kobject / bd_release_from_kobject - * - * If a kobject is passed to bd_claim_by_kobject() - * and the kobject has a parent directory, - * following symlinks are created: - * o from the kobject to the claimed bdev - * o from "holders" directory of the bdev to the parent of the kobject - * bd_release_from_kobject() removes these symlinks. - * - * Example: - * If /dev/dm-0 maps to /dev/sda, kobject corresponding to - * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: - * /sys/block/dm-0/slaves/sda --> /sys/block/sda - * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 - */ - static int add_symlink(struct kobject *from, struct kobject *to) { - if (!from || !to) - return 0; return sysfs_create_link(from, to, kobject_name(to)); } static void del_symlink(struct kobject *from, struct kobject *to) { - if (!from || !to) - return; sysfs_remove_link(from, kobject_name(to)); } -/* - * 'struct bd_holder' contains pointers to kobjects symlinked by - * bd_claim_by_kobject. - * It's connected to bd_holder_list which is protected by bdev->bd_sem. - */ -struct bd_holder { - struct list_head list; /* chain of holders of the bdev */ - int count; /* references from the holder */ - struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ - struct kobject *hdev; /* e.g. "/block/dm-0" */ - struct kobject *hdir; /* e.g. "/block/sda/holders" */ - struct kobject *sdev; /* e.g. "/block/sda" */ -}; - -/* - * Get references of related kobjects at once. - * Returns 1 on success. 0 on failure. - * - * Should call bd_holder_release_dirs() after successful use. 
- */ -static int bd_holder_grab_dirs(struct block_device *bdev, - struct bd_holder *bo) -{ - if (!bdev || !bo) - return 0; - - bo->sdir = kobject_get(bo->sdir); - if (!bo->sdir) - return 0; - - bo->hdev = kobject_get(bo->sdir->parent); - if (!bo->hdev) - goto fail_put_sdir; - - bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj); - if (!bo->sdev) - goto fail_put_hdev; - - bo->hdir = kobject_get(bdev->bd_part->holder_dir); - if (!bo->hdir) - goto fail_put_sdev; - - return 1; - -fail_put_sdev: - kobject_put(bo->sdev); -fail_put_hdev: - kobject_put(bo->hdev); -fail_put_sdir: - kobject_put(bo->sdir); - - return 0; -} - -/* Put references of related kobjects at once. */ -static void bd_holder_release_dirs(struct bd_holder *bo) -{ - kobject_put(bo->hdir); - kobject_put(bo->sdev); - kobject_put(bo->hdev); - kobject_put(bo->sdir); -} - -static struct bd_holder *alloc_bd_holder(struct kobject *kobj) -{ - struct bd_holder *bo; - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return NULL; - - bo->count = 1; - bo->sdir = kobj; - - return bo; -} - -static void free_bd_holder(struct bd_holder *bo) -{ - kfree(bo); -} - /** - * find_bd_holder - find matching struct bd_holder from the block device + * bd_link_disk_holder - create symlinks between holding disk and slave bdev + * @bdev: the claimed slave bdev + * @disk: the holding disk * - * @bdev: struct block device to be searched - * @bo: target struct bd_holder + * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. * - * Returns matching entry with @bo in @bdev->bd_holder_list. - * If found, increment the reference count and return the pointer. - * If not found, returns NULL. - */ -static struct bd_holder *find_bd_holder(struct block_device *bdev, - struct bd_holder *bo) -{ - struct bd_holder *tmp; - - list_for_each_entry(tmp, &bdev->bd_holder_list, list) - if (tmp->sdir == bo->sdir) { - tmp->count++; - return tmp; - } - - return NULL; -} - -/** - * add_bd_holder - create sysfs symlinks for bd_claim() relationship + * This functions creates the following sysfs symlinks. + * + * - from "slaves" directory of the holder @disk to the claimed @bdev + * - from "holders" directory of the @bdev to the holder @disk + * + * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is + * passed to bd_link_disk_holder(), then: * - * @bdev: block device to be bd_claimed - * @bo: preallocated and initialized by alloc_bd_holder() + * /sys/block/dm-0/slaves/sda --> /sys/block/sda + * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 * - * Add @bo to @bdev->bd_holder_list, create symlinks. + * The caller must have claimed @bdev before calling this function and + * ensure that both @bdev and @disk are valid during the creation and + * lifetime of these symlinks. * - * Returns 0 if symlinks are created. - * Returns -ve if something fails. + * CONTEXT: + * Might sleep. + * + * RETURNS: + * 0 on success, -errno on failure. 
*/ -static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { - int err; + struct bd_holder_disk *holder; + int ret = 0; - if (!bo) - return -EINVAL; + mutex_lock(&bdev->bd_mutex); - if (!bd_holder_grab_dirs(bdev, bo)) - return -EBUSY; + WARN_ON_ONCE(!bdev->bd_holder); - err = add_symlink(bo->sdir, bo->sdev); - if (err) - return err; + /* FIXME: remove the following once add_disk() handles errors */ + if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) + goto out_unlock; - err = add_symlink(bo->hdir, bo->hdev); - if (err) { - del_symlink(bo->sdir, bo->sdev); - return err; + holder = bd_find_holder_disk(bdev, disk); + if (holder) { + holder->refcnt++; + goto out_unlock; } - list_add_tail(&bo->list, &bdev->bd_holder_list); - return 0; -} - -/** - * del_bd_holder - delete sysfs symlinks for bd_claim() relationship - * - * @bdev: block device to be bd_claimed - * @kobj: holder's kobject - * - * If there is matching entry with @kobj in @bdev->bd_holder_list - * and no other bd_claim() from the same kobject, - * remove the struct bd_holder from the list, delete symlinks for it. - * - * Returns a pointer to the struct bd_holder when it's removed from the list - * and ready to be freed. - * Returns NULL if matching claim isn't found or there is other bd_claim() - * by the same kobject. - */ -static struct bd_holder *del_bd_holder(struct block_device *bdev, - struct kobject *kobj) -{ - struct bd_holder *bo; - - list_for_each_entry(bo, &bdev->bd_holder_list, list) { - if (bo->sdir == kobj) { - bo->count--; - BUG_ON(bo->count < 0); - if (!bo->count) { - list_del(&bo->list); - del_symlink(bo->sdir, bo->sdev); - del_symlink(bo->hdir, bo->hdev); - bd_holder_release_dirs(bo); - return bo; - } - break; - } + holder = kzalloc(sizeof(*holder), GFP_KERNEL); + if (!holder) { + ret = -ENOMEM; + goto out_unlock; } - return NULL; -} - -/** - * bd_claim_by_kobject - bd_claim() with additional kobject signature - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @kobj: holder's kobject - * - * Do bd_claim() and if it succeeds, create sysfs symlinks between - * the bdev and the holder's kobject. - * Use bd_release_from_kobject() when relesing the claimed bdev. - * - * Returns 0 on success. (same as bd_claim()) - * Returns errno on failure. 
- */ -static int bd_claim_by_kobject(struct block_device *bdev, void *holder, - struct kobject *kobj) -{ - int err; - struct bd_holder *bo, *found; - - if (!kobj) - return -EINVAL; - - bo = alloc_bd_holder(kobj); - if (!bo) - return -ENOMEM; + INIT_LIST_HEAD(&holder->list); + holder->disk = disk; + holder->refcnt = 1; - mutex_lock(&bdev->bd_mutex); + ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + if (ret) + goto out_free; - err = bd_claim(bdev, holder); - if (err) - goto fail; + ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + if (ret) + goto out_del; - found = find_bd_holder(bdev, bo); - if (found) - goto fail; + list_add(&holder->list, &bdev->bd_holder_disks); + goto out_unlock; - err = add_bd_holder(bdev, bo); - if (err) - bd_release(bdev); - else - bo = NULL; -fail: +out_del: + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); +out_free: + kfree(holder); +out_unlock: mutex_unlock(&bdev->bd_mutex); - free_bd_holder(bo); - return err; + return ret; } +EXPORT_SYMBOL_GPL(bd_link_disk_holder); /** - * bd_release_from_kobject - bd_release() with additional kobject signature + * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder() + * @bdev: the calimed slave bdev + * @disk: the holding disk * - * @bdev: block device to be released - * @kobj: holder's kobject + * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. * - * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). + * CONTEXT: + * Might sleep. */ -static void bd_release_from_kobject(struct block_device *bdev, - struct kobject *kobj) +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { - if (!kobj) - return; + struct bd_holder_disk *holder; mutex_lock(&bdev->bd_mutex); - bd_release(bdev); - free_bd_holder(del_bd_holder(bdev, kobj)); - mutex_unlock(&bdev->bd_mutex); -} -/** - * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @disk: holder's gendisk - * - * Call bd_claim_by_kobject() with getting @disk->slave_dir. - */ -int bd_claim_by_disk(struct block_device *bdev, void *holder, - struct gendisk *disk) -{ - return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); -} -EXPORT_SYMBOL_GPL(bd_claim_by_disk); + holder = bd_find_holder_disk(bdev, disk); -/** - * bd_release_from_disk - wrapper function for bd_release_from_kobject() - * - * @bdev: block device to be claimed - * @disk: holder's gendisk - * - * Call bd_release_from_kobject() and put @disk->slave_dir. - */ -void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) -{ - bd_release_from_kobject(bdev, disk->slave_dir); - kobject_put(disk->slave_dir); -} -EXPORT_SYMBOL_GPL(bd_release_from_disk); -#endif + if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + del_symlink(bdev->bd_part->holder_dir, + &disk_to_dev(disk)->kobj); + list_del_init(&holder->list); + kfree(holder); + } -/* - * Tries to open block device by device number. Use it ONLY if you - * really do not have anything better - i.e. when you are behind a - * truly sucky interface and all you are given is a device number. _Never_ - * to be used for internal purposes. If you ever need it - reconsider - * your API. - */ -struct block_device *open_by_devnum(dev_t dev, fmode_t mode) -{ - struct block_device *bdev = bdget(dev); - int err = -ENOMEM; - if (bdev) - err = blkdev_get(bdev, mode); - return err ? 
ERR_PTR(err) : bdev; + mutex_unlock(&bdev->bd_mutex); } - -EXPORT_SYMBOL(open_by_devnum); +EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); +#endif /** * flush_disk - invalidates all buffer-cache entries on a disk @@ -1309,10 +1012,11 @@ int check_disk_change(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; const struct block_device_operations *bdops = disk->fops; + unsigned int events; - if (!bdops->media_changed) - return 0; - if (!bdops->media_changed(bdev->bd_disk)) + events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE | + DISK_EVENT_EJECT_REQUEST); + if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; flush_disk(bdev); @@ -1475,17 +1179,171 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) return ret; } -int blkdev_get(struct block_device *bdev, fmode_t mode) +/** + * blkdev_get - open a block device + * @bdev: block_device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is + * open with exclusive access. Specifying %FMODE_EXCL with %NULL + * @holder is invalid. Exclusive opens may nest for the same @holder. + * + * On success, the reference count of @bdev is unchanged. On failure, + * @bdev is put. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) { - return __blkdev_get(bdev, mode, 0); + struct block_device *whole = NULL; + int res; + + WARN_ON_ONCE((mode & FMODE_EXCL) && !holder); + + if ((mode & FMODE_EXCL) && holder) { + whole = bd_start_claiming(bdev, holder); + if (IS_ERR(whole)) { + bdput(bdev); + return PTR_ERR(whole); + } + } + + res = __blkdev_get(bdev, mode, 0); + + /* __blkdev_get() may alter read only status, check it afterwards */ + if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { + __blkdev_put(bdev, mode, 0); + res = -EACCES; + } + + if (whole) { + /* finish claiming */ + mutex_lock(&bdev->bd_mutex); + spin_lock(&bdev_lock); + + if (!res) { + BUG_ON(!bd_may_claim(bdev, whole, holder)); + /* + * Note that for a whole device bd_holders + * will be incremented twice, and bd_holder + * will be set to bd_may_claim before being + * set to holder + */ + whole->bd_holders++; + whole->bd_holder = bd_may_claim; + bdev->bd_holders++; + bdev->bd_holder = holder; + } + + /* tell others that we're done */ + BUG_ON(whole->bd_claiming != holder); + whole->bd_claiming = NULL; + wake_up_bit(&whole->bd_claiming, 0); + + spin_unlock(&bdev_lock); + + /* + * Block event polling for write claims. Any write + * holder makes the write_holder state stick until all + * are released. This is good enough and tracking + * individual writeable reference is too fragile given + * the way @mode is used in blkdev_get/put(). + */ + if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { + bdev->bd_write_holder = true; + disk_block_events(bdev->bd_disk); + } + + mutex_unlock(&bdev->bd_mutex); + bdput(whole); + } + + return res; } EXPORT_SYMBOL(blkdev_get); +/** + * blkdev_get_by_path - open a block device by name + * @path: path to the block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the blockdevice described by the device file at @path. @mode + * and @holder are identical to blkdev_get(). + * + * On success, the returned block_device has reference count of one. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Pointer to block_device on success, ERR_PTR(-errno) on failure. 
+ */ +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder) +{ + struct block_device *bdev; + int err; + + bdev = lookup_bdev(path); + if (IS_ERR(bdev)) + return bdev; + + err = blkdev_get(bdev, mode, holder); + if (err) + return ERR_PTR(err); + + return bdev; +} +EXPORT_SYMBOL(blkdev_get_by_path); + +/** + * blkdev_get_by_dev - open a block device by device number + * @dev: device number of block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the blockdevice described by device number @dev. @mode and + * @holder are identical to blkdev_get(). + * + * Use it ONLY if you really do not have anything better - i.e. when + * you are behind a truly sucky interface and all you are given is a + * device number. _Never_ to be used for internal purposes. If you + * ever need it - reconsider your API. + * + * On success, the returned block_device has reference count of one. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Pointer to block_device on success, ERR_PTR(-errno) on failure. + */ +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) +{ + struct block_device *bdev; + int err; + + bdev = bdget(dev); + if (!bdev) + return ERR_PTR(-ENOMEM); + + err = blkdev_get(bdev, mode, holder); + if (err) + return ERR_PTR(err); + + return bdev; +} +EXPORT_SYMBOL(blkdev_get_by_dev); + static int blkdev_open(struct inode * inode, struct file * filp) { - struct block_device *whole = NULL; struct block_device *bdev; - int res; /* * Preserve backwards compatibility and allow large file access @@ -1506,26 +1364,9 @@ static int blkdev_open(struct inode * inode, struct file * filp) if (bdev == NULL) return -ENOMEM; - if (filp->f_mode & FMODE_EXCL) { - whole = bd_start_claiming(bdev, filp); - if (IS_ERR(whole)) { - bdput(bdev); - return PTR_ERR(whole); - } - } - filp->f_mapping = bdev->bd_inode->i_mapping; - res = blkdev_get(bdev, filp->f_mode); - - if (whole) { - if (res == 0) - bd_finish_claiming(bdev, whole, filp); - else - bd_abort_claiming(whole, filp); - } - - return res; + return blkdev_get(bdev, filp->f_mode, filp); } static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) @@ -1539,6 +1380,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_part_count--; if (!--bdev->bd_openers) { + WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); } @@ -1569,6 +1411,44 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) int blkdev_put(struct block_device *bdev, fmode_t mode) { + if (mode & FMODE_EXCL) { + bool bdev_free; + + /* + * Release a claim on the device. The holder fields + * are protected with bdev_lock. bd_mutex is to + * synchronize disk_holder unlinking. + */ + mutex_lock(&bdev->bd_mutex); + spin_lock(&bdev_lock); + + WARN_ON_ONCE(--bdev->bd_holders < 0); + WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); + + /* bd_contains might point to self, check in a separate step */ + if ((bdev_free = !bdev->bd_holders)) + bdev->bd_holder = NULL; + if (!bdev->bd_contains->bd_holders) + bdev->bd_contains->bd_holder = NULL; + + spin_unlock(&bdev_lock); + + /* + * If this was the last claim, remove holder link and + * unblock evpoll if it was a write holder. 
+ */ + if (bdev_free) { + if (bdev->bd_write_holder) { + disk_unblock_events(bdev->bd_disk); + bdev->bd_write_holder = false; + } else + disk_check_events(bdev->bd_disk); + } + + mutex_unlock(&bdev->bd_mutex); + } else + disk_check_events(bdev->bd_disk); + return __blkdev_put(bdev, mode, 0); } EXPORT_SYMBOL(blkdev_put); @@ -1576,8 +1456,7 @@ EXPORT_SYMBOL(blkdev_put); static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); - if (bdev->bd_holder == filp) - bd_release(bdev); + return blkdev_put(bdev, filp->f_mode); } @@ -1722,67 +1601,6 @@ fail: } EXPORT_SYMBOL(lookup_bdev); -/** - * open_bdev_exclusive - open a block device by name and set it up for use - * - * @path: special file representing the block device - * @mode: FMODE_... combination to pass be used - * @holder: owner for exclusion - * - * Open the blockdevice described by the special file at @path, claim it - * for the @holder. - */ -struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder) -{ - struct block_device *bdev, *whole; - int error; - - bdev = lookup_bdev(path); - if (IS_ERR(bdev)) - return bdev; - - whole = bd_start_claiming(bdev, holder); - if (IS_ERR(whole)) { - bdput(bdev); - return whole; - } - - error = blkdev_get(bdev, mode); - if (error) - goto out_abort_claiming; - - error = -EACCES; - if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) - goto out_blkdev_put; - - bd_finish_claiming(bdev, whole, holder); - return bdev; - -out_blkdev_put: - blkdev_put(bdev, mode); -out_abort_claiming: - bd_abort_claiming(whole, holder); - return ERR_PTR(error); -} - -EXPORT_SYMBOL(open_bdev_exclusive); - -/** - * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive() - * - * @bdev: blockdevice to close - * @mode: mode, must match that used to open. - * - * This is the counterpart to open_bdev_exclusive(). 
- */ -void close_bdev_exclusive(struct block_device *bdev, fmode_t mode) -{ - bd_release(bdev); - blkdev_put(bdev, mode); -} - -EXPORT_SYMBOL(close_bdev_exclusive); - int __invalidate_device(struct block_device *bdev) { struct super_block *sb = get_super(bdev); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 0ccf9a8afcdf..9786963b07e5 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -65,7 +65,6 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, { struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info; struct btrfs_root *root; - struct dentry *dentry; struct inode *inode; struct btrfs_key key; int index; @@ -108,10 +107,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, return ERR_PTR(-ESTALE); } - dentry = d_obtain_alias(inode); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(inode); fail: srcu_read_unlock(&fs_info->subvol_srcu, index); return ERR_PTR(err); @@ -166,7 +162,6 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, static struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = child->d_inode; - struct dentry *dentry; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; @@ -223,10 +218,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0ff46a47895..a3798a3aa0d2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4084,8 +4084,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) int index; int ret; - d_set_d_op(dentry, &btrfs_dentry_operations); - if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); @@ -7117,6 +7115,10 @@ static long btrfs_fallocate(struct inode *inode, int mode, alloc_start = offset & ~mask; alloc_end = (offset + len + mask) & ~mask; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + /* * wait for ordered IO before we have any locks. We'll loop again * below with the locks held. 
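The btrfs_fill_super() hunk just below is one instance of a pattern repeated across 9p, adfs, affs, afs and cifs in this merge: instead of calling d_set_d_op() on each dentry from ->lookup(), a filesystem publishes its dentry_operations once via sb->s_d_op and newly allocated dentries pick them up from the superblock. A minimal sketch of the idiom, using made-up examplefs names that are not part of this patch:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/stat.h>

/* Hypothetical revalidate hook, only so the ops table has an entry. */
static int examplefs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	return 1;
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_revalidate = examplefs_d_revalidate,
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;

	/* Publish the default dentry ops once; dentries allocated for this
	 * superblock (including the ones ->lookup() creates) inherit them,
	 * so per-dentry d_set_d_op() calls can be dropped. */
	sb->s_d_op = &examplefs_dentry_ops;

	root = new_inode(sb);
	if (!root)
		return -ENOMEM;
	root->i_ino = 1;
	root->i_mode = S_IFDIR | 0755;

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		return -ENOMEM;
	}
	return 0;
}

As the cifs_read_super() comment further down notes, a filesystem that wants a NULL ->d_op on its root dentry simply sets sb->s_d_op after d_alloc_root() instead of before it.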
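The fs/btrfs/volumes.c hunks that follow convert btrfs from open_bdev_exclusive()/close_bdev_exclusive() to the blkdev_get_by_path()/blkdev_put() interface documented above. The calling convention, reduced to a hedged sketch (the wrapper names and the file-scope holder cookie are placeholders, not taken from this patch):

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Any stable pointer identifies the claimer; the same mode (including
 * FMODE_EXCL) must be passed to the matching blkdev_put(). */
static int example_holder_cookie;

static struct block_device *example_open_exclusive(const char *path)
{
	/* FMODE_EXCL together with a non-NULL holder requests an
	 * exclusive claim; failure comes back as ERR_PTR(-errno). */
	return blkdev_get_by_path(path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  &example_holder_cookie);
}

static void example_close_exclusive(struct block_device *bdev)
{
	/* Mode must include FMODE_EXCL again so the claim is released. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}

btrfs keeps its holder in root->fs_info->bdev_holder and records the mode it opened each device with in struct btrfs_device, which is why the conversion below simply passes device->mode back to blkdev_put().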
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 883c6fa1367e..22acdaa78ce1 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -460,6 +460,7 @@ static int btrfs_fill_super(struct super_block *sb, sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = BTRFS_SUPER_MAGIC; sb->s_op = &btrfs_super_ops; + sb->s_d_op = &btrfs_dentry_operations; sb->s_export_op = &btrfs_export_ops; sb->s_xattr = btrfs_xattr_handlers; sb->s_time_gran = 1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6b9884507837..1718e1a5c320 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -493,7 +493,7 @@ again: continue; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } @@ -527,7 +527,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode); fs_devices->open_devices--; } if (device->writeable) { @@ -584,13 +584,15 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int seeding = 1; int ret = 0; + flags |= FMODE_EXCL; + list_for_each_entry(device, head, dev_list) { if (device->bdev) continue; if (!device->name) continue; - bdev = open_bdev_exclusive(device->name, flags, holder); + bdev = blkdev_get_by_path(device->name, flags, holder); if (IS_ERR(bdev)) { printk(KERN_INFO "open %s failed\n", device->name); goto error; @@ -642,7 +644,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - close_bdev_exclusive(bdev, FMODE_READ); + blkdev_put(bdev, flags); error: continue; } @@ -688,7 +690,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, mutex_lock(&uuid_mutex); - bdev = open_bdev_exclusive(path, flags, holder); + flags |= FMODE_EXCL; + bdev = blkdev_get_by_path(path, flags, holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); @@ -720,7 +723,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, brelse(bh); error_close: - close_bdev_exclusive(bdev, flags); + blkdev_put(bdev, flags); error: mutex_unlock(&uuid_mutex); return ret; @@ -1183,8 +1186,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto out; } } else { - bdev = open_bdev_exclusive(device_path, FMODE_READ, - root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -1251,7 +1254,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1294,7 +1297,7 @@ error_brelse: brelse(bh); error_close: if (bdev) - close_bdev_exclusive(bdev, FMODE_READ); + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); @@ -1446,7 +1449,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; - bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); @@ 
-1572,7 +1576,7 @@ out: mutex_unlock(&root->fs_info->volume_mutex); return ret; error: - close_bdev_exclusive(bdev, 0); + blkdev_put(bdev, FMODE_EXCL); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 2740db49eb04..1be781079450 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -50,7 +50,7 @@ struct btrfs_device { struct block_device *bdev; - /* the mode sent to open_bdev_exclusive */ + /* the mode sent to blkdev_get */ fmode_t mode; char *name; diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 9e6c4f2e8ff1..bd352125e829 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -2,31 +2,10 @@ # Makefile for CEPH filesystem. # -ifneq ($(KERNELRELEASE),) - obj-$(CONFIG_CEPH_FS) += ceph.o -ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ +ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ export.o caps.o snap.o xattr.o \ mds_client.o mdsmap.o strings.o ceph_frag.o \ debugfs.o -else -#Otherwise we were called directly from the command -# line; invoke the kernel build system. - -KERNELDIR ?= /lib/modules/$(shell uname -r)/build -PWD := $(shell pwd) - -default: all - -all: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules - -modules_install: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install - -clean: - $(MAKE) -C $(KERNELDIR) M=$(PWD) clean - -endif diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 7ae1b3d55b58..08f65faac112 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -60,10 +60,13 @@ static int mdsc_show(struct seq_file *s, void *p) for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) { req = rb_entry(rp, struct ceph_mds_request, r_node); - if (req->r_request) - seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds); - else + if (req->r_request && req->r_session) + seq_printf(s, "%lld\tmds%d\t", req->r_tid, + req->r_session->s_mds); + else if (!req->r_request) seq_printf(s, "%lld\t(no request)\t", req->r_tid); + else + seq_printf(s, "%lld\t(no session)\t", req->r_tid); seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index fa7ca04ee816..0bc68de8edd7 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1224,6 +1224,26 @@ void ceph_dentry_lru_del(struct dentry *dn) } } +/* + * Return name hash for a given dentry. This is dependent on + * the parent directory's hash function. 
+ */ +unsigned ceph_dentry_hash(struct dentry *dn) +{ + struct inode *dir = dn->d_parent->d_inode; + struct ceph_inode_info *dci = ceph_inode(dir); + + switch (dci->i_dir_layout.dl_dir_hash) { + case 0: /* for backward compat */ + case CEPH_STR_HASH_LINUX: + return dn->d_name.hash; + + default: + return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, + dn->d_name.name, dn->d_name.len); + } +} + const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, .readdir = ceph_readdir, diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 2297d9426992..e41056174bf8 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -59,7 +59,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, dout("encode_fh %p connectable\n", dentry); cfh->ino = ceph_ino(dentry->d_inode); cfh->parent_ino = ceph_ino(parent->d_inode); - cfh->parent_name_hash = parent->d_name.hash; + cfh->parent_name_hash = ceph_dentry_hash(parent); *max_len = connected_handle_length; type = 2; } else if (*max_len >= handle_length) { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e61de4f7b99d..e835eff551e3 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_release_count = 0; ci->i_symlink = NULL; + memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); + ci->i_fragtree = RB_ROOT; mutex_init(&ci->i_fragtree_mutex); @@ -689,6 +691,8 @@ static int fill_inode(struct inode *inode, inode->i_op = &ceph_dir_iops; inode->i_fop = &ceph_dir_fops; + ci->i_dir_layout = iinfo->dir_layout; + ci->i_files = le64_to_cpu(info->files); ci->i_subdirs = le64_to_cpu(info->subdirs); ci->i_rbytes = le64_to_cpu(info->rbytes); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a50fca1e03be..1e30d194a8e3 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -60,7 +60,8 @@ static const struct ceph_connection_operations mds_con_ops; * parse individual inode info */ static int parse_reply_info_in(void **p, void *end, - struct ceph_mds_reply_info_in *info) + struct ceph_mds_reply_info_in *info, + int features) { int err = -EIO; @@ -74,6 +75,12 @@ static int parse_reply_info_in(void **p, void *end, info->symlink = *p; *p += info->symlink_len; + if (features & CEPH_FEATURE_DIRLAYOUTHASH) + ceph_decode_copy_safe(p, end, &info->dir_layout, + sizeof(info->dir_layout), bad); + else + memset(&info->dir_layout, 0, sizeof(info->dir_layout)); + ceph_decode_32_safe(p, end, info->xattr_len, bad); ceph_decode_need(p, end, info->xattr_len, bad); info->xattr_data = *p; @@ -88,12 +95,13 @@ bad: * target inode. 
*/ static int parse_reply_info_trace(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { int err; if (info->head->is_dentry) { - err = parse_reply_info_in(p, end, &info->diri); + err = parse_reply_info_in(p, end, &info->diri, features); if (err < 0) goto out_bad; @@ -114,7 +122,7 @@ static int parse_reply_info_trace(void **p, void *end, } if (info->head->is_target) { - err = parse_reply_info_in(p, end, &info->targeti); + err = parse_reply_info_in(p, end, &info->targeti, features); if (err < 0) goto out_bad; } @@ -134,7 +142,8 @@ out_bad: * parse readdir results */ static int parse_reply_info_dir(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { u32 num, i = 0; int err; @@ -182,7 +191,7 @@ static int parse_reply_info_dir(void **p, void *end, *p += sizeof(struct ceph_mds_reply_lease); /* inode */ - err = parse_reply_info_in(p, end, &info->dir_in[i]); + err = parse_reply_info_in(p, end, &info->dir_in[i], features); if (err < 0) goto out_bad; i++; @@ -205,7 +214,8 @@ out_bad: * parse fcntl F_GETLK results */ static int parse_reply_info_filelock(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { if (*p + sizeof(*info->filelock_reply) > end) goto bad; @@ -225,19 +235,21 @@ bad: * parse extra results */ static int parse_reply_info_extra(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { if (info->head->op == CEPH_MDS_OP_GETFILELOCK) - return parse_reply_info_filelock(p, end, info); + return parse_reply_info_filelock(p, end, info, features); else - return parse_reply_info_dir(p, end, info); + return parse_reply_info_dir(p, end, info, features); } /* * parse entire mds reply */ static int parse_reply_info(struct ceph_msg *msg, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { void *p, *end; u32 len; @@ -250,7 +262,7 @@ static int parse_reply_info(struct ceph_msg *msg, /* trace */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) { - err = parse_reply_info_trace(&p, p+len, info); + err = parse_reply_info_trace(&p, p+len, info, features); if (err < 0) goto out_bad; } @@ -258,7 +270,7 @@ static int parse_reply_info(struct ceph_msg *msg, /* extra */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) { - err = parse_reply_info_extra(&p, p+len, info); + err = parse_reply_info_extra(&p, p+len, info, features); if (err < 0) goto out_bad; } @@ -654,7 +666,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, } else { /* dir + name */ inode = dir; - hash = req->r_dentry->d_name.hash; + hash = ceph_dentry_hash(req->r_dentry); is_hash = true; } } @@ -1693,7 +1705,6 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, struct ceph_msg *msg; int flags = 0; - req->r_mds = mds; req->r_attempts++; if (req->r_inode) { struct ceph_cap *cap = @@ -1780,6 +1791,8 @@ static int __do_request(struct ceph_mds_client *mdsc, goto finish; } + put_request_session(req); + mds = __choose_mds(mdsc, req); if (mds < 0 || ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { @@ -1797,6 +1810,8 @@ static int __do_request(struct ceph_mds_client *mdsc, goto finish; } } + req->r_session = get_session(session); + dout("do_request mds%d session %p state %s\n", mds, session, session_state_name(session->s_state)); if (session->s_state != CEPH_MDS_SESSION_OPEN && 
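The mds_client.c changes around this point thread the peer's feature bits (session->s_con.peer_features) into every reply parser so that newer wire fields, such as the per-directory layout hash, are only decoded when the server advertised the corresponding capability (CEPH_FEATURE_DIRLAYOUTHASH). The compatibility pattern, as a sketch with made-up names (example_field, EXAMPLE_FEATURE_NEWFIELD and example_decode_optional are illustrative, not ceph symbols):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct example_field {
	__le32 hash_type;
};

#define EXAMPLE_FEATURE_NEWFIELD	(1 << 7)

/*
 * Copy @out from the buffer at *p only when the peer advertised the
 * matching feature bit; otherwise zero-fill it so callers see sane
 * defaults when talking to an older server.
 */
static int example_decode_optional(void **p, void *end, int features,
				   struct example_field *out)
{
	if (!(features & EXAMPLE_FEATURE_NEWFIELD)) {
		memset(out, 0, sizeof(*out));
		return 0;
	}
	if (*p + sizeof(*out) > end)
		return -EIO;
	memcpy(out, *p, sizeof(*out));
	*p += sizeof(*out);
	return 0;
}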
@@ -1809,7 +1824,6 @@ static int __do_request(struct ceph_mds_client *mdsc, } /* send request */ - req->r_session = get_session(session); req->r_resend_mds = -1; /* forget any previous mds hint */ if (req->r_request_started == 0) /* note request start time */ @@ -1863,7 +1877,6 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu\n", req->r_tid); - put_request_session(req); __do_request(mdsc, req); } } @@ -2056,8 +2069,11 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) goto out; } else { struct ceph_inode_info *ci = ceph_inode(req->r_inode); - struct ceph_cap *cap = - ceph_get_cap_for_mds(ci, req->r_mds);; + struct ceph_cap *cap = NULL; + + if (req->r_session) + cap = ceph_get_cap_for_mds(ci, + req->r_session->s_mds); dout("already using auth"); if ((!cap || cap != ci->i_auth_cap) || @@ -2101,7 +2117,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) dout("handle_reply tid %lld result %d\n", tid, result); rinfo = &req->r_reply_info; - err = parse_reply_info(msg, rinfo); + err = parse_reply_info(msg, rinfo, session->s_con.peer_features); mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index aabe563b54db..4e3a9cc0bba6 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -35,6 +35,7 @@ struct ceph_cap; */ struct ceph_mds_reply_info_in { struct ceph_mds_reply_inode *in; + struct ceph_dir_layout dir_layout; u32 symlink_len; char *symlink; u32 xattr_len; @@ -165,7 +166,6 @@ struct ceph_mds_request { struct ceph_mds_client *r_mdsc; int r_op; /* mds op code */ - int r_mds; /* operation on what? */ struct inode *r_inode; /* arg1 */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 08b460ae0539..bf6f0f34082a 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -428,7 +428,8 @@ struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail; } fsc->client->extra_mon_dispatch = extra_mon_dispatch; - fsc->client->supported_features |= CEPH_FEATURE_FLOCK; + fsc->client->supported_features |= CEPH_FEATURE_FLOCK | + CEPH_FEATURE_DIRLAYOUTHASH; fsc->client->monc.want_mdsmap = 1; fsc->mount_options = fsopt; @@ -443,13 +444,17 @@ struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail_client; err = -ENOMEM; - fsc->wb_wq = create_workqueue("ceph-writeback"); + /* + * The number of concurrent works can be high but they don't need + * to be processed in parallel, limit concurrency. 
+ */ + fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1); if (fsc->wb_wq == NULL) goto fail_bdi; - fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); + fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1); if (fsc->pg_inv_wq == NULL) goto fail_wb_wq; - fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc"); + fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1); if (fsc->trunc_wq == NULL) goto fail_pg_inv_wq; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 4553d8829edb..20b907d76ae2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -239,6 +239,7 @@ struct ceph_inode_info { unsigned i_ceph_flags; unsigned long i_release_count; + struct ceph_dir_layout i_dir_layout; struct ceph_file_layout i_layout; char *i_symlink; @@ -768,6 +769,7 @@ extern void ceph_dentry_lru_add(struct dentry *dn); extern void ceph_dentry_lru_touch(struct dentry *dn); extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); +extern unsigned ceph_dentry_hash(struct dentry *dn); /* * our d_ops vary depending on whether the inode is live, diff --git a/fs/char_dev.c b/fs/char_dev.c index e5b9df993b93..dca9e5e0f73b 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -59,7 +59,7 @@ static struct char_device_struct { } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; /* index in the above */ -static inline int major_to_index(int major) +static inline int major_to_index(unsigned major) { return major % CHRDEV_MAJOR_HASH_SIZE; } @@ -417,18 +417,6 @@ static int chrdev_open(struct inode *inode, struct file *filp) return ret; } -int cdev_index(struct inode *inode) -{ - int idx; - struct kobject *kobj; - - kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); - if (!kobj) - return -1; - kobject_put(kobj); - return idx; -} - void cd_forget(struct inode *inode) { spin_lock(&cdev_lock); @@ -582,7 +570,6 @@ EXPORT_SYMBOL(cdev_init); EXPORT_SYMBOL(cdev_alloc); EXPORT_SYMBOL(cdev_del); EXPORT_SYMBOL(cdev_add); -EXPORT_SYMBOL(cdev_index); EXPORT_SYMBOL(__register_chrdev); EXPORT_SYMBOL(__unregister_chrdev); EXPORT_SYMBOL(directly_mappable_cdev_bdi); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5e7075d5f139..d9f652a522a6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -174,6 +174,12 @@ cifs_read_super(struct super_block *sb, void *data, goto out_no_root; } + /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */ + if (cifs_sb_master_tcon(cifs_sb)->nocase) + sb->s_d_op = &cifs_ci_dentry_ops; + else + sb->s_d_op = &cifs_dentry_ops; + #ifdef CONFIG_CIFS_EXPERIMENTAL if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cFYI(1, "export ops supported"); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 2e773825835e..1e95dd635632 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -130,17 +130,6 @@ cifs_bp_rename_retry: return full_path; } -static void setup_cifs_dentry(struct cifsTconInfo *tcon, - struct dentry *direntry, - struct inode *newinode) -{ - if (tcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); - d_instantiate(direntry, newinode); -} - /* Inode operations in similar order to how they appear in Linux file fs.h */ int @@ -327,7 +316,7 @@ cifs_create_get_file_info: cifs_create_set_dentry: if (rc == 0) - setup_cifs_dentry(tcon, direntry, newinode); + d_instantiate(direntry, newinode); else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); @@ -418,10 +407,6 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, rc = 
cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); if (rc == 0) d_instantiate(direntry, newinode); @@ -601,10 +586,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, parent_dir_inode->i_sb, xid, NULL); if ((rc == 0) && (newInode != NULL)) { - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_add(direntry, newInode); if (posix_open) { filp = lookup_instantiate_filp(nd, direntry, @@ -631,10 +612,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } else if (rc == -ENOENT) { rc = 0; direntry->d_time = jiffies; - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_add(direntry, NULL); /* if it was once a directory (but how can we tell?) we could do shrink_dcache_parent(direntry); */ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0c7e36910e31..b06b60620240 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1324,10 +1324,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need to set uid/gid */ inc_nlink(inode); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); cifs_fill_uniqueid(inode->i_sb, &fattr); @@ -1368,10 +1364,6 @@ mkdir_get_info: rc = cifs_get_inode_info(&newinode, full_path, NULL, inode->i_sb, xid, NULL); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_instantiate(direntry, newinode); /* setting nlink not necessary except in cases where we * failed to get it from the server or was set bogus */ diff --git a/fs/cifs/link.c b/fs/cifs/link.c index fe2f6a93c49e..306769de2fb5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -524,10 +524,6 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d", rc); } else { - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_instantiate(direntry, newinode); } } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 76b1b37c9e6b..7f25cc3d2256 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -102,11 +102,6 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, return NULL; } - if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase) - d_set_d_op(dentry, &cifs_ci_dentry_ops); - else - d_set_d_op(dentry, &cifs_dentry_ops); - alias = d_materialise_unique(dentry, inode); if (alias != NULL) { dput(dentry); diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 5525e1c660fd..690157876184 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -20,10 +20,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" static atomic_t permission_epoch = ATOMIC_INIT(0); diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c index 602240569c89..6475877b0763 100644 --- a/fs/coda/cnode.c +++ b/fs/coda/cnode.c @@ -7,9 +7,8 @@ #include #include -#include -#include #include +#include "coda_linux.h" static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2) { diff --git a/include/linux/coda_cache.h b/fs/coda/coda_cache.h similarity index 100% rename from include/linux/coda_cache.h rename to 
fs/coda/coda_cache.h diff --git a/include/linux/coda_fs_i.h b/fs/coda/coda_fs_i.h similarity index 100% rename from include/linux/coda_fs_i.h rename to fs/coda/coda_fs_i.h diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c index bf4a3fd3c8e3..2bdbcc11b373 100644 --- a/fs/coda/coda_linux.c +++ b/fs/coda/coda_linux.c @@ -17,9 +17,8 @@ #include #include -#include #include -#include +#include "coda_linux.h" /* initialize the debugging variables */ int coda_fake_statfs; diff --git a/include/linux/coda_linux.h b/fs/coda/coda_linux.h similarity index 97% rename from include/linux/coda_linux.h rename to fs/coda/coda_linux.h index 4ccc59c1ea82..9b0c5323890b 100644 --- a/include/linux/coda_linux.h +++ b/fs/coda/coda_linux.h @@ -20,13 +20,15 @@ #include #include #include -#include +#include "coda_fs_i.h" /* operations */ extern const struct inode_operations coda_dir_inode_operations; extern const struct inode_operations coda_file_inode_operations; extern const struct inode_operations coda_ioctl_inode_operations; +extern const struct dentry_operations coda_dentry_operations; + extern const struct address_space_operations coda_file_aops; extern const struct address_space_operations coda_symlink_aops; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 29badd91360f..2b8dae4d121e 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -23,10 +23,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" @@ -61,7 +60,7 @@ static int coda_return_EIO(void) } #define CODA_EIO_ERROR ((void *) (coda_return_EIO)) -static const struct dentry_operations coda_dentry_operations = +const struct dentry_operations coda_dentry_operations = { .d_revalidate = coda_dentry_revalidate, .d_delete = coda_dentry_delete, @@ -126,8 +125,6 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struc return ERR_PTR(error); exit: - d_set_d_op(entry, &coda_dentry_operations); - if (inode && (type & CODA_NOCACHE)) coda_flag_inode(inode, C_VATTR | C_PURGE); diff --git a/fs/coda/file.c b/fs/coda/file.c index c8b50ba4366a..0433057be330 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -21,10 +21,9 @@ #include #include -#include -#include #include +#include "coda_linux.h" #include "coda_int.h" static ssize_t diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 50dc7d189f56..871b27715465 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -28,10 +28,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" @@ -45,7 +44,7 @@ static struct kmem_cache * coda_inode_cachep; static struct inode *coda_alloc_inode(struct super_block *sb) { struct coda_inode_info *ei; - ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->c_fid, 0, sizeof(struct CodaFid)); @@ -193,6 +192,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) sb->s_blocksize_bits = 12; sb->s_magic = CODA_SUPER_MAGIC; sb->s_op = &coda_super_operations; + sb->s_d_op = &coda_dentry_operations; sb->s_bdi = &vc->bdi; /* get root fid from Venus: this needs the root inode */ diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 741f0bd03918..6cbb3afb36dc 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -19,10 +19,10 @@ #include #include -#include -#include #include +#include "coda_linux.h" + /* pioctl ops */ static int coda_ioctl_permission(struct inode *inode, int 
mask, unsigned int flags); static long coda_pioctl(struct file *filp, unsigned int cmd, diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 62647a8595e4..8f616e0e252c 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -43,10 +43,10 @@ #include #include -#include -#include #include +#include "coda_linux.h" + #include "coda_int.h" /* statistics */ diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c index af78f007a2b0..ab94ef63caef 100644 --- a/fs/coda/symlink.c +++ b/fs/coda/symlink.c @@ -16,9 +16,9 @@ #include #include -#include #include -#include + +#include "coda_linux.h" static int coda_symlink_filler(struct file *file, struct page *page) { diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index c3563cab9758..9727e0c52579 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -33,10 +33,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 026cf68553a4..82bda8fdfc1c 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -90,6 +90,7 @@ extern const struct file_operations configfs_file_operations; extern const struct file_operations bin_fops; extern const struct inode_operations configfs_dir_inode_operations; extern const struct inode_operations configfs_symlink_inode_operations; +extern const struct dentry_operations configfs_dentry_ops; extern int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname); diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 36637a8c1ed3..90ff3cb10de3 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -72,7 +72,7 @@ static int configfs_d_delete(const struct dentry *dentry) return 1; } -static const struct dentry_operations configfs_dentry_ops = { +const struct dentry_operations configfs_dentry_ops = { .d_iput = configfs_d_iput, /* simple_delete_dentry() isn't exported */ .d_delete = configfs_d_delete, @@ -442,7 +442,6 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den return error; } - d_set_d_op(dentry, &configfs_dentry_ops); d_rehash(dentry); return 0; @@ -489,7 +488,6 @@ static struct dentry * configfs_lookup(struct inode *dir, */ if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); - d_set_d_op(dentry, &configfs_dentry_ops); d_add(dentry, NULL); return NULL; } @@ -683,7 +681,6 @@ static int create_default_group(struct config_group *parent_group, ret = -ENOMEM; child = d_alloc(parent, &name); if (child) { - d_set_d_op(child, &configfs_dentry_ops); d_add(child, NULL); ret = configfs_attach_group(&parent_group->cg_item, @@ -1681,7 +1678,6 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) err = -ENOMEM; dentry = d_alloc(configfs_sb->s_root, &name); if (dentry) { - d_set_d_op(dentry, &configfs_dentry_ops); d_add(dentry, NULL); err = configfs_attach_group(sd->s_element, &group->cg_item, diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index 7d3607febe1c..ecc62178beda 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -101,6 +101,7 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) configfs_root_group.cg_item.ci_dentry = root; root->d_fsdata = &configfs_root; sb->s_root = root; + sb->s_d_op = &configfs_dentry_ops; /* the rest get that */ return 0; } diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 32fd5fe9ca0e..e141939080f0 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -34,57 +34,81 @@ static const 
struct address_space_operations cramfs_aops; static DEFINE_MUTEX(read_mutex); -/* These two macros may change in future, to provide better st_ino - semantics. */ -#define CRAMINO(x) (((x)->offset && (x)->size)?(x)->offset<<2:1) +/* These macros may change in future, to provide better st_ino semantics. */ #define OFFSET(x) ((x)->i_ino) -static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode) +static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset) { + if (!cino->offset) + return offset + 1; + if (!cino->size) + return offset + 1; + + /* + * The file mode test fixes buggy mkcramfs implementations where + * cramfs_inode->offset is set to a non zero value for entries + * which did not contain data, like devices node and fifos. + */ + switch (cino->mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + return cino->offset << 2; + default: + break; + } + return offset + 1; +} + +static struct inode *get_cramfs_inode(struct super_block *sb, + struct cramfs_inode *cramfs_inode, unsigned int offset) +{ + struct inode *inode; static struct timespec zerotime; + + inode = iget_locked(sb, cramino(cramfs_inode, offset)); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + + switch (cramfs_inode->mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &generic_ro_fops; + inode->i_data.a_ops = &cramfs_aops; + break; + case S_IFDIR: + inode->i_op = &cramfs_dir_inode_operations; + inode->i_fop = &cramfs_directory_operations; + break; + case S_IFLNK: + inode->i_op = &page_symlink_inode_operations; + inode->i_data.a_ops = &cramfs_aops; + break; + default: + init_special_inode(inode, cramfs_inode->mode, + old_decode_dev(cramfs_inode->size)); + } + inode->i_mode = cramfs_inode->mode; inode->i_uid = cramfs_inode->uid; - inode->i_size = cramfs_inode->size; - inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; inode->i_gid = cramfs_inode->gid; + + /* if the lower 2 bits are zero, the inode contains data */ + if (!(inode->i_ino & 3)) { + inode->i_size = cramfs_inode->size; + inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; + } + /* Struct copy intentional */ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime; /* inode->i_nlink is left 1 - arguably wrong for directories, but it's the best we can do without reading the directory contents. 1 yields the right result in GNU find, even without -noleaf option. 
*/ - if (S_ISREG(inode->i_mode)) { - inode->i_fop = &generic_ro_fops; - inode->i_data.a_ops = &cramfs_aops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &cramfs_dir_inode_operations; - inode->i_fop = &cramfs_directory_operations; - } else if (S_ISLNK(inode->i_mode)) { - inode->i_op = &page_symlink_inode_operations; - inode->i_data.a_ops = &cramfs_aops; - } else { - init_special_inode(inode, inode->i_mode, - old_decode_dev(cramfs_inode->size)); - } -} -static struct inode *get_cramfs_inode(struct super_block *sb, - struct cramfs_inode * cramfs_inode) -{ - struct inode *inode; - if (CRAMINO(cramfs_inode) == 1) { - inode = new_inode(sb); - if (inode) { - inode->i_ino = 1; - setup_inode(inode, cramfs_inode); - } - } else { - inode = iget_locked(sb, CRAMINO(cramfs_inode)); - if (inode && (inode->i_state & I_NEW)) { - setup_inode(inode, cramfs_inode); - unlock_new_inode(inode); - } - } + unlock_new_inode(inode); + return inode; } @@ -265,6 +289,9 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) printk(KERN_ERR "cramfs: root is not a directory\n"); goto out; } + /* correct strange, hard-coded permissions of mkcramfs */ + super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); + root_offset = super.root.offset << 2; if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) { sbi->size=super.size; @@ -289,7 +316,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) /* Set it all up.. */ sb->s_op = &cramfs_ops; - root = get_cramfs_inode(sb, &super.root); + root = get_cramfs_inode(sb, &super.root, 0); if (!root) goto out; sb->s_root = d_alloc_root(root); @@ -365,7 +392,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) */ namelen = de->namelen << 2; memcpy(buf, name, namelen); - ino = CRAMINO(de); + ino = cramino(de, OFFSET(inode) + offset); mode = de->mode; mutex_unlock(&read_mutex); nextoffset = offset + sizeof(*de) + namelen; @@ -404,8 +431,9 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s struct cramfs_inode *de; char *name; int namelen, retval; + int dir_off = OFFSET(dir) + offset; - de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN); + de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN); name = (char *)(de+1); /* Try to take advantage of sorted directories */ @@ -436,7 +464,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s if (!retval) { struct cramfs_inode entry = *de; mutex_unlock(&read_mutex); - d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); + d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off)); return NULL; } /* else (retval < 0) */ diff --git a/fs/dcache.c b/fs/dcache.c index 5699d4c027cb..274a22250380 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1320,6 +1320,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) __dget_dlock(parent); dentry->d_parent = parent; dentry->d_sb = parent->d_sb; + d_set_d_op(dentry, dentry->d_sb->s_d_op); list_add(&dentry->d_u.d_child, &parent->d_subdirs); spin_unlock(&parent->d_lock); } @@ -1335,6 +1336,7 @@ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) struct dentry *dentry = d_alloc(NULL, name); if (dentry) { dentry->d_sb = sb; + d_set_d_op(dentry, dentry->d_sb->s_d_op); dentry->d_parent = dentry; dentry->d_flags |= DCACHE_DISCONNECTED; } @@ -1355,8 +1357,8 @@ EXPORT_SYMBOL(d_alloc_name); void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) { 
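
The cramfs hunks above replace the old CRAMINO() macro with cramino(), which only trusts cino->offset << 2 as an inode number for regular files, directories and symlinks that actually carry data, and falls back to (directory offset + 1) for everything else; get_cramfs_inode() is also rebuilt around iget_locked(), so repeated lookups of the same on-disk inode share a single in-core inode. A sketch of the generic iget_locked() pattern the new code follows (names are placeholders, not cramfs code):

#include <linux/err.h>
#include <linux/fs.h>

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached and initialized */

	/* ... fill in i_mode, i_op, i_fop, i_size, timestamps ... */

	unlock_new_inode(inode);	/* clears I_NEW and wakes waiters */
	return inode;
}
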
- BUG_ON(dentry->d_op); - BUG_ON(dentry->d_flags & (DCACHE_OP_HASH | + WARN_ON_ONCE(dentry->d_op); + WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE )); @@ -1507,6 +1509,7 @@ struct dentry * d_alloc_root(struct inode * root_inode) res = d_alloc(NULL, &name); if (res) { res->d_sb = root_inode->i_sb; + d_set_d_op(res, res->d_sb->s_d_op); res->d_parent = res; d_instantiate(res, root_inode); } @@ -1567,6 +1570,7 @@ struct dentry *d_obtain_alias(struct inode *inode) /* attach a disconnected dentry */ spin_lock(&tmp->d_lock); tmp->d_sb = inode->i_sb; + d_set_d_op(tmp, tmp->d_sb->s_d_op); tmp->d_inode = inode; tmp->d_flags |= DCACHE_DISCONNECTED; list_add(&tmp->d_alias, &inode->i_dentry); @@ -1966,7 +1970,7 @@ out: /** * d_validate - verify dentry provided from insecure source (deprecated) * @dentry: The dentry alleged to be valid child of @dparent - * @dparent: The parent dentry (known to be valid) + * @parent: The parent dentry (known to be valid) * * An insecure source has sent us a dentry, here we verify it and dget() it. * This is used by ncpfs in its readdir implementation. @@ -2449,8 +2453,7 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) } /** - * Prepend path string to a buffer - * + * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry (may be modified by this function) * @buffer: pointer to the end of the buffer diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 337352a94751..64ff02330752 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -441,7 +441,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct qstr lower_name; int rc = 0; - d_set_d_op(ecryptfs_dentry, &ecryptfs_dops); if ((ecryptfs_dentry->d_name.len == 1 && !strcmp(ecryptfs_dentry->d_name.name, ".")) || (ecryptfs_dentry->d_name.len == 2 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 351038675376..d3b28abdd6aa 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -141,25 +141,12 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) return rc; } -/** - * ecryptfs_interpose - * @lower_dentry: Existing dentry in the lower filesystem - * @dentry: ecryptfs' dentry - * @sb: ecryptfs's super_block - * @flags: flags to govern behavior of interpose procedure - * - * Interposes upper and lower dentries. 
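
Much of this merge converts filesystems from calling d_set_d_op() on individual dentries to setting a per-superblock default in sb->s_d_op; the fs/dcache.c hunks above make d_alloc(), d_alloc_pseudo(), d_alloc_root() and d_obtain_alias() copy that default onto every new dentry, and relax the d_set_d_op() sanity checks from BUG_ON() to WARN_ON_ONCE(). Filesystems that want a root dentry with NULL ->d_op (cifs, fuse) set s_d_op only after building the root, as their hunks note. A minimal sketch of the new convention, with hypothetical names:

#include <linux/dcache.h>
#include <linux/fs.h>

/* Hypothetical filesystem: one default set of dentry operations. */
static const struct dentry_operations examplefs_dentry_ops = {
	/* .d_revalidate, .d_hash, .d_compare, ... as needed */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/*
	 * Set the default once; d_alloc() and friends copy sb->s_d_op
	 * onto each new dentry, so the per-dentry d_set_d_op() calls
	 * in ->lookup() and the export ops can go away.
	 */
	sb->s_d_op = &examplefs_dentry_ops;

	/* ... read the root inode, d_alloc_root(), etc. ... */
	return 0;
}
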
- * - * Returns zero on success; non-zero otherwise - */ -int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, - struct super_block *sb, u32 flags) +static struct inode *ecryptfs_get_inode(struct inode *lower_inode, + struct super_block *sb) { - struct inode *lower_inode; struct inode *inode; int rc = 0; - lower_inode = lower_dentry->d_inode; if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) { rc = -EXDEV; goto out; @@ -189,17 +176,38 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, if (special_file(lower_inode->i_mode)) init_special_inode(inode, lower_inode->i_mode, lower_inode->i_rdev); - d_set_d_op(dentry, &ecryptfs_dops); fsstack_copy_attr_all(inode, lower_inode); /* This size will be overwritten for real files w/ headers and * other metadata */ fsstack_copy_inode_size(inode, lower_inode); + return inode; +out: + return ERR_PTR(rc); +} + +/** + * ecryptfs_interpose + * @lower_dentry: Existing dentry in the lower filesystem + * @dentry: ecryptfs' dentry + * @sb: ecryptfs's super_block + * @flags: flags to govern behavior of interpose procedure + * + * Interposes upper and lower dentries. + * + * Returns zero on success; non-zero otherwise + */ +int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, + struct super_block *sb, u32 flags) +{ + struct inode *lower_inode = lower_dentry->d_inode; + struct inode *inode = ecryptfs_get_inode(lower_inode, sb); + if (IS_ERR(inode)) + return PTR_ERR(inode); if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD) d_add(dentry, inode); else d_instantiate(dentry, inode); -out: - return rc; + return 0; } enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, @@ -491,60 +499,12 @@ out: struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; -/** - * ecryptfs_read_super - * @sb: The ecryptfs super block - * @dev_name: The path to mount over - * - * Read the super block of the lower filesystem, and use - * ecryptfs_interpose to create our initial inode and super block - * struct. 
- */ -static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) -{ - struct path path; - int rc; - - rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); - if (rc) { - ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); - goto out; - } - if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { - rc = -EINVAL; - printk(KERN_ERR "Mount on filesystem of type " - "eCryptfs explicitly disallowed due to " - "known incompatibilities\n"); - goto out_free; - } - ecryptfs_set_superblock_lower(sb, path.dentry->d_sb); - sb->s_maxbytes = path.dentry->d_sb->s_maxbytes; - sb->s_blocksize = path.dentry->d_sb->s_blocksize; - ecryptfs_set_dentry_lower(sb->s_root, path.dentry); - ecryptfs_set_dentry_lower_mnt(sb->s_root, path.mnt); - rc = ecryptfs_interpose(path.dentry, sb->s_root, sb, 0); - if (rc) - goto out_free; - rc = 0; - goto out; -out_free: - path_put(&path); -out: - return rc; -} - /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel - * - * The whole ecryptfs_get_sb process is broken into 3 functions: - * ecryptfs_parse_options(): handle options passed to ecryptfs, if any - * ecryptfs_read_super(): this accesses the lower filesystem and uses - * ecryptfs_interpose to perform most of the linking - * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c) */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) @@ -553,6 +513,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags struct ecryptfs_sb_info *sbi; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; + struct inode *inode; + struct path path; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); @@ -575,10 +537,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); - if (rc) { - deactivate_locked_super(s); - goto out; - } + if (rc) + goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; @@ -586,34 +546,54 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; + s->s_d_op = &ecryptfs_dops; - rc = -ENOMEM; - s->s_root = d_alloc(NULL, &(const struct qstr) { - .hash = 0,.name = "/",.len = 1}); + err = "Reading sb failed"; + rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (rc) { + ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); + goto out1; + } + if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { + rc = -EINVAL; + printk(KERN_ERR "Mount on filesystem of type " + "eCryptfs explicitly disallowed due to " + "known incompatibilities\n"); + goto out_free; + } + ecryptfs_set_superblock_lower(s, path.dentry->d_sb); + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; + s->s_blocksize = path.dentry->d_sb->s_blocksize; + + inode = ecryptfs_get_inode(path.dentry->d_inode, s); + rc = PTR_ERR(inode); + if (IS_ERR(inode)) + goto out_free; + + s->s_root = d_alloc_root(inode); if (!s->s_root) { - deactivate_locked_super(s); - goto out; + iput(inode); + rc = -ENOMEM; + goto out_free; } - d_set_d_op(s->s_root, &ecryptfs_dops); - s->s_root->d_sb = s; - s->s_root->d_parent = s->s_root; + rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); - if (!root_info) { - 
deactivate_locked_super(s); - goto out; - } + if (!root_info) + goto out_free; + /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); + ecryptfs_set_dentry_lower(s->s_root, path.dentry); + ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); + s->s_flags |= MS_ACTIVE; - rc = ecryptfs_read_super(s, dev_name); - if (rc) { - deactivate_locked_super(s); - err = "Reading sb failed"; - goto out; - } return dget(s->s_root); +out_free: + path_put(&path); +out1: + deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8cf07242067d..cc8a9b7d6064 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -217,7 +217,7 @@ struct ep_send_events_data { * Configuration options available inside /proc/sys/fs/epoll/ */ /* Maximum number of epoll watched descriptors, per user */ -static int max_user_watches __read_mostly; +static long max_user_watches __read_mostly; /* * This mutex is used to serialize ep_free() and eventpoll_release_file(). @@ -240,16 +240,18 @@ static struct kmem_cache *pwq_cache __read_mostly; #include -static int zero; +static long zero; +static long long_max = LONG_MAX; ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = &max_user_watches, - .maxlen = sizeof(int), + .maxlen = sizeof(max_user_watches), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &zero, + .extra2 = &long_max, }, { } }; @@ -561,7 +563,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) /* At this point it is safe to free the eventpoll item */ kmem_cache_free(epi_cache, epi); - atomic_dec(&ep->user->epoll_watches); + atomic_long_dec(&ep->user->epoll_watches); return 0; } @@ -898,11 +900,12 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, { int error, revents, pwake = 0; unsigned long flags; + long user_watches; struct epitem *epi; struct ep_pqueue epq; - if (unlikely(atomic_read(&ep->user->epoll_watches) >= - max_user_watches)) + user_watches = atomic_long_read(&ep->user->epoll_watches); + if (unlikely(user_watches >= max_user_watches)) return -ENOSPC; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; @@ -966,7 +969,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, spin_unlock_irqrestore(&ep->lock, flags); - atomic_inc(&ep->user->epoll_watches); + atomic_long_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) @@ -1426,6 +1429,7 @@ static int __init eventpoll_init(void) */ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; + BUG_ON(max_user_watches < 0); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index b7d0554631e4..7aa767d4f06f 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -364,7 +364,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; @@ -381,8 +381,7 @@ fail: */ static int ext3_blkdev_put(struct block_device *bdev) { - bd_release(bdev); - return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int 
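
The fs/eventpoll.c hunks above widen max_user_watches and the per-user watch count from int to long; the boot-time default is derived from available memory, so an int can overflow on large machines (hence the added BUG_ON). The sysctl handler becomes proc_doulongvec_minmax(), whose extra1/extra2 bounds must point at longs, which is why zero turns into a long and a long_max bound is added. The resulting table-entry shape, restated as a sketch with hypothetical names:

#include <linux/kernel.h>	/* LONG_MAX */
#include <linux/sysctl.h>

static long example_limit = 1000000;	/* hypothetical tunable */
static long example_zero;
static long example_max = LONG_MAX;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_limit",
		.data		= &example_limit,
		.maxlen		= sizeof(example_limit),
		.mode		= 0644,
		/* long handler: the bounds below must be longs as well */
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &example_zero,
		.extra2		= &example_max,
	},
	{ }
};
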
ext3_blkdev_remove(struct ext3_sb_info *sbi) @@ -2162,13 +2161,6 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, if (bdev == NULL) return NULL; - if (bd_claim(bdev, sb)) { - ext3_msg(sb, KERN_ERR, - "error: failed to claim external journal device"); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); - return NULL; - } - blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index bab2387fba43..1de65f572033 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -561,7 +561,7 @@ struct ext4_new_group_data { #define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION #endif -/* Max physical block we can addres w/o extents */ +/* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF /* diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e910720e8bb8..c4068f6abf03 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2845,14 +2845,14 @@ fix_extent_len: * to an uninitialized extent. * * Writing to an uninitized extent may result in splitting the uninitialized - * extent into multiple /intialized unintialized extents (up to three) + * extent into multiple /initialized uninitialized extents (up to three) * There are three possibilities: * a> There is no split required: Entire extent should be uninitialized * b> Splits in two extents: Write is happening at either end of the extent * c> Splits in three extents: Somone is writing in middle of the extent * * One of more index blocks maybe needed if the extent tree grow after - * the unintialized extent split. To prevent ENOSPC occur at the IO + * the uninitialized extent split. To prevent ENOSPC occur at the IO * complete, we need to split the uninitialized extent before DIO submit * the IO. The uninitialized extent called at this time will be split * into three uninitialized extent(at most). After IO complete, the part @@ -3644,6 +3644,10 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + /* * currently supporting (pre)allocate mode for extent-based * files _only_ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e80fc513eacc..9f7f9e49914f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -3379,7 +3380,7 @@ int ext4_alloc_da_blocks(struct inode *inode) * doing I/O at all. * * We could call write_cache_pages(), and then redirty all of - * the pages by calling redirty_page_for_writeback() but that + * the pages by calling redirty_page_for_writepage() but that * would be ugly in the extreme. So instead we would need to * replicate parts of the code in the above functions, * simplifying them becuase we wouldn't actually intend to @@ -3737,7 +3738,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) retry: io_end = ext4_init_io_end(inode, GFP_ATOMIC); if (!io_end) { - pr_warning_ratelimited("%s: allocation fail\n", __func__); + pr_warn_ratelimited("%s: allocation fail\n", __func__); schedule(); goto retry; } @@ -3761,9 +3762,9 @@ retry: * preallocated extents, and those write extend the file, no need to * fall back to buffered IO. 
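
The ext3 hunks above (and the matching ext4, jfs, gfs2 and logfs hunks later in this diff) follow the block-layer change that folds bd_claim()/bd_release() into the open itself: opening with FMODE_EXCL via blkdev_get_by_dev() or blkdev_get_by_path() claims the device for the given holder, and the matching blkdev_put() must pass FMODE_EXCL too. A minimal sketch with a hypothetical holder:

#include <linux/blkdev.h>
#include <linux/fs.h>

static struct block_device *example_claim_journal(dev_t dev, void *holder)
{
	/* Exclusive open: replaces open_by_devnum() + bd_claim(). */
	return blkdev_get_by_dev(dev,
				 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				 holder);
}

static void example_release_journal(struct block_device *bdev)
{
	/* The mode must mirror the FMODE_EXCL used at open time. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
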
* - * For holes, we fallocate those blocks, mark them as unintialized + * For holes, we fallocate those blocks, mark them as uninitialized * If those blocks were preallocated, we mark sure they are splited, but - * still keep the range to write as unintialized. + * still keep the range to write as uninitialized. * * The unwrritten extents will be converted to written when DIO is completed. * For async direct IO, since the IO may still pending when return, we diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 29c80f6d8b27..cb10a06775e4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -657,7 +657,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; @@ -673,8 +673,7 @@ fail: */ static int ext4_blkdev_put(struct block_device *bdev) { - bd_release(bdev); - return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext4_blkdev_remove(struct ext4_sb_info *sbi) @@ -3778,13 +3777,6 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, if (bdev == NULL) return NULL; - if (bd_claim(bdev, sb)) { - ext4_msg(sb, KERN_ERR, - "failed to claim external journal device"); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); - return NULL; - } - blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { diff --git a/fs/fat/fat.h b/fs/fat/fat.h index d75a77f85c28..f50408901f7e 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -319,7 +319,8 @@ extern struct inode *fat_build_inode(struct super_block *sb, struct msdos_dir_entry *de, loff_t i_pos); extern int fat_sync_inode(struct inode *inode); extern int fat_fill_super(struct super_block *sb, void *data, int silent, - const struct inode_operations *fs_dir_inode_ops, int isvfat); + const struct inode_operations *fs_dir_inode_ops, + int isvfat, void (*setup)(struct super_block *)); extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 206351af7c58..86753fe10bd1 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -703,7 +703,6 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct inode *inode = NULL; - struct dentry *result; u32 *fh = fid->raw; if (fh_len < 5 || fh_type != 3) @@ -748,10 +747,7 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, * the fat_iget lookup again. If that fails, then we are totally out * of luck. But all that is for another day */ - result = d_obtain_alias(inode); - if (!IS_ERR(result)) - d_set_d_op(result, sb->s_root->d_op); - return result; + return d_obtain_alias(inode); } static int @@ -799,8 +795,6 @@ static struct dentry *fat_get_parent(struct dentry *child) brelse(bh); parent = d_obtain_alias(inode); - if (!IS_ERR(parent)) - d_set_d_op(parent, sb->s_root->d_op); out: unlock_super(sb); @@ -1244,7 +1238,8 @@ static int fat_read_root(struct inode *inode) * Read the super block of an MS-DOS FS. 
*/ int fat_fill_super(struct super_block *sb, void *data, int silent, - const struct inode_operations *fs_dir_inode_ops, int isvfat) + const struct inode_operations *fs_dir_inode_ops, int isvfat, + void (*setup)(struct super_block *)) { struct inode *root_inode = NULL, *fat_inode = NULL; struct buffer_head *bh; @@ -1280,6 +1275,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, if (error) goto out_fail; + setup(sb); /* flavour-specific stuff that needs options */ + error = -EIO; sb_min_blocksize(sb, 512); bh = sb_bread(sb, 0); diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 35ffe43afa4b..711499040eb6 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -227,11 +227,7 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, } out: unlock_super(sb); - d_set_d_op(dentry, &msdos_dentry_operations); - dentry = d_splice_alias(inode, dentry); - if (dentry) - d_set_d_op(dentry, &msdos_dentry_operations); - return dentry; + return d_splice_alias(inode, dentry); error: unlock_super(sb); @@ -661,21 +657,16 @@ static const struct inode_operations msdos_dir_inode_operations = { .getattr = fat_getattr, }; -static int msdos_fill_super(struct super_block *sb, void *data, int silent) +static void setup(struct super_block *sb) { - int res; - - lock_super(sb); - res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0); - if (res) { - unlock_super(sb); - return res; - } - + sb->s_d_op = &msdos_dentry_operations; sb->s_flags |= MS_NOATIME; - d_set_d_op(sb->s_root, &msdos_dentry_operations); - unlock_super(sb); - return 0; +} + +static int msdos_fill_super(struct super_block *sb, void *data, int silent) +{ + return fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, + 0, setup); } static struct dentry *msdos_mount(struct file_system_type *fs_type, diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index e3ffc5e12332..f88f752babd9 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -772,13 +772,10 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, out: unlock_super(sb); - d_set_d_op(dentry, sb->s_root->d_op); dentry->d_time = dentry->d_parent->d_inode->i_version; dentry = d_splice_alias(inode, dentry); - if (dentry) { - d_set_d_op(dentry, sb->s_root->d_op); + if (dentry) dentry->d_time = dentry->d_parent->d_inode->i_version; - } return dentry; error: @@ -1066,24 +1063,18 @@ static const struct inode_operations vfat_dir_inode_operations = { .getattr = fat_getattr, }; -static int vfat_fill_super(struct super_block *sb, void *data, int silent) +static void setup(struct super_block *sb) { - int res; - - lock_super(sb); - res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1); - if (res) { - unlock_super(sb); - return res; - } - if (MSDOS_SB(sb)->options.name_check != 's') - d_set_d_op(sb->s_root, &vfat_ci_dentry_ops); + sb->s_d_op = &vfat_ci_dentry_ops; else - d_set_d_op(sb->s_root, &vfat_dentry_ops); + sb->s_d_op = &vfat_dentry_ops; +} - unlock_super(sb); - return 0; +static int vfat_fill_super(struct super_block *sb, void *data, int silent) +{ + return fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, + 1, setup); } static struct dentry *vfat_mount(struct file_system_type *fs_type, diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 3d06ccc953aa..59c6e4956786 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -84,13 +84,9 @@ static inline struct inode *wb_inode(struct list_head *head) return list_entry(head, struct inode, i_wb_list); } -static 
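
The FAT hunks above give fat_fill_super() a setup() callback that runs after the mount options have been parsed; that is where the msdos and vfat flavours now choose sb->s_d_op (vfat picks the case-insensitive ops unless name_check == 's') and other option-dependent defaults. The shape of that pattern, reduced to a sketch with hypothetical names:

#include <linux/fs.h>

/* Hypothetical common fill_super with a per-flavour hook. */
static int example_fill_super_common(struct super_block *sb, void *data,
				     int silent,
				     void (*setup)(struct super_block *))
{
	/* ... parse options into per-sb info ... */

	setup(sb);	/* flavour-specific bits that depend on options */

	/* ... read the boot sector, build the root inode ... */
	return 0;
}

static void example_flavour_setup(struct super_block *sb)
{
	sb->s_flags |= MS_NOATIME;	/* e.g. what the msdos flavour does */
}

static int example_flavour_fill_super(struct super_block *sb, void *data,
				      int silent)
{
	return example_fill_super_common(sb, data, silent,
					 example_flavour_setup);
}
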
void bdi_queue_work(struct backing_dev_info *bdi, - struct wb_writeback_work *work) +/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ +static void bdi_wakeup_flusher(struct backing_dev_info *bdi) { - trace_writeback_queue(bdi, work); - - spin_lock_bh(&bdi->wb_lock); - list_add_tail(&work->list, &bdi->work_list); if (bdi->wb.task) { wake_up_process(bdi->wb.task); } else { @@ -98,15 +94,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi, * The bdi thread isn't there, wake up the forker thread which * will create and run it. */ - trace_writeback_nothread(bdi, work); wake_up_process(default_backing_dev_info.wb.task); } +} + +static void bdi_queue_work(struct backing_dev_info *bdi, + struct wb_writeback_work *work) +{ + trace_writeback_queue(bdi, work); + + spin_lock_bh(&bdi->wb_lock); + list_add_tail(&work->list, &bdi->work_list); + if (!bdi->wb.task) + trace_writeback_nothread(bdi, work); + bdi_wakeup_flusher(bdi); spin_unlock_bh(&bdi->wb_lock); } static void __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, - bool range_cyclic, bool for_background) + bool range_cyclic) { struct wb_writeback_work *work; @@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, work->sync_mode = WB_SYNC_NONE; work->nr_pages = nr_pages; work->range_cyclic = range_cyclic; - work->for_background = for_background; bdi_queue_work(bdi, work); } @@ -144,7 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, */ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) { - __bdi_start_writeback(bdi, nr_pages, true, false); + __bdi_start_writeback(bdi, nr_pages, true); } /** @@ -152,13 +158,21 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) * @bdi: the backing device to write from * * Description: - * This does WB_SYNC_NONE background writeback. The IO is only - * started when this function returns, we make no guarentees on - * completion. Caller need not hold sb s_umount semaphore. + * This makes sure WB_SYNC_NONE background writeback happens. When + * this function returns, it is only guaranteed that for given BDI + * some IO is happening if we are over background dirty threshold. + * Caller need not hold sb s_umount semaphore. */ void bdi_start_background_writeback(struct backing_dev_info *bdi) { - __bdi_start_writeback(bdi, LONG_MAX, true, true); + /* + * We just wake up the flusher thread. It will perform background + * writeback as soon as there is no other work to do. + */ + trace_writeback_wake_background(bdi); + spin_lock_bh(&bdi->wb_lock); + bdi_wakeup_flusher(bdi); + spin_unlock_bh(&bdi->wb_lock); } /* @@ -616,6 +630,7 @@ static long wb_writeback(struct bdi_writeback *wb, }; unsigned long oldest_jif; long wrote = 0; + long write_chunk; struct inode *inode; if (wbc.for_kupdate) { @@ -628,6 +643,24 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.range_end = LLONG_MAX; } + /* + * WB_SYNC_ALL mode does livelock avoidance by syncing dirty + * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX + * here avoids calling into writeback_inodes_wb() more than once. 
+ * + * The intended call sequence for WB_SYNC_ALL writeback is: + * + * wb_writeback() + * __writeback_inodes_sb() <== called only once + * write_cache_pages() <== called once for each inode + * (quickly) tag currently dirty pages + * (maybe slowly) sync all tagged pages + */ + if (wbc.sync_mode == WB_SYNC_NONE) + write_chunk = MAX_WRITEBACK_PAGES; + else + write_chunk = LONG_MAX; + wbc.wb_start = jiffies; /* livelock avoidance */ for (;;) { /* @@ -636,6 +669,16 @@ static long wb_writeback(struct bdi_writeback *wb, if (work->nr_pages <= 0) break; + /* + * Background writeout and kupdate-style writeback may + * run forever. Stop them if there is other work to do + * so that e.g. sync can proceed. They'll be restarted + * after the other works are all done. + */ + if ((work->for_background || work->for_kupdate) && + !list_empty(&wb->bdi->work_list)) + break; + /* * For background writeout, stop when we are below the * background dirty threshold @@ -644,7 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb, break; wbc.more_io = 0; - wbc.nr_to_write = MAX_WRITEBACK_PAGES; + wbc.nr_to_write = write_chunk; wbc.pages_skipped = 0; trace_wbc_writeback_start(&wbc, wb->bdi); @@ -654,8 +697,8 @@ static long wb_writeback(struct bdi_writeback *wb, writeback_inodes_wb(wb, &wbc); trace_wbc_writeback_written(&wbc, wb->bdi); - work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; - wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; + work->nr_pages -= write_chunk - wbc.nr_to_write; + wrote += write_chunk - wbc.nr_to_write; /* * If we consumed everything, see if we have more @@ -670,7 +713,7 @@ static long wb_writeback(struct bdi_writeback *wb, /* * Did we write something? Try for more */ - if (wbc.nr_to_write < MAX_WRITEBACK_PAGES) + if (wbc.nr_to_write < write_chunk) continue; /* * Nothing written. Wait for some inode to @@ -718,6 +761,23 @@ static unsigned long get_nr_dirty_pages(void) get_nr_dirty_inodes(); } +static long wb_check_background_flush(struct bdi_writeback *wb) +{ + if (over_bground_thresh()) { + + struct wb_writeback_work work = { + .nr_pages = LONG_MAX, + .sync_mode = WB_SYNC_NONE, + .for_background = 1, + .range_cyclic = 1, + }; + + return wb_writeback(wb, &work); + } + + return 0; +} + static long wb_check_old_data_flush(struct bdi_writeback *wb) { unsigned long expired; @@ -787,6 +847,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) * Check for periodic writeback, kupdated() style */ wrote += wb_check_old_data_flush(wb); + wrote += wb_check_background_flush(wb); clear_bit(BDI_writeback_running, &wb->bdi->state); return wrote; @@ -873,7 +934,7 @@ void wakeup_flusher_threads(long nr_pages) list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - __bdi_start_writeback(bdi, nr_pages, false, false); + __bdi_start_writeback(bdi, nr_pages, false); } rcu_read_unlock(); } @@ -1164,7 +1225,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle); * @sb: the superblock * * This function writes and waits on any dirty inode belonging to this - * super_block. The number of pages synced is returned. + * super_block. */ void sync_inodes_sb(struct super_block *sb) { @@ -1242,11 +1303,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc) EXPORT_SYMBOL(sync_inode); /** - * sync_inode - write an inode to disk + * sync_inode_metadata - write an inode to disk * @inode: the inode to sync * @wait: wait for I/O to complete. * - * Write an inode to disk and adjust it's dirty state after completion. 
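
The fs/fs-writeback.c hunks above change writeback behaviour in two ways. First, bdi_start_background_writeback() no longer queues a work item; it only wakes the flusher, and the new wb_check_background_flush() performs background writeback when nothing else is queued, while background and kupdate-style writeback now yield as soon as other work (for example sync) arrives. Second, wb_writeback() requests LONG_MAX pages per pass for WB_SYNC_ALL, relying on the page tagging in write_cache_pages() to avoid livelock instead of looping in MAX_WRITEBACK_PAGES slices. The chunk-size decision, restated as a self-contained sketch:

#include <linux/kernel.h>	/* LONG_MAX */
#include <linux/writeback.h>	/* enum writeback_sync_modes */

static long example_write_chunk(enum writeback_sync_modes sync_mode,
				long max_writeback_pages)
{
	if (sync_mode == WB_SYNC_NONE)
		return max_writeback_pages;	/* bounded, fair slices */
	/*
	 * WB_SYNC_ALL: ask for everything in one call; livelock is
	 * avoided because write_cache_pages() tags the currently dirty
	 * pages first and then syncs only the tagged pages.
	 */
	return LONG_MAX;
}
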
+ * Write an inode to disk and adjust its dirty state after completion. * * Note: only writes the actual inode, no associated data or other metadata. */ diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index b9f34eaede09..48a18f184d50 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -101,7 +101,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ - if (object->n_ops > 0) { + if (object->n_ops > 1) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 042af7346ec1..bfed8447ed80 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -350,7 +350,6 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, } entry = newent ? newent : entry; - d_set_d_op(entry, &fuse_dentry_operations); if (outarg_valid) fuse_change_entry_timeout(entry, &outarg); else diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index f62b32cffea9..9e3f68cc1bd1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -617,10 +617,8 @@ static struct dentry *fuse_get_dentry(struct super_block *sb, goto out_iput; entry = d_obtain_alias(inode); - if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) { - d_set_d_op(entry, &fuse_dentry_operations); + if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); - } return entry; @@ -719,10 +717,8 @@ static struct dentry *fuse_get_parent(struct dentry *child) } parent = d_obtain_alias(inode); - if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) { - d_set_d_op(parent, &fuse_dentry_operations); + if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); - } return parent; } @@ -989,6 +985,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) iput(root); goto err_put_conn; } + /* only now - we want root dentry with NULL ->d_op */ + sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(); if (!init_req) diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 97012ecff560..9023db8184f9 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -126,12 +126,7 @@ static int gfs2_get_name(struct dentry *parent, char *name, static struct dentry *gfs2_get_parent(struct dentry *child) { - struct dentry *dentry; - - dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &gfs2_dops); - return dentry; + return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); } static struct dentry *gfs2_get_dentry(struct super_block *sb, @@ -139,7 +134,6 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, { struct gfs2_sbd *sdp = sb->s_fs_info; struct inode *inode; - struct dentry *dentry; inode = gfs2_ilookup(sb, inum->no_addr); if (inode) { @@ -156,10 +150,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, return ERR_CAST(inode); out_inode: - dentry = d_obtain_alias(inode); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &gfs2_dops); - return dentry; + return d_obtain_alias(inode); } static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 2aeabd4218cc..777927ce6f79 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -440,7 +440,6 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr, iput(inode); return -ENOMEM; } - d_set_d_op(dentry, 
&gfs2_dops); *dptr = dentry; return 0; } @@ -1106,6 +1105,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; + sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_xattr = gfs2_xattr_handlers; sb->s_qcop = &gfs2_quotactl_ops; @@ -1268,7 +1268,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; int error; struct gfs2_args args; struct gfs2_sbd *sdp; @@ -1276,7 +1276,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - bdev = open_bdev_exclusive(dev_name, mode, fs_type); + bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); @@ -1298,7 +1298,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, goto error_bdev; if (s->s_root) - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode); memset(&args, 0, sizeof(args)); args.ar_quota = GFS2_QUOTA_DEFAULT; @@ -1342,7 +1342,7 @@ error_super: deactivate_locked_super(s); return ERR_PTR(error); error_bdev: - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode); return ERR_PTR(error); } diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 1501db4f0e6d..040b5a2e6556 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -106,8 +106,6 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, { struct inode *inode = NULL; - d_set_d_op(dentry, &gfs2_dops); - inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (inode && IS_ERR(inode)) return ERR_CAST(inode); @@ -1427,6 +1425,10 @@ static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset, loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; next = (next + 1) << sdp->sd_sb.sb_bsize_shift; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + offset = (offset >> sdp->sd_sb.sb_bsize_shift) << sdp->sd_sb.sb_bsize_shift; diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index ea4aefe7c652..afa66aaa2237 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -25,8 +25,6 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry, struct inode *inode = NULL; int res; - d_set_d_op(dentry, &hfs_dentry_operations); - hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name); res = hfs_brec_read(&fd, &rec, sizeof(rec)); diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 0bef62aa4f42..1b55f704fb22 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -429,13 +429,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) if (!root_inode) goto bail_no_root; + sb->s_d_op = &hfs_dentry_operations; res = -ENOMEM; sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto bail_iput; - d_set_d_op(sb->s_root, &hfs_dentry_operations); - /* everything's okay */ return 0; diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index f896dc843026..4df5059c25da 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -37,7 +37,6 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, sb = dir->i_sb; - d_set_d_op(dentry, &hfsplus_dentry_operations); dentry->d_fsdata = NULL; hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); diff --git 
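
Both the ext4 and gfs2 ->fallocate() hunks above add an explicit check that rejects any mode other than FALLOC_FL_KEEP_SIZE with -EOPNOTSUPP, so unrecognized fallocate flags fail cleanly instead of being silently ignored. For reference, a user-space caller of the one supported mode might look like the sketch below; the file name is made up, and it assumes a glibc that exposes fallocate() and FALLOC_FL_KEEP_SIZE under _GNU_SOURCE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example-prealloc", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	/* Preallocate 16 MiB of blocks without changing i_size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0)
		perror("fallocate");	/* e.g. EOPNOTSUPP where unsupported */
	close(fd);
	return 0;
}
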
a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 6ee6ad20acf2..9a3b4795f43c 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -444,13 +444,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) err = PTR_ERR(root); goto cleanup; } + sb->s_d_op = &hfsplus_dentry_operations; sb->s_root = d_alloc_root(root); if (!sb->s_root) { iput(root); err = -ENOMEM; goto cleanup; } - d_set_d_op(sb->s_root, &hfsplus_dentry_operations); str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; str.name = HFSP_HIDDENDIR_NAME; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index d3244d949a4e..2638c834ed28 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -612,7 +612,6 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, goto out_put; d_add(dentry, inode); - d_set_d_op(dentry, &hostfs_dentry_ops); return NULL; out_put: @@ -922,6 +921,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) sb->s_blocksize_bits = 10; sb->s_magic = HOSTFS_SUPER_MAGIC; sb->s_op = &hostfs_sbops; + sb->s_d_op = &hostfs_dentry_ops; sb->s_maxbytes = MAX_LFS_FILESIZE; /* NULL is printed as by sprintf: avoid that. */ diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c index 32c13a94e1e9..05d4816e4e77 100644 --- a/fs/hpfs/dentry.c +++ b/fs/hpfs/dentry.c @@ -58,12 +58,7 @@ static int hpfs_compare_dentry(const struct dentry *parent, return 0; } -static const struct dentry_operations hpfs_dentry_operations = { +const struct dentry_operations hpfs_dentry_operations = { .d_hash = hpfs_hash_dentry, .d_compare = hpfs_compare_dentry, }; - -void hpfs_set_dentry_operations(struct dentry *dentry) -{ - d_set_d_op(dentry, &hpfs_dentry_operations); -} diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 2338130cceba..d32f63a569f7 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -298,7 +298,6 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name end: end_add: - hpfs_set_dentry_operations(dentry); unlock_kernel(); d_add(dentry, result); return NULL; diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 2fee17d0d9ab..1c43dbea55e8 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -233,7 +233,7 @@ void hpfs_mark_4buffers_dirty(struct quad_buffer_head *); /* dentry.c */ -void hpfs_set_dentry_operations(struct dentry *); +extern const struct dentry_operations hpfs_dentry_operations; /* dir.c */ diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 49935ba78db8..b30426b1fc97 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -550,6 +550,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; + s->s_d_op = &hpfs_dentry_operations; sbi->sb_root = superblock->root; sbi->sb_fs_size = superblock->n_sectors; @@ -651,7 +652,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) iput(root); goto bail0; } - hpfs_set_dentry_operations(s->s_root); /* * find the root directory's . 
pointer & finish filling in the inode diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 844a7903c72f..a0f3833c0dbf 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -939,17 +939,18 @@ root_found: goto out_iput; } - /* get the root dentry */ - s->s_root = d_alloc_root(inode); - if (!(s->s_root)) - goto out_no_root; - table = 0; if (joliet_level) table += 2; if (opt.check == 'r') table++; - d_set_d_op(s->s_root, &isofs_dentry_ops[table]); + + s->s_d_op = &isofs_dentry_ops[table]; + + /* get the root dentry */ + s->s_root = d_alloc_root(inode); + if (!(s->s_root)) + goto out_no_root; kfree(opt.iocharset); diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 679a849c3b27..4fb3e8074fd4 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -172,8 +172,6 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam struct inode *inode; struct page *page; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); - page = alloc_page(GFP_USER); if (!page) return ERR_PTR(-ENOMEM); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 846a3f314111..5b2e4c30a2a1 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -207,7 +207,7 @@ repeat_locked: * the committing transaction. Really, we only need to give it * committing_transaction->t_outstanding_credits plus "enough" for * the log control blocks. - * Also, this test is inconsitent with the matching one in + * Also, this test is inconsistent with the matching one in * journal_extend(). */ if (__log_space_left(journal) < jbd_space_needed(journal)) { diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 394893242ae3..faad2bd787c7 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -251,7 +251,7 @@ repeat: * the committing transaction. Really, we only need to give it * committing_transaction->t_outstanding_credits plus "enough" for * the log control blocks. - * Also, this test is inconsitent with the matching one in + * Also, this test is inconsistent with the matching one in * jbd2_journal_extend(). 
*/ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) { diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index e1b8493b9aaa..278e3fb40b71 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1120,16 +1120,13 @@ int lmLogOpen(struct super_block *sb) * file systems to log may have n-to-1 relationship; */ - bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE); + bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + log); if (IS_ERR(bdev)) { rc = -PTR_ERR(bdev); goto free; } - if ((rc = bd_claim(bdev, log))) { - goto close; - } - log->bdev = bdev; memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); @@ -1137,7 +1134,7 @@ int lmLogOpen(struct super_block *sb) * initialize log: */ if ((rc = lmLogInit(log))) - goto unclaim; + goto close; list_add(&log->journal_list, &jfs_external_logs); @@ -1163,11 +1160,8 @@ journal_found: list_del(&log->journal_list); lbmLogShutdown(log); - unclaim: - bd_release(bdev); - close: /* close external log device */ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); free: /* free log descriptor */ mutex_unlock(&jfs_log_mutex); @@ -1512,8 +1506,7 @@ int lmLogClose(struct super_block *sb) bdev = log->bdev; rc = lmLogShutdown(log); - bd_release(bdev); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); kfree(log); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4414e3a42264..81ead850ddb6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1465,9 +1465,6 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc jfs_info("jfs_lookup: name = %s", name); - if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2) - d_set_d_op(dentry, &jfs_ci_dentry_operations); - if ((name[0] == '.') && (len == 1)) inum = dip->i_ino; else if (strcmp(name, "..") == 0) @@ -1492,12 +1489,7 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc return ERR_CAST(ip); } - dentry = d_splice_alias(ip, dentry); - - if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)) - d_set_d_op(dentry, &jfs_ci_dentry_operations); - - return dentry; + return d_splice_alias(ip, dentry); } static struct inode *jfs_nfs_get_inode(struct super_block *sb, diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 3150d766e0d4..eeca48a031ab 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -515,6 +515,9 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = JFS_SUPER_MAGIC; + if (sbi->mntflag & JFS_OS2) + sb->s_d_op = &jfs_ci_dentry_operations; + inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { ret = PTR_ERR(inode); @@ -524,9 +527,6 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) if (!sb->s_root) goto out_no_root; - if (sbi->mntflag & JFS_OS2) - d_set_d_op(sb->s_root, &jfs_ci_dentry_operations); - /* logical blocks are represented by 40 bits in pxd_t, etc. 
*/ sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; #if BITS_PER_LONG == 32 diff --git a/fs/libfs.c b/fs/libfs.c index 889311e3d06b..c88eab55aec9 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -217,7 +217,8 @@ static const struct super_operations simple_super_operations = { * will never be mountable) */ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, - const struct super_operations *ops, unsigned long magic) + const struct super_operations *ops, + const struct dentry_operations *dops, unsigned long magic) { struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); struct dentry *dentry; @@ -254,6 +255,7 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, dentry->d_parent = dentry; d_instantiate(dentry, root); s->s_root = dentry; + s->s_d_op = dops; s->s_flags |= MS_ACTIVE; return dget(s->s_root); diff --git a/fs/locks.c b/fs/locks.c index 08415b2a6d36..0f3998291f78 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -444,15 +444,9 @@ static void lease_release_private_callback(struct file_lock *fl) fl->fl_file->f_owner.signum = 0; } -static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) -{ - return fl->fl_file == try->fl_file; -} - static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, - .fl_mylease = lease_mylease_callback, .fl_change = lease_modify, }; @@ -1405,7 +1399,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) for (before = &inode->i_flock; ((fl = *before) != NULL) && IS_LEASE(fl); before = &fl->fl_next) { - if (lease->fl_lmops->fl_mylease(fl, lease)) + if (fl->fl_file == filp) my_before = before; else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) /* diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 92ca6fbe09bd..723bc5bca09a 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -300,7 +300,7 @@ static int bdev_write_sb(struct super_block *sb, struct page *page) static void bdev_put_device(struct logfs_super *s) { - close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int bdev_can_write_buf(struct super_block *sb, u64 ofs) @@ -325,13 +325,14 @@ int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type, { struct block_device *bdev; - bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type); + bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + type); if (IS_ERR(bdev)) return PTR_ERR(bdev); if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { int mtdnr = MINOR(bdev->bd_dev); - close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); return logfs_get_sb_mtd(p, mtdnr); } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index 1b9e07728a9f..ce7337ddfdbf 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -23,8 +23,6 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st struct inode * inode = NULL; ino_t ino; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); - if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen) return ERR_PTR(-ENAMETOOLONG); diff --git a/fs/mpage.c b/fs/mpage.c index fd56ca2ea556..d78455a81ec9 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -40,7 +40,7 @@ * status of that page is hard. See end_buffer_async_read() for the details. * There is no point in duplicating all that complexity. 
*/ -static void mpage_end_io_read(struct bio *bio, int err) +static void mpage_end_io(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -50,44 +50,29 @@ static void mpage_end_io_read(struct bio *bio, int err) if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); - - if (uptodate) { - SetPageUptodate(page); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - unlock_page(page); - } while (bvec >= bio->bi_io_vec); - bio_put(bio); -} - -static void mpage_end_io_write(struct bio *bio, int err) -{ - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - - do { - struct page *page = bvec->bv_page; - - if (--bvec >= bio->bi_io_vec) - prefetchw(&bvec->bv_page->flags); - - if (!uptodate){ - SetPageError(page); - if (page->mapping) - set_bit(AS_EIO, &page->mapping->flags); + if (bio_data_dir(bio) == READ) { + if (uptodate) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + } else { /* bio_data_dir(bio) == WRITE */ + if (!uptodate) { + SetPageError(page); + if (page->mapping) + set_bit(AS_EIO, &page->mapping->flags); + } + end_page_writeback(page); } - end_page_writeback(page); } while (bvec >= bio->bi_io_vec); bio_put(bio); } static struct bio *mpage_bio_submit(int rw, struct bio *bio) { - bio->bi_end_io = mpage_end_io_read; - if (rw == WRITE) - bio->bi_end_io = mpage_end_io_write; + bio->bi_end_io = mpage_end_io; submit_bio(rw, bio); return NULL; } diff --git a/fs/namei.c b/fs/namei.c index 24ece10470b6..8df7a78ace58 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -479,6 +479,14 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; + /* + * It can be possible to revalidate the dentry that we started + * the path walk with. force_reval_path may also revalidate the + * dentry already committed to the nameidata. + */ + if (unlikely(parent == dentry)) + return nameidata_drop_rcu(nd); + BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt) { spin_lock(&fs->lock); @@ -583,6 +591,13 @@ void release_open_intent(struct nameidata *nd) fput(nd->intent.open.file); } +/* + * Call d_revalidate and handle filesystems that request rcu-walk + * to be dropped. This may be called and return in rcu-walk mode, + * regardless of success or error. If -ECHILD is returned, the caller + * must return -ECHILD back up the path walk stack so path walk may + * be restarted in ref-walk mode. 
+ */ static int d_revalidate(struct dentry *dentry, struct nameidata *nd) { int status; @@ -673,6 +688,9 @@ force_reval_path(struct path *path, struct nameidata *nd) return 0; if (!status) { + /* Don't d_invalidate in rcu-walk mode */ + if (nameidata_drop_rcu(nd)) + return -ECHILD; d_invalidate(dentry); status = -ESTALE; } @@ -761,7 +779,8 @@ static void path_put_conditional(struct path *path, struct nameidata *nd) mntput(path->mnt); } -static inline void path_to_nameidata(struct path *path, struct nameidata *nd) +static inline void path_to_nameidata(const struct path *path, + struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); @@ -773,20 +792,20 @@ static inline void path_to_nameidata(struct path *path, struct nameidata *nd) } static __always_inline int -__do_follow_link(struct path *path, struct nameidata *nd, void **p) +__do_follow_link(const struct path *link, struct nameidata *nd, void **p) { int error; - struct dentry *dentry = path->dentry; + struct dentry *dentry = link->dentry; - touch_atime(path->mnt, dentry); + touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); - if (path->mnt != nd->path.mnt) { - path_to_nameidata(path, nd); + if (link->mnt != nd->path.mnt) { + path_to_nameidata(link, nd); nd->inode = nd->path.dentry->d_inode; dget(dentry); } - mntget(path->mnt); + mntget(link->mnt); nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); @@ -1950,8 +1969,9 @@ int may_open(struct path *path, int acc_mode, int flag) return break_lease(inode, flag); } -static int handle_truncate(struct path *path) +static int handle_truncate(struct file *filp) { + struct path *path = &filp->f_path; struct inode *inode = path->dentry->d_inode; int error = get_write_access(inode); if (error) @@ -1965,7 +1985,7 @@ static int handle_truncate(struct path *path) if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, - NULL); + filp); } put_write_access(inode); return error; @@ -2063,7 +2083,7 @@ static struct file *finish_open(struct nameidata *nd, } if (!IS_ERR(filp)) { if (will_truncate) { - error = handle_truncate(&nd->path); + error = handle_truncate(filp); if (error) { fput(filp); filp = ERR_PTR(error); @@ -2104,11 +2124,13 @@ static struct file *do_last(struct nameidata *nd, struct path *path, dir = nd->path.dentry; case LAST_DOT: if (need_reval_dot(dir)) { - error = d_revalidate(nd->path.dentry, nd); - if (!error) - error = -ESTALE; - if (error < 0) + int status = d_revalidate(nd->path.dentry, nd); + if (!status) + status = -ESTALE; + if (status < 0) { + error = status; goto exit; + } } /* fallthrough */ case LAST_ROOT: @@ -2327,11 +2349,12 @@ reval: nd.flags = flags; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); while (unlikely(!filp)) { /* trailing symlink */ - struct path holder; + struct path link = path; + struct inode *linki = link.dentry->d_inode; void *cookie; error = -ELOOP; /* S_ISDIR part is a temporary automount kludge */ - if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(nd.inode->i_mode)) + if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(linki->i_mode)) goto exit_dput; if (count++ == 32) goto exit_dput; @@ -2347,23 +2370,22 @@ reval: * just set LAST_BIND. 
*/ nd.flags |= LOOKUP_PARENT; - error = security_inode_follow_link(path.dentry, &nd); + error = security_inode_follow_link(link.dentry, &nd); if (error) goto exit_dput; - error = __do_follow_link(&path, &nd, &cookie); + error = __do_follow_link(&link, &nd, &cookie); if (unlikely(error)) { - if (!IS_ERR(cookie) && nd.inode->i_op->put_link) - nd.inode->i_op->put_link(path.dentry, &nd, cookie); + if (!IS_ERR(cookie) && linki->i_op->put_link) + linki->i_op->put_link(link.dentry, &nd, cookie); /* nd.path had been dropped */ - nd.path = path; + nd.path = link; goto out_path; } - holder = path; nd.flags &= ~LOOKUP_PARENT; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); - if (nd.inode->i_op->put_link) - nd.inode->i_op->put_link(holder.dentry, &nd, cookie); - path_put(&holder); + if (linki->i_op->put_link) + linki->i_op->put_link(link.dentry, &nd, cookie); + path_put(&link); } out: if (nd.root.mnt) diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 28f136d4aaec..f6946bb5cb55 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -21,9 +21,7 @@ #include #include -#include - -#include "ncplib_kernel.h" +#include "ncp_fs.h" static void ncp_read_volume_list(struct file *, void *, filldir_t, struct ncp_cache_control *); @@ -82,7 +80,7 @@ static int ncp_compare_dentry(const struct dentry *, const struct inode *, unsigned int, const char *, const struct qstr *); static int ncp_delete_dentry(const struct dentry *); -static const struct dentry_operations ncp_dentry_operations = +const struct dentry_operations ncp_dentry_operations = { .d_revalidate = ncp_lookup_validate, .d_hash = ncp_hash_dentry, @@ -90,14 +88,6 @@ static const struct dentry_operations ncp_dentry_operations = .d_delete = ncp_delete_dentry, }; -const struct dentry_operations ncp_root_dentry_operations = -{ - .d_hash = ncp_hash_dentry, - .d_compare = ncp_compare_dentry, - .d_delete = ncp_delete_dentry, -}; - - #define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber]) static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator) @@ -309,6 +299,9 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd) int res, val = 0, len; __u8 __name[NCP_MAXPATHLEN + 1]; + if (dentry == dentry->d_sb->s_root) + return 1; + if (nd->flags & LOOKUP_RCU) return -ECHILD; @@ -637,7 +630,6 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir, entry->ino = iunique(dir->i_sb, 2); inode = ncp_iget(dir->i_sb, entry); if (inode) { - d_set_d_op(newdent, &ncp_dentry_operations); d_instantiate(newdent, inode); if (!hashed) d_rehash(newdent); @@ -893,7 +885,6 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc if (inode) { ncp_new_dentry(dentry); add_entry: - d_set_d_op(dentry, &ncp_dentry_operations); d_add(dentry, inode); error = 0; } diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index cb50aaf981df..0ed65e0c3dfe 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -18,8 +18,7 @@ #include #include -#include -#include "ncplib_kernel.h" +#include "ncp_fs.h" static int ncp_fsync(struct file *file, int datasync) { diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 9b39a5dd4131..00a1d1c3d3a4 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -31,11 +31,9 @@ #include #include -#include - #include -#include "ncplib_kernel.h" +#include "ncp_fs.h" #include "getopt.h" #define NCP_DEFAULT_FILE_MODE 0600 @@ -544,6 +542,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) sb->s_blocksize_bits = 10; sb->s_magic = NCP_SUPER_MAGIC; sb->s_op = 
&ncp_sops; + sb->s_d_op = &ncp_dentry_operations; sb->s_bdi = &server->bdi; server = NCP_SBP(sb); @@ -723,7 +722,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto out_no_root; - d_set_d_op(sb->s_root, &ncp_root_dentry_operations); return 0; out_no_root: diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index d40a547e3377..790e92a9ec63 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -20,11 +20,9 @@ #include #include -#include - #include -#include "ncplib_kernel.h" +#include "ncp_fs.h" /* maximum limit for ncp_objectname_ioctl */ #define NCP_OBJECT_NAME_MAX_LEN 4096 diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index 56f5b3a0e1ee..a7c07b44b100 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c @@ -16,12 +16,12 @@ #include #include #include -#include -#include "ncplib_kernel.h" #include #include +#include "ncp_fs.h" + /* * Fill in the supplied page for mmap * XXX: how are we excluding truncate/invalidate here? Maybe need to lock diff --git a/fs/ncpfs/ncp_fs.h b/fs/ncpfs/ncp_fs.h new file mode 100644 index 000000000000..31831afe1c3b --- /dev/null +++ b/fs/ncpfs/ncp_fs.h @@ -0,0 +1,98 @@ +#include +#include "ncp_fs_i.h" +#include "ncp_fs_sb.h" + +/* define because it is easy to change PRINTK to {*}PRINTK */ +#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args) + +#undef NCPFS_PARANOIA +#ifdef NCPFS_PARANOIA +#define PPRINTK(format, args...) PRINTK(format , ## args) +#else +#define PPRINTK(format, args...) +#endif + +#ifndef DEBUG_NCP +#define DEBUG_NCP 0 +#endif +#if DEBUG_NCP > 0 +#define DPRINTK(format, args...) PRINTK(format , ## args) +#else +#define DPRINTK(format, args...) +#endif +#if DEBUG_NCP > 1 +#define DDPRINTK(format, args...) PRINTK(format , ## args) +#else +#define DDPRINTK(format, args...) 
+#endif + +#define NCP_MAX_RPC_TIMEOUT (6*HZ) + + +struct ncp_entry_info { + struct nw_info_struct i; + ino_t ino; + int opened; + int access; + unsigned int volume; + __u8 file_handle[6]; +}; + +static inline struct ncp_server *NCP_SBP(const struct super_block *sb) +{ + return sb->s_fs_info; +} + +#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb) +static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode) +{ + return container_of(inode, struct ncp_inode_info, vfs_inode); +} + +/* linux/fs/ncpfs/inode.c */ +int ncp_notify_change(struct dentry *, struct iattr *); +struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *); +void ncp_update_inode(struct inode *, struct ncp_entry_info *); +void ncp_update_inode2(struct inode *, struct ncp_entry_info *); + +/* linux/fs/ncpfs/dir.c */ +extern const struct inode_operations ncp_dir_inode_operations; +extern const struct file_operations ncp_dir_operations; +extern const struct dentry_operations ncp_dentry_operations; +int ncp_conn_logged_in(struct super_block *); +int ncp_date_dos2unix(__le16 time, __le16 date); +void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); + +/* linux/fs/ncpfs/ioctl.c */ +long ncp_ioctl(struct file *, unsigned int, unsigned long); +long ncp_compat_ioctl(struct file *, unsigned int, unsigned long); + +/* linux/fs/ncpfs/sock.c */ +int ncp_request2(struct ncp_server *server, int function, + void* reply, int max_reply_size); +static inline int ncp_request(struct ncp_server *server, int function) { + return ncp_request2(server, function, server->packet, server->packet_size); +} +int ncp_connect(struct ncp_server *server); +int ncp_disconnect(struct ncp_server *server); +void ncp_lock_server(struct ncp_server *server); +void ncp_unlock_server(struct ncp_server *server); + +/* linux/fs/ncpfs/symlink.c */ +#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) +extern const struct address_space_operations ncp_symlink_aops; +int ncp_symlink(struct inode*, struct dentry*, const char*); +#endif + +/* linux/fs/ncpfs/file.c */ +extern const struct inode_operations ncp_file_inode_operations; +extern const struct file_operations ncp_file_operations; +int ncp_make_open(struct inode *, int); + +/* linux/fs/ncpfs/mmap.c */ +int ncp_mmap(struct file *, struct vm_area_struct *); + +/* linux/fs/ncpfs/ncplib_kernel.c */ +int ncp_make_closed(struct inode *); + +#include "ncplib_kernel.h" diff --git a/include/linux/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h similarity index 100% rename from include/linux/ncp_fs_i.h rename to fs/ncpfs/ncp_fs_i.h diff --git a/include/linux/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h similarity index 86% rename from include/linux/ncp_fs_sb.h rename to fs/ncpfs/ncp_fs_sb.h index d64b0e894336..4af803f13516 100644 --- a/include/linux/ncp_fs_sb.h +++ b/fs/ncpfs/ncp_fs_sb.h @@ -13,15 +13,30 @@ #include #include #include - -#ifdef __KERNEL__ - #include #define NCP_DEFAULT_OPTIONS 0 /* 2 for packet signatures */ struct sock; +struct ncp_mount_data_kernel { + unsigned long flags; /* NCP_MOUNT_* flags */ + unsigned int int_flags; /* internal flags */ +#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 + __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ + struct pid *wdog_pid; /* Who cares for our watchdog packets? */ + unsigned int ncp_fd; /* The socket to the ncp port */ + unsigned int time_out; /* How long should I wait after + sending a NCP request? */ + unsigned int retry_count; /* And how often should I retry? 
*/ + unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_mode_t file_mode; + __kernel_mode_t dir_mode; + int info_fd; +}; + struct ncp_server { struct ncp_mount_data_kernel m; /* Nearly all of the mount data is of @@ -158,7 +173,4 @@ static inline void ncp_invalidate_conn(struct ncp_server *server) server->conn_status |= 0x01; } -#endif /* __KERNEL__ */ - #endif - diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index a95615a0b6ac..981a95617fc9 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -11,7 +11,7 @@ -#include "ncplib_kernel.h" +#include "ncp_fs.h" static inline void assert_server_locked(struct ncp_server *server) { diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 1220df75ff22..09881e6aa5ad 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -32,8 +32,6 @@ #include #endif /* CONFIG_NCPFS_NLS */ -#include - #define NCP_MIN_SYMLINK_SIZE 8 #define NCP_MAX_SYMLINK_SIZE 512 diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c index d8b2d7e6910b..08907599dcd2 100644 --- a/fs/ncpfs/ncpsign_kernel.c +++ b/fs/ncpfs/ncpsign_kernel.c @@ -11,6 +11,7 @@ #include #include #include +#include "ncp_fs.h" #include "ncpsign_kernel.h" /* i386: 32-bit, little endian, handles mis-alignment */ diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h index 6451a68381cc..d9a1438bb1f6 100644 --- a/fs/ncpfs/ncpsign_kernel.h +++ b/fs/ncpfs/ncpsign_kernel.h @@ -8,8 +8,6 @@ #ifndef _NCPSIGN_KERNEL_H #define _NCPSIGN_KERNEL_H -#include - #ifdef CONFIG_NCPFS_PACKET_SIGNING void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff); int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff); diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 668bd267346e..3a1587222c8a 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -28,7 +28,7 @@ #include #include -#include +#include "ncp_fs.h" #include "ncpsign_kernel.h" diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c index c634fd17b337..661f861d80c6 100644 --- a/fs/ncpfs/symlink.c +++ b/fs/ncpfs/symlink.c @@ -25,13 +25,11 @@ #include #include -#include #include #include #include #include -#include "ncplib_kernel.h" - +#include "ncp_fs.h" /* these magic numbers must appear in the symlink file -- this makes it a bit more resilient against the magic attributes being set on random files. */ diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index abe4f0c8dc5f..df8c03a02161 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -439,7 +439,6 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) if (dentry == NULL) return; - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); if (IS_ERR(inode)) goto out; @@ -1193,8 +1192,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru if (dentry->d_name.len > NFS_SERVER(dir)->namelen) goto out; - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); - /* * If we're doing an exclusive create, optimize away the lookup * but don't hash the dentry. @@ -1338,7 +1335,6 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry res = ERR_PTR(-ENAMETOOLONG); goto out; } - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash * the dentry. 
*/ @@ -1410,11 +1406,15 @@ no_open: static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *parent = NULL; - struct inode *inode = dentry->d_inode; + struct inode *inode; struct inode *dir; struct nfs_open_context *ctx; int openflags, ret = 0; + if (nd->flags & LOOKUP_RCU) + return -ECHILD; + + inode = dentry->d_inode; if (!is_atomic_open(nd) || d_mountpoint(dentry)) goto no_open; @@ -1583,6 +1583,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, { struct iattr attr; int error; + int open_flags = 0; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1590,7 +1591,10 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL); + if ((nd->flags & LOOKUP_CREATE) != 0) + open_flags = nd->intent.open.flags; + + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); if (error != 0) goto out_err; return 0; diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 5596c6a2881e..b5ffe8fa291f 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -119,9 +119,6 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh) } security_d_instantiate(ret, inode); - - if (ret->d_op == NULL) - d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops); out: nfs_free_fattr(fsinfo.fattr); return ret; @@ -227,9 +224,6 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh) security_d_instantiate(ret, inode); - if (ret->d_op == NULL) - d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops); - out: nfs_free_fattr(fattr); dprintk("<-- nfs4_get_root()\n"); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 0f9ea73e7789..b68c8607770f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2202,6 +2202,7 @@ static int nfs_set_super(struct super_block *s, void *data) s->s_flags = sb_mntdata->mntflags; s->s_fs_info = server; + s->s_d_op = server->nfs_client->rpc_ops->dentry_ops; ret = set_anon_super(s, server); if (ret == 0) server->s_dev = s->s_dev; diff --git a/include/linux/nfs4_acl.h b/fs/nfsd/acl.h similarity index 98% rename from include/linux/nfs4_acl.h rename to fs/nfsd/acl.h index c9c05a78e9bb..34e5c40af5ef 100644 --- a/include/linux/nfs4_acl.h +++ b/fs/nfsd/acl.h @@ -1,6 +1,4 @@ /* - * include/linux/nfs4_acl.c - * * Common NFSv4 ACL handling definitions. * * Copyright (c) 2002 The Regents of the University of Michigan. diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c0fcb7ab7f6d..8b31e5f8795d 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1,4 +1,3 @@ -#define MSNFS /* HACK HACK */ /* * NFS exporting and validation. * @@ -1444,9 +1443,6 @@ static struct flags { { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, -#ifdef MSNFS - { NFSEXP_MSNFS, {"msnfs", ""}}, -#endif { 0, {"", ""}} }; diff --git a/include/linux/nfsd_idmap.h b/fs/nfsd/idmap.h similarity index 92% rename from include/linux/nfsd_idmap.h rename to fs/nfsd/idmap.h index d4a2ac18bd4c..2f3be1321534 100644 --- a/include/linux/nfsd_idmap.h +++ b/fs/nfsd/idmap.h @@ -1,6 +1,4 @@ /* - * include/linux/nfsd_idmap.h - * * Mapping of UID to name and vice versa. 
* * Copyright (c) 2002, 2003 The Regents of the University of @@ -56,8 +54,8 @@ static inline void nfsd_idmap_shutdown(void) } #endif -int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); -int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); +__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); +__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *); int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *); diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 5b7e3021e06b..2247fc91d5e9 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -151,10 +151,10 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, __be32 nfserr; u32 max_blocksize = svc_max_payload(rqstp); - dprintk("nfsd: READ(3) %s %lu bytes at %lu\n", + dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n", SVCFH_fmt(&argp->fh), (unsigned long) argp->count, - (unsigned long) argp->offset); + (unsigned long long) argp->offset); /* Obtain buffer pointer for payload. * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) @@ -191,10 +191,10 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp, __be32 nfserr; unsigned long cnt = argp->len; - dprintk("nfsd: WRITE(3) %s %d bytes at %ld%s\n", + dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n", SVCFH_fmt(&argp->fh), argp->len, - (unsigned long) argp->offset, + (unsigned long long) argp->offset, argp->stable? " stable" : ""); fh_copy(&resp->fh, &argp->fh); diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index e48052615159..ad88f1c0a4c3 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -36,7 +36,7 @@ #include #include -#include +#include "acl.h" /* mode bit translations: */ diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 21a63da305ff..3be975e18919 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -628,10 +628,8 @@ static int max_cb_time(void) return max(nfsd4_lease/10, (time_t)1) * HZ; } -/* Reference counting, callback cleanup, etc., all look racy as heck. - * And why is cl_cb_set an atomic? 
*/ -int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn) +static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) { struct rpc_timeout timeparms = { .to_initval = max_cb_time(), @@ -641,6 +639,7 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn) .net = &init_net, .address = (struct sockaddr *) &conn->cb_addr, .addrsize = conn->cb_addrlen, + .saddress = (struct sockaddr *) &conn->cb_saddr, .timeout = &timeparms, .program = &cb_program, .version = 0, @@ -657,6 +656,10 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn) args.protocol = XPRT_TRANSPORT_TCP; clp->cl_cb_ident = conn->cb_ident; } else { + if (!conn->cb_xprt) + return -EINVAL; + clp->cl_cb_conn.cb_xprt = conn->cb_xprt; + clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; args.prognumber = clp->cl_cb_session->se_cb_prog; args.protocol = XPRT_TRANSPORT_BC_TCP; @@ -679,14 +682,20 @@ static void warn_no_callback_path(struct nfs4_client *clp, int reason) (int)clp->cl_name.len, clp->cl_name.data, reason); } +static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason) +{ + clp->cl_cb_state = NFSD4_CB_DOWN; + warn_no_callback_path(clp, reason); +} + static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) { struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null); if (task->tk_status) - warn_no_callback_path(clp, task->tk_status); + nfsd4_mark_cb_down(clp, task->tk_status); else - atomic_set(&clp->cl_cb_set, 1); + clp->cl_cb_state = NFSD4_CB_UP; } static const struct rpc_call_ops nfsd4_cb_probe_ops = { @@ -709,6 +718,11 @@ int set_callback_cred(void) static struct workqueue_struct *callback_wq; +static void run_nfsd4_cb(struct nfsd4_callback *cb) +{ + queue_work(callback_wq, &cb->cb_work); +} + static void do_probe_callback(struct nfs4_client *clp) { struct nfsd4_callback *cb = &clp->cl_cb_null; @@ -723,7 +737,7 @@ static void do_probe_callback(struct nfs4_client *clp) cb->cb_ops = &nfsd4_cb_probe_ops; - queue_work(callback_wq, &cb->cb_work); + run_nfsd4_cb(cb); } /* @@ -732,14 +746,21 @@ static void do_probe_callback(struct nfs4_client *clp) */ void nfsd4_probe_callback(struct nfs4_client *clp) { + /* XXX: atomicity? Also, should we be using cl_cb_flags? */ + clp->cl_cb_state = NFSD4_CB_UNKNOWN; set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); do_probe_callback(clp); } -void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) +void nfsd4_probe_callback_sync(struct nfs4_client *clp) { - BUG_ON(atomic_read(&clp->cl_cb_set)); + nfsd4_probe_callback(clp); + flush_workqueue(callback_wq); +} +void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) +{ + clp->cl_cb_state = NFSD4_CB_UNKNOWN; spin_lock(&clp->cl_lock); memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn)); spin_unlock(&clp->cl_lock); @@ -750,24 +771,14 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) * If the slot is available, then mark it busy. Otherwise, set the * thread for sleeping on the callback RPC wait queue. 
*/ -static int nfsd41_cb_setup_sequence(struct nfs4_client *clp, - struct rpc_task *task) +static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) { - u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data; - int status = 0; - - dprintk("%s: %u:%u:%u:%u\n", __func__, - ptr[0], ptr[1], ptr[2], ptr[3]); - if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); dprintk("%s slot is busy\n", __func__); - status = -EAGAIN; - goto out; + return false; } -out: - dprintk("%s status=%d\n", __func__, status); - return status; + return true; } /* @@ -780,20 +791,19 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); struct nfs4_client *clp = dp->dl_client; u32 minorversion = clp->cl_minorversion; - int status = 0; cb->cb_minorversion = minorversion; if (minorversion) { - status = nfsd41_cb_setup_sequence(clp, task); - if (status) { - if (status != -EAGAIN) { - /* terminate rpc task */ - task->tk_status = status; - task->tk_action = NULL; - } + if (!nfsd41_cb_get_slot(clp, task)) return; - } } + spin_lock(&clp->cl_lock); + if (list_empty(&cb->cb_per_client)) { + /* This is the first call, not a restart */ + cb->cb_done = false; + list_add(&cb->cb_per_client, &clp->cl_callbacks); + } + spin_unlock(&clp->cl_lock); rpc_call_start(task); } @@ -829,15 +839,18 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) nfsd4_cb_done(task, calldata); - if (current_rpc_client == NULL) { - /* We're shutting down; give up. */ - /* XXX: err, or is it ok just to fall through - * and rpc_restart_call? */ + if (current_rpc_client != task->tk_client) { + /* We're shutting down or changing cl_cb_client; leave + * it to nfsd4_process_cb_update to restart the call if + * necessary. */ return; } + if (cb->cb_done) + return; switch (task->tk_status) { case 0: + cb->cb_done = true; return; case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: @@ -846,32 +859,30 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) break; default: /* Network partition? 
*/ - atomic_set(&clp->cl_cb_set, 0); - warn_no_callback_path(clp, task->tk_status); - if (current_rpc_client != task->tk_client) { - /* queue a callback on the new connection: */ - atomic_inc(&dp->dl_count); - nfsd4_cb_recall(dp); - return; - } + nfsd4_mark_cb_down(clp, task->tk_status); } if (dp->dl_retries--) { rpc_delay(task, 2*HZ); task->tk_status = 0; rpc_restart_call_prepare(task); return; - } else { - atomic_set(&clp->cl_cb_set, 0); - warn_no_callback_path(clp, task->tk_status); } + nfsd4_mark_cb_down(clp, task->tk_status); + cb->cb_done = true; } static void nfsd4_cb_recall_release(void *calldata) { struct nfsd4_callback *cb = calldata; + struct nfs4_client *clp = cb->cb_clp; struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); - nfs4_put_delegation(dp); + if (cb->cb_done) { + spin_lock(&clp->cl_lock); + list_del(&cb->cb_per_client); + spin_unlock(&clp->cl_lock); + nfs4_put_delegation(dp); + } } static const struct rpc_call_ops nfsd4_cb_recall_ops = { @@ -906,16 +917,33 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp) flush_workqueue(callback_wq); } -void nfsd4_release_cb(struct nfsd4_callback *cb) +static void nfsd4_release_cb(struct nfsd4_callback *cb) { if (cb->cb_ops->rpc_release) cb->cb_ops->rpc_release(cb); } -void nfsd4_process_cb_update(struct nfsd4_callback *cb) +/* requires cl_lock: */ +static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp) +{ + struct nfsd4_session *s; + struct nfsd4_conn *c; + + list_for_each_entry(s, &clp->cl_sessions, se_perclnt) { + list_for_each_entry(c, &s->se_conns, cn_persession) { + if (c->cn_flags & NFS4_CDFC4_BACK) + return c; + } + } + return NULL; +} + +static void nfsd4_process_cb_update(struct nfsd4_callback *cb) { struct nfs4_cb_conn conn; struct nfs4_client *clp = cb->cb_clp; + struct nfsd4_session *ses = NULL; + struct nfsd4_conn *c; int err; /* @@ -926,6 +954,10 @@ void nfsd4_process_cb_update(struct nfsd4_callback *cb) rpc_shutdown_client(clp->cl_cb_client); clp->cl_cb_client = NULL; } + if (clp->cl_cb_conn.cb_xprt) { + svc_xprt_put(clp->cl_cb_conn.cb_xprt); + clp->cl_cb_conn.cb_xprt = NULL; + } if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags)) return; spin_lock(&clp->cl_lock); @@ -936,11 +968,22 @@ void nfsd4_process_cb_update(struct nfsd4_callback *cb) BUG_ON(!clp->cl_cb_flags); clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn)); + c = __nfsd4_find_backchannel(clp); + if (c) { + svc_xprt_get(c->cn_xprt); + conn.cb_xprt = c->cn_xprt; + ses = c->cn_session; + } spin_unlock(&clp->cl_lock); - err = setup_callback_client(clp, &conn); - if (err) + err = setup_callback_client(clp, &conn, ses); + if (err) { warn_no_callback_path(clp, err); + return; + } + /* Yay, the callback channel's back! 
Restart any callbacks: */ + list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client) + run_nfsd4_cb(cb); } void nfsd4_do_callback_rpc(struct work_struct *w) @@ -965,10 +1008,11 @@ void nfsd4_do_callback_rpc(struct work_struct *w) void nfsd4_cb_recall(struct nfs4_delegation *dp) { struct nfsd4_callback *cb = &dp->dl_recall; + struct nfs4_client *clp = dp->dl_client; dp->dl_retries = 1; cb->cb_op = dp; - cb->cb_clp = dp->dl_client; + cb->cb_clp = clp; cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL]; cb->cb_msg.rpc_argp = cb; cb->cb_msg.rpc_resp = cb; @@ -977,5 +1021,8 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp) cb->cb_ops = &nfsd4_cb_recall_ops; dp->dl_retries = 1; - queue_work(callback_wq, &dp->dl_recall.cb_work); + INIT_LIST_HEAD(&cb->cb_per_client); + cb->cb_done = true; + + run_nfsd4_cb(&dp->dl_recall); } diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index f0695e815f0e..6d2c397d458b 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -33,10 +33,11 @@ */ #include -#include #include #include #include +#include "idmap.h" +#include "nfsd.h" /* * Cache entry @@ -514,7 +515,7 @@ rqst_authname(struct svc_rqst *rqstp) return clp->name; } -static int +static __be32 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id) { @@ -524,15 +525,15 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen int ret; if (namelen + 1 > sizeof(key.name)) - return -EINVAL; + return nfserr_badowner; memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); if (ret == -ENOENT) - ret = -ESRCH; /* nfserr_badname */ + return nfserr_badowner; if (ret) - return ret; + return nfserrno(ret); *id = item->id; cache_put(&item->h, &nametoid_cache); return 0; @@ -560,14 +561,14 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) return ret; } -int +__be32 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id); } -int +__be32 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0cdfd022bb7b..db52546143d1 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -604,9 +604,7 @@ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return status; } -static __be32 -nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - void *arg) +static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh) { struct svc_fh tmp_fh; __be32 ret; @@ -615,13 +613,19 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ret = exp_pseudoroot(rqstp, &tmp_fh); if (ret) return ret; - if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) { + if (tmp_fh.fh_dentry == fh->fh_dentry) { fh_put(&tmp_fh); return nfserr_noent; } fh_put(&tmp_fh); - return nfsd_lookup(rqstp, &cstate->current_fh, - "..", 2, &cstate->current_fh); + return nfsd_lookup(rqstp, fh, "..", 2, fh); +} + +static __be32 +nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + void *arg) +{ + return nfsd4_do_lookupp(rqstp, &cstate->current_fh); } static __be32 @@ -769,9 +773,35 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } else secinfo->si_exp = exp; dput(dentry); + if (cstate->minorversion) 
+ /* See rfc 5661 section 2.6.3.1.1.8 */ + fh_put(&cstate->current_fh); return err; } +static __be32 +nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + struct nfsd4_secinfo_no_name *sin) +{ + __be32 err; + + switch (sin->sin_style) { + case NFS4_SECINFO_STYLE4_CURRENT_FH: + break; + case NFS4_SECINFO_STYLE4_PARENT: + err = nfsd4_do_lookupp(rqstp, &cstate->current_fh); + if (err) + return err; + break; + default: + return nfserr_inval; + } + exp_get(cstate->current_fh.fh_export); + sin->sin_exp = cstate->current_fh.fh_export; + fh_put(&cstate->current_fh); + return nfs_ok; +} + static __be32 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) @@ -974,8 +1004,8 @@ static const char *nfsd4_op_name(unsigned opnum); * Also note, enforced elsewhere: * - SEQUENCE other than as first op results in * NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().) - * - BIND_CONN_TO_SESSION must be the only op in its compound - * (Will be enforced in nfsd4_bind_conn_to_session().) + * - BIND_CONN_TO_SESSION must be the only op in its compound. + * (Enforced in nfsd4_bind_conn_to_session().) * - DESTROY_SESSION must be the final operation in a compound, if * sessionid's in SEQUENCE and DESTROY_SESSION are the same. * (Enforced in nfsd4_destroy_session().) @@ -1126,10 +1156,6 @@ encode_op: nfsd4_increment_op_stats(op->opnum); } - if (!rqstp->rq_usedeferral && status == nfserr_dropit) { - dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__); - status = nfserr_jukebox; - } resp->cstate.status = status; fh_put(&resp->cstate.current_fh); @@ -1300,6 +1326,11 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, .op_name = "OP_EXCHANGE_ID", }, + [OP_BIND_CONN_TO_SESSION] = { + .op_func = (nfsd4op_func)nfsd4_bind_conn_to_session, + .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, + .op_name = "OP_BIND_CONN_TO_SESSION", + }, [OP_CREATE_SESSION] = { .op_func = (nfsd4op_func)nfsd4_create_session, .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, @@ -1320,6 +1351,10 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_flags = ALLOWED_WITHOUT_FH, .op_name = "OP_RECLAIM_COMPLETE", }, + [OP_SECINFO_NO_NAME] = { + .op_func = (nfsd4op_func)nfsd4_secinfo_no_name, + .op_name = "OP_SECINFO_NO_NAME", + }, }; static const char *nfsd4_op_name(unsigned opnum) diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 7e26caab2a26..ffb59ef6f82f 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -302,7 +302,6 @@ purge_old(struct dentry *parent, struct dentry *child) { int status; - /* note: we currently use this path only for minorversion 0 */ if (nfs4_has_reclaimed_state(child->d_name.name, false)) return 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index fbd18c3074bb..d98d0213285d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -230,7 +230,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f dp->dl_client = clp; get_nfs4_file(fp); dp->dl_file = fp; - nfs4_file_get_access(fp, O_RDONLY); + dp->dl_vfs_file = find_readable_file(fp); + get_file(dp->dl_vfs_file); dp->dl_flock = NULL; dp->dl_type = type; dp->dl_stateid.si_boot = boot_time; @@ -252,6 +253,7 @@ nfs4_put_delegation(struct nfs4_delegation *dp) if (atomic_dec_and_test(&dp->dl_count)) { dprintk("NFSD: freeing dp %p\n",dp); put_nfs4_file(dp->dl_file); + fput(dp->dl_vfs_file); kmem_cache_free(deleg_slab, dp); num_delegations--; } @@ -265,12 +267,10 @@ 
nfs4_put_delegation(struct nfs4_delegation *dp) static void nfs4_close_delegation(struct nfs4_delegation *dp) { - struct file *filp = find_readable_file(dp->dl_file); - dprintk("NFSD: close_delegation dp %p\n",dp); + /* XXX: do we even need this check?: */ if (dp->dl_flock) - vfs_setlease(filp, F_UNLCK, &dp->dl_flock); - nfs4_file_put_access(dp->dl_file, O_RDONLY); + vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); } /* Called under the state lock. */ @@ -642,6 +642,7 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u) free_conn(c); } spin_unlock(&clp->cl_lock); + nfsd4_probe_callback(clp); } static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) @@ -679,15 +680,12 @@ static int nfsd4_register_conn(struct nfsd4_conn *conn) return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); } -static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) +static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir) { struct nfsd4_conn *conn; - u32 flags = NFS4_CDFC4_FORE; int ret; - if (ses->se_flags & SESSION4_BACK_CHAN) - flags |= NFS4_CDFC4_BACK; - conn = alloc_conn(rqstp, flags); + conn = alloc_conn(rqstp, dir); if (!conn) return nfserr_jukebox; nfsd4_hash_conn(conn, ses); @@ -698,6 +696,17 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) return nfs_ok; } +static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses) +{ + u32 dir = NFS4_CDFC4_FORE; + + if (ses->se_flags & SESSION4_BACK_CHAN) + dir |= NFS4_CDFC4_BACK; + + return nfsd4_new_conn(rqstp, ses, dir); +} + +/* must be called under client_lock */ static void nfsd4_del_conns(struct nfsd4_session *s) { struct nfs4_client *clp = s->se_client; @@ -749,6 +758,8 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n */ slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); + if (numslots < 1) + return NULL; new = alloc_session(slotsize, numslots); if (!new) { @@ -769,25 +780,30 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n idx = hash_sessionid(&new->se_sessionid); spin_lock(&client_lock); list_add(&new->se_hash, &sessionid_hashtbl[idx]); + spin_lock(&clp->cl_lock); list_add(&new->se_perclnt, &clp->cl_sessions); + spin_unlock(&clp->cl_lock); spin_unlock(&client_lock); - status = nfsd4_new_conn(rqstp, new); + status = nfsd4_new_conn_from_crses(rqstp, new); /* whoops: benny points out, status is ignored! (err, or bogus) */ if (status) { free_session(&new->se_ref); return NULL; } - if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) { + if (cses->flags & SESSION4_BACK_CHAN) { struct sockaddr *sa = svc_addr(rqstp); - - clp->cl_cb_session = new; - clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt; - svc_xprt_get(rqstp->rq_xprt); + /* + * This is a little silly; with sessions there's no real + * use for the callback address. 
Use the peer address + * as a reasonable default for now, but consider fixing + * the rpc client not to require an address in the + * future: + */ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); - nfsd4_probe_callback(clp); } + nfsd4_probe_callback(clp); return new; } @@ -817,7 +833,9 @@ static void unhash_session(struct nfsd4_session *ses) { list_del(&ses->se_hash); + spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); + spin_unlock(&ses->se_client->cl_lock); } /* must be called under the client_lock */ @@ -923,8 +941,10 @@ unhash_client_locked(struct nfs4_client *clp) mark_client_expired(clp); list_del(&clp->cl_lru); + spin_lock(&clp->cl_lock); list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) list_del_init(&ses->se_hash); + spin_unlock(&clp->cl_lock); } static void @@ -1051,12 +1071,13 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); atomic_set(&clp->cl_refcount, 0); - atomic_set(&clp->cl_cb_set, 0); + clp->cl_cb_state = NFSD4_CB_UNKNOWN; INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_strhash); INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); + INIT_LIST_HEAD(&clp->cl_callbacks); spin_lock_init(&clp->cl_lock); INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); clp->cl_time = get_seconds(); @@ -1132,54 +1153,55 @@ find_unconfirmed_client(clientid_t *clid) return NULL; } -/* - * Return 1 iff clp's clientid establishment method matches the use_exchange_id - * parameter. Matching is based on the fact the at least one of the - * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1 - * - * FIXME: we need to unify the clientid namespaces for nfsv4.x - * and correctly deal with client upgrade/downgrade in EXCHANGE_ID - * and SET_CLIENTID{,_CONFIRM} - */ -static inline int -match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id) +static bool clp_used_exchangeid(struct nfs4_client *clp) { - bool has_exchange_flags = (clp->cl_exchange_flags != 0); - return use_exchange_id == has_exchange_flags; -} + return clp->cl_exchange_flags != 0; +} static struct nfs4_client * -find_confirmed_client_by_str(const char *dname, unsigned int hashval, - bool use_exchange_id) +find_confirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { - if (same_name(clp->cl_recdir, dname) && - match_clientid_establishment(clp, use_exchange_id)) + if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; } static struct nfs4_client * -find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, - bool use_exchange_id) +find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { - if (same_name(clp->cl_recdir, dname) && - match_clientid_establishment(clp, use_exchange_id)) + if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; } +static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr) +{ + switch (family) { + case AF_INET: + ((struct sockaddr_in *)sa)->sin_family = AF_INET; + ((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr; + return; + case AF_INET6: + ((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6; + ((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6; + return; + 
} +} + static void -gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) +gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) { struct nfs4_cb_conn *conn = &clp->cl_cb_conn; + struct sockaddr *sa = svc_addr(rqstp); + u32 scopeid = rpc_get_scope_id(sa); unsigned short expected_family; /* Currently, we only support tcp and tcp6 for the callback channel */ @@ -1205,6 +1227,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; + rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; @@ -1344,7 +1367,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, case SP4_NONE: break; case SP4_SSV: - return nfserr_encr_alg_unsupp; + return nfserr_serverfault; default: BUG(); /* checked by xdr code */ case SP4_MACH_CRED: @@ -1361,8 +1384,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, nfs4_lock_state(); status = nfs_ok; - conf = find_confirmed_client_by_str(dname, strhashval, true); + conf = find_confirmed_client_by_str(dname, strhashval); if (conf) { + if (!clp_used_exchangeid(conf)) { + status = nfserr_clid_inuse; /* XXX: ? */ + goto out; + } if (!same_verf(&verf, &conf->cl_verifier)) { /* 18.35.4 case 8 */ if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { @@ -1403,7 +1430,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, goto out; } - unconf = find_unconfirmed_client_by_str(dname, strhashval, true); + unconf = find_unconfirmed_client_by_str(dname, strhashval); if (unconf) { /* * Possible retry or client restart. Per 18.35.4 case 4, @@ -1560,6 +1587,8 @@ nfsd4_create_session(struct svc_rqst *rqstp, status = nfs_ok; memcpy(cr_ses->sessionid.data, new->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); + memcpy(&cr_ses->fore_channel, &new->se_fchannel, + sizeof(struct nfsd4_channel_attrs)); cs_slot->sl_seqid++; cr_ses->seqid = cs_slot->sl_seqid; @@ -1581,6 +1610,45 @@ static bool nfsd4_last_compound_op(struct svc_rqst *rqstp) return argp->opcnt == resp->opcnt; } +static __be32 nfsd4_map_bcts_dir(u32 *dir) +{ + switch (*dir) { + case NFS4_CDFC4_FORE: + case NFS4_CDFC4_BACK: + return nfs_ok; + case NFS4_CDFC4_FORE_OR_BOTH: + case NFS4_CDFC4_BACK_OR_BOTH: + *dir = NFS4_CDFC4_BOTH; + return nfs_ok; + }; + return nfserr_inval; +} + +__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_bind_conn_to_session *bcts) +{ + __be32 status; + + if (!nfsd4_last_compound_op(rqstp)) + return nfserr_not_only_op; + spin_lock(&client_lock); + cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid); + /* Sorta weird: we only need the refcnt'ing because new_conn acquires + * client_lock iself: */ + if (cstate->session) { + nfsd4_get_session(cstate->session); + atomic_inc(&cstate->session->se_client->cl_refcount); + } + spin_unlock(&client_lock); + if (!cstate->session) + return nfserr_badsession; + + status = nfsd4_map_bcts_dir(&bcts->dir); + nfsd4_new_conn(rqstp, cstate->session, bcts->dir); + return nfs_ok; +} + static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) { if (!session) @@ -1619,8 +1687,7 @@ nfsd4_destroy_session(struct svc_rqst *r, spin_unlock(&client_lock); nfs4_lock_state(); - /* wait for callbacks */ - nfsd4_shutdown_callback(ses->se_client); + nfsd4_probe_callback_sync(ses->se_client); nfs4_unlock_state(); nfsd4_del_conns(ses); @@ -1733,8 +1800,12 @@ 
nfsd4_sequence(struct svc_rqst *rqstp, out: /* Hold a session reference until done processing the compound. */ if (cstate->session) { + struct nfs4_client *clp = session->se_client; + nfsd4_get_session(cstate->session); - atomic_inc(&session->se_client->cl_refcount); + atomic_inc(&clp->cl_refcount); + if (clp->cl_cb_state == NFSD4_CB_DOWN) + seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN; } kfree(conn); spin_unlock(&client_lock); @@ -1775,7 +1846,6 @@ __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid *setclid) { - struct sockaddr *sa = svc_addr(rqstp); struct xdr_netobj clname = { .len = setclid->se_namelen, .data = setclid->se_name, @@ -1801,10 +1871,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, strhashval = clientstr_hashval(dname); nfs4_lock_state(); - conf = find_confirmed_client_by_str(dname, strhashval, false); + conf = find_confirmed_client_by_str(dname, strhashval); if (conf) { /* RFC 3530 14.2.33 CASE 0: */ status = nfserr_clid_inuse; + if (clp_used_exchangeid(conf)) + goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { char addr_str[INET6_ADDRSTRLEN]; rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, @@ -1819,7 +1891,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * has a description of SETCLIENTID request processing consisting * of 5 bullet points, labeled as CASE0 - CASE4 below. */ - unconf = find_unconfirmed_client_by_str(dname, strhashval, false); + unconf = find_unconfirmed_client_by_str(dname, strhashval); status = nfserr_resource; if (!conf) { /* @@ -1876,7 +1948,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * for consistent minorversion use throughout: */ new->cl_minorversion = 0; - gen_callback(new, setclid, rpc_get_scope_id(sa)); + gen_callback(new, setclid, rqstp); add_to_unconfirmed(new, strhashval); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; @@ -1935,7 +2007,6 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) status = nfserr_clid_inuse; else { - atomic_set(&conf->cl_cb_set, 0); nfsd4_change_callback(conf, &unconf->cl_cb_conn); nfsd4_probe_callback(conf); expire_client(unconf); @@ -1964,7 +2035,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, unsigned int hash = clientstr_hashval(unconf->cl_recdir); conf = find_confirmed_client_by_str(unconf->cl_recdir, - hash, false); + hash); if (conf) { nfsd4_remove_clid_dir(conf); expire_client(conf); @@ -2300,41 +2371,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl) nfsd4_cb_recall(dp); } -/* - * The file_lock is being reapd. - * - * Called by locks_free_lock() with lock_flocks() held. 
- */ -static -void nfsd_release_deleg_cb(struct file_lock *fl) -{ - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; - - dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count)); - - if (!(fl->fl_flags & FL_LEASE) || !dp) - return; - dp->dl_flock = NULL; -} - -/* - * Called from setlease() with lock_flocks() held - */ -static -int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) -{ - struct nfs4_delegation *onlistd = - (struct nfs4_delegation *)onlist->fl_owner; - struct nfs4_delegation *tryd = - (struct nfs4_delegation *)try->fl_owner; - - if (onlist->fl_lmops != try->fl_lmops) - return 0; - - return onlistd->dl_client == tryd->dl_client; -} - - static int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) { @@ -2346,8 +2382,6 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) static const struct lock_manager_operations nfsd_lease_mng_ops = { .fl_break = nfsd_break_deleg_cb, - .fl_release_private = nfsd_release_deleg_cb, - .fl_mylease = nfsd_same_client_deleg_cb, .fl_change = nfsd_change_deleg_cb, }; @@ -2514,8 +2548,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file if (!fp->fi_fds[oflag]) { status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &fp->fi_fds[oflag]); - if (status == nfserr_dropit) - status = nfserr_jukebox; if (status) return status; } @@ -2596,6 +2628,19 @@ nfs4_set_claim_prev(struct nfsd4_open *open) open->op_stateowner->so_client->cl_firststate = 1; } +/* Should we give out recallable state?: */ +static bool nfsd4_cb_channel_good(struct nfs4_client *clp) +{ + if (clp->cl_cb_state == NFSD4_CB_UP) + return true; + /* + * In the sessions case, since we don't have to establish a + * separate connection for callbacks, we assume it's OK + * until we hear otherwise: + */ + return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; +} + /* * Attempt to hand out a delegation. */ @@ -2604,10 +2649,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta { struct nfs4_delegation *dp; struct nfs4_stateowner *sop = stp->st_stateowner; - int cb_up = atomic_read(&sop->so_client->cl_cb_set); + int cb_up; struct file_lock *fl; int status, flag = 0; + cb_up = nfsd4_cb_channel_good(sop->so_client); flag = NFS4_OPEN_DELEGATE_NONE; open->op_recall = 0; switch (open->op_claim_type) { @@ -2655,7 +2701,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta dp->dl_flock = fl; /* vfs_setlease checks to see if delegation should be handed out. 
- * the lock_manager callbacks fl_mylease and fl_change are used + * the lock_manager callback fl_change is used */ if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { dprintk("NFSD: setlease failed [%d], no delegation\n", status); @@ -2794,7 +2840,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, renew_client(clp); status = nfserr_cb_path_down; if (!list_empty(&clp->cl_delegations) - && !atomic_read(&clp->cl_cb_set)) + && clp->cl_cb_state != NFSD4_CB_UP) goto out; status = nfs_ok; out: @@ -3081,9 +3127,10 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, if (status) goto out; renew_client(dp->dl_client); - if (filpp) + if (filpp) { *filpp = find_readable_file(dp->dl_file); - BUG_ON(!*filpp); + BUG_ON(!*filpp); + } } else { /* open or lock stateid */ stp = find_stateid(stateid, flags); if (!stp) @@ -4107,7 +4154,7 @@ nfs4_has_reclaimed_state(const char *name, bool use_exchange_id) unsigned int strhashval = clientstr_hashval(name); struct nfs4_client *clp; - clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id); + clp = find_confirmed_client_by_str(name, strhashval); return clp ? 1 : 0; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f35a94a04026..956629b9cdc9 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -44,13 +44,14 @@ #include #include #include -#include -#include #include +#include "idmap.h" +#include "acl.h" #include "xdr4.h" #include "vfs.h" + #define NFSDDBG_FACILITY NFSDDBG_XDR /* @@ -288,17 +289,17 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, len += XDR_QUADLEN(dummy32) << 2; READMEM(buf, dummy32); ace->whotype = nfs4_acl_get_whotype(buf, dummy32); - host_err = 0; + status = nfs_ok; if (ace->whotype != NFS4_ACL_WHO_NAMED) ace->who = 0; else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP) - host_err = nfsd_map_name_to_gid(argp->rqstp, + status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &ace->who); else - host_err = nfsd_map_name_to_uid(argp->rqstp, + status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &ace->who); - if (host_err) - goto out_nfserr; + if (status) + return status; } } else *acl = NULL; @@ -420,6 +421,21 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access DECODE_TAIL; } +static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts) +{ + DECODE_HEAD; + u32 dummy; + + READ_BUF(NFS4_MAX_SESSIONID_LEN + 8); + COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); + READ32(bcts->dir); + /* XXX: Perhaps Tom Tucker could help us figure out how we + * should be using ctsa_use_conn_in_rdma_mode: */ + READ32(dummy); + + DECODE_TAIL; +} + static __be32 nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close) { @@ -846,6 +862,17 @@ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp, DECODE_TAIL; } +static __be32 +nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp, + struct nfsd4_secinfo_no_name *sin) +{ + DECODE_HEAD; + + READ_BUF(4); + READ32(sin->sin_style); + DECODE_TAIL; +} + static __be32 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr) { @@ -1005,7 +1032,7 @@ static __be32 nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp, struct nfsd4_exchange_id *exid) { - int dummy; + int dummy, tmp; DECODE_HEAD; READ_BUF(NFS4_VERIFIER_SIZE); @@ -1053,15 +1080,23 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp, /* ssp_hash_algs<> */ READ_BUF(4); - READ32(dummy); - READ_BUF(dummy); - p += 
XDR_QUADLEN(dummy); + READ32(tmp); + while (tmp--) { + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy); + p += XDR_QUADLEN(dummy); + } /* ssp_encr_algs<> */ READ_BUF(4); - READ32(dummy); - READ_BUF(dummy); - p += XDR_QUADLEN(dummy); + READ32(tmp); + while (tmp--) { + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy); + p += XDR_QUADLEN(dummy); + } /* ssp_window and ssp_num_gss_handles */ READ_BUF(8); @@ -1339,7 +1374,7 @@ static nfsd4_dec nfsd41_dec_ops[] = { /* new operations for NFSv4.1 */ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp, - [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp, + [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session, [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id, [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session, [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session, @@ -1350,7 +1385,7 @@ static nfsd4_dec nfsd41_dec_ops[] = { [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp, - [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_notsupp, + [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence, [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp, @@ -2309,8 +2344,6 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, case nfserr_resource: nfserr = nfserr_toosmall; goto fail; - case nfserr_dropit: - goto fail; case nfserr_noent: goto skip_entry; default: @@ -2365,6 +2398,21 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_ return nfserr; } +static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts) +{ + __be32 *p; + + if (!nfserr) { + RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8); + WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); + WRITE32(bcts->dir); + /* XXX: ? */ + WRITE32(0); + ADJUST_ARGS(); + } + return nfserr; +} + static __be32 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) { @@ -2826,11 +2874,10 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_ } static __be32 -nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, - struct nfsd4_secinfo *secinfo) +nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, + __be32 nfserr,struct svc_export *exp) { int i = 0; - struct svc_export *exp = secinfo->si_exp; u32 nflavs; struct exp_flavor_info *flavs; struct exp_flavor_info def_flavs[2]; @@ -2892,6 +2939,20 @@ out: return nfserr; } +static __be32 +nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_secinfo *secinfo) +{ + return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp); +} + +static __be32 +nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_secinfo_no_name *secinfo) +{ + return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp); +} + /* * The SETATTR encode routine is special -- it always encodes a bitmap, * regardless of the error status. 
@@ -3076,13 +3137,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, WRITE32(seq->seqid); WRITE32(seq->slotid); WRITE32(seq->maxslots); - /* - * FIXME: for now: - * target_maxslots = maxslots - * status_flags = 0 - */ + /* For now: target_maxslots = maxslots */ WRITE32(seq->maxslots); - WRITE32(0); + WRITE32(seq->status_flags); ADJUST_ARGS(); resp->cstate.datap = p; /* DRC cache data pointer */ @@ -3143,7 +3200,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { /* NFSv4.1 operations */ [OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop, - [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_noop, + [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session, [OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id, [OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session, [OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_destroy_session, @@ -3154,7 +3211,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop, - [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_noop, + [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence, [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop, [OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_noop, diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 4514ebbee4d6..33b3e2b06779 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -8,12 +8,12 @@ #include #include -#include #include #include #include #include +#include "idmap.h" #include "nfsd.h" #include "cache.h" @@ -127,6 +127,7 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos) { +#ifdef CONFIG_NFSD_DEPRECATED static int warned; if (file->f_dentry->d_name.name[0] == '.' && !warned) { printk(KERN_INFO @@ -135,6 +136,7 @@ static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size current->comm, file->f_dentry->d_name.name); warned = 1; } +#endif if (! 
file->private_data) { /* An attempt to read a transaction file without writing * causes a 0-byte write so that the file can return diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 6b641cf2c19a..7ecfa2420307 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -158,6 +158,7 @@ void nfsd_lockd_shutdown(void); #define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) #define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) #define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) +#define nfserr_badowner cpu_to_be32(NFSERR_BADOWNER) #define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) #define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) #define nfserr_grace cpu_to_be32(NFSERR_GRACE) diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 08e17264784b..e15dc45fc5ec 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -735,9 +735,9 @@ nfserrno (int errno) { nfserr_stale, -ESTALE }, { nfserr_jukebox, -ETIMEDOUT }, { nfserr_jukebox, -ERESTARTSYS }, - { nfserr_dropit, -EAGAIN }, - { nfserr_dropit, -ENOMEM }, - { nfserr_badname, -ESRCH }, + { nfserr_jukebox, -EAGAIN }, + { nfserr_jukebox, -EWOULDBLOCK }, + { nfserr_jukebox, -ENOMEM }, { nfserr_io, -ETXTBSY }, { nfserr_notsupp, -EOPNOTSUPP }, { nfserr_toosmall, -ETOOSMALL }, diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 2bae1d86f5f2..18743c4d8bca 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -608,7 +608,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); - if (nfserr == nfserr_dropit) { + if (nfserr == nfserr_dropit || rqstp->rq_dropme) { dprintk("nfsd: Dropping request; may be revisited later\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); return 0; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 39adc27b0685..3074656ba7bf 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -68,10 +68,12 @@ typedef struct { struct nfsd4_callback { void *cb_op; struct nfs4_client *cb_clp; + struct list_head cb_per_client; u32 cb_minorversion; struct rpc_message cb_msg; const struct rpc_call_ops *cb_ops; struct work_struct cb_work; + bool cb_done; }; struct nfs4_delegation { @@ -81,6 +83,7 @@ struct nfs4_delegation { atomic_t dl_count; /* ref count */ struct nfs4_client *dl_client; struct nfs4_file *dl_file; + struct file *dl_vfs_file; struct file_lock *dl_flock; u32 dl_type; time_t dl_time; @@ -95,6 +98,7 @@ struct nfs4_delegation { struct nfs4_cb_conn { /* SETCLIENTID info */ struct sockaddr_storage cb_addr; + struct sockaddr_storage cb_saddr; size_t cb_addrlen; u32 cb_prog; /* used only in 4.0 case; per-session otherwise */ @@ -146,6 +150,11 @@ struct nfsd4_create_session { u32 gid; }; +struct nfsd4_bind_conn_to_session { + struct nfs4_sessionid sessionid; + u32 dir; +}; + /* The single slot clientid cache structure */ struct nfsd4_clid_slot { u32 sl_seqid; @@ -235,9 +244,13 @@ struct nfs4_client { unsigned long cl_cb_flags; struct rpc_clnt *cl_cb_client; u32 cl_cb_ident; - atomic_t cl_cb_set; +#define NFSD4_CB_UP 0 +#define NFSD4_CB_UNKNOWN 1 +#define NFSD4_CB_DOWN 2 + int cl_cb_state; struct nfsd4_callback cl_cb_null; struct nfsd4_session *cl_cb_session; + struct list_head cl_callbacks; /* list of in-progress callbacks */ /* for all client information that callback code might need: */ spinlock_t cl_lock; @@ -454,6 +467,7 @@ extern __be32 nfs4_check_open_reclaim(clientid_t *clid); extern void nfs4_free_stateowner(struct kref *kref); extern int 
set_callback_cred(void); extern void nfsd4_probe_callback(struct nfs4_client *clp); +extern void nfsd4_probe_callback_sync(struct nfs4_client *clp); extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); extern void nfsd4_do_callback_rpc(struct work_struct *); extern void nfsd4_cb_recall(struct nfs4_delegation *dp); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 3a359023c9f7..a3c7f701395a 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1,4 +1,3 @@ -#define MSNFS /* HACK HACK */ /* * File operations used by nfsd. Some of these have been ripped from * other parts of the kernel because they weren't exported, others @@ -35,8 +34,8 @@ #endif /* CONFIG_NFSD_V3 */ #ifdef CONFIG_NFSD_V4 -#include -#include +#include "acl.h" +#include "idmap.h" #endif /* CONFIG_NFSD_V4 */ #include "nfsd.h" @@ -273,6 +272,13 @@ out: return err; } +static int nfsd_break_lease(struct inode *inode) +{ + if (!S_ISREG(inode->i_mode)) + return 0; + return break_lease(inode, O_WRONLY | O_NONBLOCK); +} + /* * Commit metadata changes to stable storage. */ @@ -375,16 +381,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, goto out; } - /* - * If we are changing the size of the file, then - * we need to break all leases. - */ - host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); - if (host_err == -EWOULDBLOCK) - host_err = -ETIMEDOUT; - if (host_err) /* ENOMEM or EWOULDBLOCK */ - goto out_nfserr; - host_err = get_write_access(inode); if (host_err) goto out_nfserr; @@ -425,7 +421,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, err = nfserr_notsync; if (!check_guard || guardtime == inode->i_ctime.tv_sec) { + host_err = nfsd_break_lease(inode); + if (host_err) + goto out_nfserr; fh_lock(fhp); + host_err = notify_change(dentry, iap); err = nfserrno(host_err); fh_unlock(fhp); @@ -752,8 +752,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, */ if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? 
O_WRONLY : 0)); - if (host_err == -EWOULDBLOCK) - host_err = -ETIMEDOUT; if (host_err) /* NOMEM or WOULDBLOCK */ goto out_nfserr; @@ -845,11 +843,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct page **pp = rqstp->rq_respages + rqstp->rq_resused; struct page *page = buf->page; size_t size; - int ret; - - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; size = sd->len; @@ -879,15 +872,6 @@ static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe, return __splice_from_pipe(pipe, sd, nfsd_splice_actor); } -static inline int svc_msnfs(struct svc_fh *ffhp) -{ -#ifdef MSNFS - return (ffhp->fh_export->ex_flags & NFSEXP_MSNFS); -#else - return 0; -#endif -} - static __be32 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *count) @@ -900,9 +884,6 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, err = nfserr_perm; inode = file->f_path.dentry->d_inode; - if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count)) - goto out; - if (file->f_op->splice_read && rqstp->rq_splice_ok) { struct splice_desc sd = { .len = 0, @@ -927,7 +908,6 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, fsnotify_access(file); } else err = nfserrno(host_err); -out: return err; } @@ -992,14 +972,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, int stable = *stablep; int use_wgather; -#ifdef MSNFS - err = nfserr_perm; - - if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && - (!lock_may_write(file->f_path.dentry->d_inode, offset, *cnt))) - goto out; -#endif - dentry = file->f_path.dentry; inode = dentry->d_inode; exp = fhp->fh_export; @@ -1050,7 +1022,6 @@ out_nfserr: err = 0; else err = nfserrno(host_err); -out: return err; } @@ -1670,6 +1641,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, err = nfserrno(host_err); goto out_dput; } + err = nfserr_noent; + if (!dold->d_inode) + goto out_drop_write; + host_err = nfsd_break_lease(dold->d_inode); + if (host_err) + goto out_drop_write; host_err = vfs_link(dold, dirp, dnew); if (!host_err) { err = nfserrno(commit_metadata(ffhp)); @@ -1681,6 +1658,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, else err = nfserrno(host_err); } +out_drop_write: mnt_drop_write(tfhp->fh_export->ex_path.mnt); out_dput: dput(dnew); @@ -1755,12 +1733,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (ndentry == trap) goto out_dput_new; - if (svc_msnfs(ffhp) && - ((odentry->d_count > 1) || (ndentry->d_count > 1))) { - host_err = -EPERM; - goto out_dput_new; - } - host_err = -EXDEV; if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) goto out_dput_new; @@ -1768,15 +1740,17 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (host_err) goto out_dput_new; + host_err = nfsd_break_lease(odentry->d_inode); + if (host_err) + goto out_drop_write; host_err = vfs_rename(fdir, odentry, tdir, ndentry); if (!host_err) { host_err = commit_metadata(tfhp); if (!host_err) host_err = commit_metadata(ffhp); } - +out_drop_write: mnt_drop_write(ffhp->fh_export->ex_path.mnt); - out_dput_new: dput(ndentry); out_dput_old: @@ -1839,18 +1813,14 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (host_err) goto out_nfserr; - if (type != S_IFDIR) { /* It's UNLINK */ -#ifdef MSNFS - if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && - (rdentry->d_count > 1)) { - 
host_err = -EPERM; - } else -#endif + host_err = nfsd_break_lease(rdentry->d_inode); + if (host_err) + goto out_put; + if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); - } else { /* It's RMDIR */ + else host_err = vfs_rmdir(dirp, rdentry); - } - +out_put: dput(rdentry); if (!host_err) diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 60fce3dc5cb5..366401e1a536 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -311,6 +311,11 @@ struct nfsd4_secinfo { struct svc_export *si_exp; /* response */ }; +struct nfsd4_secinfo_no_name { + u32 sin_style; /* request */ + struct svc_export *sin_exp; /* response */ +}; + struct nfsd4_setattr { stateid_t sa_stateid; /* request */ u32 sa_bmval[3]; /* request */ @@ -373,8 +378,8 @@ struct nfsd4_sequence { u32 cachethis; /* request */ #if 0 u32 target_maxslots; /* response */ - u32 status_flags; /* response */ #endif /* not yet */ + u32 status_flags; /* response */ }; struct nfsd4_destroy_session { @@ -422,6 +427,7 @@ struct nfsd4_op { /* NFSv4.1 */ struct nfsd4_exchange_id exchange_id; + struct nfsd4_bind_conn_to_session bind_conn_to_session; struct nfsd4_create_session create_session; struct nfsd4_destroy_session destroy_session; struct nfsd4_sequence sequence; @@ -518,6 +524,7 @@ extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq); extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *, struct nfsd4_exchange_id *); +extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *); extern __be32 nfsd4_create_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_create_session *); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 70dfdd532b83..0994f6a76c07 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1163,14 +1163,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags, { struct nilfs_super_data sd; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; struct dentry *root_dentry; int err, s_new = false; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type); + sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(sd.bdev)) return ERR_CAST(sd.bdev); @@ -1249,7 +1249,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } if (!s_new) - close_bdev_exclusive(sd.bdev, mode); + blkdev_put(sd.bdev, mode); return root_dentry; @@ -1258,7 +1258,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, failed: if (!s_new) - close_bdev_exclusive(sd.bdev, mode); + blkdev_put(sd.bdev, mode); return ERR_PTR(err); } diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig index 3ac36b7bf6b9..7dceff005a67 100644 --- a/fs/notify/fanotify/Kconfig +++ b/fs/notify/fanotify/Kconfig @@ -6,7 +6,7 @@ config FANOTIFY ---help--- Say Y here to enable fanotify suport. fanotify is a file access notification system which differs from inotify in that it sends - and open file descriptor to the userspace listener along with + an open file descriptor to the userspace listener along with the event. If unsure, say Y. 
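
The fs/nilfs2/super.c hunk above (and the reiserfs journal and fs/super.c mount_bdev()/kill_block_super() hunks further down) replaces open_bdev_exclusive()/close_bdev_exclusive() with blkdev_get_by_path()/blkdev_put(), expressing the exclusive claim as FMODE_EXCL inside the open mode instead of a separate claim step. What follows is only a rough sketch of the new calling convention, not code from this series; the function name and holder argument are hypothetical.

#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical caller showing the blkdev_get_by_path()/blkdev_put() pairing. */
static int example_open_backing_dev(const char *dev_name, bool rdonly,
				    void *holder)
{
	/*
	 * FMODE_EXCL now carries the exclusive claim that
	 * open_bdev_exclusive() used to take implicitly.
	 */
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	struct block_device *bdev;

	if (!rdonly)
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... read the superblock, set the block size, etc. ... */

	/* blkdev_put() must be passed the same mode, including FMODE_EXCL. */
	blkdev_put(bdev, mode);
	return 0;
}

The kill_block_super() hunk in fs/super.c below relies on the same rule: it warns if FMODE_EXCL is missing from the stored mount mode and always passes FMODE_EXCL back to blkdev_put().
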
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index a6cc05302e9f..b108e863d8f6 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1729,7 +1729,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, goto out; reg->hr_bdev = I_BDEV(filp->f_mapping->host); - ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ); + ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL); if (ret) { reg->hr_bdev = NULL; goto out; diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 6adafa576065..5dbc3062b4fd 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -137,9 +137,7 @@ check_gen: } result = d_obtain_alias(inode); - if (!IS_ERR(result)) - d_set_d_op(result, &ocfs2_dentry_ops); - else + if (IS_ERR(result)) mlog_errno(PTR_ERR(result)); bail: @@ -175,8 +173,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); - if (!IS_ERR(parent)) - d_set_d_op(parent, &ocfs2_dentry_ops); bail_unlock: ocfs2_inode_unlock(dir, 0); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index bdadbae09094..63e3fca266e0 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1995,6 +1995,7 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_space_resv sr; int change_size = 1; + int cmd = OCFS2_IOC_RESVSP64; if (!ocfs2_writes_unwritten_extents(osb)) return -EOPNOTSUPP; @@ -2005,12 +2006,15 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, if (mode & FALLOC_FL_KEEP_SIZE) change_size = 0; + if (mode & FALLOC_FL_PUNCH_HOLE) + cmd = OCFS2_IOC_UNRESVSP64; + sr.l_whence = 0; sr.l_start = (s64)offset; sr.l_len = (s64)len; - return __ocfs2_change_file_space(NULL, inode, offset, - OCFS2_IOC_RESVSP64, &sr, change_size); + return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr, + change_size); } int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos, diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index f935fd6600dd..4068c6c4c6f6 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -434,7 +434,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, * #1 and #2 can be simply solved by never taking the lock * here for system files (which are the only type we read * during mount). It's a heavier approach, but our main - * concern is user-accesible files anyway. + * concern is user-accessible files anyway. * * #3 works itself out because we'll eventually take the * cluster lock before trusting anything anyway. 
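
The ocfs2_fallocate() hunk above adds hole punching by routing FALLOC_FL_PUNCH_HOLE onto the existing unreserve-space path (OCFS2_IOC_UNRESVSP64), and the do_fallocate() hunk in fs/open.c further down adds the matching mode validation. A rough sketch of that mode handling follows; the helper name and enum are placeholders rather than code from this series, and FALLOC_FL_PUNCH_HOLE is assumed to be provided by the same kernel tree.

#include <linux/errno.h>
#include <linux/falloc.h>

/* Hypothetical reserve/unreserve selector, mirroring the ocfs2 pattern. */
enum example_space_cmd { EXAMPLE_RESVSP, EXAMPLE_UNRESVSP };

static int example_map_fallocate_mode(int mode, enum example_space_cmd *cmd,
				      int *change_size)
{
	/* Only KEEP_SIZE and PUNCH_HOLE are understood here. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	/* Punching a hole must not change the file size. */
	if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	/* FALLOC_FL_KEEP_SIZE means i_size stays put even when allocating past it. */
	*change_size = !(mode & FALLOC_FL_KEEP_SIZE);
	*cmd = (mode & FALLOC_FL_PUNCH_HOLE) ? EXAMPLE_UNRESVSP : EXAMPLE_RESVSP;
	return 0;
}

ocfs2 keeps its existing __ocfs2_change_file_space() machinery and only selects OCFS2_IOC_RESVSP64 versus OCFS2_IOC_UNRESVSP64 from the mode bits, which is why the hunk above stays so small.
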
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 30c523144452..849fb4a2e814 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -147,7 +147,6 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, spin_unlock(&oi->ip_lock); bail_add: - d_set_d_op(dentry, &ocfs2_dentry_ops); ret = d_splice_alias(inode, dentry); if (inode) { @@ -415,7 +414,6 @@ static int ocfs2_mknod(struct inode *dir, mlog_errno(status); goto leave; } - d_set_d_op(dentry, &ocfs2_dentry_ops); status = ocfs2_add_entry(handle, dentry, inode, OCFS2_I(inode)->ip_blkno, parent_fe_bh, @@ -743,7 +741,6 @@ static int ocfs2_link(struct dentry *old_dentry, } ihold(inode); - d_set_d_op(dentry, &ocfs2_dentry_ops); d_instantiate(dentry, inode); out_commit: @@ -1797,7 +1794,6 @@ static int ocfs2_symlink(struct inode *dir, mlog_errno(status); goto bail; } - d_set_d_op(dentry, &ocfs2_dentry_ops); status = ocfs2_add_entry(handle, dentry, inode, le64_to_cpu(fe->i_blkno), parent_fe_bh, @@ -2462,7 +2458,6 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, goto out_commit; } - d_set_d_op(dentry, &ocfs2_dentry_ops); d_instantiate(dentry, inode); status = 0; out_commit: diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 5fed60de7630..71998d4d61d5 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1916,7 +1916,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, if (res->sr_bg_blkno) { /* Attempt to short-circuit the usual search mechanism * by jumping straight to the most recently used - * allocation group. This helps us mantain some + * allocation group. This helps us maintain some * contiguousness across allocations. */ status = ocfs2_search_one_group(ac, handle, bits_wanted, min_bits, res, &bits_left); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 17ff46fa8a10..06d1f749ca89 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2097,6 +2097,7 @@ static int ocfs2_initialize_super(struct super_block *sb, sb->s_fs_info = osb; sb->s_op = &ocfs2_sops; + sb->s_d_op = &ocfs2_dentry_ops; sb->s_export_op = &ocfs2_export_ops; sb->s_qcop = &ocfs2_quotactl_ops; sb->dq_op = &ocfs2_quota_operations; diff --git a/fs/open.c b/fs/open.c index 4197b9ed023d..5b6ef7e2859e 100644 --- a/fs/open.c +++ b/fs/open.c @@ -223,7 +223,12 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EINVAL; /* Return error if mode is not supported */ - if (mode && !(mode & FALLOC_FL_KEEP_SIZE)) + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + /* Punch hole must have keep size set */ + if ((mode & FALLOC_FL_PUNCH_HOLE) && + !(mode & FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; if (!(file->f_mode & FMODE_WRITE)) diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 0a8b0ad0c7e2..9c21119512b9 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -237,6 +237,13 @@ ssize_t part_size_show(struct device *dev, return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); } +ssize_t part_ro_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%d\n", p->policy ? 
1 : 0); +} + ssize_t part_alignment_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -312,6 +319,7 @@ ssize_t part_fail_store(struct device *dev, static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, NULL); @@ -326,6 +334,7 @@ static struct attribute *part_attrs[] = { &dev_attr_partition.attr, &dev_attr_start.attr, &dev_attr_size.attr, + &dev_attr_ro.attr, &dev_attr_alignment_offset.attr, &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, @@ -372,6 +381,11 @@ static void delete_partition_rcu_cb(struct rcu_head *head) put_device(part_to_dev(part)); } +void __delete_partition(struct hd_struct *part) +{ + call_rcu(&part->rcu_head, delete_partition_rcu_cb); +} + void delete_partition(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = disk->part_tbl; @@ -390,7 +404,7 @@ void delete_partition(struct gendisk *disk, int partno) kobject_put(part->holder_dir); device_del(part_to_dev(part)); - call_rcu(&part->rcu_head, delete_partition_rcu_cb); + hd_struct_put(part); } static ssize_t whole_disk_show(struct device *dev, @@ -489,6 +503,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, if (!dev_get_uevent_suppress(ddev)) kobject_uevent(&pdev->kobj, KOBJ_ADD); + hd_ref_init(p); return p; out_free_info: @@ -507,65 +522,6 @@ out_put: return ERR_PTR(err); } -/* Not exported, helper to add_disk(). */ -void register_disk(struct gendisk *disk) -{ - struct device *ddev = disk_to_dev(disk); - struct block_device *bdev; - struct disk_part_iter piter; - struct hd_struct *part; - int err; - - ddev->parent = disk->driverfs_dev; - - dev_set_name(ddev, disk->disk_name); - - /* delay uevents, until we scanned partition table */ - dev_set_uevent_suppress(ddev, 1); - - if (device_add(ddev)) - return; - if (!sysfs_deprecated) { - err = sysfs_create_link(block_depr, &ddev->kobj, - kobject_name(&ddev->kobj)); - if (err) { - device_del(ddev); - return; - } - } - disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); - disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); - - /* No minors to use for partitions */ - if (!disk_partitionable(disk)) - goto exit; - - /* No such device (e.g., media were just removed) */ - if (!get_capacity(disk)) - goto exit; - - bdev = bdget_disk(disk, 0); - if (!bdev) - goto exit; - - bdev->bd_invalidated = 1; - err = blkdev_get(bdev, FMODE_READ); - if (err < 0) - goto exit; - blkdev_put(bdev, FMODE_READ); - -exit: - /* announce disk after possible partitions are created */ - dev_set_uevent_suppress(ddev, 0); - kobject_uevent(&ddev->kobj, KOBJ_ADD); - - /* announce possible partitions */ - disk_part_iter_init(&piter, disk, 0); - while ((part = disk_part_iter_next(&piter))) - kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); - disk_part_iter_exit(&piter); -} - static bool disk_unlock_native_capacity(struct gendisk *disk) { const struct block_device_operations *bdops = disk->fops; @@ -728,33 +684,3 @@ fail: } EXPORT_SYMBOL(read_dev_sector); - -void del_gendisk(struct gendisk *disk) -{ - struct disk_part_iter piter; - struct hd_struct *part; - - /* invalidate stuff */ - disk_part_iter_init(&piter, disk, - DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); - while ((part = 
disk_part_iter_next(&piter))) { - invalidate_partition(disk, part->partno); - delete_partition(disk, part->partno); - } - disk_part_iter_exit(&piter); - - invalidate_partition(disk, 0); - blk_free_devt(disk_to_dev(disk)->devt); - set_capacity(disk, 0); - disk->flags &= ~GENHD_FL_UP; - unlink_gendisk(disk); - part_stat_set_all(&disk->part0, 0); - disk->part0.stamp = 0; - - kobject_put(disk->part0.holder_dir); - kobject_put(disk->slave_dir); - disk->driverfs_dev = NULL; - if (!sysfs_deprecated) - sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); - device_del(disk_to_dev(disk)); -} diff --git a/fs/pipe.c b/fs/pipe.c index 68f1f8e4e23b..e2e95fb46a1e 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -441,7 +441,7 @@ redo: break; } if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); @@ -450,7 +450,7 @@ redo: /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) @@ -612,7 +612,7 @@ redo2: break; } if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } @@ -623,7 +623,7 @@ redo2: out: mutex_unlock(&inode->i_mutex); if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0) @@ -715,7 +715,7 @@ pipe_release(struct inode *inode, int decr, int decw) if (!pipe->readers && !pipe->writers) { free_pipe_info(inode); } else { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } @@ -1004,7 +1004,6 @@ struct file *create_write_pipe(int flags) goto err_inode; path.mnt = mntget(pipe_mnt); - d_set_d_op(path.dentry, &pipefs_dentry_operations); d_instantiate(path.dentry, inode); err = -ENFILE; @@ -1266,7 +1265,8 @@ static const struct super_operations pipefs_ops = { static struct dentry *pipefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "pipe:", &pipefs_ops, PIPEFS_MAGIC); + return mount_pseudo(fs_type, "pipe:", &pipefs_ops, + &pipefs_dentry_operations, PIPEFS_MAGIC); } static struct file_system_type pipe_fs_type = { diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 288a49e098bf..df434c5f28fb 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -10,12 +10,12 @@ proc-$(CONFIG_MMU) := mmu.o task_mmu.o proc-y += inode.o root.o base.o generic.o array.o \ proc_tty.o proc-y += cmdline.o +proc-y += consoles.o proc-y += cpuinfo.o proc-y += devices.o proc-y += interrupts.o proc-y += loadavg.o proc-y += meminfo.o -proc-y += proc_console.o proc-y += stat.o proc-y += uptime.o proc-y += version.o diff --git a/fs/proc/array.c b/fs/proc/array.c index fff6572676ae..df2b703b9d0f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -95,7 +95,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) get_task_comm(tcomm, p); - seq_printf(m, "Name:\t"); + seq_puts(m, "Name:\t"); end = m->buf + m->size; buf = m->buf + m->count; name = tcomm; @@ -122,7 +122,7 @@ static inline void task_name(struct 
seq_file *m, struct task_struct *p) buf++; } m->count = buf - m->buf; - seq_printf(m, "\n"); + seq_putc(m, '\n'); } /* @@ -208,7 +208,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, seq_printf(m, "%d ", GROUP_AT(group_info, g)); put_cred(cred); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static void render_sigset_t(struct seq_file *m, const char *header, @@ -216,7 +216,7 @@ static void render_sigset_t(struct seq_file *m, const char *header, { int i; - seq_printf(m, "%s", header); + seq_puts(m, header); i = _NSIG; do { @@ -230,7 +230,7 @@ static void render_sigset_t(struct seq_file *m, const char *header, seq_printf(m, "%x", x); } while (i >= 4); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, @@ -291,12 +291,12 @@ static void render_cap_t(struct seq_file *m, const char *header, { unsigned __capi; - seq_printf(m, "%s", header); + seq_puts(m, header); CAP_FOR_EACH_U32(__capi) { seq_printf(m, "%08x", a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static inline void task_cap(struct seq_file *m, struct task_struct *p) @@ -329,12 +329,12 @@ static inline void task_context_switch_counts(struct seq_file *m, static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) { - seq_printf(m, "Cpus_allowed:\t"); + seq_puts(m, "Cpus_allowed:\t"); seq_cpumask(m, &task->cpus_allowed); - seq_printf(m, "\n"); - seq_printf(m, "Cpus_allowed_list:\t"); + seq_putc(m, '\n'); + seq_puts(m, "Cpus_allowed_list:\t"); seq_cpumask_list(m, &task->cpus_allowed); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, @@ -535,15 +535,15 @@ int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { - int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; + unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0; struct mm_struct *mm = get_task_mm(task); if (mm) { size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); } - seq_printf(m, "%d %d %d %d %d %d %d\n", - size, resident, shared, text, lib, data, 0); + seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", + size, resident, shared, text, data); return 0; } diff --git a/fs/proc/base.c b/fs/proc/base.c index b20962c71a52..9d096e82b201 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -373,26 +373,20 @@ static int lstats_show_proc(struct seq_file *m, void *v) return -ESRCH; seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < 32; i++) { - if (task->latency_record[i].backtrace[0]) { + struct latency_record *lr = &task->latency_record[i]; + if (lr->backtrace[0]) { int q; - seq_printf(m, "%i %li %li ", - task->latency_record[i].count, - task->latency_record[i].time, - task->latency_record[i].max); + seq_printf(m, "%i %li %li", + lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_SYMBOL_LEN]; - char *c; - if (!task->latency_record[i].backtrace[q]) + unsigned long bt = lr->backtrace[q]; + if (!bt) break; - if (task->latency_record[i].backtrace[q] == ULONG_MAX) + if (bt == ULONG_MAX) break; - sprint_symbol(sym, task->latency_record[i].backtrace[q]); - c = strchr(sym, '+'); - if (c) - *c = 0; - seq_printf(m, "%s ", sym); + seq_printf(m, " %ps", (void *)bt); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } } @@ -751,14 +745,7 @@ static int proc_single_show(struct 
seq_file *m, void *v) static int proc_single_open(struct inode *inode, struct file *filp) { - int ret; - ret = single_open(filp, proc_single_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, proc_single_show, inode); } static const struct file_operations proc_single_file_operations = { @@ -1164,7 +1151,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, goto err_task_lock; } - if (oom_score_adj < task->signal->oom_score_adj && + if (oom_score_adj < task->signal->oom_score_adj_min && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_sighand; @@ -1177,6 +1164,8 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, atomic_dec(&task->mm->oom_disable_count); } task->signal->oom_score_adj = oom_score_adj; + if (has_capability_noaudit(current, CAP_SYS_RESOURCE)) + task->signal->oom_score_adj_min = oom_score_adj; /* * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is * always attainable. @@ -1386,15 +1375,7 @@ sched_write(struct file *file, const char __user *buf, static int sched_open(struct inode *inode, struct file *filp) { - int ret; - - ret = single_open(filp, sched_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, sched_show, inode); } static const struct file_operations proc_pid_sched_operations = { @@ -1530,15 +1511,7 @@ static int comm_show(struct seq_file *m, void *v) static int comm_open(struct inode *inode, struct file *filp) { - int ret; - - ret = single_open(filp, comm_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, comm_show, inode); } static const struct file_operations proc_pid_set_comm_operations = { diff --git a/fs/proc/proc_console.c b/fs/proc/consoles.c similarity index 96% rename from fs/proc/proc_console.c rename to fs/proc/consoles.c index 8a707609f528..eafc22ab1fdd 100644 --- a/fs/proc/proc_console.c +++ b/fs/proc/consoles.c @@ -106,9 +106,9 @@ static const struct file_operations proc_consoles_operations = { .release = seq_release, }; -static int register_proc_consoles(void) +static int __init proc_consoles_init(void) { proc_create("consoles", 0, NULL, &proc_consoles_operations); return 0; } -module_init(register_proc_consoles); +module_init(proc_consoles_init); diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 59ee7da959c9..b14347167c35 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -9,14 +9,14 @@ static int devinfo_show(struct seq_file *f, void *v) if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) - seq_printf(f, "Character devices:\n"); + seq_puts(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) - seq_printf(f, "\nBlock devices:\n"); + seq_puts(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif diff --git a/fs/proc/generic.c b/fs/proc/generic.c index f766be29d2c7..01e07f2a188f 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -425,13 +425,10 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, if (de->namelen != dentry->d_name.len) continue; if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { - unsigned int ino; - - ino = de->low_ino; pde_get(de); spin_unlock(&proc_subdir_lock); error = -EINVAL; - inode = proc_get_inode(dir->i_sb, ino, de); + inode = proc_get_inode(dir->i_sb, de); goto out_unlock; } } @@ 
-768,12 +765,7 @@ EXPORT_SYMBOL(proc_create_data); static void free_proc_entry(struct proc_dir_entry *de) { - unsigned int ino = de->low_ino; - - if (ino < PROC_DYNAMIC_FIRST) - return; - - release_inode_number(ino); + release_inode_number(de->low_ino); if (S_ISLNK(de->mode)) kfree(de->data); @@ -834,12 +826,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) wait_for_completion(de->pde_unload_completion); - goto continue_removing; + spin_lock(&de->pde_unload_lock); } - spin_unlock(&de->pde_unload_lock); -continue_removing: - spin_lock(&de->pde_unload_lock); while (!list_empty(&de->pde_openers)) { struct pde_opener *pdeo; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 6bcb926b101b..176ce4cda68a 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -416,12 +416,11 @@ static const struct file_operations proc_reg_file_ops_no_compat = { }; #endif -struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, - struct proc_dir_entry *de) +struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode * inode; - inode = iget_locked(sb, ino); + inode = iget_locked(sb, de->low_ino); if (!inode) return NULL; if (inode->i_state & I_NEW) { @@ -471,7 +470,7 @@ int proc_fill_super(struct super_block *s) s->s_time_gran = 1; pde_get(&proc_root); - root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); + root_inode = proc_get_inode(s, &proc_root); if (!root_inode) goto out_no_root; root_inode->i_uid = 0; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 1f24a3eddd12..9ad561ded409 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -96,7 +96,8 @@ extern spinlock_t proc_subdir_lock; struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); unsigned long task_vsize(struct mm_struct *); -int task_statm(struct mm_struct *, int *, int *, int *, int *); +unsigned long task_statm(struct mm_struct *, + unsigned long *, unsigned long *, unsigned long *, unsigned long *); void task_mem(struct seq_file *, struct mm_struct *); static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) @@ -108,7 +109,7 @@ void pde_put(struct proc_dir_entry *pde); extern struct vfsmount *proc_mnt; int proc_fill_super(struct super_block *); -struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *); +struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *); /* * These are generic /proc routines that use the internal diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 6f37c391468d..d245cb23dd72 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -558,7 +558,7 @@ static int open_kcore(struct inode *inode, struct file *filp) static const struct file_operations proc_kcore_operations = { .read = read_kcore, .open = open_kcore, - .llseek = generic_file_llseek, + .llseek = default_llseek, }; #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index a65239cfd97e..ed257d141568 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -100,6 +100,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "VmallocChunk: %8lu kB\n" #ifdef CONFIG_MEMORY_FAILURE "HardwareCorrupted: %5lu kB\n" +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "AnonHugePages: %8lu kB\n" #endif , K(i.totalram), @@ -128,7 +131,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(i.freeswap), K(global_page_state(NR_FILE_DIRTY)), K(global_page_state(NR_WRITEBACK)), 
- K(global_page_state(NR_ANON_PAGES)), + K(global_page_state(NR_ANON_PAGES) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR +#endif + ), K(global_page_state(NR_FILE_MAPPED)), K(global_page_state(NR_SHMEM)), K(global_page_state(NR_SLAB_RECLAIMABLE) + @@ -150,6 +158,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) vmi.largest_chunk >> 10 #ifdef CONFIG_MEMORY_FAILURE ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR) #endif ); diff --git a/fs/proc/page.c b/fs/proc/page.c index 3b8b45660331..6d8e6a9e93ab 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -40,7 +40,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, ppage = pfn_to_page(pfn); else ppage = NULL; - if (!ppage) + if (!ppage || PageSlab(ppage)) pcount = 0; else pcount = page_mapcount(ppage); @@ -116,15 +116,17 @@ u64 stable_page_flags(struct page *page) if (PageHuge(page)) u |= 1 << KPF_HUGE; - u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); - /* - * Caveats on high order pages: - * PG_buddy will only be set on the head page; SLUB/SLQB do the same - * for PG_slab; SLOB won't set PG_slab at all on compound pages. + * Caveats on high order pages: page->_count will only be set + * -1 on the head page; SLUB/SLQB do the same for PG_slab; + * SLOB won't set PG_slab at all on compound pages. */ + if (PageBuddy(page)) + u |= 1 << KPF_BUDDY; + + u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); + u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); - u |= kpf_copy_bit(k, KPF_BUDDY, PG_buddy); u |= kpf_copy_bit(k, KPF_ERROR, PG_error); u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty); diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c index 83adcc869437..cb761f010300 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c @@ -36,27 +36,27 @@ static void show_tty_range(struct seq_file *m, struct tty_driver *p, } switch (p->type) { case TTY_DRIVER_TYPE_SYSTEM: - seq_printf(m, "system"); + seq_puts(m, "system"); if (p->subtype == SYSTEM_TYPE_TTY) - seq_printf(m, ":/dev/tty"); + seq_puts(m, ":/dev/tty"); else if (p->subtype == SYSTEM_TYPE_SYSCONS) - seq_printf(m, ":console"); + seq_puts(m, ":console"); else if (p->subtype == SYSTEM_TYPE_CONSOLE) - seq_printf(m, ":vtmaster"); + seq_puts(m, ":vtmaster"); break; case TTY_DRIVER_TYPE_CONSOLE: - seq_printf(m, "console"); + seq_puts(m, "console"); break; case TTY_DRIVER_TYPE_SERIAL: - seq_printf(m, "serial"); + seq_puts(m, "serial"); break; case TTY_DRIVER_TYPE_PTY: if (p->subtype == PTY_TYPE_MASTER) - seq_printf(m, "pty:master"); + seq_puts(m, "pty:master"); else if (p->subtype == PTY_TYPE_SLAVE) - seq_printf(m, "pty:slave"); + seq_puts(m, "pty:slave"); else - seq_printf(m, "pty"); + seq_puts(m, "pty"); break; default: seq_printf(m, "type:%d.%d", p->type, p->subtype); @@ -74,19 +74,19 @@ static int show_tty_driver(struct seq_file *m, void *v) /* pseudo-drivers first */ seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0); - seq_printf(m, "system:/dev/tty\n"); + seq_puts(m, "system:/dev/tty\n"); seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1); - seq_printf(m, "system:console\n"); + seq_puts(m, "system:console\n"); #ifdef CONFIG_UNIX98_PTYS seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2); - seq_printf(m, "system\n"); + seq_puts(m, "system\n"); #endif #ifdef CONFIG_VT 
seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0"); seq_printf(m, "%3d %7d ", TTY_MAJOR, 0); - seq_printf(m, "system:vtmaster\n"); + seq_puts(m, "system:vtmaster\n"); #endif } diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c index 37994737c983..62604be9f58d 100644 --- a/fs/proc/softirqs.c +++ b/fs/proc/softirqs.c @@ -10,16 +10,16 @@ static int show_softirqs(struct seq_file *p, void *v) { int i, j; - seq_printf(p, " "); + seq_puts(p, " "); for_each_possible_cpu(i) seq_printf(p, "CPU%-8d", i); - seq_printf(p, "\n"); + seq_putc(p, '\n'); for (i = 0; i < NR_SOFTIRQS; i++) { seq_printf(p, "%12s:", softirq_to_name[i]); for_each_possible_cpu(j) seq_printf(p, " %10u", kstat_softirqs_cpu(i, j)); - seq_printf(p, "\n"); + seq_putc(p, '\n'); } return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index e15a19c93bae..1cffa2b8a2fc 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -126,7 +126,7 @@ static int show_stat(struct seq_file *p, void *v) for (i = 0; i < NR_SOFTIRQS; i++) seq_printf(p, " %u", per_softirq_sums[i]); - seq_printf(p, "\n"); + seq_putc(p, '\n'); return 0; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c126c83b9a45..60b914860f81 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -66,8 +66,9 @@ unsigned long task_vsize(struct mm_struct *mm) return PAGE_SIZE * mm->total_vm; } -int task_statm(struct mm_struct *mm, int *shared, int *text, - int *data, int *resident) +unsigned long task_statm(struct mm_struct *mm, + unsigned long *shared, unsigned long *text, + unsigned long *data, unsigned long *resident) { *shared = get_mm_counter(mm, MM_FILEPAGES); *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) @@ -417,7 +418,8 @@ static int show_smap(struct seq_file *m, void *v) "Anonymous: %8lu kB\n" "Swap: %8lu kB\n" "KernelPageSize: %8lu kB\n" - "MMUPageSize: %8lu kB\n", + "MMUPageSize: %8lu kB\n" + "Locked: %8lu kB\n", (vma->vm_end - vma->vm_start) >> 10, mss.resident >> 10, (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), @@ -429,7 +431,9 @@ static int show_smap(struct seq_file *m, void *v) mss.anonymous >> 10, mss.swap >> 10, vma_kernel_pagesize(vma) >> 10, - vma_mmu_pagesize(vma) >> 10); + vma_mmu_pagesize(vma) >> 10, + (vma->vm_flags & VM_LOCKED) ? + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index cb6306e63843..b535d3e5d5f1 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -92,13 +92,14 @@ unsigned long task_vsize(struct mm_struct *mm) return vsize; } -int task_statm(struct mm_struct *mm, int *shared, int *text, - int *data, int *resident) +unsigned long task_statm(struct mm_struct *mm, + unsigned long *shared, unsigned long *text, + unsigned long *data, unsigned long *resident) { struct vm_area_struct *vma; struct vm_region *region; struct rb_node *p; - int size = kobjsize(mm); + unsigned long size = kobjsize(mm); down_read(&mm->mmap_sem); for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { diff --git a/fs/read_write.c b/fs/read_write.c index 5d431bacbea9..5520f8ad5504 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -30,18 +30,9 @@ const struct file_operations generic_ro_fops = { EXPORT_SYMBOL(generic_ro_fops); -static int -__negative_fpos_check(struct file *file, loff_t pos, size_t count) +static inline int unsigned_offsets(struct file *file) { - /* - * pos or pos+count is negative here, check overflow. 
- * too big "count" will be caught in rw_verify_area(). - */ - if ((pos < 0) && (pos + count < pos)) - return -EOVERFLOW; - if (file->f_mode & FMODE_UNSIGNED_OFFSET) - return 0; - return -EINVAL; + return file->f_mode & FMODE_UNSIGNED_OFFSET; } /** @@ -75,7 +66,7 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin) break; } - if (offset < 0 && __negative_fpos_check(file, offset, 0)) + if (offset < 0 && !unsigned_offsets(file)) return -EINVAL; if (offset > inode->i_sb->s_maxbytes) return -EINVAL; @@ -152,7 +143,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) offset += file->f_pos; } retval = -EINVAL; - if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) { + if (offset >= 0 || unsigned_offsets(file)) { if (offset != file->f_pos) { file->f_pos = offset; file->f_version = 0; @@ -252,9 +243,13 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count if (unlikely((ssize_t) count < 0)) return retval; pos = *ppos; - if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) { - retval = __negative_fpos_check(file, pos, count); - if (retval) + if (unlikely(pos < 0)) { + if (!unsigned_offsets(file)) + return retval; + if (count >= -pos) /* both values are in 0..LLONG_MAX */ + return -EOVERFLOW; + } else if (unlikely((loff_t) (pos + count) < 0)) { + if (!unsigned_offsets(file)) return retval; } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index d31bce1a9f90..3eea859e6990 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2551,8 +2551,6 @@ static int release_journal_dev(struct super_block *super, result = 0; if (journal->j_dev_bd != NULL) { - if (journal->j_dev_bd->bd_dev != super->s_dev) - bd_release(journal->j_dev_bd); result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode); journal->j_dev_bd = NULL; } @@ -2570,7 +2568,7 @@ static int journal_init_dev(struct super_block *super, { int result; dev_t jdev; - fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE; + fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; char b[BDEVNAME_SIZE]; result = 0; @@ -2584,7 +2582,10 @@ static int journal_init_dev(struct super_block *super, /* there is no "jdev" option and journal is on separate device */ if ((!jdev_name || !jdev_name[0])) { - journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode); + if (jdev == super->s_dev) + blkdev_mode &= ~FMODE_EXCL; + journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, + journal); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); @@ -2593,22 +2594,14 @@ static int journal_init_dev(struct super_block *super, "cannot init journal device '%s': %i", __bdevname(jdev, b), result); return result; - } else if (jdev != super->s_dev) { - result = bd_claim(journal->j_dev_bd, journal); - if (result) { - blkdev_put(journal->j_dev_bd, blkdev_mode); - return result; - } - + } else if (jdev != super->s_dev) set_blocksize(journal->j_dev_bd, super->s_blocksize); - } return 0; } journal->j_dev_mode = blkdev_mode; - journal->j_dev_bd = open_bdev_exclusive(jdev_name, - blkdev_mode, journal); + journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index adbc6f538515..45de98b59466 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -586,13 +586,13 @@ void print_block(struct buffer_head *bh, ...) 
//int print_mode, int first, int l va_list args; int mode, first, last; - va_start(args, bh); - if (!bh) { printk("print_block: buffer is NULL\n"); return; } + va_start(args, bh); + mode = va_arg(args, int); first = va_arg(args, int); last = va_arg(args, int); diff --git a/fs/select.c b/fs/select.c index b7b10aa30861..e56560d2b08a 100644 --- a/fs/select.c +++ b/fs/select.c @@ -306,6 +306,8 @@ static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, rts.tv_sec = rts.tv_nsec = 0; if (timeval) { + if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec)) + memset(&rtv, 0, sizeof(rtv)); rtv.tv_sec = rts.tv_sec; rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; diff --git a/fs/splice.c b/fs/splice.c index ce2f02579e35..50a5d978da16 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -682,19 +682,14 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe, { struct file *file = sd->u.file; loff_t pos = sd->pos; - int ret, more; - - ret = buf->ops->confirm(pipe, buf); - if (!ret) { - more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; - if (file->f_op && file->f_op->sendpage) - ret = file->f_op->sendpage(file, buf->page, buf->offset, - sd->len, &pos, more); - else - ret = -EINVAL; - } + int more; - return ret; + if (!likely(file->f_op && file->f_op->sendpage)) + return -EINVAL; + + more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; + return file->f_op->sendpage(file, buf->page, buf->offset, + sd->len, &pos, more); } /* @@ -727,13 +722,6 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, void *fsdata; int ret; - /* - * make sure the data in this buffer is uptodate - */ - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; - offset = sd->pos & ~PAGE_CACHE_MASK; this_len = sd->len; @@ -805,12 +793,17 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, if (sd->len > sd->total_len) sd->len = sd->total_len; - ret = actor(pipe, buf, sd); - if (ret <= 0) { + ret = buf->ops->confirm(pipe, buf); + if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } + + ret = actor(pipe, buf, sd); + if (ret <= 0) + return ret; + buf->offset += ret; buf->len -= ret; @@ -1044,10 +1037,6 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, int ret; void *data; - ret = buf->ops->confirm(pipe, buf); - if (ret) - return ret; - data = buf->ops->map(pipe, buf, 0); ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); buf->ops->unmap(pipe, buf, data); @@ -1495,10 +1484,6 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, char *src; int ret; - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; - /* * See if we can use the atomic maps, by prefaulting in the * pages and doing an atomic copy diff --git a/fs/super.c b/fs/super.c index 823e061faa87..4f6a3571a634 100644 --- a/fs/super.c +++ b/fs/super.c @@ -767,13 +767,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; int error = 0; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - bdev = open_bdev_exclusive(dev_name, mode, fs_type); + bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); @@ -802,13 +802,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, /* * s_umount nests inside bd_mutex during - * __invalidate_device(). 
close_bdev_exclusive() - * acquires bd_mutex and can't be called under - * s_umount. Drop s_umount temporarily. This is safe - * as we're holding an active reference. + * __invalidate_device(). blkdev_put() acquires + * bd_mutex and can't be called under s_umount. Drop + * s_umount temporarily. This is safe as we're + * holding an active reference. */ up_write(&s->s_umount); - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode); down_write(&s->s_umount); } else { char b[BDEVNAME_SIZE]; @@ -832,7 +832,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, error_s: error = PTR_ERR(s); error_bdev: - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode); error: return ERR_PTR(error); } @@ -863,7 +863,8 @@ void kill_block_super(struct super_block *sb) bdev->bd_super = NULL; generic_shutdown_super(sb); sync_blockdev(bdev); - close_bdev_exclusive(bdev, mode); + WARN_ON_ONCE(!(mode & FMODE_EXCL)); + blkdev_put(bdev, mode | FMODE_EXCL); } EXPORT_SYMBOL(kill_block_super); diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b5e68da2db32..b427b1208c26 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -48,7 +48,6 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st struct inode * inode = NULL; ino_t ino; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); if (dentry->d_name.len > SYSV_NAMELEN) return ERR_PTR(-ENAMETOOLONG); ino = sysv_inode_by_name(dentry); diff --git a/fs/sysv/super.c b/fs/sysv/super.c index 76712aefc4ab..f60c196913ea 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c @@ -332,6 +332,10 @@ static int complete_read_super(struct super_block *sb, int silent, int size) sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; /* set up enough so that it can read an inode */ sb->s_op = &sysv_sops; + if (sbi->s_forced_ro) + sb->s_flags |= MS_RDONLY; + if (sbi->s_truncate) + sb->s_d_op = &sysv_dentry_operations; root_inode = sysv_iget(sb, SYSV_ROOT_INO); if (IS_ERR(root_inode)) { printk("SysV FS: get root inode failed\n"); @@ -343,10 +347,6 @@ static int complete_read_super(struct super_block *sb, int silent, int size) printk("SysV FS: get root dentry failed\n"); return 0; } - if (sbi->s_forced_ro) - sb->s_flags |= MS_RDONLY; - if (sbi->s_truncate) - d_set_d_op(sb->s_root, &sysv_dentry_operations); return 1; } diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 0dce969d6cad..faca44997099 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -98,6 +98,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ kmem.o \ xfs_aops.o \ xfs_buf.o \ + xfs_discard.o \ xfs_export.o \ xfs_file.o \ xfs_fs_subr.o \ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 92f1f2acc6ab..ac1c7e8378dd 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -896,7 +896,6 @@ xfs_buf_rele( trace_xfs_buf_rele(bp, _RET_IP_); if (!pag) { - ASSERT(!bp->b_relse); ASSERT(list_empty(&bp->b_lru)); ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); if (atomic_dec_and_test(&bp->b_hold)) @@ -908,11 +907,7 @@ xfs_buf_rele( ASSERT(atomic_read(&bp->b_hold) > 0); if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { - if (bp->b_relse) { - atomic_inc(&bp->b_hold); - spin_unlock(&pag->pag_buf_lock); - bp->b_relse(bp); - } else if (!(bp->b_flags & XBF_STALE) && + if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { xfs_buf_lru_add(bp); spin_unlock(&pag->pag_buf_lock); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index a76c2428faff..cbe65950e524 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -152,8 +152,6 @@ 
typedef struct xfs_buftarg { struct xfs_buf; typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); -typedef void (*xfs_buf_relse_t)(struct xfs_buf *); -typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *); #define XB_PAGES 2 @@ -183,7 +181,6 @@ typedef struct xfs_buf { void *b_addr; /* virtual address of buffer */ struct work_struct b_iodone_work; xfs_buf_iodone_t b_iodone; /* I/O completion function */ - xfs_buf_relse_t b_relse; /* releasing function */ struct completion b_iowait; /* queue for I/O waiters */ void *b_fspriv; void *b_fspriv2; @@ -323,7 +320,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2) #define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) -#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func)) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) @@ -360,8 +356,7 @@ xfs_buf_set_ref( static inline void xfs_buf_relse(xfs_buf_t *bp) { - if (!bp->b_relse) - xfs_buf_unlock(bp); + xfs_buf_unlock(bp); xfs_buf_rele(bp); } diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c new file mode 100644 index 000000000000..05201ae719e5 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_discard.c @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_sb.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_ag.h" +#include "xfs_mount.h" +#include "xfs_quota.h" +#include "xfs_trans.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_error.h" +#include "xfs_discard.h" +#include "xfs_trace.h" + +STATIC int +xfs_trim_extents( + struct xfs_mount *mp, + xfs_agnumber_t agno, + xfs_fsblock_t start, + xfs_fsblock_t len, + xfs_fsblock_t minlen, + __uint64_t *blocks_trimmed) +{ + struct block_device *bdev = mp->m_ddev_targp->bt_bdev; + struct xfs_btree_cur *cur; + struct xfs_buf *agbp; + struct xfs_perag *pag; + int error; + int i; + + pag = xfs_perag_get(mp, agno); + + error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); + if (error || !agbp) + goto out_put_perag; + + cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); + + /* + * Force out the log. This means any transactions that might have freed + * space before we took the AGF buffer lock are now on disk, and the + * volatile disk cache is flushed. + */ + xfs_log_force(mp, XFS_LOG_SYNC); + + /* + * Look up the longest btree in the AGF and start with it. + */ + error = xfs_alloc_lookup_le(cur, 0, + XFS_BUF_TO_AGF(agbp)->agf_longest, &i); + if (error) + goto out_del_cursor; + + /* + * Loop until we are done with all extents that are large + * enough to be worth discarding. 
+ */ + while (i) { + xfs_agblock_t fbno; + xfs_extlen_t flen; + + error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); + if (error) + goto out_del_cursor; + XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); + ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest); + + /* + * Too small? Give up. + */ + if (flen < minlen) { + trace_xfs_discard_toosmall(mp, agno, fbno, flen); + goto out_del_cursor; + } + + /* + * If the extent is entirely outside of the range we are + * supposed to discard skip it. Do not bother to trim + * down partially overlapping ranges for now. + */ + if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start || + XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) { + trace_xfs_discard_exclude(mp, agno, fbno, flen); + goto next_extent; + } + + /* + * If any blocks in the range are still busy, skip the + * discard and try again the next time. + */ + if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { + trace_xfs_discard_busy(mp, agno, fbno, flen); + goto next_extent; + } + + trace_xfs_discard_extent(mp, agno, fbno, flen); + error = -blkdev_issue_discard(bdev, + XFS_AGB_TO_DADDR(mp, agno, fbno), + XFS_FSB_TO_BB(mp, flen), + GFP_NOFS, 0); + if (error) + goto out_del_cursor; + *blocks_trimmed += flen; + +next_extent: + error = xfs_btree_decrement(cur, 0, &i); + if (error) + goto out_del_cursor; + } + +out_del_cursor: + xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + xfs_buf_relse(agbp); +out_put_perag: + xfs_perag_put(pag); + return error; +} + +int +xfs_ioc_trim( + struct xfs_mount *mp, + struct fstrim_range __user *urange) +{ + struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; + unsigned int granularity = q->limits.discard_granularity; + struct fstrim_range range; + xfs_fsblock_t start, len, minlen; + xfs_agnumber_t start_agno, end_agno, agno; + __uint64_t blocks_trimmed = 0; + int error, last_error = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&range, urange, sizeof(range))) + return -XFS_ERROR(EFAULT); + + /* + * Truncating down the len isn't actually quite correct, but using + * XFS_B_TO_FSB would mean we trivially get overflows for values + * of ULLONG_MAX or slightly lower. And ULLONG_MAX is the default + * used by the fstrim application. In the end it really doesn't + * matter as trimming blocks is an advisory interface. 
+ */ + start = XFS_B_TO_FSBT(mp, range.start); + len = XFS_B_TO_FSBT(mp, range.len); + minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen)); + + start_agno = XFS_FSB_TO_AGNO(mp, start); + if (start_agno >= mp->m_sb.sb_agcount) + return -XFS_ERROR(EINVAL); + + end_agno = XFS_FSB_TO_AGNO(mp, start + len); + if (end_agno >= mp->m_sb.sb_agcount) + end_agno = mp->m_sb.sb_agcount - 1; + + for (agno = start_agno; agno <= end_agno; agno++) { + error = -xfs_trim_extents(mp, agno, start, len, minlen, + &blocks_trimmed); + if (error) + last_error = error; + } + + if (last_error) + return last_error; + + range.len = XFS_FSB_TO_B(mp, blocks_trimmed); + if (copy_to_user(urange, &range, sizeof(range))) + return -XFS_ERROR(EFAULT); + return 0; +} diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h new file mode 100644 index 000000000000..e82b6dd3e127 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_discard.h @@ -0,0 +1,8 @@ +#ifndef XFS_DISCARD_H +#define XFS_DISCARD_H 1 + +struct fstrim_range; + +extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); + +#endif /* XFS_DISCARD_H */ diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index ba8ad422a165..ef51eb43e137 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -40,6 +40,40 @@ static const struct vm_operations_struct xfs_file_vm_ops; +/* + * Locking primitives for read and write IO paths to ensure we consistently use + * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. + */ +static inline void +xfs_rw_ilock( + struct xfs_inode *ip, + int type) +{ + if (type & XFS_IOLOCK_EXCL) + mutex_lock(&VFS_I(ip)->i_mutex); + xfs_ilock(ip, type); +} + +static inline void +xfs_rw_iunlock( + struct xfs_inode *ip, + int type) +{ + xfs_iunlock(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + +static inline void +xfs_rw_ilock_demote( + struct xfs_inode *ip, + int type) +{ + xfs_ilock_demote(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + /* * xfs_iozero * @@ -262,22 +296,21 @@ xfs_file_aio_read( if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - if (unlikely(ioflags & IO_ISDIRECT)) - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, XFS_IOLOCK_SHARED); - if (unlikely(ioflags & IO_ISDIRECT)) { + xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); + if (inode->i_mapping->nrpages) { ret = -xfs_flushinval_pages(ip, (iocb->ki_pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); + if (ret) { + xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); + return ret; + } } - mutex_unlock(&inode->i_mutex); - if (ret) { - xfs_iunlock(ip, XFS_IOLOCK_SHARED); - return ret; - } - } + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); + } else + xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags); @@ -285,7 +318,7 @@ xfs_file_aio_read( if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } @@ -309,7 +342,7 @@ xfs_file_splice_read( if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; - xfs_ilock(ip, XFS_IOLOCK_SHARED); + xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); trace_xfs_file_splice_read(ip, count, *ppos, ioflags); @@ -317,10 +350,61 @@ xfs_file_splice_read( if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } +STATIC void +xfs_aio_write_isize_update( + struct inode *inode, + loff_t *ppos, + ssize_t bytes_written) +{ + struct xfs_inode *ip = XFS_I(inode); + xfs_fsize_t isize = 
i_size_read(inode); + + if (bytes_written > 0) + XFS_STATS_ADD(xs_write_bytes, bytes_written); + + if (unlikely(bytes_written < 0 && bytes_written != -EFAULT && + *ppos > isize)) + *ppos = isize; + + if (*ppos > ip->i_size) { + xfs_rw_ilock(ip, XFS_ILOCK_EXCL); + if (*ppos > ip->i_size) + ip->i_size = *ppos; + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + } +} + +/* + * If this was a direct or synchronous I/O that failed (such as ENOSPC) then + * part of the I/O may have been written to disk before the error occured. In + * this case the on-disk file size may have been adjusted beyond the in-memory + * file size and now needs to be truncated back. + */ +STATIC void +xfs_aio_write_newsize_update( + struct xfs_inode *ip) +{ + if (ip->i_new_size) { + xfs_rw_ilock(ip, XFS_ILOCK_EXCL); + ip->i_new_size = 0; + if (ip->i_d.di_size > ip->i_size) + ip->i_d.di_size = ip->i_size; + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + } +} + +/* + * xfs_file_splice_write() does not use xfs_rw_ilock() because + * generic_file_splice_write() takes the i_mutex itself. This, in theory, + * couuld cause lock inversions between the aio_write path and the splice path + * if someone is doing concurrent splice(2) based writes and write(2) based + * writes to the same inode. The only real way to fix this is to re-implement + * the generic code here with correct locking orders. + */ STATIC ssize_t xfs_file_splice_write( struct pipe_inode_info *pipe, @@ -331,7 +415,7 @@ xfs_file_splice_write( { struct inode *inode = outfilp->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); - xfs_fsize_t isize, new_size; + xfs_fsize_t new_size; int ioflags = 0; ssize_t ret; @@ -355,27 +439,9 @@ xfs_file_splice_write( trace_xfs_file_splice_write(ip, count, *ppos, ioflags); ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); - if (ret > 0) - XFS_STATS_ADD(xs_write_bytes, ret); - - isize = i_size_read(inode); - if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize)) - *ppos = isize; - if (*ppos > ip->i_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (*ppos > ip->i_size) - ip->i_size = *ppos; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } - - if (ip->i_new_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - ip->i_new_size = 0; - if (ip->i_d.di_size > ip->i_size) - ip->i_d.di_size = ip->i_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_isize_update(inode, ppos, ret); + xfs_aio_write_newsize_update(ip); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } @@ -562,245 +628,258 @@ out_lock: return error; } +/* + * Common pre-write limit and setup checks. + * + * Returns with iolock held according to @iolock. 
+ */ STATIC ssize_t -xfs_file_aio_write( - struct kiocb *iocb, - const struct iovec *iovp, - unsigned long nr_segs, - loff_t pos) +xfs_file_aio_write_checks( + struct file *file, + loff_t *pos, + size_t *count, + int *iolock) { - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; + struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - ssize_t ret = 0, error = 0; - int ioflags = 0; - xfs_fsize_t isize, new_size; - int iolock; - size_t ocount = 0, count; - int need_i_mutex; + xfs_fsize_t new_size; + int error = 0; - XFS_STATS_INC(xs_write_calls); + error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); + if (error) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); + *iolock = 0; + return error; + } - BUG_ON(iocb->ki_pos != pos); + new_size = *pos + *count; + if (new_size > ip->i_size) + ip->i_new_size = new_size; - if (unlikely(file->f_flags & O_DIRECT)) - ioflags |= IO_ISDIRECT; - if (file->f_mode & FMODE_NOCMTIME) - ioflags |= IO_INVIS; + if (likely(!(file->f_mode & FMODE_NOCMTIME))) + file_update_time(file); - error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); + /* + * If the offset is beyond the size of the file, we need to zero any + * blocks that fall between the existing EOF and the start of this + * write. + */ + if (*pos > ip->i_size) + error = -xfs_zero_eof(ip, *pos, ip->i_size); + + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; - count = ocount; - if (count == 0) - return 0; - - xfs_wait_for_freeze(mp, SB_FREEZE_WRITE); - - if (XFS_FORCED_SHUTDOWN(mp)) - return -EIO; - -relock: - if (ioflags & IO_ISDIRECT) { - iolock = XFS_IOLOCK_SHARED; - need_i_mutex = 0; - } else { - iolock = XFS_IOLOCK_EXCL; - need_i_mutex = 1; - mutex_lock(&inode->i_mutex); - } + /* + * If we're writing the file then make sure to clear the setuid and + * setgid bits if the process is not being run by root. This keeps + * people from modifying setuid and setgid binaries. + */ + return file_remove_suid(file); - xfs_ilock(ip, XFS_ILOCK_EXCL|iolock); +} -start: - error = -generic_write_checks(file, &pos, &count, - S_ISBLK(inode->i_mode)); - if (error) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_mutex; +/* + * xfs_file_dio_aio_write - handle direct IO writes + * + * Lock the inode appropriately to prepare for and issue a direct IO write. + * By separating it from the buffered write path we remove all the tricky to + * follow locking changes and looping. + * + * If there are cached pages or we're extending the file, we need IOLOCK_EXCL + * until we're sure the bytes at the new EOF have been zeroed and/or the cached + * pages are flushed out. + * + * In most cases the direct IO writes will be done holding IOLOCK_SHARED + * allowing them to be done in parallel with reads and other direct IO writes. + * However, if the IO is not aligned to filesystem blocks, the direct IO layer + * needs to do sub-block zeroing and that requires serialisation against other + * direct IOs to the same block. In this case we need to serialise the + * submission of the unaligned IOs so that we don't get racing block zeroing in + * the dio layer. To avoid the problem with aio, we also need to wait for + * outstanding IOs to complete so that unwritten extent conversion is completed + * before we try to map the overlapping block. This is currently implemented by + * hitting it with a big hammer (i.e. xfs_ioend_wait()). 
+ * + * Returns with locks held indicated by @iolock and errors indicated by + * negative return values. + */ +STATIC ssize_t +xfs_file_dio_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos, + size_t ocount, + int *iolock) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + ssize_t ret = 0; + size_t count = ocount; + int unaligned_io = 0; + struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? + mp->m_rtdev_targp : mp->m_ddev_targp; + + *iolock = 0; + if ((pos & target->bt_smask) || (count & target->bt_smask)) + return -XFS_ERROR(EINVAL); + + if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) + unaligned_io = 1; + + if (unaligned_io || mapping->nrpages || pos > ip->i_size) + *iolock = XFS_IOLOCK_EXCL; + else + *iolock = XFS_IOLOCK_SHARED; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); + + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) + return ret; + + if (mapping->nrpages) { + WARN_ON(*iolock != XFS_IOLOCK_EXCL); + ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, + FI_REMAPF_LOCKED); + if (ret) + return ret; } - if (ioflags & IO_ISDIRECT) { - xfs_buftarg_t *target = - XFS_IS_REALTIME_INODE(ip) ? - mp->m_rtdev_targp : mp->m_ddev_targp; - - if ((pos & target->bt_smask) || (count & target->bt_smask)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); - return XFS_ERROR(-EINVAL); - } - - if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); - iolock = XFS_IOLOCK_EXCL; - need_i_mutex = 1; - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, XFS_ILOCK_EXCL|iolock); - goto start; - } + /* + * If we are doing unaligned IO, wait for all other IO to drain, + * otherwise demote the lock if we had to flush cached pages + */ + if (unaligned_io) + xfs_ioend_wait(ip); + else if (*iolock == XFS_IOLOCK_EXCL) { + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); + *iolock = XFS_IOLOCK_SHARED; } - new_size = pos + count; - if (new_size > ip->i_size) - ip->i_new_size = new_size; + trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_direct_write(iocb, iovp, + &nr_segs, pos, &iocb->ki_pos, count, ocount); - if (likely(!(ioflags & IO_INVIS))) - file_update_time(file); + /* No fallback to buffered IO on errors for XFS. */ + ASSERT(ret < 0 || ret == count); + return ret; +} - /* - * If the offset is beyond the size of the file, we have a couple - * of things to do. First, if there is already space allocated - * we need to either create holes or zero the disk or ... - * - * If there is a page where the previous size lands, we need - * to zero it out up to the new size. 
- */ +STATIC ssize_t +xfs_file_buffered_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos, + size_t ocount, + int *iolock) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + ssize_t ret; + int enospc = 0; + size_t count = ocount; - if (pos > ip->i_size) { - error = xfs_zero_eof(ip, pos, ip->i_size); - if (error) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - goto out_unlock_internal; - } - } - xfs_iunlock(ip, XFS_ILOCK_EXCL); + *iolock = XFS_IOLOCK_EXCL; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - /* - * If we're writing the file then make sure to clear the - * setuid and setgid bits if the process is not being run - * by root. This keeps people from modifying setuid and - * setgid binaries. - */ - error = -file_remove_suid(file); - if (unlikely(error)) - goto out_unlock_internal; + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) + return ret; /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; - if ((ioflags & IO_ISDIRECT)) { - if (mapping->nrpages) { - WARN_ON(need_i_mutex == 0); - error = xfs_flushinval_pages(ip, - (pos & PAGE_CACHE_MASK), - -1, FI_REMAPF_LOCKED); - if (error) - goto out_unlock_internal; - } - - if (need_i_mutex) { - /* demote the lock now the cached pages are gone */ - xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); - mutex_unlock(&inode->i_mutex); +write_retry: + trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_buffered_write(iocb, iovp, nr_segs, + pos, &iocb->ki_pos, count, ret); + /* + * if we just got an ENOSPC, flush the inode now we aren't holding any + * page locks and retry *once* + */ + if (ret == -ENOSPC && !enospc) { + ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); + if (ret) + return ret; + enospc = 1; + goto write_retry; + } + current->backing_dev_info = NULL; + return ret; +} - iolock = XFS_IOLOCK_SHARED; - need_i_mutex = 0; - } +STATIC ssize_t +xfs_file_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + ssize_t ret; + int iolock; + size_t ocount = 0; - trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags); - ret = generic_file_direct_write(iocb, iovp, - &nr_segs, pos, &iocb->ki_pos, count, ocount); + XFS_STATS_INC(xs_write_calls); - /* - * direct-io write to a hole: fall through to buffered I/O - * for completing the rest of the request. 
- */ - if (ret >= 0 && ret != count) { - XFS_STATS_ADD(xs_write_bytes, ret); + BUG_ON(iocb->ki_pos != pos); - pos += ret; - count -= ret; + ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); + if (ret) + return ret; - ioflags &= ~IO_ISDIRECT; - xfs_iunlock(ip, iolock); - goto relock; - } - } else { - int enospc = 0; - ssize_t ret2 = 0; + if (ocount == 0) + return 0; -write_retry: - trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags); - ret2 = generic_file_buffered_write(iocb, iovp, nr_segs, - pos, &iocb->ki_pos, count, ret); - /* - * if we just got an ENOSPC, flush the inode now we - * aren't holding any page locks and retry *once* - */ - if (ret2 == -ENOSPC && !enospc) { - error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE); - if (error) - goto out_unlock_internal; - enospc = 1; - goto write_retry; - } - ret = ret2; - } + xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); - current->backing_dev_info = NULL; + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; - isize = i_size_read(inode); - if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize)) - iocb->ki_pos = isize; + if (unlikely(file->f_flags & O_DIRECT)) + ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); + else + ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); - if (iocb->ki_pos > ip->i_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (iocb->ki_pos > ip->i_size) - ip->i_size = iocb->ki_pos; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); - error = -ret; if (ret <= 0) - goto out_unlock_internal; - - XFS_STATS_ADD(xs_write_bytes, ret); + goto out_unlock; /* Handle various SYNC-type writes */ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { loff_t end = pos + ret - 1; - int error2; - - xfs_iunlock(ip, iolock); - if (need_i_mutex) - mutex_unlock(&inode->i_mutex); + int error, error2; - error2 = filemap_write_and_wait_range(mapping, pos, end); - if (!error) - error = error2; - if (need_i_mutex) - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, iolock); + xfs_rw_iunlock(ip, iolock); + error = filemap_write_and_wait_range(mapping, pos, end); + xfs_rw_ilock(ip, iolock); error2 = -xfs_file_fsync(file, (file->f_flags & __O_SYNC) ? 0 : 1); - if (!error) - error = error2; + if (error) + ret = error; + else if (error2) + ret = error2; } - out_unlock_internal: - if (ip->i_new_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - ip->i_new_size = 0; - /* - * If this was a direct or synchronous I/O that failed (such - * as ENOSPC) then part of the I/O may have been written to - * disk before the error occured. In this case the on-disk - * file size may have been adjusted beyond the in-memory file - * size and now needs to be truncated back. 
- */ - if (ip->i_d.di_size > ip->i_size) - ip->i_d.di_size = ip->i_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } - xfs_iunlock(ip, iolock); - out_unlock_mutex: - if (need_i_mutex) - mutex_unlock(&inode->i_mutex); - return -error; +out_unlock: + xfs_aio_write_newsize_update(ip); + xfs_rw_iunlock(ip, iolock); + return ret; } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index ad442d9e392e..b06ede1d0bed 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -39,6 +39,7 @@ #include "xfs_dfrag.h" #include "xfs_fsops.h" #include "xfs_vnodeops.h" +#include "xfs_discard.h" #include "xfs_quota.h" #include "xfs_inode_item.h" #include "xfs_export.h" @@ -1294,6 +1295,8 @@ xfs_file_ioctl( trace_xfs_file_ioctl(ip); switch (cmd) { + case FITRIM: + return xfs_ioc_trim(mp, arg); case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: case XFS_IOC_RESVSP: diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 94d5fd6a2973..da54403633b6 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -516,6 +516,7 @@ xfs_vn_fallocate( loff_t new_size = 0; xfs_flock64_t bf; xfs_inode_t *ip = XFS_I(inode); + int cmd = XFS_IOC_RESVSP; /* preallocation on directories not yet supported */ error = -ENODEV; @@ -528,6 +529,9 @@ xfs_vn_fallocate( xfs_ilock(ip, XFS_IOLOCK_EXCL); + if (mode & FALLOC_FL_PUNCH_HOLE) + cmd = XFS_IOC_UNRESVSP; + /* check the new inode size is valid before allocating */ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { @@ -537,8 +541,7 @@ xfs_vn_fallocate( goto out_unlock; } - error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, - 0, XFS_ATTR_NOLOCK); + error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); if (error) goto out_unlock; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c51faaa5e291..9731898083ae 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -606,7 +606,8 @@ xfs_blkdev_get( { int error = 0; - *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp); + *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + mp); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); printk("XFS: Invalid device [%s], error=%d\n", name, error); @@ -620,7 +621,7 @@ xfs_blkdev_put( struct block_device *bdev) { if (bdev) - close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } /* @@ -947,7 +948,7 @@ out_reclaim: * Slab object creation initialisation for the XFS inode. * This covers only the idempotent fields in the XFS inode; * all other fields need to be initialised on allocation - * from the slab. This avoids the need to repeatedly intialise + * from the slab. This avoids the need to repeatedly initialise * fields in the xfs inode that left in the initialise state * when freeing the inode. 
*/ @@ -1413,7 +1414,7 @@ xfs_fs_freeze( xfs_save_resvblks(mp); xfs_quiesce_attr(mp); - return -xfs_fs_log_dummy(mp, SYNC_WAIT); + return -xfs_fs_log_dummy(mp); } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index a02480de9759..e22f0057d21f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -362,7 +362,7 @@ xfs_quiesce_data( /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) - error2 = xfs_fs_log_dummy(mp, SYNC_WAIT); + error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) @@ -503,13 +503,14 @@ xfs_sync_worker( int error; if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { - xfs_log_force(mp, 0); - xfs_reclaim_inodes(mp, 0); /* dgc: errors ignored here */ - error = xfs_qm_sync(mp, SYNC_TRYLOCK); if (mp->m_super->s_frozen == SB_UNFROZEN && xfs_log_need_covered(mp)) - error = xfs_fs_log_dummy(mp, 0); + error = xfs_fs_log_dummy(mp); + else + xfs_log_force(mp, 0); + xfs_reclaim_inodes(mp, 0); + error = xfs_qm_sync(mp, SYNC_TRYLOCK); } mp->m_sync_seq++; wake_up(&mp->m_wait_single_sync_task); diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index 7bb5092d6ae4..ee3cee097e7e 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -18,6 +18,7 @@ #include "xfs.h" #include #include +#include "xfs_error.h" static struct ctl_table_header *xfs_table_header; @@ -51,6 +52,26 @@ xfs_stats_clear_proc_handler( return ret; } + +STATIC int +xfs_panic_mask_proc_handler( + ctl_table *ctl, + int write, + void __user *buffer, + size_t *lenp, + loff_t *ppos) +{ + int ret, *valp = ctl->data; + + ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); + if (!ret && write) { + xfs_panic_mask = *valp; +#ifdef DEBUG + xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); +#endif + } + return ret; +} #endif /* CONFIG_PROC_FS */ static ctl_table xfs_table[] = { @@ -77,7 +98,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.panic_mask.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = xfs_panic_mask_proc_handler, .extra1 = &xfs_params.panic_mask.min, .extra2 = &xfs_params.panic_mask.max }, diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 647af2a2e7aa..2d0bcb479075 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1759,6 +1759,39 @@ DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip); +DECLARE_EVENT_CLASS(xfs_discard_class, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(mp, agno, agbno, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u\n", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len) +) + +#define DEFINE_DISCARD_EVENT(name) \ +DEFINE_EVENT(xfs_discard_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ + xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(mp, agno, agbno, len)) +DEFINE_DISCARD_EVENT(xfs_discard_extent); +DEFINE_DISCARD_EVENT(xfs_discard_toosmall); +DEFINE_DISCARD_EVENT(xfs_discard_exclude); 
+DEFINE_DISCARD_EVENT(xfs_discard_busy); + #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index 975aa10e1a47..e6cf955ec0fc 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -25,86 +25,78 @@ #include "xfs_mount.h" #include "xfs_error.h" -static char message[1024]; /* keep it off the stack */ -static DEFINE_SPINLOCK(xfs_err_lock); - -/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */ -#define XFS_MAX_ERR_LEVEL 7 -#define XFS_ERR_MASK ((1 << 3) - 1) -static const char * const err_level[XFS_MAX_ERR_LEVEL+1] = - {KERN_EMERG, KERN_ALERT, KERN_CRIT, - KERN_ERR, KERN_WARNING, KERN_NOTICE, - KERN_INFO, KERN_DEBUG}; - void -cmn_err(register int level, char *fmt, ...) +cmn_err( + const char *lvl, + const char *fmt, + ...) { - char *fp = fmt; - int len; - ulong flags; - va_list ap; - - level &= XFS_ERR_MASK; - if (level > XFS_MAX_ERR_LEVEL) - level = XFS_MAX_ERR_LEVEL; - spin_lock_irqsave(&xfs_err_lock,flags); - va_start(ap, fmt); - if (*fmt == '!') fp++; - len = vsnprintf(message, sizeof(message), fp, ap); - if (len >= sizeof(message)) - len = sizeof(message) - 1; - if (message[len-1] == '\n') - message[len-1] = 0; - printk("%s%s\n", err_level[level], message); - va_end(ap); - spin_unlock_irqrestore(&xfs_err_lock,flags); - BUG_ON(level == CE_PANIC); + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + printk("%s%pV", lvl, &vaf); + va_end(args); + + BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); } void -xfs_fs_vcmn_err( - int level, +xfs_fs_cmn_err( + const char *lvl, struct xfs_mount *mp, - char *fmt, - va_list ap) + const char *fmt, + ...) { - unsigned long flags; - int len = 0; + struct va_format vaf; + va_list args; - level &= XFS_ERR_MASK; - if (level > XFS_MAX_ERR_LEVEL) - level = XFS_MAX_ERR_LEVEL; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; - spin_lock_irqsave(&xfs_err_lock,flags); + printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf); + va_end(args); - if (mp) { - len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname); + BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); +} + +/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */ +void +xfs_cmn_err( + int panic_tag, + const char *lvl, + struct xfs_mount *mp, + const char *fmt, + ...) +{ + struct va_format vaf; + va_list args; + int panic = 0; - /* - * Skip the printk if we can't print anything useful - * due to an over-long device name. 
- */ - if (len >= sizeof(message)) - goto out; + if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { + printk(KERN_ALERT "XFS: Transforming an alert into a BUG."); + panic = 1; } - len = vsnprintf(message + len, sizeof(message) - len, fmt, ap); - if (len >= sizeof(message)) - len = sizeof(message) - 1; - if (message[len-1] == '\n') - message[len-1] = 0; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; - printk("%s%s\n", err_level[level], message); - out: - spin_unlock_irqrestore(&xfs_err_lock,flags); + printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf); + va_end(args); - BUG_ON(level == CE_PANIC); + BUG_ON(panic); } void assfail(char *expr, char *file, int line) { - printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); + printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr, + file, line); BUG(); } diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h index d2d20462fd4f..05699f67d475 100644 --- a/fs/xfs/support/debug.h +++ b/fs/xfs/support/debug.h @@ -20,15 +20,22 @@ #include -#define CE_DEBUG 7 /* debug */ -#define CE_CONT 6 /* continuation */ -#define CE_NOTE 5 /* notice */ -#define CE_WARN 4 /* warning */ -#define CE_ALERT 1 /* alert */ -#define CE_PANIC 0 /* panic */ - -extern void cmn_err(int, char *, ...) - __attribute__ ((format (printf, 2, 3))); +struct xfs_mount; + +#define CE_DEBUG KERN_DEBUG +#define CE_CONT KERN_INFO +#define CE_NOTE KERN_NOTICE +#define CE_WARN KERN_WARNING +#define CE_ALERT KERN_ALERT +#define CE_PANIC KERN_EMERG + +void cmn_err(const char *lvl, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp, + const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); +void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp, + const char *fmt, ...) __attribute__ ((format (printf, 4, 5))); + extern void assfail(char *expr, char *f, int l); #define ASSERT_ALWAYS(expr) \ diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index fa8723f5870a..f3227984a9bf 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -41,10 +41,6 @@ #define XFSA_FIXUP_BNO_OK 1 #define XFSA_FIXUP_CNT_OK 2 -static int -xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t bno, xfs_extlen_t len); - /* * Prototypes for per-ag allocation routines */ @@ -94,7 +90,7 @@ xfs_alloc_lookup_ge( * Lookup the first record less than or equal to [bno, len] * in the btree given by cur. */ -STATIC int /* error */ +int /* error */ xfs_alloc_lookup_le( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ @@ -127,7 +123,7 @@ xfs_alloc_update( /* * Get the data from the pointed-to record. */ -STATIC int /* error */ +int /* error */ xfs_alloc_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t *bno, /* output: starting block of extent */ @@ -2615,7 +2611,7 @@ restart: * will require a synchronous transaction, but it can still be * used to distinguish between a partial or exact match. 
*/ -static int +int xfs_alloc_busy_search( struct xfs_mount *mp, xfs_agnumber_t agno, diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 895009a97271..0ab56b32c7eb 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -19,6 +19,7 @@ #define __XFS_ALLOC_H__ struct xfs_buf; +struct xfs_btree_cur; struct xfs_mount; struct xfs_perag; struct xfs_trans; @@ -118,16 +119,16 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp, struct xfs_perag *pag); #ifdef __KERNEL__ - void -xfs_alloc_busy_insert(xfs_trans_t *tp, - xfs_agnumber_t agno, - xfs_agblock_t bno, - xfs_extlen_t len); +xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno, + xfs_agblock_t bno, xfs_extlen_t len); void xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp); +int +xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t bno, xfs_extlen_t len); #endif /* __KERNEL__ */ /* @@ -205,4 +206,18 @@ xfs_free_extent( xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len); /* length of extent */ +int /* error */ +xfs_alloc_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +int /* error */ +xfs_alloc_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat); /* output: success/failure */ + #endif /* __XFS_ALLOC_H__ */ diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index ed2b65f3f8b9..98c6f73b6752 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -141,7 +141,6 @@ xfs_buf_item_log_check( #define xfs_buf_item_log_check(x) #endif -STATIC void xfs_buf_error_relse(xfs_buf_t *bp); STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp); /* @@ -959,128 +958,76 @@ xfs_buf_do_callbacks( */ void xfs_buf_iodone_callbacks( - xfs_buf_t *bp) + struct xfs_buf *bp) { - xfs_log_item_t *lip; - static ulong lasttime; - static xfs_buftarg_t *lasttarg; - xfs_mount_t *mp; + struct xfs_log_item *lip = bp->b_fspriv; + struct xfs_mount *mp = lip->li_mountp; + static ulong lasttime; + static xfs_buftarg_t *lasttarg; - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (likely(!XFS_BUF_GETERROR(bp))) + goto do_callbacks; - if (XFS_BUF_GETERROR(bp) != 0) { - /* - * If we've already decided to shutdown the filesystem - * because of IO errors, there's no point in giving this - * a retry. - */ - mp = lip->li_mountp; - if (XFS_FORCED_SHUTDOWN(mp)) { - ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); - XFS_BUF_SUPER_STALE(bp); - trace_xfs_buf_item_iodone(bp, _RET_IP_); - xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); - XFS_BUF_CLR_IODONE_FUNC(bp); - xfs_buf_ioend(bp, 0); - return; - } + /* + * If we've already decided to shutdown the filesystem because of + * I/O errors, there's no point in giving this a retry. 
+ */ + if (XFS_FORCED_SHUTDOWN(mp)) { + XFS_BUF_SUPER_STALE(bp); + trace_xfs_buf_item_iodone(bp, _RET_IP_); + goto do_callbacks; + } - if ((XFS_BUF_TARGET(bp) != lasttarg) || - (time_after(jiffies, (lasttime + 5*HZ)))) { - lasttime = jiffies; - cmn_err(CE_ALERT, "Device %s, XFS metadata write error" - " block 0x%llx in %s", - XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), - (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname); - } - lasttarg = XFS_BUF_TARGET(bp); + if (XFS_BUF_TARGET(bp) != lasttarg || + time_after(jiffies, (lasttime + 5*HZ))) { + lasttime = jiffies; + cmn_err(CE_ALERT, "Device %s, XFS metadata write error" + " block 0x%llx in %s", + XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), + (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname); + } + lasttarg = XFS_BUF_TARGET(bp); - if (XFS_BUF_ISASYNC(bp)) { - /* - * If the write was asynchronous then noone will be - * looking for the error. Clear the error state - * and write the buffer out again delayed write. - * - * XXXsup This is OK, so long as we catch these - * before we start the umount; we don't want these - * DELWRI metadata bufs to be hanging around. - */ - XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */ - - if (!(XFS_BUF_ISSTALE(bp))) { - XFS_BUF_DELAYWRITE(bp); - XFS_BUF_DONE(bp); - XFS_BUF_SET_START(bp); - } - ASSERT(XFS_BUF_IODONE_FUNC(bp)); - trace_xfs_buf_item_iodone_async(bp, _RET_IP_); - xfs_buf_relse(bp); - } else { - /* - * If the write of the buffer was not asynchronous, - * then we want to make sure to return the error - * to the caller of bwrite(). Because of this we - * cannot clear the B_ERROR state at this point. - * Instead we install a callback function that - * will be called when the buffer is released, and - * that routine will clear the error state and - * set the buffer to be written out again after - * some delay. - */ - /* We actually overwrite the existing b-relse - function at times, but we're gonna be shutting down - anyway. */ - XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); + /* + * If the write was asynchronous then noone will be looking for the + * error. Clear the error state and write the buffer out again. + * + * During sync or umount we'll write all pending buffers again + * synchronous, which will catch these errors if they keep hanging + * around. + */ + if (XFS_BUF_ISASYNC(bp)) { + XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */ + + if (!XFS_BUF_ISSTALE(bp)) { + XFS_BUF_DELAYWRITE(bp); XFS_BUF_DONE(bp); - XFS_BUF_FINISH_IOWAIT(bp); + XFS_BUF_SET_START(bp); } + ASSERT(XFS_BUF_IODONE_FUNC(bp)); + trace_xfs_buf_item_iodone_async(bp, _RET_IP_); + xfs_buf_relse(bp); return; } - xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); - XFS_BUF_CLR_IODONE_FUNC(bp); - xfs_buf_ioend(bp, 0); -} - -/* - * This is a callback routine attached to a buffer which gets an error - * when being written out synchronously. - */ -STATIC void -xfs_buf_error_relse( - xfs_buf_t *bp) -{ - xfs_log_item_t *lip; - xfs_mount_t *mp; - - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - mp = (xfs_mount_t *)lip->li_mountp; - ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); - + /* + * If the write of the buffer was synchronous, we want to make + * sure to return the error to the caller of xfs_bwrite(). + */ XFS_BUF_STALE(bp); XFS_BUF_DONE(bp); XFS_BUF_UNDELAYWRITE(bp); - XFS_BUF_ERROR(bp,0); trace_xfs_buf_error_relse(bp, _RET_IP_); + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - if (! XFS_FORCED_SHUTDOWN(mp)) - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - /* - * We have to unpin the pinned buffers so do the - * callbacks. 
- */ +do_callbacks: xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); - XFS_BUF_SET_BRELSE_FUNC(bp,NULL); - xfs_buf_relse(bp); + xfs_buf_ioend(bp, 0); } - /* * This is the iodone() function for buffers which have been * logged. It is called when they are eventually flushed out. diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index c78cc6a3d87c..4c7db74a05f7 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -152,37 +152,6 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud) } #endif /* DEBUG */ - -void -xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - xfs_fs_vcmn_err(level, mp, fmt, ap); - va_end(ap); -} - -void -xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...) -{ - va_list ap; - -#ifdef DEBUG - xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); -#endif - - if (xfs_panic_mask && (xfs_panic_mask & panic_tag) - && (level & CE_ALERT)) { - level &= ~CE_ALERT; - level |= CE_PANIC; - cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG."); - } - va_start(ap, fmt); - xfs_fs_vcmn_err(level, mp, fmt, ap); - va_end(ap); -} - void xfs_error_report( const char *tag, diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index f338847f80b8..10dce5475f02 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h @@ -136,8 +136,8 @@ extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ (rf)))) -extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); -extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); +extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp); +extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud); #else #define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) #define xfs_errortag_add(tag, mp) (ENOSYS) @@ -162,21 +162,15 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); struct xfs_mount; -extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp, - char *fmt, va_list ap) - __attribute__ ((format (printf, 3, 0))); -extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, - char *fmt, ...) - __attribute__ ((format (printf, 4, 5))); -extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...) - __attribute__ ((format (printf, 3, 4))); - extern void xfs_hex_dump(void *p, int length); #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) #define xfs_fs_mount_cmn_err(f, fmt, args...) \ - ((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args)) + do { \ + if (!(f & XFS_MFSI_QUIET)) \ + cmn_err(CE_WARN, "XFS: " fmt, ## args); \ + } while (0) #endif /* __XFS_ERROR_H__ */ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index f56d30e8040c..cec89dd5d7d2 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -612,12 +612,13 @@ out: * * We cannot use an inode here for this - that will push dirty state back up * into the VFS and then periodic inode flushing will prevent log covering from - * making progress. Hence we log a field in the superblock instead. + * making progress. Hence we log a field in the superblock instead and use a + * synchronous transaction to ensure the superblock is immediately unpinned + * and can be written back. 
*/ int xfs_fs_log_dummy( - xfs_mount_t *mp, - int flags) + xfs_mount_t *mp) { xfs_trans_t *tp; int error; @@ -632,8 +633,7 @@ xfs_fs_log_dummy( /* log the UUID because it is an unchanging field */ xfs_mod_sb(tp, XFS_SB_UUID); - if (flags & SYNC_WAIT) - xfs_trans_set_sync(tp); + xfs_trans_set_sync(tp); return xfs_trans_commit(tp, 0); } diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h index a786c5212c1e..1b6a98b66886 100644 --- a/fs/xfs/xfs_fsops.h +++ b/fs/xfs/xfs_fsops.h @@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt); extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, xfs_fsop_resblks_t *outval); extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); -extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags); +extern int xfs_fs_log_dummy(struct xfs_mount *mp); #endif /* __XFS_FSOPS_H__ */ diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 0bf24b11d0c4..ae6fef1ff563 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -377,7 +377,7 @@ xfs_log_mount( cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); else { cmn_err(CE_NOTE, - "!Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", + "Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", mp->m_fsname); ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 204d8e5fa7fa..aa0ebb776903 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3800,7 +3800,7 @@ xlog_recover_finish( log->l_flags &= ~XLOG_RECOVERY_NEEDED; } else { cmn_err(CE_DEBUG, - "!Ending clean XFS mount for filesystem: %s\n", + "Ending clean XFS mount for filesystem: %s\n", log->l_mp->m_fsname); } return 0; diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index f80a067a4658..33dbc4e0ad62 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1137,7 +1137,7 @@ out_undo_fdblocks: if (blkdelta) xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd); out: - ASSERT(error = 0); + ASSERT(error == 0); return; } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 359ef11725a6..78ca429929f7 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -148,9 +148,7 @@ struct acpi_device_flags { u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; - u32 wake_capable:1; /* Wakeup(_PRW) supported? */ - u32 force_power_state:1; - u32 reserved:22; + u32 reserved:24; }; /* File System */ @@ -242,20 +240,14 @@ struct acpi_device_perf { struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ - u8 always_enabled:1; /* Run-wake devices that are always enabled */ u8 notifier_present:1; /* Wake-up notify handler has been installed */ }; -struct acpi_device_wakeup_state { - u8 enabled:1; -}; - struct acpi_device_wakeup { acpi_handle gpe_device; u64 gpe_number; u64 sleep_state; struct acpi_handle_list resources; - struct acpi_device_wakeup_state state; struct acpi_device_wakeup_flags flags; int prepare_count; int run_wake_count; @@ -328,8 +320,8 @@ void acpi_bus_data_handler(acpi_handle handle, void *context); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); -int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); +int acpi_bus_update_power(acpi_handle handle, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 53b7cfd924a3..241b8a04c83c 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20101013 +#define ACPI_CA_VERSION 0x20101209 #include "actypes.h" #include "actbl.h" @@ -228,6 +228,10 @@ acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle); acpi_status acpi_install_initialization_handler(acpi_init_handler handler, u32 function); +acpi_status +acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, + void *context); + acpi_status acpi_install_fixed_event_handler(u32 acpi_event, acpi_event_handler handler, void *context); @@ -258,11 +262,11 @@ acpi_remove_address_space_handler(acpi_handle device, acpi_status acpi_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, - u32 type, acpi_event_handler address, void *context); + u32 type, acpi_gpe_handler address, void *context); acpi_status acpi_remove_gpe_handler(acpi_handle gpe_device, - u32 gpe_number, acpi_event_handler address); + u32 gpe_number, acpi_gpe_handler address); #ifdef ACPI_FUTURE_USAGE acpi_status acpi_install_exception_handler(acpi_exception_handler handler); @@ -292,11 +296,13 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); -acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number); - acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); -acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action); +acpi_status +acpi_setup_gpe_for_wake(acpi_handle parent_device, + acpi_handle gpe_device, u32 gpe_number); + +acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action); acpi_status acpi_get_gpe_status(acpi_handle gpe_device, @@ -315,7 +321,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, acpi_status acpi_remove_gpe_block(acpi_handle gpe_device); -acpi_status acpi_update_gpes(void); +acpi_status acpi_update_all_gpes(void); /* * Resource interfaces diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index c637b75b9f3f..cd77aa75c962 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -119,7 +119,7 @@ struct acpi_whea_header { struct acpi_table_bert { struct acpi_table_header header; /* Common ACPI table header */ u32 region_length; /* Length of the boot error region */ - u64 address; /* Physical addresss of the error region */ + u64 address; /* 
Physical address of the error region */ }; /* Boot Error Region (not a subtable, pointed to by Address field above) */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 2b134b691e34..939a431a6ab6 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -656,33 +656,34 @@ typedef u32 acpi_event_status; #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 -/* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */ +/* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */ #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 -#define ACPI_GPE_COND_ENABLE 2 +#define ACPI_GPE_CONDITIONAL_ENABLE 2 /* * GPE info flags - Per GPE - * +-------+---+-+-+ - * | 7:4 |3:2|1|0| - * +-------+---+-+-+ - * | | | | - * | | | +--- Interrupt type: edge or level triggered - * | | +----- GPE can wake the system - * | +-------- Type of dispatch:to method, handler, or none - * +-------------- + * +-------+-+-+---+ + * | 7:4 |3|2|1:0| + * +-------+-+-+---+ + * | | | | + * | | | +-- Type of dispatch:to method, handler, notify, or none + * | | +----- Interrupt type: edge or level triggered + * | +------- Is a Wake GPE + * +------------ */ -#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 -#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 -#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 +#define ACPI_GPE_DISPATCH_NONE (u8) 0x00 +#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 +#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 +#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 +#define ACPI_GPE_DISPATCH_MASK (u8) 0x03 -#define ACPI_GPE_CAN_WAKE (u8) 0x02 +#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 +#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 +#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 -#define ACPI_GPE_DISPATCH_MASK (u8) 0x0C -#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04 -#define ACPI_GPE_DISPATCH_METHOD (u8) 0x08 -#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 +#define ACPI_GPE_CAN_WAKE (u8) 0x08 /* * Flags for GPE and Lock interfaces @@ -894,8 +895,19 @@ typedef void /* * Various handlers and callback procedures */ +typedef +void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, + acpi_handle device, + u32 event_number, void *context); + +#define ACPI_EVENT_TYPE_GPE 0 +#define ACPI_EVENT_TYPE_FIXED 1 + typedef u32(*acpi_event_handler) (void *context); +typedef +u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); + typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); @@ -951,6 +963,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 +/* GPE handler return values */ + +#define ACPI_REENABLE_GPE 0x80 + /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ diff --git a/include/acpi/apei.h b/include/acpi/apei.h index b3365025ff8d..c4dbb132d902 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -19,6 +19,12 @@ extern int hest_disable; extern int erst_disable; +#ifdef CONFIG_ACPI_APEI +void __init acpi_hest_init(void); +#else +static inline void acpi_hest_init(void) { return; } +#endif + typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data); int apei_hest_parse(apei_hest_func_t func, void *data); diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 1b62102fbb67..55192ac0cede 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -324,6 +324,12 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr); int 
acpi_processor_get_throttling_info(struct acpi_processor *pr); extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state, bool force); +/* + * Reevaluate whether the T-state is invalid after one cpu is + * onlined/offlined. In such case the flags.throttling will be updated. + */ +extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + unsigned long action); extern const struct file_operations acpi_processor_throttling_fops; extern void acpi_processor_throttling_init(void); /* in processor_idle.c */ diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 3da9e2742fa0..787abbb6d867 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -45,6 +45,9 @@ #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 6f3c6ae4fe03..f1eddf71dd0c 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -5,67 +5,108 @@ #ifdef CONFIG_MMU #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -/* - * Largely same as above, but only sets the access flags (dirty, - * accessed, and writable). Furthermore, we know it always gets set - * to a "more permissive" setting, which allows most architectures - * to optimize this. We return whether the PTE actually changed, which - * in turn instructs the caller to do things like update__mmu_cache. - * This used to be done in the caller, but sparc needs minor faults to - * force that call on sun4c so we changed this macro slightly - */ -#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ -({ \ - int __changed = !pte_same(*(__ptep), __entry); \ - if (__changed) { \ - set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ - flush_tlb_page(__vma, __address); \ - } \ - __changed; \ -}) +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define ptep_test_and_clear_young(__vma, __address, __ptep) \ -({ \ - pte_t __pte = *(__ptep); \ - int r = 1; \ - if (!pte_young(__pte)) \ - r = 0; \ - else \ - set_pte_at((__vma)->vm_mm, (__address), \ - (__ptep), pte_mkold(__pte)); \ - r; \ -}) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 
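[The MADV_HUGEPAGE (14) and MADV_NOHUGEPAGE (15) values introduced in the asm-generic/mman-common.h hunk above are ordinary madvise() advice codes. A minimal user-space usage sketch, not taken from this series, assuming a kernel built with CONFIG_TRANSPARENT_HUGEPAGE and defining the constant locally in case the installed uapi headers predate this change:]

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE	14	/* value from the mman-common.h hunk above */
#endif

int main(void)
{
	size_t len = 8UL << 20;	/* 8 MiB, a multiple of the 2 MiB PMD size on x86 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Advice only: the kernel may satisfy it at fault time or let
	 * khugepaged collapse the range later, or ignore it entirely;
	 * MADV_NOHUGEPAGE withdraws the hint for a range. */
	if (madvise(p, len, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");

	munmap(p, len);
	return 0;
}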
#endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -#define ptep_clear_flush_young(__vma, __address, __ptep) \ -({ \ - int __young; \ - __young = ptep_test_and_clear_young(__vma, __address, __ptep); \ - if (__young) \ - flush_tlb_page(__vma, __address); \ - __young; \ -}) +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR -#define ptep_get_and_clear(__mm, __address, __ptep) \ -({ \ - pte_t __pte = *(__ptep); \ - pte_clear((__mm), (__address), (__ptep)); \ - __pte; \ +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(mm, address, pmdp); + return pmd; }) +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + BUG(); + return __pmd(0); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \ -({ \ - pte_t __pte; \ - __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \ - __pte; \ -}) +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} #endif /* @@ -74,20 +115,25 @@ * not present, or in the process of an address space destruction. 
*/ #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL -#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \ -do { \ - pte_clear((__mm), (__address), (__ptep)); \ -} while (0) +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH -#define ptep_clear_flush(__vma, __address, __ptep) \ -({ \ - pte_t __pte; \ - __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \ - flush_tlb_page(__vma, __address); \ - __pte; \ -}) +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH +extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -99,8 +145,49 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +#endif + #ifndef __HAVE_ARCH_PTE_SAME -#define pte_same(A,B) (pte_val(A) == pte_val(B)) +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY @@ -348,6 +435,24 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size); #endif +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return 0; +} +#ifndef __HAVE_ARCH_PMD_WRITE +static inline int pmd_write(pmd_t pmd) +{ + BUG(); + return 0; +} +#endif /* __HAVE_ARCH_PMD_WRITE */ +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 05cbad03c5ab..68649336c4ad 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -200,7 +200,8 @@ #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ - *(.data..read_mostly) + *(.data..read_mostly) \ + . = ALIGN(align); #define CACHELINE_ALIGNED_DATA(align) \ . 
= ALIGN(align); \ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h new file mode 100644 index 000000000000..c5813c87de06 --- /dev/null +++ b/include/crypto/if_alg.h @@ -0,0 +1,92 @@ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_IF_ALG_H +#define _CRYPTO_IF_ALG_H + +#include +#include +#include +#include +#include + +#define ALG_MAX_PAGES 16 + +struct crypto_async_request; + +struct alg_sock { + /* struct sock must be the first member of struct alg_sock */ + struct sock sk; + + struct sock *parent; + + const struct af_alg_type *type; + void *private; +}; + +struct af_alg_completion { + struct completion completion; + int err; +}; + +struct af_alg_control { + struct af_alg_iv *iv; + int op; +}; + +struct af_alg_type { + void *(*bind)(const char *name, u32 type, u32 mask); + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*accept)(void *private, struct sock *sk); + + struct proto_ops *ops; + struct module *owner; + char name[14]; +}; + +struct af_alg_sgl { + struct scatterlist sg[ALG_MAX_PAGES]; + struct page *pages[ALG_MAX_PAGES]; +}; + +int af_alg_register_type(const struct af_alg_type *type); +int af_alg_unregister_type(const struct af_alg_type *type); + +int af_alg_release(struct socket *sock); +int af_alg_accept(struct sock *sk, struct socket *newsock); + +int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, + int write); +void af_alg_free_sg(struct af_alg_sgl *sgl); + +int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); + +int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); +void af_alg_complete(struct crypto_async_request *req, int err); + +static inline struct alg_sock *alg_sk(struct sock *sk) +{ + return (struct alg_sock *)sk; +} + +static inline void af_alg_release_parent(struct sock *sk) +{ + sock_put(alg_sk(sk)->parent); +} + +static inline void af_alg_init_completion(struct af_alg_completion *completion) +{ + init_completion(&completion->completion); +} + +#endif /* _CRYPTO_IF_ALG_H */ diff --git a/drivers/crypto/padlock.h b/include/crypto/padlock.h similarity index 82% rename from drivers/crypto/padlock.h rename to include/crypto/padlock.h index b728e4518bd1..d2cfa2ef49e8 100644 --- a/drivers/crypto/padlock.h +++ b/include/crypto/padlock.h @@ -15,9 +15,15 @@ #define PADLOCK_ALIGNMENT 16 -#define PFX "padlock: " +#define PFX KBUILD_MODNAME ": " #define PADLOCK_CRA_PRIORITY 300 #define PADLOCK_COMPOSITE_PRIORITY 400 +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + #endif /* _CRYPTO_PADLOCK_H */ diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 833d208c25d6..4fd95a323beb 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -68,6 +68,21 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) return (++sg)->length ? 
sg : (void *)sg_page(sg); } +static inline void scatterwalk_crypto_chain(struct scatterlist *head, + struct scatterlist *sg, + int chain, int num) +{ + if (chain) { + head->length += sg->length; + sg = scatterwalk_sg_next(sg); + } + + if (sg) + scatterwalk_sg_chain(head, num, sg); + else + sg_mark_end(head); +} + static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, struct scatter_walk *walk_out) { diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index aac27bd56e89..f22e7fe4b6db 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -121,6 +121,9 @@ int drm_fb_helper_setcolreg(unsigned regno, void drm_fb_helper_restore(void); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth); + int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index d1580c17cab3..2296d8b1931f 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -158,6 +158,7 @@ header-y += icmpv6.h header-y += if.h header-y += if_addr.h header-y += if_addrlabel.h +header-y += if_alg.h header-y += if_arcnet.h header-y += if_arp.h header-y += if_bonding.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 67c91b4418b0..eb176bb1b15b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -352,4 +352,14 @@ static inline int acpi_table_parse(char *id, return -1; } #endif /* !CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_SLEEP +int suspend_nvs_register(unsigned long start, unsigned long size); +#else +static inline int suspend_nvs_register(unsigned long a, unsigned long b) +{ + return 0; +} +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 64a7114a9394..c3d6512eded1 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -25,7 +25,7 @@ struct pt_regs; /* * This structure is used to hold the arguments that are used when loading binaries. 
*/ -struct linux_binprm{ +struct linux_binprm { char buf[BINPRM_BUF_SIZE]; #ifdef CONFIG_MMU struct vm_area_struct *vma; @@ -93,7 +93,6 @@ struct linux_binfmt { int (*load_shlib)(struct file *); int (*core_dump)(struct coredump_params *cprm); unsigned long min_coredump; /* minimal dump size */ - int hasvdso; }; extern int __register_binfmt(struct linux_binfmt *fmt, int insert); @@ -113,7 +112,7 @@ extern void unregister_binfmt(struct linux_binfmt *); extern int prepare_binprm(struct linux_binprm *); extern int __must_check remove_arg_zero(struct linux_binprm *); -extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); +extern int search_binary_handler(struct linux_binprm *, struct pt_regs *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 36ab42c9bb99..4d18ff34670a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -115,6 +115,7 @@ struct request { void *elevator_private3; struct gendisk *rq_disk; + struct hd_struct *part; unsigned long start_time; #ifdef CONFIG_BLK_CGROUP unsigned long long start_time_ns; @@ -646,7 +647,6 @@ static inline void rq_flush_dcache_pages(struct request *rq) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -extern void register_disk(struct gendisk *dev); extern void generic_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); @@ -1256,6 +1256,9 @@ struct block_device_operations { int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + /* ->media_changed() is DEPRECATED, use ->check_events() instead */ int (*media_changed) (struct gendisk *); void (*unlock_native_capacity) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); diff --git a/include/linux/cdev.h b/include/linux/cdev.h index f389e319a454..fb4591977b03 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -28,8 +28,6 @@ int cdev_add(struct cdev *, dev_t, unsigned); void cdev_del(struct cdev *); -int cdev_index(struct inode *inode); - void cd_forget(struct inode *); extern struct backing_dev_info directly_mappable_cdev_bdi; diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 78e904796622..35eae4b67503 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -946,6 +946,8 @@ struct cdrom_device_info { /* device-related storage */ unsigned int options : 30; /* options flags */ unsigned mc_flags : 2; /* media change buffer flags */ + unsigned int vfs_events; /* cached events for vfs path */ + unsigned int ioctl_events; /* cached events for ioctl path */ int use_count; /* number of times device opened */ char name[20]; /* name of the device type */ /* per-device flags */ @@ -965,6 +967,8 @@ struct cdrom_device_ops { int (*open) (struct cdrom_device_info *, int); void (*release) (struct cdrom_device_info *); int (*drive_status) (struct cdrom_device_info *, int); + unsigned int (*check_events) (struct cdrom_device_info *cdi, + unsigned int clearing, int slot); int (*media_changed) (struct cdrom_device_info *, int); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); @@ -993,6 +997,8 @@ extern int cdrom_open(struct 
cdrom_device_info *cdi, struct block_device *bdev, extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode); extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); +extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, + unsigned int clearing); extern int cdrom_media_changed(struct cdrom_device_info *); extern int register_cdrom(struct cdrom_device_info *cdi); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index c3c74aef289d..09dcc0c2ffd5 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -43,6 +43,10 @@ #define CEPH_FEATURE_NOSRCADDR (1<<1) #define CEPH_FEATURE_MONCLOCKCHECK (1<<2) #define CEPH_FEATURE_FLOCK (1<<3) +#define CEPH_FEATURE_SUBSCRIBE2 (1<<4) +#define CEPH_FEATURE_MONNAMES (1<<5) +#define CEPH_FEATURE_RECONNECT_SEQ (1<<6) +#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7) /* @@ -55,10 +59,10 @@ struct ceph_file_layout { __le32 fl_stripe_count; /* over this many objects */ __le32 fl_object_size; /* until objects are this big, then move to new objects */ - __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */ + __le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */ /* pg -> disk layout */ - __le32 fl_object_stripe_unit; /* for per-object parity, if any */ + __le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */ /* object -> pg layout */ __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */ @@ -69,6 +73,12 @@ struct ceph_file_layout { int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +struct ceph_dir_layout { + __u8 dl_dir_hash; /* see ceph_hash.h for ids */ + __u8 dl_unused1; + __u16 dl_unused2; + __u32 dl_unused3; +} __attribute__ ((packed)); /* crypto algorithms */ #define CEPH_CRYPTO_NONE 0x0 @@ -457,7 +467,7 @@ struct ceph_mds_reply_inode { struct ceph_timespec rctime; struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ } __attribute__ ((packed)); -/* followed by frag array, then symlink string, then xattr blob */ +/* followed by frag array, symlink string, dir layout, xattr blob */ /* reply_lease follows dname, and reply_inode */ struct ceph_mds_reply_lease { diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index a108b425fee2..c3011beac30d 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -110,17 +110,12 @@ struct ceph_msg_pos { /* * ceph_connection state bit flags - * - * QUEUED and BUSY are used together to ensure that only a single - * thread is currently opening, reading or writing data to the socket. */ #define LOSSYTX 0 /* we can close channel or drop messages on errors */ #define CONNECTING 1 #define NEGOTIATING 2 #define KEEPALIVE_PENDING 3 #define WRITE_PENDING 4 /* we have data ready to send */ -#define QUEUED 5 /* there is work queued on this connection */ -#define BUSY 6 /* work is being done */ #define STANDBY 8 /* no outgoing messages, socket closed. we keep * the ceph_connection around to maintain shared * state with the peer. 
*/ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed4ba111bc8d..ce104e33cd22 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -564,7 +564,7 @@ struct cgroup_iter { /* * To iterate across the tasks in a cgroup: * - * 1) call cgroup_iter_start to intialize an iterator + * 1) call cgroup_iter_start to initialize an iterator * * 2) call cgroup_iter_next() to retrieve member tasks until it * returns NULL or until you want to end the iteration diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 5ac51552d908..dfa2ed4c0d26 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -11,6 +11,9 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 3 +#define COMPACT_MODE_DIRECT_RECLAIM 0 +#define COMPACT_MODE_KSWAPD 1 + #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, @@ -21,7 +24,12 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, - int order, gfp_t gfp_mask, nodemask_t *mask); + int order, gfp_t gfp_mask, nodemask_t *mask, + bool sync); +extern unsigned long compaction_suitable(struct zone *zone, int order); +extern unsigned long compact_zone_order(struct zone *zone, int order, + gfp_t gfp_mask, bool sync, + int compact_mode); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -54,7 +62,20 @@ static inline bool compaction_deferred(struct zone *zone) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, - int order, gfp_t gfp_mask, nodemask_t *nodemask) + int order, gfp_t gfp_mask, nodemask_t *nodemask, + bool sync) +{ + return COMPACT_CONTINUE; +} + +static inline unsigned long compaction_suitable(struct zone *zone, int order) +{ + return COMPACT_SKIPPED; +} + +static inline unsigned long compact_zone_order(struct zone *zone, int order, + gfp_t gfp_mask, bool sync, + int compact_mode) { return COMPACT_CONTINUE; } diff --git a/include/linux/cper.h b/include/linux/cper.h index bf972f81e2a7..3104aaff5dd0 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -39,10 +39,12 @@ * Severity difinition for error_severity in struct cper_record_header * and section_severity in struct cper_section_descriptor */ -#define CPER_SEV_RECOVERABLE 0x0 -#define CPER_SEV_FATAL 0x1 -#define CPER_SEV_CORRECTED 0x2 -#define CPER_SEV_INFORMATIONAL 0x3 +enum { + CPER_SEV_RECOVERABLE, + CPER_SEV_FATAL, + CPER_SEV_CORRECTED, + CPER_SEV_INFORMATIONAL, +}; /* * Validation bits difinition for validation_bits in struct @@ -201,6 +203,47 @@ UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ 0xDF, 0xAA, 0x84, 0xEC) +#define CPER_PROC_VALID_TYPE 0x0001 +#define CPER_PROC_VALID_ISA 0x0002 +#define CPER_PROC_VALID_ERROR_TYPE 0x0004 +#define CPER_PROC_VALID_OPERATION 0x0008 +#define CPER_PROC_VALID_FLAGS 0x0010 +#define CPER_PROC_VALID_LEVEL 0x0020 +#define CPER_PROC_VALID_VERSION 0x0040 +#define CPER_PROC_VALID_BRAND_INFO 0x0080 +#define CPER_PROC_VALID_ID 0x0100 +#define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 +#define CPER_PROC_VALID_REQUESTOR_ID 0x0400 +#define CPER_PROC_VALID_RESPONDER_ID 0x0800 +#define CPER_PROC_VALID_IP 0x1000 + +#define CPER_MEM_VALID_ERROR_STATUS 0x0001 +#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002 +#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004 +#define CPER_MEM_VALID_NODE 0x0008 +#define 
CPER_MEM_VALID_CARD 0x0010 +#define CPER_MEM_VALID_MODULE 0x0020 +#define CPER_MEM_VALID_BANK 0x0040 +#define CPER_MEM_VALID_DEVICE 0x0080 +#define CPER_MEM_VALID_ROW 0x0100 +#define CPER_MEM_VALID_COLUMN 0x0200 +#define CPER_MEM_VALID_BIT_POSITION 0x0400 +#define CPER_MEM_VALID_REQUESTOR_ID 0x0800 +#define CPER_MEM_VALID_RESPONDER_ID 0x1000 +#define CPER_MEM_VALID_TARGET_ID 0x2000 +#define CPER_MEM_VALID_ERROR_TYPE 0x4000 + +#define CPER_PCIE_VALID_PORT_TYPE 0x0001 +#define CPER_PCIE_VALID_VERSION 0x0002 +#define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 +#define CPER_PCIE_VALID_DEVICE_ID 0x0008 +#define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 +#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 +#define CPER_PCIE_VALID_CAPABILITY 0x0040 +#define CPER_PCIE_VALID_AER_INFO 0x0080 + +#define CPER_PCIE_SLOT_SHIFT 3 + /* * All tables and structs must be byte-packed to match CPER * specification, since the tables are provided by the system BIOS @@ -306,6 +349,41 @@ struct cper_sec_mem_err { __u8 error_type; }; +struct cper_sec_pcie { + __u64 validation_bits; + __u32 port_type; + struct { + __u8 minor; + __u8 major; + __u8 reserved[2]; + } version; + __u16 command; + __u16 status; + __u32 reserved; + struct { + __u16 vendor_id; + __u16 device_id; + __u8 class_code[3]; + __u8 function; + __u8 device; + __u16 segment; + __u8 bus; + __u8 secondary_bus; + __u16 slot; + __u8 reserved; + } device_id; + struct { + __u32 lower; + __u32 upper; + } serial_number; + struct { + __u16 secondary_status; + __u16 control; + } bridge; + __u8 capability[60]; + __u8 aer_info[96]; +}; + /* Reset to default packing */ #pragma pack() diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 1be416bbbb82..36719ead50e8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -47,13 +47,7 @@ struct cpuidle_state { /* Idle State Flags */ #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? 
*/ -#define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */ -#define CPUIDLE_FLAG_POLL (0x10) /* no latency, no savings */ -#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */ -#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ -#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ -#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h index 6fc2bed368b8..0e7bf272ec2f 100644 --- a/include/linux/cramfs_fs.h +++ b/include/linux/cramfs_fs.h @@ -84,9 +84,11 @@ struct cramfs_super { | CRAMFS_FLAG_WRONG_SIGNATURE \ | CRAMFS_FLAG_SHIFTED_ROOT_OFFSET ) +#ifdef __KERNEL__ /* Uncompression interfaces to the underlying zlib */ int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen); int cramfs_uncompress_init(void); void cramfs_uncompress_exit(void); +#endif /* __KERNEL__ */ #endif diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index d5a1d4810b80..6fe2114f8ad2 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -103,14 +103,20 @@ static inline int cs5535_has_vsa2(void) #define GPIO_POSITIVE_EDGE_STS 0x48 #define GPIO_NEGATIVE_EDGE_STS 0x4C +#define GPIO_FLTR7_AMOUNT 0xD8 + #define GPIO_MAP_X 0xE0 #define GPIO_MAP_Y 0xE4 #define GPIO_MAP_Z 0xE8 #define GPIO_MAP_W 0xEC +#define GPIO_FE7_SEL 0xF7 + void cs5535_gpio_set(unsigned offset, unsigned int reg); void cs5535_gpio_clear(unsigned offset, unsigned int reg); int cs5535_gpio_isset(unsigned offset, unsigned int reg); +int cs5535_gpio_set_irq(unsigned group, unsigned irq); +void cs5535_gpio_setup_event(unsigned offset, int pair, int pme); /* MFGPTs */ diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h index f9b06ccc3e5c..8c0aef1ba5f5 100644 --- a/include/linux/decompress/inflate.h +++ b/include/linux/decompress/inflate.h @@ -1,9 +1,6 @@ #ifndef INFLATE_H #define INFLATE_H -/* Other housekeeping constants */ -#define INBUFSIZ 4096 - int gunzip(unsigned char *inbuf, int len, int(*fill)(void*, unsigned int), int(*flush)(void*, unsigned int), diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h index ad5ec1d0475e..4cb72b920c74 100644 --- a/include/linux/decompress/mm.h +++ b/include/linux/decompress/mm.h @@ -61,8 +61,6 @@ static void free(void *where) #define large_malloc(a) malloc(a) #define large_free(a) free(a) -#define set_error_fn(x) - #define INIT #else /* STATIC */ @@ -72,6 +70,7 @@ static void free(void *where) #include #include #include +#include #include /* Use defines rather than static inline in order to avoid spurious @@ -84,9 +83,6 @@ static void free(void *where) #define large_malloc(a) vmalloc(a) #define large_free(a) vfree(a) -static void(*error)(char *m); -#define set_error_fn(x) error = x; - #define INIT __init #define STATIC diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h new file mode 100644 index 000000000000..41728fc6c8a1 --- /dev/null +++ b/include/linux/decompress/unxz.h @@ -0,0 +1,19 @@ +/* + * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. 
+ */ + +#ifndef DECOMPRESS_UNXZ_H +#define DECOMPRESS_UNXZ_H + +int unxz(unsigned char *in, int in_size, + int (*fill)(void *dest, unsigned int size), + int (*flush)(void *src, unsigned int size), + unsigned char *out, int *in_used, + void (*error)(char *x)); + +#endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2970022faa63..272496d1fae4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -193,6 +193,13 @@ struct dm_target { char *error; }; +/* Each target can link one of these into the table */ +struct dm_target_callbacks { + struct list_head list; + int (*congested_fn) (struct dm_target_callbacks *, int); + void (*unplug_fn)(struct dm_target_callbacks *); +}; + int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -268,6 +275,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode, int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params); +/* + * Target_ctr should call this if it needs to add any callbacks. + */ +void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); + /* * Finally call this to make the table ready for use. */ diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 49eab360d5d4..78bbf47bbb96 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -44,7 +44,7 @@ * Remove a device, destroy any tables. * * DM_DEV_RENAME: - * Rename a device. + * Rename a device or set its uuid if none was previously supplied. * * DM_SUSPEND: * This performs both suspend and resume, depending which flag is @@ -267,9 +267,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 18 -#define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2010-06-29)" +#define DM_VERSION_MINOR 19 +#define DM_VERSION_PATCHLEVEL 1 +#define DM_VERSION_EXTRA "-ioctl (2011-01-07)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -322,4 +322,10 @@ enum { */ #define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ +/* + * If set, rename changes the uuid not the name. Only permitted + * if no uuid was previously supplied: an existing uuid cannot be changed. + */ +#define DM_UUID_FLAG (1 << 14) /* In */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h index 0c3c3a2110c4..eeace7d3ff15 100644 --- a/include/linux/dm-log-userspace.h +++ b/include/linux/dm-log-userspace.h @@ -370,6 +370,16 @@ #define DM_ULOG_REQUEST_TYPE(request_type) \ (DM_ULOG_REQUEST_MASK & (request_type)) +/* + * DM_ULOG_REQUEST_VERSION is incremented when there is a + * change to the way information is passed between kernel + * and userspace. This could be a structure change of + * dm_ulog_request or a change in the way requests are + * issued/handled. 
Changes are outlined here: + * version 1: Initial implementation + */ +#define DM_ULOG_REQUEST_VERSION 1 + struct dm_ulog_request { /* * The local unique identifier (luid) and the universally unique @@ -383,8 +393,9 @@ struct dm_ulog_request { */ uint64_t luid; char uuid[DM_UUID_LEN]; - char padding[7]; /* Padding because DM_UUID_LEN = 129 */ + char padding[3]; /* Padding because DM_UUID_LEN = 129 */ + uint32_t version; /* See DM_ULOG_REQUEST_VERSION */ int32_t error; /* Used to report back processing errors */ uint32_t seq; /* Sequence number for request */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index bec8b82889bf..ab68f785fd19 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -98,6 +98,17 @@ static inline int is_broadcast_ether_addr(const u8 *addr) return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff; } +/** + * is_unicast_ether_addr - Determine if the Ethernet address is unicast + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a unicast address. + */ +static inline int is_unicast_ether_addr(const u8 *addr) +{ + return !is_multicast_ether_addr(addr); +} + /** * is_valid_ether_addr - Determine if the given Ethernet address is valid * @addr: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/falloc.h b/include/linux/falloc.h index 3c155107d61f..73e0b628e058 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -2,6 +2,7 @@ #define _FALLOC_H_ #define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ +#define FALLOC_FL_PUNCH_HOLE 0x02 /* de-allocates range */ #ifdef __KERNEL__ diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 68c642d8843d..59ea406be7f6 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -273,7 +273,7 @@ struct fw_cdev_event_iso_interrupt { * @closure: See &fw_cdev_event_common; * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL - * @completed: Offset into the receive buffer; data before this offest is valid + * @completed: Offset into the receive buffer; data before this offset is valid * * This event is sent in multichannel contexts (context type * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer diff --git a/include/linux/fs.h b/include/linux/fs.h index f84d9928bdb1..08824e0ef381 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -664,8 +664,9 @@ struct block_device { void * bd_claiming; void * bd_holder; int bd_holders; + bool bd_write_holder; #ifdef CONFIG_SYSFS - struct list_head bd_holder_list; + struct list_head bd_holder_disks; #endif struct block_device * bd_contains; unsigned bd_block_size; @@ -1065,7 +1066,6 @@ struct lock_manager_operations { int (*fl_grant)(struct file_lock *, struct file_lock *, int); void (*fl_release_private)(struct file_lock *); void (*fl_break)(struct file_lock *); - int (*fl_mylease)(struct file_lock *, struct file_lock *); int (*fl_change)(struct file_lock **, int); }; @@ -1423,6 +1423,7 @@ struct super_block { * generic_show_options() */ char __rcu *s_options; + const struct dentry_operations *s_d_op; /* default d_op for dentries */ }; extern struct timespec current_fs_time(struct super_block *sb); @@ -1834,7 +1835,9 @@ struct super_block *sget(struct file_system_type *type, int (*set)(struct super_block *,void *), void *data); extern struct dentry *mount_pseudo(struct file_system_type *, char *, - const 
struct super_operations *ops, unsigned long); + const struct super_operations *ops, + const struct dentry_operations *dops, + unsigned long); extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); static inline void sb_mark_dirty(struct super_block *sb) @@ -2016,7 +2019,6 @@ extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); -extern struct block_device *open_by_devnum(dev_t, fmode_t); extern void invalidate_bdev(struct block_device *); extern int sync_blockdev(struct block_device *bdev); extern struct super_block *freeze_bdev(struct block_device *); @@ -2047,16 +2049,26 @@ extern const struct file_operations def_fifo_fops; extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -extern int blkdev_get(struct block_device *, fmode_t); -extern int blkdev_put(struct block_device *, fmode_t); -extern int bd_claim(struct block_device *, void *); -extern void bd_release(struct block_device *); +extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, + void *holder); +extern int blkdev_put(struct block_device *bdev, fmode_t mode); #ifdef CONFIG_SYSFS -extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); -extern void bd_release_from_disk(struct block_device *, struct gendisk *); +extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +extern void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk); #else -#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) -#define bd_release_from_disk(bdev, disk) bd_release(bdev) +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} #endif #endif @@ -2092,8 +2104,6 @@ static inline void unregister_chrdev(unsigned int major, const char *name) extern const char *__bdevname(dev_t, char *buffer); extern const char *bdevname(struct block_device *bdev, char *buffer); extern struct block_device *lookup_bdev(const char *); -extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *); -extern void close_bdev_exclusive(struct block_device *, fmode_t); extern void blkdev_show(struct seq_file *,off_t); #else diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 7a7b9c1644e4..c0d5f6945c1e 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -115,6 +115,7 @@ struct hd_struct { #else struct disk_stats dkstats; #endif + atomic_t ref; struct rcu_head rcu_head; }; @@ -127,6 +128,11 @@ struct hd_struct { #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ #define GENHD_FL_NATIVE_CAPACITY 128 +enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ + DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ +}; + #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) @@ -143,6 +149,8 @@ struct disk_part_tbl { struct hd_struct __rcu *part[]; }; +struct disk_events; + struct gendisk { /* major, first_minor and minors 
are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -154,6 +162,10 @@ struct gendisk { char disk_name[DISK_NAME_LEN]; /* name of major driver */ char *(*devnode)(struct gendisk *gd, mode_t *mode); + + unsigned int events; /* supported events */ + unsigned int async_events; /* async events, subset of all */ + /* Array of pointers to partitions indexed by partno. * Protected with matching bdev lock but stat and other * non-critical accesses use RCU. Always access through @@ -171,9 +183,8 @@ struct gendisk { struct kobject *slave_dir; struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ - struct work_struct async_notify; + struct disk_events *ev; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity *integrity; #endif @@ -395,7 +406,6 @@ extern void part_round_stats(int cpu, struct hd_struct *part); /* block/genhd.c */ extern void add_disk(struct gendisk *disk); extern void del_gendisk(struct gendisk *gp); -extern void unlink_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); @@ -407,6 +417,11 @@ static inline int get_disk_ro(struct gendisk *disk) return disk->part0.policy; } +extern void disk_block_events(struct gendisk *disk); +extern void disk_unblock_events(struct gendisk *disk); +extern void disk_check_events(struct gendisk *disk); +extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); + /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk); extern void rand_initialize_disk(struct gendisk *disk); @@ -583,6 +598,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk, sector_t len, int flags, struct partition_meta_info *info); +extern void __delete_partition(struct hd_struct *); extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); @@ -611,6 +627,29 @@ extern ssize_t part_fail_store(struct device *dev, const char *buf, size_t count); #endif /* CONFIG_FAIL_MAKE_REQUEST */ +static inline void hd_ref_init(struct hd_struct *part) +{ + atomic_set(&part->ref, 1); + smp_mb(); +} + +static inline void hd_struct_get(struct hd_struct *part) +{ + atomic_inc(&part->ref); + smp_mb__after_atomic_inc(); +} + +static inline int hd_struct_try_get(struct hd_struct *part) +{ + return atomic_inc_not_zero(&part->ref); +} + +static inline void hd_struct_put(struct hd_struct *part) +{ + if (atomic_dec_and_test(&part->ref)) + __delete_partition(part); +} + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f54adfcbec9c..a3b148a91874 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -34,6 +34,7 @@ struct vm_area_struct; #else #define ___GFP_NOTRACK 0 #endif +#define ___GFP_NO_KSWAPD 0x400000u /* * GFP bitmasks.. @@ -81,13 +82,15 @@ struct vm_area_struct; #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ +#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) + /* * This may seem redundant, but it's a way of annotating false positives vs. * allocations that simply cannot be supported (e.g. page tables). 
*/ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) -#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 23 /* Room for 23 __GFP_FOO bits */ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* This equals 0, but use constants in case they ever change */ @@ -106,6 +109,9 @@ struct vm_area_struct; __GFP_HARDWALL | __GFP_HIGHMEM | \ __GFP_MOVABLE) #define GFP_IOFS (__GFP_IO | __GFP_FS) +#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ + __GFP_NO_KSWAPD) #ifdef CONFIG_NUMA #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) @@ -325,14 +331,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) { return alloc_pages_current(gfp_mask, order); } -extern struct page *alloc_page_vma(gfp_t gfp_mask, +extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr); #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0) +#define alloc_pages_vma(gfp_mask, order, vma, addr) \ + alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) +#define alloc_page_vma(gfp_mask, vma, addr) \ + alloc_pages_vma(gfp_mask, 0, vma, addr) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/gpio.h b/include/linux/gpio.h index e41f7dd1ae67..32720baf70f1 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -13,6 +13,7 @@ #include struct device; +struct gpio; struct gpio_chip; /* @@ -34,6 +35,17 @@ static inline int gpio_request(unsigned gpio, const char *label) return -ENOSYS; } +static inline int gpio_request_one(unsigned gpio, + unsigned long flags, const char *label) +{ + return -ENOSYS; +} + +static inline int gpio_request_array(struct gpio *array, size_t num) +{ + return -ENOSYS; +} + static inline void gpio_free(unsigned gpio) { might_sleep(); @@ -42,6 +54,14 @@ static inline void gpio_free(unsigned gpio) WARN_ON(1); } +static inline void gpio_free_array(struct gpio *array, size_t num) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + static inline int gpio_direction_input(unsigned gpio) { return -ENOSYS; diff --git a/include/linux/hid.h b/include/linux/hid.h index 20b9801f669b..d91c25e253c8 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -402,7 +402,7 @@ struct hid_field { __u16 dpad; /* dpad input code */ }; -#define HID_MAX_FIELDS 64 +#define HID_MAX_FIELDS 128 struct hid_report { struct list_head list; @@ -593,6 +593,7 @@ struct hid_usage_id { * @report_fixup: called before report descriptor parsing (NULL means nop) * @input_mapping: invoked on input registering before mapping an usage * @input_mapped: invoked on input registering after mapping an usage + * @feature_mapping: invoked on feature registering * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) @@ -636,6 +637,9 @@ struct hid_driver { int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); + void (*feature_mapping)(struct hid_device *hdev, + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage); #ifdef 
CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h new file mode 100644 index 000000000000..8e6c8c42bc3c --- /dev/null +++ b/include/linux/huge_mm.h @@ -0,0 +1,179 @@ +#ifndef _LINUX_HUGE_MM_H +#define _LINUX_HUGE_MM_H + +extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + unsigned int flags); +extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma); +extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + pmd_t orig_pmd); +extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm); +extern struct page *follow_trans_huge_pmd(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmd, + unsigned int flags); +extern int zap_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd); +extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned char *vec); +extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, pgprot_t newprot); + +enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_FLAG, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, +#ifdef CONFIG_DEBUG_VM + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, +#endif +}; + +enum page_check_address_pmd_flag { + PAGE_CHECK_ADDRESS_PMD_FLAG, + PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, + PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, +}; +extern pmd_t *page_check_address_pmd(struct page *page, + struct mm_struct *mm, + unsigned long address, + enum page_check_address_pmd_flag flag); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define HPAGE_PMD_SHIFT HPAGE_SHIFT +#define HPAGE_PMD_MASK HPAGE_MASK +#define HPAGE_PMD_SIZE HPAGE_SIZE + +#define transparent_hugepage_enabled(__vma) \ + ((transparent_hugepage_flags & \ + (1<vm_flags & VM_HUGEPAGE))) && \ + !((__vma)->vm_flags & VM_NOHUGEPAGE)) +#define transparent_hugepage_defrag(__vma) \ + ((transparent_hugepage_flags & \ + (1<vm_flags & VM_HUGEPAGE)) +#ifdef CONFIG_DEBUG_VM +#define transparent_hugepage_debug_cow() \ + (transparent_hugepage_flags & \ + (1<root->lock); \ + /* \ + * spin_unlock_wait() is just a loop in C and so the \ + * CPU can reorder anything around it. 
\ + */ \ + smp_mb(); \ + BUG_ON(pmd_trans_splitting(*____pmd) || \ + pmd_trans_huge(*____pmd)); \ + } while (0) +#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) +#define HPAGE_PMD_NR (1< MAX_ORDER +#error "hugepages can't be allocated by the buddy allocator" +#endif +extern int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice); +extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next); +static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + return; + __vma_adjust_trans_huge(vma, start, end, adjust_next); +} +static inline int hpage_nr_pages(struct page *page) +{ + if (unlikely(PageTransHuge(page))) + return HPAGE_PMD_NR; + return 1; +} +static inline struct page *compound_trans_head(struct page *page) +{ + if (PageTail(page)) { + struct page *head; + head = page->first_page; + smp_rmb(); + /* + * head may be a dangling pointer. + * __split_huge_page_refcount clears PageTail before + * overwriting first_page, so if PageTail is still + * there it means the head pointer isn't dangling. + */ + if (PageTail(page)) + return head; + } + return page; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +#define HPAGE_PMD_SHIFT ({ BUG(); 0; }) +#define HPAGE_PMD_MASK ({ BUG(); 0; }) +#define HPAGE_PMD_SIZE ({ BUG(); 0; }) + +#define hpage_nr_pages(x) 1 + +#define transparent_hugepage_enabled(__vma) 0 + +#define transparent_hugepage_flags 0UL +static inline int split_huge_page(struct page *page) +{ + return 0; +} +#define split_huge_page_pmd(__mm, __pmd) \ + do { } while (0) +#define wait_split_huge_page(__anon_vma, __pmd) \ + do { } while (0) +#define compound_trans_head(page) compound_head(page) +static inline int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice) +{ + BUG(); + return 0; +} +static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h new file mode 100644 index 000000000000..0f9acce5b1ff --- /dev/null +++ b/include/linux/if_alg.h @@ -0,0 +1,40 @@ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _LINUX_IF_ALG_H +#define _LINUX_IF_ALG_H + +#include + +struct sockaddr_alg { + __u16 salg_family; + __u8 salg_type[14]; + __u32 salg_feat; + __u32 salg_mask; + __u8 salg_name[64]; +}; + +struct af_alg_iv { + __u32 ivlen; + __u8 iv[0]; +}; + +/* Socket options */ +#define ALG_SET_KEY 1 +#define ALG_SET_IV 2 +#define ALG_SET_OP 3 + +/* Operations */ +#define ALG_OP_DECRYPT 0 +#define ALG_OP_ENCRYPT 1 + +#endif /* _LINUX_IF_ALG_H */ diff --git a/include/linux/input.h b/include/linux/input.h index c4e9d91ec979..e428382ca28a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -802,6 +802,7 @@ struct input_keymap_entry { #define SW_CAMERA_LENS_COVER 0x09 /* set = lens covered */ #define SW_KEYPAD_SLIDE 0x0a /* set = keypad slide out */ #define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ +#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ #define SW_MAX 0x0f #define SW_CNT (SW_MAX+1) diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h new file mode 100644 index 000000000000..1affd0ddfa9d --- /dev/null +++ b/include/linux/input/as5011.h @@ -0,0 +1,20 @@ +#ifndef _AS5011_H +#define _AS5011_H + +/* + * Copyright (c) 2010, 2011 Fabien Marteau + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +struct as5011_platform_data { + unsigned int button_gpio; + unsigned int axis_irq; /* irq number */ + unsigned long axis_irqflags; + char xp, xn; /* threshold for x axis */ + char yp, yn; /* threshold for y axis */ +}; + +#endif /* _AS5011_H */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 65aae34759de..045f2f275cd0 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -454,6 +454,44 @@ unsigned int ipmi_addr_length(int addr_type); /* Validate that the given IPMI address is valid. */ int ipmi_validate_addr(struct ipmi_addr *addr, int len); +/* + * How did the IPMI driver find out about the device? + */ +enum ipmi_addr_src { + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, + SI_PCI, SI_DEVICETREE, SI_DEFAULT +}; + +union ipmi_smi_info_union { + /* + * the acpi_info element is defined for the SI_ACPI + * address type + */ + struct { + void *acpi_handle; + } acpi_info; +}; + +struct ipmi_smi_info { + enum ipmi_addr_src addr_src; + + /* + * Base device for the interface. Don't forget to put this when + * you are done. + */ + struct device *dev; + + /* + * The addr_info provides more detailed info for some IPMI + * devices, depending on the addr_src. Currently only SI_ACPI + * info is provided. + */ + union ipmi_smi_info_union addr_info; +}; + +/* This is to get the private info of ipmi_smi_t */ +extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); + #endif /* __KERNEL__ */ diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 4b48318ac542..906590aa6907 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -39,6 +39,7 @@ #include #include #include +#include /* This files describes the interface for IPMI system management interface drivers to bind into the IPMI message handler. */ @@ -86,6 +87,13 @@ struct ipmi_smi_handlers { int (*start_processing)(void *send_info, ipmi_smi_t new_intf); + /* + * Get the detailed private info of the low level interface and store + * it into the structure of ipmi_smi_data. 
For example: the + * ACPI device handle will be returned for the pnp_acpi IPMI device. + */ + int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); + /* Called to enqueue an SMI message to be sent. This operation is not allowed to fail. If an error occurs, it should report back the error in a received message. It may diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 979c68cc7458..6a64c6fa81af 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -57,7 +57,7 @@ struct irq_desc { #endif struct timer_rand_state *timer_rand_state; - unsigned int *kstat_irqs; + unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; /* IRQ action list */ unsigned int status; /* IRQ status */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d0fbc043de60..5a9d9059520b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -143,9 +143,22 @@ extern int _cond_resched(void); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -#define abs(x) ({ \ - long __x = (x); \ - (__x < 0) ? -__x : __x; \ +/* + * abs() handles unsigned and signed longs, ints, shorts and chars. For all + * input types abs() returns a signed long. + * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() + * for those. + */ +#define abs(x) ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } else { \ + int __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } \ + ret; \ }) #define abs64(x) ({ \ @@ -587,6 +600,13 @@ struct sysinfo { #define NUMA_BUILD 0 #endif +/* This helps us avoid #ifdef CONFIG_COMPACTION */ +#ifdef CONFIG_COMPACTION +#define COMPACTION_BUILD 1 +#else +#define COMPACTION_BUILD 0 +#endif + /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 44e83ba12b5b..0cce2db580c3 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -46,16 +46,14 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); extern unsigned long long nr_context_switches(void); #ifndef CONFIG_GENERIC_HARDIRQS -#define kstat_irqs_this_cpu(irq) \ - (this_cpu_read(kstat.irqs[irq]) struct irq_desc; static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) { - kstat_this_cpu.irqs[irq]++; - kstat_this_cpu.irqs_sum++; + __this_cpu_inc(kstat.irqs[irq]); + __this_cpu_inc(kstat.irqs_sum); } static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) @@ -65,17 +63,18 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) #else #include extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); -#define kstat_irqs_this_cpu(DESC) \ - ((DESC)->kstat_irqs[smp_processor_id()]) -#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\ - ((DESC)->kstat_irqs[smp_processor_id()]++);\ - kstat_this_cpu.irqs_sum++; } while (0) + +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ +do { \ + __this_cpu_inc(*(DESC)->kstat_irqs); \ + __this_cpu_inc(kstat.irqs_sum); \ +} while (0) #endif static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) { - kstat_this_cpu.softirqs[irq]++; + __this_cpu_inc(kstat.softirqs[irq]); } static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h new file mode 100644 index 000000000000..6b394f0b5148 --- /dev/null +++ 
b/include/linux/khugepaged.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_KHUGEPAGED_H +#define _LINUX_KHUGEPAGED_H + +#include /* MMF_VM_HUGEPAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int __khugepaged_enter(struct mm_struct *mm); +extern void __khugepaged_exit(struct mm_struct *mm); +extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); + +#define khugepaged_enabled() \ + (transparent_hugepage_flags & \ + ((1<flags)) + return __khugepaged_enter(mm); + return 0; +} + +static inline void khugepaged_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) + __khugepaged_exit(mm); +} + +static inline int khugepaged_enter(struct vm_area_struct *vma) +{ + if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) + if ((khugepaged_always() || + (khugepaged_req_madv() && + vma->vm_flags & VM_HUGEPAGE)) && + !(vma->vm_flags & VM_NOHUGEPAGE)) + if (__khugepaged_enter(vma->vm_mm)) + return -ENOMEM; + return 0; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} +static inline void khugepaged_exit(struct mm_struct *mm) +{ +} +static inline int khugepaged_enter(struct vm_area_struct *vma) +{ + return 0; +} +static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) +{ + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 24b44145a886..2a0d7d651dc3 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -18,6 +18,10 @@ enum kmsg_dump_reason { KMSG_DUMP_OOPS, KMSG_DUMP_PANIC, KMSG_DUMP_KEXEC, + KMSG_DUMP_RESTART, + KMSG_DUMP_HALT, + KMSG_DUMP_POWEROFF, + KMSG_DUMP_EMERG, }; /** diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 919ae53adc5c..ea2dc1a2e13d 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -540,6 +540,7 @@ struct kvm_ppc_pvinfo { #endif #define KVM_CAP_PPC_GET_PVINFO 57 #define KVM_CAP_PPC_IRQ_LEVEL 58 +#define KVM_CAP_ASYNC_PF 59 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a0557422715e..b5021db21858 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -40,6 +42,7 @@ #define KVM_REQ_KICK 9 #define KVM_REQ_DEACTIVATE_FPU 10 #define KVM_REQ_EVENT 11 +#define KVM_REQ_APF_HALT 12 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 @@ -74,6 +77,27 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); +#ifdef CONFIG_KVM_ASYNC_PF +struct kvm_async_pf { + struct work_struct work; + struct list_head link; + struct list_head queue; + struct kvm_vcpu *vcpu; + struct mm_struct *mm; + gva_t gva; + unsigned long addr; + struct kvm_arch_async_pf arch; + struct page *page; + bool done; +}; + +void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); +void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + struct kvm_arch_async_pf *arch); +int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); +#endif + struct kvm_vcpu { struct kvm *kvm; #ifdef CONFIG_PREEMPT_NOTIFIERS @@ -104,6 +128,15 @@ struct kvm_vcpu { gpa_t mmio_phys_addr; #endif +#ifdef CONFIG_KVM_ASYNC_PF + struct { + u32 queued; + struct list_head queue; + struct list_head done; + spinlock_t lock; + } async_pf; +#endif + struct kvm_vcpu_arch 
arch; }; @@ -113,16 +146,19 @@ struct kvm_vcpu { */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) +struct kvm_lpage_info { + unsigned long rmap_pde; + int write_count; +}; + struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; unsigned long flags; unsigned long *rmap; unsigned long *dirty_bitmap; - struct { - unsigned long rmap_pde; - int write_count; - } *lpage_info[KVM_NR_PAGE_SIZES - 1]; + unsigned long *dirty_bitmap_head; + struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; unsigned long userspace_addr; int user_alloc; int id; @@ -169,6 +205,7 @@ struct kvm_irq_routing_table {}; struct kvm_memslots { int nmemslots; + u64 generation; struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS]; }; @@ -206,6 +243,10 @@ struct kvm { struct mutex irq_lock; #ifdef CONFIG_HAVE_KVM_IRQCHIP + /* + * Update side is protected by irq_lock and, + * if configured, irqfds.lock. + */ struct kvm_irq_routing_table __rcu *irq_routing; struct hlist_head mask_notifier_list; struct hlist_head irq_ack_notifier_list; @@ -216,6 +257,7 @@ struct kvm { unsigned long mmu_notifier_seq; long mmu_notifier_count; #endif + long tlbs_dirty; }; /* The guest did something we don't support. */ @@ -302,7 +344,11 @@ void kvm_set_page_accessed(struct page *page); pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr); pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); +pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, + bool write_fault, bool *writable); pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); +pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, + bool *writable); pfn_t gfn_to_pfn_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn); int memslot_id(struct kvm *kvm, gfn_t gfn); @@ -321,18 +367,25 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, + gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); void kvm_resched(struct kvm_vcpu *vcpu); void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); + void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); @@ -398,7 +451,19 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); void kvm_free_physmem(struct kvm *kvm); -struct kvm *kvm_arch_create_vm(void); +#ifndef __KVM_HAVE_ARCH_VM_ALLOC +static inline struct kvm *kvm_arch_alloc_vm(void) +{ + return kzalloc(sizeof(struct kvm), GFP_KERNEL); +} + +static inline void kvm_arch_free_vm(struct kvm *kvm) +{ + kfree(kvm); +} +#endif + +int kvm_arch_init_vm(struct kvm *kvm); void kvm_arch_destroy_vm(struct kvm *kvm); void kvm_free_all_assigned_devices(struct kvm *kvm); void kvm_arch_sync_events(struct kvm *kvm); @@ -414,16 +479,8 @@ struct 
kvm_irq_ack_notifier { void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; -#define KVM_ASSIGNED_MSIX_PENDING 0x1 -struct kvm_guest_msix_entry { - u32 vector; - u16 entry; - u16 flags; -}; - struct kvm_assigned_dev_kernel { struct kvm_irq_ack_notifier ack_notifier; - struct work_struct interrupt_work; struct list_head list; int assigned_dev_id; int host_segnr; @@ -434,13 +491,14 @@ struct kvm_assigned_dev_kernel { bool host_irq_disabled; struct msix_entry *host_msix_entries; int guest_irq; - struct kvm_guest_msix_entry *guest_msix_entries; + struct msix_entry *guest_msix_entries; unsigned long irq_requested_type; int irq_source_id; int flags; struct pci_dev *dev; struct kvm *kvm; - spinlock_t assigned_dev_lock; + spinlock_t intx_lock; + char irq_name[32]; }; struct kvm_irq_mask_notifier { @@ -462,6 +520,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, unsigned long *deliver_bitmask); #endif int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level); +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, + int irq_source_id, int level); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); @@ -603,17 +663,28 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {} void kvm_eventfd_init(struct kvm *kvm); int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); void kvm_irqfd_release(struct kvm *kvm); +void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *); int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); #else static inline void kvm_eventfd_init(struct kvm *kvm) {} + static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) { return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} + +#ifdef CONFIG_HAVE_KVM_IRQCHIP +static inline void kvm_irq_routing_update(struct kvm *kvm, + struct kvm_irq_routing_table *irq_rt) +{ + rcu_assign_pointer(kvm->irq_routing, irq_rt); +} +#endif + static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { return -ENOSYS; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 7ac0d4eee430..fa7cc7244cbd 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -67,4 +67,11 @@ struct kvm_lapic_irq { u32 dest_id; }; +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + unsigned long hva; + struct kvm_memory_slot *memslot; +}; + #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h index 38368d785f08..fd548d2a8775 100644 --- a/include/linux/leds-lp5521.h +++ b/include/linux/leds-lp5521.h @@ -42,6 +42,7 @@ struct lp5521_platform_data { int (*setup_resources)(void); void (*release_resources)(void); void (*enable)(bool state); + const char *label; }; #endif /* __LINUX_LP5521_H */ diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h index 796747637b80..2694289babd0 100644 --- a/include/linux/leds-lp5523.h +++ b/include/linux/leds-lp5523.h @@ -42,6 +42,7 @@ struct lp5523_platform_data { int (*setup_resources)(void); void (*release_resources)(void); void (*enable)(bool state); + const char *label; }; #endif /* __LINUX_LP5523_H */ diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 9ee97e7f2be4..5bad17d1acde 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -62,7 +62,8 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h, struct hlist_bl_node *n) { 
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); - LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK)); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); } diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 159a0762aeaf..6a576f989437 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,6 +25,11 @@ struct page_cgroup; struct page; struct mm_struct; +/* Stats that can be updated by kernel. */ +enum mem_cgroup_page_stat_item { + MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ +}; + extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -93,7 +98,7 @@ extern int mem_cgroup_prepare_migration(struct page *page, struct page *newpage, struct mem_cgroup **ptr); extern void mem_cgroup_end_migration(struct mem_cgroup *mem, - struct page *oldpage, struct page *newpage); + struct page *oldpage, struct page *newpage, bool migration_ok); /* * For memory reclaim. @@ -121,7 +126,22 @@ static inline bool mem_cgroup_disabled(void) return false; } -void mem_cgroup_update_file_mapped(struct page *page, int val); +void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx, + int val); + +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ + mem_cgroup_update_page_stat(page, idx, 1); +} + +static inline void mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ + mem_cgroup_update_page_stat(page, idx, -1); +} + unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask); u64 mem_cgroup_get_limit(struct mem_cgroup *mem); @@ -231,8 +251,7 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage, } static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, - struct page *oldpage, - struct page *newpage) + struct page *oldpage, struct page *newpage, bool migration_ok) { } @@ -293,8 +312,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } -static inline void mem_cgroup_update_file_mapped(struct page *page, - int val) +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ +} + +static inline void mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) { } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 31c237a00c48..24376fe7ee68 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -13,12 +13,16 @@ struct mem_section; #ifdef CONFIG_MEMORY_HOTPLUG /* - * Types for free bootmem. - * The normal smallest mapcount is -1. Here is smaller value than it. + * Types for free bootmem stored in page->lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. 
*/ -#define SECTION_INFO (-1 - 1) -#define MIX_SECTION_INFO (-1 - 2) -#define NODE_INFO (-1 - 3) +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; /* * pgdat resizing functions diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h index d63b6050b183..37f56b7c4c15 100644 --- a/include/linux/mfd/ab8500.h +++ b/include/linux/mfd/ab8500.h @@ -74,32 +74,37 @@ #define AB8500_INT_ACC_DETECT_21DB_F 37 #define AB8500_INT_ACC_DETECT_21DB_R 38 #define AB8500_INT_GP_SW_ADC_CONV_END 39 -#define AB8500_INT_BTEMP_LOW 72 -#define AB8500_INT_BTEMP_LOW_MEDIUM 73 -#define AB8500_INT_BTEMP_MEDIUM_HIGH 74 -#define AB8500_INT_BTEMP_HIGH 75 -#define AB8500_INT_USB_CHARGER_NOT_OK 81 -#define AB8500_INT_ID_WAKEUP_R 82 -#define AB8500_INT_ID_DET_R1R 84 -#define AB8500_INT_ID_DET_R2R 85 -#define AB8500_INT_ID_DET_R3R 86 -#define AB8500_INT_ID_DET_R4R 87 -#define AB8500_INT_ID_WAKEUP_F 88 -#define AB8500_INT_ID_DET_R1F 90 -#define AB8500_INT_ID_DET_R2F 91 -#define AB8500_INT_ID_DET_R3F 92 -#define AB8500_INT_ID_DET_R4F 93 -#define AB8500_INT_USB_CHG_DET_DONE 94 -#define AB8500_INT_USB_CH_TH_PROT_F 96 -#define AB8500_INT_USB_CH_TH_PROP_R 97 -#define AB8500_INT_MAIN_CH_TH_PROP_F 98 -#define AB8500_INT_MAIN_CH_TH_PROT_R 99 -#define AB8500_INT_USB_CHARGER_NOT_OKF 103 +#define AB8500_INT_ADP_SOURCE_ERROR 72 +#define AB8500_INT_ADP_SINK_ERROR 73 +#define AB8500_INT_ADP_PROBE_PLUG 74 +#define AB8500_INT_ADP_PROBE_UNPLUG 75 +#define AB8500_INT_ADP_SENSE_OFF 76 +#define AB8500_INT_USB_PHY_POWER_ERR 78 +#define AB8500_INT_USB_LINK_STATUS 79 +#define AB8500_INT_BTEMP_LOW 80 +#define AB8500_INT_BTEMP_LOW_MEDIUM 81 +#define AB8500_INT_BTEMP_MEDIUM_HIGH 82 +#define AB8500_INT_BTEMP_HIGH 83 +#define AB8500_INT_USB_CHARGER_NOT_OK 89 +#define AB8500_INT_ID_WAKEUP_R 90 +#define AB8500_INT_ID_DET_R1R 92 +#define AB8500_INT_ID_DET_R2R 93 +#define AB8500_INT_ID_DET_R3R 94 +#define AB8500_INT_ID_DET_R4R 95 +#define AB8500_INT_ID_WAKEUP_F 96 +#define AB8500_INT_ID_DET_R1F 98 +#define AB8500_INT_ID_DET_R2F 99 +#define AB8500_INT_ID_DET_R3F 100 +#define AB8500_INT_ID_DET_R4F 101 +#define AB8500_INT_USB_CHG_DET_DONE 102 +#define AB8500_INT_USB_CH_TH_PROT_F 104 +#define AB8500_INT_USB_CH_TH_PROT_R 105 +#define AB8500_INT_MAIN_CH_TH_PROT_F 106 +#define AB8500_INT_MAIN_CH_TH_PROT_R 107 +#define AB8500_INT_USB_CHARGER_NOT_OKF 111 -#define AB8500_NR_IRQS 104 -#define AB8500_NUM_IRQ_REGS 13 - -#define AB8500_NUM_REGULATORS 15 +#define AB8500_NR_IRQS 112 +#define AB8500_NUM_IRQ_REGS 14 /** * struct ab8500 - ab8500 internal structure @@ -145,7 +150,8 @@ struct regulator_init_data; struct ab8500_platform_data { int irq_base; void (*init) (struct ab8500 *); - struct regulator_init_data *regulator[AB8500_NUM_REGULATORS]; + int num_regulator; + struct regulator_init_data *regulator; }; extern int __devinit ab8500_init(struct ab8500 *ab8500); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index cb93d80aa642..835996e167e1 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -39,7 +39,7 @@ struct mfd_cell { size_t data_size; /* - * This resources can be specified relatievly to the parent device. + * This resources can be specified relatively to the parent device. 
* For accessing device you should use resources from device */ int num_resources; @@ -47,6 +47,12 @@ struct mfd_cell { /* don't check for resource conflicts */ bool ignore_resource_conflicts; + + /* + * Disable runtime PM callbacks for this subdevice - see + * pm_runtime_no_callbacks(). + */ + bool pm_runtime_no_callbacks; }; extern int mfd_add_devices(struct device *parent, int id, diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h index 7363dea6bbcd..effa5d3b96ae 100644 --- a/include/linux/mfd/max8998-private.h +++ b/include/linux/mfd/max8998-private.h @@ -159,10 +159,12 @@ struct max8998_dev { u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; int type; + bool wakeup; }; int max8998_irq_init(struct max8998_dev *max8998); void max8998_irq_exit(struct max8998_dev *max8998); +int max8998_irq_resume(struct max8998_dev *max8998); extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest); extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count, diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index f8c9f884aff2..61daa167b576 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -70,24 +70,43 @@ struct max8998_regulator_data { * @num_regulators: number of regultors used * @irq_base: base IRQ number for max8998, required for IRQs * @ono: power onoff IRQ number for max8998 - * @buck1_max_voltage1: BUCK1 maximum alowed voltage register 1 - * @buck1_max_voltage2: BUCK1 maximum alowed voltage register 2 - * @buck2_max_voltage: BUCK2 maximum alowed voltage + * @buck_voltage_lock: Do NOT change the values of the following six + * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot + * be other than the preset values. + * @buck1_voltage1: BUCK1 DVS mode 1 voltage register + * @buck1_voltage2: BUCK1 DVS mode 2 voltage register + * @buck1_voltage3: BUCK1 DVS mode 3 voltage register + * @buck1_voltage4: BUCK1 DVS mode 4 voltage register + * @buck2_voltage1: BUCK2 DVS mode 1 voltage register + * @buck2_voltage2: BUCK2 DVS mode 2 voltage register * @buck1_set1: BUCK1 gpio pin 1 to set output voltage * @buck1_set2: BUCK1 gpio pin 2 to set output voltage + * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2 * @buck2_set3: BUCK2 gpio pin to set output voltage + * @buck2_default_idx: Default for BUCK2 gpio pin. + * @wakeup: Allow to wake up from suspend + * @rtc_delay: LP3974 RTC chip bug that requires delay after a register + * write before reading it. 
*/ struct max8998_platform_data { struct max8998_regulator_data *regulators; int num_regulators; int irq_base; int ono; - int buck1_max_voltage1; - int buck1_max_voltage2; - int buck2_max_voltage; + bool buck_voltage_lock; + int buck1_voltage1; + int buck1_voltage2; + int buck1_voltage3; + int buck1_voltage4; + int buck2_voltage1; + int buck2_voltage2; int buck1_set1; int buck1_set2; + int buck1_default_idx; int buck2_set3; + int buck2_default_idx; + bool wakeup; + bool rtc_delay; }; #endif /* __LINUX_MFD_MAX8998_H */ diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h index b4c741e352c2..7d0f3d6a0002 100644 --- a/include/linux/mfd/mc13783.h +++ b/include/linux/mfd/mc13783.h @@ -1,4 +1,5 @@ /* + * Copyright 2010 Yong Shen * Copyright 2009-2010 Pengutronix * Uwe Kleine-Koenig * @@ -122,39 +123,39 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, unsigned int channel, unsigned int *sample); -#define MC13783_SW_SW1A 0 -#define MC13783_SW_SW1B 1 -#define MC13783_SW_SW2A 2 -#define MC13783_SW_SW2B 3 -#define MC13783_SW_SW3 4 -#define MC13783_SW_PLL 5 -#define MC13783_REGU_VAUDIO 6 -#define MC13783_REGU_VIOHI 7 -#define MC13783_REGU_VIOLO 8 -#define MC13783_REGU_VDIG 9 -#define MC13783_REGU_VGEN 10 -#define MC13783_REGU_VRFDIG 11 -#define MC13783_REGU_VRFREF 12 -#define MC13783_REGU_VRFCP 13 -#define MC13783_REGU_VSIM 14 -#define MC13783_REGU_VESIM 15 -#define MC13783_REGU_VCAM 16 -#define MC13783_REGU_VRFBG 17 -#define MC13783_REGU_VVIB 18 -#define MC13783_REGU_VRF1 19 -#define MC13783_REGU_VRF2 20 -#define MC13783_REGU_VMMC1 21 -#define MC13783_REGU_VMMC2 22 -#define MC13783_REGU_GPO1 23 -#define MC13783_REGU_GPO2 24 -#define MC13783_REGU_GPO3 25 -#define MC13783_REGU_GPO4 26 -#define MC13783_REGU_V1 27 -#define MC13783_REGU_V2 28 -#define MC13783_REGU_V3 29 -#define MC13783_REGU_V4 30 -#define MC13783_REGU_PWGT1SPI 31 -#define MC13783_REGU_PWGT2SPI 32 +#define MC13783_REG_SW1A 0 +#define MC13783_REG_SW1B 1 +#define MC13783_REG_SW2A 2 +#define MC13783_REG_SW2B 3 +#define MC13783_REG_SW3 4 +#define MC13783_REG_PLL 5 +#define MC13783_REG_VAUDIO 6 +#define MC13783_REG_VIOHI 7 +#define MC13783_REG_VIOLO 8 +#define MC13783_REG_VDIG 9 +#define MC13783_REG_VGEN 10 +#define MC13783_REG_VRFDIG 11 +#define MC13783_REG_VRFREF 12 +#define MC13783_REG_VRFCP 13 +#define MC13783_REG_VSIM 14 +#define MC13783_REG_VESIM 15 +#define MC13783_REG_VCAM 16 +#define MC13783_REG_VRFBG 17 +#define MC13783_REG_VVIB 18 +#define MC13783_REG_VRF1 19 +#define MC13783_REG_VRF2 20 +#define MC13783_REG_VMMC1 21 +#define MC13783_REG_VMMC2 22 +#define MC13783_REG_GPO1 23 +#define MC13783_REG_GPO2 24 +#define MC13783_REG_GPO3 25 +#define MC13783_REG_GPO4 26 +#define MC13783_REG_V1 27 +#define MC13783_REG_V2 28 +#define MC13783_REG_V3 29 +#define MC13783_REG_V4 30 +#define MC13783_REG_PWGT1SPI 31 +#define MC13783_REG_PWGT2SPI 32 #define MC13783_IRQ_ADCDONE MC13XXX_IRQ_ADCDONE #define MC13783_IRQ_ADCBISDONE MC13XXX_IRQ_ADCBISDONE diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h new file mode 100644 index 000000000000..a00f2bec178c --- /dev/null +++ b/include/linux/mfd/mc13892.h @@ -0,0 +1,39 @@ +/* + * Copyright 2010 Yong Shen + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_MC13892_H +#define __LINUX_MFD_MC13892_H + +#include + +#define MC13892_SW1 0 +#define MC13892_SW2 1 +#define MC13892_SW3 2 +#define MC13892_SW4 3 +#define MC13892_SWBST 4 +#define MC13892_VIOHI 5 +#define MC13892_VPLL 6 +#define MC13892_VDIG 7 +#define MC13892_VSD 8 +#define MC13892_VUSB2 9 +#define MC13892_VVIDEO 10 +#define MC13892_VAUDIO 11 +#define MC13892_VCAM 12 +#define MC13892_VGEN1 13 +#define MC13892_VGEN2 14 +#define MC13892_VGEN3 15 +#define MC13892_VUSB 16 +#define MC13892_GPO1 17 +#define MC13892_GPO2 18 +#define MC13892_GPO3 19 +#define MC13892_GPO4 20 +#define MC13892_PWGT1SPI 21 +#define MC13892_PWGT2SPI 22 +#define MC13892_VCOINCELL 23 + +#endif diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index a1239c48b41a..903280d21866 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -245,6 +245,7 @@ enum wm831x_parent { WM8320 = 0x8320, WM8321 = 0x8321, WM8325 = 0x8325, + WM8326 = 0x8326, }; struct wm831x { diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index de79baee4925..3fd36845ca45 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h @@ -17,6 +17,11 @@ #include +enum wm8994_type { + WM8994 = 0, + WM8958 = 1, +}; + struct regulator_dev; struct regulator_bulk_data; @@ -48,6 +53,8 @@ struct wm8994 { struct mutex io_lock; struct mutex irq_lock; + enum wm8994_type type; + struct device *dev; int (*read_dev)(struct wm8994 *wm8994, unsigned short reg, int bytes, void *dest); @@ -68,6 +75,7 @@ struct wm8994 { u16 gpio_regs[WM8994_NUM_GPIO_REGS]; struct regulator_dev *dbvdd; + int num_supplies; struct regulator_bulk_data *supplies; }; diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index add8a1b8bcf0..9eab263658be 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -30,6 +30,8 @@ struct wm8994_ldo_pdata { #define WM8994_DRC_REGS 5 #define WM8994_EQ_REGS 20 +#define WM8958_MBC_CUTOFF_REGS 20 +#define WM8958_MBC_COEFF_REGS 48 /** * DRC configurations are specified with a label and a set of register @@ -59,6 +61,18 @@ struct wm8994_retune_mobile_cfg { u16 regs[WM8994_EQ_REGS]; }; +/** + * Multiband compressor configurations are specified with a label and + * two sets of values to write. 
Configurations are expected to be + * generated using the multiband compressor configuration panel in + * WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_mbc_cfg { + const char *name; + u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS]; + u16 coeff_regs[WM8958_MBC_COEFF_REGS]; +}; + struct wm8994_pdata { int gpio_base; @@ -78,6 +92,9 @@ struct wm8994_pdata { int num_retune_mobile_cfgs; struct wm8994_retune_mobile_cfg *retune_mobile_cfgs; + int num_mbc_cfgs; + struct wm8958_mbc_cfg *mbc_cfgs; + /* LINEOUT can be differential or single ended */ unsigned int lineout1_diff:1; unsigned int lineout2_diff:1; diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h index 967f62f54159..be072faec6f0 100644 --- a/include/linux/mfd/wm8994/registers.h +++ b/include/linux/mfd/wm8994/registers.h @@ -64,12 +64,16 @@ #define WM8994_LDO_1 0x3B #define WM8994_LDO_2 0x3C #define WM8994_CHARGE_PUMP_1 0x4C +#define WM8958_CHARGE_PUMP_2 0x4D #define WM8994_CLASS_W_1 0x51 #define WM8994_DC_SERVO_1 0x54 #define WM8994_DC_SERVO_2 0x55 #define WM8994_DC_SERVO_4 0x57 #define WM8994_DC_SERVO_READBACK 0x58 #define WM8994_ANALOGUE_HP_1 0x60 +#define WM8958_MIC_DETECT_1 0xD0 +#define WM8958_MIC_DETECT_2 0xD1 +#define WM8958_MIC_DETECT_3 0xD2 #define WM8994_CHIP_REVISION 0x100 #define WM8994_CONTROL_INTERFACE 0x101 #define WM8994_WRITE_SEQUENCER_CTRL_1 0x110 @@ -109,6 +113,10 @@ #define WM8994_AIF2DAC_LRCLK 0x315 #define WM8994_AIF2DAC_DATA 0x316 #define WM8994_AIF2ADC_DATA 0x317 +#define WM8958_AIF3_CONTROL_1 0x320 +#define WM8958_AIF3_CONTROL_2 0x321 +#define WM8958_AIF3DAC_DATA 0x322 +#define WM8958_AIF3ADC_DATA 0x323 #define WM8994_AIF1_ADC1_LEFT_VOLUME 0x400 #define WM8994_AIF1_ADC1_RIGHT_VOLUME 0x401 #define WM8994_AIF1_DAC1_LEFT_VOLUME 0x402 @@ -242,6 +250,83 @@ #define WM8994_INTERRUPT_STATUS_2_MASK 0x739 #define WM8994_INTERRUPT_CONTROL 0x740 #define WM8994_IRQ_DEBOUNCE 0x748 +#define WM8958_DSP2_PROGRAM 0x900 +#define WM8958_DSP2_CONFIG 0x901 +#define WM8958_DSP2_MAGICNUM 0xA00 +#define WM8958_DSP2_RELEASEYEAR 0xA01 +#define WM8958_DSP2_RELEASEMONTHDAY 0xA02 +#define WM8958_DSP2_RELEASETIME 0xA03 +#define WM8958_DSP2_VERMAJMIN 0xA04 +#define WM8958_DSP2_VERBUILD 0xA05 +#define WM8958_DSP2_EXECCONTROL 0xA0D +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1 0x2200 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2 0x2201 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1 0x2202 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2 0x2203 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1 0x2204 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2 0x2205 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1 0x2206 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2 0x2207 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1 0x2208 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2 0x2209 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1 0x220A +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2 0x220B +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1 0x220C +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2 0x220D +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1 0x220E +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2 0x220F +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1 0x2210 +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2 0x2211 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1 0x2212 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2 0x2213 +#define WM8958_MBC_BAND_1_K_1 0x2400 +#define WM8958_MBC_BAND_1_K_2 0x2401 +#define WM8958_MBC_BAND_1_N1_1 0x2402 +#define WM8958_MBC_BAND_1_N1_2 0x2403 +#define WM8958_MBC_BAND_1_N2_1 0x2404 +#define WM8958_MBC_BAND_1_N2_2 0x2405 +#define 
WM8958_MBC_BAND_1_N3_1 0x2406 +#define WM8958_MBC_BAND_1_N3_2 0x2407 +#define WM8958_MBC_BAND_1_N4_1 0x2408 +#define WM8958_MBC_BAND_1_N4_2 0x2409 +#define WM8958_MBC_BAND_1_N5_1 0x240A +#define WM8958_MBC_BAND_1_N5_2 0x240B +#define WM8958_MBC_BAND_1_X1_1 0x240C +#define WM8958_MBC_BAND_1_X1_2 0x240D +#define WM8958_MBC_BAND_1_X2_1 0x240E +#define WM8958_MBC_BAND_1_X2_2 0x240F +#define WM8958_MBC_BAND_1_X3_1 0x2410 +#define WM8958_MBC_BAND_1_X3_2 0x2411 +#define WM8958_MBC_BAND_1_ATTACK_1 0x2412 +#define WM8958_MBC_BAND_1_ATTACK_2 0x2413 +#define WM8958_MBC_BAND_1_DECAY_1 0x2414 +#define WM8958_MBC_BAND_1_DECAY_2 0x2415 +#define WM8958_MBC_BAND_2_K_1 0x2416 +#define WM8958_MBC_BAND_2_K_2 0x2417 +#define WM8958_MBC_BAND_2_N1_1 0x2418 +#define WM8958_MBC_BAND_2_N1_2 0x2419 +#define WM8958_MBC_BAND_2_N2_1 0x241A +#define WM8958_MBC_BAND_2_N2_2 0x241B +#define WM8958_MBC_BAND_2_N3_1 0x241C +#define WM8958_MBC_BAND_2_N3_2 0x241D +#define WM8958_MBC_BAND_2_N4_1 0x241E +#define WM8958_MBC_BAND_2_N4_2 0x241F +#define WM8958_MBC_BAND_2_N5_1 0x2420 +#define WM8958_MBC_BAND_2_N5_2 0x2421 +#define WM8958_MBC_BAND_2_X1_1 0x2422 +#define WM8958_MBC_BAND_2_X1_2 0x2423 +#define WM8958_MBC_BAND_2_X2_1 0x2424 +#define WM8958_MBC_BAND_2_X2_2 0x2425 +#define WM8958_MBC_BAND_2_X3_1 0x2426 +#define WM8958_MBC_BAND_2_X3_2 0x2427 +#define WM8958_MBC_BAND_2_ATTACK_1 0x2428 +#define WM8958_MBC_BAND_2_ATTACK_2 0x2429 +#define WM8958_MBC_BAND_2_DECAY_1 0x242A +#define WM8958_MBC_BAND_2_DECAY_2 0x242B +#define WM8958_MBC_B2_PG2_1 0x242C +#define WM8958_MBC_B2_PG2_2 0x242D +#define WM8958_MBC_B1_PG2_1 0x242E +#define WM8958_MBC_B1_PG2_2 0x242F #define WM8994_WRITE_SEQUENCER_0 0x3000 #define WM8994_WRITE_SEQUENCER_1 0x3001 #define WM8994_WRITE_SEQUENCER_2 0x3002 @@ -992,6 +1077,12 @@ /* * R6 (0x06) - Power Management (6) */ +#define WM8958_AIF3ADC_SRC_MASK 0x0600 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_SHIFT 9 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_WIDTH 2 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF2DAC_SRC_MASK 0x0180 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_SHIFT 7 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_WIDTH 2 /* AIF2DAC_SRC - [8:7] */ #define WM8994_AIF3_TRI 0x0020 /* AIF3_TRI */ #define WM8994_AIF3_TRI_MASK 0x0020 /* AIF3_TRI */ #define WM8994_AIF3_TRI_SHIFT 5 /* AIF3_TRI */ @@ -1835,6 +1926,14 @@ #define WM8994_CP_ENA_SHIFT 15 /* CP_ENA */ #define WM8994_CP_ENA_WIDTH 1 /* CP_ENA */ +/* + * R77 (0x4D) - Charge Pump (2) + */ +#define WM8958_CP_DISCH 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_MASK 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_SHIFT 15 /* CP_DISCH */ +#define WM8958_CP_DISCH_WIDTH 1 /* CP_DISCH */ + /* * R81 (0x51) - Class W (1) */ @@ -1951,6 +2050,46 @@ #define WM8994_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */ #define WM8994_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */ +/* + * R208 (0xD0) - Mic Detect 1 + */ +#define WM8958_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_DBTIME 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */ 
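[Editorial aside, not part of this patch: the *_MASK/*_SHIFT/*_WIDTH triplets above follow the usual WM8994/WM8958 register-header pattern and are consumed roughly as in the sketch below. The helper name and the assumption that a raw register readback is already in hand are invented for illustration only.]

#include <linux/mfd/wm8994/registers.h>

/*
 * Hypothetical helper: extract the MICD_RATE field from a raw readback of
 * the WM8958_MIC_DETECT_1 register.  How the value is read from the chip
 * is driver-specific and not shown here.
 */
static inline unsigned int wm8958_micd_rate(unsigned int reg_val)
{
	return (reg_val & WM8958_MICD_RATE_MASK) >> WM8958_MICD_RATE_SHIFT;
}
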
+#define WM8958_MICD_ENA 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_MASK 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_SHIFT 0 /* MICD_ENA */ +#define WM8958_MICD_ENA_WIDTH 1 /* MICD_ENA */ + +/* + * R209 (0xD1) - Mic Detect 2 + */ +#define WM8958_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */ +#define WM8958_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */ +#define WM8958_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */ + +/* + * R210 (0xD2) - Mic Detect 3 + */ +#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8958_MICD_STS 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */ + /* * R256 (0x100) - Chip Revision */ @@ -2069,6 +2208,14 @@ /* * R520 (0x208) - Clocking (1) */ +#define WM8958_DSP2CLK_ENA 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_MASK 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_SHIFT 14 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_WIDTH 1 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_SRC 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_MASK 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_SHIFT 12 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_WIDTH 1 /* DSP2CLK_SRC */ #define WM8994_TOCLK_ENA 0x0010 /* TOCLK_ENA */ #define WM8994_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */ #define WM8994_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */ @@ -2552,6 +2699,63 @@ #define WM8994_AIF2ADCR_DAT_INV_SHIFT 0 /* AIF2ADCR_DAT_INV */ #define WM8994_AIF2ADCR_DAT_INV_WIDTH 1 /* AIF2ADCR_DAT_INV */ +/* + * R800 (0x320) - AIF3 Control (1) + */ +#define WM8958_AIF3_LRCLK_INV 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_MASK 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_SHIFT 7 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_WIDTH 1 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_WL_MASK 0x0060 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_SHIFT 5 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_WIDTH 2 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_FMT_MASK 0x0018 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_SHIFT 3 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_WIDTH 2 /* AIF3_FMT - [4:3] */ + +/* + * R801 (0x321) - AIF3 Control (2) + */ +#define WM8958_AIF3DAC_BOOST_MASK 0x0C00 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_SHIFT 10 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_WIDTH 2 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_COMP 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_MASK 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_SHIFT 4 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_WIDTH 1 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMPMODE 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_MASK 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_SHIFT 3 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_WIDTH 1 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3ADC_COMP 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_MASK 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_SHIFT 2 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_WIDTH 1 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMPMODE 0x0002 /* AIF3ADC_COMPMODE */ 
+#define WM8958_AIF3ADC_COMPMODE_MASK 0x0002 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_SHIFT 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_WIDTH 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3_LOOPBACK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_MASK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_SHIFT 0 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_WIDTH 1 /* AIF3_LOOPBACK */ + +/* + * R802 (0x322) - AIF3DAC Data + */ +#define WM8958_AIF3DAC_DAT_INV 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_MASK 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_SHIFT 0 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_WIDTH 1 /* AIF3DAC_DAT_INV */ + +/* + * R803 (0x323) - AIF3ADC Data + */ +#define WM8958_AIF3ADC_DAT_INV 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_MASK 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_SHIFT 0 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_WIDTH 1 /* AIF3ADC_DAT_INV */ + /* * R1024 (0x400) - AIF1 ADC1 Left Volume */ @@ -4289,4 +4493,102 @@ #define WM8994_TEMP_SHUT_DB_SHIFT 0 /* TEMP_SHUT_DB */ #define WM8994_TEMP_SHUT_DB_WIDTH 1 /* TEMP_SHUT_DB */ +/* + * R2304 (0x900) - DSP2_Program + */ +#define WM8958_DSP2_ENA 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_MASK 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_SHIFT 0 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_WIDTH 1 /* DSP2_ENA */ + +/* + * R2305 (0x901) - DSP2_Config + */ +#define WM8958_MBC_SEL_MASK 0x0030 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_SHIFT 4 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_WIDTH 2 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_ENA 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_MASK 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_SHIFT 0 /* MBC_ENA */ +#define WM8958_MBC_ENA_WIDTH 1 /* MBC_ENA */ + +/* + * R2560 (0xA00) - DSP2_MagicNum + */ +#define WM8958_DSP2_MAGIC_NUM_MASK 0xFFFF /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_SHIFT 0 /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_WIDTH 16 /* DSP2_MAGIC_NUM - [15:0] */ + +/* + * R2561 (0xA01) - DSP2_ReleaseYear + */ +#define WM8958_DSP2_RELEASE_YEAR_MASK 0xFFFF /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_SHIFT 0 /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_WIDTH 16 /* DSP2_RELEASE_YEAR - [15:0] */ + +/* + * R2562 (0xA02) - DSP2_ReleaseMonthDay + */ +#define WM8958_DSP2_RELEASE_MONTH_MASK 0xFF00 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_SHIFT 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_WIDTH 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_DAY_MASK 0x00FF /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_SHIFT 0 /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_WIDTH 8 /* DSP2_RELEASE_DAY - [7:0] */ + +/* + * R2563 (0xA03) - DSP2_ReleaseTime + */ +#define WM8958_DSP2_RELEASE_HOURS_MASK 0xFF00 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_SHIFT 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_WIDTH 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_MINS_MASK 0x00FF /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_SHIFT 0 /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_WIDTH 8 /* DSP2_RELEASE_MINS - [7:0] */ + +/* + * R2564 (0xA04) - DSP2_VerMajMin + */ +#define WM8958_DSP2_MAJOR_VER_MASK 0xFF00 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_SHIFT 8 /* 
DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_WIDTH 8 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MINOR_VER_MASK 0x00FF /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_SHIFT 0 /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_WIDTH 8 /* DSP2_MINOR_VER - [7:0] */ + +/* + * R2565 (0xA05) - DSP2_VerBuild + */ +#define WM8958_DSP2_BUILD_VER_MASK 0xFFFF /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_SHIFT 0 /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_WIDTH 16 /* DSP2_BUILD_VER - [15:0] */ + +/* + * R2573 (0xA0D) - DSP2_ExecControl + */ +#define WM8958_DSP2_STOPC 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_MASK 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_SHIFT 5 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_WIDTH 1 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPS 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_MASK 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_SHIFT 4 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_WIDTH 1 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPI 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_MASK 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_SHIFT 3 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_WIDTH 1 /* DSP2_STOPI */ +#define WM8958_DSP2_STOP 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_MASK 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_SHIFT 2 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_WIDTH 1 /* DSP2_STOP */ +#define WM8958_DSP2_RUNR 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_MASK 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_SHIFT 1 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_WIDTH 1 /* DSP2_RUNR */ +#define WM8958_DSP2_RUN 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_MASK 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_SHIFT 0 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_WIDTH 1 /* DSP2_RUN */ + #endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 085527fb8261..e39aeecfe9a2 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -13,9 +13,11 @@ extern void putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *); extern int migrate_pages(struct list_head *l, new_page_t x, - unsigned long private, int offlining); + unsigned long private, bool offlining, + bool sync); extern int migrate_huge_pages(struct list_head *l, new_page_t x, - unsigned long private, int offlining); + unsigned long private, bool offlining, + bool sync); extern int fail_migrate_page(struct address_space *, struct page *, struct page *); @@ -33,9 +35,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, static inline void putback_lru_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t x, - unsigned long private, int offlining) { return -ENOSYS; } + unsigned long private, bool offlining, + bool sync) { return -ENOSYS; } static inline int migrate_huge_pages(struct list_head *l, new_page_t x, - unsigned long private, int offlining) { return -ENOSYS; } + unsigned long private, bool offlining, + bool sync) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 721f451c3029..956a35532f47 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -14,6 +14,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -82,6 +83,7 @@ extern unsigned int kobjsize(const void *objp); #define 
VM_GROWSUP 0x00000200 #else #define VM_GROWSUP 0x00000000 +#define VM_NOHUGEPAGE 0x00000200 /* MADV_NOHUGEPAGE marked this vma */ #endif #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ @@ -101,7 +103,11 @@ extern unsigned int kobjsize(const void *objp); #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ +#ifndef CONFIG_TRANSPARENT_HUGEPAGE #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ +#else +#define VM_HUGEPAGE 0x01000000 /* MADV_HUGEPAGE marked this vma */ +#endif #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ @@ -242,6 +248,7 @@ struct inode; * files which need it (119 of them) */ #include +#include /* * Methods to modify the page usage count. @@ -305,6 +312,39 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif +static inline void compound_lock(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + bit_spin_lock(PG_compound_lock, &page->flags); +#endif +} + +static inline void compound_unlock(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + bit_spin_unlock(PG_compound_lock, &page->flags); +#endif +} + +static inline unsigned long compound_lock_irqsave(struct page *page) +{ + unsigned long uninitialized_var(flags); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + local_irq_save(flags); + compound_lock(page); +#endif + return flags; +} + +static inline void compound_unlock_irqrestore(struct page *page, + unsigned long flags) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + compound_unlock(page); + local_irq_restore(flags); +#endif +} + static inline struct page *compound_head(struct page *page) { if (unlikely(PageTail(page))) @@ -319,9 +359,29 @@ static inline int page_count(struct page *page) static inline void get_page(struct page *page) { - page = compound_head(page); - VM_BUG_ON(atomic_read(&page->_count) == 0); + /* + * Getting a normal page or the head of a compound page + * requires to already have an elevated page->_count. Only if + * we're getting a tail page, the elevated page->_count is + * required only in the head page, so for tail pages the + * bugcheck only verifies that the page->_count isn't + * negative. + */ + VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page)); atomic_inc(&page->_count); + /* + * Getting a tail page will elevate both the head and tail + * page->_count(s). + */ + if (unlikely(PageTail(page))) { + /* + * This is safe only because + * __split_huge_page_refcount can't run under + * get_page(). + */ + VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); + atomic_inc(&page->first_page->_count); + } } static inline struct page *virt_to_head_page(const void *x) @@ -339,6 +399,27 @@ static inline void init_page_count(struct page *page) atomic_set(&page->_count, 1); } +/* + * PageBuddy() indicate that the page is free and in the buddy system + * (see mm/page_alloc.c). 
+ */ +static inline int PageBuddy(struct page *page) +{ + return atomic_read(&page->_mapcount) == -2; +} + +static inline void __SetPageBuddy(struct page *page) +{ + VM_BUG_ON(atomic_read(&page->_mapcount) != -1); + atomic_set(&page->_mapcount, -2); +} + +static inline void __ClearPageBuddy(struct page *page) +{ + VM_BUG_ON(!PageBuddy(page)); + atomic_set(&page->_mapcount, -1); +} + void put_page(struct page *page); void put_pages_list(struct list_head *pages); @@ -370,11 +451,38 @@ static inline int compound_order(struct page *page) return (unsigned long)page[1].lru.prev; } +static inline int compound_trans_order(struct page *page) +{ + int order; + unsigned long flags; + + if (!PageHead(page)) + return 0; + + flags = compound_lock_irqsave(page); + order = compound_order(page); + compound_unlock_irqrestore(page, flags); + return order; +} + static inline void set_compound_order(struct page *page, unsigned long order) { page[1].lru.prev = (void *)order; } +/* + * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when + * servicing faults for write access. In the normal case, do always want + * pte_mkwrite. But get_user_pages can cause write faults for mappings + * that do not have writing enabled, when used by access_process_vm. + */ +static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +{ + if (likely(vma->vm_flags & VM_WRITE)) + pte = pte_mkwrite(pte); + return pte; +} + /* * Multiple processes may "see" the same page. E.g. for untouched * mappings of /dev/null, all processes see the same page full of @@ -657,7 +765,7 @@ static inline struct address_space *page_mapping(struct page *page) VM_BUG_ON(PageSlab(page)); if (unlikely(PageSwapCache(page))) mapping = &swapper_space; - else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) + else if ((unsigned long)mapping & PAGE_MAPPING_ANON) mapping = NULL; return mapping; } @@ -1064,7 +1172,8 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); #endif -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd, unsigned long address); int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); /* @@ -1133,16 +1242,18 @@ static inline void pgtable_page_dtor(struct page *page) pte_unmap(pte); \ } while (0) -#define pte_alloc_map(mm, pmd, address) \ - ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ - NULL: pte_offset_map(pmd, address)) +#define pte_alloc_map(mm, vma, pmd, address) \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ + pmd, address))? \ + NULL: pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ - ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ + pmd, address))? \ NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ - ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ NULL: pte_offset_kernel(pmd, address)) extern void free_area_init(unsigned long * zones_size); @@ -1415,6 +1526,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, #define FOLL_GET 0x04 /* do get_page on page */ #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ +#define FOLL_MLOCK 0x40 /* mark page as mlocked */ +#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); @@ -1518,5 +1631,14 @@ static inline int is_hwpoison_address(unsigned long addr) extern void dump_page(struct page *page); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) +extern void clear_huge_page(struct page *page, + unsigned long addr, + unsigned int pages_per_huge_page); +extern void copy_user_huge_page(struct page *dst, struct page *src, + unsigned long addr, struct vm_area_struct *vma, + unsigned int pages_per_huge_page); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8835b877b8db..8f7d24712dc1 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -1,6 +1,8 @@ #ifndef LINUX_MM_INLINE_H #define LINUX_MM_INLINE_H +#include + /** * page_is_file_cache - should the page be on a file LRU or anon LRU? * @page: the page to test @@ -20,18 +22,25 @@ static inline int page_is_file_cache(struct page *page) } static inline void -add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) +__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, + struct list_head *head) { - list_add(&page->lru, &zone->lru[l].list); - __inc_zone_state(zone, NR_LRU_BASE + l); + list_add(&page->lru, head); + __mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page)); mem_cgroup_add_lru_list(page, l); } +static inline void +add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) +{ + __add_page_to_lru_list(zone, page, l, &zone->lru[l].list); +} + static inline void del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_del(&page->lru); - __dec_zone_state(zone, NR_LRU_BASE + l); + __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page)); mem_cgroup_del_lru_list(page, l); } @@ -66,7 +75,7 @@ del_page_from_lru(struct zone *zone, struct page *page) l += LRU_ACTIVE; } } - __dec_zone_state(zone, NR_LRU_BASE + l); + __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page)); mem_cgroup_del_lru_list(page, l); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bb7288a782fd..26bc4e2cd275 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -309,6 +309,9 @@ struct mm_struct { #endif #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pgtable_t pmd_huge_pte; /* protected by page_table_lock */ #endif /* How many tasks sharing this mm are OOM_DISABLE */ atomic_t oom_disable_count; diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h index 44fc5348fd5d..bf173502d744 100644 --- a/include/linux/mmc/sh_mmcif.h +++ b/include/linux/mmc/sh_mmcif.h @@ -104,6 +104,9 @@ static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) #define SH_MMCIF_BBS 512 /* boot block size */ +enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT, + 
MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE }; + static inline void sh_mmcif_boot_cmd_send(void __iomem *base, unsigned long cmd, unsigned long arg) { @@ -166,6 +169,17 @@ static inline int sh_mmcif_boot_do_read(void __iomem *base, unsigned long k; int ret = 0; + /* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, + CLK_ENABLE | CLKDIV_4 | SRSPTO_256 | + SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + + /* CMD9 - Get CSD */ + sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); + + /* CMD7 - Select the card */ + sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); + /* CMD16 - Set the block size */ sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS); @@ -209,27 +223,4 @@ static inline void sh_mmcif_boot_init(void __iomem *base) sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); } -static inline void sh_mmcif_boot_slurp(void __iomem *base, - unsigned char *buf, - unsigned long no_bytes) -{ - unsigned long tmp; - - /* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */ - sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, - CLK_ENABLE | CLKDIV_4 | SRSPTO_256 | - SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); - - /* CMD9 - Get CSD */ - sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); - - /* CMD7 - Select the card */ - sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); - - tmp = no_bytes / SH_MMCIF_BBS; - tmp += (no_bytes % SH_MMCIF_BBS) ? 1 : 0; - - sh_mmcif_boot_do_read(base, 512, tmp, buf); -} - #endif /* __SH_MMCIF_H__ */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 43dcfbdc39de..cc2e7dfea9d7 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -61,6 +61,16 @@ struct mmu_notifier_ops { struct mm_struct *mm, unsigned long address); + /* + * test_young is called to check the young/accessed bitflag in + * the secondary pte. This is used to know if the page is + * frequently used without actually clearing the flag or tearing + * down the secondary mapping on the page. + */ + int (*test_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + /* * change_pte is called in cases that pte mapping to page is changed: * for example, when ksm remaps pte to point to a new shared page. 
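[Editorial aside, not part of this patch: the new test_young callback added to mmu_notifier_ops above lets a secondary-MMU user report the accessed state of its own mapping without clearing the flag or tearing the mapping down. A minimal sketch of how a driver might wire it up follows; struct my_dev, its referenced bitmap and the ops table are hypothetical names invented for illustration.]

#include <linux/mmu_notifier.h>
#include <linux/bitops.h>

/* Hypothetical per-device state: one "referenced" bit per tracked page. */
struct my_dev {
	struct mmu_notifier	mn;
	unsigned long		base;			/* first tracked address */
	unsigned long		*referenced_bitmap;
};

static int my_test_young(struct mmu_notifier *mn, struct mm_struct *mm,
			 unsigned long address)
{
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	/*
	 * Report the accessed state non-destructively: no flag clearing,
	 * no teardown of the secondary mapping.
	 */
	return test_bit((address - dev->base) >> PAGE_SHIFT,
			dev->referenced_bitmap);
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.test_young	= my_test_young,	/* unused callbacks stay NULL */
};
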
@@ -163,6 +173,8 @@ extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long address); +extern int __mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, @@ -186,6 +198,14 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_test_young(mm, address); + return 0; +} + static inline void mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) { @@ -243,6 +263,32 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __pte; \ }) +#define pmdp_clear_flush_notify(__vma, __address, __pmdp) \ +({ \ + pmd_t __pmd; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ + mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE);\ + __pmd = pmdp_clear_flush(___vma, ___address, __pmdp); \ + mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE); \ + __pmd; \ +}) + +#define pmdp_splitting_flush_notify(__vma, __address, __pmdp) \ +({ \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ + mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE);\ + pmdp_splitting_flush(___vma, ___address, __pmdp); \ + mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE); \ +}) + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ ({ \ int __young; \ @@ -254,6 +300,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __young; \ }) +#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + #define set_pte_at_notify(__mm, __address, __ptep, __pte) \ ({ \ struct mm_struct *___mm = __mm; \ @@ -276,6 +333,12 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + static inline void mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) { @@ -305,7 +368,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) } #define ptep_clear_flush_young_notify ptep_clear_flush_young +#define pmdp_clear_flush_young_notify pmdp_clear_flush_young #define ptep_clear_flush_notify ptep_clear_flush +#define pmdp_clear_flush_notify pmdp_clear_flush +#define pmdp_splitting_flush_notify pmdp_splitting_flush #define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 39c24ebe9cfd..02ecb0189b1d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -114,6 +114,7 @@ enum zone_stat_item { NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation 
from other node */ #endif + NR_ANON_TRANSPARENT_HUGEPAGES, NR_VM_ZONE_STAT_ITEMS }; /* @@ -458,12 +459,6 @@ static inline int zone_is_oom_locked(const struct zone *zone) return test_bit(ZONE_OOM_LOCKED, &zone->flags); } -#ifdef CONFIG_SMP -unsigned long zone_nr_free_pages(struct zone *zone); -#else -#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) -#endif /* CONFIG_SMP */ - /* * The "priority" of VM scanning is how much of the queues we will scan in one * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the @@ -645,6 +640,7 @@ typedef struct pglist_data { wait_queue_head_t kswapd_wait; struct task_struct *kswapd; int kswapd_max_order; + enum zone_type classzone_idx; } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) @@ -660,8 +656,10 @@ typedef struct pglist_data { extern struct mutex zonelists_mutex; void build_all_zonelists(void *data); -void wakeup_kswapd(struct zone *zone, int order); -int zone_watermark_ok(struct zone *z, int order, unsigned long mark, +void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags); enum memmap_context { MEMMAP_EARLY, diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index 1c27f201c856..e13eefef0653 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -143,104 +143,4 @@ struct ncp_nls_ioctl #define NCP_MAXPATHLEN 255 #define NCP_MAXNAMELEN 14 -#ifdef __KERNEL__ - -#include -#include - -/* define because it is easy to change PRINTK to {*}PRINTK */ -#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args) - -#undef NCPFS_PARANOIA -#ifdef NCPFS_PARANOIA -#define PPRINTK(format, args...) PRINTK(format , ## args) -#else -#define PPRINTK(format, args...) -#endif - -#ifndef DEBUG_NCP -#define DEBUG_NCP 0 -#endif -#if DEBUG_NCP > 0 -#define DPRINTK(format, args...) PRINTK(format , ## args) -#else -#define DPRINTK(format, args...) -#endif -#if DEBUG_NCP > 1 -#define DDPRINTK(format, args...) PRINTK(format , ## args) -#else -#define DDPRINTK(format, args...) 
-#endif - -#define NCP_MAX_RPC_TIMEOUT (6*HZ) - - -struct ncp_entry_info { - struct nw_info_struct i; - ino_t ino; - int opened; - int access; - unsigned int volume; - __u8 file_handle[6]; -}; - -static inline struct ncp_server *NCP_SBP(const struct super_block *sb) -{ - return sb->s_fs_info; -} - -#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb) -static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode) -{ - return container_of(inode, struct ncp_inode_info, vfs_inode); -} - -/* linux/fs/ncpfs/inode.c */ -int ncp_notify_change(struct dentry *, struct iattr *); -struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *); -void ncp_update_inode(struct inode *, struct ncp_entry_info *); -void ncp_update_inode2(struct inode *, struct ncp_entry_info *); - -/* linux/fs/ncpfs/dir.c */ -extern const struct inode_operations ncp_dir_inode_operations; -extern const struct file_operations ncp_dir_operations; -extern const struct dentry_operations ncp_root_dentry_operations; -int ncp_conn_logged_in(struct super_block *); -int ncp_date_dos2unix(__le16 time, __le16 date); -void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); - -/* linux/fs/ncpfs/ioctl.c */ -long ncp_ioctl(struct file *, unsigned int, unsigned long); -long ncp_compat_ioctl(struct file *, unsigned int, unsigned long); - -/* linux/fs/ncpfs/sock.c */ -int ncp_request2(struct ncp_server *server, int function, - void* reply, int max_reply_size); -static inline int ncp_request(struct ncp_server *server, int function) { - return ncp_request2(server, function, server->packet, server->packet_size); -} -int ncp_connect(struct ncp_server *server); -int ncp_disconnect(struct ncp_server *server); -void ncp_lock_server(struct ncp_server *server); -void ncp_unlock_server(struct ncp_server *server); - -/* linux/fs/ncpfs/symlink.c */ -#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) -extern const struct address_space_operations ncp_symlink_aops; -int ncp_symlink(struct inode*, struct dentry*, const char*); -#endif - -/* linux/fs/ncpfs/file.c */ -extern const struct inode_operations ncp_file_inode_operations; -extern const struct file_operations ncp_file_operations; -int ncp_make_open(struct inode *, int); - -/* linux/fs/ncpfs/mmap.c */ -int ncp_mmap(struct file *, struct vm_area_struct *); - -/* linux/fs/ncpfs/ncplib_kernel.c */ -int ncp_make_closed(struct inode *); - -#endif /* __KERNEL__ */ - #endif /* _LINUX_NCP_FS_H */ diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h index a2b549eb1eca..dfcbea2d889f 100644 --- a/include/linux/ncp_mount.h +++ b/include/linux/ncp_mount.h @@ -68,26 +68,4 @@ struct ncp_mount_data_v4 { #define NCP_MOUNT_VERSION_V5 (5) /* Text only */ -#ifdef __KERNEL__ - -struct ncp_mount_data_kernel { - unsigned long flags; /* NCP_MOUNT_* flags */ - unsigned int int_flags; /* internal flags */ -#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 - __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ - struct pid *wdog_pid; /* Who cares for our watchdog packets? */ - unsigned int ncp_fd; /* The socket to the ncp port */ - unsigned int time_out; /* How long should I wait after - sending a NCP request? */ - unsigned int retry_count; /* And how often should I retry? 
*/ - unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_mode_t file_mode; - __kernel_mode_t dir_mode; - int info_fd; -}; - -#endif /* __KERNEL__ */ - #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index be4957cf6511..d971346b0340 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -520,9 +520,6 @@ struct netdev_queue { * please use this field instead of dev->trans_start */ unsigned long trans_start; - u64 tx_bytes; - u64 tx_packets; - u64 tx_dropped; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -2265,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage); -extern void dev_txq_stats_fold(const struct net_device *dev, - struct rtnl_link_stats64 *stats); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h new file mode 100644 index 000000000000..7ab8521f2347 --- /dev/null +++ b/include/linux/nfc/pn544.h @@ -0,0 +1,97 @@ +/* + * Driver include for the PN544 NFC chip. + * + * Copyright (C) Nokia Corporation + * + * Author: Jari Vanhala + * Contact: Matti Aaltoenn + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PN544_H_ +#define _PN544_H_ + +#include + +#define PN544_DRIVER_NAME "pn544" +#define PN544_MAXWINDOW_SIZE 7 +#define PN544_WINDOW_SIZE 4 +#define PN544_RETRIES 10 +#define PN544_MAX_I2C_TRANSFER 0x0400 +#define PN544_MSG_MAX_SIZE 0x21 /* at normal HCI mode */ + +/* ioctl */ +#define PN544_CHAR_BASE 'P' +#define PN544_IOR(num, dtype) _IOR(PN544_CHAR_BASE, num, dtype) +#define PN544_IOW(num, dtype) _IOW(PN544_CHAR_BASE, num, dtype) +#define PN544_GET_FW_MODE PN544_IOW(1, unsigned int) +#define PN544_SET_FW_MODE PN544_IOW(2, unsigned int) +#define PN544_GET_DEBUG PN544_IOW(3, unsigned int) +#define PN544_SET_DEBUG PN544_IOW(4, unsigned int) + +/* Timing restrictions (ms) */ +#define PN544_RESETVEN_TIME 30 /* 7 */ +#define PN544_PVDDVEN_TIME 0 +#define PN544_VBATVEN_TIME 0 +#define PN544_GPIO4VEN_TIME 0 +#define PN544_WAKEUP_ACK 5 +#define PN544_WAKEUP_GUARD (PN544_WAKEUP_ACK + 1) +#define PN544_INACTIVITY_TIME 1000 +#define PN544_INTERFRAME_DELAY 200 /* us */ +#define PN544_BAUDRATE_CHANGE 150 /* us */ + +/* Debug bits */ +#define PN544_DEBUG_BUF 0x01 +#define PN544_DEBUG_READ 0x02 +#define PN544_DEBUG_WRITE 0x04 +#define PN544_DEBUG_IRQ 0x08 +#define PN544_DEBUG_CALLS 0x10 +#define PN544_DEBUG_MODE 0x20 + +/* Normal (HCI) mode */ +#define PN544_LLC_HCI_OVERHEAD 3 /* header + crc (to length) */ +#define PN544_LLC_MIN_SIZE (1 + PN544_LLC_HCI_OVERHEAD) /* length + */ +#define PN544_LLC_MAX_DATA (PN544_MSG_MAX_SIZE - 2) +#define PN544_LLC_MAX_HCI_SIZE (PN544_LLC_MAX_DATA - 2) + +struct pn544_llc_packet { + unsigned char length; /* of rest of packet */ + unsigned char header; + unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */ +}; + +/* Firmware upgrade mode */ +#define PN544_FW_HEADER_SIZE 3 +/* max fw transfer is 1024bytes, but I2C limits it to 0xC0 */ +#define PN544_MAX_FW_DATA (PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE) + +struct pn544_fw_packet { + unsigned char command; /* status in answer */ + unsigned char length[2]; /* big-endian order (msf) */ + unsigned char data[PN544_MAX_FW_DATA]; +}; + +#ifdef __KERNEL__ +/* board config */ +struct pn544_nfc_platform_data { + int (*request_resources) (struct i2c_client *client); + void (*free_resources) (void); + void (*enable) (int fw); + int (*test) (void); + void (*disable) (void); +}; +#endif /* __KERNEL__ */ + +#endif /* _PN544_H_ */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9b46300b4305..134716e5e350 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -65,6 +65,9 @@ #define NFS4_CDFC4_FORE 0x1 #define NFS4_CDFC4_BACK 0x2 +#define NFS4_CDFC4_BOTH 0x3 +#define NFS4_CDFC4_FORE_OR_BOTH 0x3 +#define NFS4_CDFC4_BACK_OR_BOTH 0x7 #define NFS4_SET_TO_SERVER_TIME 0 #define NFS4_SET_TO_CLIENT_TIME 1 @@ -140,6 +143,9 @@ #define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200 #define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400 +#define NFS4_SECINFO_STYLE4_CURRENT_FH 0 +#define NFS4_SECINFO_STYLE4_PARENT 1 + #define NFS4_MAX_UINT64 (~(u64)0) /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. 
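The new pn544.h header leaves power sequencing to the board: platform code is expected to provide a struct pn544_nfc_platform_data with hooks for claiming resources and toggling the enable/firmware-mode lines. A minimal board-file sketch follows; the I2C bus number and address (0x2b) and the no-op hook bodies are assumptions for illustration only.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/nfc/pn544.h>

/* Hypothetical board hooks; a real board would claim and drive its GPIOs. */
static int example_pn544_request_resources(struct i2c_client *client)
{
	return 0;	/* request GPIOs, regulators, IRQ, ... */
}

static void example_pn544_free_resources(void) { }
static void example_pn544_enable(int fw) { }	/* fw != 0 selects firmware-upgrade mode */
static int example_pn544_test(void) { return 0; }
static void example_pn544_disable(void) { }

static struct pn544_nfc_platform_data example_pn544_pdata = {
	.request_resources = example_pn544_request_resources,
	.free_resources    = example_pn544_free_resources,
	.enable            = example_pn544_enable,
	.test              = example_pn544_test,
	.disable           = example_pn544_disable,
};

static struct i2c_board_info example_nfc_i2c_info __initdata = {
	I2C_BOARD_INFO(PN544_DRIVER_NAME, 0x2b),	/* assumed address */
	.platform_data = &example_pn544_pdata,
};

/* Registered from board init, e.g. i2c_register_board_info(2, &example_nfc_i2c_info, 1); */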
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 8ae78a61eea4..bd316159278c 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -35,7 +35,7 @@ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ -#define NFSEXP_MSNFS 0x1000 /* do silly things that MS clients expect */ +#define NFSEXP_MSNFS 0x1000 /* do silly things that MS clients expect; no longer supported */ #define NFSEXP_FSID 0x2000 #define NFSEXP_CROSSMOUNT 0x4000 #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 2b89b712565b..821ffb954f14 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -148,6 +148,10 @@ * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by * %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by + * %NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP. + * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by + * %NL80211_ATTR_MAC. * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC @@ -612,7 +616,7 @@ enum nl80211_commands { * consisting of a nested array. * * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). - * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link. + * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link. * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path * info given for %NL80211_CMD_GET_MPATH, nested attribute described at @@ -879,7 +883,9 @@ enum nl80211_commands { * See &enum nl80211_key_default_types. * * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters. These cannot be - * changed once the mesh is active. + * changed once the mesh is active. + * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute + * containing attributes from &enum nl80211_meshconf_params. * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -1225,8 +1231,6 @@ enum nl80211_rate_info { * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs) * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station) * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) - * @__NL80211_STA_INFO_AFTER_LAST: internal - * @NL80211_STA_INFO_MAX: highest possible station info attribute * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute * containing info as possible, see &enum nl80211_sta_info_txrate. 
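The mesh-related station statistics documented in this patch (NL80211_STA_INFO_LLID, _PLID and _PLINK_STATE, described in the next hunk) travel inside the nested NL80211_ATTR_STA_INFO attribute. The sketch below shows one way such a nested blob can be unpacked with the kernel's netlink helpers; the function name is illustrative, and it assumes the LLID/PLID attributes are carried as u16 and the peer-link state as u8.

#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/nl80211.h>

static void example_dump_mesh_sta(struct nlattr *sta_info_attr)
{
	struct nlattr *sinfo[NL80211_STA_INFO_MAX + 1];

	/* Split the nested NL80211_ATTR_STA_INFO blob into its attributes. */
	if (nla_parse_nested(sinfo, NL80211_STA_INFO_MAX, sta_info_attr, NULL))
		return;

	if (sinfo[NL80211_STA_INFO_LLID])
		pr_info("mesh LLID: %u\n",
			nla_get_u16(sinfo[NL80211_STA_INFO_LLID]));
	if (sinfo[NL80211_STA_INFO_PLID])
		pr_info("mesh PLID: %u\n",
			nla_get_u16(sinfo[NL80211_STA_INFO_PLID]));
	if (sinfo[NL80211_STA_INFO_PLINK_STATE])
		pr_info("peer link state: %u\n",
			nla_get_u8(sinfo[NL80211_STA_INFO_PLINK_STATE]));
}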
@@ -1236,6 +1240,11 @@ enum nl80211_rate_info { * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station) * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station) * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm) + * @NL80211_STA_INFO_LLID: the station's mesh LLID + * @NL80211_STA_INFO_PLID: the station's mesh PLID + * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station + * @__NL80211_STA_INFO_AFTER_LAST: internal + * @NL80211_STA_INFO_MAX: highest possible station info attribute */ enum nl80211_sta_info { __NL80211_STA_INFO_INVALID, @@ -1626,7 +1635,7 @@ enum nl80211_mntr_flags { * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs) * that it takes for an HWMP information element to propagate across the mesh * - * @NL80211_MESHCONF_ROOTMODE: whether root mode is enabled or not + * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not * * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a * source mesh point for path selection elements. @@ -1678,6 +1687,7 @@ enum nl80211_meshconf_params { * element that vendors will use to identify the path selection methods and * metrics in use. * + * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use */ enum nl80211_mesh_setup_params { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5f38c460367e..0db8037e2725 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -48,9 +48,6 @@ * struct page (these bits with information) are always mapped into kernel * address space... * - * PG_buddy is set to indicate that the page is free and in the buddy system - * (see mm/page_alloc.c). - * * PG_hwpoison indicates that a page got corrupted in hardware and contains * data with incorrect ECC bits that triggered a machine check. Accessing is * not safe since it may cause another machine check. Don't touch! @@ -96,7 +93,6 @@ enum pageflags { PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ - PG_buddy, /* Page is free, on buddy lists */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ #ifdef CONFIG_MMU @@ -107,6 +103,9 @@ enum pageflags { #endif #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + PG_compound_lock, #endif __NR_PAGEFLAGS, @@ -198,7 +197,7 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; } struct page; /* forward declaration */ TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) -PAGEFLAG(Error, error) +PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) @@ -230,7 +229,6 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) * risky: they bypass page accounting. */ TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) -__PAGEFLAG(Buddy, buddy) PAGEFLAG(MappedToDisk, mappedtodisk) /* PG_readahead is only used for file reads; PG_reclaim is only for writes */ @@ -344,7 +342,7 @@ static inline void set_page_writeback(struct page *page) * tests can be used in performance sensitive paths. PageCompound is * generally not used in hot code paths. 
*/ -__PAGEFLAG(Head, head) +__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) __PAGEFLAG(Tail, tail) static inline int PageCompound(struct page *page) @@ -352,6 +350,13 @@ static inline int PageCompound(struct page *page) return page->flags & ((1L << PG_head) | (1L << PG_tail)); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void ClearPageCompound(struct page *page) +{ + BUG_ON(!PageHead(page)); + ClearPageHead(page); +} +#endif #else /* * Reduce page flag use as much as possible by overlapping @@ -389,14 +394,61 @@ static inline void __ClearPageTail(struct page *page) page->flags &= ~PG_head_tail_mask; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void ClearPageCompound(struct page *page) +{ + BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); + clear_bit(PG_compound, &page->flags); +} +#endif + #endif /* !PAGEFLAGS_EXTENDED */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * PageHuge() only returns true for hugetlbfs pages, but not for + * normal or transparent huge pages. + * + * PageTransHuge() returns true for both transparent huge and + * hugetlbfs pages, but not normal pages. PageTransHuge() can only be + * called only in the core VM paths where hugetlbfs pages can't exist. + */ +static inline int PageTransHuge(struct page *page) +{ + VM_BUG_ON(PageTail(page)); + return PageHead(page); +} + +static inline int PageTransCompound(struct page *page) +{ + return PageCompound(page); +} + +#else + +static inline int PageTransHuge(struct page *page) +{ + return 0; +} + +static inline int PageTransCompound(struct page *page) +{ + return 0; +} +#endif + #ifdef CONFIG_MMU #define __PG_MLOCKED (1 << PG_mlocked) #else #define __PG_MLOCKED 0 #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define __PG_COMPOUND_LOCK (1 << PG_compound_lock) +#else +#define __PG_COMPOUND_LOCK 0 +#endif + /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. @@ -404,9 +456,10 @@ static inline void __ClearPageTail(struct page *page) #define PAGE_FLAGS_CHECK_AT_FREE \ (1 << PG_lru | 1 << PG_locked | \ 1 << PG_private | 1 << PG_private_2 | \ - 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ + 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON) + 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ + __PG_COMPOUND_LOCK) /* * Flags checked when a page is prepped for return by the page allocator. diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index b02195dfc1b0..6d6cb7a57bb3 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -35,12 +35,15 @@ struct page_cgroup *lookup_page_cgroup(struct page *page); enum { /* flags for mem_cgroup */ - PCG_LOCK, /* page cgroup is locked */ + PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */ PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. */ - PCG_ACCT_LRU, /* page has been accounted for */ - PCG_FILE_MAPPED, /* page is accounted as "mapped" */ PCG_MIGRATION, /* under page migration */ + /* flags for mem_cgroup and file and I/O status */ + PCG_MOVE_LOCK, /* For race between move_account v.s. 
following bits */ + PCG_FILE_MAPPED, /* page is accounted as "mapped" */ + /* No lock in page_cgroup */ + PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */ }; #define TESTPCGFLAG(uname, lname) \ @@ -94,6 +97,10 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc) static inline void lock_page_cgroup(struct page_cgroup *pc) { + /* + * Don't take this lock in IRQ context. + * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION + */ bit_spin_lock(PCG_LOCK, &pc->flags); } @@ -107,6 +114,24 @@ static inline int page_is_cgroup_locked(struct page_cgroup *pc) return bit_spin_is_locked(PCG_LOCK, &pc->flags); } +static inline void move_lock_page_cgroup(struct page_cgroup *pc, + unsigned long *flags) +{ + /* + * We know updates to pc->flags of page cache's stats are from both of + * usual context or IRQ context. Disable IRQ to avoid deadlock. + */ + local_irq_save(*flags); + bit_spin_lock(PCG_MOVE_LOCK, &pc->flags); +} + +static inline void move_unlock_page_cgroup(struct page_cgroup *pc, + unsigned long *flags) +{ + bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags); + local_irq_restore(*flags); +} + #else /* CONFIG_CGROUP_MEM_RES_CTLR */ struct page_cgroup; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 2d1ffe3cf1ee..9c66e994540f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -48,7 +48,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping) static inline int mapping_unevictable(struct address_space *mapping) { - if (likely(mapping)) + if (mapping) return test_bit(AS_UNEVICTABLE, &mapping->flags); return !!mapping; } diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index c8b6473c5f42..479d9bb88e11 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -40,4 +40,10 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { return NULL; } #endif +#ifdef CONFIG_ACPI_APEI +extern bool aer_acpi_firmware_first(void); +#else +static inline bool aer_acpi_firmware_first(void) { return false; } +#endif + #endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index 91ba0b338b47..ce6810512c66 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pcie_clear_aspm(void); extern void pcie_no_aspm(void); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) @@ -41,7 +42,9 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } - +static inline void pcie_clear_aspm(void) +{ +} static inline void pcie_no_aspm(void) { } diff --git a/include/linux/pci.h b/include/linux/pci.h index 7454408c41b6..559d02897075 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -806,7 +806,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); -int pci_restore_state(struct pci_dev *dev); +void pci_restore_state(struct pci_dev *dev); int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t 
state); @@ -820,7 +820,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); -void pci_wakeup_event(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, @@ -994,6 +993,14 @@ extern void pci_restore_msi_state(struct pci_dev *dev); extern int pci_msi_enabled(void); #endif +#ifdef CONFIG_PCIEPORTBUS +extern bool pcie_ports_disabled; +extern bool pcie_ports_auto; +#else +#define pcie_ports_disabled true +#define pcie_ports_auto false +#endif + #ifndef CONFIG_PCIEASPM static inline int pcie_aspm_enabled(void) { @@ -1003,6 +1010,14 @@ static inline int pcie_aspm_enabled(void) extern int pcie_aspm_enabled(void); #endif +#ifdef CONFIG_PCIEAER +void pci_no_aer(void); +bool pci_aer_available(void); +#else +static inline void pci_no_aer(void) { } +static inline bool pci_aer_available(void) { return false; } +#endif + #ifndef CONFIG_PCIE_ECRC static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { @@ -1168,10 +1183,8 @@ static inline int pci_save_state(struct pci_dev *dev) return 0; } -static inline int pci_restore_state(struct pci_dev *dev) -{ - return 0; -} +static inline void pci_restore_state(struct pci_dev *dev) +{ } static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ab47732d81e0..3adb06ebf841 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2369,8 +2369,10 @@ #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 +#define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388 #define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391 #define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392 +#define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393 #define PCI_VENDOR_ID_KORENIX 0x1982 #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 @@ -2476,7 +2478,8 @@ #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 -#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC 0x1d40 +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index af83076c31a6..5b7e6b1ba54f 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -309,6 +309,14 @@ #define PCI_MSIX_PBA 8 #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) +/* MSI-X entry's format */ +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 +#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + /* CompactPCI Hotswap Register */ #define PCI_CHSWP_CSR 2 /* Control and Status Register */ @@ -496,6 +504,8 @@ #define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */ #define PCI_EXP_RTCAP 30 /* Root Capabilities */ #define PCI_EXP_RTSTA 32 /* Root Status */ +#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ +#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ #define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ #define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ #define PCI_EXP_DEVCTL2 40 /* Device Control 
2 */ diff --git a/include/linux/poll.h b/include/linux/poll.h index 56e76af78102..1a2ccd6f3823 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -57,7 +57,7 @@ struct poll_table_entry { }; /* - * Structures and helpers for sys_poll/sys_poll + * Structures and helpers for select/poll syscall */ struct poll_wqueues { poll_table pt; diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h new file mode 100644 index 000000000000..de1dfe09a03d --- /dev/null +++ b/include/linux/power/gpio-charger.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_POWER_GPIO_CHARGER_H__ +#define __LINUX_POWER_GPIO_CHARGER_H__ + +#include +#include + +/** + * struct gpio_charger_platform_data - platform_data for gpio_charger devices + * @name: Name for the chargers power_supply device + * @type: Type of the charger + * @gpio: GPIO which is used to indicate the chargers status + * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0 + * @supplied_to: Array of battery names to which this chargers supplies power + * @num_supplicants: Number of entries in the supplied_to array + */ +struct gpio_charger_platform_data { + const char *name; + enum power_supply_type type; + + int gpio; + int gpio_active_low; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h new file mode 100644 index 000000000000..7995deb8bfc1 --- /dev/null +++ b/include/linux/power/max17042_battery.h @@ -0,0 +1,30 @@ +/* + * Fuel gauge driver for Maxim 17042 / 8966 / 8997 + * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __MAX17042_BATTERY_H_ +#define __MAX17042_BATTERY_H_ + +struct max17042_platform_data { + bool enable_current_sense; +}; + +#endif /* __MAX17042_BATTERY_H_ */ diff --git a/include/linux/pps.h b/include/linux/pps.h index 0194ab06177b..a9bb1d93451a 100644 --- a/include/linux/pps.h +++ b/include/linux/pps.h @@ -114,11 +114,18 @@ struct pps_fdata { struct pps_ktime timeout; }; +struct pps_bind_args { + int tsformat; /* format of time stamps */ + int edge; /* selected event type */ + int consumer; /* selected kernel consumer */ +}; + #include #define PPS_GETPARAMS _IOR('p', 0xa1, struct pps_kparams *) #define PPS_SETPARAMS _IOW('p', 0xa2, struct pps_kparams *) #define PPS_GETCAP _IOR('p', 0xa3, int *) #define PPS_FETCH _IOWR('p', 0xa4, struct pps_fdata *) +#define PPS_KC_BIND _IOW('p', 0xa5, struct pps_bind_args *) #endif /* _PPS_H_ */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index e0a193f830ef..94048547f29a 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -18,6 +18,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#ifndef LINUX_PPS_KERNEL_H +#define LINUX_PPS_KERNEL_H + #include #include @@ -28,18 +31,28 @@ * Global defines */ +struct pps_device; + /* The specific PPS source info */ struct pps_source_info { char name[PPS_MAX_NAME_LEN]; /* simbolic name */ char path[PPS_MAX_NAME_LEN]; /* path of connected device */ int mode; /* PPS's allowed mode */ - void (*echo)(int source, int event, void *data); /* PPS echo function */ + void (*echo)(struct pps_device *pps, + int event, void *data); /* PPS echo function */ struct module *owner; struct device *dev; }; +struct pps_event_time { +#ifdef CONFIG_NTP_PPS + struct timespec ts_raw; +#endif /* CONFIG_NTP_PPS */ + struct timespec ts_real; +}; + /* The main struct */ struct pps_device { struct pps_source_info info; /* PSS source info */ @@ -52,38 +65,56 @@ struct pps_device { struct pps_ktime clear_tu; int current_mode; /* PPS mode at event time */ - int go; /* PPS event is arrived? 
*/ + unsigned int last_ev; /* last PPS event id */ wait_queue_head_t queue; /* PPS event queue */ unsigned int id; /* PPS source unique ID */ struct cdev cdev; struct device *dev; - int devno; struct fasync_struct *async_queue; /* fasync method */ spinlock_t lock; - - atomic_t usage; /* usage count */ }; /* * Global variables */ -extern spinlock_t pps_idr_lock; -extern struct idr pps_idr; -extern struct timespec pps_irq_ts[]; - extern struct device_attribute pps_attrs[]; /* * Exported functions */ -struct pps_device *pps_get_source(int source); -extern void pps_put_source(struct pps_device *pps); -extern int pps_register_source(struct pps_source_info *info, - int default_params); -extern void pps_unregister_source(int source); +extern struct pps_device *pps_register_source( + struct pps_source_info *info, int default_params); +extern void pps_unregister_source(struct pps_device *pps); extern int pps_register_cdev(struct pps_device *pps); extern void pps_unregister_cdev(struct pps_device *pps); -extern void pps_event(int source, struct pps_ktime *ts, int event, void *data); +extern void pps_event(struct pps_device *pps, + struct pps_event_time *ts, int event, void *data); + +static inline void timespec_to_pps_ktime(struct pps_ktime *kt, + struct timespec ts) +{ + kt->sec = ts.tv_sec; + kt->nsec = ts.tv_nsec; +} + +#ifdef CONFIG_NTP_PPS + +static inline void pps_get_ts(struct pps_event_time *ts) +{ + getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); +} + +#else /* CONFIG_NTP_PPS */ + +static inline void pps_get_ts(struct pps_event_time *ts) +{ + getnstimeofday(&ts->ts_real); +} + +#endif /* CONFIG_NTP_PPS */ + +#endif /* LINUX_PPS_KERNEL_H */ + diff --git a/include/linux/printk.h b/include/linux/printk.h index b772ca5fbdf0..ee048e77e1ae 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -4,14 +4,14 @@ extern const char linux_banner[]; extern const char linux_proc_banner[]; -#define KERN_EMERG "<0>" /* system is unusable */ -#define KERN_ALERT "<1>" /* action must be taken immediately */ -#define KERN_CRIT "<2>" /* critical conditions */ -#define KERN_ERR "<3>" /* error conditions */ -#define KERN_WARNING "<4>" /* warning conditions */ -#define KERN_NOTICE "<5>" /* normal but significant condition */ -#define KERN_INFO "<6>" /* informational */ -#define KERN_DEBUG "<7>" /* debug-level messages */ +#define KERN_EMERG "<0>" /* system is unusable */ +#define KERN_ALERT "<1>" /* action must be taken immediately */ +#define KERN_CRIT "<2>" /* critical conditions */ +#define KERN_ERR "<3>" /* error conditions */ +#define KERN_WARNING "<4>" /* warning conditions */ +#define KERN_NOTICE "<5>" /* normal but significant condition */ +#define KERN_INFO "<6>" /* informational */ +#define KERN_DEBUG "<7>" /* debug-level messages */ /* Use the default kernel loglevel */ #define KERN_DEFAULT "" @@ -20,7 +20,7 @@ extern const char linux_proc_banner[]; * line that had no enclosing \n). Only to be used by core/arch code * during early bootup (a continued line is not SMP-safe otherwise). 
*/ -#define KERN_CONT "" +#define KERN_CONT "" extern int console_printk[]; @@ -29,6 +29,17 @@ extern int console_printk[]; #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) +static inline void console_silent(void) +{ + console_loglevel = 0; +} + +static inline void console_verbose(void) +{ + if (console_loglevel) + console_loglevel = 15; +} + struct va_format { const char *fmt; va_list *va; @@ -65,11 +76,27 @@ struct va_format { */ #define HW_ERR "[Hardware Error]: " +/* + * Dummy printk for disabled debugging statements to use whilst maintaining + * gcc's format and side-effect checking. + */ +static inline __attribute__ ((format (printf, 1, 2))) +int no_printk(const char *fmt, ...) +{ + return 0; +} + +extern asmlinkage __attribute__ ((format (printf, 1, 2))) +void early_printk(const char *fmt, ...); + +extern int printk_needs_cpu(int cpu); +extern void printk_tick(void); + #ifdef CONFIG_PRINTK -asmlinkage int vprintk(const char *fmt, va_list args) - __attribute__ ((format (printf, 1, 0))); -asmlinkage int printk(const char * fmt, ...) - __attribute__ ((format (printf, 1, 2))) __cold; +asmlinkage __attribute__ ((format (printf, 1, 0))) +int vprintk(const char *fmt, va_list args); +asmlinkage __attribute__ ((format (printf, 1, 2))) __cold +int printk(const char *fmt, ...); /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -83,99 +110,56 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, extern int printk_delay_msec; extern int dmesg_restrict; - -/* - * Print a one-time message (analogous to WARN_ONCE() et al): - */ -#define printk_once(x...) ({ \ - static bool __print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk(x); \ - } \ -}) +extern int kptr_restrict; void log_buf_kexec_setup(void); #else -static inline int vprintk(const char *s, va_list args) - __attribute__ ((format (printf, 1, 0))); -static inline int vprintk(const char *s, va_list args) { return 0; } -static inline int printk(const char *s, ...) - __attribute__ ((format (printf, 1, 2))); -static inline int __cold printk(const char *s, ...) { return 0; } -static inline int printk_ratelimit(void) { return 0; } -static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ - unsigned int interval_msec) \ - { return false; } - -/* No effect, but we still get type checking even in the !PRINTK case: */ -#define printk_once(x...) printk(x) - -static inline void log_buf_kexec_setup(void) +static inline __attribute__ ((format (printf, 1, 0))) +int vprintk(const char *s, va_list args) { + return 0; } -#endif - -/* - * Dummy printk for disabled debugging statements to use whilst maintaining - * gcc's format and side-effect checking. - */ -static inline __attribute__ ((format (printf, 1, 2))) -int no_printk(const char *s, ...) { return 0; } - -extern int printk_needs_cpu(int cpu); -extern void printk_tick(void); - -extern void asmlinkage __attribute__((format(printf, 1, 2))) - early_printk(const char *fmt, ...); - -static inline void console_silent(void) +static inline __attribute__ ((format (printf, 1, 2))) __cold +int printk(const char *s, ...) 
{ - console_loglevel = 0; + return 0; +} +static inline int printk_ratelimit(void) +{ + return 0; +} +static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec) +{ + return false; } -static inline void console_verbose(void) +static inline void log_buf_kexec_setup(void) { - if (console_loglevel) - console_loglevel = 15; } +#endif extern void dump_stack(void) __cold; -enum { - DUMP_PREFIX_NONE, - DUMP_PREFIX_ADDRESS, - DUMP_PREFIX_OFFSET -}; -extern void hex_dump_to_buffer(const void *buf, size_t len, - int rowsize, int groupsize, - char *linebuf, size_t linebuflen, bool ascii); -extern void print_hex_dump(const char *level, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - const void *buf, size_t len, bool ascii); -extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, - const void *buf, size_t len); - #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif #define pr_emerg(fmt, ...) \ - printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert(fmt, ...) \ - printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit(fmt, ...) \ - printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #define pr_err(fmt, ...) \ - printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #define pr_warning(fmt, ...) \ - printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn pr_warning #define pr_notice(fmt, ...) \ - printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info(fmt, ...) \ - printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) @@ -185,7 +169,7 @@ extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel(fmt, ...) \ - ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ @@ -198,7 +182,51 @@ extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, dynamic_pr_debug(fmt, ##__VA_ARGS__) #else #define pr_debug(fmt, ...) \ - ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* + * Print a one-time message (analogous to WARN_ONCE() et al): + */ + +#ifdef CONFIG_PRINTK +#define printk_once(fmt, ...) \ +({ \ + static bool __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk(fmt, ##__VA_ARGS__); \ + } \ +}) +#else +#define printk_once(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#endif + +#define pr_emerg_once(fmt, ...) \ + printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_once(fmt, ...) \ + printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_once(fmt, ...) \ + printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_once(fmt, ...) \ + printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn_once(fmt, ...) \ + printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_once(fmt, ...) \ + printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_once(fmt, ...) \ + printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +#define pr_cont_once(fmt, ...) 
\ + printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_once(fmt, ...) \ + printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_once(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* @@ -206,7 +234,8 @@ extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, * no local ratelimit_state used in the !PRINTK case */ #ifdef CONFIG_PRINTK -#define printk_ratelimited(fmt, ...) ({ \ +#define printk_ratelimited(fmt, ...) \ +({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ @@ -215,34 +244,59 @@ extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, printk(fmt, ##__VA_ARGS__); \ }) #else -/* No effect, but we still get type checking even in the !PRINTK case: */ -#define printk_ratelimited printk +#define printk_ratelimited(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) #endif -#define pr_emerg_ratelimited(fmt, ...) \ +#define pr_emerg_ratelimited(fmt, ...) \ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) -#define pr_alert_ratelimited(fmt, ...) \ +#define pr_alert_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) -#define pr_crit_ratelimited(fmt, ...) \ +#define pr_crit_ratelimited(fmt, ...) \ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) -#define pr_err_ratelimited(fmt, ...) \ +#define pr_err_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warning_ratelimited(fmt, ...) \ +#define pr_warn_ratelimited(fmt, ...) \ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warn_ratelimited pr_warning_ratelimited -#define pr_notice_ratelimited(fmt, ...) \ +#define pr_notice_ratelimited(fmt, ...) \ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) -#define pr_info_ratelimited(fmt, ...) \ +#define pr_info_ratelimited(fmt, ...) \ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* no pr_cont_ratelimited, don't do that... */ /* If you are writing a driver, please use dev_dbg instead */ #if defined(DEBUG) -#define pr_debug_ratelimited(fmt, ...) \ +#define pr_debug_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_ratelimited(fmt, ...) 
\ - ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ - ##__VA_ARGS__); 0; }) + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +extern void hex_dump_to_buffer(const void *buf, size_t len, + int rowsize, int groupsize, + char *linebuf, size_t linebuflen, bool ascii); +#ifdef CONFIG_PRINTK +extern void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len); +#else +static inline void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ +} +static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len) +{ +} + #endif #endif diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index ab2baa5c4884..23241c2fecce 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -145,6 +145,22 @@ static inline void *radix_tree_deref_slot(void **pslot) return rcu_dereference(*pslot); } +/** + * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * Returns: item that was stored in that slot with any direct pointer flag + * removed. + * + * Similar to radix_tree_deref_slot but only used during migration when a pages + * mapping is being moved. The caller does not hold the RCU read lock but it + * must hold the tree lock to prevent parallel updates. + */ +static inline void *radix_tree_deref_slot_protected(void **pslot, + spinlock_t *treelock) +{ + return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); +} + /** * radix_tree_deref_retry - check radix_tree_deref_slot * @arg: pointer returned by radix_tree_deref_slot diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h index b872b493724d..cf1244fbf3b6 100644 --- a/include/linux/rculist_bl.h +++ b/include/linux/rculist_bl.h @@ -11,7 +11,8 @@ static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h, struct hlist_bl_node *n) { LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); - LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK)); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); rcu_assign_pointer(h->first, (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK)); } diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h index f509877c2ed4..6a210f1511fc 100644 --- a/include/linux/regulator/ab8500.h +++ b/include/linux/regulator/ab8500.h @@ -11,15 +11,17 @@ #define __LINUX_MFD_AB8500_REGULATOR_H /* AB8500 regulators */ -#define AB8500_LDO_AUX1 0 -#define AB8500_LDO_AUX2 1 -#define AB8500_LDO_AUX3 2 -#define AB8500_LDO_INTCORE 3 -#define AB8500_LDO_TVOUT 4 -#define AB8500_LDO_AUDIO 5 -#define AB8500_LDO_ANAMIC1 6 -#define AB8500_LDO_ANAMIC2 7 -#define AB8500_LDO_DMIC 8 -#define AB8500_LDO_ANA 9 - +enum ab8500_regulator_id { + AB8500_LDO_AUX1, + AB8500_LDO_AUX2, + AB8500_LDO_AUX3, + AB8500_LDO_INTCORE, + AB8500_LDO_TVOUT, + AB8500_LDO_AUDIO, + AB8500_LDO_ANAMIC1, + AB8500_LDO_ANAMIC2, + AB8500_LDO_DMIC, + AB8500_LDO_ANA, + AB8500_NUM_REGULATORS, +}; #endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index ebd747265294..7954f6bd7edb 100644 
--- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -154,6 +154,7 @@ int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_get_voltage(struct regulator *regulator); +int regulator_sync_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA); int regulator_get_current_limit(struct regulator *regulator); diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 592cd7c642c2..b8ed16a33c47 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -42,7 +42,11 @@ enum regulator_status { * * @set_voltage: Set the voltage for the regulator within the range specified. * The driver should select the voltage closest to min_uV. + * @set_voltage_sel: Set the voltage for the regulator using the specified + * selector. * @get_voltage: Return the currently configured voltage for the regulator. + * @get_voltage_sel: Return the currently configured voltage selector for the + * regulator. * @list_voltage: Return one of the supported voltages, in microvolts; zero * if the selector indicates a voltage that is unusable on this system; * or negative errno. Selectors range from zero to one less than @@ -79,8 +83,11 @@ struct regulator_ops { int (*list_voltage) (struct regulator_dev *, unsigned selector); /* get/set regulator voltage */ - int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV, + unsigned *selector); + int (*set_voltage_sel) (struct regulator_dev *, unsigned selector); int (*get_voltage) (struct regulator_dev *); + int (*get_voltage_sel) (struct regulator_dev *); /* get/set regulator current */ int (*set_current_limit) (struct regulator_dev *, @@ -168,9 +175,9 @@ struct regulator_desc { */ struct regulator_dev { struct regulator_desc *desc; - int use_count; - int open_count; int exclusive; + u32 use_count; + u32 open_count; /* lists we belong to */ struct list_head list; /* list of all regulators */ @@ -188,10 +195,14 @@ struct regulator_dev { struct regulator_dev *supply; /* for tree */ void *reg_data; /* regulator_dev data */ + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif }; struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, - struct device *dev, struct regulator_init_data *init_data, + struct device *dev, const struct regulator_init_data *init_data, void *driver_data); void regulator_unregister(struct regulator_dev *rdev); diff --git a/include/linux/rio.h b/include/linux/rio.h index 0bed941f9b13..ff681ebba585 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -66,13 +66,61 @@ #define RIO_PW_MSG_SIZE 64 +/* + * A component tag value (stored in the component tag CSR) is used as device's + * unique identifier assigned during enumeration. Besides being used for + * identifying switches (which do not have device ID register), it also is used + * by error management notification and therefore has to be assigned + * to endpoints as well. 
+ */ +#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */ +#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */ + extern struct bus_type rio_bus_type; extern struct device rio_bus; extern struct list_head rio_devices; /* list of all devices */ struct rio_mport; +struct rio_dev; union rio_pw_msg; +/** + * struct rio_switch - RIO switch info + * @node: Node in global list of switches + * @switchid: Switch ID that is unique across a network + * @route_table: Copy of switch routing table + * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 + * @add_entry: Callback for switch-specific route add function + * @get_entry: Callback for switch-specific route get function + * @clr_table: Callback for switch-specific clear route table function + * @set_domain: Callback for switch-specific domain setting function + * @get_domain: Callback for switch-specific domain get function + * @em_init: Callback for switch-specific error management init function + * @em_handle: Callback for switch-specific error management handler function + * @sw_sysfs: Callback that initializes switch-specific sysfs attributes + * @nextdev: Array of per-port pointers to the next attached device + */ +struct rio_switch { + struct list_head node; + u16 switchid; + u8 *route_table; + u32 port_ok; + int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port); + int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port); + int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table); + int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain); + int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain); + int (*em_init) (struct rio_dev *dev); + int (*em_handle) (struct rio_dev *dev, u8 swport); + int (*sw_sysfs) (struct rio_dev *dev, int create); + struct rio_dev *nextdev[0]; +}; + /** * struct rio_dev - RIO device info * @global_list: Node in list of all RIO devices @@ -93,13 +141,14 @@ union rio_pw_msg; * @phys_efptr: RIO device extended features pointer * @em_efptr: RIO Error Management features pointer * @dma_mask: Mask of bits of RIO address this device implements - * @rswitch: Pointer to &struct rio_switch if valid for this device * @driver: Driver claiming this device * @dev: Device model device * @riores: RIO resources this device owns * @pwcback: port-write callback function for this device - * @destid: Network destination ID + * @destid: Network destination ID (or associated destid for switch) + * @hopcount: Hopcount to this device * @prev: Previous RIO device connected to the current one + * @rswitch: struct rio_switch (if valid for this device) */ struct rio_dev { struct list_head global_list; /* node in list of all RIO devices */ @@ -120,18 +169,20 @@ struct rio_dev { u32 phys_efptr; u32 em_efptr; u64 dma_mask; - struct rio_switch *rswitch; /* RIO switch info */ struct rio_driver *driver; /* RIO driver claiming this device */ struct device dev; /* LDM device structure */ struct resource riores[RIO_MAX_DEV_RESOURCES]; int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step); u16 destid; + u8 hopcount; struct rio_dev *prev; + struct rio_switch rswitch[0]; /* RIO switch info */ }; #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list) #define to_rio_dev(n) container_of(n, struct rio_dev, dev) +#define sw_to_rio_dev(n) 
container_of(n, struct rio_dev, rswitch[0]) /** * struct rio_msg - RIO message event @@ -224,49 +275,6 @@ struct rio_net { #define RIO_SW_SYSFS_CREATE 1 /* Create switch attributes */ #define RIO_SW_SYSFS_REMOVE 0 /* Remove switch attributes */ -/** - * struct rio_switch - RIO switch info - * @node: Node in global list of switches - * @rdev: Associated RIO device structure - * @switchid: Switch ID that is unique across a network - * @hopcount: Hopcount to this switch - * @destid: Associated destid in the path - * @route_table: Copy of switch routing table - * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 - * @add_entry: Callback for switch-specific route add function - * @get_entry: Callback for switch-specific route get function - * @clr_table: Callback for switch-specific clear route table function - * @set_domain: Callback for switch-specific domain setting function - * @get_domain: Callback for switch-specific domain get function - * @em_init: Callback for switch-specific error management initialization function - * @em_handle: Callback for switch-specific error management handler function - * @sw_sysfs: Callback that initializes switch-specific sysfs attributes - * @nextdev: Array of per-port pointers to the next attached device - */ -struct rio_switch { - struct list_head node; - struct rio_dev *rdev; - u16 switchid; - u16 hopcount; - u16 destid; - u8 *route_table; - u32 port_ok; - int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 route_port); - int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 * route_port); - int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table); - int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, - u8 sw_domain); - int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, - u8 *sw_domain); - int (*em_init) (struct rio_dev *dev); - int (*em_handle) (struct rio_dev *dev, u8 swport); - int (*sw_sysfs) (struct rio_dev *dev, int create); - struct rio_dev *nextdev[0]; -}; - /* Low-level architecture-dependent routines */ /** diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index edc55da717b3..e09e565c4bce 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h @@ -150,16 +150,8 @@ static inline int rio_local_write_config_8(struct rio_mport *port, u32 offset, static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset, u32 * data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_read_config_32(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_read_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; /** @@ -174,16 +166,8 @@ static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset, static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset, u32 data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_write_config_32(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_write_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; /** @@ -198,16 +182,8 @@ static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset, static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset, u16 * 
data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_read_config_16(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_read_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; /** @@ -222,16 +198,8 @@ static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset, static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset, u16 data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_write_config_16(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_write_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; /** @@ -245,16 +213,8 @@ static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset, */ static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_read_config_8(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_read_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; /** @@ -268,16 +228,8 @@ static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data) */ static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data) { - u8 hopcount = 0xff; - u16 destid = rdev->destid; - - if (rdev->rswitch) { - destid = rdev->rswitch->destid; - hopcount = rdev->rswitch->hopcount; - } - - return rio_mport_write_config_8(rdev->net->hport, destid, hopcount, - offset, data); + return rio_mport_write_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); }; extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index ee7b6ada188f..7410d3365e2a 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h @@ -36,5 +36,7 @@ #define RIO_DID_IDTCPS10Q 0x035e #define RIO_DID_IDTCPS1848 0x0374 #define RIO_DID_IDTCPS1616 0x0379 +#define RIO_DID_IDTVPS1616 0x0377 +#define RIO_DID_IDTSPS1616 0x0378 #endif /* LINUX_RIO_IDS_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bb83c0da2071..e9fd04ca1e51 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -198,6 +198,8 @@ enum ttu_flags { }; #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) +bool is_vma_temporary_stack(struct vm_area_struct *vma); + int try_to_unmap(struct page *, enum ttu_flags flags); int try_to_unmap_one(struct page *, struct vm_area_struct *, unsigned long address, enum ttu_flags flags); diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h index c490fbc43fe2..5f57f93b284f 100644 --- a/include/linux/romfs_fs.h +++ b/include/linux/romfs_fs.h @@ -1,6 +1,9 @@ #ifndef __LINUX_ROMFS_FS_H #define __LINUX_ROMFS_FS_H +#include +#include + /* The basic structures of the romfs filesystem */ #define ROMBSIZE BLOCK_SIZE diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h index dbce22faa660..fbe58b7e63eb 100644 --- a/include/linux/s3c_adc_battery.h +++ b/include/linux/s3c_adc_battery.h @@ -14,6 +14,7 @@ struct s3c_adc_bat_pdata { void (*disable_charger)(void); int gpio_charge_finished; + int gpio_inverted; const struct s3c_adc_bat_thresh *lut_noac; unsigned 
int lut_noac_cnt; diff --git a/include/linux/sched.h b/include/linux/sched.h index abc527aa8550..d747f948b34e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -21,7 +21,8 @@ #define CLONE_DETACHED 0x00400000 /* Unused, ignored */ #define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ #define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ -#define CLONE_STOPPED 0x02000000 /* Start in stopped state */ +/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state) + and is now available for re-use. */ #define CLONE_NEWUTS 0x04000000 /* New utsname group? */ #define CLONE_NEWIPC 0x08000000 /* New ipcs */ #define CLONE_NEWUSER 0x10000000 /* New user namespace */ @@ -433,6 +434,7 @@ extern int get_dumpable(struct mm_struct *mm); #endif /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -633,6 +635,8 @@ struct signal_struct { int oom_adj; /* OOM kill score adjustment (bit shift) */ int oom_score_adj; /* OOM kill score adjustment */ + int oom_score_adj_min; /* OOM kill score adjustment minimum value. + * Only settable by CAP_SYS_RESOURCE. */ struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations @@ -683,7 +687,7 @@ struct user_struct { atomic_t fanotify_listeners; #endif #ifdef CONFIG_EPOLL - atomic_t epoll_watches; /* The number of file descriptors currently watched */ + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ #endif #ifdef CONFIG_POSIX_MQUEUE /* protected by mq_lock */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index a23fa29d4eb0..758c5b0c6fd3 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -212,6 +212,7 @@ #include #include #include +#include struct uart_port; struct serial_struct; @@ -528,10 +529,10 @@ uart_handle_dcd_change(struct uart_port *uport, unsigned int status) struct uart_state *state = uport->state; struct tty_port *port = &state->port; struct tty_ldisc *ld = tty_ldisc_ref(port->tty); - struct timespec ts; + struct pps_event_time ts; if (ld && ld->ops->dcd_change) - getnstimeofday(&ts); + pps_get_ts(&ts); uport->icount.dcd++; #ifdef CONFIG_HARD_PPS diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index baed2122c5a6..1630d9cae22a 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -8,6 +8,23 @@ * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) */ +enum { + SCBRR_ALGO_1, /* ((clk + 16 * bps) / (16 * bps) - 1) */ + SCBRR_ALGO_2, /* ((clk + 16 * bps) / (32 * bps) - 1) */ + SCBRR_ALGO_3, /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */ + SCBRR_ALGO_4, /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */ + SCBRR_ALGO_5, /* (((clk * 1000 / 32) / bps) - 1) */ +}; + +#define SCSCR_TIE (1 << 7) +#define SCSCR_RIE (1 << 6) +#define SCSCR_TE (1 << 5) +#define SCSCR_RE (1 << 4) +#define SCSCR_REIE (1 << 3) /* not supported by all parts */ +#define SCSCR_TOIE (1 << 2) /* not supported by all parts */ +#define SCSCR_CKE1 (1 << 1) +#define SCSCR_CKE0 (1 << 0) + /* Offsets into the sci_port->irqs array */ enum { SCIx_ERI_IRQ, @@ -29,7 +46,12 @@ struct plat_sci_port { unsigned int type; /* SCI / SCIF / IRDA */ upf_t flags; /* UPF_* flags */ char *clk; /* clock string */ + + unsigned int scbrr_algo_id; /* SCBRR 
calculation algo */ + unsigned int scscr; /* SCSCR initialization */ + struct device *dma_dev; + #ifdef CONFIG_SERIAL_SH_SCI_DMA unsigned int dma_slave_tx; unsigned int dma_slave_rx; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 20ec0a64cb9f..bf221d65d9ad 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -255,6 +255,11 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif +#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \ + defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE) +#define NET_SKBUFF_NF_DEFRAG_NEEDED 1 +#endif + /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -362,6 +367,8 @@ struct sk_buff { void (*destructor)(struct sk_buff *skb); #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack *nfct; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED struct sk_buff *nfct_reasm; #endif #ifdef CONFIG_BRIDGE_NETFILTER @@ -2057,6 +2064,8 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) if (nfct) atomic_inc(&nfct->use); } +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED static inline void nf_conntrack_get_reasm(struct sk_buff *skb) { if (skb) @@ -2085,6 +2094,8 @@ static inline void nf_reset(struct sk_buff *skb) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb->nfct); skb->nfct = NULL; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED nf_conntrack_put_reasm(skb->nfct_reasm); skb->nfct_reasm = NULL; #endif @@ -2101,6 +2112,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) dst->nfct = src->nfct; nf_conntrack_get(src->nfct); dst->nfctinfo = src->nfctinfo; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -2114,6 +2127,8 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(dst->nfct); +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED nf_conntrack_put_reasm(dst->nfct_reasm); #endif #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/include/linux/socket.h b/include/linux/socket.h index 5f65f14c4f44..edbb1d07ddf4 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -191,7 +191,8 @@ struct ucred { #define AF_PHONET 35 /* Phonet sockets */ #define AF_IEEE802154 36 /* IEEE802154 sockets */ #define AF_CAIF 37 /* CAIF sockets */ -#define AF_MAX 38 /* For now.. */ +#define AF_ALG 38 /* Algorithm sockets */ +#define AF_MAX 39 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -232,6 +233,7 @@ struct ucred { #define PF_PHONET AF_PHONET #define PF_IEEE802154 AF_IEEE802154 #define PF_CAIF AF_CAIF +#define PF_ALG AF_ALG #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
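The SCBRR_ALGO_* comments above spell out the bit-rate register formulas directly. A helper that applies them literally might look like the sketch below; the function itself is illustrative and not part of the patch, and it restates the formulas as written (overflow handling for very large clock values is omitted).

#include <linux/serial_sci.h>

static unsigned int sci_scbrr(unsigned int algo_id, unsigned long clk,
			      unsigned int bps)
{
	switch (algo_id) {
	case SCBRR_ALGO_1:
		return (clk + 16 * bps) / (16 * bps) - 1;
	case SCBRR_ALGO_2:
		return (clk + 16 * bps) / (32 * bps) - 1;
	case SCBRR_ALGO_3:
		return (clk * 2 + 16 * bps) / (16 * bps) - 1;
	case SCBRR_ALGO_4:
		return (clk * 2 + 16 * bps) / (32 * bps) - 1;
	case SCBRR_ALGO_5:
		return (clk * 1000 / 32) / bps - 1;
	}
	return 0;
}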
*/ @@ -305,6 +307,7 @@ struct ucred { #define SOL_RDS 276 #define SOL_IUCV 277 #define SOL_CAIF 278 +#define SOL_ALG 279 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 78aa104250b7..7898ea13de70 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -256,10 +256,13 @@ static inline time_t get_expiry(char **bpp) return rv - boot.tv_sec; } +#ifdef CONFIG_NFSD_DEPRECATED static inline void sunrpc_invalidate(struct cache_head *h, struct cache_detail *detail) { h->expiry_time = seconds_since_boot() - 1; detail->nextcheck = seconds_since_boot(); } +#endif /* CONFIG_NFSD_DEPRECATED */ + #endif /* _LINUX_SUNRPC_CACHE_H_ */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index c81d4d8be3a9..ea29330b78bd 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -269,6 +269,7 @@ struct svc_rqst { struct cache_req rq_chandle; /* handle passed to caches for * request delaying */ + bool rq_dropme; /* Catering to nfsd */ struct auth_domain * rq_client; /* RPC peer info */ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 357da5e0daa3..059877b4d85b 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -63,7 +63,6 @@ struct svc_xprt { #define XPT_LISTENER 11 /* listening endpoint */ #define XPT_CACHE_AUTH 12 /* cache auth info */ - struct svc_pool *xpt_pool; /* current pool iff queued */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ struct mutex xpt_mutex; /* to serialize sending data */ @@ -81,6 +80,7 @@ struct svc_xprt { void *xpt_bc_sid; /* back channel session ID */ struct net *xpt_net; + struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ }; static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 1b353a76c304..04dba23c59f2 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,7 +28,6 @@ struct svc_sock { /* private TCP part */ u32 sk_reclen; /* length of record */ u32 sk_tcplen; /* current read length */ - struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */ }; /* diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 89d10d279a20..bef0f535f746 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -321,6 +321,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); #define XPRT_CLOSING (6) #define XPRT_CONNECTION_ABORT (7) #define XPRT_CONNECTION_CLOSE (8) +#define XPRT_INITIALIZED (9) static inline void xprt_set_connected(struct rpc_xprt *xprt) { diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 144b34be5c32..5a89e3612875 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -122,7 +122,7 @@ struct platform_suspend_ops { * suspend_set_ops - set platform dependent suspend operations * @ops: The new suspend operations to set. 
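suspend_set_ops(), whose constified prototype follows, now takes a const pointer, so platform code can keep its suspend operations in read-only data. A hedged sketch under that assumption; the enter callback and initcall are hypothetical.

#include <linux/init.h>
#include <linux/suspend.h>

static int myplat_suspend_enter(suspend_state_t state)
{
	/* Platform-specific low-power entry would go here. */
	return 0;
}

static const struct platform_suspend_ops myplat_suspend_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= myplat_suspend_enter,
};

static int __init myplat_pm_init(void)
{
	suspend_set_ops(&myplat_suspend_ops);
	return 0;
}
arch_initcall(myplat_pm_init);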
*/ -extern void suspend_set_ops(struct platform_suspend_ops *ops); +extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); /** @@ -147,7 +147,7 @@ extern int pm_suspend(suspend_state_t state); #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {} +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } #endif /* !CONFIG_SUSPEND */ @@ -245,7 +245,7 @@ extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); -extern void hibernation_set_ops(struct platform_hibernation_ops *ops); +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); extern bool system_entering_hibernation(void); #else /* CONFIG_HIBERNATION */ @@ -253,28 +253,11 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } #endif /* CONFIG_HIBERNATION */ -#ifdef CONFIG_SUSPEND_NVS -extern int suspend_nvs_register(unsigned long start, unsigned long size); -extern int suspend_nvs_alloc(void); -extern void suspend_nvs_free(void); -extern void suspend_nvs_save(void); -extern void suspend_nvs_restore(void); -#else /* CONFIG_SUSPEND_NVS */ -static inline int suspend_nvs_register(unsigned long a, unsigned long b) -{ - return 0; -} -static inline int suspend_nvs_alloc(void) { return 0; } -static inline void suspend_nvs_free(void) {} -static inline void suspend_nvs_save(void) {} -static inline void suspend_nvs_restore(void) {} -#endif /* CONFIG_SUSPEND_NVS */ - #ifdef CONFIG_PM_SLEEP void save_processor_state(void); void restore_processor_state(void); diff --git a/include/linux/swap.h b/include/linux/swap.h index eba53e71d2cc..4d559325d919 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -208,6 +208,8 @@ extern unsigned int nr_free_pagecache_pages(void); /* linux/mm/swap.c */ extern void __lru_cache_add(struct page *, enum lru_list lru); extern void lru_cache_add_lru(struct page *, enum lru_list lru); +extern void lru_add_page_tail(struct zone* zone, + struct page *page, struct page *page_tail); extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 1de8b9eb841b..8651556dbd52 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -77,7 +77,7 @@ struct thermal_cooling_device { char type[THERMAL_NAME_LENGTH]; struct device device; void *devdata; - struct thermal_cooling_device_ops *ops; + const struct thermal_cooling_device_ops *ops; struct list_head node; }; @@ -114,7 +114,7 @@ struct thermal_zone_device { int last_temperature; bool passive; unsigned int forced_passive; - struct thermal_zone_device_ops *ops; + const struct thermal_zone_device_ops *ops; struct list_head cooling_devices; struct idr idr; struct mutex lock; /* protect cooling devices list */ @@ -127,13 
+127,41 @@ struct thermal_zone_device { struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ #endif }; +/* Adding event notification support elements */ +#define THERMAL_GENL_FAMILY_NAME "thermal_event" +#define THERMAL_GENL_VERSION 0x01 +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" + +enum events { + THERMAL_AUX0, + THERMAL_AUX1, + THERMAL_CRITICAL, + THERMAL_DEV_FAULT, +}; + +struct thermal_genl_event { + u32 orig; + enum events event; +}; +/* attributes of thermal_genl_family */ +enum { + THERMAL_GENL_ATTR_UNSPEC, + THERMAL_GENL_ATTR_EVENT, + __THERMAL_GENL_ATTR_MAX, +}; +#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) + +/* commands supported by the thermal_genl_family */ +enum { + THERMAL_GENL_CMD_UNSPEC, + THERMAL_GENL_CMD_EVENT, + __THERMAL_GENL_CMD_MAX, +}; +#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, - struct - thermal_zone_device_ops - *, int tc1, int tc2, - int passive_freq, - int polling_freq); + const struct thermal_zone_device_ops *, int tc1, int tc2, + int passive_freq, int polling_freq); void thermal_zone_device_unregister(struct thermal_zone_device *); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, @@ -142,9 +170,8 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, - struct - thermal_cooling_device_ops - *); + const struct thermal_cooling_device_ops *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); +extern int generate_netlink_event(u32 orig, enum events event); #endif /* __THERMAL_H__ */ diff --git a/include/linux/time.h b/include/linux/time.h index 9f15ac7ab92a..1e6d3b59238d 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -158,6 +158,8 @@ extern unsigned int alarm_setitimer(unsigned int seconds); extern int do_getitimer(int which, struct itimerval *value); extern void getnstimeofday(struct timespec *tv); extern void getrawmonotonic(struct timespec *ts); +extern void getnstime_raw_and_real(struct timespec *ts_raw, + struct timespec *ts_real); extern void getboottime(struct timespec *ts); extern void monotonic_to_bootbased(struct timespec *ts); diff --git a/include/linux/timex.h b/include/linux/timex.h index 32d852f8cbe4..d23999f9499d 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -268,6 +268,7 @@ extern u64 tick_length; extern void second_overflow(void); extern void update_ntp_one_tick(void); extern int do_adjtimex(struct timex *); +extern void hardpps(const struct timespec *, const struct timespec *); int read_current_timer(unsigned long *timer_val); diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h index 6a7c4edf0e13..772dedbc3a22 100644 --- a/include/linux/toshiba.h +++ b/include/linux/toshiba.h @@ -33,6 +33,8 @@ typedef struct { unsigned int edi __attribute__ ((packed)); } SMMRegisters; +#ifdef __KERNEL__ int tosh_smm(SMMRegisters *regs); +#endif /* __KERNEL__ */ #endif diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 526d66f066a3..ff7dc08696a8 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -101,14 +101,15 @@ * any pending driver I/O is completed. 
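The thermal netlink additions above let platform code push THERMAL_AUX0/AUX1/CRITICAL/DEV_FAULT events to user space through generate_netlink_event(). A sketch of a caller follows; when to report and which originator id to pass are driver policy, not defined by the patch.

#include <linux/kernel.h>
#include <linux/thermal.h>

/* Hypothetical helper: report a device fault for a given sensor id. */
static void mydrv_report_thermal_fault(u32 sensor_id)
{
	int ret = generate_netlink_event(sensor_id, THERMAL_DEV_FAULT);

	if (ret)
		pr_warn("thermal event notification failed: %d\n", ret);
}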
* * void (*dcd_change)(struct tty_struct *tty, unsigned int status, - * struct timespec *ts) + * struct pps_event_time *ts) * * Tells the discipline that the DCD pin has changed its status and - * the relative timestamp. Pointer ts can be NULL. + * the relative timestamp. Pointer ts cannot be NULL. */ #include #include +#include struct tty_ldisc_ops { int magic; @@ -143,7 +144,7 @@ struct tty_ldisc_ops { char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int, - struct timespec *); + struct pps_event_time *); struct module *owner; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index fa261a0da280..8da8c4e87da3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -67,21 +67,21 @@ struct u64_stats_sync { #endif }; -static void inline u64_stats_update_begin(struct u64_stats_sync *syncp) +static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_begin(&syncp->seq); #endif } -static void inline u64_stats_update_end(struct u64_stats_sync *syncp) +static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); #endif } -static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); @@ -93,7 +93,7 @@ static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *sy #endif } -static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, +static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) @@ -112,7 +112,7 @@ static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, * - UP 32bit must disable BH. * - 64bit have no problem atomically reading u64 values, irq safe. 
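The u64_stats_sync fixes above only reorder the misplaced inline keyword; the usage pattern is unchanged. For reference, a sketch of the intended writer/reader pairing (the mydev_stats structure and field names are illustrative):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct mydev_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side, e.g. in a driver's transmit path. */
static void mydev_stats_add(struct mydev_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. */
static void mydev_stats_read(const struct mydev_stats *s,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}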
*/ -static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) +static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); @@ -124,7 +124,7 @@ static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync #endif } -static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, +static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h index c9a6abd972a1..c0d817de4df2 100644 --- a/include/linux/unaligned/packed_struct.h +++ b/include/linux/unaligned/packed_struct.h @@ -3,9 +3,9 @@ #include -struct __una_u16 { u16 x; } __attribute__((packed)); -struct __una_u32 { u32 x; } __attribute__((packed)); -struct __una_u64 { u64 x; } __attribute__((packed)); +struct __una_u16 { u16 x; } __packed; +struct __una_u32 { u32 x; } __packed; +struct __una_u64 { u64 x; } __packed; static inline u16 __get_unaligned_cpu16(const void *p) { diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 8178156711f9..faf467944baf 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -6,7 +6,7 @@ #include #include -#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8) +#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7) #define UIDHASH_SZ (1 << UIDHASH_BITS) struct user_namespace { diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 44b54f619ac6..4ed6fcd6b726 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -59,8 +59,9 @@ extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); -extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, - pgprot_t prot); +extern void *__vmalloc_node_range(unsigned long size, unsigned long align, + unsigned long start, unsigned long end, gfp_t gfp_mask, + pgprot_t prot, int node, void *caller); extern void vfree(const void *addr); extern void *vmap(struct page **pages, unsigned int count, @@ -90,9 +91,6 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, void *caller); -extern struct vm_struct *get_vm_area_node(unsigned long size, - unsigned long flags, int node, - gfp_t gfp_mask); extern struct vm_struct *remove_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, @@ -120,7 +118,7 @@ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #ifdef CONFIG_SMP struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, - size_t align, gfp_t gfp_mask); + size_t align); void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); #endif diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index eaaea37b3b75..833e676d6d92 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -254,6 +254,11 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); void refresh_cpu_vm_stats(int); + +int calculate_pressure_threshold(struct zone *zone); +int calculate_normal_threshold(struct zone *zone); +void 
set_pgdat_percpu_threshold(pg_data_t *pgdat, + int (*calculate_pressure)(struct zone *)); #else /* CONFIG_SMP */ /* @@ -298,6 +303,8 @@ static inline void __dec_zone_page_state(struct page *page, #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state +#define set_pgdat_percpu_threshold(pgdat, callback) { } + static inline void refresh_cpu_vm_stats(int cpu) { } #endif diff --git a/include/linux/xz.h b/include/linux/xz.h new file mode 100644 index 000000000000..64cffa6ddfce --- /dev/null +++ b/include/linux/xz.h @@ -0,0 +1,264 @@ +/* + * XZ decompressor + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_H +#define XZ_H + +#ifdef __KERNEL__ +# include +# include +#else +# include +# include +#endif + +/* In Linux, this is used to make extern functions static when needed. */ +#ifndef XZ_EXTERN +# define XZ_EXTERN extern +#endif + +/** + * enum xz_mode - Operation mode + * + * @XZ_SINGLE: Single-call mode. This uses less RAM than + * than multi-call modes, because the LZMA2 + * dictionary doesn't need to be allocated as + * part of the decoder state. All required data + * structures are allocated at initialization, + * so xz_dec_run() cannot return XZ_MEM_ERROR. + * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2 + * dictionary buffer. All data structures are + * allocated at initialization, so xz_dec_run() + * cannot return XZ_MEM_ERROR. + * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is + * allocated once the required size has been + * parsed from the stream headers. If the + * allocation fails, xz_dec_run() will return + * XZ_MEM_ERROR. + * + * It is possible to enable support only for a subset of the above + * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC, + * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled + * with support for all operation modes, but the preboot code may + * be built with fewer features to minimize code size. + */ +enum xz_mode { + XZ_SINGLE, + XZ_PREALLOC, + XZ_DYNALLOC +}; + +/** + * enum xz_ret - Return codes + * @XZ_OK: Everything is OK so far. More input or more + * output space is required to continue. This + * return code is possible only in multi-call mode + * (XZ_PREALLOC or XZ_DYNALLOC). + * @XZ_STREAM_END: Operation finished successfully. + * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding + * is still possible in multi-call mode by simply + * calling xz_dec_run() again. + * Note that this return value is used only if + * XZ_DEC_ANY_CHECK was defined at build time, + * which is not used in the kernel. Unsupported + * check types return XZ_OPTIONS_ERROR if + * XZ_DEC_ANY_CHECK was not defined at build time. + * @XZ_MEM_ERROR: Allocating memory failed. This return code is + * possible only if the decoder was initialized + * with XZ_DYNALLOC. The amount of memory that was + * tried to be allocated was no more than the + * dict_max argument given to xz_dec_init(). + * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than + * allowed by the dict_max argument given to + * xz_dec_init(). This return value is possible + * only in multi-call mode (XZ_PREALLOC or + * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) + * ignores the dict_max argument. + * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic + * bytes). + * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested + * compression options. 
In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @XZ_DATA_ERROR: Compressed data is corrupt. + * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly + * different between multi-call and single-call + * mode; more information below. + * + * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls + * to XZ code cannot consume any input and cannot produce any new output. + * This happens when there is no new input available, or the output buffer + * is full while at least one output byte is still pending. Assuming your + * code is not buggy, you can get this error only when decoding a compressed + * stream that is truncated or otherwise corrupt. + * + * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer + * is too small or the compressed input is corrupt in a way that makes the + * decoder produce more output than the caller expected. When it is + * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR + * is used instead of XZ_BUF_ERROR. + */ +enum xz_ret { + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR +}; + +/** + * struct xz_buf - Passing input and output buffers to XZ code + * @in: Beginning of the input buffer. This may be NULL if and only + * if in_pos is equal to in_size. + * @in_pos: Current position in the input buffer. This must not exceed + * in_size. + * @in_size: Size of the input buffer + * @out: Beginning of the output buffer. This may be NULL if and only + * if out_pos is equal to out_size. + * @out_pos: Current position in the output buffer. This must not exceed + * out_size. + * @out_size: Size of the output buffer + * + * Only the contents of the output buffer from out[out_pos] onward, and + * the variables in_pos and out_pos are modified by the XZ code. + */ +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +/** + * struct xz_dec - Opaque type to hold the XZ decoder state + */ +struct xz_dec; + +/** + * xz_dec_init() - Allocate and initialize a XZ decoder state + * @mode: Operation mode + * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for + * multi-call decoding. This is ignored in single-call mode + * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dict_max don't make sense. + * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, + * 512 KiB, and 1 MiB are probably the only reasonable values, + * except for kernel and initramfs images where a bigger + * dictionary can be fine and useful. + * + * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at + * once. The caller must provide enough output space or the decoding will + * fail. The output space is used as the dictionary buffer, which is why + * there is no need to allocate the dictionary as part of the decoder's + * internal state. + * + * Because the output buffer is used as the workspace, streams encoded using + * a big dictionary are not a problem in single-call mode. It is enough that + * the output buffer is big enough to hold the actual uncompressed data; it + * can be smaller than the dictionary size stored in the stream headers. 
+ * + * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes + * of memory is preallocated for the LZMA2 dictionary. This way there is no + * risk that xz_dec_run() could run out of memory, since xz_dec_run() will + * never allocate any memory. Instead, if the preallocated dictionary is too + * small for decoding the given input stream, xz_dec_run() will return + * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be + * decoded to avoid allocating excessive amount of memory for the dictionary. + * + * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): + * dict_max specifies the maximum allowed dictionary size that xz_dec_run() + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * On success, xz_dec_init() returns a pointer to struct xz_dec, which is + * ready to be used with xz_dec_run(). If memory allocation fails, + * xz_dec_init() returns NULL. + */ +XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); + +/** + * xz_dec_run() - Run the XZ decoder + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * + * The possible return values depend on build options and operation mode. + * See enum xz_ret for details. + * + * Note that if an error occurs in single-call mode (return value is not + * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the + * contents of the output buffer from b->out[b->out_pos] onward are + * undefined. This is true even after XZ_BUF_ERROR, because with some filter + * chains, there may be a second pass over the output buffer, and this pass + * cannot be properly done if the output buffer is truncated. Thus, you + * cannot give the single-call decoder a too small buffer and then expect to + * get that amount valid data from the beginning of the stream. You must use + * the multi-call decoder if you don't want to uncompress the whole stream. + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); + +/** + * xz_dec_reset() - Reset an already allocated decoder state + * @s: Decoder state allocated using xz_dec_init() + * + * This function can be used to reset the multi-call decoder state without + * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). + * + * In single-call mode, xz_dec_reset() is always called in the beginning of + * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in + * multi-call mode. + */ +XZ_EXTERN void xz_dec_reset(struct xz_dec *s); + +/** + * xz_dec_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_init(). If s is NULL, + * this function does nothing. + */ +XZ_EXTERN void xz_dec_end(struct xz_dec *s); + +/* + * Standalone build (userspace build or in-kernel build for boot time use) + * needs a CRC32 implementation. For normal in-kernel use, kernel's own + * CRC32 module is used instead, and users of this module don't need to + * care about the functions below. + */ +#ifndef XZ_INTERNAL_CRC32 +# ifdef __KERNEL__ +# define XZ_INTERNAL_CRC32 0 +# else +# define XZ_INTERNAL_CRC32 1 +# endif +#endif + +#if XZ_INTERNAL_CRC32 +/* + * This must be called before any other xz_* function to initialize + * the CRC32 lookup table. 
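Putting the xz_dec interface together, multi-call decompression of a fully buffered stream could look like the sketch below. The dictionary limit, buffer handling and error mapping are illustrative, and the sketch assumes the in-kernel xz_dec module is available.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/xz.h>

static int mydrv_unxz(const u8 *src, size_t src_len, u8 *dst, size_t dst_len)
{
	struct xz_buf b = {
		.in = src,  .in_pos = 0,  .in_size = src_len,
		.out = dst, .out_pos = 0, .out_size = dst_len,
	};
	struct xz_dec *s;
	enum xz_ret ret;

	s = xz_dec_init(XZ_DYNALLOC, 1 << 20);	/* cap the dictionary at 1 MiB */
	if (!s)
		return -ENOMEM;

	/* With whole buffers supplied, XZ_OK on a repeat call with no new data
	 * ends in XZ_BUF_ERROR, i.e. truncated or corrupt input. */
	do {
		ret = xz_dec_run(s, &b);
	} while (ret == XZ_OK);

	xz_dec_end(s);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}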
+ */ +XZ_EXTERN void xz_crc32_init(void); + +/* + * Update CRC32 value using the polynomial from IEEE-802.3. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. + */ +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc); +#endif +#endif diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h index c59cc029c74a..b586495bcd53 100644 --- a/include/media/davinci/vpss.h +++ b/include/media/davinci/vpss.h @@ -44,7 +44,7 @@ struct vpss_pg_frame_size { short pplen; }; -/* Used for enable/diable VPSS Clock */ +/* Used for enable/disable VPSS Clock */ enum vpss_clock_sel { /* DM355/DM365 */ VPSS_CCDC_CLOCK, diff --git a/include/net/ah.h b/include/net/ah.h index be7798dea6f4..ca95b98969dd 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -4,7 +4,7 @@ #include /* This is the maximum truncated ICV length that we know of. */ -#define MAX_AH_AUTH_LEN 16 +#define MAX_AH_AUTH_LEN 64 struct crypto_ahash; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bcc9f448ec4e..1322695beb52 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1103,6 +1103,8 @@ struct cfg80211_pmksa { * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx + * @join_mesh: join the mesh network with the specified parameters + * @leave_mesh: leave the current mesh network * * @get_mesh_config: Get the current mesh configuration * diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5b3fd5add7a4..62c0ce2d1dc8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -337,6 +337,10 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this * frame and selects the maximum number of streams that it can use. + * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on + * the off-channel channel when a remain-on-channel offload is done + * in hardware -- normal packets still flow and are expected to be + * handled properly by the device. * * Note: If you have to add new flags to the enumeration, then don't * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. @@ -1753,6 +1757,16 @@ enum ieee80211_ampdu_mlme_action { * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX). * * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). + * + * @remain_on_channel: Starts an off-channel period on the given channel, must + * call back to ieee80211_ready_on_channel() when on that channel. Note + * that normal channel traffic is not stopped as this is intended for hw + * offload. Frames to transmit on the off-channel channel are transmitted + * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the + * duration (which will always be non-zero) expires, the driver must call + * ieee80211_remain_on_channel_expired(). This callback may sleep. + * @cancel_remain_on_channel: Requests that an ongoing off-channel period is + * aborted before it expires. This callback may sleep. 
*/ struct ieee80211_ops { int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index 1ee717eb5b09..a4c993685795 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -7,16 +7,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; -extern int nf_ct_frag6_init(void); -extern void nf_ct_frag6_cleanup(void); -extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); -extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, - struct net_device *in, - struct net_device *out, - int (*okfn)(struct sk_buff *)); - -struct inet_frags_ctl; - #include extern struct ctl_table nf_ct_ipv6_sysctl_table[]; diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 94dd54d76b48..fd79c9a1779d 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -3,4 +3,14 @@ extern void nf_defrag_ipv6_enable(void); +extern int nf_ct_frag6_init(void); +extern void nf_ct_frag6_cleanup(void); +extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); +extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, + struct net_device *in, + struct net_device *out, + int (*okfn)(struct sk_buff *)); + +struct inet_frags_ctl; + #endif /* _NF_DEFRAG_IPV6_H */ diff --git a/include/net/red.h b/include/net/red.h index 995108e54d9f..3319f16b3beb 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -97,7 +97,6 @@ struct red_stats { u32 forced_mark; /* Forced marks, qavg > max_thresh */ u32 pdrop; /* Drops due to queue limits */ u32 other; /* Drops due to drop() calls */ - u32 backlog; }; struct red_parms { diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 2b2769c5ca9f..2a128c8c2718 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -99,8 +99,8 @@ typedef __s32 sctp_assoc_t; #define SCTP_SOCKOPT_PEELOFF 102 /* peel off association. */ /* Options 104-106 are deprecated and removed. Do not use this space */ #define SCTP_SOCKOPT_CONNECTX_OLD 107 /* CONNECTX old requests. */ -#define SCTP_GET_PEER_ADDRS 108 /* Get all peer addresss. */ -#define SCTP_GET_LOCAL_ADDRS 109 /* Get all local addresss. */ +#define SCTP_GET_PEER_ADDRS 108 /* Get all peer address. */ +#define SCTP_GET_LOCAL_ADDRS 109 /* Get all local address. */ #define SCTP_SOCKOPT_CONNECTX 110 /* CONNECTX requests. 
*/ #define SCTP_SOCKOPT_CONNECTX3 111 /* CONNECTX requests (updated) */ diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h index 8e9b222251c2..8a143ca79878 100644 --- a/include/scsi/fc/fc_fcp.h +++ b/include/scsi/fc/fc_fcp.h @@ -46,7 +46,7 @@ */ struct fcp_cmnd { __u8 fc_lun[8]; /* logical unit number */ - __u8 fc_cmdref; /* commmand reference number */ + __u8 fc_cmdref; /* command reference number */ __u8 fc_pri_ta; /* priority and task attribute */ __u8 fc_tm_flags; /* task management flags */ __u8 fc_flags; /* additional len & flags */ @@ -58,7 +58,7 @@ struct fcp_cmnd { struct fcp_cmnd32 { __u8 fc_lun[8]; /* logical unit number */ - __u8 fc_cmdref; /* commmand reference number */ + __u8 fc_cmdref; /* command reference number */ __u8 fc_pri_ta; /* priority and task attribute */ __u8 fc_tm_flags; /* task management flags */ __u8 fc_flags; /* additional len & flags */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 1651fef18831..648d23358038 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -104,6 +104,7 @@ struct scsi_cmnd; #define UNMAP 0x42 #define READ_TOC 0x43 #define READ_HEADER 0x44 +#define GET_EVENT_STATUS_NOTIFICATION 0x4a #define LOG_SELECT 0x4c #define LOG_SENSE 0x4d #define XDWRITEREAD_10 0x53 diff --git a/include/sound/alc5623.h b/include/sound/alc5623.h new file mode 100644 index 000000000000..422c97d43df3 --- /dev/null +++ b/include/sound/alc5623.h @@ -0,0 +1,15 @@ +#ifndef _INCLUDE_SOUND_ALC5623_H +#define _INCLUDE_SOUND_ALC5623_H +struct alc5623_platform_data { + /* configure : */ + /* Lineout/Speaker Amps Vmid ratio control */ + /* enable/disable adc/dac high pass filters */ + unsigned int add_ctrl; + /* configure : */ + /* output to enable when jack is low */ + /* output to enable when jack is high */ + /* jack detect (gpio/nc/jack detect [12] */ + unsigned int jack_det_ctrl; +}; +#endif + diff --git a/include/sound/asound.h b/include/sound/asound.h index a1803ecea34d..5d6074faa279 100644 --- a/include/sound/asound.h +++ b/include/sound/asound.h @@ -259,6 +259,7 @@ typedef int __bitwise snd_pcm_subformat_t; #define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */ #define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */ #define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */ +#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */ #define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ typedef int __bitwise snd_pcm_state_t; @@ -334,6 +335,8 @@ typedef int snd_pcm_hw_param_t; #define SNDRV_PCM_HW_PARAM_LAST_INTERVAL SNDRV_PCM_HW_PARAM_TICK_TIME #define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */ +#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */ +#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */ struct snd_interval { unsigned int min, max; diff --git a/include/sound/control.h b/include/sound/control.h index 112374dc0c58..7715e6f00d38 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -160,12 +160,14 @@ static inline struct snd_ctl_elem_id *snd_ctl_build_ioff(struct snd_ctl_elem_id } /* - * Frequently used control callbacks + * Frequently used control callbacks/helpers */ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); +int 
snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels, + unsigned int items, const char *const names[]); /* * virtual master control diff --git a/include/sound/hdsp.h b/include/sound/hdsp.h index d98a78dff2db..0909a3843479 100644 --- a/include/sound/hdsp.h +++ b/include/sound/hdsp.h @@ -28,6 +28,7 @@ enum HDSP_IO_Type { Multiface, H9652, H9632, + RPM, Undefined, }; diff --git a/include/sound/minors.h b/include/sound/minors.h index a81798ab73ed..8f764204a856 100644 --- a/include/sound/minors.h +++ b/include/sound/minors.h @@ -31,8 +31,8 @@ /* these minors can still be used for autoloading devices (/dev/aload*) */ #define SNDRV_MINOR_CONTROL 0 /* 0 */ #define SNDRV_MINOR_GLOBAL 1 /* 1 */ -#define SNDRV_MINOR_SEQUENCER (SNDRV_MINOR_GLOBAL + 0 * 32) -#define SNDRV_MINOR_TIMER (SNDRV_MINOR_GLOBAL + 1 * 32) +#define SNDRV_MINOR_SEQUENCER 1 /* SNDRV_MINOR_GLOBAL + 0 * 32 */ +#define SNDRV_MINOR_TIMER 33 /* SNDRV_MINOR_GLOBAL + 1 * 32 */ #ifndef CONFIG_SND_DYNAMIC_MINORS /* 2 - 3 (reserved) */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index dfd9b76b1853..e731f8d71934 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -297,6 +297,7 @@ struct snd_pcm_runtime { unsigned int info; unsigned int rate_num; unsigned int rate_den; + unsigned int no_period_wakeup: 1; /* -- SW params -- */ int tstamp_mode; /* mmap timestamp is updated */ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index e7b680248006..1bafe95dcf41 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -16,8 +16,6 @@ #include -#include - struct snd_pcm_substream; /* @@ -205,7 +203,7 @@ struct snd_soc_dai_driver { int (*resume)(struct snd_soc_dai *dai); /* ops */ - struct snd_soc_dai_ops *ops; + const struct snd_soc_dai_ops *ops; /* DAI capabilities */ struct snd_soc_pcm_stream capture; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 8fd3b41b763f..8031769ac485 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -16,7 +16,6 @@ #include #include #include -#include /* widget has no PM register bit */ #define SND_SOC_NOPM -1 @@ -72,6 +71,10 @@ wcontrols, wncontrols) \ { .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \ .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols} +#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\ + wcontrols, wncontrols) \ +{ .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \ + .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols} #define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \ wcontrols, wncontrols)\ { .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \ @@ -90,6 +93,9 @@ #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \ { .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \ .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1} +#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \ +{ .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \ + .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1} #define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \ { .id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \ .shift = wshift, .invert = winvert, .kcontrols = wcontrols, \ @@ -116,6 +122,11 @@ { .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \ .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \ .event = wevent, .event_flags = wflags} 
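The new snd_ctl_enum_info() helper above fills in an enumerated control's info structure from a name table. A sketch of a driver's .info callback using it (the control and its texts are hypothetical):

#include <linux/kernel.h>
#include <sound/control.h>

static const char *const mycodec_hp_mode_texts[] = {
	"Class-AB", "Class-D",
};

static int mycodec_hp_mode_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	return snd_ctl_enum_info(uinfo, 1,
				 ARRAY_SIZE(mycodec_hp_mode_texts),
				 mycodec_hp_mode_texts);
}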
+#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \ + wncontrols, wevent, wflags) \ +{ .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \ + .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \ + .event = wevent, .event_flags = wflags} #define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \ wncontrols, wevent, wflags) \ { .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \ @@ -140,6 +151,11 @@ { .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \ .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \ .event = wevent, .event_flags = wflags} +#define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \ + wevent, wflags) \ +{ .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \ + .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \ + .event = wevent, .event_flags = wflags} /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */ #define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \ @@ -219,13 +235,6 @@ .info = snd_soc_info_volsw, \ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) } -#define SOC_DAPM_DOUBLE(xname, reg, shift_left, shift_right, max, invert, \ - power) \ -{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ - .info = snd_soc_info_volsw, \ - .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ - .private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\ - ((max) << 16) | ((invert) << 24) } #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = snd_soc_info_volsw, \ @@ -233,15 +242,6 @@ .tlv.p = (tlv_array), \ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) } -#define SOC_DAPM_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, \ - power, tlv_array) \ -{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ - .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ - .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw, \ - .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ - .private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\ - ((max) << 16) | ((invert) << 24) } #define SOC_DAPM_ENUM(xname, xenum) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = snd_soc_info_enum_double, \ @@ -297,6 +297,7 @@ enum snd_soc_dapm_type; struct snd_soc_dapm_path; struct snd_soc_dapm_pin; struct snd_soc_dapm_route; +struct snd_soc_dapm_context; int dapm_reg_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event); @@ -324,16 +325,16 @@ int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uncontrol); int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uncontrol); -int snd_soc_dapm_new_control(struct snd_soc_codec *codec, +int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget); -int snd_soc_dapm_new_controls(struct snd_soc_codec *codec, +int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget, int num); /* dapm path setup */ -int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec); -void snd_soc_dapm_free(struct snd_soc_codec *codec); -int 
snd_soc_dapm_add_routes(struct snd_soc_codec *codec, +int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm); +void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm); +int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num); /* dapm events */ @@ -343,27 +344,33 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card); /* dapm sys fs - used by the core */ int snd_soc_dapm_sys_add(struct device *dev); -void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec); +void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm); /* dapm audio pin control and status */ -int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin); -int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin); -int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin); -int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin); -int snd_soc_dapm_sync(struct snd_soc_codec *codec); -int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, +int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, + const char *pin); +int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, + const char *pin); +int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin); +int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, + const char *pin); +int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm); +int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin); -int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin); +int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, + const char *pin); /* dapm widget types */ enum snd_soc_dapm_type { snd_soc_dapm_input = 0, /* input pin */ snd_soc_dapm_output, /* output pin */ snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */ + snd_soc_dapm_virt_mux, /* virtual version of snd_soc_dapm_mux */ snd_soc_dapm_value_mux, /* selects 1 analog signal from many inputs */ snd_soc_dapm_mixer, /* mixes several analog signals together */ snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */ snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */ + snd_soc_dapm_out_drv, /* output driver */ snd_soc_dapm_adc, /* analog to digital converter */ snd_soc_dapm_dac, /* digital to analog converter */ snd_soc_dapm_micbias, /* microphone bias (power) */ @@ -425,6 +432,7 @@ struct snd_soc_dapm_widget { char *sname; /* stream name */ struct snd_soc_codec *codec; struct list_head list; + struct snd_soc_dapm_context *dapm; /* dapm control */ short reg; /* negative reg = no direct dapm */ @@ -461,4 +469,35 @@ struct snd_soc_dapm_widget { struct list_head power_list; }; +struct snd_soc_dapm_update { + struct snd_soc_dapm_widget *widget; + struct snd_kcontrol *kcontrol; + int reg; + int mask; + int val; +}; + +/* DAPM context */ +struct snd_soc_dapm_context { + int n_widgets; /* number of widgets in this context */ + enum snd_soc_bias_level bias_level; + enum snd_soc_bias_level suspend_bias_level; + struct delayed_work delayed_work; + unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */ + + struct snd_soc_dapm_update *update; + + struct device *dev; /* from parent - for debug */ + struct snd_soc_codec *codec; /* parent codec */ + struct snd_soc_card *card; /* parent card */ + + /* used during DAPM updates */ + int dev_power; + struct list_head list; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_dapm; +#endif +}; + #endif diff --git 
a/include/sound/soc.h b/include/sound/soc.h index 5c3bce83f28a..74921f20a1d8 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -222,10 +222,8 @@ enum snd_soc_bias_level { struct snd_jack; struct snd_soc_card; -struct snd_soc_device; struct snd_soc_pcm_stream; struct snd_soc_ops; -struct snd_soc_dai_mode; struct snd_soc_pcm_runtime; struct snd_soc_dai; struct snd_soc_dai_driver; @@ -235,9 +233,10 @@ struct snd_soc_platform_driver; struct snd_soc_codec; struct snd_soc_codec_driver; struct soc_enum; -struct snd_soc_ac97_ops; struct snd_soc_jack; struct snd_soc_jack_pin; +struct snd_soc_cache_ops; +#include #ifdef CONFIG_GPIOLIB struct snd_soc_jack_gpio; @@ -253,17 +252,30 @@ enum snd_soc_control_type { SND_SOC_SPI, }; +enum snd_soc_compress_type { + SND_SOC_FLAT_COMPRESSION = 1, + SND_SOC_LZO_COMPRESSION, + SND_SOC_RBTREE_COMPRESSION +}; + int snd_soc_register_platform(struct device *dev, struct snd_soc_platform_driver *platform_drv); void snd_soc_unregister_platform(struct device *dev); int snd_soc_register_codec(struct device *dev, - struct snd_soc_codec_driver *codec_drv, + const struct snd_soc_codec_driver *codec_drv, struct snd_soc_dai_driver *dai_drv, int num_dai); void snd_soc_unregister_codec(struct device *dev); int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg); int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, int addr_bits, int data_bits, enum snd_soc_control_type control); +int snd_soc_cache_sync(struct snd_soc_codec *codec); +int snd_soc_cache_init(struct snd_soc_codec *codec); +int snd_soc_cache_exit(struct snd_soc_codec *codec); +int snd_soc_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value); +int snd_soc_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value); /* Utility functions to get clock rates from various things */ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); @@ -420,23 +432,37 @@ struct snd_soc_ops { int (*trigger)(struct snd_pcm_substream *, int); }; +/* SoC cache ops */ +struct snd_soc_cache_ops { + const char *name; + enum snd_soc_compress_type id; + int (*init)(struct snd_soc_codec *codec); + int (*exit)(struct snd_soc_codec *codec); + int (*read)(struct snd_soc_codec *codec, unsigned int reg, + unsigned int *value); + int (*write)(struct snd_soc_codec *codec, unsigned int reg, + unsigned int value); + int (*sync)(struct snd_soc_codec *codec); +}; + /* SoC Audio Codec device */ struct snd_soc_codec { const char *name; + const char *name_prefix; int id; struct device *dev; - struct snd_soc_codec_driver *driver; + const struct snd_soc_codec_driver *driver; struct mutex mutex; struct snd_soc_card *card; struct list_head list; struct list_head card_list; int num_dai; + enum snd_soc_compress_type compress_type; /* runtime */ struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */ unsigned int active; - unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */ unsigned int cache_only:1; /* Suppress writes to hardware */ unsigned int cache_sync:1; /* Cache needs to be synced to hardware */ unsigned int suspended:1; /* Codec is in suspend PM state */ @@ -444,25 +470,25 @@ struct snd_soc_codec { unsigned int ac97_registered:1; /* Codec has been AC97 registered */ unsigned int ac97_created:1; /* Codec has been created by SoC */ unsigned int sysfs_registered:1; /* codec has been sysfs registered */ + unsigned int cache_init:1; /* codec cache has been initialized */ /* codec IO */ void *control_data; /* codec control (i2c/3wire) data */ 
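As an editorial aside (not part of the patch): the declarations above add a pluggable register-cache layer for codecs (snd_soc_cache_init/read/write/sync, with the backend chosen via enum snd_soc_compress_type), and the soc-dapm.h changes earlier in this patch move DAPM state into a per-context structure that a codec driver reaches as codec->dapm. A rough, hypothetical sketch of how a codec driver's resume path could use these helpers follows; the register address, value and function name are invented.

#include <linux/device.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Illustrative sketch only: restore register and DAPM state for a made-up codec. */
static int example_codec_resume(struct snd_soc_codec *codec)
{
	unsigned int val;
	int ret;

	/* Update a (hypothetical) register value in the cache backend. */
	ret = snd_soc_cache_write(codec, 0x04, 0x0001);
	if (ret < 0)
		return ret;

	/* Read it back from the cache, e.g. for diagnostics. */
	ret = snd_soc_cache_read(codec, 0x04, &val);
	if (ret < 0)
		return ret;
	dev_dbg(codec->dev, "cached reg 0x04 = 0x%x\n", val);

	/* Flush the cached registers to the now-powered device... */
	ret = snd_soc_cache_sync(codec);
	if (ret < 0)
		return ret;

	/* ...and let DAPM re-evaluate pin power via the new per-codec context. */
	return snd_soc_dapm_sync(&codec->dapm);
}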
hw_write_t hw_write; unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int); + unsigned int (*read)(struct snd_soc_codec *, unsigned int); + int (*write)(struct snd_soc_codec *, unsigned int, unsigned int); void *reg_cache; + const void *reg_def_copy; + const struct snd_soc_cache_ops *cache_ops; + struct mutex cache_rw_mutex; /* dapm */ - u32 pop_time; - struct list_head dapm_widgets; - struct list_head dapm_paths; - enum snd_soc_bias_level bias_level; - enum snd_soc_bias_level suspend_bias_level; - struct delayed_work delayed_work; + struct snd_soc_dapm_context dapm; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_codec_root; struct dentry *debugfs_reg; - struct dentry *debugfs_pop_time; struct dentry *debugfs_dapm; #endif }; @@ -488,6 +514,7 @@ struct snd_soc_codec_driver { short reg_cache_step; short reg_word_size; const void *reg_cache_default; + enum snd_soc_compress_type compress_type; /* codec bias level */ int (*set_bias_level)(struct snd_soc_codec *, @@ -554,6 +581,30 @@ struct snd_soc_dai_link { struct snd_soc_ops *ops; }; +struct snd_soc_codec_conf { + const char *dev_name; + + /* + * optional map of kcontrol, widget and path name prefixes that are + * associated per device + */ + const char *name_prefix; + + /* + * set this to the desired compression type if you want to + * override the one supplied in codec->driver->compress_type + */ + enum snd_soc_compress_type compress_type; +}; + +struct snd_soc_aux_dev { + const char *name; /* Codec name */ + const char *codec_name; /* for multi-codec */ + + /* codec/machine specific init - e.g. add machine controls */ + int (*init)(struct snd_soc_dapm_context *dapm); +}; + /* SoC card */ struct snd_soc_card { const char *name; @@ -579,6 +630,8 @@ struct snd_soc_card { /* callbacks */ int (*set_bias_level)(struct snd_soc_card *, enum snd_soc_bias_level level); + int (*set_bias_level_post)(struct snd_soc_card *, + enum snd_soc_bias_level level); long pmdown_time; @@ -588,12 +641,35 @@ struct snd_soc_card { struct snd_soc_pcm_runtime *rtd; int num_rtd; + /* optional codec specific configuration */ + struct snd_soc_codec_conf *codec_conf; + int num_configs; + + /* + * optional auxiliary devices such as amplifiers or codecs with DAI + * link unused + */ + struct snd_soc_aux_dev *aux_dev; + int num_aux_devs; + struct snd_soc_pcm_runtime *rtd_aux; + int num_aux_rtd; + struct work_struct deferred_resume_work; /* lists of probed devices belonging to this card */ struct list_head codec_dev_list; struct list_head platform_dev_list; struct list_head dai_dev_list; + + struct list_head widgets; + struct list_head paths; + struct list_head dapm_list; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_card_root; + struct dentry *debugfs_pop_time; +#endif + u32 pop_time; }; /* SoC machine DAI configuration, glues a codec and cpu DAI together */ @@ -639,17 +715,9 @@ struct soc_enum { }; /* codec IO */ -static inline unsigned int snd_soc_read(struct snd_soc_codec *codec, - unsigned int reg) -{ - return codec->driver->read(codec, reg); -} - -static inline unsigned int snd_soc_write(struct snd_soc_codec *codec, - unsigned int reg, unsigned int val) -{ - return codec->driver->write(codec, reg, val); -} +unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg); +unsigned int snd_soc_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int val); /* device driver data */ diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h new file mode 100644 index 000000000000..7fe74608b437 --- /dev/null +++ 
b/include/target/configfs_macros.h @@ -0,0 +1,147 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * configfs_macros.h - extends macros for configfs + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Based on sysfs: + * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel + * + * Based on kobject.h: + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * + * configfs Copyright (C) 2005 Oracle. All rights reserved. + * + * Added CONFIGFS_EATTR() macros from original configfs.h macros + * Copyright (C) 2008-2009 Nicholas A. Bellinger + * + * Please read Documentation/filesystems/configfs.txt before using the + * configfs interface, ESPECIALLY the parts about reference counts and + * item destructors. + */ + +#ifndef _CONFIGFS_MACROS_H_ +#define _CONFIGFS_MACROS_H_ + +#include + +/* + * Users often need to create attribute structures for their configurable + * attributes, containing a configfs_attribute member and function pointers + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it. The argument _name ends up as + * 'struct _name_attribute', and is also used to name the functions generated + * by the CONFIGFS_EATTR_OPS() macros below. + * The argument _item is the name of the structure containing the + * struct config_item or struct config_group structure members. + */ +#define CONFIGFS_EATTR_STRUCT(_name, _item) \ +struct _name##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. + * An example: + * #define MYITEM_EATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_EATTR(_name, _mode, _show, _store) + */ +#define __CONFIGFS_EATTR(_name, _mode, _show, _store) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = _mode, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ + .store = _store, \ +} +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_EATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} +
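To make the relationship between these helpers concrete, here is a purely illustrative, editorial sketch (not part of the patch; every name in it is invented) of how a consumer could declare one read-only extended attribute using CONFIGFS_EATTR_STRUCT(), __CONFIGFS_EATTR_RO() and the CONFIGFS_EATTR_OPS_RO() helper defined just below:

#include <linux/kernel.h>
#include <linux/configfs.h>
#include <target/configfs_macros.h>

/* Hypothetical config_group-backed item. */
struct my_item {
	struct config_group group;
	int value;
};

/* Generate struct my_item_attribute, to_my_item() and my_item_attr_show(). */
CONFIGFS_EATTR_STRUCT(my_item, my_item);
CONFIGFS_EATTR_OPS_RO(my_item, my_item, group);

static ssize_t my_item_show_value(struct my_item *item, char *page)
{
	return sprintf(page, "%d\n", item->value);
}

/* Read-only attribute bound to the show() routine above.  The generated
 * my_item_attr_show() is what would be wired into the item type's
 * configfs_item_operations.show_attribute. */
static struct my_item_attribute my_item_attr_value =
	__CONFIGFS_EATTR_RO(value, my_item_show_value);

static struct configfs_attribute *my_item_attrs[] = {
	&my_item_attr_value.attr,
	NULL,
};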
+/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _name is the name of the attribute defined by + * CONFIGFS_EATTR_STRUCT(). The argument _item is the name of the structure + * containing the struct config_item or struct config_group structure member. + * The argument _item_member is the actual name of the struct config_* member + * in your _item structure. Meaning my_structure->some_config_group. + * ^^_item^^^^^ ^^_item_member^^^ + * These macros expect the attributes to be named "struct _name##_attribute". + */ +#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member) \ +static struct _item *to_##_name(struct config_item *ci) \ +{ \ + return (ci) ? container_of(to_config_group(ci), struct _item, \ + _item_member) : NULL; \ +} + +#define CONFIGFS_EATTR_OPS_SHOW(_name, _item) \ +static ssize_t _name##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_name(item); \ + struct _name##_attribute * _name##_attr = \ + container_of(attr, struct _name##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_name##_attr->show) \ + ret = _name##_attr->show(_item, page); \ + return ret; \ +} + +#define CONFIGFS_EATTR_OPS_STORE(_name, _item) \ +static ssize_t _name##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_name(item); \ + struct _name##_attribute * _name##_attr = \ + container_of(attr, struct _name##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_name##_attr->store) \ + ret = _name##_attr->store(_item, page, count); \ + return ret; \ +} + +#define CONFIGFS_EATTR_OPS(_name, _item, _item_member) \ + CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \ + CONFIGFS_EATTR_OPS_SHOW(_name, _item); \ + CONFIGFS_EATTR_OPS_STORE(_name, _item); + +#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member) \ + CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \ + CONFIGFS_EATTR_OPS_SHOW(_name, _item); + +#endif /* _CONFIGFS_MACROS_H_ */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h new file mode 100644 index 000000000000..07fdfb6b9a9a --- /dev/null +++ b/include/target/target_core_base.h @@ -0,0 +1,937 @@ +#ifndef TARGET_CORE_BASE_H +#define TARGET_CORE_BASE_H + +#include +#include +#include +#include +#include +#include +#include +#include "target_core_mib.h" + +#define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" +#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) + +/* Used by transport_generic_allocate_iovecs() */ +#define TRANSPORT_IOV_DATA_BUFFER 5 +/* Maximum Number of LUNs per Target Portal Group */ +#define TRANSPORT_MAX_LUNS_PER_TPG 256 +/* + * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. + * + * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and + * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use + * 16-byte CDBs by default and require an extra allocation for + * 32-byte CDBs because of legacy issues. + * + * Within TCM Core there are no such legacy limitations, so we go ahead and + * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size() + * within all TCM Core and subsystem plugin code.
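The sizing policy this comment describes can be illustrated with a short editorial sketch (not part of the patch; the function name is invented and error handling is minimal): a caller only needs a separate allocation when scsi_command_size() reports a CDB larger than the fixed TCM_MAX_COMMAND_SIZE buffer embedded in struct se_transport_task later in this header.

#include <linux/slab.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>

/* Illustrative only: pick the embedded __t_task_cdb[] buffer or a kmalloc()'d
 * one depending on the decoded CDB length, then copy the CDB into place. */
static int example_setup_cdb(struct se_transport_task *t_task,
			     unsigned char *cdb)
{
	unsigned int cdb_len = scsi_command_size(cdb);

	if (cdb_len > TCM_MAX_COMMAND_SIZE) {
		t_task->t_task_cdb = kmalloc(cdb_len, GFP_KERNEL);
		if (!t_task->t_task_cdb)
			return -ENOMEM;
	} else {
		t_task->t_task_cdb = &t_task->__t_task_cdb[0];
	}
	memcpy(t_task->t_task_cdb, cdb, cdb_len);
	return 0;
}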
+ */ +#define TCM_MAX_COMMAND_SIZE 32 +/* + * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently + * defined 96, but the real limit is 252 (or 260 including the header) + */ +#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE +/* Used by transport_send_check_condition_and_sense() */ +#define SPC_SENSE_KEY_OFFSET 2 +#define SPC_ASC_KEY_OFFSET 12 +#define SPC_ASCQ_KEY_OFFSET 13 +#define TRANSPORT_IQN_LEN 224 +/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ +#define LU_GROUP_NAME_BUF 256 +/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ +#define TG_PT_GROUP_NAME_BUF 256 +/* Used to parse VPD into struct t10_vpd */ +#define VPD_TMP_BUF_SIZE 128 +/* Used by transport_generic_cmd_sequencer() */ +#define READ_BLOCK_LEN 6 +#define READ_CAP_LEN 8 +#define READ_POSITION_LEN 20 +#define INQUIRY_LEN 36 +/* Used by transport_get_inquiry_vpd_serial() */ +#define INQUIRY_VPD_SERIAL_LEN 254 +/* Used by transport_get_inquiry_vpd_device_ident() */ +#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 + +/* struct se_hba->hba_flags */ +enum hba_flags_table { + HBA_FLAGS_INTERNAL_USE = 0x01, + HBA_FLAGS_PSCSI_MODE = 0x02, +}; + +/* struct se_lun->lun_status */ +enum transport_lun_status_table { + TRANSPORT_LUN_STATUS_FREE = 0, + TRANSPORT_LUN_STATUS_ACTIVE = 1, +}; + +/* struct se_portal_group->se_tpg_type */ +enum transport_tpg_type_table { + TRANSPORT_TPG_TYPE_NORMAL = 0, + TRANSPORT_TPG_TYPE_DISCOVERY = 1, +}; + +/* Used for generate timer flags */ +enum timer_flags_table { + TF_RUNNING = 0x01, + TF_STOP = 0x02, +}; + +/* Special transport agnostic struct se_cmd->t_states */ +enum transport_state_table { + TRANSPORT_NO_STATE = 0, + TRANSPORT_NEW_CMD = 1, + TRANSPORT_DEFERRED_CMD = 2, + TRANSPORT_WRITE_PENDING = 3, + TRANSPORT_PROCESS_WRITE = 4, + TRANSPORT_PROCESSING = 5, + TRANSPORT_COMPLETE_OK = 6, + TRANSPORT_COMPLETE_FAILURE = 7, + TRANSPORT_COMPLETE_TIMEOUT = 8, + TRANSPORT_PROCESS_TMR = 9, + TRANSPORT_TMR_COMPLETE = 10, + TRANSPORT_ISTATE_PROCESSING = 11, + TRANSPORT_ISTATE_PROCESSED = 12, + TRANSPORT_KILL = 13, + TRANSPORT_REMOVE = 14, + TRANSPORT_FREE = 15, + TRANSPORT_NEW_CMD_MAP = 16, +}; + +/* Used for struct se_cmd->se_cmd_flags */ +enum se_cmd_flags_table { + SCF_SUPPORTED_SAM_OPCODE = 0x00000001, + SCF_TRANSPORT_TASK_SENSE = 0x00000002, + SCF_EMULATED_TASK_SENSE = 0x00000004, + SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, + SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, + SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, + SCF_SCSI_NON_DATA_CDB = 0x00000040, + SCF_SCSI_CDB_EXCEPTION = 0x00000080, + SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, + SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, + SCF_SE_CMD_FAILED = 0x00000400, + SCF_SE_LUN_CMD = 0x00000800, + SCF_SE_ALLOW_EOO = 0x00001000, + SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, + SCF_SENT_CHECK_CONDITION = 0x00004000, + SCF_OVERFLOW_BIT = 0x00008000, + SCF_UNDERFLOW_BIT = 0x00010000, + SCF_SENT_DELAYED_TAS = 0x00020000, + SCF_ALUA_NON_OPTIMIZED = 0x00040000, + SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, + SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, + SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, + SCF_EMULATE_SYNC_CACHE = 0x00800000, + SCF_EMULATE_CDB_ASYNC = 0x01000000, + SCF_EMULATE_SYNC_UNMAP = 0x02000000 +}; + +/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ +enum transport_lunflags_table { + TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, + TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, + TRANSPORT_LUNFLAGS_READ_ONLY 
= 0x02, + TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, +}; + +/* struct se_device->dev_status */ +enum transport_device_status_table { + TRANSPORT_DEVICE_ACTIVATED = 0x01, + TRANSPORT_DEVICE_DEACTIVATED = 0x02, + TRANSPORT_DEVICE_QUEUE_FULL = 0x04, + TRANSPORT_DEVICE_SHUTDOWN = 0x08, + TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, + TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, +}; + +/* + * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason + * to signal which ASC/ASCQ sense payload should be built. + */ +enum tcm_sense_reason_table { + TCM_NON_EXISTENT_LUN = 0x01, + TCM_UNSUPPORTED_SCSI_OPCODE = 0x02, + TCM_INCORRECT_AMOUNT_OF_DATA = 0x03, + TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04, + TCM_SERVICE_CRC_ERROR = 0x05, + TCM_SNACK_REJECTED = 0x06, + TCM_SECTOR_COUNT_TOO_MANY = 0x07, + TCM_INVALID_CDB_FIELD = 0x08, + TCM_INVALID_PARAMETER_LIST = 0x09, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a, + TCM_UNKNOWN_MODE_PAGE = 0x0b, + TCM_WRITE_PROTECTED = 0x0c, + TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, + TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, + TCM_CHECK_CONDITION_NOT_READY = 0x0f, +}; + +struct se_obj { + atomic_t obj_access_count; +} ____cacheline_aligned; + +/* + * Used by TCM Core internally to signal if ALUA emulation is enabled or + * disabled, or running in with TCM/pSCSI passthrough mode + */ +typedef enum { + SPC_ALUA_PASSTHROUGH, + SPC2_ALUA_DISABLED, + SPC3_ALUA_EMULATED +} t10_alua_index_t; + +/* + * Used by TCM Core internally to signal if SAM Task Attribute emulation + * is enabled or disabled, or running in with TCM/pSCSI passthrough mode + */ +typedef enum { + SAM_TASK_ATTR_PASSTHROUGH, + SAM_TASK_ATTR_UNTAGGED, + SAM_TASK_ATTR_EMULATED +} t10_task_attr_index_t; + +struct se_cmd; + +struct t10_alua { + t10_alua_index_t alua_type; + /* ALUA Target Port Group ID */ + u16 alua_tg_pt_gps_counter; + u32 alua_tg_pt_gps_count; + spinlock_t tg_pt_gps_lock; + struct se_subsystem_dev *t10_sub_dev; + /* Used for default ALUA Target Port Group */ + struct t10_alua_tg_pt_gp *default_tg_pt_gp; + /* Used for default ALUA Target Port Group ConfigFS group */ + struct config_group alua_tg_pt_gps_group; + int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); + struct list_head tg_pt_gps_list; +} ____cacheline_aligned; + +struct t10_alua_lu_gp { + u16 lu_gp_id; + int lu_gp_valid_id; + u32 lu_gp_members; + atomic_t lu_gp_shutdown; + atomic_t lu_gp_ref_cnt; + spinlock_t lu_gp_lock; + struct config_group lu_gp_group; + struct list_head lu_gp_list; + struct list_head lu_gp_mem_list; +} ____cacheline_aligned; + +struct t10_alua_lu_gp_member { + int lu_gp_assoc:1; + atomic_t lu_gp_mem_ref_cnt; + spinlock_t lu_gp_mem_lock; + struct t10_alua_lu_gp *lu_gp; + struct se_device *lu_gp_mem_dev; + struct list_head lu_gp_mem_list; +} ____cacheline_aligned; + +struct t10_alua_tg_pt_gp { + u16 tg_pt_gp_id; + int tg_pt_gp_valid_id; + int tg_pt_gp_alua_access_status; + int tg_pt_gp_alua_access_type; + int tg_pt_gp_nonop_delay_msecs; + int tg_pt_gp_trans_delay_msecs; + int tg_pt_gp_pref; + int tg_pt_gp_write_metadata; + /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ +#define ALUA_MD_BUF_LEN 1024 + u32 tg_pt_gp_md_buf_len; + u32 tg_pt_gp_members; + atomic_t tg_pt_gp_alua_access_state; + atomic_t tg_pt_gp_ref_cnt; + spinlock_t tg_pt_gp_lock; + struct mutex tg_pt_gp_md_mutex; + struct se_subsystem_dev *tg_pt_gp_su_dev; + struct config_group tg_pt_gp_group; + struct list_head tg_pt_gp_list; + struct list_head tg_pt_gp_mem_list; +} ____cacheline_aligned; + +struct 
t10_alua_tg_pt_gp_member { + int tg_pt_gp_assoc:1; + atomic_t tg_pt_gp_mem_ref_cnt; + spinlock_t tg_pt_gp_mem_lock; + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct se_port *tg_pt; + struct list_head tg_pt_gp_mem_list; +} ____cacheline_aligned; + +struct t10_vpd { + unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; + int protocol_identifier_set; + u32 protocol_identifier; + u32 device_identifier_code_set; + u32 association; + u32 device_identifier_type; + struct list_head vpd_list; +} ____cacheline_aligned; + +struct t10_wwn { + unsigned char vendor[8]; + unsigned char model[16]; + unsigned char revision[4]; + unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; + spinlock_t t10_vpd_lock; + struct se_subsystem_dev *t10_sub_dev; + struct config_group t10_wwn_group; + struct list_head t10_vpd_list; +} ____cacheline_aligned; + + +/* + * Used by TCM Core internally to signal if >= SPC-3 peristent reservations + * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough + * mode + */ +typedef enum { + SPC_PASSTHROUGH, + SPC2_RESERVATIONS, + SPC3_PERSISTENT_RESERVATIONS +} t10_reservations_index_t; + +struct t10_pr_registration { + /* Used for fabrics that contain WWN+ISID */ +#define PR_REG_ISID_LEN 16 + /* PR_REG_ISID_LEN + ',i,0x' */ +#define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) + char pr_reg_isid[PR_REG_ISID_LEN]; + /* Used during APTPL metadata reading */ +#define PR_APTPL_MAX_IPORT_LEN 256 + unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; + /* Used during APTPL metadata reading */ +#define PR_APTPL_MAX_TPORT_LEN 256 + unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; + /* For writing out live meta data */ + unsigned char *pr_aptpl_buf; + u16 pr_aptpl_rpti; + u16 pr_reg_tpgt; + /* Reservation effects all target ports */ + int pr_reg_all_tg_pt; + /* Activate Persistence across Target Power Loss */ + int pr_reg_aptpl; + int pr_res_holder; + int pr_res_type; + int pr_res_scope; + /* Used for fabric initiator WWPNs using a ISID */ + int isid_present_at_reg:1; + u32 pr_res_mapped_lun; + u32 pr_aptpl_target_lun; + u32 pr_res_generation; + u64 pr_reg_bin_isid; + u64 pr_res_key; + atomic_t pr_res_holders; + struct se_node_acl *pr_reg_nacl; + struct se_dev_entry *pr_reg_deve; + struct se_lun *pr_reg_tg_pt_lun; + struct list_head pr_reg_list; + struct list_head pr_reg_abort_list; + struct list_head pr_reg_aptpl_list; + struct list_head pr_reg_atp_list; + struct list_head pr_reg_atp_mem_list; +} ____cacheline_aligned; + +/* + * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS, + * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c: + * core_setup_reservations() + */ +struct t10_reservation_ops { + int (*t10_reservation_check)(struct se_cmd *, u32 *); + int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); + int (*t10_pr_register)(struct se_cmd *); + int (*t10_pr_clear)(struct se_cmd *); +}; + +struct t10_reservation_template { + /* Reservation effects all target ports */ + int pr_all_tg_pt; + /* Activate Persistence across Target Power Loss enabled + * for SCSI device */ + int pr_aptpl_active; + /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ +#define PR_APTPL_BUF_LEN 8192 + u32 pr_aptpl_buf_len; + u32 pr_generation; + t10_reservations_index_t res_type; + spinlock_t registration_lock; + spinlock_t aptpl_reg_lock; + /* + * This will always be set by one individual I_T Nexus. 
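As a purely illustrative, editorial aside (not part of the patch): callers reach whichever reservation implementation was installed for a device through the struct t10_reservation_ops table defined above (embedded as pr_ops in struct t10_reservation_template), selected per the core_setup_reservations() comment as SPC_PASSTHROUGH, SPC2_RESERVATIONS or SPC3_PERSISTENT_RESERVATIONS. A hypothetical dispatch helper might look like this; the function name is invented and the NULL check is merely defensive.

#include <linux/types.h>
#include <target/target_core_base.h>

/* Illustrative only: route a reservation check through the installed ops. */
static int example_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
{
	struct t10_reservation_template *t10_res =
		&cmd->se_dev->se_sub_dev->t10_reservation;

	if (!t10_res->pr_ops.t10_reservation_check)
		return 0;

	return t10_res->pr_ops.t10_reservation_check(cmd, pr_reg_type);
}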
+ * However with all_tg_pt=1, other I_T Nexus from the + * same initiator can access PR reg/res info on a different + * target port. + * + * There is also the 'All Registrants' case, where there is + * a single *pr_res_holder of the reservation, but all + * registrations are considered reservation holders. + */ + struct se_node_acl *pr_res_holder; + struct list_head registration_list; + struct list_head aptpl_reg_list; + struct t10_reservation_ops pr_ops; +} ____cacheline_aligned; + +struct se_queue_req { + int state; + void *cmd; + struct list_head qr_list; +} ____cacheline_aligned; + +struct se_queue_obj { + atomic_t queue_cnt; + spinlock_t cmd_queue_lock; + struct list_head qobj_list; + wait_queue_head_t thread_wq; +} ____cacheline_aligned; + +/* + * Used one per struct se_cmd to hold all extra struct se_task + * metadata. This structure is setup and allocated in + * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() + */ +struct se_transport_task { + unsigned char *t_task_cdb; + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; + unsigned long long t_task_lba; + int t_tasks_failed; + int t_tasks_fua; + int t_tasks_bidi:1; + u32 t_task_cdbs; + u32 t_tasks_check; + u32 t_tasks_no; + u32 t_tasks_sectors; + u32 t_tasks_se_num; + u32 t_tasks_se_bidi_num; + u32 t_tasks_sg_chained_no; + atomic_t t_fe_count; + atomic_t t_se_count; + atomic_t t_task_cdbs_left; + atomic_t t_task_cdbs_ex_left; + atomic_t t_task_cdbs_timeout_left; + atomic_t t_task_cdbs_sent; + atomic_t t_transport_aborted; + atomic_t t_transport_active; + atomic_t t_transport_complete; + atomic_t t_transport_queue_active; + atomic_t t_transport_sent; + atomic_t t_transport_stop; + atomic_t t_transport_timeout; + atomic_t transport_dev_active; + atomic_t transport_lun_active; + atomic_t transport_lun_fe_stop; + atomic_t transport_lun_stop; + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + struct completion transport_lun_fe_stop_comp; + struct completion transport_lun_stop_comp; + struct scatterlist *t_tasks_sg_chained; + struct scatterlist t_tasks_sg_bounce; + void *t_task_buf; + /* + * Used for pre-registered fabric SGL passthrough WRITE and READ + * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop + * and other HW target mode fabric modules. 
+ */ + struct scatterlist *t_task_pt_sgl; + struct list_head *t_mem_list; + /* Used for BIDI READ */ + struct list_head *t_mem_bidi_list; + struct list_head t_task_list; +} ____cacheline_aligned; + +struct se_task { + unsigned char task_sense; + struct scatterlist *task_sg; + struct scatterlist *task_sg_bidi; + u8 task_scsi_status; + u8 task_flags; + int task_error_status; + int task_state_flags; + int task_padded_sg:1; + unsigned long long task_lba; + u32 task_no; + u32 task_sectors; + u32 task_size; + u32 task_sg_num; + u32 task_sg_offset; + enum dma_data_direction task_data_direction; + struct se_cmd *task_se_cmd; + struct se_device *se_dev; + struct completion task_stop_comp; + atomic_t task_active; + atomic_t task_execute_queue; + atomic_t task_timeout; + atomic_t task_sent; + atomic_t task_stop; + atomic_t task_state_active; + struct timer_list task_timer; + struct se_device *se_obj_ptr; + struct list_head t_list; + struct list_head t_execute_list; + struct list_head t_state_list; +} ____cacheline_aligned; + +#define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) +#define TASK_DEV(task) ((struct se_device *)task->se_dev) + +struct se_cmd { + /* SAM response code being sent to initiator */ + u8 scsi_status; + u8 scsi_asc; + u8 scsi_ascq; + u8 scsi_sense_reason; + u16 scsi_sense_length; + /* Delay for ALUA Active/NonOptimized state access in milliseconds */ + int alua_nonop_delay; + /* See include/linux/dma-mapping.h */ + enum dma_data_direction data_direction; + /* For SAM Task Attribute */ + int sam_task_attr; + /* Transport protocol dependent state, see transport_state_table */ + enum transport_state_table t_state; + /* Transport protocol dependent state for out of order CmdSNs */ + int deferred_t_state; + /* Transport specific error status */ + int transport_error_status; + /* See se_cmd_flags_table */ + u32 se_cmd_flags; + u32 se_ordered_id; + /* Total size in bytes associated with command */ + u32 data_length; + /* SCSI Presented Data Transfer Length */ + u32 cmd_spdtl; + u32 residual_count; + u32 orig_fe_lun; + /* Persistent Reservation key */ + u64 pr_res_key; + atomic_t transport_sent; + /* Used for sense data */ + void *sense_buffer; + struct list_head se_delayed_list; + struct list_head se_ordered_list; + struct list_head se_lun_list; + struct se_device *se_dev; + struct se_dev_entry *se_deve; + struct se_device *se_obj_ptr; + struct se_device *se_orig_obj_ptr; + struct se_lun *se_lun; + /* Only used for internal passthrough and legacy TCM fabric modules */ + struct se_session *se_sess; + struct se_tmr_req *se_tmr_req; + /* t_task is setup to t_task_backstore in transport_init_se_cmd() */ + struct se_transport_task *t_task; + struct se_transport_task t_task_backstore; + struct target_core_fabric_ops *se_tfo; + int (*transport_emulate_cdb)(struct se_cmd *); + void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); + void (*transport_wait_for_tasks)(struct se_cmd *, int, int); + void (*transport_complete_callback)(struct se_cmd *); +} ____cacheline_aligned; + +#define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) +#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) + +struct se_tmr_req { + /* Task Management function to be preformed */ + u8 function; + /* Task Management response to send */ + u8 response; + int call_transport; + /* Reference to ITT that Task Mgmt should be preformed */ + u32 ref_task_tag; + /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ + u64 ref_task_lun; + void *fabric_tmr_ptr; + struct se_cmd 
*task_cmd; + struct se_cmd *ref_cmd; + struct se_device *tmr_dev; + struct se_lun *tmr_lun; + struct list_head tmr_list; +} ____cacheline_aligned; + +struct se_ua { + u8 ua_asc; + u8 ua_ascq; + struct se_node_acl *ua_nacl; + struct list_head ua_dev_list; + struct list_head ua_nacl_list; +} ____cacheline_aligned; + +struct se_node_acl { + char initiatorname[TRANSPORT_IQN_LEN]; + /* Used to signal demo mode created ACL, disabled by default */ + int dynamic_node_acl:1; + u32 queue_depth; + u32 acl_index; + u64 num_cmds; + u64 read_bytes; + u64 write_bytes; + spinlock_t stats_lock; + /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ + atomic_t acl_pr_ref_count; + /* Used for MIB access */ + atomic_t mib_ref_count; + struct se_dev_entry *device_list; + struct se_session *nacl_sess; + struct se_portal_group *se_tpg; + spinlock_t device_list_lock; + spinlock_t nacl_sess_lock; + struct config_group acl_group; + struct config_group acl_attrib_group; + struct config_group acl_auth_group; + struct config_group acl_param_group; + struct config_group *acl_default_groups[4]; + struct list_head acl_list; + struct list_head acl_sess_list; +} ____cacheline_aligned; + +struct se_session { + /* Used for MIB access */ + atomic_t mib_ref_count; + u64 sess_bin_isid; + struct se_node_acl *se_node_acl; + struct se_portal_group *se_tpg; + void *fabric_sess_ptr; + struct list_head sess_list; + struct list_head sess_acl_list; +} ____cacheline_aligned; + +#define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) +#define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) + +struct se_device; +struct se_transform_info; +struct scatterlist; + +struct se_lun_acl { + char initiatorname[TRANSPORT_IQN_LEN]; + u32 mapped_lun; + struct se_node_acl *se_lun_nacl; + struct se_lun *se_lun; + struct list_head lacl_list; + struct config_group se_lun_group; +} ____cacheline_aligned; + +struct se_dev_entry { + int def_pr_registered:1; + /* See transport_lunflags_table */ + u32 lun_flags; + u32 deve_cmds; + u32 mapped_lun; + u32 average_bytes; + u32 last_byte_count; + u32 total_cmds; + u32 total_bytes; + u64 pr_res_key; + u64 creation_time; + u32 attach_count; + u64 read_bytes; + u64 write_bytes; + atomic_t ua_count; + /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ + atomic_t pr_ref_count; + struct se_lun_acl *se_lun_acl; + spinlock_t ua_lock; + struct se_lun *se_lun; + struct list_head alua_port_list; + struct list_head ua_list; +} ____cacheline_aligned; + +struct se_dev_limits { + /* Max supported HW queue depth */ + u32 hw_queue_depth; + /* Max supported virtual queue depth */ + u32 queue_depth; + /* From include/linux/blkdev.h for the other HW/SW limits. 
*/ + struct queue_limits limits; +} ____cacheline_aligned; + +struct se_dev_attrib { + int emulate_dpo; + int emulate_fua_write; + int emulate_fua_read; + int emulate_write_cache; + int emulate_ua_intlck_ctrl; + int emulate_tas; + int emulate_tpu; + int emulate_tpws; + int emulate_reservations; + int emulate_alua; + int enforce_pr_isids; + u32 hw_block_size; + u32 block_size; + u32 hw_max_sectors; + u32 max_sectors; + u32 optimal_sectors; + u32 hw_queue_depth; + u32 queue_depth; + u32 task_timeout; + u32 max_unmap_lba_count; + u32 max_unmap_block_desc_count; + u32 unmap_granularity; + u32 unmap_granularity_alignment; + struct se_subsystem_dev *da_sub_dev; + struct config_group da_group; +} ____cacheline_aligned; + +struct se_subsystem_dev { +/* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */ +#define SE_DEV_ALIAS_LEN 512 + unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; +/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ +#define SE_UDEV_PATH_LEN 512 + unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; + u32 su_dev_flags; + struct se_hba *se_dev_hba; + struct se_device *se_dev_ptr; + struct se_dev_attrib se_dev_attrib; + /* T10 Asymmetric Logical Unit Assignment for Target Ports */ + struct t10_alua t10_alua; + /* T10 Inquiry and VPD WWN Information */ + struct t10_wwn t10_wwn; + /* T10 SPC-2 + SPC-3 Reservations */ + struct t10_reservation_template t10_reservation; + spinlock_t se_dev_lock; + void *se_dev_su_ptr; + struct list_head g_se_dev_list; + struct config_group se_dev_group; + /* For T10 Reservations */ + struct config_group se_dev_pr_group; +} ____cacheline_aligned; + +#define T10_ALUA(su_dev) (&(su_dev)->t10_alua) +#define T10_RES(su_dev) (&(su_dev)->t10_reservation) +#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) + +struct se_device { + /* Set to 1 if thread is NOT sleeping on thread_sem */ + u8 thread_active; + u8 dev_status_timer_flags; + /* RELATIVE TARGET PORT IDENTIFER Counter */ + u16 dev_rpti_counter; + /* Used for SAM Task Attribute ordering */ + u32 dev_cur_ordered_id; + u32 dev_flags; + u32 dev_port_count; + /* See transport_device_status_table */ + u32 dev_status; + u32 dev_tcq_window_closed; + /* Physical device queue depth */ + u32 queue_depth; + /* Used for SPC-2 reservations enforce of ISIDs */ + u64 dev_res_bin_isid; + t10_task_attr_index_t dev_task_attr_type; + /* Pointer to transport specific device structure */ + void *dev_ptr; + u32 dev_index; + u64 creation_time; + u32 num_resets; + u64 num_cmds; + u64 read_bytes; + u64 write_bytes; + spinlock_t stats_lock; + /* Active commands on this virtual SE device */ + atomic_t active_cmds; + atomic_t simple_cmds; + atomic_t depth_left; + atomic_t dev_ordered_id; + atomic_t dev_tur_active; + atomic_t execute_tasks; + atomic_t dev_status_thr_count; + atomic_t dev_hoq_count; + atomic_t dev_ordered_sync; + struct se_obj dev_obj; + struct se_obj dev_access_obj; + struct se_obj dev_export_obj; + struct se_queue_obj *dev_queue_obj; + struct se_queue_obj *dev_status_queue_obj; + spinlock_t delayed_cmd_lock; + spinlock_t ordered_cmd_lock; + spinlock_t execute_task_lock; + spinlock_t state_task_lock; + spinlock_t dev_alua_lock; + spinlock_t dev_reservation_lock; + spinlock_t dev_state_lock; + spinlock_t dev_status_lock; + spinlock_t dev_status_thr_lock; + spinlock_t se_port_lock; + spinlock_t se_tmr_lock; + /* Used for legacy SPC-2 reservationsa */ + struct se_node_acl *dev_reserved_node_acl; + /* Used for ALUA Logical Unit Group membership */ + 
struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; + /* Used for SPC-3 Persistent Reservations */ + struct t10_pr_registration *dev_pr_res_holder; + struct list_head dev_sep_list; + struct list_head dev_tmr_list; + struct timer_list dev_status_timer; + /* Pointer to descriptor for processing thread */ + struct task_struct *process_thread; + pid_t process_thread_pid; + struct task_struct *dev_mgmt_thread; + struct list_head delayed_cmd_list; + struct list_head ordered_cmd_list; + struct list_head execute_task_list; + struct list_head state_task_list; + /* Pointer to associated SE HBA */ + struct se_hba *se_hba; + struct se_subsystem_dev *se_sub_dev; + /* Pointer to template of function pointers for transport */ + struct se_subsystem_api *transport; + /* Linked list for struct se_hba struct se_device list */ + struct list_head dev_list; + /* Linked list for struct se_global->g_se_dev_list */ + struct list_head g_se_dev_list; +} ____cacheline_aligned; + +#define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) +#define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) +#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) +#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) + +struct se_hba { + u16 hba_tpgt; + u32 hba_id; + /* See hba_flags_table */ + u32 hba_flags; + /* Virtual iSCSI devices attached. */ + u32 dev_count; + u32 hba_index; + atomic_t dev_mib_access_count; + atomic_t load_balance_queue; + atomic_t left_queue_depth; + /* Maximum queue depth the HBA can handle. */ + atomic_t max_queue_depth; + /* Pointer to transport specific host structure. */ + void *hba_ptr; + /* Linked list for struct se_device */ + struct list_head hba_dev_list; + struct list_head hba_list; + spinlock_t device_lock; + spinlock_t hba_queue_lock; + struct config_group hba_group; + struct mutex hba_access_mutex; + struct se_subsystem_api *transport; +} ____cacheline_aligned; + +#define SE_HBA(d) ((struct se_hba *)(d)->se_hba) + +struct se_lun { + /* See transport_lun_status_table */ + enum transport_lun_status_table lun_status; + u32 lun_access; + u32 lun_flags; + u32 unpacked_lun; + atomic_t lun_acl_count; + spinlock_t lun_acl_lock; + spinlock_t lun_cmd_lock; + spinlock_t lun_sep_lock; + struct completion lun_shutdown_comp; + struct list_head lun_cmd_list; + struct list_head lun_acl_list; + struct se_device *lun_se_dev; + struct config_group lun_group; + struct se_port *lun_sep; +} ____cacheline_aligned; + +#define SE_LUN(c) ((struct se_lun *)(c)->se_lun) + +struct se_port { + /* RELATIVE TARGET PORT IDENTIFER */ + u16 sep_rtpi; + int sep_tg_pt_secondary_stat; + int sep_tg_pt_secondary_write_md; + u32 sep_index; + struct scsi_port_stats sep_stats; + /* Used for ALUA Target Port Groups membership */ + atomic_t sep_tg_pt_gp_active; + atomic_t sep_tg_pt_secondary_offline; + /* Used for PR ALL_TG_PT=1 */ + atomic_t sep_tg_pt_ref_cnt; + spinlock_t sep_alua_lock; + struct mutex sep_tg_pt_md_mutex; + struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; + struct se_lun *sep_lun; + struct se_portal_group *sep_tpg; + struct list_head sep_alua_list; + struct list_head sep_list; +} ____cacheline_aligned; + +struct se_tpg_np { + struct config_group tpg_np_group; +} ____cacheline_aligned; + +struct se_portal_group { + /* Type of target portal group, see transport_tpg_type_table */ + enum transport_tpg_type_table se_tpg_type; + /* Number of ACLed Initiator Nodes for this TPG */ + u32 num_node_acls; + /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ + atomic_t tpg_pr_ref_count; + /* Spinlock 
for adding/removing ACLed Nodes */ + spinlock_t acl_node_lock; + /* Spinlock for adding/removing sessions */ + spinlock_t session_lock; + spinlock_t tpg_lun_lock; + /* Pointer to $FABRIC_MOD portal group */ + void *se_tpg_fabric_ptr; + struct list_head se_tpg_list; + /* linked list for initiator ACL list */ + struct list_head acl_node_list; + struct se_lun *tpg_lun_list; + struct se_lun tpg_virt_lun0; + /* List of TCM sessions assoicated wth this TPG */ + struct list_head tpg_sess_list; + /* Pointer to $FABRIC_MOD dependent code */ + struct target_core_fabric_ops *se_tpg_tfo; + struct se_wwn *se_tpg_wwn; + struct config_group tpg_group; + struct config_group *tpg_default_groups[6]; + struct config_group tpg_lun_group; + struct config_group tpg_np_group; + struct config_group tpg_acl_group; + struct config_group tpg_attrib_group; + struct config_group tpg_param_group; +} ____cacheline_aligned; + +#define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) + +struct se_wwn { + struct target_fabric_configfs *wwn_tf; + struct config_group wwn_group; +} ____cacheline_aligned; + +struct se_global { + u16 alua_lu_gps_counter; + int g_sub_api_initialized; + u32 in_shutdown; + u32 alua_lu_gps_count; + u32 g_hba_id_counter; + struct config_group target_core_hbagroup; + struct config_group alua_group; + struct config_group alua_lu_gps_group; + struct list_head g_lu_gps_list; + struct list_head g_se_tpg_list; + struct list_head g_hba_list; + struct list_head g_se_dev_list; + struct se_hba *g_lun0_hba; + struct se_subsystem_dev *g_lun0_su_dev; + struct se_device *g_lun0_dev; + struct t10_alua_lu_gp *default_lu_gp; + spinlock_t g_device_lock; + spinlock_t hba_lock; + spinlock_t se_tpg_lock; + spinlock_t lu_gps_lock; + spinlock_t plugin_class_lock; +} ____cacheline_aligned; + +#endif /* TARGET_CORE_BASE_H */ diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h new file mode 100644 index 000000000000..40e6e740527c --- /dev/null +++ b/include/target/target_core_configfs.h @@ -0,0 +1,52 @@ +#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION + +#define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config" + +#define TARGET_CORE_NAME_MAX_LEN 64 +#define TARGET_FABRIC_NAME_SIZE 32 + +extern struct target_fabric_configfs *target_fabric_configfs_init( + struct module *, const char *); +extern void target_fabric_configfs_free(struct target_fabric_configfs *); +extern int target_fabric_configfs_register(struct target_fabric_configfs *); +extern void target_fabric_configfs_deregister(struct target_fabric_configfs *); + +struct target_fabric_configfs_template { + struct config_item_type tfc_discovery_cit; + struct config_item_type tfc_wwn_cit; + struct config_item_type tfc_tpg_cit; + struct config_item_type tfc_tpg_base_cit; + struct config_item_type tfc_tpg_lun_cit; + struct config_item_type tfc_tpg_port_cit; + struct config_item_type tfc_tpg_np_cit; + struct config_item_type tfc_tpg_np_base_cit; + struct config_item_type tfc_tpg_attrib_cit; + struct config_item_type tfc_tpg_param_cit; + struct config_item_type tfc_tpg_nacl_cit; + struct config_item_type tfc_tpg_nacl_base_cit; + struct config_item_type tfc_tpg_nacl_attrib_cit; + struct config_item_type tfc_tpg_nacl_auth_cit; + struct config_item_type tfc_tpg_nacl_param_cit; + struct config_item_type tfc_tpg_mappedlun_cit; +}; + +struct target_fabric_configfs { + char tf_name[TARGET_FABRIC_NAME_SIZE]; + atomic_t tf_access_cnt; + struct list_head tf_list; + struct config_group tf_group; + struct config_group 
tf_disc_group; + struct config_group *tf_default_groups[2]; + /* Pointer to fabric's config_item */ + struct config_item *tf_fabric; + /* Passed from fabric modules */ + struct config_item_type *tf_fabric_cit; + /* Pointer to target core subsystem */ + struct configfs_subsystem *tf_subsys; + /* Pointer to fabric's struct module */ + struct module *tf_module; + struct target_core_fabric_ops tf_ops; + struct target_fabric_configfs_template tf_cit_tmpl; +}; + +#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl) diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h new file mode 100644 index 000000000000..52b18a5752c9 --- /dev/null +++ b/include/target/target_core_device.h @@ -0,0 +1,61 @@ +#ifndef TARGET_CORE_DEVICE_H +#define TARGET_CORE_DEVICE_H + +extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32); +extern int transport_get_lun_for_tmr(struct se_cmd *, u32); +extern struct se_dev_entry *core_get_se_deve_from_rtpi( + struct se_node_acl *, u16); +extern int core_free_device_list_for_node(struct se_node_acl *, + struct se_portal_group *); +extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); +extern void core_update_device_list_access(u32, u32, struct se_node_acl *); +extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, + u32, struct se_node_acl *, + struct se_portal_group *, int); +extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); +extern int core_dev_export(struct se_device *, struct se_portal_group *, + struct se_lun *); +extern void core_dev_unexport(struct se_device *, struct se_portal_group *, + struct se_lun *); +extern int transport_core_report_lun_response(struct se_cmd *); +extern void se_release_device_for_hba(struct se_device *); +extern void se_release_vpd_for_dev(struct se_device *); +extern void se_clear_dev_ports(struct se_device *); +extern int se_free_virtual_device(struct se_device *, struct se_hba *); +extern int se_dev_check_online(struct se_device *); +extern int se_dev_check_shutdown(struct se_device *); +extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *); +extern int se_dev_set_task_timeout(struct se_device *, u32); +extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32); +extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); +extern int se_dev_set_unmap_granularity(struct se_device *, u32); +extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); +extern int se_dev_set_emulate_dpo(struct se_device *, int); +extern int se_dev_set_emulate_fua_write(struct se_device *, int); +extern int se_dev_set_emulate_fua_read(struct se_device *, int); +extern int se_dev_set_emulate_write_cache(struct se_device *, int); +extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); +extern int se_dev_set_emulate_tas(struct se_device *, int); +extern int se_dev_set_emulate_tpu(struct se_device *, int); +extern int se_dev_set_emulate_tpws(struct se_device *, int); +extern int se_dev_set_enforce_pr_isids(struct se_device *, int); +extern int se_dev_set_queue_depth(struct se_device *, u32); +extern int se_dev_set_max_sectors(struct se_device *, u32); +extern int se_dev_set_optimal_sectors(struct se_device *, u32); +extern int se_dev_set_block_size(struct se_device *, u32); +extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, + struct se_device *, u32); +extern int core_dev_del_lun(struct se_portal_group *, u32); +extern struct se_lun 
*core_get_lun_from_tpg(struct se_portal_group *, u32); +extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, + u32, char *, int *); +extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *, u32, u32); +extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun *, struct se_lun_acl *); +extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *lacl); +extern int core_dev_setup_virtual_lun0(void); +extern void core_dev_release_virtual_lun0(void); + +#endif /* TARGET_CORE_DEVICE_H */ diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h new file mode 100644 index 000000000000..a26fb7586a09 --- /dev/null +++ b/include/target/target_core_fabric_configfs.h @@ -0,0 +1,106 @@ +/* + * Used for tfc_wwn_cit attributes + */ + +#include + +CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl); +#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_nacl_attrib_show_##_name, \ + _fabric##_nacl_attrib_store_##_name); + +CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl); +#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_nacl_auth_show_##_name, \ + _fabric##_nacl_auth_store_##_name); + +#define TF_NACL_AUTH_ATTR_RO(_fabric, _name) \ +static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _fabric##_nacl_auth_show_##_name); + +CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl); +#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_nacl_param_show_##_name, \ + _fabric##_nacl_param_store_##_name); + +#define TF_NACL_PARAM_ATTR_RO(_fabric, _name) \ +static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _fabric##_nacl_param_show_##_name); + + +CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl); +#define TF_NACL_BASE_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_nacl_show_##_name, \ + _fabric##_nacl_store_##_name); + +#define TF_NACL_BASE_ATTR_RO(_fabric, _name) \ +static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _fabric##_nacl_show_##_name); + +CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np); +#define TF_NP_BASE_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_np_base_attribute _fabric##_np_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_np_show_##_name, \ + _fabric##_np_store_##_name); + +CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group); +#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_tpg_attrib_show_##_name, \ + _fabric##_tpg_attrib_store_##_name); + + +CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group); +#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = 
\ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_tpg_param_show_##_name, \ + _fabric##_tpg_param_store_##_name); + + +CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group); +#define TF_TPG_BASE_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_tpg_show_##_name, \ + _fabric##_tpg_store_##_name); + + +CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs); +#define TF_WWN_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_wwn_show_attr_##_name, \ + _fabric##_wwn_store_attr_##_name); + +#define TF_WWN_ATTR_RO(_fabric, _name) \ +static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _fabric##_wwn_show_attr_##_name); + +CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs); +#define TF_DISC_ATTR(_fabric, _name, _mode) \ +static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _fabric##_disc_show_##_name, \ + _fabric##_disc_store_##_name); + +#define TF_DISC_ATTR_RO(_fabric, _name) \ +static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _fabric##_disc_show_##_name); + +extern int target_fabric_setup_cits(struct target_fabric_configfs *); diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h new file mode 100644 index 000000000000..c2f8d0e3a03b --- /dev/null +++ b/include/target/target_core_fabric_lib.h @@ -0,0 +1,28 @@ +#ifndef TARGET_CORE_FABRIC_LIB_H +#define TARGET_CORE_FABRIC_LIB_H + +extern u8 sas_get_fabric_proto_ident(struct se_portal_group *); +extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *, unsigned char *); +extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *); +extern char *sas_parse_pr_out_transport_id(struct se_portal_group *, + const char *, u32 *, char **); + +extern u8 fc_get_fabric_proto_ident(struct se_portal_group *); +extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *, unsigned char *); +extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *); +extern char *fc_parse_pr_out_transport_id(struct se_portal_group *, + const char *, u32 *, char **); + +extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *); +extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *, unsigned char *); +extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, + struct t10_pr_registration *, int *); +extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, + const char *, u32 *, char **); + +#endif /* TARGET_CORE_FABRIC_LIB_H */ diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h new file mode 100644 index 000000000000..f3ac12b019c2 --- /dev/null +++ b/include/target/target_core_fabric_ops.h @@ -0,0 +1,100 @@ +/* Defined in target_core_configfs.h */ +struct target_fabric_configfs; + +struct target_core_fabric_ops { + struct configfs_subsystem *tf_subsys; + /* + * Optional to signal struct se_task->task_sg[] padding entries + * for scatterlist 
chaining using transport_do_task_sg_link(), + * disabled by default + */ + int task_sg_chaining:1; + char *(*get_fabric_name)(void); + u8 (*get_fabric_proto_ident)(struct se_portal_group *); + char *(*tpg_get_wwn)(struct se_portal_group *); + u16 (*tpg_get_tag)(struct se_portal_group *); + u32 (*tpg_get_default_depth)(struct se_portal_group *); + u32 (*tpg_get_pr_transport_id)(struct se_portal_group *, + struct se_node_acl *, + struct t10_pr_registration *, int *, + unsigned char *); + u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *, + struct se_node_acl *, + struct t10_pr_registration *, int *); + char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *, + const char *, u32 *, char **); + int (*tpg_check_demo_mode)(struct se_portal_group *); + int (*tpg_check_demo_mode_cache)(struct se_portal_group *); + int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); + int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); + struct se_node_acl *(*tpg_alloc_fabric_acl)( + struct se_portal_group *); + void (*tpg_release_fabric_acl)(struct se_portal_group *, + struct se_node_acl *); + u32 (*tpg_get_inst_index)(struct se_portal_group *); + /* + * Optional function pointer for TCM to perform command map + * from TCM processing thread context, for those struct se_cmd + * initally allocated in interrupt context. + */ + int (*new_cmd_map)(struct se_cmd *); + /* + * Optional function pointer for TCM fabric modules that use + * Linux/NET sockets to allocate struct iovec array to struct se_cmd + */ + int (*alloc_cmd_iovecs)(struct se_cmd *); + /* + * Optional to release struct se_cmd and fabric dependent allocated + * I/O descriptor in transport_cmd_check_stop() + */ + void (*check_stop_free)(struct se_cmd *); + void (*release_cmd_to_pool)(struct se_cmd *); + void (*release_cmd_direct)(struct se_cmd *); + /* + * Called with spin_lock_bh(struct se_portal_group->session_lock held. + */ + int (*shutdown_session)(struct se_session *); + void (*close_session)(struct se_session *); + void (*stop_session)(struct se_session *, int, int); + void (*fall_back_to_erl0)(struct se_session *); + int (*sess_logged_in)(struct se_session *); + u32 (*sess_get_index)(struct se_session *); + /* + * Used only for SCSI fabrics that contain multi-value TransportIDs + * (like iSCSI). All other SCSI fabrics should set this to NULL. 
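To show how a fabric module consumes this operations table, here is a deliberately tiny editorial sketch (not part of the patch; the "example" fabric and both callbacks are invented). A real fabric module must supply every mandatory callback in struct target_core_fabric_ops and register itself with the core through the target_fabric_configfs_*() helpers declared in target_core_configfs.h.

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>

/* Illustrative only: two trivial callbacks for a made-up fabric. */
static char *example_get_fabric_name(void)
{
	return "example";
}

static u32 example_sess_get_index(struct se_session *se_sess)
{
	return 0;	/* toy fabric with a single session */
}

static struct target_core_fabric_ops example_fabric_ops = {
	.get_fabric_name	= example_get_fabric_name,
	.sess_get_index		= example_sess_get_index,
	/* Not an iSCSI-style multi-value TransportID fabric, so the
	 * sess_get_initiator_sid() callback discussed above is left NULL. */
	.sess_get_initiator_sid	= NULL,
	/* ...the remaining callbacks are omitted from this sketch... */
};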
+ */ + u32 (*sess_get_initiator_sid)(struct se_session *, + unsigned char *, u32); + int (*write_pending)(struct se_cmd *); + int (*write_pending_status)(struct se_cmd *); + void (*set_default_node_attributes)(struct se_node_acl *); + u32 (*get_task_tag)(struct se_cmd *); + int (*get_cmd_state)(struct se_cmd *); + void (*new_cmd_failure)(struct se_cmd *); + int (*queue_data_in)(struct se_cmd *); + int (*queue_status)(struct se_cmd *); + int (*queue_tm_rsp)(struct se_cmd *); + u16 (*set_fabric_sense_len)(struct se_cmd *, u32); + u16 (*get_fabric_sense_len)(void); + int (*is_state_remove)(struct se_cmd *); + u64 (*pack_lun)(unsigned int); + /* + * fabric module calls for target_core_fabric_configfs.c + */ + struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, + struct config_group *, const char *); + void (*fabric_drop_wwn)(struct se_wwn *); + struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, + struct config_group *, const char *); + void (*fabric_drop_tpg)(struct se_portal_group *); + int (*fabric_post_link)(struct se_portal_group *, + struct se_lun *); + void (*fabric_pre_unlink)(struct se_portal_group *, + struct se_lun *); + struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, + struct config_group *, const char *); + void (*fabric_drop_np)(struct se_tpg_np *); + struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, + struct config_group *, const char *); + void (*fabric_drop_nodeacl)(struct se_node_acl *); +}; diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h new file mode 100644 index 000000000000..6c8248bc2c66 --- /dev/null +++ b/include/target/target_core_tmr.h @@ -0,0 +1,43 @@ +#ifndef TARGET_CORE_TMR_H +#define TARGET_CORE_TMR_H + +/* task management function values */ +#ifdef ABORT_TASK +#undef ABORT_TASK +#endif /* ABORT_TASK */ +#define ABORT_TASK 1 +#ifdef ABORT_TASK_SET +#undef ABORT_TASK_SET +#endif /* ABORT_TASK_SET */ +#define ABORT_TASK_SET 2 +#ifdef CLEAR_ACA +#undef CLEAR_ACA +#endif /* CLEAR_ACA */ +#define CLEAR_ACA 3 +#ifdef CLEAR_TASK_SET +#undef CLEAR_TASK_SET +#endif /* CLEAR_TASK_SET */ +#define CLEAR_TASK_SET 4 +#define LUN_RESET 5 +#define TARGET_WARM_RESET 6 +#define TARGET_COLD_RESET 7 +#define TASK_REASSIGN 8 + +/* task management response values */ +#define TMR_FUNCTION_COMPLETE 0 +#define TMR_TASK_DOES_NOT_EXIST 1 +#define TMR_LUN_DOES_NOT_EXIST 2 +#define TMR_TASK_STILL_ALLEGIANT 3 +#define TMR_TASK_FAILOVER_NOT_SUPPORTED 4 +#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED 5 +#define TMR_FUNCTION_AUTHORIZATION_FAILED 6 +#define TMR_FUNCTION_REJECTED 255 + +extern struct kmem_cache *se_tmr_req_cache; + +extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8); +extern void core_tmr_release_req(struct se_tmr_req *); +extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, + struct list_head *, struct se_cmd *); + +#endif /* TARGET_CORE_TMR_H */ diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h new file mode 100644 index 000000000000..77e18729c4c1 --- /dev/null +++ b/include/target/target_core_tpg.h @@ -0,0 +1,35 @@ +#ifndef TARGET_CORE_TPG_H +#define TARGET_CORE_TPG_H + +extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + const char *); +extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + unsigned char *); +extern void core_tpg_add_node_to_devs(struct se_node_acl *, + struct se_portal_group *); +extern struct se_node_acl 
*core_tpg_check_initiator_node_acl( + struct se_portal_group *, + unsigned char *); +extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); +extern void core_tpg_wait_for_mib_ref(struct se_node_acl *); +extern void core_tpg_clear_object_luns(struct se_portal_group *); +extern struct se_node_acl *core_tpg_add_initiator_node_acl( + struct se_portal_group *, + struct se_node_acl *, + const char *, u32); +extern int core_tpg_del_initiator_node_acl(struct se_portal_group *, + struct se_node_acl *, int); +extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, + unsigned char *, u32, int); +extern int core_tpg_register(struct target_core_fabric_ops *, + struct se_wwn *, + struct se_portal_group *, void *, + int); +extern int core_tpg_deregister(struct se_portal_group *); +extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); +extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32, + void *); +extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *); +extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); + +#endif /* TARGET_CORE_TPG_H */ diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h new file mode 100644 index 000000000000..66f44e56eb80 --- /dev/null +++ b/include/target/target_core_transport.h @@ -0,0 +1,351 @@ +#ifndef TARGET_CORE_TRANSPORT_H +#define TARGET_CORE_TRANSPORT_H + +#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION + +/* Attempts before moving from SHORT to LONG */ +#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 +#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ +#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */ + +#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ + +#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 +#define PYX_TRANSPORT_WRITE_PENDING 1 + +#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 +#define PYX_TRANSPORT_HBA_QUEUE_FULL -2 +#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 +#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 +#define PYX_TRANSPORT_INVALID_CDB_FIELD -5 +#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 +#define PYX_TRANSPORT_LU_COMM_FAILURE -7 +#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 +#define PYX_TRANSPORT_WRITE_PROTECTED -9 +#define PYX_TRANSPORT_TASK_TIMEOUT -10 +#define PYX_TRANSPORT_RESERVATION_CONFLICT -11 +#define PYX_TRANSPORT_ILLEGAL_REQUEST -12 +#define PYX_TRANSPORT_USE_SENSE_REASON -13 + +#ifndef SAM_STAT_RESERVATION_CONFLICT +#define SAM_STAT_RESERVATION_CONFLICT 0x18 +#endif + +#define TRANSPORT_PLUGIN_FREE 0 +#define TRANSPORT_PLUGIN_REGISTERED 1 + +#define TRANSPORT_PLUGIN_PHBA_PDEV 1 +#define TRANSPORT_PLUGIN_VHBA_PDEV 2 +#define TRANSPORT_PLUGIN_VHBA_VDEV 3 + +/* For SE OBJ Plugins, in seconds */ +#define TRANSPORT_TIMEOUT_TUR 10 +#define TRANSPORT_TIMEOUT_TYPE_DISK 60 +#define TRANSPORT_TIMEOUT_TYPE_ROM 120 +#define TRANSPORT_TIMEOUT_TYPE_TAPE 600 +#define TRANSPORT_TIMEOUT_TYPE_OTHER 300 + +/* For se_task->task_state_flags */ +#define TSF_EXCEPTION_CLEARED 0x01 + +/* + * struct se_subsystem_dev->su_dev_flags +*/ +#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001 +#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002 +#define SDF_USING_UDEV_PATH 0x00000004 +#define SDF_USING_ALIAS 0x00000008 + +/* + * struct se_device->dev_flags + */ +#define DF_READ_ONLY 0x00000001 +#define DF_SPC2_RESERVATIONS 0x00000002 +#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004 + +/* struct se_dev_attrib sanity values */ +/* 10 Minutes */ +#define 
DA_TASK_TIMEOUT_MAX 600 +/* Default max_unmap_lba_count */ +#define DA_MAX_UNMAP_LBA_COUNT 0 +/* Default max_unmap_block_desc_count */ +#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0 +/* Default unmap_granularity */ +#define DA_UNMAP_GRANULARITY_DEFAULT 0 +/* Default unmap_granularity_alignment */ +#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 +/* Emulation for Direct Page Out */ +#define DA_EMULATE_DPO 0 +/* Emulation for Forced Unit Access WRITEs */ +#define DA_EMULATE_FUA_WRITE 1 +/* Emulation for Forced Unit Access READs */ +#define DA_EMULATE_FUA_READ 0 +/* Emulation for WriteCache and SYNCHRONIZE_CACHE */ +#define DA_EMULATE_WRITE_CACHE 0 +/* Emulation for UNIT ATTENTION Interlock Control */ +#define DA_EMULATE_UA_INTLLCK_CTRL 0 +/* Emulation for TASK_ABORTED status (TAS) by default */ +#define DA_EMULATE_TAS 1 +/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ +#define DA_EMULATE_TPU 0 +/* + * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using + * block/blk-lib.c:blkdev_issue_discard() + */ +#define DA_EMULATE_TPWS 0 +/* No Emulation for PSCSI by default */ +#define DA_EMULATE_RESERVATIONS 0 +/* No Emulation for PSCSI by default */ +#define DA_EMULATE_ALUA 0 +/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ +#define DA_ENFORCE_PR_ISIDS 1 +#define DA_STATUS_MAX_SECTORS_MIN 16 +#define DA_STATUS_MAX_SECTORS_MAX 8192 + +#define SE_MODE_PAGE_BUF 512 + +#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs)) + +struct se_mem; +struct se_subsystem_api; + +extern int init_se_global(void); +extern void release_se_global(void); +extern void transport_init_queue_obj(struct se_queue_obj *); +extern int transport_subsystem_check_init(void); +extern int transport_subsystem_register(struct se_subsystem_api *); +extern void transport_subsystem_release(struct se_subsystem_api *); +extern void transport_load_plugins(void); +extern struct se_session *transport_init_session(void); +extern void __transport_register_session(struct se_portal_group *, + struct se_node_acl *, + struct se_session *, void *); +extern void transport_register_session(struct se_portal_group *, + struct se_node_acl *, + struct se_session *, void *); +extern void transport_free_session(struct se_session *); +extern void transport_deregister_session_configfs(struct se_session *); +extern void transport_deregister_session(struct se_session *); +extern void transport_cmd_finish_abort(struct se_cmd *, int); +extern void transport_cmd_finish_abort_tmr(struct se_cmd *); +extern void transport_complete_sync_cache(struct se_cmd *, int); +extern void transport_complete_task(struct se_task *, int); +extern void transport_add_task_to_execute_queue(struct se_task *, + struct se_task *, + struct se_device *); +unsigned char *transport_dump_cmd_direction(struct se_cmd *); +extern void transport_dump_dev_state(struct se_device *, char *, int *); +extern void transport_dump_dev_info(struct se_device *, struct se_lun *, + unsigned long long, char *, int *); +extern void transport_dump_vpd_proto_id(struct t10_vpd *, + unsigned char *, int); +extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); +extern int transport_dump_vpd_assoc(struct t10_vpd *, + unsigned char *, int); +extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); +extern int transport_dump_vpd_ident_type(struct t10_vpd *, + unsigned char *, int); +extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); +extern int transport_dump_vpd_ident(struct t10_vpd *, + 
unsigned char *, int); +extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); +extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, + struct se_subsystem_api *, + struct se_subsystem_dev *, u32, + void *, struct se_dev_limits *, + const char *, const char *); +extern void transport_device_setup_cmd(struct se_cmd *); +extern void transport_init_se_cmd(struct se_cmd *, + struct target_core_fabric_ops *, + struct se_session *, u32, int, int, + unsigned char *); +extern void transport_free_se_cmd(struct se_cmd *); +extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); +extern int transport_generic_handle_cdb(struct se_cmd *); +extern int transport_generic_handle_cdb_map(struct se_cmd *); +extern int transport_generic_handle_data(struct se_cmd *); +extern void transport_new_cmd_failure(struct se_cmd *); +extern int transport_generic_handle_tmr(struct se_cmd *); +extern void __transport_stop_task_timer(struct se_task *, unsigned long *); +extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); +extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, + struct scatterlist *, u32); +extern int transport_clear_lun_from_sessions(struct se_lun *); +extern int transport_check_aborted_status(struct se_cmd *, int); +extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); +extern void transport_send_task_abort(struct se_cmd *); +extern void transport_release_cmd_to_pool(struct se_cmd *); +extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); +extern void transport_generic_wait_for_cmds(struct se_cmd *, int); +extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); +extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, + void *, struct se_mem *, + struct se_mem **, u32 *, u32 *); +extern void transport_do_task_sg_chain(struct se_cmd *); +extern void transport_generic_process_write(struct se_cmd *); +extern int transport_generic_do_tmr(struct se_cmd *); +/* From target_core_alua.c */ +extern int core_alua_check_nonop_delay(struct se_cmd *); + +/* + * Each se_transport_task_t can have N number of possible struct se_task's + * for the storage transport(s) to possibly execute. + * Used primarily for splitting up CDBs that exceed the physical storage + * HBA's maximum sector count per task. + */ +struct se_mem { + struct page *se_page; + u32 se_len; + u32 se_off; + struct list_head se_list; +} ____cacheline_aligned; + +/* + * Each type of disk transport supported MUST have a template defined + * within its .h file. + */ +struct se_subsystem_api { + /* + * The Name. :-) + */ + char name[16]; + /* + * Transport Type. 
+ */ + u8 transport_type; + /* + * struct module for struct se_hba references + */ + struct module *owner; + /* + * Used for global se_subsystem_api list_head + */ + struct list_head sub_api_list; + /* + * For SCF_SCSI_NON_DATA_CDB + */ + int (*cdb_none)(struct se_task *); + /* + * For SCF_SCSI_CONTROL_NONSG_IO_CDB + */ + int (*map_task_non_SG)(struct se_task *); + /* + * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB + */ + int (*map_task_SG)(struct se_task *); + /* + * attach_hba(): + */ + int (*attach_hba)(struct se_hba *, u32); + /* + * detach_hba(): + */ + void (*detach_hba)(struct se_hba *); + /* + * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA -> + * Linux/SCSI struct Scsi_Host passthrough + */ + int (*pmode_enable_hba)(struct se_hba *, unsigned long); + /* + * allocate_virtdevice(): + */ + void *(*allocate_virtdevice)(struct se_hba *, const char *); + /* + * create_virtdevice(): Only for Virtual HBAs + */ + struct se_device *(*create_virtdevice)(struct se_hba *, + struct se_subsystem_dev *, void *); + /* + * free_device(): + */ + void (*free_device)(void *); + + /* + * dpo_emulated(): + */ + int (*dpo_emulated)(struct se_device *); + /* + * fua_write_emulated(): + */ + int (*fua_write_emulated)(struct se_device *); + /* + * fua_read_emulated(): + */ + int (*fua_read_emulated)(struct se_device *); + /* + * write_cache_emulated(): + */ + int (*write_cache_emulated)(struct se_device *); + /* + * transport_complete(): + * + * Use transport_generic_complete() for majority of DAS transport + * drivers. Provided out of convenience. + */ + int (*transport_complete)(struct se_task *task); + struct se_task *(*alloc_task)(struct se_cmd *); + /* + * do_task(): + */ + int (*do_task)(struct se_task *); + /* + * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate + * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard + */ + int (*do_discard)(struct se_device *, sector_t, u32); + /* + * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate + * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush() + */ + void (*do_sync_cache)(struct se_task *); + /* + * free_task(): + */ + void (*free_task)(struct se_task *); + /* + * check_configfs_dev_params(): + */ + ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); + /* + * set_configfs_dev_params(): + */ + ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, + const char *, ssize_t); + /* + * show_configfs_dev_params(): + */ + ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, + char *); + /* + * get_cdb(): + */ + unsigned char *(*get_cdb)(struct se_task *); + /* + * get_device_rev(): + */ + u32 (*get_device_rev)(struct se_device *); + /* + * get_device_type(): + */ + u32 (*get_device_type)(struct se_device *); + /* + * Get the sector_t from a subsystem backstore.. 
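As a rough sketch of how this template is meant to be consumed (hypothetical demo_* names, most callbacks elided), a backstore plugin declares one instance and registers it through transport_subsystem_register(), whose prototype appears earlier in this header:

static struct se_subsystem_api demo_template = {
        .name                   = "demo",
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = demo_attach_hba,
        .detach_hba             = demo_detach_hba,
        /* remaining callbacks follow the same pattern */
};

static int __init demo_module_init(void)
{
        return transport_subsystem_register(&demo_template);
}

static void __exit demo_module_exit(void)
{
        transport_subsystem_release(&demo_template);
}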
+ */ + sector_t (*get_blocks)(struct se_device *); + /* + * do_se_mem_map(): + */ + int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, + struct se_mem *, struct se_mem **, u32 *, u32 *); + /* + * get_sense_buffer(): + */ + unsigned char *(*get_sense_buffer)(struct se_task *); +} ____cacheline_aligned; + +#define TRANSPORT(dev) ((dev)->transport) +#define HBA_TRANSPORT(hba) ((hba)->transport) + +extern struct se_global *se_global; + +#endif /* TARGET_CORE_TRANSPORT_H */ diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h new file mode 100644 index 000000000000..186e84db4b54 --- /dev/null +++ b/include/trace/events/asoc.h @@ -0,0 +1,235 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM asoc + +#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ASOC_H + +#include +#include + +struct snd_soc_jack; +struct snd_soc_codec; +struct snd_soc_card; +struct snd_soc_dapm_widget; + +/* + * Log register events + */ +DECLARE_EVENT_CLASS(snd_soc_reg, + + TP_PROTO(struct snd_soc_codec *codec, unsigned int reg, + unsigned int val), + + TP_ARGS(codec, reg, val), + + TP_STRUCT__entry( + __string( name, codec->name ) + __field( int, id ) + __field( unsigned int, reg ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __assign_str(name, codec->name); + __entry->id = codec->id; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name), + (int)__entry->id, (unsigned int)__entry->reg, + (unsigned int)__entry->val) +); + +DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write, + + TP_PROTO(struct snd_soc_codec *codec, unsigned int reg, + unsigned int val), + + TP_ARGS(codec, reg, val) + +); + +DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read, + + TP_PROTO(struct snd_soc_codec *codec, unsigned int reg, + unsigned int val), + + TP_ARGS(codec, reg, val) + +); + +DECLARE_EVENT_CLASS(snd_soc_card, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val), + + TP_STRUCT__entry( + __string( name, card->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, card->name); + __entry->val = val; + ), + + TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val) + +); + +DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val) + +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_basic, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card), + + TP_STRUCT__entry( + __string( name, card->name ) + ), + + TP_fast_assign( + __assign_str(name, card->name); + ), + + TP_printk("card=%s", __get_str(name)) +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card) + +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card) + +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_widget, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val), + + TP_STRUCT__entry( + __string( name, w->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, w->name); + __entry->val = val; + ), + + TP_printk("widget=%s val=%d", __get_str(name), + (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start, + + 
TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +TRACE_EVENT(snd_soc_jack_irq, + + TP_PROTO(const char *name), + + TP_ARGS(name), + + TP_STRUCT__entry( + __string( name, name ) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("%s", __get_str(name)) +); + +TRACE_EVENT(snd_soc_jack_report, + + TP_PROTO(struct snd_soc_jack *jack, int mask, int val), + + TP_ARGS(jack, mask, val), + + TP_STRUCT__entry( + __string( name, jack->jack->name ) + __field( int, mask ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, jack->jack->name); + __entry->mask = mask; + __entry->val = val; + ), + + TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val, + (int)__entry->mask) +); + +TRACE_EVENT(snd_soc_jack_notify, + + TP_PROTO(struct snd_soc_jack *jack, int val), + + TP_ARGS(jack, val), + + TP_STRUCT__entry( + __string( name, jack->jack->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, jack->jack->name); + __entry->val = val; + ), + + TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) +); + +#endif /* _TRACE_ASOC_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d8ce278515c3..aba421d68f6f 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -206,15 +206,16 @@ TRACE_EVENT(block_bio_bounce, * block_bio_complete - completed all work on the block operation * @q: queue holding the block operation * @bio: block operation completed + * @error: io error value * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. */ TRACE_EVENT(block_bio_complete, - TP_PROTO(struct request_queue *q, struct bio *bio), + TP_PROTO(struct request_queue *q, struct bio *bio, int error), - TP_ARGS(q, bio), + TP_ARGS(q, bio, error), TP_STRUCT__entry( __field( dev_t, dev ) @@ -228,6 +229,7 @@ TRACE_EVENT(block_bio_complete, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_sector; __entry->nr_sector = bio->bi_size >> 9; + __entry->error = error; blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); ), @@ -486,16 +488,16 @@ TRACE_EVENT(block_split, ); /** - * block_remap - map request for a partition to the raw device + * block_bio_remap - map request for a logical device to the raw device * @q: queue holding the operation * @bio: revised operation * @dev: device for the operation * @from: original sector for the operation * - * An operation for a partition on a block device has been mapped to the + * An operation for a logical device has been mapped to the * raw block device. 
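For context, a stacking driver raises the renamed tracepoint after retargeting a bio, and completion paths now pass the error code to block_bio_complete. A hedged sketch of both calls (old_dev, old_sector and error stand for values the caller already has):

        /* after rewriting bio->bi_bdev / bio->bi_sector in a stacking driver */
        trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                              old_dev, old_sector);

        /* completion now reports the outcome as well */
        trace_block_bio_complete(q, bio, error);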
*/ -TRACE_EVENT(block_remap, +TRACE_EVENT(block_bio_remap, TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, sector_t from), diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h new file mode 100644 index 000000000000..388bcdd26d46 --- /dev/null +++ b/include/trace/events/compaction.h @@ -0,0 +1,74 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM compaction + +#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COMPACTION_H + +#include +#include +#include "gfpflags.h" + +DECLARE_EVENT_CLASS(mm_compaction_isolate_template, + + TP_PROTO(unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(nr_scanned, nr_taken), + + TP_STRUCT__entry( + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_taken) + ), + + TP_fast_assign( + __entry->nr_scanned = nr_scanned; + __entry->nr_taken = nr_taken; + ), + + TP_printk("nr_scanned=%lu nr_taken=%lu", + __entry->nr_scanned, + __entry->nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages, + + TP_PROTO(unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(nr_scanned, nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, + TP_PROTO(unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(nr_scanned, nr_taken) +); + +TRACE_EVENT(mm_compaction_migratepages, + + TP_PROTO(unsigned long nr_migrated, + unsigned long nr_failed), + + TP_ARGS(nr_migrated, nr_failed), + + TP_STRUCT__entry( + __field(unsigned long, nr_migrated) + __field(unsigned long, nr_failed) + ), + + TP_fast_assign( + __entry->nr_migrated = nr_migrated; + __entry->nr_failed = nr_failed; + ), + + TP_printk("nr_migrated=%lu nr_failed=%lu", + __entry->nr_migrated, + __entry->nr_failed) +); + + +#endif /* _TRACE_COMPACTION_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 6dd3a51ab1cb..46e3cd8e197a 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -6,6 +6,36 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm +#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x } + +#define kvm_trace_exit_reason \ + ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \ + ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \ + ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \ + ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ + ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI) + +TRACE_EVENT(kvm_userspace_exit, + TP_PROTO(__u32 reason, int errno), + TP_ARGS(reason, errno), + + TP_STRUCT__entry( + __field( __u32, reason ) + __field( int, errno ) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->errno = errno; + ), + + TP_printk("reason %s (%d)", + __entry->errno < 0 ? + (__entry->errno == -EINTR ? "restart" : "error") : + __print_symbolic(__entry->reason, kvm_trace_exit_reason), + __entry->errno < 0 ? -__entry->errno : __entry->reason) +); + #if defined(__KVM_HAVE_IOAPIC) TRACE_EVENT(kvm_set_irq, TP_PROTO(unsigned int gsi, int level, int irq_source_id), @@ -185,6 +215,97 @@ TRACE_EVENT(kvm_age_page, __entry->referenced ? 
"YOUNG" : "OLD") ); +#ifdef CONFIG_KVM_ASYNC_PF +DECLARE_EVENT_CLASS(kvm_async_get_page_class, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn), + + TP_STRUCT__entry( + __field(__u64, gva) + __field(u64, gfn) + ), + + TP_fast_assign( + __entry->gva = gva; + __entry->gfn = gfn; + ), + + TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn) +); + +DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva), + + TP_STRUCT__entry( + __field(__u64, token) + __field(__u64, gva) + ), + + TP_fast_assign( + __entry->token = token; + __entry->gva = gva; + ), + + TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva) + +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva) +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva) +); + +TRACE_EVENT( + kvm_async_pf_completed, + TP_PROTO(unsigned long address, struct page *page, u64 gva), + TP_ARGS(address, page, gva), + + TP_STRUCT__entry( + __field(unsigned long, address) + __field(pfn_t, pfn) + __field(u64, gva) + ), + + TP_fast_assign( + __entry->address = address; + __entry->pfn = page ? page_to_pfn(page) : 0; + __entry->gva = gva; + ), + + TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva, + __entry->address, __entry->pfn) +); + +#endif + #endif /* _TRACE_KVM_MAIN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h new file mode 100644 index 000000000000..37502a7404b7 --- /dev/null +++ b/include/trace/events/regulator.h @@ -0,0 +1,141 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM regulator + +#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_REGULATOR_H + +#include +#include + +/* + * Events which just log themselves and the regulator name for enable/disable + * type tracking. + */ +DECLARE_EVENT_CLASS(regulator_basic, + + TP_PROTO(const char *name), + + TP_ARGS(name), + + TP_STRUCT__entry( + __string( name, name ) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("name=%s", __get_str(name)) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_delay, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +/* + * Events that take a range of numerical values, mostly for voltages + * and so on. 
+ */ +DECLARE_EVENT_CLASS(regulator_range, + + TP_PROTO(const char *name, int min, int max), + + TP_ARGS(name, min, max), + + TP_STRUCT__entry( + __string( name, name ) + __field( int, min ) + __field( int, max ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->min = min; + __entry->max = max; + ), + + TP_printk("name=%s (%d-%d)", __get_str(name), + (int)__entry->min, (int)__entry->max) +); + +DEFINE_EVENT(regulator_range, regulator_set_voltage, + + TP_PROTO(const char *name, int min, int max), + + TP_ARGS(name, min, max) + +); + + +/* + * Events that take a single value, mostly for readback and refcounts. + */ +DECLARE_EVENT_CLASS(regulator_value, + + TP_PROTO(const char *name, unsigned int val), + + TP_ARGS(name, val), + + TP_STRUCT__entry( + __string( name, name ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->val = val; + ), + + TP_printk("name=%s, val=%u", __get_str(name), + (int)__entry->val) +); + +DEFINE_EVENT(regulator_value, regulator_set_voltage_complete, + + TP_PROTO(const char *name, unsigned int value), + + TP_ARGS(name, value) + +); + +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index c255fcc587bf..ea422aaa23e1 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -25,13 +25,13 @@ #define trace_reclaim_flags(page, sync) ( \ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ - (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ ) #define trace_shrink_flags(file, sync) ( \ - (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ + (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ - (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ ) TRACE_EVENT(mm_vmscan_kswapd_sleep, diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 89a2b2db4375..4e249b927eaa 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -81,6 +81,7 @@ DEFINE_EVENT(writeback_class, name, \ TP_ARGS(bdi)) DEFINE_WRITEBACK_EVENT(writeback_nowork); +DEFINE_WRITEBACK_EVENT(writeback_wake_background); DEFINE_WRITEBACK_EVENT(writeback_wake_thread); DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread); DEFINE_WRITEBACK_EVENT(writeback_bdi_register); diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h new file mode 100644 index 000000000000..eb23f4188f5a --- /dev/null +++ b/include/xen/gntdev.h @@ -0,0 +1,119 @@ +/****************************************************************************** + * gntdev.h + * + * Interface to /dev/xen/gntdev. 
+ * + * Copyright (c) 2007, D G Murray + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __LINUX_PUBLIC_GNTDEV_H__ +#define __LINUX_PUBLIC_GNTDEV_H__ + +struct ioctl_gntdev_grant_ref { + /* The domain ID of the grant to be mapped. */ + uint32_t domid; + /* The grant reference of the grant to be mapped. */ + uint32_t ref; +}; + +/* + * Inserts the grant references into the mapping table of an instance + * of gntdev. N.B. This does not perform the mapping, which is deferred + * until mmap() is called with @index as the offset. + */ +#define IOCTL_GNTDEV_MAP_GRANT_REF \ +_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) +struct ioctl_gntdev_map_grant_ref { + /* IN parameters */ + /* The number of grants to be mapped. */ + uint32_t count; + uint32_t pad; + /* OUT parameters */ + /* The offset to be used on a subsequent call to mmap(). */ + uint64_t index; + /* Variable IN parameter. */ + /* Array of grant references, of size @count. */ + struct ioctl_gntdev_grant_ref refs[1]; +}; + +/* + * Removes the grant references from the mapping table of an instance of + * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) + * before this ioctl is called, or an error will result. + */ +#define IOCTL_GNTDEV_UNMAP_GRANT_REF \ +_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) +struct ioctl_gntdev_unmap_grant_ref { + /* IN parameters */ + /* The offset was returned by the corresponding map operation. */ + uint64_t index; + /* The number of pages to be unmapped. */ + uint32_t count; + uint32_t pad; +}; + +/* + * Returns the offset in the driver's address space that corresponds + * to @vaddr. This can be used to perform a munmap(), followed by an + * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by + * the caller. The number of pages that were allocated at the same time as + * @vaddr is returned in @count. + * + * N.B. Where more than one page has been mapped into a contiguous range, the + * supplied @vaddr must correspond to the start of the range; otherwise + * an error will result. 
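Putting the ioctls above together, a user-space client would map and later tear down a single grant roughly as follows (illustrative sketch; remote_domid and gref are assumed to be known, error handling omitted):

        int fd = open("/dev/xen/gntdev", O_RDWR);
        struct ioctl_gntdev_map_grant_ref map = {
                .count = 1,
                .refs[0] = { .domid = remote_domid, .ref = gref },
        };

        if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) == 0) {
                void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_SHARED, fd, map.index);
                /* ... use the shared page ... */
                munmap(page, 4096);

                struct ioctl_gntdev_unmap_grant_ref unmap = {
                        .index = map.index,
                        .count = 1,
                };
                ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
        }

Note that the munmap() precedes the UNMAP_GRANT_REF ioctl, matching the ordering required by the comment above.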
It is only possible to munmap() the entire + * contiguously-allocated range at once, and not any subrange thereof. + */ +#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ +_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) +struct ioctl_gntdev_get_offset_for_vaddr { + /* IN parameters */ + /* The virtual address of the first mapped page in a range. */ + uint64_t vaddr; + /* OUT parameters */ + /* The offset that was used in the initial mmap() operation. */ + uint64_t offset; + /* The number of pages mapped in the VM area that begins at @vaddr. */ + uint32_t count; + uint32_t pad; +}; + +/* + * Sets the maximum number of grants that may mapped at once by this gntdev + * instance. + * + * N.B. This must be called before any other ioctl is performed on the device. + */ +#define IOCTL_GNTDEV_SET_MAX_GRANTS \ +_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) +struct ioctl_gntdev_set_max_grants { + /* IN parameter */ + /* The maximum number of grants that may be mapped at once. */ + uint32_t count; +}; + +#endif /* __LINUX_PUBLIC_GNTDEV_H__ */ diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 9a731706a016..b1fab6b5b3ef 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -37,10 +37,16 @@ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ -#include +#include + +#include #include + +#include #include +#include + /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 @@ -107,6 +113,37 @@ void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); +static inline void +gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, + uint32_t flags, grant_ref_t ref, domid_t domid) +{ + if (flags & GNTMAP_contains_pte) + map->host_addr = addr; + else if (xen_feature(XENFEAT_auto_translated_physmap)) + map->host_addr = __pa(addr); + else + map->host_addr = addr; + + map->flags = flags; + map->ref = ref; + map->dom = domid; +} + +static inline void +gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr, + uint32_t flags, grant_handle_t handle) +{ + if (flags & GNTMAP_contains_pte) + unmap->host_addr = addr; + else if (xen_feature(XENFEAT_auto_translated_physmap)) + unmap->host_addr = __pa(addr); + else + unmap->host_addr = addr; + + unmap->handle = handle; + unmap->dev_bus_addr = 0; +} + int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared); @@ -118,4 +155,9 @@ unsigned int gnttab_max_grant_frames(void); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) +int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, + struct page **pages, unsigned int count); +int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, + struct page **pages, unsigned int count); + #endif /* __ASM_GNTTAB_H__ */ diff --git a/init/Kconfig b/init/Kconfig index 8dfd094e6875..4f6cdbf523eb 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -130,13 +130,16 @@ config HAVE_KERNEL_BZIP2 config HAVE_KERNEL_LZMA bool +config HAVE_KERNEL_XZ + bool + config HAVE_KERNEL_LZO bool choice prompt "Kernel compression mode" default KERNEL_GZIP - depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO + depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO help The linux kernel is a kind of self-extracting executable. 
Several compression algorithms are available, which differ @@ -181,6 +184,21 @@ config KERNEL_LZMA two. Compression is slowest. The kernel size is about 33% smaller with LZMA in comparison to gzip. +config KERNEL_XZ + bool "XZ" + depends on HAVE_KERNEL_XZ + help + XZ uses the LZMA2 algorithm and instruction set specific + BCJ filters which can improve compression ratio of executable + code. The size of the kernel is about 30% smaller with XZ in + comparison to gzip. On architectures for which there is a BCJ + filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ + will create a few percent smaller kernel than plain LZMA. + + The speed is about the same as with LZMA: The decompression + speed of XZ is better than that of bzip2 but worse than gzip + and LZO. Compression is slow. + config KERNEL_LZO bool "LZO" depends on HAVE_KERNEL_LZO @@ -673,7 +691,7 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED help Memory Resource Controller Swap Extension comes with its price in a bigger memory consumption. General purpose distribution kernels - which want to enable the feautre but keep it disabled by default + which want to enable the feature but keep it disabled by default and let the user enable it by swapaccount boot command line parameter should have this option unselected. For those who want to have the feature enabled by default should diff --git a/kernel/Makefile b/kernel/Makefile index 5669f71dfdd5..353d3fe8ba33 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -43,7 +43,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o -obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o +obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) obj-y += up.o endif diff --git a/kernel/audit.c b/kernel/audit.c index 77770a034d59..e4956244ae50 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -400,7 +400,7 @@ static void kauditd_send_skb(struct sk_buff *skb) if (err < 0) { BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); - audit_log_lost("auditd dissapeared\n"); + audit_log_lost("auditd disappeared\n"); audit_pid = 0; /* we might get lucky and get this in the next auditd */ audit_hold_skb(skb); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 51cddc11cd85..b24d7027b83c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -763,9 +763,8 @@ EXPORT_SYMBOL_GPL(cgroup_unlock); * -> cgroup_mkdir. 
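The cgroup hunks below replace per-dentry d_set_d_op() calls with a single superblock-wide table; the pattern being adopted is, schematically (demo_* names hypothetical):

        static const struct dentry_operations demo_dops = {
                .d_iput   = demo_iput,
                .d_delete = demo_delete,
        };

        /* every dentry created on this superblock now inherits demo_dops */
        sb->s_d_op = &demo_dops;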
*/ -static struct dentry *cgroup_lookup(struct inode *dir, - struct dentry *dentry, struct nameidata *nd); static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); +static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *); static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); static int cgroup_populate_dir(struct cgroup *cgrp); static const struct inode_operations cgroup_dir_inode_operations; @@ -862,6 +861,11 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) iput(inode); } +static int cgroup_delete(const struct dentry *d) +{ + return 1; +} + static void remove_dir(struct dentry *d) { struct dentry *parent = dget(d->d_parent); @@ -912,7 +916,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry) parent = dentry->d_parent; spin_lock(&parent->d_lock); - spin_lock(&dentry->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); list_del_init(&dentry->d_u.d_child); spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); @@ -1451,6 +1455,11 @@ static int cgroup_set_super(struct super_block *sb, void *data) static int cgroup_get_rootdir(struct super_block *sb) { + static const struct dentry_operations cgroup_dops = { + .d_iput = cgroup_diput, + .d_delete = cgroup_delete, + }; + struct inode *inode = cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); struct dentry *dentry; @@ -1468,6 +1477,8 @@ static int cgroup_get_rootdir(struct super_block *sb) return -ENOMEM; } sb->s_root = dentry; + /* for everything else we want ->d_op set */ + sb->s_d_op = &cgroup_dops; return 0; } @@ -2197,6 +2208,14 @@ static const struct inode_operations cgroup_dir_inode_operations = { .rename = cgroup_rename, }; +static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) +{ + if (dentry->d_name.len > NAME_MAX) + return ERR_PTR(-ENAMETOOLONG); + d_add(dentry, NULL); + return NULL; +} + /* * Check if a file is a control file */ @@ -2207,26 +2226,6 @@ static inline struct cftype *__file_cft(struct file *file) return __d_cft(file->f_dentry); } -static int cgroup_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - -static struct dentry *cgroup_lookup(struct inode *dir, - struct dentry *dentry, struct nameidata *nd) -{ - static const struct dentry_operations cgroup_dentry_operations = { - .d_delete = cgroup_delete_dentry, - .d_iput = cgroup_diput, - }; - - if (dentry->d_name.len > NAME_MAX) - return ERR_PTR(-ENAMETOOLONG); - d_set_d_op(dentry, &cgroup_dentry_operations); - d_add(dentry, NULL); - return NULL; -} - static int cgroup_create_file(struct dentry *dentry, mode_t mode, struct super_block *sb) { diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index a6e729766821..bd3e8e29caa3 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2914,7 +2914,7 @@ static void __init kdb_cmd_init(void) } } -/* Intialize kdb_printf, breakpoint tables and kdb state */ +/* Initialize kdb_printf, breakpoint tables and kdb state */ void __init kdb_init(int lvl) { static int kdb_init_lvl = KDB_NOT_INITIALIZED; diff --git a/kernel/fork.c b/kernel/fork.c index d9b44f20b6b0..25e429152ddc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include @@ -328,6 +329,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) rb_parent = NULL; pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); + if (retval) + goto out; + retval = khugepaged_fork(mm, oldmm); if 
(retval) goto out; @@ -529,6 +533,9 @@ void __mmdrop(struct mm_struct *mm) mm_free_pgd(mm); destroy_context(mm); mmu_notifier_mm_destroy(mm); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + VM_BUG_ON(mm->pmd_huge_pte); +#endif free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); @@ -543,6 +550,7 @@ void mmput(struct mm_struct *mm) if (atomic_dec_and_test(&mm->mm_users)) { exit_aio(mm); ksm_exit(mm); + khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { @@ -669,6 +677,10 @@ struct mm_struct *dup_mm(struct task_struct *tsk) mm->token_priority = 0; mm->last_interval = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + mm->pmd_huge_pte = NULL; +#endif + if (!mm_init(mm, tsk)) goto fail_nomem; @@ -910,6 +922,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->oom_adj = current->signal->oom_adj; sig->oom_score_adj = current->signal->oom_score_adj; + sig->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_init(&sig->cred_guard_mutex); @@ -1409,23 +1422,6 @@ long do_fork(unsigned long clone_flags, return -EPERM; } - /* - * We hope to recycle these flags after 2.6.26 - */ - if (unlikely(clone_flags & CLONE_STOPPED)) { - static int __read_mostly count = 100; - - if (count > 0 && printk_ratelimit()) { - char comm[TASK_COMM_LEN]; - - count--; - printk(KERN_INFO "fork(): process `%s' used deprecated " - "clone flags 0x%lx\n", - get_task_comm(comm, current), - clone_flags & CLONE_STOPPED); - } - } - /* * When called from kernel_thread, don't do user tracing stuff. */ @@ -1464,16 +1460,7 @@ long do_fork(unsigned long clone_flags, */ p->flags &= ~PF_STARTING; - if (unlikely(clone_flags & CLONE_STOPPED)) { - /* - * We'll start up with an immediate SIGSTOP. - */ - sigaddset(&p->pending.signal, SIGSTOP); - set_tsk_thread_flag(p, TIF_SIGPENDING); - __set_task_state(p, TASK_STOPPED); - } else { - wake_up_new_task(p, clone_flags); - } + wake_up_new_task(p, clone_flags); tracehook_report_clone_complete(trace, regs, clone_flags, nr, p); diff --git a/kernel/futex.c b/kernel/futex.c index 3019b92e6917..52075633373f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -233,7 +233,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; - struct page *page; + struct page *page, *page_head; int err; /* @@ -265,11 +265,46 @@ again: if (err < 0) return err; - page = compound_head(page); - lock_page(page); - if (!page->mapping) { - unlock_page(page); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + page_head = page; + if (unlikely(PageTail(page))) { put_page(page); + /* serialize against __split_huge_page_splitting() */ + local_irq_disable(); + if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { + page_head = compound_head(page); + /* + * page_head is valid pointer but we must pin + * it before taking the PG_lock and/or + * PG_compound_lock. The moment we re-enable + * irqs __split_huge_page_splitting() can + * return and the head page can be freed from + * under us. We can't take the PG_lock and/or + * PG_compound_lock on a page that could be + * freed from under us. 
+ */ + if (page != page_head) { + get_page(page_head); + put_page(page); + } + local_irq_enable(); + } else { + local_irq_enable(); + goto again; + } + } +#else + page_head = compound_head(page); + if (page != page_head) { + get_page(page_head); + put_page(page); + } +#endif + + lock_page(page_head); + if (!page_head->mapping) { + unlock_page(page_head); + put_page(page_head); goto again; } @@ -280,20 +315,20 @@ again: * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ - if (PageAnon(page)) { + if (PageAnon(page_head)) { key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ - key->shared.inode = page->mapping->host; - key->shared.pgoff = page->index; + key->shared.inode = page_head->mapping->host; + key->shared.pgoff = page_head->index; } get_futex_key_refs(key); - unlock_page(page); - put_page(page); + unlock_page(page_head); + put_page(page_head); return 0; } diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 45da2b6920ab..0c8d7c048615 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1745,7 +1745,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, } /* - * A NULL parameter means "inifinte" + * A NULL parameter means "infinite" */ if (!expires) { schedule(); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 9988d03797f5..282f20230e67 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -72,6 +72,8 @@ static inline int desc_node(struct irq_desc *desc) { return 0; } static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) { + int cpu; + desc->irq_data.irq = irq; desc->irq_data.chip = &no_irq_chip; desc->irq_data.chip_data = NULL; @@ -83,7 +85,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) desc->irq_count = 0; desc->irqs_unhandled = 0; desc->name = NULL; - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); + for_each_possible_cpu(cpu) + *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; desc_smp_init(desc, node); } @@ -133,8 +136,7 @@ static struct irq_desc *alloc_desc(int irq, int node) if (!desc) return NULL; /* allocate based on nr_cpu_ids */ - desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), - gfp, node); + desc->kstat_irqs = alloc_percpu(unsigned int); if (!desc->kstat_irqs) goto err_desc; @@ -149,7 +151,7 @@ static struct irq_desc *alloc_desc(int irq, int node) return desc; err_kstat: - kfree(desc->kstat_irqs); + free_percpu(desc->kstat_irqs); err_desc: kfree(desc); return NULL; @@ -166,7 +168,7 @@ static void free_desc(unsigned int irq) mutex_unlock(&sparse_irq_lock); free_masks(desc); - kfree(desc->kstat_irqs); + free_percpu(desc->kstat_irqs); kfree(desc); } @@ -234,7 +236,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { } }; -static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; int __init early_irq_init(void) { int count, i, node = first_online_node; @@ -250,7 +251,8 @@ int __init early_irq_init(void) for (i = 0; i < count; i++) { desc[i].irq_data.irq = i; desc[i].irq_data.chip = &no_irq_chip; - desc[i].kstat_irqs = kstat_irqs_all[i]; + /* TODO : do this allocation on-demand ... 
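The irqdesc hunks above and below swap a static [NR_IRQS][NR_CPUS] counter array for per-CPU storage; the generic allocation, update and aggregation pattern looks roughly like this:

        unsigned int __percpu *stats = alloc_percpu(unsigned int);
        unsigned int sum = 0;
        int cpu;

        __this_cpu_inc(*stats);                         /* account on the local CPU */
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(stats, cpu);        /* aggregate when reading */
        free_percpu(stats);

Per-CPU counters avoid cache-line bouncing on the hot interrupt path and only pay the cross-CPU cost when the statistics are read.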
*/ + desc[i].kstat_irqs = alloc_percpu(unsigned int); alloc_masks(desc + i, GFP_KERNEL, node); desc_smp_init(desc + i, node); lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); @@ -275,6 +277,22 @@ static void free_desc(unsigned int irq) static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) { +#if defined(CONFIG_KSTAT_IRQS_ONDEMAND) + struct irq_desc *desc; + unsigned int i; + + for (i = 0; i < cnt; i++) { + desc = irq_to_desc(start + i); + if (desc && !desc->kstat_irqs) { + unsigned int __percpu *stats = alloc_percpu(unsigned int); + + if (!stats) + return -1; + if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL) + free_percpu(stats); + } + } +#endif return start; } #endif /* !CONFIG_SPARSE_IRQ */ @@ -391,7 +409,9 @@ void dynamic_irq_cleanup(unsigned int irq) unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); - return desc ? desc->kstat_irqs[cpu] : 0; + + return desc && desc->kstat_irqs ? + *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; } #ifdef CONFIG_GENERIC_HARDIRQS @@ -401,10 +421,10 @@ unsigned int kstat_irqs(unsigned int irq) int cpu; int sum = 0; - if (!desc) + if (!desc || !desc->kstat_irqs) return 0; for_each_possible_cpu(cpu) - sum += desc->kstat_irqs[cpu]; + sum += *per_cpu_ptr(desc->kstat_irqs, cpu); return sum; } #endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/kernel/kexec.c b/kernel/kexec.c index b55045bc7563..ec19b92c7ebd 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -163,7 +163,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, * just verifies it is an address we can use. * * Since the kernel does everything in page size chunks ensure - * the destination addreses are page aligned. Too many + * the destination addresses are page aligned. Too many * special cases crop of when we don't do this. 
The most * insidious is getting overlapping destination addresses * simply because addresses are changed to page size diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 17110a4a4fc2..ee74b35e528d 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -241,24 +241,19 @@ static int lstats_show(struct seq_file *m, void *v) seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < MAXLR; i++) { - if (latency_record[i].backtrace[0]) { + struct latency_record *lr = &latency_record[i]; + + if (lr->backtrace[0]) { int q; - seq_printf(m, "%i %lu %lu ", - latency_record[i].count, - latency_record[i].time, - latency_record[i].max); + seq_printf(m, "%i %lu %lu", + lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_SYMBOL_LEN]; - char *c; - if (!latency_record[i].backtrace[q]) + unsigned long bt = lr->backtrace[q]; + if (!bt) break; - if (latency_record[i].backtrace[q] == ULONG_MAX) + if (bt == ULONG_MAX) break; - sprint_symbol(sym, latency_record[i].backtrace[q]); - c = strchr(sym, '+'); - if (c) - *c = 0; - seq_printf(m, "%s ", sym); + seq_printf(m, " %ps", (void *)bt); } seq_printf(m, "\n"); } diff --git a/kernel/panic.c b/kernel/panic.c index 4c13b1a88ebb..991bb87a1704 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -34,6 +34,7 @@ static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); int panic_timeout; +EXPORT_SYMBOL_GPL(panic_timeout); ATOMIC_NOTIFIER_HEAD(panic_notifier_list); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b782b7a79f00..05ebe841270b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4664,7 +4664,7 @@ int perf_swevent_get_recursion_context(void) } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); -void inline perf_swevent_put_recursion_context(int rctx) +inline void perf_swevent_put_recursion_context(int rctx) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a5aff3ebad38..265729966ece 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -100,13 +100,9 @@ config PM_SLEEP_ADVANCED_DEBUG depends on PM_ADVANCED_DEBUG default n -config SUSPEND_NVS - bool - config SUSPEND bool "Suspend to RAM and standby" depends on PM && ARCH_SUSPEND_POSSIBLE - select SUSPEND_NVS if HAS_IOMEM default y ---help--- Allow the system to enter sleep states in which main memory is @@ -140,7 +136,6 @@ config HIBERNATION depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE select LZO_COMPRESS select LZO_DECOMPRESS - select SUSPEND_NVS if HAS_IOMEM ---help--- Enable the suspend to disk (STD) functionality, which is usually called "hibernation" in user interfaces. 
STD checkpoints the diff --git a/kernel/power/Makefile b/kernel/power/Makefile index b75597235d85..c350e18b53e3 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -7,6 +7,5 @@ obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o -obj-$(CONFIG_SUSPEND_NVS) += nvs.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 870f72bc72ae..1832bd264219 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -51,14 +51,14 @@ enum { static int hibernation_mode = HIBERNATION_SHUTDOWN; -static struct platform_hibernation_ops *hibernation_ops; +static const struct platform_hibernation_ops *hibernation_ops; /** * hibernation_set_ops - set the global hibernate operations * @ops: the hibernation operations to use in subsequent hibernation transitions */ -void hibernation_set_ops(struct platform_hibernation_ops *ops) +void hibernation_set_ops(const struct platform_hibernation_ops *ops) { if (ops && !(ops->begin && ops->end && ops->pre_snapshot && ops->prepare && ops->finish && ops->enter && ops->pre_restore diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8850df68794d..de6f86bfa303 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -31,13 +31,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = { [PM_SUSPEND_MEM] = "mem", }; -static struct platform_suspend_ops *suspend_ops; +static const struct platform_suspend_ops *suspend_ops; /** * suspend_set_ops - Set the global suspend method table. * @ops: Pointer to ops structure. */ -void suspend_set_ops(struct platform_suspend_ops *ops) +void suspend_set_ops(const struct platform_suspend_ops *ops) { mutex_lock(&pm_mutex); suspend_ops = ops; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8c7e4832b9be..7c97c3a0eee3 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -224,7 +224,7 @@ static int swsusp_swap_check(void) return res; root_swap = res; - res = blkdev_get(hib_resume_bdev, FMODE_WRITE); + res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL); if (res) return res; @@ -888,7 +888,7 @@ out_finish: /** * swsusp_read - read the hibernation image. 
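The suspend_set_ops()/hibernation_set_ops() constification above lets platforms keep their method tables read-only; roughly (demo_* names hypothetical):

        static const struct platform_suspend_ops demo_suspend_ops = {
                .valid = suspend_valid_only_mem,
                .enter = demo_suspend_enter,
        };

        suspend_set_ops(&demo_suspend_ops);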
* @flags_p: flags passed by the "frozen" kernel in the image header should - * be written into this memeory location + * be written into this memory location */ int swsusp_read(unsigned int *flags_p) @@ -930,7 +930,8 @@ int swsusp_check(void) { int error; - hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); + hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, + FMODE_READ, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); diff --git a/kernel/printk.c b/kernel/printk.c index f64b8997fc76..53d9a9ec88e6 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -39,6 +39,7 @@ #include #include #include +#include #include @@ -1502,7 +1503,7 @@ int kmsg_dump_register(struct kmsg_dumper *dumper) /* Don't allow registering multiple times */ if (!dumper->registered) { dumper->registered = 1; - list_add_tail(&dumper->list, &dump_list); + list_add_tail_rcu(&dumper->list, &dump_list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); @@ -1526,29 +1527,16 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper) spin_lock_irqsave(&dump_list_lock, flags); if (dumper->registered) { dumper->registered = 0; - list_del(&dumper->list); + list_del_rcu(&dumper->list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); + synchronize_rcu(); return err; } EXPORT_SYMBOL_GPL(kmsg_dump_unregister); -static const char * const kmsg_reasons[] = { - [KMSG_DUMP_OOPS] = "oops", - [KMSG_DUMP_PANIC] = "panic", - [KMSG_DUMP_KEXEC] = "kexec", -}; - -static const char *kmsg_to_str(enum kmsg_dump_reason reason) -{ - if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0) - return "unknown"; - - return kmsg_reasons[reason]; -} - /** * kmsg_dump - dump kernel log to kernel message dumpers. * @reason: the reason (oops, panic etc) for dumping @@ -1587,13 +1575,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) l2 = chars; } - if (!spin_trylock_irqsave(&dump_list_lock, flags)) { - printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n", - kmsg_to_str(reason)); - return; - } - list_for_each_entry(dumper, &dump_list, list) + rcu_read_lock(); + list_for_each_entry_rcu(dumper, &dump_list, list) dumper->dump(dumper, reason, s1, l1, s2, l2); - spin_unlock_irqrestore(&dump_list_lock, flags); + rcu_read_unlock(); } #endif diff --git a/kernel/sched.c b/kernel/sched.c index a0eb0941fa84..ea3e5eff3878 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2505,7 +2505,7 @@ out: * try_to_wake_up_local - try to wake up a local task with rq lock held * @p: the thread to be awakened * - * Put @p on the run-queue if it's not alredy there. The caller must + * Put @p on the run-queue if it's not already there. The caller must * ensure that this_rq() is locked, @p is bound to this_rq() and not * the current task. this_rq() stays locked over invocation. 
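With the dump list above now traversed under RCU, registered dumpers are invoked without the old spinlock; a registrant still looks roughly like this (hypothetical demo_* names, callback signature as used in the kmsg_dump() walk above):

        static void demo_dump(struct kmsg_dumper *dumper,
                              enum kmsg_dump_reason reason,
                              const char *s1, unsigned long l1,
                              const char *s2, unsigned long l2)
        {
                /* copy out s1/l1 and s2/l2, the two halves of the log buffer */
        }

        static struct kmsg_dumper demo_dumper = {
                .dump = demo_dump,
        };

        static int __init demo_dumper_init(void)
        {
                return kmsg_dump_register(&demo_dumper);
        }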
*/ diff --git a/kernel/smp.c b/kernel/smp.c index 12ed8b013e2d..4ec30e069987 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -13,6 +13,7 @@ #include #include +#ifdef CONFIG_USE_GENERIC_SMP_HELPERS static struct { struct list_head queue; raw_spinlock_t lock; @@ -529,3 +530,21 @@ void ipi_call_unlock_irq(void) { raw_spin_unlock_irq(&call_function.lock); } +#endif /* USE_GENERIC_SMP_HELPERS */ + +/* + * Call a function on all processors + */ +int on_each_cpu(void (*func) (void *info), void *info, int wait) +{ + int ret = 0; + + preempt_disable(); + ret = smp_call_function(func, info, wait); + local_irq_disable(); + func(info); + local_irq_enable(); + preempt_enable(); + return ret; +} +EXPORT_SYMBOL(on_each_cpu); diff --git a/kernel/softirq.c b/kernel/softirq.c index 0823778f87fc..68eb5efec388 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -885,25 +885,6 @@ static __init int spawn_ksoftirqd(void) } early_initcall(spawn_ksoftirqd); -#ifdef CONFIG_SMP -/* - * Call a function on all processors - */ -int on_each_cpu(void (*func) (void *info), void *info, int wait) -{ - int ret = 0; - - preempt_disable(); - ret = smp_call_function(func, info, wait); - local_irq_disable(); - func(info); - local_irq_enable(); - preempt_enable(); - return ret; -} -EXPORT_SYMBOL(on_each_cpu); -#endif - /* * [ These __weak aliases are kept in a separate compilation unit, so that * GCC does not inline them incorrectly. ] diff --git a/kernel/sys.c b/kernel/sys.c index 2745dcdb6c6c..31b71a276b40 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -43,6 +43,8 @@ #include #include +#include + #include #include #include @@ -285,6 +287,7 @@ out_unlock: */ void emergency_restart(void) { + kmsg_dump(KMSG_DUMP_EMERG); machine_emergency_restart(); } EXPORT_SYMBOL_GPL(emergency_restart); @@ -312,6 +315,7 @@ void kernel_restart(char *cmd) printk(KERN_EMERG "Restarting system.\n"); else printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); + kmsg_dump(KMSG_DUMP_RESTART); machine_restart(cmd); } EXPORT_SYMBOL_GPL(kernel_restart); @@ -333,6 +337,7 @@ void kernel_halt(void) kernel_shutdown_prepare(SYSTEM_HALT); sysdev_shutdown(); printk(KERN_EMERG "System halted.\n"); + kmsg_dump(KMSG_DUMP_HALT); machine_halt(); } @@ -351,6 +356,7 @@ void kernel_power_off(void) disable_nonboot_cpus(); sysdev_shutdown(); printk(KERN_EMERG "Power down.\n"); + kmsg_dump(KMSG_DUMP_POWEROFF); machine_power_off(); } EXPORT_SYMBOL_GPL(kernel_power_off); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ae5cbb1e3ced..bc86bb32e126 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -245,10 +246,6 @@ static struct ctl_table root_table[] = { .mode = 0555, .child = dev_table, }, -/* - * NOTE: do not add new entries to this table unless you have read - * Documentation/sysctl/ctl_unnumbered.txt - */ { } }; @@ -710,6 +707,15 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, + { + .procname = "kptr_restrict", + .data = &kptr_restrict, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &two, + }, #endif { .procname = "ngroups_max", @@ -962,10 +968,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -/* - * NOTE: do not add new entries to this table unless you have read - * Documentation/sysctl/ctl_unnumbered.txt - */ { } }; @@ -1326,11 +1328,6 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif - -/* - * NOTE: do not add new 
entries to this table unless you have read - * Documentation/sysctl/ctl_unnumbered.txt - */ { } }; @@ -1486,10 +1483,6 @@ static struct ctl_table fs_table[] = { .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, -/* - * NOTE: do not add new entries to this table unless you have read - * Documentation/sysctl/ctl_unnumbered.txt - */ { } }; @@ -2899,7 +2892,7 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, } } -#else /* CONFIG_PROC_FS */ +#else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -2951,7 +2944,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, } -#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_PROC_SYSCTL */ /* * No sense putting this after each symbol definition, twice, diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 4b2545a136ff..b875bedf7c9a 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1192,7 +1192,7 @@ static ssize_t bin_dn_node_address(struct file *file, buf[result] = '\0'; - /* Convert the decnet addresss to binary */ + /* Convert the decnet address to binary */ result = -EIO; nodep = strchr(buf, '.') + 1; if (!nodep) diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 69691eb4b715..3971c6b9d58d 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -348,7 +348,7 @@ static int parse(struct nlattr *na, struct cpumask *mask) return ret; } -#ifdef CONFIG_IA64 +#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) #define TASKSTATS_NEEDS_PADDING 1 #endif diff --git a/kernel/time.c b/kernel/time.c index ba9b338d1835..32174359576f 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -238,7 +238,7 @@ EXPORT_SYMBOL(current_fs_time); * Avoid unnecessary multiplications/divisions in the * two most common HZ cases: */ -unsigned int inline jiffies_to_msecs(const unsigned long j) +inline unsigned int jiffies_to_msecs(const unsigned long j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) return (MSEC_PER_SEC / HZ) * j; @@ -254,7 +254,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j) } EXPORT_SYMBOL(jiffies_to_msecs); -unsigned int inline jiffies_to_usecs(const unsigned long j) +inline unsigned int jiffies_to_usecs(const unsigned long j) { #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) return (USEC_PER_SEC / HZ) * j; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index df140cd3ea47..c50a034de30f 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -679,7 +679,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) { - /* Intialize mult/shift and max_idle_ns */ + /* Initialize mult/shift and max_idle_ns */ __clocksource_updatefreq_scale(cs, scale, freq); /* Add clocksource to the clcoksource list */ diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index d2321891538f..5c00242fa921 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -14,6 +14,7 @@ #include #include #include +#include /* * NTP timekeeping variables: @@ -74,6 +75,162 @@ static long time_adjust; /* constant (boot-param configurable) NTP tick adjustment (upscaled) */ static s64 ntp_tick_adj; +#ifdef CONFIG_NTP_PPS + +/* + * The following variables are used when a pulse-per-second (PPS) signal + * is available. They establish the engineering parameters of the clock + * discipline loop when controlled by the PPS signal. 
+ */ +#define PPS_VALID 10 /* PPS signal watchdog max (s) */ +#define PPS_POPCORN 4 /* popcorn spike threshold (shift) */ +#define PPS_INTMIN 2 /* min freq interval (s) (shift) */ +#define PPS_INTMAX 8 /* max freq interval (s) (shift) */ +#define PPS_INTCOUNT 4 /* number of consecutive good intervals to + increase pps_shift or consecutive bad + intervals to decrease it */ +#define PPS_MAXWANDER 100000 /* max PPS freq wander (ns/s) */ + +static int pps_valid; /* signal watchdog counter */ +static long pps_tf[3]; /* phase median filter */ +static long pps_jitter; /* current jitter (ns) */ +static struct timespec pps_fbase; /* beginning of the last freq interval */ +static int pps_shift; /* current interval duration (s) (shift) */ +static int pps_intcnt; /* interval counter */ +static s64 pps_freq; /* frequency offset (scaled ns/s) */ +static long pps_stabil; /* current stability (scaled ns/s) */ + +/* + * PPS signal quality monitors + */ +static long pps_calcnt; /* calibration intervals */ +static long pps_jitcnt; /* jitter limit exceeded */ +static long pps_stbcnt; /* stability limit exceeded */ +static long pps_errcnt; /* calibration errors */ + + +/* PPS kernel consumer compensates the whole phase error immediately. + * Otherwise, reduce the offset by a fixed factor times the time constant. + */ +static inline s64 ntp_offset_chunk(s64 offset) +{ + if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL) + return offset; + else + return shift_right(offset, SHIFT_PLL + time_constant); +} + +static inline void pps_reset_freq_interval(void) +{ + /* the PPS calibration interval may end + surprisingly early */ + pps_shift = PPS_INTMIN; + pps_intcnt = 0; +} + +/** + * pps_clear - Clears the PPS state variables + * + * Must be called while holding a write on the xtime_lock + */ +static inline void pps_clear(void) +{ + pps_reset_freq_interval(); + pps_tf[0] = 0; + pps_tf[1] = 0; + pps_tf[2] = 0; + pps_fbase.tv_sec = pps_fbase.tv_nsec = 0; + pps_freq = 0; +} + +/* Decrease pps_valid to indicate that another second has passed since + * the last PPS signal. When it reaches 0, indicate that PPS signal is + * missing. 
+ * + * Must be called while holding a write on the xtime_lock + */ +static inline void pps_dec_valid(void) +{ + if (pps_valid > 0) + pps_valid--; + else { + time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | + STA_PPSWANDER | STA_PPSERROR); + pps_clear(); + } +} + +static inline void pps_set_freq(s64 freq) +{ + pps_freq = freq; +} + +static inline int is_error_status(int status) +{ + return (time_status & (STA_UNSYNC|STA_CLOCKERR)) + /* PPS signal lost when either PPS time or + * PPS frequency synchronization requested + */ + || ((time_status & (STA_PPSFREQ|STA_PPSTIME)) + && !(time_status & STA_PPSSIGNAL)) + /* PPS jitter exceeded when + * PPS time synchronization requested */ + || ((time_status & (STA_PPSTIME|STA_PPSJITTER)) + == (STA_PPSTIME|STA_PPSJITTER)) + /* PPS wander exceeded or calibration error when + * PPS frequency synchronization requested + */ + || ((time_status & STA_PPSFREQ) + && (time_status & (STA_PPSWANDER|STA_PPSERROR))); +} + +static inline void pps_fill_timex(struct timex *txc) +{ + txc->ppsfreq = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) * + PPM_SCALE_INV, NTP_SCALE_SHIFT); + txc->jitter = pps_jitter; + if (!(time_status & STA_NANO)) + txc->jitter /= NSEC_PER_USEC; + txc->shift = pps_shift; + txc->stabil = pps_stabil; + txc->jitcnt = pps_jitcnt; + txc->calcnt = pps_calcnt; + txc->errcnt = pps_errcnt; + txc->stbcnt = pps_stbcnt; +} + +#else /* !CONFIG_NTP_PPS */ + +static inline s64 ntp_offset_chunk(s64 offset) +{ + return shift_right(offset, SHIFT_PLL + time_constant); +} + +static inline void pps_reset_freq_interval(void) {} +static inline void pps_clear(void) {} +static inline void pps_dec_valid(void) {} +static inline void pps_set_freq(s64 freq) {} + +static inline int is_error_status(int status) +{ + return status & (STA_UNSYNC|STA_CLOCKERR); +} + +static inline void pps_fill_timex(struct timex *txc) +{ + /* PPS is not implemented, so these are zero */ + txc->ppsfreq = 0; + txc->jitter = 0; + txc->shift = 0; + txc->stabil = 0; + txc->jitcnt = 0; + txc->calcnt = 0; + txc->errcnt = 0; + txc->stbcnt = 0; +} + +#endif /* CONFIG_NTP_PPS */ + /* * NTP methods: */ @@ -185,6 +342,9 @@ void ntp_clear(void) tick_length = tick_length_base; time_offset = 0; + + /* Clear PPS state variables */ + pps_clear(); } /* @@ -250,16 +410,16 @@ void second_overflow(void) time_status |= STA_UNSYNC; } - /* - * Compute the phase adjustment for the next second. The offset is - * reduced by a fixed factor times the time constant. 
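A worked example of ntp_offset_chunk() above, with purely illustrative numbers: for a remaining time_offset of 1 000 000 (in its scaled units) and SHIFT_PLL + time_constant equal to 6, each call to second_overflow() folds 1 000 000 >> 6, about 15 625, into tick_length, so the phase error decays geometrically; when STA_PPSTIME and STA_PPSSIGNAL are both set the function returns the full 1 000 000 and the whole error is applied at once.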
- */ + /* Compute the phase adjustment for the next second */ tick_length = tick_length_base; - delta = shift_right(time_offset, SHIFT_PLL + time_constant); + delta = ntp_offset_chunk(time_offset); time_offset -= delta; tick_length += delta; + /* Check PPS signal */ + pps_dec_valid(); + if (!time_adjust) return; @@ -369,6 +529,8 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts) if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { time_state = TIME_OK; time_status = STA_UNSYNC; + /* restart PPS frequency calibration */ + pps_reset_freq_interval(); } /* @@ -418,6 +580,8 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts time_freq = txc->freq * PPM_SCALE; time_freq = min(time_freq, MAXFREQ_SCALED); time_freq = max(time_freq, -MAXFREQ_SCALED); + /* update pps_freq */ + pps_set_freq(time_freq); } if (txc->modes & ADJ_MAXERROR) @@ -508,7 +672,8 @@ int do_adjtimex(struct timex *txc) } result = time_state; /* mostly `TIME_OK' */ - if (time_status & (STA_UNSYNC|STA_CLOCKERR)) + /* check for errors */ + if (is_error_status(time_status)) result = TIME_ERROR; txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * @@ -522,15 +687,8 @@ int do_adjtimex(struct timex *txc) txc->tick = tick_usec; txc->tai = time_tai; - /* PPS is not implemented, so these are zero */ - txc->ppsfreq = 0; - txc->jitter = 0; - txc->shift = 0; - txc->stabil = 0; - txc->jitcnt = 0; - txc->calcnt = 0; - txc->errcnt = 0; - txc->stbcnt = 0; + /* fill PPS status fields */ + pps_fill_timex(txc); write_sequnlock_irq(&xtime_lock); @@ -544,6 +702,243 @@ int do_adjtimex(struct timex *txc) return result; } +#ifdef CONFIG_NTP_PPS + +/* actually struct pps_normtime is good old struct timespec, but it is + * semantically different (and it is the reason why it was invented): + * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] + * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ +struct pps_normtime { + __kernel_time_t sec; /* seconds */ + long nsec; /* nanoseconds */ +}; + +/* normalize the timestamp so that nsec is in the + ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ +static inline struct pps_normtime pps_normalize_ts(struct timespec ts) +{ + struct pps_normtime norm = { + .sec = ts.tv_sec, + .nsec = ts.tv_nsec + }; + + if (norm.nsec > (NSEC_PER_SEC >> 1)) { + norm.nsec -= NSEC_PER_SEC; + norm.sec++; + } + + return norm; +} + +/* get current phase correction and jitter */ +static inline long pps_phase_filter_get(long *jitter) +{ + *jitter = pps_tf[0] - pps_tf[1]; + if (*jitter < 0) + *jitter = -*jitter; + + /* TODO: test various filters */ + return pps_tf[0]; +} + +/* add the sample to the phase filter */ +static inline void pps_phase_filter_add(long err) +{ + pps_tf[2] = pps_tf[1]; + pps_tf[1] = pps_tf[0]; + pps_tf[0] = err; +} + +/* decrease frequency calibration interval length. + * It is halved after four consecutive unstable intervals. + */ +static inline void pps_dec_freq_interval(void) +{ + if (--pps_intcnt <= -PPS_INTCOUNT) { + pps_intcnt = -PPS_INTCOUNT; + if (pps_shift > PPS_INTMIN) { + pps_shift--; + pps_intcnt = 0; + } + } +} + +/* increase frequency calibration interval length. + * It is doubled after four consecutive stable intervals. 
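To make the interval bookkeeping concrete: after pps_clear() the shift sits at PPS_INTMIN (2, a 1 << 2 = 4 s calibration interval). Four consecutive good intervals drive pps_intcnt up to PPS_INTCOUNT and bump pps_shift to 3 (8 s), and so on up to PPS_INTMAX (256 s); four consecutive bad intervals walk it back down the same way, as implemented just above.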
+ */ +static inline void pps_inc_freq_interval(void) +{ + if (++pps_intcnt >= PPS_INTCOUNT) { + pps_intcnt = PPS_INTCOUNT; + if (pps_shift < PPS_INTMAX) { + pps_shift++; + pps_intcnt = 0; + } + } +} + +/* update clock frequency based on MONOTONIC_RAW clock PPS signal + * timestamps + * + * At the end of the calibration interval the difference between the + * first and last MONOTONIC_RAW clock timestamps divided by the length + * of the interval becomes the frequency update. If the interval was + * too long, the data are discarded. + * Returns the difference between old and new frequency values. + */ +static long hardpps_update_freq(struct pps_normtime freq_norm) +{ + long delta, delta_mod; + s64 ftemp; + + /* check if the frequency interval was too long */ + if (freq_norm.sec > (2 << pps_shift)) { + time_status |= STA_PPSERROR; + pps_errcnt++; + pps_dec_freq_interval(); + pr_err("hardpps: PPSERROR: interval too long - %ld s\n", + freq_norm.sec); + return 0; + } + + /* here the raw frequency offset and wander (stability) is + * calculated. If the wander is less than the wander threshold + * the interval is increased; otherwise it is decreased. + */ + ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT, + freq_norm.sec); + delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT); + pps_freq = ftemp; + if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) { + pr_warning("hardpps: PPSWANDER: change=%ld\n", delta); + time_status |= STA_PPSWANDER; + pps_stbcnt++; + pps_dec_freq_interval(); + } else { /* good sample */ + pps_inc_freq_interval(); + } + + /* the stability metric is calculated as the average of recent + * frequency changes, but is used only for performance + * monitoring + */ + delta_mod = delta; + if (delta_mod < 0) + delta_mod = -delta_mod; + pps_stabil += (div_s64(((s64)delta_mod) << + (NTP_SCALE_SHIFT - SHIFT_USEC), + NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN; + + /* if enabled, the system clock frequency is updated */ + if ((time_status & STA_PPSFREQ) != 0 && + (time_status & STA_FREQHOLD) == 0) { + time_freq = pps_freq; + ntp_update_frequency(); + } + + return delta; +} + +/* correct REALTIME clock phase error against PPS signal */ +static void hardpps_update_phase(long error) +{ + long correction = -error; + long jitter; + + /* add the sample to the median filter */ + pps_phase_filter_add(correction); + correction = pps_phase_filter_get(&jitter); + + /* Nominal jitter is due to PPS signal noise. If it exceeds the + * threshold, the sample is discarded; otherwise, if so enabled, + * the time offset is updated. + */ + if (jitter > (pps_jitter << PPS_POPCORN)) { + pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n", + jitter, (pps_jitter << PPS_POPCORN)); + time_status |= STA_PPSJITTER; + pps_jitcnt++; + } else if (time_status & STA_PPSTIME) { + /* correct the time using the phase offset */ + time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT, + NTP_INTERVAL_FREQ); + /* cancel running adjtime() */ + time_adjust = 0; + } + /* update jitter */ + pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN; +} + +/* + * hardpps() - discipline CPU clock oscillator to external PPS signal + * + * This routine is called at each PPS signal arrival in order to + * discipline the CPU clock oscillator to the PPS signal. It takes two + * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former + * is used to correct clock phase error and the latter is used to + * correct the frequency. + * + * This code is based on David Mills's reference nanokernel + * implementation. 
It was mostly rewritten but keeps the same idea. + */ +void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) +{ + struct pps_normtime pts_norm, freq_norm; + unsigned long flags; + + pts_norm = pps_normalize_ts(*phase_ts); + + write_seqlock_irqsave(&xtime_lock, flags); + + /* clear the error bits, they will be set again if needed */ + time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); + + /* indicate signal presence */ + time_status |= STA_PPSSIGNAL; + pps_valid = PPS_VALID; + + /* when called for the first time, + * just start the frequency interval */ + if (unlikely(pps_fbase.tv_sec == 0)) { + pps_fbase = *raw_ts; + write_sequnlock_irqrestore(&xtime_lock, flags); + return; + } + + /* ok, now we have a base for frequency calculation */ + freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase)); + + /* check that the signal is in the range + * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */ + if ((freq_norm.sec == 0) || + (freq_norm.nsec > MAXFREQ * freq_norm.sec) || + (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) { + time_status |= STA_PPSJITTER; + /* restart the frequency calibration interval */ + pps_fbase = *raw_ts; + write_sequnlock_irqrestore(&xtime_lock, flags); + pr_err("hardpps: PPSJITTER: bad pulse\n"); + return; + } + + /* signal is ok */ + + /* check if the current frequency interval is finished */ + if (freq_norm.sec >= (1 << pps_shift)) { + pps_calcnt++; + /* restart the frequency calibration interval */ + pps_fbase = *raw_ts; + hardpps_update_freq(freq_norm); + } + + hardpps_update_phase(pts_norm.nsec); + + write_sequnlock_irqrestore(&xtime_lock, flags); +} +EXPORT_SYMBOL(hardpps); + +#endif /* CONFIG_NTP_PPS */ + static int __init ntp_tick_adj_setup(char *str) { ntp_tick_adj = simple_strtol(str, NULL, 0); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5bb86da82003..5536aaf3ba36 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -288,6 +288,49 @@ void ktime_get_ts(struct timespec *ts) } EXPORT_SYMBOL_GPL(ktime_get_ts); +#ifdef CONFIG_NTP_PPS + +/** + * getnstime_raw_and_real - get day and raw monotonic time in timespec format + * @ts_raw: pointer to the timespec to be set to raw monotonic time + * @ts_real: pointer to the timespec to be set to the time of day + * + * This function reads both the time of day and raw monotonic time at the + * same time atomically and stores the resulting timestamps in timespec + * format. 
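A hedged sketch of how a PPS consumer might feed this interface, pairing hardpps() above with the getnstime_raw_and_real() helper added below (my_pps_event is an illustrative name; the real caller lives in the PPS subsystem, and the header placement of the declarations is assumed here):

#include <linux/time.h>

/* declarations as added elsewhere in this series (placement assumed) */
extern void getnstime_raw_and_real(struct timespec *ts_raw,
				   struct timespec *ts_real);
extern void hardpps(const struct timespec *phase_ts,
		    const struct timespec *raw_ts);

static void my_pps_event(void)
{
	struct timespec ts_raw, ts_real;

	/* snapshot REALTIME and MONOTONIC_RAW atomically at the pulse;
	 * xtime_lock must not be held, hardpps() takes it itself */
	getnstime_raw_and_real(&ts_raw, &ts_real);

	/* the REALTIME stamp disciplines phase, the raw stamp frequency */
	hardpps(&ts_real, &ts_raw);
}
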
+ */ +void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) +{ + unsigned long seq; + s64 nsecs_raw, nsecs_real; + + WARN_ON_ONCE(timekeeping_suspended); + + do { + u32 arch_offset; + + seq = read_seqbegin(&xtime_lock); + + *ts_raw = raw_time; + *ts_real = xtime; + + nsecs_raw = timekeeping_get_ns_raw(); + nsecs_real = timekeeping_get_ns(); + + /* If arch requires, add in gettimeoffset() */ + arch_offset = arch_gettimeoffset(); + nsecs_raw += arch_offset; + nsecs_real += arch_offset; + + } while (read_seqretry(&xtime_lock, seq)); + + timespec_add_ns(ts_raw, nsecs_raw); + timespec_add_ns(ts_real, nsecs_real); +} +EXPORT_SYMBOL(getnstime_raw_and_real); + +#endif /* CONFIG_NTP_PPS */ + /** * do_gettimeofday - Returns the time of day in a timeval * @tv: pointer to the timeval to be set diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7b8ec0281548..153562d0b93c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -758,53 +758,58 @@ static void blk_add_trace_rq_complete(void *ignore, * @q: queue the io is for * @bio: the source bio * @what: the action + * @error: error, if any * * Description: * Records an action against a bio. Will log the bio offset + size. * **/ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, - u32 what) + u32 what, int error) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; + if (!error && !bio_flagged(bio, BIO_UPTODATE)) + error = EIO; + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, - !bio_flagged(bio, BIO_UPTODATE), 0, NULL); + error, 0, NULL); } static void blk_add_trace_bio_bounce(void *ignore, struct request_queue *q, struct bio *bio) { - blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); + blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); } static void blk_add_trace_bio_complete(void *ignore, - struct request_queue *q, struct bio *bio) + struct request_queue *q, struct bio *bio, + int error) { - blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); + blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); } static void blk_add_trace_bio_backmerge(void *ignore, struct request_queue *q, struct bio *bio) { - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); } static void blk_add_trace_bio_frontmerge(void *ignore, struct request_queue *q, struct bio *bio) { - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); } static void blk_add_trace_bio_queue(void *ignore, struct request_queue *q, struct bio *bio) { - blk_add_trace_bio(q, bio, BLK_TA_QUEUE); + blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); } static void blk_add_trace_getrq(void *ignore, @@ -812,7 +817,7 @@ static void blk_add_trace_getrq(void *ignore, struct bio *bio, int rw) { if (bio) - blk_add_trace_bio(q, bio, BLK_TA_GETRQ); + blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { struct blk_trace *bt = q->blk_trace; @@ -827,7 +832,7 @@ static void blk_add_trace_sleeprq(void *ignore, struct bio *bio, int rw) { if (bio) - blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ); + blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { struct blk_trace *bt = q->blk_trace; @@ -887,7 +892,7 @@ static void blk_add_trace_split(void *ignore, } /** - * blk_add_trace_remap - Add a trace for a remap operation + * blk_add_trace_bio_remap - Add a trace for a bio-remap operation * @ignore: trace callback data parameter (not used) * @q: queue the io is for * @bio: the source bio @@ -899,9 +904,9 @@ static void blk_add_trace_split(void *ignore, * it spans a stripe (or 
similar). Add a trace for that action. * **/ -static void blk_add_trace_remap(void *ignore, - struct request_queue *q, struct bio *bio, - dev_t dev, sector_t from) +static void blk_add_trace_bio_remap(void *ignore, + struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; @@ -1016,7 +1021,7 @@ static void blk_register_tracepoints(void) WARN_ON(ret); ret = register_trace_block_split(blk_add_trace_split, NULL); WARN_ON(ret); - ret = register_trace_block_remap(blk_add_trace_remap, NULL); + ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); WARN_ON(ret); ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); WARN_ON(ret); @@ -1025,7 +1030,7 @@ static void blk_register_tracepoints(void) static void blk_unregister_tracepoints(void) { unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); - unregister_trace_block_remap(blk_add_trace_remap, NULL); + unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); unregister_trace_block_split(blk_add_trace_split, NULL); unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index e3dfecaf13e6..6cf223764be8 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -53,7 +53,7 @@ */ /* - * Function trace entry - function address and parent function addres: + * Function trace entry - function address and parent function address: */ FTRACE_ENTRY(function, ftrace_entry, diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 25915832291a..9da289c34f22 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -12,6 +12,8 @@ #include #include +static struct kmem_cache *user_ns_cachep __read_mostly; + /* * Create a new user namespace, deriving the creator from the user in the * passed credentials, and replacing that user with the new root user for the @@ -26,7 +28,7 @@ int create_user_ns(struct cred *new) struct user_struct *root_user; int n; - ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); + ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL); if (!ns) return -ENOMEM; @@ -38,7 +40,7 @@ int create_user_ns(struct cred *new) /* Alloc new root user. */ root_user = alloc_uid(ns, 0); if (!root_user) { - kfree(ns); + kmem_cache_free(user_ns_cachep, ns); return -ENOMEM; } @@ -71,7 +73,7 @@ static void free_user_ns_work(struct work_struct *work) struct user_namespace *ns = container_of(work, struct user_namespace, destroyer); free_uid(ns->creator); - kfree(ns); + kmem_cache_free(user_ns_cachep, ns); } void free_user_ns(struct kref *kref) @@ -126,3 +128,10 @@ gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t /* No useful relationship so no mapping */ return overflowgid; } + +static __init int user_namespaces_init(void) +{ + user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); + return 0; +} +module_init(user_namespaces_init); diff --git a/lib/Kconfig b/lib/Kconfig index 3116aa631af6..0ee67e08ad3e 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -106,6 +106,8 @@ config LZO_COMPRESS config LZO_DECOMPRESS tristate +source "lib/xz/Kconfig" + # # These all provide a common interface (hence the apparent duplication with # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) 
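The user_namespace.c hunk above swaps kmalloc()/kfree() for a dedicated slab cache; the same pattern in miniature, using a hypothetical struct foo:

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>

struct foo {
	int refs;
};

static struct kmem_cache *foo_cachep __read_mostly;

static struct foo *foo_alloc(void)
{
	/* objects come from a cache sized exactly for struct foo */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}

static int __init foo_cache_init(void)
{
	/* SLAB_PANIC: no error handling needed, boot fails loudly instead */
	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
	return 0;
}
module_init(foo_cache_init);
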
@@ -120,6 +122,10 @@ config DECOMPRESS_BZIP2 config DECOMPRESS_LZMA tristate +config DECOMPRESS_XZ + select XZ_DEC + tristate + config DECOMPRESS_LZO select LZO_DECOMPRESS tristate diff --git a/lib/Makefile b/lib/Makefile index d7b6e30a3a1e..cbb774f7d41d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o prio_heap.o ratelimit.o show_mem.o \ - is_single_threaded.o plist.o decompress.o flex_array.o + is_single_threaded.o plist.o decompress.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o lcm.o list_sort.o uuid.o + string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -69,11 +69,13 @@ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ obj-$(CONFIG_LZO_COMPRESS) += lzo/ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ +obj-$(CONFIG_XZ_DEC) += xz/ obj-$(CONFIG_RAID6_PQ) += raid6/ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o +lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o diff --git a/lib/decompress.c b/lib/decompress.c index a7606815541f..3d766b7f60ab 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -23,6 +24,9 @@ #ifndef CONFIG_DECOMPRESS_LZMA # define unlzma NULL #endif +#ifndef CONFIG_DECOMPRESS_XZ +# define unxz NULL +#endif #ifndef CONFIG_DECOMPRESS_LZO # define unlzo NULL #endif @@ -36,6 +40,7 @@ static const struct compress_format { { {037, 0236}, "gzip", gunzip }, { {0x42, 0x5a}, "bzip2", bunzip2 }, { {0x5d, 0x00}, "lzma", unlzma }, + { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 81c8bb1cc6aa..a7b80c1d6a0d 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -49,7 +49,6 @@ #define PREBOOT #else #include -#include #endif /* STATIC */ #include @@ -682,13 +681,12 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, int(*flush)(void*, unsigned int), unsigned char *outbuf, int *pos, - void(*error_fn)(char *x)) + void(*error)(char *x)) { struct bunzip_data *bd; int i = -1; unsigned char *inbuf; - set_error_fn(error_fn); if (flush) outbuf = malloc(BZIP2_IOBUF_SIZE); @@ -751,8 +749,8 @@ STATIC int INIT decompress(unsigned char *buf, int len, int(*flush)(void*, unsigned int), unsigned char *outbuf, int *pos, - void(*error_fn)(char *x)) + void(*error)(char *x)) { - return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn); + return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); } #endif diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index fc686c7a0a0d..19ff89e34eec 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -19,7 +19,6 @@ #include "zlib_inflate/inflate.h" #include "zlib_inflate/infutil.h" -#include #endif /* STATIC */ @@ -27,7 +26,7 @@ #define GZIP_IOBUF_SIZE (16*1024) -static int nofill(void *buffer, unsigned int len) +static int INIT 
nofill(void *buffer, unsigned int len) { return -1; } @@ -38,13 +37,12 @@ STATIC int INIT gunzip(unsigned char *buf, int len, int(*flush)(void*, unsigned int), unsigned char *out_buf, int *pos, - void(*error_fn)(char *x)) { + void(*error)(char *x)) { u8 *zbuf; struct z_stream_s *strm; int rc; size_t out_len; - set_error_fn(error_fn); rc = -1; if (flush) { out_len = 0x8000; /* 32 K */ @@ -100,13 +98,22 @@ STATIC int INIT gunzip(unsigned char *buf, int len, * possible asciz filename) */ strm->next_in = zbuf + 10; + strm->avail_in = len - 10; /* skip over asciz filename */ if (zbuf[3] & 0x8) { - while (strm->next_in[0]) - strm->next_in++; - strm->next_in++; + do { + /* + * If the filename doesn't fit into the buffer, + * the file is very probably corrupt. Don't try + * to read more data. + */ + if (strm->avail_in == 0) { + error("header error"); + goto gunzip_5; + } + --strm->avail_in; + } while (*strm->next_in++); } - strm->avail_in = len - (strm->next_in - zbuf); strm->next_out = out_buf; strm->avail_out = out_len; diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index ca82fde81c8f..476c65af9709 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c @@ -33,7 +33,6 @@ #define PREBOOT #else #include -#include #endif /* STATIC */ #include @@ -74,6 +73,7 @@ struct rc { uint32_t code; uint32_t range; uint32_t bound; + void (*error)(char *); }; @@ -82,7 +82,7 @@ struct rc { #define RC_MODEL_TOTAL_BITS 11 -static int nofill(void *buffer, unsigned int len) +static int INIT nofill(void *buffer, unsigned int len) { return -1; } @@ -92,7 +92,7 @@ static void INIT rc_read(struct rc *rc) { rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); if (rc->buffer_size <= 0) - error("unexpected EOF"); + rc->error("unexpected EOF"); rc->ptr = rc->buffer; rc->buffer_end = rc->buffer + rc->buffer_size; } @@ -127,12 +127,6 @@ static inline void INIT rc_init_code(struct rc *rc) } -/* Called once. 
TODO: bb_maybe_free() */ -static inline void INIT rc_free(struct rc *rc) -{ - free(rc->buffer); -} - /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */ static void INIT rc_do_normalize(struct rc *rc) { @@ -169,7 +163,7 @@ static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p) rc->range = rc->bound; *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; } -static inline void rc_update_bit_1(struct rc *rc, uint16_t *p) +static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p) { rc->range -= rc->bound; rc->code -= rc->bound; @@ -319,32 +313,38 @@ static inline uint8_t INIT peek_old_byte(struct writer *wr, } -static inline void INIT write_byte(struct writer *wr, uint8_t byte) +static inline int INIT write_byte(struct writer *wr, uint8_t byte) { wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; if (wr->flush && wr->buffer_pos == wr->header->dict_size) { wr->buffer_pos = 0; wr->global_pos += wr->header->dict_size; - wr->flush((char *)wr->buffer, wr->header->dict_size); + if (wr->flush((char *)wr->buffer, wr->header->dict_size) + != wr->header->dict_size) + return -1; } + return 0; } -static inline void INIT copy_byte(struct writer *wr, uint32_t offs) +static inline int INIT copy_byte(struct writer *wr, uint32_t offs) { - write_byte(wr, peek_old_byte(wr, offs)); + return write_byte(wr, peek_old_byte(wr, offs)); } -static inline void INIT copy_bytes(struct writer *wr, +static inline int INIT copy_bytes(struct writer *wr, uint32_t rep0, int len) { do { - copy_byte(wr, rep0); + if (copy_byte(wr, rep0)) + return -1; len--; } while (len != 0 && wr->buffer_pos < wr->header->dst_size); + + return len; } -static inline void INIT process_bit0(struct writer *wr, struct rc *rc, +static inline int INIT process_bit0(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob, int lc, uint32_t literal_pos_mask) { @@ -378,16 +378,17 @@ static inline void INIT process_bit0(struct writer *wr, struct rc *rc, uint16_t *prob_lit = prob + mi; rc_get_bit(rc, prob_lit, &mi); } - write_byte(wr, mi); if (cst->state < 4) cst->state = 0; else if (cst->state < 10) cst->state -= 3; else cst->state -= 6; + + return write_byte(wr, mi); } -static inline void INIT process_bit1(struct writer *wr, struct rc *rc, +static inline int INIT process_bit1(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob) { int offset; @@ -418,8 +419,7 @@ static inline void INIT process_bit1(struct writer *wr, struct rc *rc, cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
9 : 11; - copy_byte(wr, cst->rep0); - return; + return copy_byte(wr, cst->rep0); } else { rc_update_bit_1(rc, prob); } @@ -521,12 +521,15 @@ static inline void INIT process_bit1(struct writer *wr, struct rc *rc, } else cst->rep0 = pos_slot; if (++(cst->rep0) == 0) - return; + return 0; + if (cst->rep0 > wr->header->dict_size + || cst->rep0 > get_pos(wr)) + return -1; } len += LZMA_MATCH_MIN_LEN; - copy_bytes(wr, cst->rep0, len); + return copy_bytes(wr, cst->rep0, len); } @@ -536,7 +539,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, int(*flush)(void*, unsigned int), unsigned char *output, int *posp, - void(*error_fn)(char *x) + void(*error)(char *x) ) { struct lzma_header header; @@ -552,7 +555,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, unsigned char *inbuf; int ret = -1; - set_error_fn(error_fn); + rc.error = error; if (buf) inbuf = buf; @@ -580,8 +583,10 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, ((unsigned char *)&header)[i] = *rc.ptr++; } - if (header.pos >= (9 * 5 * 5)) + if (header.pos >= (9 * 5 * 5)) { error("bad header"); + goto exit_1; + } mi = 0; lc = header.pos; @@ -627,21 +632,29 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, int pos_state = get_pos(&wr) & pos_state_mask; uint16_t *prob = p + LZMA_IS_MATCH + (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; - if (rc_is_bit_0(&rc, prob)) - process_bit0(&wr, &rc, &cst, p, pos_state, prob, - lc, literal_pos_mask); - else { - process_bit1(&wr, &rc, &cst, p, pos_state, prob); + if (rc_is_bit_0(&rc, prob)) { + if (process_bit0(&wr, &rc, &cst, p, pos_state, prob, + lc, literal_pos_mask)) { + error("LZMA data is corrupt"); + goto exit_3; + } + } else { + if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) { + error("LZMA data is corrupt"); + goto exit_3; + } if (cst.rep0 == 0) break; } + if (rc.buffer_size <= 0) + goto exit_3; } if (posp) *posp = rc.ptr-rc.buffer; - if (wr.flush) - wr.flush(wr.buffer, wr.buffer_pos); - ret = 0; + if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos) + ret = 0; +exit_3: large_free(p); exit_2: if (!output) @@ -659,9 +672,9 @@ STATIC int INIT decompress(unsigned char *buf, int in_len, int(*flush)(void*, unsigned int), unsigned char *output, int *posp, - void(*error_fn)(char *x) + void(*error)(char *x) ) { - return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn); + return unlzma(buf, in_len - 4, fill, flush, output, posp, error); } #endif diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index bcb3a4bd68ff..5a7a2adf4c4c 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c @@ -33,7 +33,6 @@ #ifdef STATIC #include "lzo/lzo1x_decompress.c" #else -#include #include #endif @@ -49,14 +48,25 @@ static const unsigned char lzop_magic[] = { #define LZO_BLOCK_SIZE (256*1024l) #define HEADER_HAS_FILTER 0x00000800L +#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4) +#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4) -STATIC inline int INIT parse_header(u8 *input, u8 *skip) +STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len) { int l; u8 *parse = input; + u8 *end = input + in_len; u8 level = 0; u16 version; + /* + * Check that there's enough input to possibly have a valid header. + * Then it is possible to parse several fields until the minimum + * size may have been used. 
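For reference, the constants above work out to HEADER_SIZE_MIN = 9 + 7 + 4 + 8 + 1 + 4 = 33 bytes and HEADER_SIZE_MAX = 9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4 = 297 bytes, so a single fill() of HEADER_SIZE_MAX bytes is always enough to hold a complete lzop header, which is what the unlzo() changes below rely on.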
+ */ + if (in_len < HEADER_SIZE_MIN) + return 0; + /* read magic: 9 first bits */ for (l = 0; l < 9; l++) { if (*parse++ != lzop_magic[l]) @@ -74,6 +84,15 @@ STATIC inline int INIT parse_header(u8 *input, u8 *skip) else parse += 4; /* flags */ + /* + * At least mode, mtime_low, filename length, and checksum must + * be left to be parsed. If also mtime_high is present, it's OK + * because the next input buffer check is after reading the + * filename length. + */ + if (end - parse < 8 + 1 + 4) + return 0; + /* skip mode and mtime_low */ parse += 8; if (version >= 0x0940) @@ -81,6 +100,8 @@ STATIC inline int INIT parse_header(u8 *input, u8 *skip) l = *parse++; /* don't care about the file name, and skip checksum */ + if (end - parse < l + 4) + return 0; parse += l + 4; *skip = parse - input; @@ -91,16 +112,15 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, int (*fill) (void *, unsigned int), int (*flush) (void *, unsigned int), u8 *output, int *posp, - void (*error_fn) (char *x)) + void (*error) (char *x)) { - u8 skip = 0, r = 0; + u8 r = 0; + int skip = 0; u32 src_len, dst_len; size_t tmp; u8 *in_buf, *in_buf_save, *out_buf; int ret = -1; - set_error_fn(error_fn); - if (output) { out_buf = output; } else if (!flush) { @@ -119,8 +139,8 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, goto exit_1; } else if (input) { in_buf = input; - } else if (!fill || !posp) { - error("NULL input pointer and missing position pointer or fill function"); + } else if (!fill) { + error("NULL input pointer and missing fill function"); goto exit_1; } else { in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); @@ -134,22 +154,47 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, if (posp) *posp = 0; - if (fill) - fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); + if (fill) { + /* + * Start from in_buf + HEADER_SIZE_MAX to make it possible + * to use memcpy() to copy the unused data to the beginning + * of the buffer. This way memmove() isn't needed which + * is missing from pre-boot environments of most archs. + */ + in_buf += HEADER_SIZE_MAX; + in_len = fill(in_buf, HEADER_SIZE_MAX); + } - if (!parse_header(input, &skip)) { + if (!parse_header(in_buf, &skip, in_len)) { error("invalid header"); goto exit_2; } in_buf += skip; + in_len -= skip; + + if (fill) { + /* Move the unused data to the beginning of the buffer. 
*/ + memcpy(in_buf_save, in_buf, in_len); + in_buf = in_buf_save; + } if (posp) *posp = skip; for (;;) { /* read uncompressed block size */ + if (fill && in_len < 4) { + skip = fill(in_buf + in_len, 4 - in_len); + if (skip > 0) + in_len += skip; + } + if (in_len < 4) { + error("file corrupted"); + goto exit_2; + } dst_len = get_unaligned_be32(in_buf); in_buf += 4; + in_len -= 4; /* exit if last block */ if (dst_len == 0) { @@ -164,8 +209,18 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, } /* read compressed block size, and skip block checksum info */ + if (fill && in_len < 8) { + skip = fill(in_buf + in_len, 8 - in_len); + if (skip > 0) + in_len += skip; + } + if (in_len < 8) { + error("file corrupted"); + goto exit_2; + } src_len = get_unaligned_be32(in_buf); in_buf += 8; + in_len -= 8; if (src_len <= 0 || src_len > dst_len) { error("file corrupted"); @@ -173,6 +228,15 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, } /* decompress */ + if (fill && in_len < src_len) { + skip = fill(in_buf + in_len, src_len - in_len); + if (skip > 0) + in_len += skip; + } + if (in_len < src_len) { + error("file corrupted"); + goto exit_2; + } tmp = dst_len; /* When the input data is not compressed at all, @@ -190,17 +254,26 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, } } - if (flush) - flush(out_buf, dst_len); + if (flush && flush(out_buf, dst_len) != dst_len) + goto exit_2; if (output) out_buf += dst_len; if (posp) *posp += src_len + 12; + + in_buf += src_len; + in_len -= src_len; if (fill) { + /* + * If there happens to still be unused data left in + * in_buf, move it to the beginning of the buffer. + * Use a loop to avoid memmove() dependency. + */ + if (in_len > 0) + for (skip = 0; skip < in_len; ++skip) + in_buf_save[skip] = in_buf[skip]; in_buf = in_buf_save; - fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); - } else - in_buf += src_len; + } } ret = 0; diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c new file mode 100644 index 000000000000..cecd23df2b9a --- /dev/null +++ b/lib/decompress_unxz.c @@ -0,0 +1,397 @@ +/* + * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +/* + * Important notes about in-place decompression + * + * At least on x86, the kernel is decompressed in place: the compressed data + * is placed to the end of the output buffer, and the decompressor overwrites + * most of the compressed data. There must be enough safety margin to + * guarantee that the write position is always behind the read position. + * + * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below. + * Note that the margin with XZ is bigger than with Deflate (gzip)! + * + * The worst case for in-place decompression is that the beginning of + * the file is compressed extremely well, and the rest of the file is + * uncompressible. Thus, we must look for worst-case expansion when the + * compressor is encoding uncompressible data. + * + * The structure of the .xz file in case of a compresed kernel is as follows. + * Sizes (as bytes) of the fields are in parenthesis. + * + * Stream Header (12) + * Block Header: + * Block Header (8-12) + * Compressed Data (N) + * Block Padding (0-3) + * CRC32 (4) + * Index (8-20) + * Stream Footer (12) + * + * Normally there is exactly one Block, but let's assume that there are + * 2-4 Blocks just in case. 
Because Stream Header and also Block Header + * of the first Block don't make the decompressor produce any uncompressed + * data, we can ignore them from our calculations. Block Headers of possible + * additional Blocks have to be taken into account still. With these + * assumptions, it is safe to assume that the total header overhead is + * less than 128 bytes. + * + * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ + * doesn't change the size of the data, it is enough to calculate the + * safety margin for LZMA2. + * + * LZMA2 stores the data in chunks. Each chunk has a header whose size is + * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that + * the maximum chunk header size is 8 bytes. After the chunk header, there + * may be up to 64 KiB of actual payload in the chunk. Often the payload is + * quite a bit smaller though; to be safe, let's assume that an average + * chunk has only 32 KiB of payload. + * + * The maximum uncompressed size of the payload is 2 MiB. The minimum + * uncompressed size of the payload is in practice never less than the + * payload size itself. The LZMA2 format would allow uncompressed size + * to be less than the payload size, but no sane compressor creates such + * files. LZMA2 supports storing uncompressible data in uncompressed form, + * so there's never a need to create payloads whose uncompressed size is + * smaller than the compressed size. + * + * The assumption, that the uncompressed size of the payload is never + * smaller than the payload itself, is valid only when talking about + * the payload as a whole. It is possible that the payload has parts where + * the decompressor consumes more input than it produces output. Calculating + * the worst case for this would be tricky. Instead of trying to do that, + * let's simply make sure that the decompressor never overwrites any bytes + * of the payload which it is currently reading. + * + * Now we have enough information to calculate the safety margin. We need + * - 128 bytes for the .xz file format headers; + * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header + * per chunk, each chunk having average payload size of 32 KiB); and + * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that + * the decompressor never overwrites anything from the LZMA2 chunk + * payload it is currently reading. + * + * We get the following formula: + * + * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536 + * = 128 + (uncompressed_size >> 12) + 65536 + * + * For comparision, according to arch/x86/boot/compressed/misc.c, the + * equivalent formula for Deflate is this: + * + * safety_margin = 18 + (uncompressed_size >> 12) + 32768 + * + * Thus, when updating Deflate-only in-place kernel decompressor to + * support XZ, the fixed overhead has to be increased from 18+32768 bytes + * to 128+65536 bytes. + */ + +/* + * STATIC is defined to "static" if we are being built for kernel + * decompression (pre-boot code). will define + * STATIC to empty if it wasn't already defined. Since we will need to + * know later if we are being used for kernel decompression, we define + * XZ_PREBOOT here. + */ +#ifdef STATIC +# define XZ_PREBOOT +#endif +#ifdef __KERNEL__ +# include +#endif +#define XZ_EXTERN STATIC + +#ifndef XZ_PREBOOT +# include +# include +#else +/* + * Use the internal CRC32 code instead of kernel's CRC32 module, which + * is not available in early phase of booting. 
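Plugging illustrative numbers into the margin formula above: for a 20 MiB uncompressed kernel, safety_margin = 128 + (20 971 520 >> 12) + 65536 = 128 + 5120 + 65536 = 70 784 bytes, roughly 69 KiB, versus 18 + 5120 + 32768 = 37 906 bytes with the Deflate formula quoted for comparison.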
+ */ +#define XZ_INTERNAL_CRC32 1 + +/* + * For boot time use, we enable only the BCJ filter of the current + * architecture or none if no BCJ filter is available for the architecture. + */ +#ifdef CONFIG_X86 +# define XZ_DEC_X86 +#endif +#ifdef CONFIG_PPC +# define XZ_DEC_POWERPC +#endif +#ifdef CONFIG_ARM +# define XZ_DEC_ARM +#endif +#ifdef CONFIG_IA64 +# define XZ_DEC_IA64 +#endif +#ifdef CONFIG_SPARC +# define XZ_DEC_SPARC +#endif + +/* + * This will get the basic headers so that memeq() and others + * can be defined. + */ +#include "xz/xz_private.h" + +/* + * Replace the normal allocation functions with the versions from + * . vfree() needs to support vfree(NULL) + * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it. + * Workaround it here because the other decompressors don't need it. + */ +#undef kmalloc +#undef kfree +#undef vmalloc +#undef vfree +#define kmalloc(size, flags) malloc(size) +#define kfree(ptr) free(ptr) +#define vmalloc(size) malloc(size) +#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0) + +/* + * FIXME: Not all basic memory functions are provided in architecture-specific + * files (yet). We define our own versions here for now, but this should be + * only a temporary solution. + * + * memeq and memzero are not used much and any remotely sane implementation + * is fast enough. memcpy/memmove speed matters in multi-call mode, but + * the kernel image is decompressed in single-call mode, in which only + * memcpy speed can matter and only if there is a lot of uncompressible data + * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the + * functions below should just be kept small; it's probably not worth + * optimizing for speed. + */ + +#ifndef memeq +static bool memeq(const void *a, const void *b, size_t size) +{ + const uint8_t *x = a; + const uint8_t *y = b; + size_t i; + + for (i = 0; i < size; ++i) + if (x[i] != y[i]) + return false; + + return true; +} +#endif + +#ifndef memzero +static void memzero(void *buf, size_t size) +{ + uint8_t *b = buf; + uint8_t *e = b + size; + + while (b != e) + *b++ = '\0'; +} +#endif + +#ifndef memmove +/* Not static to avoid a conflict with the prototype in the Linux headers. */ +void *memmove(void *dest, const void *src, size_t size) +{ + uint8_t *d = dest; + const uint8_t *s = src; + size_t i; + + if (d < s) { + for (i = 0; i < size; ++i) + d[i] = s[i]; + } else if (d > s) { + i = size; + while (i-- > 0) + d[i] = s[i]; + } + + return dest; +} +#endif + +/* + * Since we need memmove anyway, would use it as memcpy too. + * Commented out for now to avoid breaking things. + */ +/* +#ifndef memcpy +# define memcpy memmove +#endif +*/ + +#include "xz/xz_crc32.c" +#include "xz/xz_dec_stream.c" +#include "xz/xz_dec_lzma2.c" +#include "xz/xz_dec_bcj.c" + +#endif /* XZ_PREBOOT */ + +/* Size of the input and output buffers in multi-call mode */ +#define XZ_IOBUF_SIZE 4096 + +/* + * This function implements the API defined in . + * + * This wrapper will automatically choose single-call or multi-call mode + * of the native XZ decoder API. The single-call mode can be used only when + * both input and output buffers are available as a single chunk, i.e. when + * fill() and flush() won't be used. 
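A hedged sketch of the single-call use described here, roughly what an architecture's pre-boot stub ends up doing once it defines decompress to this function (my_error and decompress_image are illustrative names):

static void my_error(char *msg)
{
	/* print msg and halt; pre-boot code has nowhere to return an errno */
}

static int decompress_image(unsigned char *in, int in_size,
			    unsigned char *out)
{
	/*
	 * fill == NULL and flush == NULL selects XZ_SINGLE mode: the whole
	 * compressed image is already in memory and out points at a buffer
	 * large enough for the uncompressed result.
	 */
	return unxz(in, in_size, NULL, NULL, out, NULL, my_error);
}
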
+ */ +STATIC int INIT unxz(unsigned char *in, int in_size, + int (*fill)(void *dest, unsigned int size), + int (*flush)(void *src, unsigned int size), + unsigned char *out, int *in_used, + void (*error)(char *x)) +{ + struct xz_buf b; + struct xz_dec *s; + enum xz_ret ret; + bool must_free_in = false; + +#if XZ_INTERNAL_CRC32 + xz_crc32_init(); +#endif + + if (in_used != NULL) + *in_used = 0; + + if (fill == NULL && flush == NULL) + s = xz_dec_init(XZ_SINGLE, 0); + else + s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1); + + if (s == NULL) + goto error_alloc_state; + + if (flush == NULL) { + b.out = out; + b.out_size = (size_t)-1; + } else { + b.out_size = XZ_IOBUF_SIZE; + b.out = malloc(XZ_IOBUF_SIZE); + if (b.out == NULL) + goto error_alloc_out; + } + + if (in == NULL) { + must_free_in = true; + in = malloc(XZ_IOBUF_SIZE); + if (in == NULL) + goto error_alloc_in; + } + + b.in = in; + b.in_pos = 0; + b.in_size = in_size; + b.out_pos = 0; + + if (fill == NULL && flush == NULL) { + ret = xz_dec_run(s, &b); + } else { + do { + if (b.in_pos == b.in_size && fill != NULL) { + if (in_used != NULL) + *in_used += b.in_pos; + + b.in_pos = 0; + + in_size = fill(in, XZ_IOBUF_SIZE); + if (in_size < 0) { + /* + * This isn't an optimal error code + * but it probably isn't worth making + * a new one either. + */ + ret = XZ_BUF_ERROR; + break; + } + + b.in_size = in_size; + } + + ret = xz_dec_run(s, &b); + + if (flush != NULL && (b.out_pos == b.out_size + || (ret != XZ_OK && b.out_pos > 0))) { + /* + * Setting ret here may hide an error + * returned by xz_dec_run(), but probably + * it's not too bad. + */ + if (flush(b.out, b.out_pos) != (int)b.out_pos) + ret = XZ_BUF_ERROR; + + b.out_pos = 0; + } + } while (ret == XZ_OK); + + if (must_free_in) + free(in); + + if (flush != NULL) + free(b.out); + } + + if (in_used != NULL) + *in_used += b.in_pos; + + xz_dec_end(s); + + switch (ret) { + case XZ_STREAM_END: + return 0; + + case XZ_MEM_ERROR: + /* This can occur only in multi-call mode. */ + error("XZ decompressor ran out of memory"); + break; + + case XZ_FORMAT_ERROR: + error("Input is not in the XZ format (wrong magic bytes)"); + break; + + case XZ_OPTIONS_ERROR: + error("Input was encoded with settings that are not " + "supported by this XZ decoder"); + break; + + case XZ_DATA_ERROR: + case XZ_BUF_ERROR: + error("XZ-compressed data is corrupt"); + break; + + default: + error("Bug in the XZ decompressor"); + break; + } + + return -1; + +error_alloc_in: + if (flush != NULL) + free(b.out); + +error_alloc_out: + xz_dec_end(s); + +error_alloc_state: + error("XZ decompressor ran out of memory"); + return -1; +} + +/* + * This macro is used by architecture-specific files to decompress + * the kernel image. 
+ */ +#define decompress unxz diff --git a/lib/flex_array.c b/lib/flex_array.c index 77a6fea7481e..c0ea40ba2082 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -23,6 +23,7 @@ #include #include #include +#include struct flex_array_part { char elements[FLEX_ARRAY_PART_SIZE]; @@ -103,6 +104,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, FLEX_ARRAY_BASE_BYTES_LEFT); return ret; } +EXPORT_SYMBOL(flex_array_alloc); static int fa_element_to_part_nr(struct flex_array *fa, unsigned int element_nr) @@ -126,12 +128,14 @@ void flex_array_free_parts(struct flex_array *fa) for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) kfree(fa->parts[part_nr]); } +EXPORT_SYMBOL(flex_array_free_parts); void flex_array_free(struct flex_array *fa) { flex_array_free_parts(fa); kfree(fa); } +EXPORT_SYMBOL(flex_array_free); static unsigned int index_inside_part(struct flex_array *fa, unsigned int element_nr) @@ -196,6 +200,7 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, memcpy(dst, src, fa->element_size); return 0; } +EXPORT_SYMBOL(flex_array_put); /** * flex_array_clear - clear element in array at @element_nr @@ -223,6 +228,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr) memset(dst, FLEX_ARRAY_FREE, fa->element_size); return 0; } +EXPORT_SYMBOL(flex_array_clear); /** * flex_array_prealloc - guarantee that array space exists @@ -259,6 +265,7 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start, } return 0; } +EXPORT_SYMBOL(flex_array_prealloc); /** * flex_array_get - pull data back out of the array @@ -288,6 +295,7 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr) } return &part->elements[index_inside_part(fa, element_nr)]; } +EXPORT_SYMBOL(flex_array_get); /** * flex_array_get_ptr - pull a ptr back out of the array @@ -308,6 +316,7 @@ void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr) return *tmp; } +EXPORT_SYMBOL(flex_array_get_ptr); static int part_is_free(struct flex_array_part *part) { @@ -348,3 +357,4 @@ int flex_array_shrink(struct flex_array *fa) } return ret; } +EXPORT_SYMBOL(flex_array_shrink); diff --git a/lib/hexdump.c b/lib/hexdump.c index b66b2bd67952..f5fe6ba7a3ab 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -154,6 +154,7 @@ nil: } EXPORT_SYMBOL(hex_dump_to_buffer); +#ifdef CONFIG_PRINTK /** * print_hex_dump - print a text hex dump to syslog for a binary blob of data * @level: kernel log level (e.g. KERN_DEBUG) @@ -238,3 +239,4 @@ void print_hex_dump_bytes(const char *prefix_str, int prefix_type, buf, len, true); } EXPORT_SYMBOL(print_hex_dump_bytes); +#endif diff --git a/lib/ioremap.c b/lib/ioremap.c index 5730ecd3eb66..da4e2ad74b68 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -90,3 +91,4 @@ int ioremap_page_range(unsigned long addr, return err; } +EXPORT_SYMBOL_GPL(ioremap_page_range); diff --git a/lib/nlattr.c b/lib/nlattr.c index 00e8a02681a6..5021cbc34411 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -167,7 +167,7 @@ nla_policy_len(const struct nla_policy *p, int n) * @policy: validation policy * * Parses a stream of attributes and stores a pointer to each attribute in - * the tb array accessable via the attribute type. Attributes with a type + * the tb array accessible via the attribute type. Attributes with a type * exceeding maxtype will be silently ignored for backwards compatibility * reasons. 
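With flex_array_alloc() and friends exported above, modular code can use the flex array API directly; a minimal hedged sketch (store_sample/make_table are illustrative names):

#include <linux/flex_array.h>
#include <linux/slab.h>

static int store_sample(struct flex_array *fa, unsigned int idx, int value)
{
	/* parts are allocated on demand; the GFP flags cover that allocation */
	return flex_array_put(fa, idx, &value, GFP_KERNEL);
}

static struct flex_array *make_table(void)
{
	struct flex_array *fa;

	fa = flex_array_alloc(sizeof(int), 1024, GFP_KERNEL);
	if (!fa)
		return NULL;
	if (store_sample(fa, 0, 42)) {
		flex_array_free(fa);
		return NULL;
	}
	return fa;
}
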
policy may be set to NULL if no validation is required. * diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 7c06ee51a29a..c47bbe11b804 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -60,7 +60,7 @@ int swiotlb_force; static char *io_tlb_start, *io_tlb_end; /* - * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and + * The number of IO TLB blocks (in groups of 64) between io_tlb_start and * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. */ static unsigned long io_tlb_nslabs; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c150d3dafff4..d3023df8477f 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -936,6 +936,8 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } +int kptr_restrict = 1; + /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format @@ -979,6 +981,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, * Implements a "recursive vsnprintf". * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. + * - 'K' For a kernel pointer that should be hidden from unprivileged users * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a @@ -1035,6 +1038,25 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, return buf + vsnprintf(buf, end - buf, ((struct va_format *)ptr)->fmt, *(((struct va_format *)ptr)->va)); + case 'K': + /* + * %pK cannot be used in IRQ context because its test + * for CAP_SYSLOG would be meaningless. + */ + if (in_irq() || in_serving_softirq() || in_nmi()) { + if (spec.field_width == -1) + spec.field_width = 2 * sizeof(void *); + return string(buf, end, "pK-error", spec); + } else if ((kptr_restrict == 0) || + (kptr_restrict == 1 && + has_capability_noaudit(current, CAP_SYSLOG))) + break; + + if (spec.field_width == -1) { + spec.field_width = 2 * sizeof(void *); + spec.flags |= ZEROPAD; + } + return number(buf, end, 0, spec); } spec.flags |= SMALL; if (spec.field_width == -1) { @@ -1451,7 +1473,7 @@ EXPORT_SYMBOL(vsnprintf); * @args: Arguments for the format string * * The return value is the number of characters which have been written into - * the @buf not including the trailing '\0'. If @size is <= 0 the function + * the @buf not including the trailing '\0'. If @size is == 0 the function * returns 0. * * Call this function if you are already dealing with a va_list. @@ -1465,7 +1487,11 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) i = vsnprintf(buf, size, fmt, args); - return (i >= size) ? (size - 1) : i; + if (likely(i < size)) + return i; + if (size != 0) + return size - 1; + return 0; } EXPORT_SYMBOL(vscnprintf); @@ -1513,14 +1539,10 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...) int i; va_start(args, fmt); - i = vsnprintf(buf, size, fmt, args); + i = vscnprintf(buf, size, fmt, args); va_end(args); - if (likely(i < size)) - return i; - if (size != 0) - return size - 1; - return 0; + return i; } EXPORT_SYMBOL(scnprintf); diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig new file mode 100644 index 000000000000..e3b6e18fdac5 --- /dev/null +++ b/lib/xz/Kconfig @@ -0,0 +1,59 @@ +config XZ_DEC + tristate "XZ decompression support" + select CRC32 + help + LZMA2 compression algorithm and BCJ filters are supported using + the .xz file format as the container. 
For integrity checking, + CRC32 is supported. See Documentation/xz.txt for more information. + +config XZ_DEC_X86 + bool "x86 BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_POWERPC + bool "PowerPC BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_IA64 + bool "IA-64 BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_ARM + bool "ARM BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_ARMTHUMB + bool "ARM-Thumb BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_SPARC + bool "SPARC BCJ filter decoder" if EMBEDDED + default y + depends on XZ_DEC + select XZ_DEC_BCJ + +config XZ_DEC_BCJ + bool + default n + +config XZ_DEC_TEST + tristate "XZ decompressor tester" + default n + depends on XZ_DEC + help + This allows passing .xz files to the in-kernel XZ decoder via + a character special file. It calculates CRC32 of the decompressed + data and writes diagnostics to the system log. + + Unless you are developing the XZ decoder, you don't need this + and should say N. diff --git a/lib/xz/Makefile b/lib/xz/Makefile new file mode 100644 index 000000000000..a7fa7693f0f3 --- /dev/null +++ b/lib/xz/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_XZ_DEC) += xz_dec.o +xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o +xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o + +obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c new file mode 100644 index 000000000000..34532d14fd4c --- /dev/null +++ b/lib/xz/xz_crc32.c @@ -0,0 +1,59 @@ +/* + * CRC32 using the polynomial from IEEE-802.3 + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +/* + * This is not the fastest implementation, but it is pretty compact. + * The fastest versions of xz_crc32() on modern CPUs without hardware + * accelerated CRC instruction are 3-5 times as fast as this version, + * but they are bigger and use more memory for the lookup table. + */ + +#include "xz_private.h" + +/* + * STATIC_RW_DATA is used in the pre-boot environment on some architectures. + * See for details. + */ +#ifndef STATIC_RW_DATA +# define STATIC_RW_DATA static +#endif + +STATIC_RW_DATA uint32_t xz_crc32_table[256]; + +XZ_EXTERN void xz_crc32_init(void) +{ + const uint32_t poly = 0xEDB88320; + + uint32_t i; + uint32_t j; + uint32_t r; + + for (i = 0; i < 256; ++i) { + r = i; + for (j = 0; j < 8; ++j) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + + xz_crc32_table[i] = r; + } + + return; +} + +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) +{ + crc = ~crc; + + while (size != 0) { + crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } + + return ~crc; +} diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c new file mode 100644 index 000000000000..e51e2558ca9d --- /dev/null +++ b/lib/xz/xz_dec_bcj.c @@ -0,0 +1,561 @@ +/* + * Branch/Call/Jump (BCJ) filter decoders + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" + +/* + * The rest of the file is inside this ifdef. It makes things a little more + * convenient when building without support for any BCJ filters. 
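+ *
+ * Editor's note (illustrative, not part of the original patch): the
+ * BCJ filters improve compression by storing branch targets in
+ * absolute form, and the decoders below convert them back. For
+ * example, bcj_x86() rewrites the 32-bit operand of E8/E9 call/jmp
+ * instructions to relative form, essentially
+ * dest = src - (pos + i + 5).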
+ */ +#ifdef XZ_DEC_BCJ + +struct xz_dec_bcj { + /* Type of the BCJ filter being used */ + enum { + BCJ_X86 = 4, /* x86 or x86-64 */ + BCJ_POWERPC = 5, /* Big endian only */ + BCJ_IA64 = 6, /* Big or little endian */ + BCJ_ARM = 7, /* Little endian only */ + BCJ_ARMTHUMB = 8, /* Little endian only */ + BCJ_SPARC = 9 /* Big or little endian */ + } type; + + /* + * Return value of the next filter in the chain. We need to preserve + * this information across calls, because we must not call the next + * filter anymore once it has returned XZ_STREAM_END. + */ + enum xz_ret ret; + + /* True if we are operating in single-call mode. */ + bool single_call; + + /* + * Absolute position relative to the beginning of the uncompressed + * data (in a single .xz Block). We care only about the lowest 32 + * bits so this doesn't need to be uint64_t even with big files. + */ + uint32_t pos; + + /* x86 filter state */ + uint32_t x86_prev_mask; + + /* Temporary space to hold the variables from struct xz_buf */ + uint8_t *out; + size_t out_pos; + size_t out_size; + + struct { + /* Amount of already filtered data in the beginning of buf */ + size_t filtered; + + /* Total amount of data currently stored in buf */ + size_t size; + + /* + * Buffer to hold a mix of filtered and unfiltered data. This + * needs to be big enough to hold Alignment + 2 * Look-ahead: + * + * Type Alignment Look-ahead + * x86 1 4 + * PowerPC 4 0 + * IA-64 16 0 + * ARM 4 0 + * ARM-Thumb 2 2 + * SPARC 4 0 + */ + uint8_t buf[16]; + } temp; +}; + +#ifdef XZ_DEC_X86 +/* + * This is used to test the most significant byte of a memory address + * in an x86 instruction. + */ +static inline int bcj_x86_test_msbyte(uint8_t b) +{ + return b == 0x00 || b == 0xFF; +} + +static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const bool mask_to_allowed_status[8] + = { true, true, true, false, true, false, false, false }; + + static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 }; + + size_t i; + size_t prev_pos = (size_t)-1; + uint32_t prev_mask = s->x86_prev_mask; + uint32_t src; + uint32_t dest; + uint32_t j; + uint8_t b; + + if (size <= 4) + return 0; + + size -= 4; + for (i = 0; i < size; ++i) { + if ((buf[i] & 0xFE) != 0xE8) + continue; + + prev_pos = i - prev_pos; + if (prev_pos > 3) { + prev_mask = 0; + } else { + prev_mask = (prev_mask << (prev_pos - 1)) & 7; + if (prev_mask != 0) { + b = buf[i + 4 - mask_to_bit_num[prev_mask]]; + if (!mask_to_allowed_status[prev_mask] + || bcj_x86_test_msbyte(b)) { + prev_pos = i; + prev_mask = (prev_mask << 1) | 1; + continue; + } + } + } + + prev_pos = i; + + if (bcj_x86_test_msbyte(buf[i + 4])) { + src = get_unaligned_le32(buf + i + 1); + while (true) { + dest = src - (s->pos + (uint32_t)i + 5); + if (prev_mask == 0) + break; + + j = mask_to_bit_num[prev_mask] * 8; + b = (uint8_t)(dest >> (24 - j)); + if (!bcj_x86_test_msbyte(b)) + break; + + src = dest ^ (((uint32_t)1 << (32 - j)) - 1); + } + + dest &= 0x01FFFFFF; + dest |= (uint32_t)0 - (dest & 0x01000000); + put_unaligned_le32(dest, buf + i + 1); + i += 4; + } else { + prev_mask = (prev_mask << 1) | 1; + } + } + + prev_pos = i - prev_pos; + s->x86_prev_mask = prev_pos > 3 ? 
0 : prev_mask << (prev_pos - 1); + return i; +} +#endif + +#ifdef XZ_DEC_POWERPC +static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr & 0xFC000003) == 0x48000001) { + instr &= 0x03FFFFFC; + instr -= s->pos + (uint32_t)i; + instr &= 0x03FFFFFC; + instr |= 0x48000001; + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_IA64 +static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const uint8_t branch_table[32] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 6, 6, 0, 0, 7, 7, + 4, 4, 0, 0, 4, 4, 0, 0 + }; + + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. + */ + + /* Loop counters */ + size_t i; + size_t j; + + /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ + uint32_t slot; + + /* Bitwise offset of the instruction indicated by slot */ + uint32_t bit_pos; + + /* bit_pos split into byte and bit parts */ + uint32_t byte_pos; + uint32_t bit_res; + + /* Address part of an instruction */ + uint32_t addr; + + /* Mask used to detect which instructions to convert */ + uint32_t mask; + + /* 41-bit instruction stored somewhere in the lowest 48 bits */ + uint64_t instr; + + /* Instruction normalized with bit_res for easier manipulation */ + uint64_t norm; + + for (i = 0; i + 16 <= size; i += 16) { + mask = branch_table[buf[i] & 0x1F]; + for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) { + if (((mask >> slot) & 1) == 0) + continue; + + byte_pos = bit_pos >> 3; + bit_res = bit_pos & 7; + instr = 0; + for (j = 0; j < 6; ++j) + instr |= (uint64_t)(buf[i + j + byte_pos]) + << (8 * j); + + norm = instr >> bit_res; + + if (((norm >> 37) & 0x0F) == 0x05 + && ((norm >> 9) & 0x07) == 0) { + addr = (norm >> 13) & 0x0FFFFF; + addr |= ((uint32_t)(norm >> 36) & 1) << 20; + addr <<= 4; + addr -= s->pos + (uint32_t)i; + addr >>= 4; + + norm &= ~((uint64_t)0x8FFFFF << 13); + norm |= (uint64_t)(addr & 0x0FFFFF) << 13; + norm |= (uint64_t)(addr & 0x100000) + << (36 - 20); + + instr &= (1 << bit_res) - 1; + instr |= norm << bit_res; + + for (j = 0; j < 6; j++) + buf[i + j + byte_pos] + = (uint8_t)(instr >> (8 * j)); + } + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARM +static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 4) { + if (buf[i + 3] == 0xEB) { + addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) + | ((uint32_t)buf[i + 2] << 16); + addr <<= 2; + addr -= s->pos + (uint32_t)i + 8; + addr >>= 2; + buf[i] = (uint8_t)addr; + buf[i + 1] = (uint8_t)(addr >> 8); + buf[i + 2] = (uint8_t)(addr >> 16); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARMTHUMB +static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 2) { + if ((buf[i + 1] & 0xF8) == 0xF0 + && (buf[i + 3] & 0xF8) == 0xF8) { + addr = (((uint32_t)buf[i + 1] & 0x07) << 19) + | ((uint32_t)buf[i] << 11) + | (((uint32_t)buf[i + 3] & 0x07) << 8) + | (uint32_t)buf[i + 2]; + addr <<= 1; + addr -= s->pos + (uint32_t)i + 4; + addr >>= 1; + buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); + buf[i] = (uint8_t)(addr >> 11); + buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 
0x07)); + buf[i + 2] = (uint8_t)addr; + i += 2; + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_SPARC +static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) { + instr <<= 2; + instr -= s->pos + (uint32_t)i; + instr >>= 2; + instr = ((uint32_t)0x40000000 - (instr & 0x400000)) + | 0x40000000 | (instr & 0x3FFFFF); + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +/* + * Apply the selected BCJ filter. Update *pos and s->pos to match the amount + * of data that got filtered. + * + * NOTE: This is implemented as a switch statement to avoid using function + * pointers, which could be problematic in the kernel boot code, which must + * avoid pointers to static data (at least on x86). + */ +static void bcj_apply(struct xz_dec_bcj *s, + uint8_t *buf, size_t *pos, size_t size) +{ + size_t filtered; + + buf += *pos; + size -= *pos; + + switch (s->type) { +#ifdef XZ_DEC_X86 + case BCJ_X86: + filtered = bcj_x86(s, buf, size); + break; +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: + filtered = bcj_powerpc(s, buf, size); + break; +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: + filtered = bcj_ia64(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: + filtered = bcj_arm(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: + filtered = bcj_armthumb(s, buf, size); + break; +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: + filtered = bcj_sparc(s, buf, size); + break; +#endif + default: + /* Never reached but silence compiler warnings. */ + filtered = 0; + break; + } + + *pos += filtered; + s->pos += filtered; +} + +/* + * Flush pending filtered data from temp to the output buffer. + * Move the remaining mixture of possibly filtered and unfiltered + * data to the beginning of temp. + */ +static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) +{ + size_t copy_size; + + copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); + memcpy(b->out + b->out_pos, s->temp.buf, copy_size); + b->out_pos += copy_size; + + s->temp.filtered -= copy_size; + s->temp.size -= copy_size; + memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); +} + +/* + * The BCJ filter functions are primitive in sense that they process the + * data in chunks of 1-16 bytes. To hide this issue, this function does + * some buffering. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, + struct xz_buf *b) +{ + size_t out_start; + + /* + * Flush pending already filtered data to the output buffer. Return + * immediatelly if we couldn't flush everything, or if the next + * filter in the chain had already returned XZ_STREAM_END. + */ + if (s->temp.filtered > 0) { + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + } + + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly. 
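+ *
+ * Editor's note (illustrative walk-through, not part of the original
+ * patch): when there is room in b->out, the unfiltered bytes held in
+ * temp are copied to b->out first, xz_dec_lzma2_run() appends more
+ * data after them, bcj_apply() filters as much of that data as it
+ * can, and the small unfiltered tail (it always fits in temp.buf) is
+ * copied back into temp while b->out_pos is rewound by the same
+ * amount.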
+ */ + if (s->temp.size < b->out_size - b->out_pos) { + out_start = b->out_pos; + memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); + b->out_pos += s->temp.size; + + s->ret = xz_dec_lzma2_run(lzma2, b); + if (s->ret != XZ_STREAM_END + && (s->ret != XZ_OK || s->single_call)) + return s->ret; + + bcj_apply(s, b->out, &out_start, b->out_pos); + + /* + * As an exception, if the next filter returned XZ_STREAM_END, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. + */ + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + + s->temp.size = b->out_pos - out_start; + b->out_pos -= s->temp.size; + memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + } + + /* + * If we have unfiltered data in temp, try to fill by decoding more + * data from the next filter. Apply the BCJ filter on temp. Then we + * hopefully can fill the actual output buffer by copying filtered + * data from temp. A mix of filtered and unfiltered data may be left + * in temp; it will be taken care on the next call to this function. + */ + if (s->temp.size > 0) { + /* Make b->out{,_pos,_size} temporarily point to s->temp. */ + s->out = b->out; + s->out_pos = b->out_pos; + s->out_size = b->out_size; + b->out = s->temp.buf; + b->out_pos = s->temp.size; + b->out_size = sizeof(s->temp.buf); + + s->ret = xz_dec_lzma2_run(lzma2, b); + + s->temp.size = b->out_pos; + b->out = s->out; + b->out_pos = s->out_pos; + b->out_size = s->out_size; + + if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) + return s->ret; + + bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); + + /* + * If the next filter returned XZ_STREAM_END, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is. + */ + if (s->ret == XZ_STREAM_END) + s->temp.filtered = s->temp.size; + + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + } + + return s->ret; +} + +XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call) +{ + struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s != NULL) + s->single_call = single_call; + + return s; +} + +XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id) +{ + switch (id) { +#ifdef XZ_DEC_X86 + case BCJ_X86: +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: +#endif + break; + + default: + /* Unsupported Filter ID */ + return XZ_OPTIONS_ERROR; + } + + s->type = id; + s->ret = XZ_OK; + s->pos = 0; + s->x86_prev_mask = 0; + s->temp.filtered = 0; + s->temp.size = 0; + + return XZ_OK; +} + +#endif diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c new file mode 100644 index 000000000000..ea5fa4fe9d67 --- /dev/null +++ b/lib/xz/xz_dec_lzma2.c @@ -0,0 +1,1171 @@ +/* + * LZMA2 decoder + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" +#include "xz_lzma2.h" + +/* + * Range decoder initialization eats the first five bytes of each LZMA chunk. + */ +#define RC_INIT_BYTES 5 + +/* + * Minimum number of usable input buffer to safely decode one LZMA symbol. + * The worst case is that we decode 22 bits using probabilities and 26 + * direct bits. This may decode at maximum of 20 bytes of input. 
However, + * lzma_main() does an extra normalization before returning, thus we + * need to put 21 here. + */ +#define LZMA_IN_REQUIRED 21 + +/* + * Dictionary (history buffer) + * + * These are always true: + * start <= pos <= full <= end + * pos <= limit <= end + * + * In multi-call mode, also these are true: + * end == size + * size <= size_max + * allocated <= size + * + * Most of these variables are size_t to support single-call mode, + * in which the dictionary variables address the actual output + * buffer directly. + */ +struct dictionary { + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. + */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.) + */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; +}; + +/* Range decoder */ +struct rc_dec { + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +/* Probabilities for a length decoder. */ +struct lzma_len_dec { + /* Probability of match length being at least 10 */ + uint16_t choice; + + /* Probability of match length being at least 18 */ + uint16_t choice2; + + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; +}; + +struct lzma_dec { + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask dervied from the number of literal + * position bits, and a mask dervied from the number + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. 
*/ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probility trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; +}; + +struct lzma2_dec { + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk. */ + enum lzma2_seq next_sequence; + + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uint32_t uncompressed; + + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + uint32_t compressed; + + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + bool need_dict_reset; + + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + bool need_props; +}; + +struct xz_dec_lzma2 { + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2_lzma() for details. + */ + struct { + uint32_t size; + uint8_t buf[3 * LZMA_IN_REQUIRED]; + } temp; +}; + +/************** + * Dictionary * + **************/ + +/* + * Reset the dictionary state. When in single-call mode, set up the beginning + * of the dictionary to point to the actual output buffer. 
+ */ +static void dict_reset(struct dictionary *dict, struct xz_buf *b) +{ + if (DEC_IS_SINGLE(dict->mode)) { + dict->buf = b->out + b->out_pos; + dict->end = b->out_size - b->out_pos; + } + + dict->start = 0; + dict->pos = 0; + dict->limit = 0; + dict->full = 0; +} + +/* Set dictionary write limit */ +static void dict_limit(struct dictionary *dict, size_t out_max) +{ + if (dict->end - dict->pos <= out_max) + dict->limit = dict->end; + else + dict->limit = dict->pos + out_max; +} + +/* Return true if at least one byte can be written into the dictionary. */ +static inline bool dict_has_space(const struct dictionary *dict) +{ + return dict->pos < dict->limit; +} + +/* + * Get a byte from the dictionary at the given distance. The distance is + * assumed to valid, or as a special case, zero when the dictionary is + * still empty. This special case is needed for single-call decoding to + * avoid writing a '\0' to the end of the destination buffer. + */ +static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist) +{ + size_t offset = dict->pos - dist - 1; + + if (dist >= dict->pos) + offset += dict->end; + + return dict->full > 0 ? dict->buf[offset] : 0; +} + +/* + * Put one byte into the dictionary. It is assumed that there is space for it. + */ +static inline void dict_put(struct dictionary *dict, uint8_t byte) +{ + dict->buf[dict->pos++] = byte; + + if (dict->full < dict->pos) + dict->full = dict->pos; +} + +/* + * Repeat given number of bytes from the given distance. If the distance is + * invalid, false is returned. On success, true is returned and *len is + * updated to indicate how many bytes were left to be repeated. + */ +static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist) +{ + size_t back; + uint32_t left; + + if (dist >= dict->full || dist >= dict->size) + return false; + + left = min_t(size_t, dict->limit - dict->pos, *len); + *len -= left; + + back = dict->pos - dist - 1; + if (dist >= dict->pos) + back += dict->end; + + do { + dict->buf[dict->pos++] = dict->buf[back++]; + if (back == dict->end) + back = 0; + } while (--left > 0); + + if (dict->full < dict->pos) + dict->full = dict->pos; + + return true; +} + +/* Copy uncompressed data as is from input to dictionary and output buffers. */ +static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, + uint32_t *left) +{ + size_t copy_size; + + while (*left > 0 && b->in_pos < b->in_size + && b->out_pos < b->out_size) { + copy_size = min(b->in_size - b->in_pos, + b->out_size - b->out_pos); + if (copy_size > dict->end - dict->pos) + copy_size = dict->end - dict->pos; + if (copy_size > *left) + copy_size = *left; + + *left -= copy_size; + + memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + dict->pos += copy_size; + + if (dict->full < dict->pos) + dict->full = dict->pos; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + memcpy(b->out + b->out_pos, b->in + b->in_pos, + copy_size); + } + + dict->start = dict->pos; + + b->out_pos += copy_size; + b->in_pos += copy_size; + } +} + +/* + * Flush pending data from dictionary to b->out. It is assumed that there is + * enough space in b->out. This is guaranteed because caller uses dict_limit() + * before decoding data into the dictionary. 
+ */ +static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) +{ + size_t copy_size = dict->pos - dict->start; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + memcpy(b->out + b->out_pos, dict->buf + dict->start, + copy_size); + } + + dict->start = dict->pos; + b->out_pos += copy_size; + return copy_size; +} + +/***************** + * Range decoder * + *****************/ + +/* Reset the range decoder. */ +static void rc_reset(struct rc_dec *rc) +{ + rc->range = (uint32_t)-1; + rc->code = 0; + rc->init_bytes_left = RC_INIT_BYTES; +} + +/* + * Read the first five initial bytes into rc->code if they haven't been + * read already. (Yes, the first byte gets completely ignored.) + */ +static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b) +{ + while (rc->init_bytes_left > 0) { + if (b->in_pos == b->in_size) + return false; + + rc->code = (rc->code << 8) + b->in[b->in_pos++]; + --rc->init_bytes_left; + } + + return true; +} + +/* Return true if there may not be enough input for the next decoding loop. */ +static inline bool rc_limit_exceeded(const struct rc_dec *rc) +{ + return rc->in_pos > rc->in_limit; +} + +/* + * Return true if it is possible (from point of view of range decoder) that + * we have reached the end of the LZMA chunk. + */ +static inline bool rc_is_finished(const struct rc_dec *rc) +{ + return rc->code == 0; +} + +/* Read the next input byte if needed. */ +static __always_inline void rc_normalize(struct rc_dec *rc) +{ + if (rc->range < RC_TOP_VALUE) { + rc->range <<= RC_SHIFT_BITS; + rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; + } +} + +/* + * Decode one bit. In some versions, this function has been splitted in three + * functions so that the compiler is supposed to be able to more easily avoid + * an extra branch. In this particular version of the LZMA decoder, this + * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3 + * on x86). Using a non-splitted version results in nicer looking code too. + * + * NOTE: This must return an int. Do not make it return a bool or the speed + * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care, + * and it generates 10-20 % faster code than GCC 3.x from this file anyway.) + */ +static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob) +{ + uint32_t bound; + int bit; + + rc_normalize(rc); + bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; + if (rc->code < bound) { + rc->range = bound; + *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; + bit = 0; + } else { + rc->range -= bound; + rc->code -= bound; + *prob -= *prob >> RC_MOVE_BITS; + bit = 1; + } + + return bit; +} + +/* Decode a bittree starting from the most significant bit. */ +static __always_inline uint32_t rc_bittree(struct rc_dec *rc, + uint16_t *probs, uint32_t limit) +{ + uint32_t symbol = 1; + + do { + if (rc_bit(rc, &probs[symbol])) + symbol = (symbol << 1) + 1; + else + symbol <<= 1; + } while (symbol < limit); + + return symbol; +} + +/* Decode a bittree starting from the least significant bit. 
*/ +static __always_inline void rc_bittree_reverse(struct rc_dec *rc, + uint16_t *probs, + uint32_t *dest, uint32_t limit) +{ + uint32_t symbol = 1; + uint32_t i = 0; + + do { + if (rc_bit(rc, &probs[symbol])) { + symbol = (symbol << 1) + 1; + *dest += 1 << i; + } else { + symbol <<= 1; + } + } while (++i < limit); +} + +/* Decode direct bits (fixed fifty-fifty probability) */ +static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) +{ + uint32_t mask; + + do { + rc_normalize(rc); + rc->range >>= 1; + rc->code -= rc->range; + mask = (uint32_t)0 - (rc->code >> 31); + rc->code += rc->range & mask; + *dest = (*dest << 1) + (mask + 1); + } while (--limit > 0); +} + +/******** + * LZMA * + ********/ + +/* Get pointer to literal coder probability array. */ +static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s) +{ + uint32_t prev_byte = dict_get(&s->dict, 0); + uint32_t low = prev_byte >> (8 - s->lzma.lc); + uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; + return s->lzma.literal[low + high]; +} + +/* Decode a literal (one 8-bit byte) */ +static void lzma_literal(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + uint32_t symbol; + uint32_t match_byte; + uint32_t match_bit; + uint32_t offset; + uint32_t i; + + probs = lzma_literal_probs(s); + + if (lzma_state_is_literal(s->lzma.state)) { + symbol = rc_bittree(&s->rc, probs, 0x100); + } else { + symbol = 1; + match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; + offset = 0x100; + + do { + match_bit = match_byte & offset; + match_byte <<= 1; + i = offset + match_bit + symbol; + + if (rc_bit(&s->rc, &probs[i])) { + symbol = (symbol << 1) + 1; + offset &= match_bit; + } else { + symbol <<= 1; + offset &= ~match_bit; + } + } while (symbol < 0x100); + } + + dict_put(&s->dict, (uint8_t)symbol); + lzma_state_literal(&s->lzma.state); +} + +/* Decode the length of the match into s->lzma.len. */ +static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, + uint32_t pos_state) +{ + uint16_t *probs; + uint32_t limit; + + if (!rc_bit(&s->rc, &l->choice)) { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } else { + if (!rc_bit(&s->rc, &l->choice2)) { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } else { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; +} + +/* Decode a match. The distance will be stored in s->lzma.rep0. 
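+ *
+ * Editor's note (illustrative example, not part of the original
+ * patch): with the usual LZMA constants (DIST_MODEL_START = 4,
+ * DIST_MODEL_END = 14), a decoded dist_slot of 7 gives
+ * limit = (7 >> 1) - 1 = 2 and a base of 2 + (7 & 1) = 3, which is
+ * shifted left to 12; two reverse-bittree bits then select a final
+ * distance in the range 12..15.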
*/ +static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if (dist_slot < DIST_MODEL_START) { + s->lzma.rep0 = dist_slot; + } else { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if (dist_slot < DIST_MODEL_END) { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 + - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, + &s->lzma.rep0, limit); + } else { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, + &s->lzma.rep0, ALIGN_BITS); + } + } +} + +/* + * Decode a repeated match. The distance is one of the four most recently + * seen matches. The distance will be stored in s->lzma.rep0. + */ +static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint32_t tmp; + + if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { + if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ + s->lzma.state][pos_state])) { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) { + tmp = s->lzma.rep1; + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { + tmp = s->lzma.rep2; + } else { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); +} + +/* LZMA decoder core */ +static bool lzma_main(struct xz_dec_lzma2 *s) +{ + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if (dict_has_space(&s->dict) && s->lzma.len > 0) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. + */ + while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) { + pos_state = s->dict.pos & s->lzma.pos_mask; + + if (!rc_bit(&s->rc, &s->lzma.is_match[ + s->lzma.state][pos_state])) { + lzma_literal(s); + } else { + if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) + lzma_rep_match(s, pos_state); + else + lzma_match(s, pos_state); + + if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) + return false; + } + } + + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rc_normalize(&s->rc); + + return true; +} + +/* + * Reset the LZMA decoder and range decoder state. Dictionary is nore reset + * here, because LZMA state may be reset without resetting the dictionary. + */ +static void lzma_reset(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + size_t i; + + s->lzma.state = STATE_LIT_LIT; + s->lzma.rep0 = 0; + s->lzma.rep1 = 0; + s->lzma.rep2 = 0; + s->lzma.rep3 = 0; + + /* + * All probabilities are initialized to the same value. This hack + * makes the code smaller by avoiding a separate loop for each + * probability array. 
+ * + * This could be optimized so that only that part of literal + * probabilities that are actually required. In the common case + * we would write 12 KiB less. + */ + probs = s->lzma.is_match[0]; + for (i = 0; i < PROBS_TOTAL; ++i) + probs[i] = RC_BIT_MODEL_TOTAL / 2; + + rc_reset(&s->rc); +} + +/* + * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks + * from the decoded lp and pb values. On success, the LZMA decoder state is + * reset and true is returned. + */ +static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props) +{ + if (props > (4 * 5 + 4) * 9 + 8) + return false; + + s->lzma.pos_mask = 0; + while (props >= 9 * 5) { + props -= 9 * 5; + ++s->lzma.pos_mask; + } + + s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; + + s->lzma.literal_pos_mask = 0; + while (props >= 9) { + props -= 9; + ++s->lzma.literal_pos_mask; + } + + s->lzma.lc = props; + + if (s->lzma.lc + s->lzma.literal_pos_mask > 4) + return false; + + s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; + + lzma_reset(s); + + return true; +} + +/********* + * LZMA2 * + *********/ + +/* + * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't + * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This + * wrapper function takes care of making the LZMA decoder's assumption safe. + * + * As long as there is plenty of input left to be decoded in the current LZMA + * chunk, we decode directly from the caller-supplied input buffer until + * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into + * s->temp.buf, which (hopefully) gets filled on the next call to this + * function. We decode a few bytes from the temporary buffer so that we can + * continue decoding from the caller-supplied input buffer again. 
+ */ +static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) +{ + size_t in_avail; + uint32_t tmp; + + in_avail = b->in_size - b->in_pos; + if (s->temp.size > 0 || s->lzma2.compressed == 0) { + tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; + if (tmp > s->lzma2.compressed - s->temp.size) + tmp = s->lzma2.compressed - s->temp.size; + if (tmp > in_avail) + tmp = in_avail; + + memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); + + if (s->temp.size + tmp == s->lzma2.compressed) { + memzero(s->temp.buf + s->temp.size + tmp, + sizeof(s->temp.buf) + - s->temp.size - tmp); + s->rc.in_limit = s->temp.size + tmp; + } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) { + s->temp.size += tmp; + b->in_pos += tmp; + return true; + } else { + s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; + } + + s->rc.in = s->temp.buf; + s->rc.in_pos = 0; + + if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) + return false; + + s->lzma2.compressed -= s->rc.in_pos; + + if (s->rc.in_pos < s->temp.size) { + s->temp.size -= s->rc.in_pos; + memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, + s->temp.size); + return true; + } + + b->in_pos += s->rc.in_pos - s->temp.size; + s->temp.size = 0; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail >= LZMA_IN_REQUIRED) { + s->rc.in = b->in; + s->rc.in_pos = b->in_pos; + + if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) + s->rc.in_limit = b->in_pos + s->lzma2.compressed; + else + s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; + + if (!lzma_main(s)) + return false; + + in_avail = s->rc.in_pos - b->in_pos; + if (in_avail > s->lzma2.compressed) + return false; + + s->lzma2.compressed -= in_avail; + b->in_pos = s->rc.in_pos; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail < LZMA_IN_REQUIRED) { + if (in_avail > s->lzma2.compressed) + in_avail = s->lzma2.compressed; + + memcpy(s->temp.buf, b->in + b->in_pos, in_avail); + s->temp.size = in_avail; + b->in_pos += in_avail; + } + + return true; +} + +/* + * Take care of the LZMA2 control layer, and forward the job of actual LZMA + * decoding or copying of uncompressed chunks to other functions. + */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, + struct xz_buf *b) +{ + uint32_t tmp; + + while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) { + switch (s->lzma2.sequence) { + case SEQ_CONTROL: + /* + * LZMA2 control byte + * + * Exact values: + * 0x00 End marker + * 0x01 Dictionary reset followed by + * an uncompressed chunk + * 0x02 Uncompressed chunk (no dictionary reset) + * + * Highest three bits (s->control & 0xE0): + * 0xE0 Dictionary reset, new properties and state + * reset, followed by LZMA compressed chunk + * 0xC0 New properties and state reset, followed + * by LZMA compressed chunk (no dictionary + * reset) + * 0xA0 State reset using old properties, + * followed by LZMA compressed chunk (no + * dictionary reset) + * 0x80 LZMA chunk (no dictionary or state reset) + * + * For LZMA compressed chunks, the lowest five bits + * (s->control & 1F) are the highest bits of the + * uncompressed size (bits 16-20). + * + * A new LZMA2 stream must begin with a dictionary + * reset. The first LZMA chunk must set new + * properties and reset the LZMA state. + * + * Values that don't match anything described above + * are invalid and we return XZ_DATA_ERROR. 
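+ *
+ * Editor's note (illustrative example, not part of the original
+ * patch): a stream typically begins with a control byte in the
+ * 0xE0-0xFF range. For instance 0xE0 requests a dictionary reset and
+ * new properties, and its low five bits (zero here) supply bits
+ * 16-20 of the uncompressed size, so following size bytes 0x7F 0xFF
+ * give an uncompressed chunk size of 0x7F00 + 0xFF + 1 = 32768. The
+ * next two bytes encode the compressed size minus one, and a single
+ * properties byte follows before the LZMA data itself.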
+ */ + tmp = b->in[b->in_pos++]; + + if (tmp >= 0xE0 || tmp == 0x01) { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } else if (s->lzma2.need_dict_reset) { + return XZ_DATA_ERROR; + } + + if (tmp >= 0x80) { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if (tmp >= 0xC0) { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. + */ + s->lzma2.need_props = false; + s->lzma2.next_sequence + = SEQ_PROPERTIES; + + } else if (s->lzma2.need_props) { + return XZ_DATA_ERROR; + + } else { + s->lzma2.next_sequence + = SEQ_LZMA_PREPARE; + if (tmp >= 0xA0) + lzma_reset(s); + } + } else { + if (tmp == 0x00) + return XZ_STREAM_END; + + if (tmp > 0x02) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed + = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if (!lzma_props(s, b->in[b->in_pos++])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + case SEQ_LZMA_PREPARE: + if (s->lzma2.compressed < RC_INIT_BYTES) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. 
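+ *
+ * Editor's note (illustrative, not part of the original patch): if
+ * the chunk still has 100 KiB of uncompressed data left but only
+ * 4 KiB of output space remain, dict_limit() caps this pass at
+ * 4 KiB, dict_flush() copies those bytes to b->out, and XZ_OK is
+ * returned so that the caller can provide a fresh output buffer and
+ * re-enter SEQ_LZMA_RUN.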
+ */ + dict_limit(&s->dict, min_t(size_t, + b->out_size - b->out_pos, + s->lzma2.uncompressed)); + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 + || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + + } else if (b->out_pos == b->out_size + || (b->in_pos == b->in_size + && s->temp.size + < s->lzma2.compressed)) { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if (s->lzma2.compressed > 0) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; +} + +XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, + uint32_t dict_max) +{ + struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if (DEC_IS_PREALLOC(mode)) { + s->dict.buf = vmalloc(dict_max); + if (s->dict.buf == NULL) { + kfree(s); + return NULL; + } + } else if (DEC_IS_DYNALLOC(mode)) { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; +} + +XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) +{ + /* This limits dictionary size to 3 GiB to keep parsing simpler. */ + if (props > 39) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if (DEC_IS_MULTI(s->dict.mode)) { + if (s->dict.size > s->dict.size_max) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if (DEC_IS_DYNALLOC(s->dict.mode)) { + if (s->dict.allocated < s->dict.size) { + vfree(s->dict.buf); + s->dict.buf = vmalloc(s->dict.size); + if (s->dict.buf == NULL) { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma.len = 0; + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; +} + +XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s) +{ + if (DEC_IS_MULTI(s->dict.mode)) + vfree(s->dict.buf); + + kfree(s); +} diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c new file mode 100644 index 000000000000..ac809b1e64f7 --- /dev/null +++ b/lib/xz/xz_dec_stream.c @@ -0,0 +1,821 @@ +/* + * .xz Stream decoder + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" +#include "xz_stream.h" + +/* Hash used to validate the Index field */ +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec { + /* Position in dec_main() */ + enum { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; + + /* CRC32 value in Block or Index */ + uint32_t crc32; + + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. 
+ */ + bool allow_buf_error; + + /* Information stored in Block Header */ + struct { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct { + /* Position in dec_index() */ + enum { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. buf[] has to be aligned + * to a multiple of four bytes; the size_t variables before it + * should guarantee this. + */ + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + + struct xz_dec_lzma2 *lzma2; + +#ifdef XZ_DEC_BCJ + struct xz_dec_bcj *bcj; + bool bcj_active; +#endif +}; + +#ifdef XZ_DEC_ANY_CHECK +/* Sizes of the Check field with different Check IDs */ +static const uint8_t check_sizes[16] = { + 0, + 4, 4, 4, + 8, 8, 8, + 16, 16, 16, + 32, 32, 32, + 64, 64, 64 +}; +#endif + +/* + * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller + * must have set s->temp.pos to indicate how much data we are supposed + * to copy into s->temp.buf. Return true once s->temp.pos has reached + * s->temp.size. + */ +static bool fill_temp(struct xz_dec *s, struct xz_buf *b) +{ + size_t copy_size = min_t(size_t, + b->in_size - b->in_pos, s->temp.size - s->temp.pos); + + memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); + b->in_pos += copy_size; + s->temp.pos += copy_size; + + if (s->temp.pos == s->temp.size) { + s->temp.pos = 0; + return true; + } + + return false; +} + +/* Decode a variable-length integer (little-endian base-128 encoding) */ +static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, + size_t *in_pos, size_t in_size) +{ + uint8_t byte; + + if (s->pos == 0) + s->vli = 0; + + while (*in_pos < in_size) { + byte = in[*in_pos]; + ++*in_pos; + + s->vli |= (vli_type)(byte & 0x7F) << s->pos; + + if ((byte & 0x80) == 0) { + /* Don't allow non-minimal encodings. */ + if (byte == 0 && s->pos != 0) + return XZ_DATA_ERROR; + + s->pos = 0; + return XZ_STREAM_END; + } + + s->pos += 7; + if (s->pos == 7 * VLI_BYTES_MAX) + return XZ_DATA_ERROR; + } + + return XZ_OK; +} + +/* + * Decode the Compressed Data field from a Block. Update and validate + * the observed compressed and uncompressed sizes of the Block so that + * they don't exceed the values possibly stored in the Block Header + * (validation assumes that no integer overflow occurs, since vli_type + * is normally uint64_t). 
Update the CRC32 if presence of the CRC32 + * field was indicated in Stream Header. + * + * Once the decoding is finished, validate that the observed sizes match + * the sizes possibly stored in the Block Header. Update the hash and + * Block count, which are later used to validate the Index field. + */ +static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + s->in_start = b->in_pos; + s->out_start = b->out_pos; + +#ifdef XZ_DEC_BCJ + if (s->bcj_active) + ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); + else +#endif + ret = xz_dec_lzma2_run(s->lzma2, b); + + s->block.compressed += b->in_pos - s->in_start; + s->block.uncompressed += b->out_pos - s->out_start; + + /* + * There is no need to separately check for VLI_UNKNOWN, since + * the observed sizes are always smaller than VLI_UNKNOWN. + */ + if (s->block.compressed > s->block_header.compressed + || s->block.uncompressed + > s->block_header.uncompressed) + return XZ_DATA_ERROR; + + if (s->check_type == XZ_CHECK_CRC32) + s->crc32 = xz_crc32(b->out + s->out_start, + b->out_pos - s->out_start, s->crc32); + + if (ret == XZ_STREAM_END) { + if (s->block_header.compressed != VLI_UNKNOWN + && s->block_header.compressed + != s->block.compressed) + return XZ_DATA_ERROR; + + if (s->block_header.uncompressed != VLI_UNKNOWN + && s->block_header.uncompressed + != s->block.uncompressed) + return XZ_DATA_ERROR; + + s->block.hash.unpadded += s->block_header.size + + s->block.compressed; + +#ifdef XZ_DEC_ANY_CHECK + s->block.hash.unpadded += check_sizes[s->check_type]; +#else + if (s->check_type == XZ_CHECK_CRC32) + s->block.hash.unpadded += 4; +#endif + + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = xz_crc32( + (const uint8_t *)&s->block.hash, + sizeof(s->block.hash), s->block.hash.crc32); + + ++s->block.count; + } + + return ret; +} + +/* Update the Index size and the CRC32 value. */ +static void index_update(struct xz_dec *s, const struct xz_buf *b) +{ + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); +} + +/* + * Decode the Number of Records, Unpadded Size, and Uncompressed Size + * fields from the Index field. That is, Index Padding and CRC32 are not + * decoded by this function. + * + * This can return XZ_OK (more input needed), XZ_STREAM_END (everything + * successfully decoded), or XZ_DATA_ERROR (input is corrupt). + */ +static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + do { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if (ret != XZ_STREAM_END) { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if (s->index.count != s->block.count) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = xz_crc32( + (const uint8_t *)&s->index.hash, + sizeof(s->index.hash), + s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while (s->index.count > 0); + + return XZ_STREAM_END; +} + +/* + * Validate that the next four input bytes match the value of s->crc32. 
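+ * (Editor's note, illustrative: the stored CRC32 is little-endian,
+ * so a computed value of 0x12345678 must be followed in the input by
+ * the bytes 0x78 0x56 0x34 0x12, compared one byte per iteration of
+ * the loop below.)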
+ * s->pos must be zero when starting to validate the first byte. + */ +static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b) +{ + do { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) + return XZ_DATA_ERROR; + + s->pos += 8; + + } while (s->pos < 32); + + s->crc32 = 0; + s->pos = 0; + + return XZ_STREAM_END; +} + +#ifdef XZ_DEC_ANY_CHECK +/* + * Skip over the Check field when the Check ID is not supported. + * Returns true once the whole Check field has been skipped over. + */ +static bool check_skip(struct xz_dec *s, struct xz_buf *b) +{ + while (s->pos < check_sizes[s->check_type]) { + if (b->in_pos == b->in_size) + return false; + + ++b->in_pos; + ++s->pos; + } + + s->pos = 0; + + return true; +} +#endif + +/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ +static enum xz_ret dec_stream_header(struct xz_dec *s) +{ + if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) + return XZ_FORMAT_ERROR; + + if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) + != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) + return XZ_DATA_ERROR; + + if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) + return XZ_OPTIONS_ERROR; + + /* + * Of integrity checks, we support only none (Check ID = 0) and + * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, + * we will accept other check types too, but then the check won't + * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. + */ + s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; + +#ifdef XZ_DEC_ANY_CHECK + if (s->check_type > XZ_CHECK_MAX) + return XZ_OPTIONS_ERROR; + + if (s->check_type > XZ_CHECK_CRC32) + return XZ_UNSUPPORTED_CHECK; +#else + if (s->check_type > XZ_CHECK_CRC32) + return XZ_OPTIONS_ERROR; +#endif + + return XZ_OK; +} + +/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ +static enum xz_ret dec_stream_footer(struct xz_dec *s) +{ + if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) + return XZ_DATA_ERROR; + + if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) + return XZ_DATA_ERROR; + + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. + */ + if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) + return XZ_DATA_ERROR; + + if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) + return XZ_DATA_ERROR; + + /* + * Use XZ_STREAM_END instead of XZ_OK to be more convenient + * for the caller. + */ + return XZ_STREAM_END; +} + +/* Decode the Block Header and initialize the filter chain. */ +static enum xz_ret dec_block_header(struct xz_dec *s) +{ + enum xz_ret ret; + + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + s->temp.size -= 4; + if (xz_crc32(s->temp.buf, s->temp.size, 0) + != get_le32(s->temp.buf + s->temp.size)) + return XZ_DATA_ERROR; + + s->temp.pos = 2; + + /* + * Catch unsupported Block Flags. We support only one or two filters + * in the chain, so we catch that with the same test. 
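+ *
+ * Per the .xz format specification referenced in xz_stream.h, bits 0-1 of
+ * Block Flags hold the number of extra filters, bits 2-5 are reserved and
+ * must be zero, bit 6 marks Compressed Size as present and bit 7 marks
+ * Uncompressed Size as present. Masking with 0x3E (or 0x3F when BCJ
+ * support is compiled out) therefore rejects the reserved bits together
+ * with filter counts that this decoder cannot handle.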
+ */ +#ifdef XZ_DEC_BCJ + if (s->temp.buf[1] & 0x3E) +#else + if (s->temp.buf[1] & 0x3F) +#endif + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if (s->temp.buf[1] & 0x40) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } else { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if (s->temp.buf[1] & 0x80) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } else { + s->block_header.uncompressed = VLI_UNKNOWN; + } + +#ifdef XZ_DEC_BCJ + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if (s->bcj_active) { + if (s->temp.size - s->temp.pos < 2) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + } +#endif + + /* Valid Filter Flags always take at least two bytes. */ + if (s->temp.size - s->temp.pos < 2) + return XZ_DATA_ERROR; + + /* Filter ID = LZMA2 */ + if (s->temp.buf[s->temp.pos++] != 0x21) + return XZ_OPTIONS_ERROR; + + /* Size of Properties = 1-byte Filter Properties */ + if (s->temp.buf[s->temp.pos++] != 0x01) + return XZ_OPTIONS_ERROR; + + /* Filter Properties contains LZMA2 dictionary size. */ + if (s->temp.size - s->temp.pos < 1) + return XZ_DATA_ERROR; + + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* The rest must be Header Padding. */ + while (s->temp.pos < s->temp.size) + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; + + return XZ_OK; +} + +static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while (true) { + switch (s->sequence) { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if (!fill_temp(s, b)) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). + */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if (ret != XZ_OK) + return ret; + + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if (b->in_pos == b->in_size) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if (b->in[b->in_pos] == 0) { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. 
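+ * The byte stores (real size / 4) - 1, so a value of 0x01 means an
+ * 8-byte header and the maximum 0xFF means 1024 bytes, which is why
+ * temp.buf is sized to 1 KiB.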
+ */ + s->block_header.size + = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + case SEQ_BLOCK_HEADER: + if (!fill_temp(s, b)) + return XZ_OK; + + ret = dec_block_header(s); + if (ret != XZ_OK) + return ret; + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while (s->block.compressed & 3) { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + case SEQ_BLOCK_CHECK: + if (s->check_type == XZ_CHECK_CRC32) { + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + } +#ifdef XZ_DEC_ANY_CHECK + else if (!check_skip(s, b)) { + return XZ_OK; + } +#endif + + s->sequence = SEQ_BLOCK_START; + break; + + case SEQ_INDEX: + ret = dec_index(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_INDEX_PADDING; + + case SEQ_INDEX_PADDING: + while ((s->index.size + (b->in_pos - s->in_start)) + & 3) { + if (b->in_pos == b->in_size) { + index_update(s, b); + return XZ_OK; + } + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + } + + /* Finish the CRC32 value and Index size. */ + index_update(s, b); + + /* Compare the hashes to validate the Index field. */ + if (!memeq(&s->block.hash, &s->index.hash, + sizeof(s->block.hash))) + return XZ_DATA_ERROR; + + s->sequence = SEQ_INDEX_CRC32; + + case SEQ_INDEX_CRC32: + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; + + case SEQ_STREAM_FOOTER: + if (!fill_temp(s, b)) + return XZ_OK; + + return dec_stream_footer(s); + } + } + + /* Never reached */ +} + +/* + * xz_dec_run() is a wrapper for dec_main() to handle some special cases in + * multi-call and single-call decoding. + * + * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we + * are not going to make any progress anymore. This is to prevent the caller + * from calling us infinitely when the input file is truncated or otherwise + * corrupt. Since zlib-style API allows that the caller fills the input buffer + * only when the decoder doesn't produce any new output, we have to be careful + * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only + * after the second consecutive call to xz_dec_run() that makes no progress. + * + * In single-call mode, if we couldn't decode everything and no error + * occurred, either the input is truncated or the output buffer is too small. + * Since we know that the last input byte never produces any output, we know + * that if all the input was consumed and decoding wasn't finished, the file + * must be corrupt. Otherwise the output buffer has to be too small or the + * file is corrupt in a way that decoding it produces too big output. + * + * If single-call decoding fails, we reset b->in_pos and b->out_pos back to + * their original values. 
This is because with some filter chains there won't + * be any valid uncompressed data in the output buffer unless the decoding + * actually succeeds (that's the price to pay of using the output buffer as + * the workspace). + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) +{ + size_t in_start; + size_t out_start; + enum xz_ret ret; + + if (DEC_IS_SINGLE(s->mode)) + xz_dec_reset(s); + + in_start = b->in_pos; + out_start = b->out_pos; + ret = dec_main(s, b); + + if (DEC_IS_SINGLE(s->mode)) { + if (ret == XZ_OK) + ret = b->in_pos == b->in_size + ? XZ_DATA_ERROR : XZ_BUF_ERROR; + + if (ret != XZ_STREAM_END) { + b->in_pos = in_start; + b->out_pos = out_start; + } + + } else if (ret == XZ_OK && in_start == b->in_pos + && out_start == b->out_pos) { + if (s->allow_buf_error) + ret = XZ_BUF_ERROR; + + s->allow_buf_error = true; + } else { + s->allow_buf_error = false; + } + + return ret; +} + +XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max) +{ + struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->mode = mode; + +#ifdef XZ_DEC_BCJ + s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); + if (s->bcj == NULL) + goto error_bcj; +#endif + + s->lzma2 = xz_dec_lzma2_create(mode, dict_max); + if (s->lzma2 == NULL) + goto error_lzma2; + + xz_dec_reset(s); + return s; + +error_lzma2: +#ifdef XZ_DEC_BCJ + xz_dec_bcj_end(s->bcj); +error_bcj: +#endif + kfree(s); + return NULL; +} + +XZ_EXTERN void xz_dec_reset(struct xz_dec *s) +{ + s->sequence = SEQ_STREAM_HEADER; + s->allow_buf_error = false; + s->pos = 0; + s->crc32 = 0; + memzero(&s->block, sizeof(s->block)); + memzero(&s->index, sizeof(s->index)); + s->temp.pos = 0; + s->temp.size = STREAM_HEADER_SIZE; +} + +XZ_EXTERN void xz_dec_end(struct xz_dec *s) +{ + if (s != NULL) { + xz_dec_lzma2_end(s->lzma2); +#ifdef XZ_DEC_BCJ + xz_dec_bcj_end(s->bcj); +#endif + kfree(s); + } +} diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c new file mode 100644 index 000000000000..32eb3c03aede --- /dev/null +++ b/lib/xz/xz_dec_syms.c @@ -0,0 +1,26 @@ +/* + * XZ decoder module information + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include +#include + +EXPORT_SYMBOL(xz_dec_init); +EXPORT_SYMBOL(xz_dec_reset); +EXPORT_SYMBOL(xz_dec_run); +EXPORT_SYMBOL(xz_dec_end); + +MODULE_DESCRIPTION("XZ decompressor"); +MODULE_VERSION("1.0"); +MODULE_AUTHOR("Lasse Collin and Igor Pavlov"); + +/* + * This code is in the public domain, but in Linux it's simplest to just + * say it's GPL and consider the authors as the copyright holders. + */ +MODULE_LICENSE("GPL"); diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c new file mode 100644 index 000000000000..da28a19d6c98 --- /dev/null +++ b/lib/xz/xz_dec_test.c @@ -0,0 +1,220 @@ +/* + * XZ decoder tester + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include +#include +#include +#include +#include +#include + +/* Maximum supported dictionary size */ +#define DICT_MAX (1 << 20) + +/* Device name to pass to register_chrdev(). */ +#define DEVICE_NAME "xz_dec_test" + +/* Dynamically allocated device major number */ +static int device_major; + +/* + * We reuse the same decoder state, and thus can decode only one + * file at a time. + */ +static bool device_is_open; + +/* XZ decoder state */ +static struct xz_dec *state; + +/* + * Return value of xz_dec_run(). 
We need to avoid calling xz_dec_run() after + * it has returned XZ_STREAM_END, so we make this static. + */ +static enum xz_ret ret; + +/* + * Input and output buffers. The input buffer is used as a temporary safe + * place for the data coming from the userspace. + */ +static uint8_t buffer_in[1024]; +static uint8_t buffer_out[1024]; + +/* + * Structure to pass the input and output buffers to the XZ decoder. + * A few of the fields are never modified so we initialize them here. + */ +static struct xz_buf buffers = { + .in = buffer_in, + .out = buffer_out, + .out_size = sizeof(buffer_out) +}; + +/* + * CRC32 of uncompressed data. This is used to give the user a simple way + * to check that the decoder produces correct output. + */ +static uint32_t crc; + +static int xz_dec_test_open(struct inode *i, struct file *f) +{ + if (device_is_open) + return -EBUSY; + + device_is_open = true; + + xz_dec_reset(state); + ret = XZ_OK; + crc = 0xFFFFFFFF; + + buffers.in_pos = 0; + buffers.in_size = 0; + buffers.out_pos = 0; + + printk(KERN_INFO DEVICE_NAME ": opened\n"); + return 0; +} + +static int xz_dec_test_release(struct inode *i, struct file *f) +{ + device_is_open = false; + + if (ret == XZ_OK) + printk(KERN_INFO DEVICE_NAME ": input was truncated\n"); + + printk(KERN_INFO DEVICE_NAME ": closed\n"); + return 0; +} + +/* + * Decode the data given to us from the userspace. CRC32 of the uncompressed + * data is calculated and is printed at the end of successful decoding. The + * uncompressed data isn't stored anywhere for further use. + * + * The .xz file must have exactly one Stream and no Stream Padding. The data + * after the first Stream is considered to be garbage. + */ +static ssize_t xz_dec_test_write(struct file *file, const char __user *buf, + size_t size, loff_t *pos) +{ + size_t remaining; + + if (ret != XZ_OK) { + if (size > 0) + printk(KERN_INFO DEVICE_NAME ": %zu bytes of " + "garbage at the end of the file\n", + size); + + return -ENOSPC; + } + + printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n", + size); + + remaining = size; + while ((remaining > 0 || buffers.out_pos == buffers.out_size) + && ret == XZ_OK) { + if (buffers.in_pos == buffers.in_size) { + buffers.in_pos = 0; + buffers.in_size = min(remaining, sizeof(buffer_in)); + if (copy_from_user(buffer_in, buf, buffers.in_size)) + return -EFAULT; + + buf += buffers.in_size; + remaining -= buffers.in_size; + } + + buffers.out_pos = 0; + ret = xz_dec_run(state, &buffers); + crc = crc32(crc, buffer_out, buffers.out_pos); + } + + switch (ret) { + case XZ_OK: + printk(KERN_INFO DEVICE_NAME ": XZ_OK\n"); + return size; + + case XZ_STREAM_END: + printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, " + "CRC32 = 0x%08X\n", ~crc); + return size - remaining - (buffers.in_size - buffers.in_pos); + + case XZ_MEMLIMIT_ERROR: + printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n"); + break; + + case XZ_FORMAT_ERROR: + printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n"); + break; + + case XZ_OPTIONS_ERROR: + printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n"); + break; + + case XZ_DATA_ERROR: + printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n"); + break; + + case XZ_BUF_ERROR: + printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n"); + break; + + default: + printk(KERN_INFO DEVICE_NAME ": Bug detected!\n"); + break; + } + + return -EIO; +} + +/* Allocate the XZ decoder state and register the character device. 
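+ * After loading, usage follows the printk below: create the device node
+ * (for instance "mknod /dev/xz_dec_test c <major> 0"; the /dev path and
+ * file names are only illustrative) and write an .xz file to it. On
+ * success the CRC32 of the decoded data is printed to the kernel log.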
*/ +static int __init xz_dec_test_init(void) +{ + static const struct file_operations fileops = { + .owner = THIS_MODULE, + .open = &xz_dec_test_open, + .release = &xz_dec_test_release, + .write = &xz_dec_test_write + }; + + state = xz_dec_init(XZ_PREALLOC, DICT_MAX); + if (state == NULL) + return -ENOMEM; + + device_major = register_chrdev(0, DEVICE_NAME, &fileops); + if (device_major < 0) { + xz_dec_end(state); + return device_major; + } + + printk(KERN_INFO DEVICE_NAME ": module loaded\n"); + printk(KERN_INFO DEVICE_NAME ": Create a device node with " + "'mknod " DEVICE_NAME " c %d 0' and write .xz files " + "to it.\n", device_major); + return 0; +} + +static void __exit xz_dec_test_exit(void) +{ + unregister_chrdev(device_major, DEVICE_NAME); + xz_dec_end(state); + printk(KERN_INFO DEVICE_NAME ": module unloaded\n"); +} + +module_init(xz_dec_test_init); +module_exit(xz_dec_test_exit); + +MODULE_DESCRIPTION("XZ decompressor tester"); +MODULE_VERSION("1.0"); +MODULE_AUTHOR("Lasse Collin "); + +/* + * This code is in the public domain, but in Linux it's simplest to just + * say it's GPL and consider the authors as the copyright holders. + */ +MODULE_LICENSE("GPL"); diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h new file mode 100644 index 000000000000..071d67bee9f5 --- /dev/null +++ b/lib/xz/xz_lzma2.h @@ -0,0 +1,204 @@ +/* + * LZMA2 definitions + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_LZMA2_H +#define XZ_LZMA2_H + +/* Range coder constants */ +#define RC_SHIFT_BITS 8 +#define RC_TOP_BITS 24 +#define RC_TOP_VALUE (1 << RC_TOP_BITS) +#define RC_BIT_MODEL_TOTAL_BITS 11 +#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS) +#define RC_MOVE_BITS 5 + +/* + * Maximum number of position states. A position state is the lowest pb + * number of bits of the current uncompressed offset. In some places there + * are different sets of probabilities for different position states. + */ +#define POS_STATES_MAX (1 << 4) + +/* + * This enum is used to track which LZMA symbols have occurred most recently + * and in which order. This information is used to predict the next symbol. + * + * Symbols: + * - Literal: One 8-bit byte + * - Match: Repeat a chunk of data at some distance + * - Long repeat: Multi-byte match at a recently seen distance + * - Short repeat: One-byte repeat at a recently seen distance + * + * The symbol names are in from STATE_oldest_older_previous. REP means + * either short or long repeated match, and NONLIT means any non-literal. + */ +enum lzma_state { + STATE_LIT_LIT, + STATE_MATCH_LIT_LIT, + STATE_REP_LIT_LIT, + STATE_SHORTREP_LIT_LIT, + STATE_MATCH_LIT, + STATE_REP_LIT, + STATE_SHORTREP_LIT, + STATE_LIT_MATCH, + STATE_LIT_LONGREP, + STATE_LIT_SHORTREP, + STATE_NONLIT_MATCH, + STATE_NONLIT_REP +}; + +/* Total number of states */ +#define STATES 12 + +/* The lowest 7 states indicate that the previous state was a literal. */ +#define LIT_STATES 7 + +/* Indicate that the latest symbol was a literal. */ +static inline void lzma_state_literal(enum lzma_state *state) +{ + if (*state <= STATE_SHORTREP_LIT_LIT) + *state = STATE_LIT_LIT; + else if (*state <= STATE_LIT_SHORTREP) + *state -= 3; + else + *state -= 6; +} + +/* Indicate that the latest symbol was a match. */ +static inline void lzma_state_match(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? 
STATE_LIT_MATCH : STATE_NONLIT_MATCH; +} + +/* Indicate that the latest state was a long repeated match. */ +static inline void lzma_state_long_rep(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP; +} + +/* Indicate that the latest symbol was a short match. */ +static inline void lzma_state_short_rep(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP; +} + +/* Test if the previous symbol was a literal. */ +static inline bool lzma_state_is_literal(enum lzma_state state) +{ + return state < LIT_STATES; +} + +/* Each literal coder is divided in three sections: + * - 0x001-0x0FF: Without match byte + * - 0x101-0x1FF: With match byte; match bit is 0 + * - 0x201-0x2FF: With match byte; match bit is 1 + * + * Match byte is used when the previous LZMA symbol was something else than + * a literal (that is, it was some kind of match). + */ +#define LITERAL_CODER_SIZE 0x300 + +/* Maximum number of literal coders */ +#define LITERAL_CODERS_MAX (1 << 4) + +/* Minimum length of a match is two bytes. */ +#define MATCH_LEN_MIN 2 + +/* Match length is encoded with 4, 5, or 10 bits. + * + * Length Bits + * 2-9 4 = Choice=0 + 3 bits + * 10-17 5 = Choice=1 + Choice2=0 + 3 bits + * 18-273 10 = Choice=1 + Choice2=1 + 8 bits + */ +#define LEN_LOW_BITS 3 +#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS) +#define LEN_MID_BITS 3 +#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS) +#define LEN_HIGH_BITS 8 +#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS) +#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS) + +/* + * Maximum length of a match is 273 which is a result of the encoding + * described above. + */ +#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1) + +/* + * Different sets of probabilities are used for match distances that have + * very short match length: Lengths of 2, 3, and 4 bytes have a separate + * set of probabilities for each length. The matches with longer length + * use a shared set of probabilities. + */ +#define DIST_STATES 4 + +/* + * Get the index of the appropriate probability array for decoding + * the distance slot. + */ +static inline uint32_t lzma_get_dist_state(uint32_t len) +{ + return len < DIST_STATES + MATCH_LEN_MIN + ? len - MATCH_LEN_MIN : DIST_STATES - 1; +} + +/* + * The highest two bits of a 32-bit match distance are encoded using six bits. + * This six-bit value is called a distance slot. This way encoding a 32-bit + * value takes 6-36 bits, larger values taking more bits. + */ +#define DIST_SLOT_BITS 6 +#define DIST_SLOTS (1 << DIST_SLOT_BITS) + +/* Match distances up to 127 are fully encoded using probabilities. Since + * the highest two bits (distance slot) are always encoded using six bits, + * the distances 0-3 don't need any additional bits to encode, since the + * distance slot itself is the same as the actual distance. DIST_MODEL_START + * indicates the first distance slot where at least one additional bit is + * needed. + */ +#define DIST_MODEL_START 4 + +/* + * Match distances greater than 127 are encoded in three pieces: + * - distance slot: the highest two bits + * - direct bits: 2-26 bits below the highest two bits + * - alignment bits: four lowest bits + * + * Direct bits don't use any probabilities. + * + * The distance slot value of 14 is for distances 128-191. + */ +#define DIST_MODEL_END 14 + +/* Distance slots that indicate a distance <= 127. 
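+ * With DIST_MODEL_END = 14 this works out to FULL_DISTANCES_BITS = 7 and
+ * FULL_DISTANCES = 1 << 7 = 128, i.e. the 128 distances 0-127.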
*/ +#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2) +#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS) + +/* + * For match distances greater than 127, only the highest two bits and the + * lowest four bits (alignment) is encoded using probabilities. + */ +#define ALIGN_BITS 4 +#define ALIGN_SIZE (1 << ALIGN_BITS) +#define ALIGN_MASK (ALIGN_SIZE - 1) + +/* Total number of all probability variables */ +#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE) + +/* + * LZMA remembers the four most recent match distances. Reusing these + * distances tends to take less space than re-encoding the actual + * distance value. + */ +#define REPS 4 + +#endif diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h new file mode 100644 index 000000000000..a65633e06962 --- /dev/null +++ b/lib/xz/xz_private.h @@ -0,0 +1,156 @@ +/* + * Private includes and definitions + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_PRIVATE_H +#define XZ_PRIVATE_H + +#ifdef __KERNEL__ +# include +# include +# include + /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ +# ifndef XZ_PREBOOT +# include +# include +# include +# ifdef CONFIG_XZ_DEC_X86 +# define XZ_DEC_X86 +# endif +# ifdef CONFIG_XZ_DEC_POWERPC +# define XZ_DEC_POWERPC +# endif +# ifdef CONFIG_XZ_DEC_IA64 +# define XZ_DEC_IA64 +# endif +# ifdef CONFIG_XZ_DEC_ARM +# define XZ_DEC_ARM +# endif +# ifdef CONFIG_XZ_DEC_ARMTHUMB +# define XZ_DEC_ARMTHUMB +# endif +# ifdef CONFIG_XZ_DEC_SPARC +# define XZ_DEC_SPARC +# endif +# define memeq(a, b, size) (memcmp(a, b, size) == 0) +# define memzero(buf, size) memset(buf, 0, size) +# endif +# define get_le32(p) le32_to_cpup((const uint32_t *)(p)) +#else + /* + * For userspace builds, use a separate header to define the required + * macros and functions. This makes it easier to adapt the code into + * different environments and avoids clutter in the Linux kernel tree. + */ +# include "xz_config.h" +#endif + +/* If no specific decoding mode is requested, enable support for all modes. */ +#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \ + && !defined(XZ_DEC_DYNALLOC) +# define XZ_DEC_SINGLE +# define XZ_DEC_PREALLOC +# define XZ_DEC_DYNALLOC +#endif + +/* + * The DEC_IS_foo(mode) macros are used in "if" statements. If only some + * of the supported modes are enabled, these macros will evaluate to true or + * false at compile time and thus allow the compiler to omit unneeded code. + */ +#ifdef XZ_DEC_SINGLE +# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE) +#else +# define DEC_IS_SINGLE(mode) (false) +#endif + +#ifdef XZ_DEC_PREALLOC +# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC) +#else +# define DEC_IS_PREALLOC(mode) (false) +#endif + +#ifdef XZ_DEC_DYNALLOC +# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC) +#else +# define DEC_IS_DYNALLOC(mode) (false) +#endif + +#if !defined(XZ_DEC_SINGLE) +# define DEC_IS_MULTI(mode) (true) +#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC) +# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE) +#else +# define DEC_IS_MULTI(mode) (false) +#endif + +/* + * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ. + * XZ_DEC_BCJ is used to enable generic support for BCJ decoders. 
+ */ +#ifndef XZ_DEC_BCJ +# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \ + || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \ + || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \ + || defined(XZ_DEC_SPARC) +# define XZ_DEC_BCJ +# endif +#endif + +/* + * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used + * before calling xz_dec_lzma2_run(). + */ +XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, + uint32_t dict_max); + +/* + * Decode the LZMA2 properties (one byte) and reset the decoder. Return + * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not + * big enough, and XZ_OPTIONS_ERROR if props indicates something that this + * decoder doesn't support. + */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, + uint8_t props); + +/* Decode raw LZMA2 stream from b->in to b->out. */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, + struct xz_buf *b); + +/* Free the memory allocated for the LZMA2 decoder. */ +XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s); + +#ifdef XZ_DEC_BCJ +/* + * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before + * calling xz_dec_bcj_run(). + */ +XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call); + +/* + * Decode the Filter ID of a BCJ filter. This implementation doesn't + * support custom start offsets, so no decoding of Filter Properties + * is needed. Returns XZ_OK if the given Filter ID is supported. + * Otherwise XZ_OPTIONS_ERROR is returned. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id); + +/* + * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is + * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run() + * must be called directly. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, + struct xz_buf *b); + +/* Free the memory allocated for the BCJ filters. */ +#define xz_dec_bcj_end(s) kfree(s) +#endif + +#endif diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h new file mode 100644 index 000000000000..66cb5a7055ec --- /dev/null +++ b/lib/xz/xz_stream.h @@ -0,0 +1,62 @@ +/* + * Definitions for handling the .xz file format + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_STREAM_H +#define XZ_STREAM_H + +#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32 +# include +# undef crc32 +# define xz_crc32(buf, size, crc) \ + (~crc32_le(~(uint32_t)(crc), buf, size)) +#endif + +/* + * See the .xz file format specification at + * http://tukaani.org/xz/xz-file-format.txt + * to understand the container format. + */ + +#define STREAM_HEADER_SIZE 12 + +#define HEADER_MAGIC "\3757zXZ" +#define HEADER_MAGIC_SIZE 6 + +#define FOOTER_MAGIC "YZ" +#define FOOTER_MAGIC_SIZE 2 + +/* + * Variable-length integer can hold a 63-bit unsigned integer or a special + * value indicating that the value is unknown. + * + * Experimental: vli_type can be defined to uint32_t to save a few bytes + * in code size (no effect on speed). Doing so limits the uncompressed and + * compressed size of the file to less than 256 MiB and may also weaken + * error detection slightly. 
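+ *
+ * For reference, the encoding itself (see dec_vli()) is little-endian
+ * base-128: seven payload bits per byte, with the high bit set on every
+ * byte except the last. For example, 300 (0x12C) is stored as the two
+ * bytes 0xAC 0x02.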
+ */ +typedef uint64_t vli_type; + +#define VLI_MAX ((vli_type)-1 / 2) +#define VLI_UNKNOWN ((vli_type)-1) + +/* Maximum encoded size of a VLI */ +#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7) + +/* Integrity Check types */ +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10 +}; + +/* Maximum possible Check ID */ +#define XZ_CHECK_MAX 15 + +#endif diff --git a/mm/Kconfig b/mm/Kconfig index c2c8a4a11898..3ad483bdf505 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -302,6 +302,44 @@ config NOMMU_INITIAL_TRIM_EXCESS See Documentation/nommu-mmap.txt for more information. +config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" + depends on X86 && MMU + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and + huge tlb transparently to the applications whenever possible. + This feature can improve computing performance to certain + applications by speeding up page faults during memory + allocation, by reducing the number of tlb misses and by speeding + up the pagetable walking. + + If memory constrained on embedded, you may want to say N. + +choice + prompt "Transparent Hugepage Support sysfs defaults" + depends on TRANSPARENT_HUGEPAGE + default TRANSPARENT_HUGEPAGE_ALWAYS + help + Selects the sysfs defaults for Transparent Hugepage Support. + + config TRANSPARENT_HUGEPAGE_ALWAYS + bool "always" + help + Enabling Transparent Hugepage always, can increase the + memory footprint of applications without a guaranteed + benefit but it will work automatically for all applications. + + config TRANSPARENT_HUGEPAGE_MADVISE + bool "madvise" + help + Enabling Transparent Hugepage madvise, will only provide a + performance improvement benefit to the applications using + madvise(MADV_HUGEPAGE) but it won't risk to increase the + memory footprint of applications without a guaranteed + benefit. +endchoice + # # UP and nommu archs use km based percpu allocator # diff --git a/mm/Makefile b/mm/Makefile index f73f75a29f82..2b1b575ae712 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -5,7 +5,7 @@ mmu-y := nommu.o mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ - vmalloc.o pagewalk.o + vmalloc.o pagewalk.o pgtable-generic.o obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ @@ -37,6 +37,7 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o diff --git a/mm/compaction.c b/mm/compaction.c index 1a8894eadf72..6d592a021072 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -16,6 +16,9 @@ #include #include "internal.h" +#define CREATE_TRACE_POINTS +#include + /* * compact_control is used to track pages being migrated and the free pages * they are being migrated to during memory compaction. 
The free_pfn starts @@ -30,6 +33,7 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ + bool sync; /* Synchronous migration */ /* Account for isolated anon and file pages */ unsigned long nr_anon; @@ -38,6 +42,8 @@ struct compact_control { unsigned int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; + + int compact_mode; }; static unsigned long release_freepages(struct list_head *freelist) @@ -60,7 +66,7 @@ static unsigned long isolate_freepages_block(struct zone *zone, struct list_head *freelist) { unsigned long zone_end_pfn, end_pfn; - int total_isolated = 0; + int nr_scanned = 0, total_isolated = 0; struct page *cursor; /* Get the last PFN we should scan for free pages at */ @@ -81,6 +87,7 @@ static unsigned long isolate_freepages_block(struct zone *zone, if (!pfn_valid_within(blockpfn)) continue; + nr_scanned++; if (!PageBuddy(page)) continue; @@ -100,6 +107,7 @@ static unsigned long isolate_freepages_block(struct zone *zone, } } + trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); return total_isolated; } @@ -234,6 +242,8 @@ static unsigned long isolate_migratepages(struct zone *zone, struct compact_control *cc) { unsigned long low_pfn, end_pfn; + unsigned long last_pageblock_nr = 0, pageblock_nr; + unsigned long nr_scanned = 0, nr_isolated = 0; struct list_head *migratelist = &cc->migratepages; /* Do not scan outside zone boundaries */ @@ -266,20 +276,51 @@ static unsigned long isolate_migratepages(struct zone *zone, struct page *page; if (!pfn_valid_within(low_pfn)) continue; + nr_scanned++; /* Get the page and skip if free */ page = pfn_to_page(low_pfn); if (PageBuddy(page)) continue; + /* + * For async migration, also only scan in MOVABLE blocks. Async + * migration is optimistic to see if the minimum amount of work + * satisfies the allocation + */ + pageblock_nr = low_pfn >> pageblock_order; + if (!cc->sync && last_pageblock_nr != pageblock_nr && + get_pageblock_migratetype(page) != MIGRATE_MOVABLE) { + low_pfn += pageblock_nr_pages; + low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; + last_pageblock_nr = pageblock_nr; + continue; + } + + if (!PageLRU(page)) + continue; + + /* + * PageLRU is set, and lru_lock excludes isolation, + * splitting and collapsing (collapsing has already + * happened if PageLRU is set). 
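+ *
+ * A transparent huge page covers 1 << compound_order(page) base pages
+ * (typically 512 with 2 MiB huge pages and 4 KiB base pages), so the
+ * code below advances low_pfn to the end of the huge page and lets the
+ * loop increment step past it.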
+ */ + if (PageTransHuge(page)) { + low_pfn += (1 << compound_order(page)) - 1; + continue; + } + /* Try isolate the page */ if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0) continue; + VM_BUG_ON(PageTransCompound(page)); + /* Successfully isolated */ del_page_from_lru_list(zone, page, page_lru(page)); list_add(&page->lru, migratelist); cc->nr_migratepages++; + nr_isolated++; /* Avoid isolating too much */ if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) @@ -291,6 +332,8 @@ static unsigned long isolate_migratepages(struct zone *zone, spin_unlock_irq(&zone->lru_lock); cc->migrate_pfn = low_pfn; + trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); + return cc->nr_migratepages; } @@ -341,10 +384,10 @@ static void update_nr_listpages(struct compact_control *cc) } static int compact_finished(struct zone *zone, - struct compact_control *cc) + struct compact_control *cc) { unsigned int order; - unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order); + unsigned long watermark; if (fatal_signal_pending(current)) return COMPACT_PARTIAL; @@ -354,12 +397,27 @@ static int compact_finished(struct zone *zone, return COMPACT_COMPLETE; /* Compaction run is not finished if the watermark is not met */ + if (cc->compact_mode != COMPACT_MODE_KSWAPD) + watermark = low_wmark_pages(zone); + else + watermark = high_wmark_pages(zone); + watermark += (1 << cc->order); + if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) return COMPACT_CONTINUE; if (cc->order == -1) return COMPACT_CONTINUE; + /* + * Generating only one page of the right order is not enough + * for kswapd, we must continue until we're above the high + * watermark as a pool for high order GFP_ATOMIC allocations + * too. + */ + if (cc->compact_mode == COMPACT_MODE_KSWAPD) + return COMPACT_CONTINUE; + /* Direct compactor: Is a suitable page free? */ for (order = cc->order; order < MAX_ORDER; order++) { /* Job done if page is free of the right migratetype */ @@ -374,10 +432,62 @@ static int compact_finished(struct zone *zone, return COMPACT_CONTINUE; } +/* + * compaction_suitable: Is this suitable to run compaction on this zone now? + * Returns + * COMPACT_SKIPPED - If there are too few free pages for compaction + * COMPACT_PARTIAL - If the allocation would succeed without compaction + * COMPACT_CONTINUE - If compaction should run now + */ +unsigned long compaction_suitable(struct zone *zone, int order) +{ + int fragindex; + unsigned long watermark; + + /* + * Watermarks for order-0 must be met for compaction. Note the 2UL. + * This is because during migration, copies of pages need to be + * allocated and for a short time, the footprint is higher + */ + watermark = low_wmark_pages(zone) + (2UL << order); + if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) + return COMPACT_SKIPPED; + + /* + * fragmentation index determines if allocation failures are due to + * low memory or external fragmentation + * + * index of -1 implies allocations might succeed dependingon watermarks + * index towards 0 implies failure is due to lack of memory + * index towards 1000 implies failure is due to fragmentation + * + * Only compact if a failure would be due to fragmentation. 
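+ *
+ * With the default sysctl_extfrag_threshold of 500, an index in the
+ * range 0-500 (failure mostly due to a lack of free memory) returns
+ * COMPACT_SKIPPED, while an index above 500 lets compaction continue.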
+ */ + fragindex = fragmentation_index(zone, order); + if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) + return COMPACT_SKIPPED; + + if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) + return COMPACT_PARTIAL; + + return COMPACT_CONTINUE; +} + static int compact_zone(struct zone *zone, struct compact_control *cc) { int ret; + ret = compaction_suitable(zone, cc->order); + switch (ret) { + case COMPACT_PARTIAL: + case COMPACT_SKIPPED: + /* Compaction is likely to fail */ + return ret; + case COMPACT_CONTINUE: + /* Fall through to compaction */ + ; + } + /* Setup to move all movable pages to the end of the zone */ cc->migrate_pfn = zone->zone_start_pfn; cc->free_pfn = cc->migrate_pfn + zone->spanned_pages; @@ -393,7 +503,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) nr_migrate = cc->nr_migratepages; migrate_pages(&cc->migratepages, compaction_alloc, - (unsigned long)cc, 0); + (unsigned long)cc, false, + cc->sync); update_nr_listpages(cc); nr_remaining = cc->nr_migratepages; @@ -401,6 +512,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining); if (nr_remaining) count_vm_events(COMPACTPAGEFAILED, nr_remaining); + trace_mm_compaction_migratepages(nr_migrate - nr_remaining, + nr_remaining); /* Release LRU pages not migrated */ if (!list_empty(&cc->migratepages)) { @@ -417,8 +530,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) return ret; } -static unsigned long compact_zone_order(struct zone *zone, - int order, gfp_t gfp_mask) +unsigned long compact_zone_order(struct zone *zone, + int order, gfp_t gfp_mask, + bool sync, + int compact_mode) { struct compact_control cc = { .nr_freepages = 0, @@ -426,6 +541,8 @@ static unsigned long compact_zone_order(struct zone *zone, .order = order, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, + .sync = sync, + .compact_mode = compact_mode, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -441,16 +558,17 @@ int sysctl_extfrag_threshold = 500; * @order: The order of the current allocation * @gfp_mask: The GFP mask of the current allocation * @nodemask: The allowed nodes to allocate from + * @sync: Whether migration is synchronous or not * * This is the main entry point for direct page compaction. */ unsigned long try_to_compact_pages(struct zonelist *zonelist, - int order, gfp_t gfp_mask, nodemask_t *nodemask) + int order, gfp_t gfp_mask, nodemask_t *nodemask, + bool sync) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; int may_perform_io = gfp_mask & __GFP_IO; - unsigned long watermark; struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; @@ -460,7 +578,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, * made because an assumption is made that the page allocator can satisfy * the "cheaper" orders without taking special steps */ - if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io) + if (!order || !may_enter_fs || !may_perform_io) return rc; count_vm_event(COMPACTSTALL); @@ -468,43 +586,14 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { - int fragindex; int status; - /* - * Watermarks for order-0 must be met for compaction. Note - * the 2UL. 
This is because during migration, copies of - * pages need to be allocated and for a short time, the - * footprint is higher - */ - watermark = low_wmark_pages(zone) + (2UL << order); - if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) - continue; - - /* - * fragmentation index determines if allocation failures are - * due to low memory or external fragmentation - * - * index of -1 implies allocations might succeed depending - * on watermarks - * index towards 0 implies failure is due to lack of memory - * index towards 1000 implies failure is due to fragmentation - * - * Only compact if a failure would be due to fragmentation. - */ - fragindex = fragmentation_index(zone, order); - if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) - continue; - - if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) { - rc = COMPACT_PARTIAL; - break; - } - - status = compact_zone_order(zone, order, gfp_mask); + status = compact_zone_order(zone, order, gfp_mask, sync, + COMPACT_MODE_DIRECT_RECLAIM); rc = max(status, rc); - if (zone_watermark_ok(zone, order, watermark, 0, 0)) + /* If a normal allocation would succeed, stop compacting */ + if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) break; } @@ -531,6 +620,7 @@ static int compact_node(int nid) .nr_freepages = 0, .nr_migratepages = 0, .order = -1, + .compact_mode = COMPACT_MODE_DIRECT_RECLAIM, }; zone = &pgdat->node_zones[zoneid]; diff --git a/mm/dmapool.c b/mm/dmapool.c index 4df2de77e069..03bf3bb4519a 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -324,7 +324,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, if (mem_flags & __GFP_WAIT) { DECLARE_WAITQUEUE(wait, current); - __set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_UNINTERRUPTIBLE); __add_wait_queue(&pool->waitq, &wait); spin_unlock_irqrestore(&pool->lock, flags); @@ -355,20 +355,15 @@ EXPORT_SYMBOL(dma_pool_alloc); static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) { - unsigned long flags; struct dma_page *page; - spin_lock_irqsave(&pool->lock, flags); list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; if (dma < (page->dma + pool->allocation)) - goto done; + return page; } - page = NULL; - done: - spin_unlock_irqrestore(&pool->lock, flags); - return page; + return NULL; } /** @@ -386,8 +381,10 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) unsigned long flags; unsigned int offset; + spin_lock_irqsave(&pool->lock, flags); page = pool_find_page(pool, dma); if (!page) { + spin_unlock_irqrestore(&pool->lock, flags); if (pool->dev) dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n", @@ -401,6 +398,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) offset = vaddr - page->vaddr; #ifdef DMAPOOL_DEBUG if ((dma - page->dma) != offset) { + spin_unlock_irqrestore(&pool->lock, flags); if (pool->dev) dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n", @@ -418,6 +416,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) chain = *(int *)(page->vaddr + chain); continue; } + spin_unlock_irqrestore(&pool->lock, flags); if (pool->dev) dev_err(pool->dev, "dma_pool_free %s, dma %Lx " "already free\n", pool->name, @@ -432,7 +431,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) memset(vaddr, POOL_POISON_FREED, pool->size); #endif - spin_lock_irqsave(&pool->lock, flags); page->in_use--; *(int *)vaddr = page->offset; page->offset = offset; diff --git a/mm/filemap.c 
b/mm/filemap.c index ca389394fa2a..83a45d35468b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -298,7 +298,7 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, continue; wait_on_page_writeback(page); - if (PageError(page)) + if (TestClearPageError(page)) ret = -EIO; } pagevec_release(&pvec); @@ -837,9 +837,6 @@ repeat: if (radix_tree_deref_retry(page)) goto restart; - if (page->mapping == NULL || page->index != index) - break; - if (!page_cache_get_speculative(page)) goto repeat; @@ -849,6 +846,16 @@ repeat: goto repeat; } + /* + * must check mapping and index after taking the ref. + * otherwise we can get both false positives and false + * negatives, which is just confusing to the caller. + */ + if (page->mapping == NULL || page->index != index) { + page_cache_release(page); + break; + } + pages[ret] = page; ret++; index++; @@ -2220,7 +2227,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping, gfp_notmask = __GFP_FS; repeat: page = find_lock_page(mapping, index); - if (likely(page)) + if (page) return page; page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); diff --git a/mm/huge_memory.c b/mm/huge_memory.c new file mode 100644 index 000000000000..004c9c2aac78 --- /dev/null +++ b/mm/huge_memory.c @@ -0,0 +1,2346 @@ +/* + * Copyright (C) 2009 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* + * By default transparent hugepage support is enabled for all mappings + * and khugepaged scans all mappings. Defrag is only invoked by + * khugepaged hugepage allocations and by page faults inside + * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived + * allocations. 
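+ *
+ * The mode can be changed at runtime through the sysfs files created
+ * below; for example (illustrative command, assuming the usual sysfs
+ * mount point):
+ *
+ *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
+ *
+ * "always", "madvise" and "never" are the accepted values.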
+ */ +unsigned long transparent_hugepage_flags __read_mostly = +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS + (1< min_free_kbytes) + min_free_kbytes = recommended_min; + setup_per_zone_wmarks(); + return 0; +} +late_initcall(set_recommended_min_free_kbytes); + +static int start_khugepaged(void) +{ + int err = 0; + if (khugepaged_enabled()) { + int wakeup; + if (unlikely(!mm_slot_cache || !mm_slots_hash)) { + err = -ENOMEM; + goto out; + } + mutex_lock(&khugepaged_mutex); + if (!khugepaged_thread) + khugepaged_thread = kthread_run(khugepaged, NULL, + "khugepaged"); + if (unlikely(IS_ERR(khugepaged_thread))) { + printk(KERN_ERR + "khugepaged: kthread_run(khugepaged) failed\n"); + err = PTR_ERR(khugepaged_thread); + khugepaged_thread = NULL; + } + wakeup = !list_empty(&khugepaged_scan.mm_head); + mutex_unlock(&khugepaged_mutex); + if (wakeup) + wake_up_interruptible(&khugepaged_wait); + + set_recommended_min_free_kbytes(); + } else + /* wakeup to exit */ + wake_up_interruptible(&khugepaged_wait); +out: + return err; +} + +#ifdef CONFIG_SYSFS + +static ssize_t double_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag enabled, + enum transparent_hugepage_flag req_madv) +{ + if (test_bit(enabled, &transparent_hugepage_flags)) { + VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); + return sprintf(buf, "[always] madvise never\n"); + } else if (test_bit(req_madv, &transparent_hugepage_flags)) + return sprintf(buf, "always [madvise] never\n"); + else + return sprintf(buf, "always madvise [never]\n"); +} +static ssize_t double_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag enabled, + enum transparent_hugepage_flag req_madv) +{ + if (!memcmp("always", buf, + min(sizeof("always")-1, count))) { + set_bit(enabled, &transparent_hugepage_flags); + clear_bit(req_madv, &transparent_hugepage_flags); + } else if (!memcmp("madvise", buf, + min(sizeof("madvise")-1, count))) { + clear_bit(enabled, &transparent_hugepage_flags); + set_bit(req_madv, &transparent_hugepage_flags); + } else if (!memcmp("never", buf, + min(sizeof("never")-1, count))) { + clear_bit(enabled, &transparent_hugepage_flags); + clear_bit(req_madv, &transparent_hugepage_flags); + } else + return -EINVAL; + + return count; +} + +static ssize_t enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return double_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_FLAG, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); +} +static ssize_t enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + ssize_t ret; + + ret = double_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_FLAG, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); + + if (ret > 0) { + int err = start_khugepaged(); + if (err) + ret = err; + } + + if (ret > 0 && + (test_bit(TRANSPARENT_HUGEPAGE_FLAG, + &transparent_hugepage_flags) || + test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + &transparent_hugepage_flags))) + set_recommended_min_free_kbytes(); + + return ret; +} +static struct kobj_attribute enabled_attr = + __ATTR(enabled, 0644, enabled_show, enabled_store); + +static ssize_t single_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag) +{ + if (test_bit(flag, &transparent_hugepage_flags)) + return sprintf(buf, "[yes] no\n"); + else + return sprintf(buf, "yes [no]\n"); +} +static ssize_t single_flag_store(struct kobject 
*kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag) +{ + if (!memcmp("yes", buf, + min(sizeof("yes")-1, count))) { + set_bit(flag, &transparent_hugepage_flags); + } else if (!memcmp("no", buf, + min(sizeof("no")-1, count))) { + clear_bit(flag, &transparent_hugepage_flags); + } else + return -EINVAL; + + return count; +} + +/* + * Currently defrag only disables __GFP_NOWAIT for allocation. A blind + * __GFP_REPEAT is too aggressive, it's never worth swapping tons of + * memory just to allocate one more hugepage. + */ +static ssize_t defrag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return double_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); +} +static ssize_t defrag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return double_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); +} +static struct kobj_attribute defrag_attr = + __ATTR(defrag, 0644, defrag_show, defrag_store); + +#ifdef CONFIG_DEBUG_VM +static ssize_t debug_cow_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return single_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); +} +static ssize_t debug_cow_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return single_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); +} +static struct kobj_attribute debug_cow_attr = + __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); +#endif /* CONFIG_DEBUG_VM */ + +static struct attribute *hugepage_attr[] = { + &enabled_attr.attr, + &defrag_attr.attr, +#ifdef CONFIG_DEBUG_VM + &debug_cow_attr.attr, +#endif + NULL, +}; + +static struct attribute_group hugepage_attr_group = { + .attrs = hugepage_attr, +}; + +static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); +} + +static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long msecs; + int err; + + err = strict_strtoul(buf, 10, &msecs); + if (err || msecs > UINT_MAX) + return -EINVAL; + + khugepaged_scan_sleep_millisecs = msecs; + wake_up_interruptible(&khugepaged_wait); + + return count; +} +static struct kobj_attribute scan_sleep_millisecs_attr = + __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, + scan_sleep_millisecs_store); + +static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); +} + +static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long msecs; + int err; + + err = strict_strtoul(buf, 10, &msecs); + if (err || msecs > UINT_MAX) + return -EINVAL; + + khugepaged_alloc_sleep_millisecs = msecs; + wake_up_interruptible(&khugepaged_wait); + + return count; +} +static struct kobj_attribute alloc_sleep_millisecs_attr = + __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, + alloc_sleep_millisecs_store); + +static ssize_t pages_to_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_pages_to_scan); +} +static ssize_t 
pages_to_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long pages; + + err = strict_strtoul(buf, 10, &pages); + if (err || !pages || pages > UINT_MAX) + return -EINVAL; + + khugepaged_pages_to_scan = pages; + + return count; +} +static struct kobj_attribute pages_to_scan_attr = + __ATTR(pages_to_scan, 0644, pages_to_scan_show, + pages_to_scan_store); + +static ssize_t pages_collapsed_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_pages_collapsed); +} +static struct kobj_attribute pages_collapsed_attr = + __ATTR_RO(pages_collapsed); + +static ssize_t full_scans_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_full_scans); +} +static struct kobj_attribute full_scans_attr = + __ATTR_RO(full_scans); + +static ssize_t khugepaged_defrag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return single_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); +} +static ssize_t khugepaged_defrag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return single_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); +} +static struct kobj_attribute khugepaged_defrag_attr = + __ATTR(defrag, 0644, khugepaged_defrag_show, + khugepaged_defrag_store); + +/* + * max_ptes_none controls if khugepaged should collapse hugepages over + * any unmapped ptes in turn potentially increasing the memory + * footprint of the vmas. When max_ptes_none is 0 khugepaged will not + * reduce the available free memory in the system as it + * runs. Increasing max_ptes_none will instead potentially reduce the + * free memory in the system during the khugepaged scan. 
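+ *
+ * The value is capped at HPAGE_PMD_NR - 1 (511 with 2 MiB huge pages
+ * and 4 KiB base pages), i.e. a whole huge page minus one PTE.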
+ */ +static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_max_ptes_none); +} +static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long max_ptes_none; + + err = strict_strtoul(buf, 10, &max_ptes_none); + if (err || max_ptes_none > HPAGE_PMD_NR-1) + return -EINVAL; + + khugepaged_max_ptes_none = max_ptes_none; + + return count; +} +static struct kobj_attribute khugepaged_max_ptes_none_attr = + __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, + khugepaged_max_ptes_none_store); + +static struct attribute *khugepaged_attr[] = { + &khugepaged_defrag_attr.attr, + &khugepaged_max_ptes_none_attr.attr, + &pages_to_scan_attr.attr, + &pages_collapsed_attr.attr, + &full_scans_attr.attr, + &scan_sleep_millisecs_attr.attr, + &alloc_sleep_millisecs_attr.attr, + NULL, +}; + +static struct attribute_group khugepaged_attr_group = { + .attrs = khugepaged_attr, + .name = "khugepaged", +}; +#endif /* CONFIG_SYSFS */ + +static int __init hugepage_init(void) +{ + int err; +#ifdef CONFIG_SYSFS + static struct kobject *hugepage_kobj; +#endif + + err = -EINVAL; + if (!has_transparent_hugepage()) { + transparent_hugepage_flags = 0; + goto out; + } + +#ifdef CONFIG_SYSFS + err = -ENOMEM; + hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); + if (unlikely(!hugepage_kobj)) { + printk(KERN_ERR "hugepage: failed kobject create\n"); + goto out; + } + + err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group); + if (err) { + printk(KERN_ERR "hugepage: failed to register hugepage group\n"); + goto out; + } + + err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group); + if (err) { + printk(KERN_ERR "hugepage: failed to register khugepaged group\n"); + goto out; + } +#endif + + err = khugepaged_slab_init(); + if (err) + goto out; + + err = mm_slots_hash_init(); + if (err) { + khugepaged_slab_free(); + goto out; + } + + /* + * By default disable transparent hugepages on smaller systems, + * where the extra memory used could hurt more than TLB overhead + * is likely to save. The admin can still enable it through /sys. 
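The sysfs wiring above hangs the "khugepaged" attribute group off the new "transparent_hugepage" kobject. On a kernel where mm_kobj is exposed as /sys/kernel/mm (the usual mainline layout, assumed here), the tunables show up as /sys/kernel/mm/transparent_hugepage/khugepaged/*. A minimal userspace sketch, not part of the patch, that reads and adjusts those knobs:

    /* Sketch only: poke the khugepaged sysfs knobs added by this patch.
     * Assumes mm_kobj == /sys/kernel/mm; writes need root. */
    #include <stdio.h>

    #define KHUGEPAGED_DIR "/sys/kernel/mm/transparent_hugepage/khugepaged/"

    static unsigned long read_knob(const char *name)
    {
        char path[256];
        unsigned long val = 0;
        FILE *f;

        snprintf(path, sizeof(path), KHUGEPAGED_DIR "%s", name);
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 0;
        }
        if (fscanf(f, "%lu", &val) != 1)
            val = 0;
        fclose(f);
        return val;
    }

    static int write_knob(const char *name, unsigned long val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), KHUGEPAGED_DIR "%s", name);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return -1;
        }
        fprintf(f, "%lu\n", val);
        return fclose(f);
    }

    int main(void)
    {
        printf("pages_to_scan=%lu full_scans=%lu pages_collapsed=%lu\n",
               read_knob("pages_to_scan"), read_knob("full_scans"),
               read_knob("pages_collapsed"));
        /* Scan less aggressively: longer sleep, fewer ptes per pass. */
        write_knob("scan_sleep_millisecs", 30000);
        write_knob("pages_to_scan", 2048);
        return 0;
    }

The store handlers above reject non-numeric input and out-of-range values with -EINVAL, so a failed write shows up as a short write from userspace.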
+ */ + if (totalram_pages < (512 << (20 - PAGE_SHIFT))) + transparent_hugepage_flags = 0; + + start_khugepaged(); + + set_recommended_min_free_kbytes(); + +out: + return err; +} +module_init(hugepage_init) + +static int __init setup_transparent_hugepage(char *str) +{ + int ret = 0; + if (!str) + goto out; + if (!strcmp(str, "always")) { + set_bit(TRANSPARENT_HUGEPAGE_FLAG, + &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + &transparent_hugepage_flags); + ret = 1; + } else if (!strcmp(str, "madvise")) { + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, + &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + &transparent_hugepage_flags); + ret = 1; + } else if (!strcmp(str, "never")) { + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, + &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + &transparent_hugepage_flags); + ret = 1; + } +out: + if (!ret) + printk(KERN_WARNING + "transparent_hugepage= cannot parse, ignored\n"); + return ret; +} +__setup("transparent_hugepage=", setup_transparent_hugepage); + +static void prepare_pmd_huge_pte(pgtable_t pgtable, + struct mm_struct *mm) +{ + assert_spin_locked(&mm->page_table_lock); + + /* FIFO */ + if (!mm->pmd_huge_pte) + INIT_LIST_HEAD(&pgtable->lru); + else + list_add(&pgtable->lru, &mm->pmd_huge_pte->lru); + mm->pmd_huge_pte = pgtable; +} + +static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) +{ + if (likely(vma->vm_flags & VM_WRITE)) + pmd = pmd_mkwrite(pmd); + return pmd; +} + +static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long haddr, pmd_t *pmd, + struct page *page) +{ + int ret = 0; + pgtable_t pgtable; + + VM_BUG_ON(!PageCompound(page)); + pgtable = pte_alloc_one(mm, haddr); + if (unlikely(!pgtable)) { + mem_cgroup_uncharge_page(page); + put_page(page); + return VM_FAULT_OOM; + } + + clear_huge_page(page, haddr, HPAGE_PMD_NR); + __SetPageUptodate(page); + + spin_lock(&mm->page_table_lock); + if (unlikely(!pmd_none(*pmd))) { + spin_unlock(&mm->page_table_lock); + mem_cgroup_uncharge_page(page); + put_page(page); + pte_free(mm, pgtable); + } else { + pmd_t entry; + entry = mk_pmd(page, vma->vm_page_prot); + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); + entry = pmd_mkhuge(entry); + /* + * The spinlocking to take the lru_lock inside + * page_add_new_anon_rmap() acts as a full memory + * barrier to be sure clear_huge_page writes become + * visible after the set_pmd_at() write. + */ + page_add_new_anon_rmap(page, vma, haddr); + set_pmd_at(mm, haddr, pmd, entry); + prepare_pmd_huge_pte(pgtable, mm); + add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); + spin_unlock(&mm->page_table_lock); + } + + return ret; +} + +static inline gfp_t alloc_hugepage_gfpmask(int defrag) +{ + return GFP_TRANSHUGE & ~(defrag ? 
0 : __GFP_WAIT); +} + +static inline struct page *alloc_hugepage_vma(int defrag, + struct vm_area_struct *vma, + unsigned long haddr) +{ + return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), + HPAGE_PMD_ORDER, vma, haddr); +} + +#ifndef CONFIG_NUMA +static inline struct page *alloc_hugepage(int defrag) +{ + return alloc_pages(alloc_hugepage_gfpmask(defrag), + HPAGE_PMD_ORDER); +} +#endif + +int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + unsigned int flags) +{ + struct page *page; + unsigned long haddr = address & HPAGE_PMD_MASK; + pte_t *pte; + + if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { + if (unlikely(anon_vma_prepare(vma))) + return VM_FAULT_OOM; + if (unlikely(khugepaged_enter(vma))) + return VM_FAULT_OOM; + page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), + vma, haddr); + if (unlikely(!page)) + goto out; + if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { + put_page(page); + goto out; + } + + return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page); + } +out: + /* + * Use __pte_alloc instead of pte_alloc_map, because we can't + * run pte_offset_map on the pmd, if an huge pmd could + * materialize from under us from a different thread. + */ + if (unlikely(__pte_alloc(mm, vma, pmd, address))) + return VM_FAULT_OOM; + /* if an huge pmd materialized from under us just retry later */ + if (unlikely(pmd_trans_huge(*pmd))) + return 0; + /* + * A regular pmd is established and it can't morph into a huge pmd + * from under us anymore at this point because we hold the mmap_sem + * read mode and khugepaged takes it in write mode. So now it's + * safe to run pte_offset_map(). + */ + pte = pte_offset_map(pmd, address); + return handle_pte_fault(mm, vma, address, pte, pmd, flags); +} + +int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma) +{ + struct page *src_page; + pmd_t pmd; + pgtable_t pgtable; + int ret; + + ret = -ENOMEM; + pgtable = pte_alloc_one(dst_mm, addr); + if (unlikely(!pgtable)) + goto out; + + spin_lock(&dst_mm->page_table_lock); + spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING); + + ret = -EAGAIN; + pmd = *src_pmd; + if (unlikely(!pmd_trans_huge(pmd))) { + pte_free(dst_mm, pgtable); + goto out_unlock; + } + if (unlikely(pmd_trans_splitting(pmd))) { + /* split huge page running from under us */ + spin_unlock(&src_mm->page_table_lock); + spin_unlock(&dst_mm->page_table_lock); + pte_free(dst_mm, pgtable); + + wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ + goto out; + } + src_page = pmd_page(pmd); + VM_BUG_ON(!PageHead(src_page)); + get_page(src_page); + page_dup_rmap(src_page); + add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); + + pmdp_set_wrprotect(src_mm, addr, src_pmd); + pmd = pmd_mkold(pmd_wrprotect(pmd)); + set_pmd_at(dst_mm, addr, dst_pmd, pmd); + prepare_pmd_huge_pte(pgtable, dst_mm); + + ret = 0; +out_unlock: + spin_unlock(&src_mm->page_table_lock); + spin_unlock(&dst_mm->page_table_lock); +out: + return ret; +} + +/* no "address" argument so destroys page coloring of some arch */ +pgtable_t get_pmd_huge_pte(struct mm_struct *mm) +{ + pgtable_t pgtable; + + assert_spin_locked(&mm->page_table_lock); + + /* FIFO */ + pgtable = mm->pmd_huge_pte; + if (list_empty(&pgtable->lru)) + mm->pmd_huge_pte = NULL; + else { + mm->pmd_huge_pte = list_entry(pgtable->lru.next, + struct page, lru); + list_del(&pgtable->lru); 
+ } + return pgtable; +} + +static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmd, pmd_t orig_pmd, + struct page *page, + unsigned long haddr) +{ + pgtable_t pgtable; + pmd_t _pmd; + int ret = 0, i; + struct page **pages; + + pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, + GFP_KERNEL); + if (unlikely(!pages)) { + ret |= VM_FAULT_OOM; + goto out; + } + + for (i = 0; i < HPAGE_PMD_NR; i++) { + pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, + vma, address); + if (unlikely(!pages[i] || + mem_cgroup_newpage_charge(pages[i], mm, + GFP_KERNEL))) { + if (pages[i]) + put_page(pages[i]); + mem_cgroup_uncharge_start(); + while (--i >= 0) { + mem_cgroup_uncharge_page(pages[i]); + put_page(pages[i]); + } + mem_cgroup_uncharge_end(); + kfree(pages); + ret |= VM_FAULT_OOM; + goto out; + } + } + + for (i = 0; i < HPAGE_PMD_NR; i++) { + copy_user_highpage(pages[i], page + i, + haddr + PAGE_SHIFT*i, vma); + __SetPageUptodate(pages[i]); + cond_resched(); + } + + spin_lock(&mm->page_table_lock); + if (unlikely(!pmd_same(*pmd, orig_pmd))) + goto out_free_pages; + VM_BUG_ON(!PageHead(page)); + + pmdp_clear_flush_notify(vma, haddr, pmd); + /* leave pmd empty until pte is filled */ + + pgtable = get_pmd_huge_pte(mm); + pmd_populate(mm, &_pmd, pgtable); + + for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { + pte_t *pte, entry; + entry = mk_pte(pages[i], vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + page_add_new_anon_rmap(pages[i], vma, haddr); + pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte_none(*pte)); + set_pte_at(mm, haddr, pte, entry); + pte_unmap(pte); + } + kfree(pages); + + mm->nr_ptes++; + smp_wmb(); /* make pte visible before pmd */ + pmd_populate(mm, pmd, pgtable); + page_remove_rmap(page); + spin_unlock(&mm->page_table_lock); + + ret |= VM_FAULT_WRITE; + put_page(page); + +out: + return ret; + +out_free_pages: + spin_unlock(&mm->page_table_lock); + mem_cgroup_uncharge_start(); + for (i = 0; i < HPAGE_PMD_NR; i++) { + mem_cgroup_uncharge_page(pages[i]); + put_page(pages[i]); + } + mem_cgroup_uncharge_end(); + kfree(pages); + goto out; +} + +int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, pmd_t orig_pmd) +{ + int ret = 0; + struct page *page, *new_page; + unsigned long haddr; + + VM_BUG_ON(!vma->anon_vma); + spin_lock(&mm->page_table_lock); + if (unlikely(!pmd_same(*pmd, orig_pmd))) + goto out_unlock; + + page = pmd_page(orig_pmd); + VM_BUG_ON(!PageCompound(page) || !PageHead(page)); + haddr = address & HPAGE_PMD_MASK; + if (page_mapcount(page) == 1) { + pmd_t entry; + entry = pmd_mkyoung(orig_pmd); + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); + if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) + update_mmu_cache(vma, address, entry); + ret |= VM_FAULT_WRITE; + goto out_unlock; + } + get_page(page); + spin_unlock(&mm->page_table_lock); + + if (transparent_hugepage_enabled(vma) && + !transparent_hugepage_debug_cow()) + new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), + vma, haddr); + else + new_page = NULL; + + if (unlikely(!new_page)) { + ret = do_huge_pmd_wp_page_fallback(mm, vma, address, + pmd, orig_pmd, page, haddr); + put_page(page); + goto out; + } + + if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { + put_page(new_page); + put_page(page); + ret |= VM_FAULT_OOM; + goto out; + } + + copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); + 
__SetPageUptodate(new_page); + + spin_lock(&mm->page_table_lock); + put_page(page); + if (unlikely(!pmd_same(*pmd, orig_pmd))) { + mem_cgroup_uncharge_page(new_page); + put_page(new_page); + } else { + pmd_t entry; + VM_BUG_ON(!PageHead(page)); + entry = mk_pmd(new_page, vma->vm_page_prot); + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); + entry = pmd_mkhuge(entry); + pmdp_clear_flush_notify(vma, haddr, pmd); + page_add_new_anon_rmap(new_page, vma, haddr); + set_pmd_at(mm, haddr, pmd, entry); + update_mmu_cache(vma, address, entry); + page_remove_rmap(page); + put_page(page); + ret |= VM_FAULT_WRITE; + } +out_unlock: + spin_unlock(&mm->page_table_lock); +out: + return ret; +} + +struct page *follow_trans_huge_pmd(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmd, + unsigned int flags) +{ + struct page *page = NULL; + + assert_spin_locked(&mm->page_table_lock); + + if (flags & FOLL_WRITE && !pmd_write(*pmd)) + goto out; + + page = pmd_page(*pmd); + VM_BUG_ON(!PageHead(page)); + if (flags & FOLL_TOUCH) { + pmd_t _pmd; + /* + * We should set the dirty bit only for FOLL_WRITE but + * for now the dirty bit in the pmd is meaningless. + * And if the dirty bit will become meaningful and + * we'll only set it with FOLL_WRITE, an atomic + * set_bit will be required on the pmd to set the + * young bit, instead of the current set_pmd_at. + */ + _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); + set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd); + } + page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; + VM_BUG_ON(!PageCompound(page)); + if (flags & FOLL_GET) + get_page(page); + +out: + return page; +} + +int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, + pmd_t *pmd) +{ + int ret = 0; + + spin_lock(&tlb->mm->page_table_lock); + if (likely(pmd_trans_huge(*pmd))) { + if (unlikely(pmd_trans_splitting(*pmd))) { + spin_unlock(&tlb->mm->page_table_lock); + wait_split_huge_page(vma->anon_vma, + pmd); + } else { + struct page *page; + pgtable_t pgtable; + pgtable = get_pmd_huge_pte(tlb->mm); + page = pmd_page(*pmd); + pmd_clear(pmd); + page_remove_rmap(page); + VM_BUG_ON(page_mapcount(page) < 0); + add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); + VM_BUG_ON(!PageHead(page)); + spin_unlock(&tlb->mm->page_table_lock); + tlb_remove_page(tlb, page); + pte_free(tlb->mm, pgtable); + ret = 1; + } + } else + spin_unlock(&tlb->mm->page_table_lock); + + return ret; +} + +int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned char *vec) +{ + int ret = 0; + + spin_lock(&vma->vm_mm->page_table_lock); + if (likely(pmd_trans_huge(*pmd))) { + ret = !pmd_trans_splitting(*pmd); + spin_unlock(&vma->vm_mm->page_table_lock); + if (unlikely(!ret)) + wait_split_huge_page(vma->anon_vma, pmd); + else { + /* + * All logical pages in the range are present + * if backed by a huge page. 
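mincore_huge_pmd() above answers for a whole huge pmd without splitting it: every small page in the range is reported resident. A rough userspace sketch, not part of the patch, that shows the effect through the existing mincore(2) syscall, assuming a 2 MiB pmd size and that the kernel actually chose to back the region with a transparent hugepage:

    /* Sketch only: count resident small pages in one pmd-sized region. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 2UL * 1024 * 1024;   /* assumed pmd size (x86) */
        long psz = sysconf(_SC_PAGESIZE);
        size_t npages = len / psz;
        unsigned char *vec = malloc(npages);
        size_t i, resident = 0;
        char *p;

        if (!vec)
            return 1;
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 0x5a, len);             /* fault the range in */

        if (mincore(p, len, vec) == 0) {
            for (i = 0; i < npages; i++)
                resident += vec[i] & 1;
            printf("%zu of %zu small pages reported resident\n",
                   resident, npages);
        }
        return 0;
    }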
+ */ + memset(vec, 1, (end - addr) >> PAGE_SHIFT); + } + } else + spin_unlock(&vma->vm_mm->page_table_lock); + + return ret; +} + +int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, pgprot_t newprot) +{ + struct mm_struct *mm = vma->vm_mm; + int ret = 0; + + spin_lock(&mm->page_table_lock); + if (likely(pmd_trans_huge(*pmd))) { + if (unlikely(pmd_trans_splitting(*pmd))) { + spin_unlock(&mm->page_table_lock); + wait_split_huge_page(vma->anon_vma, pmd); + } else { + pmd_t entry; + + entry = pmdp_get_and_clear(mm, addr, pmd); + entry = pmd_modify(entry, newprot); + set_pmd_at(mm, addr, pmd, entry); + spin_unlock(&vma->vm_mm->page_table_lock); + flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE); + ret = 1; + } + } else + spin_unlock(&vma->vm_mm->page_table_lock); + + return ret; +} + +pmd_t *page_check_address_pmd(struct page *page, + struct mm_struct *mm, + unsigned long address, + enum page_check_address_pmd_flag flag) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd, *ret = NULL; + + if (address & ~HPAGE_PMD_MASK) + goto out; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out; + + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + goto out; + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd)) + goto out; + if (pmd_page(*pmd) != page) + goto out; + /* + * split_vma() may create temporary aliased mappings. There is + * no risk as long as all huge pmd are found and have their + * splitting bit set before __split_huge_page_refcount + * runs. Finding the same huge pmd more than once during the + * same rmap walk is not a problem. + */ + if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && + pmd_trans_splitting(*pmd)) + goto out; + if (pmd_trans_huge(*pmd)) { + VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && + !pmd_trans_splitting(*pmd)); + ret = pmd; + } +out: + return ret; +} + +static int __split_huge_page_splitting(struct page *page, + struct vm_area_struct *vma, + unsigned long address) +{ + struct mm_struct *mm = vma->vm_mm; + pmd_t *pmd; + int ret = 0; + + spin_lock(&mm->page_table_lock); + pmd = page_check_address_pmd(page, mm, address, + PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); + if (pmd) { + /* + * We can't temporarily set the pmd to null in order + * to split it, the pmd must remain marked huge at all + * times or the VM won't take the pmd_trans_huge paths + * and it won't wait on the anon_vma->root->lock to + * serialize against split_huge_page*. 
+ */ + pmdp_splitting_flush_notify(vma, address, pmd); + ret = 1; + } + spin_unlock(&mm->page_table_lock); + + return ret; +} + +static void __split_huge_page_refcount(struct page *page) +{ + int i; + unsigned long head_index = page->index; + struct zone *zone = page_zone(page); + int zonestat; + + /* prevent PageLRU to go away from under us, and freeze lru stats */ + spin_lock_irq(&zone->lru_lock); + compound_lock(page); + + for (i = 1; i < HPAGE_PMD_NR; i++) { + struct page *page_tail = page + i; + + /* tail_page->_count cannot change */ + atomic_sub(atomic_read(&page_tail->_count), &page->_count); + BUG_ON(page_count(page) <= 0); + atomic_add(page_mapcount(page) + 1, &page_tail->_count); + BUG_ON(atomic_read(&page_tail->_count) <= 0); + + /* after clearing PageTail the gup refcount can be released */ + smp_mb(); + + page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + page_tail->flags |= (page->flags & + ((1L << PG_referenced) | + (1L << PG_swapbacked) | + (1L << PG_mlocked) | + (1L << PG_uptodate))); + page_tail->flags |= (1L << PG_dirty); + + /* + * 1) clear PageTail before overwriting first_page + * 2) clear PageTail before clearing PageHead for VM_BUG_ON + */ + smp_wmb(); + + /* + * __split_huge_page_splitting() already set the + * splitting bit in all pmd that could map this + * hugepage, that will ensure no CPU can alter the + * mapcount on the head page. The mapcount is only + * accounted in the head page and it has to be + * transferred to all tail pages in the below code. So + * for this code to be safe, the split the mapcount + * can't change. But that doesn't mean userland can't + * keep changing and reading the page contents while + * we transfer the mapcount, so the pmd splitting + * status is achieved setting a reserved bit in the + * pmd, not by clearing the present bit. + */ + BUG_ON(page_mapcount(page_tail)); + page_tail->_mapcount = page->_mapcount; + + BUG_ON(page_tail->mapping); + page_tail->mapping = page->mapping; + + page_tail->index = ++head_index; + + BUG_ON(!PageAnon(page_tail)); + BUG_ON(!PageUptodate(page_tail)); + BUG_ON(!PageDirty(page_tail)); + BUG_ON(!PageSwapBacked(page_tail)); + + lru_add_page_tail(zone, page, page_tail); + } + + __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); + __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR); + + /* + * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics, + * so adjust those appropriately if this page is on the LRU. + */ + if (PageLRU(page)) { + zonestat = NR_LRU_BASE + page_lru(page); + __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1)); + } + + ClearPageCompound(page); + compound_unlock(page); + spin_unlock_irq(&zone->lru_lock); + + for (i = 1; i < HPAGE_PMD_NR; i++) { + struct page *page_tail = page + i; + BUG_ON(page_count(page_tail) <= 0); + /* + * Tail pages may be freed if there wasn't any mapping + * like if add_to_swap() is running on a lru page that + * had its mapping zapped. And freeing these pages + * requires taking the lru_lock so we do the put_page + * of the tail pages after the split is complete. + */ + put_page(page_tail); + } + + /* + * Only the head page (now become a regular page) is required + * to be pinned by the caller. 
+ */ + BUG_ON(page_count(page) <= 0); +} + +static int __split_huge_page_map(struct page *page, + struct vm_area_struct *vma, + unsigned long address) +{ + struct mm_struct *mm = vma->vm_mm; + pmd_t *pmd, _pmd; + int ret = 0, i; + pgtable_t pgtable; + unsigned long haddr; + + spin_lock(&mm->page_table_lock); + pmd = page_check_address_pmd(page, mm, address, + PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); + if (pmd) { + pgtable = get_pmd_huge_pte(mm); + pmd_populate(mm, &_pmd, pgtable); + + for (i = 0, haddr = address; i < HPAGE_PMD_NR; + i++, haddr += PAGE_SIZE) { + pte_t *pte, entry; + BUG_ON(PageCompound(page+i)); + entry = mk_pte(page + i, vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + if (!pmd_write(*pmd)) + entry = pte_wrprotect(entry); + else + BUG_ON(page_mapcount(page) != 1); + if (!pmd_young(*pmd)) + entry = pte_mkold(entry); + pte = pte_offset_map(&_pmd, haddr); + BUG_ON(!pte_none(*pte)); + set_pte_at(mm, haddr, pte, entry); + pte_unmap(pte); + } + + mm->nr_ptes++; + smp_wmb(); /* make pte visible before pmd */ + /* + * Up to this point the pmd is present and huge and + * userland has the whole access to the hugepage + * during the split (which happens in place). If we + * overwrite the pmd with the not-huge version + * pointing to the pte here (which of course we could + * if all CPUs were bug free), userland could trigger + * a small page size TLB miss on the small sized TLB + * while the hugepage TLB entry is still established + * in the huge TLB. Some CPU doesn't like that. See + * http://support.amd.com/us/Processor_TechDocs/41322.pdf, + * Erratum 383 on page 93. Intel should be safe but is + * also warns that it's only safe if the permission + * and cache attributes of the two entries loaded in + * the two TLB is identical (which should be the case + * here). But it is generally safer to never allow + * small and huge TLB entries for the same virtual + * address to be loaded simultaneously. So instead of + * doing "pmd_populate(); flush_tlb_range();" we first + * mark the current pmd notpresent (atomically because + * here the pmd_trans_huge and pmd_trans_splitting + * must remain set at all times on the pmd until the + * split is complete for this pmd), then we flush the + * SMP TLB and finally we write the non-huge version + * of the pmd entry with pmd_populate. + */ + set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd)); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + pmd_populate(mm, pmd, pgtable); + ret = 1; + } + spin_unlock(&mm->page_table_lock); + + return ret; +} + +/* must be called with anon_vma->root->lock hold */ +static void __split_huge_page(struct page *page, + struct anon_vma *anon_vma) +{ + int mapcount, mapcount2; + struct anon_vma_chain *avc; + + BUG_ON(!PageHead(page)); + BUG_ON(PageTail(page)); + + mapcount = 0; + list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { + struct vm_area_struct *vma = avc->vma; + unsigned long addr = vma_address(page, vma); + BUG_ON(is_vma_temporary_stack(vma)); + if (addr == -EFAULT) + continue; + mapcount += __split_huge_page_splitting(page, vma, addr); + } + /* + * It is critical that new vmas are added to the tail of the + * anon_vma list. 
This guarantes that if copy_huge_pmd() runs + * and establishes a child pmd before + * __split_huge_page_splitting() freezes the parent pmd (so if + * we fail to prevent copy_huge_pmd() from running until the + * whole __split_huge_page() is complete), we will still see + * the newly established pmd of the child later during the + * walk, to be able to set it as pmd_trans_splitting too. + */ + if (mapcount != page_mapcount(page)) + printk(KERN_ERR "mapcount %d page_mapcount %d\n", + mapcount, page_mapcount(page)); + BUG_ON(mapcount != page_mapcount(page)); + + __split_huge_page_refcount(page); + + mapcount2 = 0; + list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { + struct vm_area_struct *vma = avc->vma; + unsigned long addr = vma_address(page, vma); + BUG_ON(is_vma_temporary_stack(vma)); + if (addr == -EFAULT) + continue; + mapcount2 += __split_huge_page_map(page, vma, addr); + } + if (mapcount != mapcount2) + printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n", + mapcount, mapcount2, page_mapcount(page)); + BUG_ON(mapcount != mapcount2); +} + +int split_huge_page(struct page *page) +{ + struct anon_vma *anon_vma; + int ret = 1; + + BUG_ON(!PageAnon(page)); + anon_vma = page_lock_anon_vma(page); + if (!anon_vma) + goto out; + ret = 0; + if (!PageCompound(page)) + goto out_unlock; + + BUG_ON(!PageSwapBacked(page)); + __split_huge_page(page, anon_vma); + + BUG_ON(PageCompound(page)); +out_unlock: + page_unlock_anon_vma(anon_vma); +out: + return ret; +} + +int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice) +{ + switch (advice) { + case MADV_HUGEPAGE: + /* + * Be somewhat over-protective like KSM for now! + */ + if (*vm_flags & (VM_HUGEPAGE | + VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | + VM_MIXEDMAP | VM_SAO)) + return -EINVAL; + *vm_flags &= ~VM_NOHUGEPAGE; + *vm_flags |= VM_HUGEPAGE; + /* + * If the vma become good for khugepaged to scan, + * register it here without waiting a page fault that + * may not happen any time soon. + */ + if (unlikely(khugepaged_enter_vma_merge(vma))) + return -ENOMEM; + break; + case MADV_NOHUGEPAGE: + /* + * Be somewhat over-protective like KSM for now! + */ + if (*vm_flags & (VM_NOHUGEPAGE | + VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | + VM_MIXEDMAP | VM_SAO)) + return -EINVAL; + *vm_flags &= ~VM_HUGEPAGE; + *vm_flags |= VM_NOHUGEPAGE; + /* + * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning + * this vma even if we leave the mm registered in khugepaged if + * it got registered before VM_NOHUGEPAGE was set. 
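hugepage_madvise() above is what gives userspace per-vma control over THP. A sketch of the intended caller side, not part of the patch, assuming a libc that already carries the new MADV_HUGEPAGE definition (the asm-generic value is used as a fallback assumption):

    /* Sketch only: mark an anonymous region as a hugepage candidate. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MADV_HUGEPAGE
    #define MADV_HUGEPAGE 14   /* asm-generic value; assumption for older headers */
    #endif

    #define HPAGE_SIZE (2UL * 1024 * 1024)

    int main(void)
    {
        size_t len = 16 * HPAGE_SIZE;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        /* Ask page faults and khugepaged to use huge pages for this vma. */
        if (madvise(p, len, MADV_HUGEPAGE))
            perror("madvise(MADV_HUGEPAGE)");
        return 0;
    }

MADV_NOHUGEPAGE is the symmetric call for regions that should never be collapsed, matching the two switch cases in the hunk above.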
+ */ + break; + } + + return 0; +} + +static int __init khugepaged_slab_init(void) +{ + mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", + sizeof(struct mm_slot), + __alignof__(struct mm_slot), 0, NULL); + if (!mm_slot_cache) + return -ENOMEM; + + return 0; +} + +static void __init khugepaged_slab_free(void) +{ + kmem_cache_destroy(mm_slot_cache); + mm_slot_cache = NULL; +} + +static inline struct mm_slot *alloc_mm_slot(void) +{ + if (!mm_slot_cache) /* initialization failed */ + return NULL; + return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); +} + +static inline void free_mm_slot(struct mm_slot *mm_slot) +{ + kmem_cache_free(mm_slot_cache, mm_slot); +} + +static int __init mm_slots_hash_init(void) +{ + mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head), + GFP_KERNEL); + if (!mm_slots_hash) + return -ENOMEM; + return 0; +} + +#if 0 +static void __init mm_slots_hash_free(void) +{ + kfree(mm_slots_hash); + mm_slots_hash = NULL; +} +#endif + +static struct mm_slot *get_mm_slot(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + struct hlist_head *bucket; + struct hlist_node *node; + + bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) + % MM_SLOTS_HASH_HEADS]; + hlist_for_each_entry(mm_slot, node, bucket, hash) { + if (mm == mm_slot->mm) + return mm_slot; + } + return NULL; +} + +static void insert_to_mm_slots_hash(struct mm_struct *mm, + struct mm_slot *mm_slot) +{ + struct hlist_head *bucket; + + bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) + % MM_SLOTS_HASH_HEADS]; + mm_slot->mm = mm; + hlist_add_head(&mm_slot->hash, bucket); +} + +static inline int khugepaged_test_exit(struct mm_struct *mm) +{ + return atomic_read(&mm->mm_users) == 0; +} + +int __khugepaged_enter(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + int wakeup; + + mm_slot = alloc_mm_slot(); + if (!mm_slot) + return -ENOMEM; + + /* __khugepaged_exit() must not run from under us */ + VM_BUG_ON(khugepaged_test_exit(mm)); + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { + free_mm_slot(mm_slot); + return 0; + } + + spin_lock(&khugepaged_mm_lock); + insert_to_mm_slots_hash(mm, mm_slot); + /* + * Insert just behind the scanning cursor, to let the area settle + * down a little. + */ + wakeup = list_empty(&khugepaged_scan.mm_head); + list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); + spin_unlock(&khugepaged_mm_lock); + + atomic_inc(&mm->mm_count); + if (wakeup) + wake_up_interruptible(&khugepaged_wait); + + return 0; +} + +int khugepaged_enter_vma_merge(struct vm_area_struct *vma) +{ + unsigned long hstart, hend; + if (!vma->anon_vma) + /* + * Not yet faulted in so we will register later in the + * page fault if needed. 
+ */ + return 0; + if (vma->vm_file || vma->vm_ops) + /* khugepaged not yet working on file or special mappings */ + return 0; + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (hstart < hend) + return khugepaged_enter(vma); + return 0; +} + +void __khugepaged_exit(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + int free = 0; + + spin_lock(&khugepaged_mm_lock); + mm_slot = get_mm_slot(mm); + if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { + hlist_del(&mm_slot->hash); + list_del(&mm_slot->mm_node); + free = 1; + } + + if (free) { + spin_unlock(&khugepaged_mm_lock); + clear_bit(MMF_VM_HUGEPAGE, &mm->flags); + free_mm_slot(mm_slot); + mmdrop(mm); + } else if (mm_slot) { + spin_unlock(&khugepaged_mm_lock); + /* + * This is required to serialize against + * khugepaged_test_exit() (which is guaranteed to run + * under mmap sem read mode). Stop here (after we + * return all pagetables will be destroyed) until + * khugepaged has finished working on the pagetables + * under the mmap_sem. + */ + down_write(&mm->mmap_sem); + up_write(&mm->mmap_sem); + } else + spin_unlock(&khugepaged_mm_lock); +} + +static void release_pte_page(struct page *page) +{ + /* 0 stands for page_is_file_cache(page) == false */ + dec_zone_page_state(page, NR_ISOLATED_ANON + 0); + unlock_page(page); + putback_lru_page(page); +} + +static void release_pte_pages(pte_t *pte, pte_t *_pte) +{ + while (--_pte >= pte) { + pte_t pteval = *_pte; + if (!pte_none(pteval)) + release_pte_page(pte_page(pteval)); + } +} + +static void release_all_pte_pages(pte_t *pte) +{ + release_pte_pages(pte, pte + HPAGE_PMD_NR); +} + +static int __collapse_huge_page_isolate(struct vm_area_struct *vma, + unsigned long address, + pte_t *pte) +{ + struct page *page; + pte_t *_pte; + int referenced = 0, isolated = 0, none = 0; + for (_pte = pte; _pte < pte+HPAGE_PMD_NR; + _pte++, address += PAGE_SIZE) { + pte_t pteval = *_pte; + if (pte_none(pteval)) { + if (++none <= khugepaged_max_ptes_none) + continue; + else { + release_pte_pages(pte, _pte); + goto out; + } + } + if (!pte_present(pteval) || !pte_write(pteval)) { + release_pte_pages(pte, _pte); + goto out; + } + page = vm_normal_page(vma, address, pteval); + if (unlikely(!page)) { + release_pte_pages(pte, _pte); + goto out; + } + VM_BUG_ON(PageCompound(page)); + BUG_ON(!PageAnon(page)); + VM_BUG_ON(!PageSwapBacked(page)); + + /* cannot use mapcount: can't collapse if there's a gup pin */ + if (page_count(page) != 1) { + release_pte_pages(pte, _pte); + goto out; + } + /* + * We can do it before isolate_lru_page because the + * page can't be freed from under us. NOTE: PG_lock + * is needed to serialize against split_huge_page + * when invoked from the VM. + */ + if (!trylock_page(page)) { + release_pte_pages(pte, _pte); + goto out; + } + /* + * Isolate the page to avoid collapsing an hugepage + * currently in use by the VM. 
+ */ + if (isolate_lru_page(page)) { + unlock_page(page); + release_pte_pages(pte, _pte); + goto out; + } + /* 0 stands for page_is_file_cache(page) == false */ + inc_zone_page_state(page, NR_ISOLATED_ANON + 0); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(PageLRU(page)); + + /* If there is no mapped pte young don't collapse the page */ + if (pte_young(pteval) || PageReferenced(page) || + mmu_notifier_test_young(vma->vm_mm, address)) + referenced = 1; + } + if (unlikely(!referenced)) + release_all_pte_pages(pte); + else + isolated = 1; +out: + return isolated; +} + +static void __collapse_huge_page_copy(pte_t *pte, struct page *page, + struct vm_area_struct *vma, + unsigned long address, + spinlock_t *ptl) +{ + pte_t *_pte; + for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { + pte_t pteval = *_pte; + struct page *src_page; + + if (pte_none(pteval)) { + clear_user_highpage(page, address); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); + } else { + src_page = pte_page(pteval); + copy_user_highpage(page, src_page, address, vma); + VM_BUG_ON(page_mapcount(src_page) != 1); + VM_BUG_ON(page_count(src_page) != 2); + release_pte_page(src_page); + /* + * ptl mostly unnecessary, but preempt has to + * be disabled to update the per-cpu stats + * inside page_remove_rmap(). + */ + spin_lock(ptl); + /* + * paravirt calls inside pte_clear here are + * superfluous. + */ + pte_clear(vma->vm_mm, address, _pte); + page_remove_rmap(src_page); + spin_unlock(ptl); + free_page_and_swap_cache(src_page); + } + + address += PAGE_SIZE; + page++; + } +} + +static void collapse_huge_page(struct mm_struct *mm, + unsigned long address, + struct page **hpage, + struct vm_area_struct *vma) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd, _pmd; + pte_t *pte; + pgtable_t pgtable; + struct page *new_page; + spinlock_t *ptl; + int isolated; + unsigned long hstart, hend; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); +#ifndef CONFIG_NUMA + VM_BUG_ON(!*hpage); + new_page = *hpage; +#else + VM_BUG_ON(*hpage); + /* + * Allocate the page while the vma is still valid and under + * the mmap_sem read mode so there is no memory allocation + * later when we take the mmap_sem in write mode. This is more + * friendly behavior (OTOH it may actually hide bugs) to + * filesystems in userland with daemons allocating memory in + * the userland I/O paths. Allocating memory with the + * mmap_sem in read mode is good idea also to allow greater + * scalability. + */ + new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); + if (unlikely(!new_page)) { + up_read(&mm->mmap_sem); + *hpage = ERR_PTR(-ENOMEM); + return; + } +#endif + if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { + up_read(&mm->mmap_sem); + put_page(new_page); + return; + } + + /* after allocating the hugepage upgrade to mmap_sem write mode */ + up_read(&mm->mmap_sem); + + /* + * Prevent all access to pagetables with the exception of + * gup_fast later hanlded by the ptep_clear_flush and the VM + * handled by the anon_vma lock + PG_lock. 
+ */ + down_write(&mm->mmap_sem); + if (unlikely(khugepaged_test_exit(mm))) + goto out; + + vma = find_vma(mm, address); + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (address < hstart || address + HPAGE_PMD_SIZE > hend) + goto out; + + if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || + (vma->vm_flags & VM_NOHUGEPAGE)) + goto out; + + /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + goto out; + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out; + + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + goto out; + + pmd = pmd_offset(pud, address); + /* pmd can't go away or become huge under us */ + if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) + goto out; + + anon_vma_lock(vma->anon_vma); + + pte = pte_offset_map(pmd, address); + ptl = pte_lockptr(mm, pmd); + + spin_lock(&mm->page_table_lock); /* probably unnecessary */ + /* + * After this gup_fast can't run anymore. This also removes + * any huge TLB entry from the CPU so we won't allow + * huge and small TLB entries for the same virtual address + * to avoid the risk of CPU bugs in that area. + */ + _pmd = pmdp_clear_flush_notify(vma, address, pmd); + spin_unlock(&mm->page_table_lock); + + spin_lock(ptl); + isolated = __collapse_huge_page_isolate(vma, address, pte); + spin_unlock(ptl); + pte_unmap(pte); + + if (unlikely(!isolated)) { + spin_lock(&mm->page_table_lock); + BUG_ON(!pmd_none(*pmd)); + set_pmd_at(mm, address, pmd, _pmd); + spin_unlock(&mm->page_table_lock); + anon_vma_unlock(vma->anon_vma); + mem_cgroup_uncharge_page(new_page); + goto out; + } + + /* + * All pages are isolated and locked so anon_vma rmap + * can't run anymore. + */ + anon_vma_unlock(vma->anon_vma); + + __collapse_huge_page_copy(pte, new_page, vma, address, ptl); + __SetPageUptodate(new_page); + pgtable = pmd_pgtable(_pmd); + VM_BUG_ON(page_count(pgtable) != 1); + VM_BUG_ON(page_mapcount(pgtable) != 0); + + _pmd = mk_pmd(new_page, vma->vm_page_prot); + _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); + _pmd = pmd_mkhuge(_pmd); + + /* + * spin_lock() below is not the equivalent of smp_wmb(), so + * this is needed to avoid the copy_huge_page writes to become + * visible after the set_pmd_at() write. 
+ */ + smp_wmb(); + + spin_lock(&mm->page_table_lock); + BUG_ON(!pmd_none(*pmd)); + page_add_new_anon_rmap(new_page, vma, address); + set_pmd_at(mm, address, pmd, _pmd); + update_mmu_cache(vma, address, entry); + prepare_pmd_huge_pte(pgtable, mm); + mm->nr_ptes--; + spin_unlock(&mm->page_table_lock); + +#ifndef CONFIG_NUMA + *hpage = NULL; +#endif + khugepaged_pages_collapsed++; +out_up_write: + up_write(&mm->mmap_sem); + return; + +out: +#ifdef CONFIG_NUMA + put_page(new_page); +#endif + goto out_up_write; +} + +static int khugepaged_scan_pmd(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, + struct page **hpage) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte, *_pte; + int ret = 0, referenced = 0, none = 0; + struct page *page; + unsigned long _address; + spinlock_t *ptl; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out; + + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + goto out; + + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) + goto out; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; + _pte++, _address += PAGE_SIZE) { + pte_t pteval = *_pte; + if (pte_none(pteval)) { + if (++none <= khugepaged_max_ptes_none) + continue; + else + goto out_unmap; + } + if (!pte_present(pteval) || !pte_write(pteval)) + goto out_unmap; + page = vm_normal_page(vma, _address, pteval); + if (unlikely(!page)) + goto out_unmap; + VM_BUG_ON(PageCompound(page)); + if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) + goto out_unmap; + /* cannot use mapcount: can't collapse if there's a gup pin */ + if (page_count(page) != 1) + goto out_unmap; + if (pte_young(pteval) || PageReferenced(page) || + mmu_notifier_test_young(vma->vm_mm, address)) + referenced = 1; + } + if (referenced) + ret = 1; +out_unmap: + pte_unmap_unlock(pte, ptl); + if (ret) + /* collapse_huge_page will return with the mmap_sem released */ + collapse_huge_page(mm, address, hpage, vma); +out: + return ret; +} + +static void collect_mm_slot(struct mm_slot *mm_slot) +{ + struct mm_struct *mm = mm_slot->mm; + + VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); + + if (khugepaged_test_exit(mm)) { + /* free mm_slot */ + hlist_del(&mm_slot->hash); + list_del(&mm_slot->mm_node); + + /* + * Not strictly needed because the mm exited already. 
+ * + * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); + */ + + /* khugepaged_mm_lock actually not necessary for the below */ + free_mm_slot(mm_slot); + mmdrop(mm); + } +} + +static unsigned int khugepaged_scan_mm_slot(unsigned int pages, + struct page **hpage) +{ + struct mm_slot *mm_slot; + struct mm_struct *mm; + struct vm_area_struct *vma; + int progress = 0; + + VM_BUG_ON(!pages); + VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); + + if (khugepaged_scan.mm_slot) + mm_slot = khugepaged_scan.mm_slot; + else { + mm_slot = list_entry(khugepaged_scan.mm_head.next, + struct mm_slot, mm_node); + khugepaged_scan.address = 0; + khugepaged_scan.mm_slot = mm_slot; + } + spin_unlock(&khugepaged_mm_lock); + + mm = mm_slot->mm; + down_read(&mm->mmap_sem); + if (unlikely(khugepaged_test_exit(mm))) + vma = NULL; + else + vma = find_vma(mm, khugepaged_scan.address); + + progress++; + for (; vma; vma = vma->vm_next) { + unsigned long hstart, hend; + + cond_resched(); + if (unlikely(khugepaged_test_exit(mm))) { + progress++; + break; + } + + if ((!(vma->vm_flags & VM_HUGEPAGE) && + !khugepaged_always()) || + (vma->vm_flags & VM_NOHUGEPAGE)) { + progress++; + continue; + } + + /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { + khugepaged_scan.address = vma->vm_end; + progress++; + continue; + } + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (hstart >= hend) { + progress++; + continue; + } + if (khugepaged_scan.address < hstart) + khugepaged_scan.address = hstart; + if (khugepaged_scan.address > hend) { + khugepaged_scan.address = hend + HPAGE_PMD_SIZE; + progress++; + continue; + } + BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + + while (khugepaged_scan.address < hend) { + int ret; + cond_resched(); + if (unlikely(khugepaged_test_exit(mm))) + goto breakouterloop; + + VM_BUG_ON(khugepaged_scan.address < hstart || + khugepaged_scan.address + HPAGE_PMD_SIZE > + hend); + ret = khugepaged_scan_pmd(mm, vma, + khugepaged_scan.address, + hpage); + /* move to next address */ + khugepaged_scan.address += HPAGE_PMD_SIZE; + progress += HPAGE_PMD_NR; + if (ret) + /* we released mmap_sem so break loop */ + goto breakouterloop_mmap_sem; + if (progress >= pages) + goto breakouterloop; + } + } +breakouterloop: + up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ +breakouterloop_mmap_sem: + + spin_lock(&khugepaged_mm_lock); + BUG_ON(khugepaged_scan.mm_slot != mm_slot); + /* + * Release the current mm_slot if this mm is about to die, or + * if we scanned all vmas of this mm. + */ + if (khugepaged_test_exit(mm) || !vma) { + /* + * Make sure that if mm_users is reaching zero while + * khugepaged runs here, khugepaged_exit will find + * mm_slot not pointing to the exiting mm. 
+ */ + if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { + khugepaged_scan.mm_slot = list_entry( + mm_slot->mm_node.next, + struct mm_slot, mm_node); + khugepaged_scan.address = 0; + } else { + khugepaged_scan.mm_slot = NULL; + khugepaged_full_scans++; + } + + collect_mm_slot(mm_slot); + } + + return progress; +} + +static int khugepaged_has_work(void) +{ + return !list_empty(&khugepaged_scan.mm_head) && + khugepaged_enabled(); +} + +static int khugepaged_wait_event(void) +{ + return !list_empty(&khugepaged_scan.mm_head) || + !khugepaged_enabled(); +} + +static void khugepaged_do_scan(struct page **hpage) +{ + unsigned int progress = 0, pass_through_head = 0; + unsigned int pages = khugepaged_pages_to_scan; + + barrier(); /* write khugepaged_pages_to_scan to local stack */ + + while (progress < pages) { + cond_resched(); + +#ifndef CONFIG_NUMA + if (!*hpage) { + *hpage = alloc_hugepage(khugepaged_defrag()); + if (unlikely(!*hpage)) + break; + } +#else + if (IS_ERR(*hpage)) + break; +#endif + + if (unlikely(kthread_should_stop() || freezing(current))) + break; + + spin_lock(&khugepaged_mm_lock); + if (!khugepaged_scan.mm_slot) + pass_through_head++; + if (khugepaged_has_work() && + pass_through_head < 2) + progress += khugepaged_scan_mm_slot(pages - progress, + hpage); + else + progress = pages; + spin_unlock(&khugepaged_mm_lock); + } +} + +static void khugepaged_alloc_sleep(void) +{ + DEFINE_WAIT(wait); + add_wait_queue(&khugepaged_wait, &wait); + schedule_timeout_interruptible( + msecs_to_jiffies( + khugepaged_alloc_sleep_millisecs)); + remove_wait_queue(&khugepaged_wait, &wait); +} + +#ifndef CONFIG_NUMA +static struct page *khugepaged_alloc_hugepage(void) +{ + struct page *hpage; + + do { + hpage = alloc_hugepage(khugepaged_defrag()); + if (!hpage) + khugepaged_alloc_sleep(); + } while (unlikely(!hpage) && + likely(khugepaged_enabled())); + return hpage; +} +#endif + +static void khugepaged_loop(void) +{ + struct page *hpage; + +#ifdef CONFIG_NUMA + hpage = NULL; +#endif + while (likely(khugepaged_enabled())) { +#ifndef CONFIG_NUMA + hpage = khugepaged_alloc_hugepage(); + if (unlikely(!hpage)) + break; +#else + if (IS_ERR(hpage)) { + khugepaged_alloc_sleep(); + hpage = NULL; + } +#endif + + khugepaged_do_scan(&hpage); +#ifndef CONFIG_NUMA + if (hpage) + put_page(hpage); +#endif + try_to_freeze(); + if (unlikely(kthread_should_stop())) + break; + if (khugepaged_has_work()) { + DEFINE_WAIT(wait); + if (!khugepaged_scan_sleep_millisecs) + continue; + add_wait_queue(&khugepaged_wait, &wait); + schedule_timeout_interruptible( + msecs_to_jiffies( + khugepaged_scan_sleep_millisecs)); + remove_wait_queue(&khugepaged_wait, &wait); + } else if (khugepaged_enabled()) + wait_event_freezable(khugepaged_wait, + khugepaged_wait_event()); + } +} + +static int khugepaged(void *none) +{ + struct mm_slot *mm_slot; + + set_freezable(); + set_user_nice(current, 19); + + /* serialize with start_khugepaged() */ + mutex_lock(&khugepaged_mutex); + + for (;;) { + mutex_unlock(&khugepaged_mutex); + BUG_ON(khugepaged_thread != current); + khugepaged_loop(); + BUG_ON(khugepaged_thread != current); + + mutex_lock(&khugepaged_mutex); + if (!khugepaged_enabled()) + break; + if (unlikely(kthread_should_stop())) + break; + } + + spin_lock(&khugepaged_mm_lock); + mm_slot = khugepaged_scan.mm_slot; + khugepaged_scan.mm_slot = NULL; + if (mm_slot) + collect_mm_slot(mm_slot); + spin_unlock(&khugepaged_mm_lock); + + khugepaged_thread = NULL; + mutex_unlock(&khugepaged_mutex); + + return 0; +} + +void 
__split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd) +{ + struct page *page; + + spin_lock(&mm->page_table_lock); + if (unlikely(!pmd_trans_huge(*pmd))) { + spin_unlock(&mm->page_table_lock); + return; + } + page = pmd_page(*pmd); + VM_BUG_ON(!page_count(page)); + get_page(page); + spin_unlock(&mm->page_table_lock); + + split_huge_page(page); + + put_page(page); + BUG_ON(pmd_trans_huge(*pmd)); +} + +static void split_huge_page_address(struct mm_struct *mm, + unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return; + + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return; + + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return; + /* + * Caller holds the mmap_sem write mode, so a huge pmd cannot + * materialize from under us. + */ + split_huge_page_pmd(mm, pmd); +} + +void __vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ + /* + * If the new start address isn't hpage aligned and it could + * previously contain an hugepage: check if we need to split + * an huge pmd. + */ + if (start & ~HPAGE_PMD_MASK && + (start & HPAGE_PMD_MASK) >= vma->vm_start && + (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) + split_huge_page_address(vma->vm_mm, start); + + /* + * If the new end address isn't hpage aligned and it could + * previously contain an hugepage: check if we need to split + * an huge pmd. + */ + if (end & ~HPAGE_PMD_MASK && + (end & HPAGE_PMD_MASK) >= vma->vm_start && + (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) + split_huge_page_address(vma->vm_mm, end); + + /* + * If we're also updating the vma->vm_next->vm_start, if the new + * vm_next->vm_start isn't page aligned and it could previously + * contain an hugepage: check if we need to split an huge pmd. 
+ */ + if (adjust_next > 0) { + struct vm_area_struct *next = vma->vm_next; + unsigned long nstart = next->vm_start; + nstart += adjust_next << PAGE_SHIFT; + if (nstart & ~HPAGE_PMD_MASK && + (nstart & HPAGE_PMD_MASK) >= next->vm_start && + (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) + split_huge_page_address(next->vm_mm, nstart); + } +} diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 85855240933d..bb0b7c128015 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -394,71 +394,6 @@ static int vma_has_reserves(struct vm_area_struct *vma) return 0; } -static void clear_gigantic_page(struct page *page, - unsigned long addr, unsigned long sz) -{ - int i; - struct page *p = page; - - might_sleep(); - for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) { - cond_resched(); - clear_user_highpage(p, addr + i * PAGE_SIZE); - } -} -static void clear_huge_page(struct page *page, - unsigned long addr, unsigned long sz) -{ - int i; - - if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) { - clear_gigantic_page(page, addr, sz); - return; - } - - might_sleep(); - for (i = 0; i < sz/PAGE_SIZE; i++) { - cond_resched(); - clear_user_highpage(page + i, addr + i * PAGE_SIZE); - } -} - -static void copy_user_gigantic_page(struct page *dst, struct page *src, - unsigned long addr, struct vm_area_struct *vma) -{ - int i; - struct hstate *h = hstate_vma(vma); - struct page *dst_base = dst; - struct page *src_base = src; - - for (i = 0; i < pages_per_huge_page(h); ) { - cond_resched(); - copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); - - i++; - dst = mem_map_next(dst, dst_base, i); - src = mem_map_next(src, src_base, i); - } -} - -static void copy_user_huge_page(struct page *dst, struct page *src, - unsigned long addr, struct vm_area_struct *vma) -{ - int i; - struct hstate *h = hstate_vma(vma); - - if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { - copy_user_gigantic_page(dst, src, addr, vma); - return; - } - - might_sleep(); - for (i = 0; i < pages_per_huge_page(h); i++) { - cond_resched(); - copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); - } -} - static void copy_gigantic_page(struct page *dst, struct page *src) { int i; @@ -1428,6 +1363,7 @@ static ssize_t nr_hugepages_show_common(struct kobject *kobj, return sprintf(buf, "%lu\n", nr_huge_pages); } + static ssize_t nr_hugepages_store_common(bool obey_mempolicy, struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) @@ -1440,9 +1376,14 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy, err = strict_strtoul(buf, 10, &count); if (err) - return 0; + goto out; h = kobj_to_hstate(kobj, &nid); + if (h->order >= MAX_ORDER) { + err = -EINVAL; + goto out; + } + if (nid == NUMA_NO_NODE) { /* * global hstate attribute @@ -1468,6 +1409,9 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy, NODEMASK_FREE(nodes_allowed); return len; +out: + NODEMASK_FREE(nodes_allowed); + return err; } static ssize_t nr_hugepages_show(struct kobject *kobj, @@ -1510,6 +1454,7 @@ static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); } + static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { @@ -1517,9 +1462,12 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, unsigned long input; struct hstate *h = kobj_to_hstate(kobj, NULL); + if (h->order >= MAX_ORDER) + return -EINVAL; + err = 
strict_strtoul(buf, 10, &input); if (err) - return 0; + return err; spin_lock(&hugetlb_lock); h->nr_overcommit_huge_pages = input; @@ -1922,13 +1870,19 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, { struct hstate *h = &default_hstate; unsigned long tmp; + int ret; if (!write) tmp = h->max_huge_pages; + if (write && h->order >= MAX_ORDER) + return -EINVAL; + table->data = &tmp; table->maxlen = sizeof(unsigned long); - proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + if (ret) + goto out; if (write) { NODEMASK_ALLOC(nodemask_t, nodes_allowed, @@ -1943,8 +1897,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (nodes_allowed != &node_states[N_HIGH_MEMORY]) NODEMASK_FREE(nodes_allowed); } - - return 0; +out: + return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, @@ -1982,21 +1936,27 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, { struct hstate *h = &default_hstate; unsigned long tmp; + int ret; if (!write) tmp = h->nr_overcommit_huge_pages; + if (write && h->order >= MAX_ORDER) + return -EINVAL; + table->data = &tmp; table->maxlen = sizeof(unsigned long); - proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + if (ret) + goto out; if (write) { spin_lock(&hugetlb_lock); h->nr_overcommit_huge_pages = tmp; spin_unlock(&hugetlb_lock); } - - return 0; +out: + return ret; } #endif /* CONFIG_SYSCTL */ @@ -2454,7 +2414,8 @@ retry_avoidcopy: return VM_FAULT_OOM; } - copy_user_huge_page(new_page, old_page, address, vma); + copy_user_huge_page(new_page, old_page, address, vma, + pages_per_huge_page(h)); __SetPageUptodate(new_page); /* @@ -2558,7 +2519,7 @@ retry: ret = -PTR_ERR(page); goto out; } - clear_huge_page(page, address, huge_page_size(h)); + clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); if (vma->vm_flags & VM_MAYSHARE) { diff --git a/mm/internal.h b/mm/internal.h index dedb0aff673f..4c98630f0f77 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -39,6 +39,15 @@ static inline void __put_page(struct page *page) extern unsigned long highest_memmap_pfn; +#ifdef CONFIG_SMP +extern int putback_active_lru_page(struct zone *zone, struct page *page); +#else +static inline int putback_active_lru_page(struct zone *zone, struct page *page) +{ + return 0; +} +#endif + /* * in mm/vmscan.c: */ @@ -134,6 +143,10 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page) } } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern unsigned long vma_address(struct page *page, + struct vm_area_struct *vma); +#endif #else /* !CONFIG_MMU */ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) { @@ -243,7 +256,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, unsigned int foll_flags, - struct page **pages, struct vm_area_struct **vmas); + struct page **pages, struct vm_area_struct **vmas, + int *nonblocking); #define ZONE_RECLAIM_NOSCAN -2 #define ZONE_RECLAIM_FULL -1 diff --git a/mm/ksm.c b/mm/ksm.c index 43bc893470b4..c2b2a94f9d67 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include "internal.h" @@ -411,6 +412,20 @@ out: up_read(&mm->mmap_sem); } +static struct page *page_trans_compound_anon(struct page *page) +{ + if 
(PageTransCompound(page)) { + struct page *head = compound_trans_head(page); + /* + * head may actually be splitted and freed from under + * us but it's ok here. + */ + if (PageAnon(head)) + return head; + } + return NULL; +} + static struct page *get_mergeable_page(struct rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; @@ -430,7 +445,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) goto out; - if (PageAnon(page)) { + if (PageAnon(page) || page_trans_compound_anon(page)) { flush_anon_page(vma, page, addr); flush_dcache_page(page); } else { @@ -708,6 +723,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, if (addr == -EFAULT) goto out; + BUG_ON(PageTransCompound(page)); ptep = page_check_address(page, mm, addr, &ptl, 0); if (!ptep) goto out; @@ -783,6 +799,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, goto out; pmd = pmd_offset(pud, addr); + BUG_ON(pmd_trans_huge(*pmd)); if (!pmd_present(*pmd)) goto out; @@ -800,6 +817,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); page_remove_rmap(page); + if (!page_mapped(page)) + try_to_free_swap(page); put_page(page); pte_unmap_unlock(ptep, ptl); @@ -808,6 +827,33 @@ out: return err; } +static int page_trans_compound_anon_split(struct page *page) +{ + int ret = 0; + struct page *transhuge_head = page_trans_compound_anon(page); + if (transhuge_head) { + /* Get the reference on the head to split it. */ + if (get_page_unless_zero(transhuge_head)) { + /* + * Recheck we got the reference while the head + * was still anonymous. + */ + if (PageAnon(transhuge_head)) + ret = split_huge_page(transhuge_head); + else + /* + * Retry later if split_huge_page run + * from under us. + */ + ret = 1; + put_page(transhuge_head); + } else + /* Retry later if split_huge_page run from under us. */ + ret = 1; + } + return ret; +} + /* * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page @@ -828,6 +874,9 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, if (!(vma->vm_flags & VM_MERGEABLE)) goto out; + if (PageTransCompound(page) && page_trans_compound_anon_split(page)) + goto out; + BUG_ON(PageTransCompound(page)); if (!PageAnon(page)) goto out; @@ -1247,6 +1296,18 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) slot = ksm_scan.mm_slot; if (slot == &ksm_mm_head) { + /* + * A number of pages can hang around indefinitely on per-cpu + * pagevecs, raised page count preventing write_protect_page + * from merging them. Though it doesn't really matter much, + * it is puzzling to see some stuck in pages_volatile until + * other activity jostles them out, and they also prevented + * LTP's KSM test from succeeding deterministically; so drain + * them here (here rather than on entry to ksm_do_scan(), + * so we don't IPI too often when pages_to_scan is set low). 
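The ksm.c hunks above teach ksmd to split a transparent hugepage (page_trans_compound_anon_split) before merging its subpages. A rough userspace sketch, not part of the patch, of the kind of region that exercises this path: identical anonymous pages registered with the long-existing MADV_MERGEABLE:

    /* Sketch only: hand ksmd identical, possibly THP-backed pages. */
    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 8 * 2UL * 1024 * 1024;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        memset(p, 0x42, len);                  /* identical page contents */
        if (madvise(p, len, MADV_MERGEABLE))   /* register with KSM */
            return 1;
        pause();                               /* let ksmd scan and merge */
        return 0;
    }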
+ */ + lru_add_drain_all(); + root_unstable_tree = RB_ROOT; spin_lock(&ksm_mmlist_lock); @@ -1277,7 +1338,13 @@ next_mm: if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); - if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) { + if (IS_ERR_OR_NULL(*page)) { + ksm_scan.address += PAGE_SIZE; + cond_resched(); + continue; + } + if (PageAnon(*page) || + page_trans_compound_anon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(slot, @@ -1291,8 +1358,7 @@ next_mm: up_read(&mm->mmap_sem); return rmap_item; } - if (!IS_ERR_OR_NULL(*page)) - put_page(*page); + put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } @@ -1352,7 +1418,7 @@ static void ksm_do_scan(unsigned int scan_npages) struct rmap_item *rmap_item; struct page *uninitialized_var(page); - while (scan_npages--) { + while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) @@ -1370,6 +1436,7 @@ static int ksmd_should_run(void) static int ksm_scan_thread(void *nothing) { + set_freezable(); set_user_nice(current, 5); while (!kthread_should_stop()) { @@ -1378,11 +1445,13 @@ static int ksm_scan_thread(void *nothing) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); + try_to_freeze(); + if (ksmd_should_run()) { schedule_timeout_interruptible( msecs_to_jiffies(ksm_thread_sleep_millisecs)); } else { - wait_event_interruptible(ksm_thread_wait, + wait_event_freezable(ksm_thread_wait, ksmd_should_run() || kthread_should_stop()); } } diff --git a/mm/madvise.c b/mm/madvise.c index 319528b8db74..2221491ed503 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -71,6 +71,12 @@ static long madvise_behavior(struct vm_area_struct * vma, if (error) goto out; break; + case MADV_HUGEPAGE: + case MADV_NOHUGEPAGE: + error = hugepage_madvise(vma, &new_flags, behavior); + if (error) + goto out; + break; } if (new_flags == vma->vm_flags) { @@ -282,6 +288,10 @@ madvise_behavior_valid(int behavior) #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + case MADV_HUGEPAGE: + case MADV_NOHUGEPAGE: #endif return 1; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 00bb8a64d028..8ab841031436 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -292,7 +292,6 @@ static struct move_charge_struct { unsigned long moved_charge; unsigned long moved_swap; struct task_struct *moving_task; /* a task moving charges */ - struct mm_struct *mm; wait_queue_head_t waitq; /* a waitq for other context */ } mc = { .lock = __SPIN_LOCK_UNLOCKED(mc.lock), @@ -821,7 +820,6 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) return; VM_BUG_ON(list_empty(&pc->lru)); list_del_init(&pc->lru); - return; } void mem_cgroup_del_lru(struct page *page) @@ -1087,7 +1085,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, case 0: list_move(&page->lru, dst); mem_cgroup_del_lru(page); - nr_taken++; + nr_taken += hpage_nr_pages(page); break; case -EBUSY: /* we don't affect global LRU but rotate in our LRU */ @@ -1312,8 +1310,9 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) u64 limit; u64 memsw; - limit = res_counter_read_u64(&memcg->res, RES_LIMIT) + - total_swap_pages; + limit = res_counter_read_u64(&memcg->res, RES_LIMIT); + limit += total_swap_pages << PAGE_SHIFT; + memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); /* * If memsw is finite and limits the amount of swap space available @@ -1600,11 +1599,13 @@ 
bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) * possibility of race condition. If there is, we take a lock. */ -static void mem_cgroup_update_file_stat(struct page *page, int idx, int val) +void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx, int val) { struct mem_cgroup *mem; struct page_cgroup *pc = lookup_page_cgroup(page); bool need_unlock = false; + unsigned long uninitialized_var(flags); if (unlikely(!pc)) return; @@ -1616,37 +1617,34 @@ static void mem_cgroup_update_file_stat(struct page *page, int idx, int val) /* pc->mem_cgroup is unstable ? */ if (unlikely(mem_cgroup_stealed(mem))) { /* take a lock against to access pc->mem_cgroup */ - lock_page_cgroup(pc); + move_lock_page_cgroup(pc, &flags); need_unlock = true; mem = pc->mem_cgroup; if (!mem || !PageCgroupUsed(pc)) goto out; } - this_cpu_add(mem->stat->count[idx], val); - switch (idx) { - case MEM_CGROUP_STAT_FILE_MAPPED: + case MEMCG_NR_FILE_MAPPED: if (val > 0) SetPageCgroupFileMapped(pc); else if (!page_mapped(page)) ClearPageCgroupFileMapped(pc); + idx = MEM_CGROUP_STAT_FILE_MAPPED; break; default: BUG(); } + this_cpu_add(mem->stat->count[idx], val); + out: if (unlikely(need_unlock)) - unlock_page_cgroup(pc); + move_unlock_page_cgroup(pc, &flags); rcu_read_unlock(); return; } - -void mem_cgroup_update_file_mapped(struct page *page, int val) -{ - mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val); -} +EXPORT_SYMBOL(mem_cgroup_update_page_stat); /* * size of first charge trial. "32" comes from vmscan.c's magic value. @@ -1887,12 +1885,14 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, * oom-killer can be invoked. */ static int __mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) + gfp_t gfp_mask, + struct mem_cgroup **memcg, bool oom, + int page_size) { int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; struct mem_cgroup *mem = NULL; int ret; - int csize = CHARGE_SIZE; + int csize = max(CHARGE_SIZE, (unsigned long) page_size); /* * Unlike gloval-vm's OOM-kill, we're not in memory shortage @@ -1917,7 +1917,7 @@ again: VM_BUG_ON(css_is_removed(&mem->css)); if (mem_cgroup_is_root(mem)) goto done; - if (consume_stock(mem)) + if (page_size == PAGE_SIZE && consume_stock(mem)) goto done; css_get(&mem->css); } else { @@ -1940,7 +1940,7 @@ again: rcu_read_unlock(); goto done; } - if (consume_stock(mem)) { + if (page_size == PAGE_SIZE && consume_stock(mem)) { /* * It seems dagerous to access memcg without css_get(). * But considering how consume_stok works, it's not @@ -1981,7 +1981,7 @@ again: case CHARGE_OK: break; case CHARGE_RETRY: /* not in OOM situation but retry */ - csize = PAGE_SIZE; + csize = page_size; css_put(&mem->css); mem = NULL; goto again; @@ -2002,8 +2002,8 @@ again: } } while (ret != CHARGE_OK); - if (csize > PAGE_SIZE) - refill_stock(mem, csize - PAGE_SIZE); + if (csize > page_size) + refill_stock(mem, csize - page_size); css_put(&mem->css); done: *memcg = mem; @@ -2031,9 +2031,10 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, } } -static void mem_cgroup_cancel_charge(struct mem_cgroup *mem) +static void mem_cgroup_cancel_charge(struct mem_cgroup *mem, + int page_size) { - __mem_cgroup_cancel_charge(mem, 1); + __mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT); } /* @@ -2087,22 +2088,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be * USED state. 
If already USED, uncharge and return.
 */
-
-static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
- struct page_cgroup *pc,
- enum charge_type ctype)
+static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page_cgroup *pc,
+ enum charge_type ctype)
 {
- /* try_charge() can return NULL to *memcg, taking care of it. */
- if (!mem)
- return;
-
- lock_page_cgroup(pc);
- if (unlikely(PageCgroupUsed(pc))) {
- unlock_page_cgroup(pc);
- mem_cgroup_cancel_charge(mem);
- return;
- }
-
 pc->mem_cgroup = mem;
 /*
 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2127,6 +2116,33 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 }

 mem_cgroup_charge_statistics(mem, pc, true);
+}
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page_cgroup *pc,
+ enum charge_type ctype,
+ int page_size)
+{
+ int i;
+ int count = page_size >> PAGE_SHIFT;
+
+ /* try_charge() can return NULL to *memcg, taking care of it. */
+ if (!mem)
+ return;
+
+ lock_page_cgroup(pc);
+ if (unlikely(PageCgroupUsed(pc))) {
+ unlock_page_cgroup(pc);
+ mem_cgroup_cancel_charge(mem, page_size);
+ return;
+ }
+
+ /*
+ * we don't need page_cgroup_lock about tail pages, because they are not
+ * accessed by any other context at this point.
+ */
+ for (i = 0; i < count; i++)
+ ____mem_cgroup_commit_charge(mem, pc + i, ctype);
 unlock_page_cgroup(pc);

 /*
@@ -2173,7 +2189,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 mem_cgroup_charge_statistics(from, pc, false);
 if (uncharge)
 /* This is not "cancel", but cancel_charge does all we need. */
- mem_cgroup_cancel_charge(from);
+ mem_cgroup_cancel_charge(from, PAGE_SIZE);

 /* caller should have done css_get */
 pc->mem_cgroup = to;
@@ -2195,9 +2211,13 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
 int ret = -EINVAL;
+ unsigned long flags;
+
 lock_page_cgroup(pc);
 if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+ move_lock_page_cgroup(pc, &flags);
 __mem_cgroup_move_account(pc, from, to, uncharge);
+ move_unlock_page_cgroup(pc, &flags);
 ret = 0;
 }
 unlock_page_cgroup(pc);
@@ -2234,13 +2254,14 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 goto put;

 parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+ PAGE_SIZE);
 if (ret || !parent)
 goto put_back;

 ret = mem_cgroup_move_account(pc, child, parent, true);
 if (ret)
- mem_cgroup_cancel_charge(parent);
+ mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
 putback_lru_page(page);
 put:
@@ -2261,6 +2282,12 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 struct mem_cgroup *mem = NULL;
 struct page_cgroup *pc;
 int ret;
+ int page_size = PAGE_SIZE;
+
+ if (PageTransHuge(page)) {
+ page_size <<= compound_order(page);
+ VM_BUG_ON(!PageTransHuge(page));
+ }

 pc = lookup_page_cgroup(page);
 /* can happen at boot */
@@ -2268,11 +2295,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 return 0;
 prefetchw(pc);

- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
 if (ret || !mem)
 return ret;

- __mem_cgroup_commit_charge(mem, pc, ctype);
+ __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 return 0;
 }

@@ -2281,8 +2308,6 @@ int mem_cgroup_newpage_charge(struct page *page,
 {
 if (mem_cgroup_disabled())
 return 0;
- if
(PageCompound(page)) - return 0; /* * If already mapped, we don't have to account. * If page cache, page->mapping has address_space. @@ -2388,13 +2413,13 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, if (!mem) goto charge_cur_mm; *ptr = mem; - ret = __mem_cgroup_try_charge(NULL, mask, ptr, true); + ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE); css_put(&mem->css); return ret; charge_cur_mm: if (unlikely(!mm)) mm = &init_mm; - return __mem_cgroup_try_charge(mm, mask, ptr, true); + return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE); } static void @@ -2410,7 +2435,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, cgroup_exclude_rmdir(&ptr->css); pc = lookup_page_cgroup(page); mem_cgroup_lru_del_before_commit_swapcache(page); - __mem_cgroup_commit_charge(ptr, pc, ctype); + __mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE); mem_cgroup_lru_add_after_commit_swapcache(page); /* * Now swap is on-memory. This means this page may be @@ -2459,11 +2484,12 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) return; if (!mem) return; - mem_cgroup_cancel_charge(mem); + mem_cgroup_cancel_charge(mem, PAGE_SIZE); } static void -__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype) +__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype, + int page_size) { struct memcg_batch_info *batch = NULL; bool uncharge_memsw = true; @@ -2490,6 +2516,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype) if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) goto direct_uncharge; + if (page_size != PAGE_SIZE) + goto direct_uncharge; + /* * In typical case, batch->memcg == mem. This means we can * merge a series of uncharges to an uncharge of res_counter. 
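For orientation, the charging pattern these memcontrol.c hunks converge on can be condensed as the sketch below. It is illustrative only, a restatement of the patched mem_cgroup_charge_common() shown above rather than additional patch content, and the wrapper name charge_common_sketch is made up for the example.

static int charge_common_sketch(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype)
{
        struct mem_cgroup *mem = NULL;
        struct page_cgroup *pc = lookup_page_cgroup(page);
        int page_size = PAGE_SIZE;
        int ret;

        if (!pc)                /* can happen at boot */
                return 0;

        /* A transparent huge page is charged in one go, hpage sized. */
        if (PageTransHuge(page))
                page_size <<= compound_order(page);

        /* The per-cpu charge stock is only consumed for PAGE_SIZE charges. */
        ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
        if (ret || !mem)
                return ret;

        /* Commit marks the head page_cgroup and every tail page_cgroup used. */
        __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
        return 0;
}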
@@ -2503,9 +2532,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype) batch->memsw_bytes += PAGE_SIZE; return; direct_uncharge: - res_counter_uncharge(&mem->res, PAGE_SIZE); + res_counter_uncharge(&mem->res, page_size); if (uncharge_memsw) - res_counter_uncharge(&mem->memsw, PAGE_SIZE); + res_counter_uncharge(&mem->memsw, page_size); if (unlikely(batch->memcg != mem)) memcg_oom_recover(mem); return; @@ -2517,8 +2546,11 @@ direct_uncharge: static struct mem_cgroup * __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) { + int i; + int count; struct page_cgroup *pc; struct mem_cgroup *mem = NULL; + int page_size = PAGE_SIZE; if (mem_cgroup_disabled()) return NULL; @@ -2526,6 +2558,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) if (PageSwapCache(page)) return NULL; + if (PageTransHuge(page)) { + page_size <<= compound_order(page); + VM_BUG_ON(!PageTransHuge(page)); + } + + count = page_size >> PAGE_SHIFT; /* * Check if our page_cgroup is valid */ @@ -2558,7 +2596,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) break; } - mem_cgroup_charge_statistics(mem, pc, false); + for (i = 0; i < count; i++) + mem_cgroup_charge_statistics(mem, pc + i, false); ClearPageCgroupUsed(pc); /* @@ -2579,7 +2618,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) mem_cgroup_get(mem); } if (!mem_cgroup_is_root(mem)) - __do_uncharge(mem, ctype); + __do_uncharge(mem, ctype, page_size); return mem; @@ -2774,6 +2813,7 @@ int mem_cgroup_prepare_migration(struct page *page, enum charge_type ctype; int ret = 0; + VM_BUG_ON(PageTransHuge(page)); if (mem_cgroup_disabled()) return 0; @@ -2823,7 +2863,7 @@ int mem_cgroup_prepare_migration(struct page *page, return 0; *ptr = mem; - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE); css_put(&mem->css);/* drop extra refcnt */ if (ret || *ptr == NULL) { if (PageAnon(page)) { @@ -2850,13 +2890,13 @@ int mem_cgroup_prepare_migration(struct page *page, ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; else ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; - __mem_cgroup_commit_charge(mem, pc, ctype); + __mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE); return ret; } /* remove redundant charge if migration failed*/ void mem_cgroup_end_migration(struct mem_cgroup *mem, - struct page *oldpage, struct page *newpage) + struct page *oldpage, struct page *newpage, bool migration_ok) { struct page *used, *unused; struct page_cgroup *pc; @@ -2865,8 +2905,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, return; /* blocks rmdir() */ cgroup_exclude_rmdir(&mem->css); - /* at migration success, oldpage->mapping is NULL. 
*/ - if (oldpage->mapping) { + if (!migration_ok) { used = oldpage; unused = newpage; } else { @@ -4176,13 +4215,11 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) */ if (!node_state(node, N_NORMAL_MEMORY)) tmp = -1; - pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp); + pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); if (!pn) return 1; mem->info.nodeinfo[node] = pn; - memset(pn, 0, sizeof(*pn)); - for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; for_each_lru(l) @@ -4206,14 +4243,13 @@ static struct mem_cgroup *mem_cgroup_alloc(void) /* Can be very big if MAX_NUMNODES is very big */ if (size < PAGE_SIZE) - mem = kmalloc(size, GFP_KERNEL); + mem = kzalloc(size, GFP_KERNEL); else - mem = vmalloc(size); + mem = vzalloc(size); if (!mem) return NULL; - memset(mem, 0, size); mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); if (!mem->stat) goto out_free; @@ -4461,7 +4497,8 @@ one_by_one: batch_count = PRECHARGE_COUNT_AT_ONCE; cond_resched(); } - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, + PAGE_SIZE); if (ret || !mem) /* mem_cgroup_clear_mc() will do uncharge later */ return -ENOMEM; @@ -4623,6 +4660,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, pte_t *pte; spinlock_t *ptl; + VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) if (is_target_pte_for_mc(vma, addr, *pte, NULL)) @@ -4638,7 +4676,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) unsigned long precharge; struct vm_area_struct *vma; - /* We've already held the mmap_sem */ + down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { struct mm_walk mem_cgroup_count_precharge_walk = { .pmd_entry = mem_cgroup_count_precharge_pte_range, @@ -4650,6 +4688,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) walk_page_range(vma->vm_start, vma->vm_end, &mem_cgroup_count_precharge_walk); } + up_read(&mm->mmap_sem); precharge = mc.precharge; mc.precharge = 0; @@ -4659,10 +4698,15 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) static int mem_cgroup_precharge_mc(struct mm_struct *mm) { - return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm)); + unsigned long precharge = mem_cgroup_count_precharge(mm); + + VM_BUG_ON(mc.moving_task); + mc.moving_task = current; + return mem_cgroup_do_precharge(precharge); } -static void mem_cgroup_clear_mc(void) +/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ +static void __mem_cgroup_clear_mc(void) { struct mem_cgroup *from = mc.from; struct mem_cgroup *to = mc.to; @@ -4697,23 +4741,28 @@ static void mem_cgroup_clear_mc(void) PAGE_SIZE * mc.moved_swap); } /* we've already done mem_cgroup_get(mc.to) */ - mc.moved_swap = 0; } - if (mc.mm) { - up_read(&mc.mm->mmap_sem); - mmput(mc.mm); - } + memcg_oom_recover(from); + memcg_oom_recover(to); + wake_up_all(&mc.waitq); +} + +static void mem_cgroup_clear_mc(void) +{ + struct mem_cgroup *from = mc.from; + + /* + * we must clear moving_task before waking up waiters at the end of + * task migration. 
+ */ + mc.moving_task = NULL; + __mem_cgroup_clear_mc(); spin_lock(&mc.lock); mc.from = NULL; mc.to = NULL; spin_unlock(&mc.lock); - mc.moving_task = NULL; - mc.mm = NULL; mem_cgroup_end_move(from); - memcg_oom_recover(from); - memcg_oom_recover(to); - wake_up_all(&mc.waitq); } static int mem_cgroup_can_attach(struct cgroup_subsys *ss, @@ -4735,38 +4784,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, return 0; /* We move charges only when we move a owner of the mm */ if (mm->owner == p) { - /* - * We do all the move charge works under one mmap_sem to - * avoid deadlock with down_write(&mmap_sem) - * -> try_charge() -> if (mc.moving_task) -> sleep. - */ - down_read(&mm->mmap_sem); - VM_BUG_ON(mc.from); VM_BUG_ON(mc.to); VM_BUG_ON(mc.precharge); VM_BUG_ON(mc.moved_charge); VM_BUG_ON(mc.moved_swap); - VM_BUG_ON(mc.moving_task); - VM_BUG_ON(mc.mm); - mem_cgroup_start_move(from); spin_lock(&mc.lock); mc.from = from; mc.to = mem; - mc.precharge = 0; - mc.moved_charge = 0; - mc.moved_swap = 0; spin_unlock(&mc.lock); - mc.moving_task = current; - mc.mm = mm; + /* We set mc.moving_task later */ ret = mem_cgroup_precharge_mc(mm); if (ret) mem_cgroup_clear_mc(); - /* We call up_read() and mmput() in clear_mc(). */ - } else - mmput(mm); + } + mmput(mm); } return ret; } @@ -4789,6 +4823,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, spinlock_t *ptl; retry: + VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; addr += PAGE_SIZE) { pte_t ptent = *(pte++); @@ -4854,7 +4889,19 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) struct vm_area_struct *vma; lru_add_drain_all(); - /* We've already held the mmap_sem */ +retry: + if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + /* + * Someone who are holding the mmap_sem might be waiting in + * waitq. So we cancel all extra charges, wake up all waiters, + * and retry. Because we cancel precharges, we might not be able + * to move enough charges, but moving charge is a best-effort + * feature anyway, so it wouldn't be a big problem. + */ + __mem_cgroup_clear_mc(); + cond_resched(); + goto retry; + } for (vma = mm->mmap; vma; vma = vma->vm_next) { int ret; struct mm_walk mem_cgroup_move_charge_walk = { @@ -4873,6 +4920,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) */ break; } + up_read(&mm->mmap_sem); } static void mem_cgroup_move_task(struct cgroup_subsys *ss, @@ -4881,11 +4929,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, struct task_struct *p, bool threadgroup) { - if (!mc.mm) + struct mm_struct *mm; + + if (!mc.to) /* no need to move charge */ return; - mem_cgroup_move_charge(mc.mm); + mm = get_task_mm(p); + if (mm) { + mem_cgroup_move_charge(mm); + mmput(mm); + } mem_cgroup_clear_mc(); } #else /* !CONFIG_MMU */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 46ab2c044b0e..548fbd70f026 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -203,7 +203,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, #ifdef __ARCH_SI_TRAPNO si.si_trapno = trapno; #endif - si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; + si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT; /* * Don't use force here, it's convenient if the signal * can be temporarily blocked. 
@@ -386,6 +386,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, struct task_struct *tsk; struct anon_vma *av; + if (!PageHuge(page) && unlikely(split_huge_page(page))) + return; read_lock(&tasklist_lock); av = page_lock_anon_vma(page); if (av == NULL) /* Not actually mapped anymore */ @@ -928,7 +930,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, static void set_page_hwpoison_huge_page(struct page *hpage) { int i; - int nr_pages = 1 << compound_order(hpage); + int nr_pages = 1 << compound_trans_order(hpage); for (i = 0; i < nr_pages; i++) SetPageHWPoison(hpage + i); } @@ -936,7 +938,7 @@ static void set_page_hwpoison_huge_page(struct page *hpage) static void clear_page_hwpoison_huge_page(struct page *hpage) { int i; - int nr_pages = 1 << compound_order(hpage); + int nr_pages = 1 << compound_trans_order(hpage); for (i = 0; i < nr_pages; i++) ClearPageHWPoison(hpage + i); } @@ -966,7 +968,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) return 0; } - nr_pages = 1 << compound_order(hpage); + nr_pages = 1 << compound_trans_order(hpage); atomic_long_add(nr_pages, &mce_bad_pages); /* @@ -1164,7 +1166,7 @@ int unpoison_memory(unsigned long pfn) return 0; } - nr_pages = 1 << compound_order(page); + nr_pages = 1 << compound_trans_order(page); if (!get_page_unless_zero(page)) { /* @@ -1290,9 +1292,10 @@ static int soft_offline_huge_page(struct page *page, int flags) /* Keep page count to indicate a given hugepage is isolated. */ list_add(&hpage->lru, &pagelist); - ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); + ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0, + true); if (ret) { - putback_lru_pages(&pagelist); + putback_lru_pages(&pagelist); pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", pfn, ret, page->flags); if (ret > 0) @@ -1301,7 +1304,7 @@ static int soft_offline_huge_page(struct page *page, int flags) } done: if (!PageHWPoison(hpage)) - atomic_long_add(1 << compound_order(hpage), &mce_bad_pages); + atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); set_page_hwpoison_huge_page(hpage); dequeue_hwpoisoned_huge_page(hpage); /* keep elevated page count for bad page */ @@ -1413,7 +1416,8 @@ int soft_offline_page(struct page *page, int flags) LIST_HEAD(pagelist); list_add(&page->lru, &pagelist); - ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); + ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, + 0, true); if (ret) { pr_info("soft offline: %#lx: migration failed %d, type %lx\n", pfn, ret, page->flags); diff --git a/mm/memory.c b/mm/memory.c index 02e48aa0ed13..31250faff390 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -394,9 +394,11 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, } } -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd, unsigned long address) { pgtable_t new = pte_alloc_one(mm, address); + int wait_split_huge_page; if (!new) return -ENOMEM; @@ -416,14 +418,18 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ spin_lock(&mm->page_table_lock); - if (!pmd_present(*pmd)) { /* Has another populated it ? */ + wait_split_huge_page = 0; + if (likely(pmd_none(*pmd))) { /* Has another populated it ? 
*/ mm->nr_ptes++; pmd_populate(mm, pmd, new); new = NULL; - } + } else if (unlikely(pmd_trans_splitting(*pmd))) + wait_split_huge_page = 1; spin_unlock(&mm->page_table_lock); if (new) pte_free(mm, new); + if (wait_split_huge_page) + wait_split_huge_page(vma->anon_vma, pmd); return 0; } @@ -436,10 +442,11 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&init_mm.page_table_lock); - if (!pmd_present(*pmd)) { /* Has another populated it ? */ + if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; - } + } else + VM_BUG_ON(pmd_trans_splitting(*pmd)); spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); @@ -719,9 +726,9 @@ out_set_pte: return 0; } -static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, - unsigned long addr, unsigned long end) +int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) { pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; @@ -795,6 +802,17 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src src_pmd = pmd_offset(src_pud, addr); do { next = pmd_addr_end(addr, end); + if (pmd_trans_huge(*src_pmd)) { + int err; + VM_BUG_ON(next-addr != HPAGE_PMD_SIZE); + err = copy_huge_pmd(dst_mm, src_mm, + dst_pmd, src_pmd, addr, vma); + if (err == -ENOMEM) + return -ENOMEM; + if (!err) + continue; + /* fall through */ + } if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, @@ -997,6 +1015,16 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + if (pmd_trans_huge(*pmd)) { + if (next-addr != HPAGE_PMD_SIZE) { + VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); + split_huge_page_pmd(vma->vm_mm, pmd); + } else if (zap_huge_pmd(tlb, vma, pmd)) { + (*zap_work)--; + continue; + } + /* fall through */ + } if (pmd_none_or_clear_bad(pmd)) { (*zap_work)--; continue; @@ -1262,7 +1290,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, pud = pud_offset(pgd, address); if (pud_none(*pud)) goto no_page_table; - if (pud_huge(*pud)) { + if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { BUG_ON(flags & FOLL_GET); page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); goto out; @@ -1273,11 +1301,32 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) goto no_page_table; - if (pmd_huge(*pmd)) { + if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { BUG_ON(flags & FOLL_GET); page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); goto out; } + if (pmd_trans_huge(*pmd)) { + if (flags & FOLL_SPLIT) { + split_huge_page_pmd(mm, pmd); + goto split_fallthrough; + } + spin_lock(&mm->page_table_lock); + if (likely(pmd_trans_huge(*pmd))) { + if (unlikely(pmd_trans_splitting(*pmd))) { + spin_unlock(&mm->page_table_lock); + wait_split_huge_page(vma->anon_vma, pmd); + } else { + page = follow_trans_huge_pmd(mm, address, + pmd, flags); + spin_unlock(&mm->page_table_lock); + goto out; + } + } else + spin_unlock(&mm->page_table_lock); + /* fall through */ + } +split_fallthrough: if (unlikely(pmd_bad(*pmd))) goto no_page_table; @@ -1310,6 +1359,28 @@ struct page *follow_page(struct 
vm_area_struct *vma, unsigned long address, */ mark_page_accessed(page); } + if (flags & FOLL_MLOCK) { + /* + * The preliminary mapping check is mainly to avoid the + * pointless overhead of lock_page on the ZERO_PAGE + * which might bounce very badly if there is contention. + * + * If the page is already locked, we don't need to + * handle it now - vmscan will handle it later if and + * when it attempts to reclaim the page. + */ + if (page->mapping && trylock_page(page)) { + lru_add_drain(); /* push cached pages to LRU */ + /* + * Because we lock page here and migration is + * blocked by the pte's page reference, we need + * only check for file-cache page truncation. + */ + if (page->mapping) + mlock_vma_page(page); + unlock_page(page); + } + } unlock: pte_unmap_unlock(ptep, ptl); out: @@ -1341,7 +1412,8 @@ no_page_table: int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, - struct page **pages, struct vm_area_struct **vmas) + struct page **pages, struct vm_area_struct **vmas, + int *nonblocking) { int i; unsigned long vm_flags; @@ -1386,6 +1458,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, pmd = pmd_offset(pud, pg); if (pmd_none(*pmd)) return i ? : -EFAULT; + VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, pg); if (pte_none(*pte)) { pte_unmap(pte); @@ -1441,10 +1514,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, cond_resched(); while (!(page = follow_page(vma, start, foll_flags))) { int ret; + unsigned int fault_flags = 0; + + if (foll_flags & FOLL_WRITE) + fault_flags |= FAULT_FLAG_WRITE; + if (nonblocking) + fault_flags |= FAULT_FLAG_ALLOW_RETRY; ret = handle_mm_fault(mm, vma, start, - (foll_flags & FOLL_WRITE) ? - FAULT_FLAG_WRITE : 0); + fault_flags); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) @@ -1460,6 +1538,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, else tsk->min_flt++; + if (ret & VM_FAULT_RETRY) { + *nonblocking = 0; + return i; + } + /* * The VM_FAULT_WRITE bit tells us that * do_wp_page has broken COW when necessary, @@ -1559,7 +1642,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (force) flags |= FOLL_FORCE; - return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); + return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, + NULL); } EXPORT_SYMBOL(get_user_pages); @@ -1584,7 +1668,8 @@ struct page *get_dump_page(unsigned long addr) struct page *page; if (__get_user_pages(current, current->mm, addr, 1, - FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1) + FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, + NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; @@ -1598,8 +1683,10 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, pud_t * pud = pud_alloc(mm, pgd, addr); if (pud) { pmd_t * pmd = pmd_alloc(mm, pud, addr); - if (pmd) + if (pmd) { + VM_BUG_ON(pmd_trans_huge(*pmd)); return pte_alloc_map_lock(mm, pmd, addr, ptl); + } } return NULL; } @@ -1818,6 +1905,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; + VM_BUG_ON(pmd_trans_huge(*pmd)); do { next = pmd_addr_end(addr, end); if (remap_pte_range(mm, pmd, addr, next, @@ -2048,19 +2136,6 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, return same; } -/* - * Do pte_mkwrite, but only if the vma says VM_WRITE. 
We do this when - * servicing faults for write access. In the normal case, do always want - * pte_mkwrite. But get_user_pages can cause write faults for mappings - * that do not have writing enabled, when used by access_process_vm. - */ -static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) -{ - if (likely(vma->vm_flags & VM_WRITE)) - pte = pte_mkwrite(pte); - return pte; -} - static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) { /* @@ -2112,7 +2187,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, { struct page *old_page, *new_page; pte_t entry; - int reuse = 0, ret = 0; + int ret = 0; int page_mkwrite = 0; struct page *dirty_page = NULL; @@ -2149,14 +2224,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, } page_cache_release(old_page); } - reuse = reuse_swap_page(old_page); - if (reuse) + if (reuse_swap_page(old_page)) { /* * The page is all ours. Move it to our anon_vma so * the rmap code will not search our parent or siblings. * Protected against the rmap code by the page lock. */ page_move_anon_rmap(old_page, vma, address); + unlock_page(old_page); + goto reuse; + } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { @@ -2220,18 +2297,52 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, } dirty_page = old_page; get_page(dirty_page); - reuse = 1; - } - if (reuse) { reuse: flush_cache_page(vma, address, pte_pfn(orig_pte)); entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, address, page_table, entry,1)) update_mmu_cache(vma, address, page_table); + pte_unmap_unlock(page_table, ptl); ret |= VM_FAULT_WRITE; - goto unlock; + + if (!dirty_page) + return ret; + + /* + * Yes, Virginia, this is actually required to prevent a race + * with clear_page_dirty_for_io() from clearing the page dirty + * bit after it clear all dirty ptes, but before a racing + * do_wp_page installs a dirty pte. + * + * do_no_page is protected similarly. + */ + if (!page_mkwrite) { + wait_on_page_locked(dirty_page); + set_page_dirty_balance(dirty_page, page_mkwrite); + } + put_page(dirty_page); + if (page_mkwrite) { + struct address_space *mapping = dirty_page->mapping; + + set_page_dirty(dirty_page); + unlock_page(dirty_page); + page_cache_release(dirty_page); + if (mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + } + + /* file_update_time outside page_lock */ + if (vma->vm_file) + file_update_time(vma->vm_file); + + return ret; } /* @@ -2337,39 +2448,6 @@ gotten: page_cache_release(old_page); unlock: pte_unmap_unlock(page_table, ptl); - if (dirty_page) { - /* - * Yes, Virginia, this is actually required to prevent a race - * with clear_page_dirty_for_io() from clearing the page dirty - * bit after it clear all dirty ptes, but before a racing - * do_wp_page installs a dirty pte. - * - * do_no_page is protected similarly. 
- */ - if (!page_mkwrite) { - wait_on_page_locked(dirty_page); - set_page_dirty_balance(dirty_page, page_mkwrite); - } - put_page(dirty_page); - if (page_mkwrite) { - struct address_space *mapping = dirty_page->mapping; - - set_page_dirty(dirty_page); - unlock_page(dirty_page); - page_cache_release(dirty_page); - if (mapping) { - /* - * Some device drivers do not set page.mapping - * but still dirty their pages - */ - balance_dirty_pages_ratelimited(mapping); - } - } - - /* file_update_time outside page_lock */ - if (vma->vm_file) - file_update_time(vma->vm_file); - } return ret; oom_free_new: page_cache_release(new_page); @@ -3147,9 +3225,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ -static inline int handle_pte_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - pte_t *pte, pmd_t *pmd, unsigned int flags) +int handle_pte_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + pte_t *pte, pmd_t *pmd, unsigned int flags) { pte_t entry; spinlock_t *ptl; @@ -3228,9 +3306,40 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, pmd = pmd_alloc(mm, pud, address); if (!pmd) return VM_FAULT_OOM; - pte = pte_alloc_map(mm, pmd, address); - if (!pte) + if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { + if (!vma->vm_ops) + return do_huge_pmd_anonymous_page(mm, vma, address, + pmd, flags); + } else { + pmd_t orig_pmd = *pmd; + barrier(); + if (pmd_trans_huge(orig_pmd)) { + if (flags & FAULT_FLAG_WRITE && + !pmd_write(orig_pmd) && + !pmd_trans_splitting(orig_pmd)) + return do_huge_pmd_wp_page(mm, vma, address, + pmd, orig_pmd); + return 0; + } + } + + /* + * Use __pte_alloc instead of pte_alloc_map, because we can't + * run pte_offset_map on the pmd, if an huge pmd could + * materialize from under us from a different thread. + */ + if (unlikely(__pte_alloc(mm, vma, pmd, address))) return VM_FAULT_OOM; + /* if an huge pmd materialized from under us just retry later */ + if (unlikely(pmd_trans_huge(*pmd))) + return 0; + /* + * A regular pmd is established and it can't morph into a huge pmd + * from under us anymore at this point because we hold the mmap_sem + * read mode and khugepaged takes it in write mode. So now it's + * safe to run pte_offset_map(). + */ + pte = pte_offset_map(pmd, address); return handle_pte_fault(mm, vma, address, pte, pmd, flags); } @@ -3296,7 +3405,12 @@ int make_pages_present(unsigned long addr, unsigned long end) vma = find_vma(current->mm, addr); if (!vma) return -ENOMEM; - write = (vma->vm_flags & VM_WRITE) != 0; + /* + * We want to touch writable mappings with a write fault in order + * to break COW, except for shared mappings because these don't COW + * and we would not want to dirty them for nothing. 
+ */ + write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE; BUG_ON(addr >= end); BUG_ON(end > vma->vm_end); len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; @@ -3368,6 +3482,7 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address, goto out; pmd = pmd_offset(pud, address); + VM_BUG_ON(pmd_trans_huge(*pmd)); if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) goto out; @@ -3608,3 +3723,74 @@ void might_fault(void) } EXPORT_SYMBOL(might_fault); #endif + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) +static void clear_gigantic_page(struct page *page, + unsigned long addr, + unsigned int pages_per_huge_page) +{ + int i; + struct page *p = page; + + might_sleep(); + for (i = 0; i < pages_per_huge_page; + i++, p = mem_map_next(p, page, i)) { + cond_resched(); + clear_user_highpage(p, addr + i * PAGE_SIZE); + } +} +void clear_huge_page(struct page *page, + unsigned long addr, unsigned int pages_per_huge_page) +{ + int i; + + if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { + clear_gigantic_page(page, addr, pages_per_huge_page); + return; + } + + might_sleep(); + for (i = 0; i < pages_per_huge_page; i++) { + cond_resched(); + clear_user_highpage(page + i, addr + i * PAGE_SIZE); + } +} + +static void copy_user_gigantic_page(struct page *dst, struct page *src, + unsigned long addr, + struct vm_area_struct *vma, + unsigned int pages_per_huge_page) +{ + int i; + struct page *dst_base = dst; + struct page *src_base = src; + + for (i = 0; i < pages_per_huge_page; ) { + cond_resched(); + copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); + + i++; + dst = mem_map_next(dst, dst_base, i); + src = mem_map_next(src, src_base, i); + } +} + +void copy_user_huge_page(struct page *dst, struct page *src, + unsigned long addr, struct vm_area_struct *vma, + unsigned int pages_per_huge_page) +{ + int i; + + if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { + copy_user_gigantic_page(dst, src, addr, vma, + pages_per_huge_page); + return; + } + + might_sleep(); + for (i = 0; i < pages_per_huge_page; i++) { + cond_resched(); + copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); + } +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2c6523af5473..e92f04749fcb 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -82,9 +82,10 @@ static void release_memory_resource(struct resource *res) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE #ifndef CONFIG_SPARSEMEM_VMEMMAP -static void get_page_bootmem(unsigned long info, struct page *page, int type) +static void get_page_bootmem(unsigned long info, struct page *page, + unsigned long type) { - atomic_set(&page->_mapcount, type); + page->lru.next = (struct list_head *) type; SetPagePrivate(page); set_page_private(page, info); atomic_inc(&page->_count); @@ -94,15 +95,16 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type) * so use __ref to tell modpost not to generate a warning */ void __ref put_page_bootmem(struct page *page) { - int type; + unsigned long type; - type = atomic_read(&page->_mapcount); - BUG_ON(type >= -1); + type = (unsigned long) page->lru.next; + BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || + type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE); if (atomic_dec_return(&page->_count) == 1) { ClearPagePrivate(page); set_page_private(page, 0); - reset_page_mapcount(page); + INIT_LIST_HEAD(&page->lru); __free_pages_bootmem(page, 0); } @@ -733,7 +735,8 @@ do_migrate_range(unsigned long start_pfn, 
unsigned long end_pfn) goto out; } /* this function returns # of failed pages */ - ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); + ret = migrate_pages(&source, hotremove_migrate_alloc, 0, + true, true); if (ret) putback_lru_pages(&source); } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 11ff260fb282..368fc9d23610 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -514,6 +514,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + split_huge_page_pmd(vma->vm_mm, pmd); if (pmd_none_or_clear_bad(pmd)) continue; if (check_pte_range(vma, pmd, addr, next, nodes, @@ -935,7 +936,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, return PTR_ERR(vma); if (!list_empty(&pagelist)) { - err = migrate_pages(&pagelist, new_node_page, dest, 0); + err = migrate_pages(&pagelist, new_node_page, dest, + false, true); if (err) putback_lru_pages(&pagelist); } @@ -1155,7 +1157,8 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) { nr_failed = migrate_pages(&pagelist, new_vma_page, - (unsigned long)vma, 0); + (unsigned long)vma, + false, true); if (nr_failed) putback_lru_pages(&pagelist); } @@ -1308,16 +1311,13 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, /* Find the mm_struct */ rcu_read_lock(); - read_lock(&tasklist_lock); task = pid ? find_task_by_vpid(pid) : current; if (!task) { - read_unlock(&tasklist_lock); rcu_read_unlock(); err = -ESRCH; goto out; } mm = get_task_mm(task); - read_unlock(&tasklist_lock); rcu_read_unlock(); err = -EINVAL; @@ -1796,7 +1796,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, } /** - * alloc_page_vma - Allocate a page for a VMA. + * alloc_pages_vma - Allocate a page for a VMA. * * @gfp: * %GFP_USER user allocation. @@ -1805,6 +1805,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, * %GFP_FS allocation should not call back into a file system. * %GFP_ATOMIC don't sleep. * + * @order:Order of the GFP allocation. * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @@ -1818,7 +1819,8 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, * Should be called with the mm_sem of the vma hold. 
*/ struct page * -alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) +alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol = get_vma_policy(current, vma, addr); struct zonelist *zl; @@ -1830,7 +1832,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); mpol_cond_put(pol); - page = alloc_page_interleave(gfp, 0, nid); + page = alloc_page_interleave(gfp, order, nid); put_mems_allowed(); return page; } @@ -1839,7 +1841,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) /* * slow path: ref counted shared policy */ - struct page *page = __alloc_pages_nodemask(gfp, 0, + struct page *page = __alloc_pages_nodemask(gfp, order, zl, policy_nodemask(gfp, pol)); __mpol_put(pol); put_mems_allowed(); @@ -1848,7 +1850,8 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) /* * fast path: default or task policy */ - page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol)); + page = __alloc_pages_nodemask(gfp, order, zl, + policy_nodemask(gfp, pol)); put_mems_allowed(); return page; } diff --git a/mm/migrate.c b/mm/migrate.c index 6ae8a66a7045..46fe8cc13d67 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -113,6 +113,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, goto out; pmd = pmd_offset(pud, addr); + if (pmd_trans_huge(*pmd)) + goto out; if (!pmd_present(*pmd)) goto out; @@ -246,7 +248,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, expected_count = 2 + page_has_private(page); if (page_count(page) != expected_count || - (struct page *)radix_tree_deref_slot(pslot) != page) { + radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); return -EAGAIN; } @@ -318,7 +320,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, expected_count = 2 + page_has_private(page); if (page_count(page) != expected_count || - (struct page *)radix_tree_deref_slot(pslot) != page) { + radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); return -EAGAIN; } @@ -614,13 +616,12 @@ static int move_to_new_page(struct page *newpage, struct page *page, * to the newly allocated page in newpage. */ static int unmap_and_move(new_page_t get_new_page, unsigned long private, - struct page *page, int force, int offlining) + struct page *page, int force, bool offlining, bool sync) { int rc = 0; int *result = NULL; struct page *newpage = get_new_page(page, private, &result); int remap_swapcache = 1; - int rcu_locked = 0; int charge = 0; struct mem_cgroup *mem = NULL; struct anon_vma *anon_vma = NULL; @@ -632,6 +633,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, /* page was freed from under us. So we are done. */ goto move_newpage; } + if (unlikely(PageTransHuge(page))) + if (unlikely(split_huge_page(page))) + goto move_newpage; /* prepare cgroup just returns 0 or -ENOMEM */ rc = -EAGAIN; @@ -639,6 +643,23 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, if (!trylock_page(page)) { if (!force) goto move_newpage; + + /* + * It's not safe for direct compaction to call lock_page. + * For example, during page readahead pages are added locked + * to the LRU. Later, when the IO completes the pages are + * marked uptodate and unlocked. 
However, the queueing + * could be merging multiple pages for one bio (e.g. + * mpage_readpages). If an allocation happens for the + * second or third page, the process can end up locking + * the same page twice and deadlocking. Rather than + * trying to be clever about what pages can be locked, + * avoid the use of lock_page for direct compaction + * altogether. + */ + if (current->flags & PF_MEMALLOC) + goto move_newpage; + lock_page(page); } @@ -665,27 +686,33 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, BUG_ON(charge); if (PageWriteback(page)) { - if (!force) + if (!force || !sync) goto uncharge; wait_on_page_writeback(page); } /* * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, * we cannot notice that anon_vma is freed while we migrates a page. - * This rcu_read_lock() delays freeing anon_vma pointer until the end + * This get_anon_vma() delays freeing anon_vma pointer until the end * of migration. File cache pages are no problem because of page_lock() * File Caches may use write_page() or lock_page() in migration, then, * just care Anon page here. */ if (PageAnon(page)) { - rcu_read_lock(); - rcu_locked = 1; - - /* Determine how to safely use anon_vma */ - if (!page_mapped(page)) { - if (!PageSwapCache(page)) - goto rcu_unlock; - + /* + * Only page_lock_anon_vma() understands the subtleties of + * getting a hold on an anon_vma from outside one of its mms. + */ + anon_vma = page_lock_anon_vma(page); + if (anon_vma) { + /* + * Take a reference count on the anon_vma if the + * page is mapped so that it is guaranteed to + * exist when the page is remapped later + */ + get_anon_vma(anon_vma); + page_unlock_anon_vma(anon_vma); + } else if (PageSwapCache(page)) { /* * We cannot be sure that the anon_vma of an unmapped * swapcache page is safe to use because we don't @@ -700,13 +727,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, */ remap_swapcache = 0; } else { - /* - * Take a reference count on the anon_vma if the - * page is mapped so that it is guaranteed to - * exist when the page is remapped later - */ - anon_vma = page_anon_vma(page); - get_anon_vma(anon_vma); + goto uncharge; } } @@ -723,16 +744,10 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, * free the metadata, so the page can be freed. */ if (!page->mapping) { - if (!PageAnon(page) && page_has_private(page)) { - /* - * Go direct to try_to_free_buffers() here because - * a) that's what try_to_release_page() would do anyway - * b) we may be under rcu_read_lock() here, so we can't - * use GFP_KERNEL which is what try_to_release_page() - * needs to be effective. 
- */ + VM_BUG_ON(PageAnon(page)); + if (page_has_private(page)) { try_to_free_buffers(page); - goto rcu_unlock; + goto uncharge; } goto skip_unmap; } @@ -746,17 +761,14 @@ skip_unmap: if (rc && remap_swapcache) remove_migration_ptes(page, page); -rcu_unlock: /* Drop an anon_vma reference if we took one */ if (anon_vma) drop_anon_vma(anon_vma); - if (rcu_locked) - rcu_read_unlock(); uncharge: if (!charge) - mem_cgroup_end_migration(mem, page, newpage); + mem_cgroup_end_migration(mem, page, newpage, rc == 0); unlock: unlock_page(page); @@ -810,12 +822,11 @@ move_newpage: */ static int unmap_and_move_huge_page(new_page_t get_new_page, unsigned long private, struct page *hpage, - int force, int offlining) + int force, bool offlining, bool sync) { int rc = 0; int *result = NULL; struct page *new_hpage = get_new_page(hpage, private, &result); - int rcu_locked = 0; struct anon_vma *anon_vma = NULL; if (!new_hpage) @@ -824,18 +835,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, rc = -EAGAIN; if (!trylock_page(hpage)) { - if (!force) + if (!force || !sync) goto out; lock_page(hpage); } if (PageAnon(hpage)) { - rcu_read_lock(); - rcu_locked = 1; - - if (page_mapped(hpage)) { - anon_vma = page_anon_vma(hpage); - atomic_inc(&anon_vma->external_refcount); + anon_vma = page_lock_anon_vma(hpage); + if (anon_vma) { + get_anon_vma(anon_vma); + page_unlock_anon_vma(anon_vma); } } @@ -847,16 +856,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, if (rc) remove_migration_ptes(hpage, hpage); - if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, - &anon_vma->lock)) { - int empty = list_empty(&anon_vma->head); - spin_unlock(&anon_vma->lock); - if (empty) - anon_vma_free(anon_vma); - } - - if (rcu_locked) - rcu_read_unlock(); + if (anon_vma) + drop_anon_vma(anon_vma); out: unlock_page(hpage); @@ -892,7 +893,8 @@ out: * Return: Number of pages not migrated or error code. 
*/ int migrate_pages(struct list_head *from, - new_page_t get_new_page, unsigned long private, int offlining) + new_page_t get_new_page, unsigned long private, bool offlining, + bool sync) { int retry = 1; int nr_failed = 0; @@ -912,7 +914,8 @@ int migrate_pages(struct list_head *from, cond_resched(); rc = unmap_and_move(get_new_page, private, - page, pass > 2, offlining); + page, pass > 2, offlining, + sync); switch(rc) { case -ENOMEM: @@ -941,7 +944,8 @@ out: } int migrate_huge_pages(struct list_head *from, - new_page_t get_new_page, unsigned long private, int offlining) + new_page_t get_new_page, unsigned long private, bool offlining, + bool sync) { int retry = 1; int nr_failed = 0; @@ -957,7 +961,8 @@ int migrate_huge_pages(struct list_head *from, cond_resched(); rc = unmap_and_move_huge_page(get_new_page, - private, page, pass > 2, offlining); + private, page, pass > 2, offlining, + sync); switch(rc) { case -ENOMEM: @@ -1042,7 +1047,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm, if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) goto set_status; - page = follow_page(vma, pp->addr, FOLL_GET); + page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); err = PTR_ERR(page); if (IS_ERR(page)) @@ -1090,7 +1095,7 @@ set_status: err = 0; if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, new_page_node, - (unsigned long)pm, 0); + (unsigned long)pm, 0, true); if (err) putback_lru_pages(&pagelist); } diff --git a/mm/mincore.c b/mm/mincore.c index 9ac42dc6d7b6..a4e6b9d75c76 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -154,6 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + if (pmd_trans_huge(*pmd)) { + if (mincore_huge_pmd(vma, pmd, addr, next, vec)) { + vec += (next - addr) >> PAGE_SHIFT; + continue; + } + /* fall through */ + } if (pmd_none_or_clear_bad(pmd)) mincore_unmapped_range(vma, addr, next, vec); else diff --git a/mm/mlock.c b/mm/mlock.c index b70919ce4f72..13e81ee8be9d 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -155,13 +155,12 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add * vma->vm_mm->mmap_sem must be held for at least read. */ static long __mlock_vma_pages_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, + int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long addr = start; - struct page *pages[16]; /* 16 gives a reasonable batch */ int nr_pages = (end - start) / PAGE_SIZE; - int ret = 0; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); @@ -170,73 +169,26 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(end > vma->vm_end); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - gup_flags = FOLL_TOUCH | FOLL_GET; - if (vma->vm_flags & VM_WRITE) + gup_flags = FOLL_TOUCH; + /* + * We want to touch writable mappings with a write fault in order + * to break COW, except for shared mappings because these don't COW + * and we would not want to dirty them for nothing. + */ + if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; + if (vma->vm_flags & VM_LOCKED) + gup_flags |= FOLL_MLOCK; + /* We don't try to access the guard page of a stack vma */ if (stack_guard_page(vma, start)) { addr += PAGE_SIZE; nr_pages--; } - while (nr_pages > 0) { - int i; - - cond_resched(); - - /* - * get_user_pages makes pages present if we are - * setting mlock. 
and this extra reference count will - * disable migration of this page. However, page may - * still be truncated out from under us. - */ - ret = __get_user_pages(current, mm, addr, - min_t(int, nr_pages, ARRAY_SIZE(pages)), - gup_flags, pages, NULL); - /* - * This can happen for, e.g., VM_NONLINEAR regions before - * a page has been allocated and mapped at a given offset, - * or for addresses that map beyond end of a file. - * We'll mlock the pages if/when they get faulted in. - */ - if (ret < 0) - break; - - lru_add_drain(); /* push cached pages to LRU */ - - for (i = 0; i < ret; i++) { - struct page *page = pages[i]; - - if (page->mapping) { - /* - * That preliminary check is mainly to avoid - * the pointless overhead of lock_page on the - * ZERO_PAGE: which might bounce very badly if - * there is contention. However, we're still - * dirtying its cacheline with get/put_page: - * we'll add another __get_user_pages flag to - * avoid it if that case turns out to matter. - */ - lock_page(page); - /* - * Because we lock page here and migration is - * blocked by the elevated reference, we need - * only check for file-cache page truncation. - */ - if (page->mapping) - mlock_vma_page(page); - unlock_page(page); - } - put_page(page); /* ref from get_user_pages() */ - } - - addr += ret * PAGE_SIZE; - nr_pages -= ret; - ret = 0; - } - - return ret; /* 0 or negative error code */ + return __get_user_pages(current, mm, addr, nr_pages, gup_flags, + NULL, NULL, nonblocking); } /* @@ -280,7 +232,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma, is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))) { - __mlock_vma_pages_range(vma, start, end); + __mlock_vma_pages_range(vma, start, end, NULL); /* Hide errors from mmap() and other callers */ return 0; @@ -372,18 +324,10 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, int ret = 0; int lock = newflags & VM_LOCKED; - if (newflags == vma->vm_flags || - (vma->vm_flags & (VM_IO | VM_PFNMAP))) + if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || + is_vm_hugetlb_page(vma) || vma == get_gate_vma(current)) goto out; /* don't set VM_LOCKED, don't count */ - if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) || - is_vm_hugetlb_page(vma) || - vma == get_gate_vma(current)) { - if (lock) - make_pages_present(start, end); - goto out; /* don't set VM_LOCKED, don't count */ - } - pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); @@ -419,14 +363,10 @@ success: * set VM_LOCKED, __mlock_vma_pages_range will bring it back. 
*/ - if (lock) { + if (lock) vma->vm_flags = newflags; - ret = __mlock_vma_pages_range(vma, start, end); - if (ret < 0) - ret = __mlock_posix_error_return(ret); - } else { + else munlock_vma_pages_range(vma, start, end); - } out: *prev = vma; @@ -439,7 +379,8 @@ static int do_mlock(unsigned long start, size_t len, int on) struct vm_area_struct * vma, * prev; int error; - len = PAGE_ALIGN(len); + VM_BUG_ON(start & ~PAGE_MASK); + VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; @@ -482,6 +423,62 @@ static int do_mlock(unsigned long start, size_t len, int on) return error; } +static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors) +{ + struct mm_struct *mm = current->mm; + unsigned long end, nstart, nend; + struct vm_area_struct *vma = NULL; + int locked = 0; + int ret = 0; + + VM_BUG_ON(start & ~PAGE_MASK); + VM_BUG_ON(len != PAGE_ALIGN(len)); + end = start + len; + + for (nstart = start; nstart < end; nstart = nend) { + /* + * We want to fault in pages for [nstart; end) address range. + * Find first corresponding VMA. + */ + if (!locked) { + locked = 1; + down_read(&mm->mmap_sem); + vma = find_vma(mm, nstart); + } else if (nstart >= vma->vm_end) + vma = vma->vm_next; + if (!vma || vma->vm_start >= end) + break; + /* + * Set [nstart; nend) to intersection of desired address + * range with the first VMA. Also, skip undesirable VMA types. + */ + nend = min(end, vma->vm_end); + if (vma->vm_flags & (VM_IO | VM_PFNMAP)) + continue; + if (nstart < vma->vm_start) + nstart = vma->vm_start; + /* + * Now fault in a range of pages. __mlock_vma_pages_range() + * double checks the vma flags, so that it won't mlock pages + * if the vma was already munlocked. + */ + ret = __mlock_vma_pages_range(vma, nstart, nend, &locked); + if (ret < 0) { + if (ignore_errors) { + ret = 0; + continue; /* continue at next VMA */ + } + ret = __mlock_posix_error_return(ret); + break; + } + nend = nstart + ret * PAGE_SIZE; + ret = 0; + } + if (locked) + up_read(&mm->mmap_sem); + return ret; /* 0 or negative error code */ +} + SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { unsigned long locked; @@ -507,6 +504,8 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); up_write(¤t->mm->mmap_sem); + if (!error) + error = do_mlock_pages(start, len, 0); return error; } @@ -571,6 +570,10 @@ SYSCALL_DEFINE1(mlockall, int, flags) capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); up_write(¤t->mm->mmap_sem); + if (!ret && (flags & MCL_CURRENT)) { + /* Ignore errors */ + do_mlock_pages(0, TASK_SIZE, 1); + } out: return ret; } diff --git a/mm/mmap.c b/mm/mmap.c index 50a4aa0255a0..2ec8eb5a9cdd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -253,7 +254,15 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) down_write(&mm->mmap_sem); #ifdef CONFIG_COMPAT_BRK - min_brk = mm->end_code; + /* + * CONFIG_COMPAT_BRK can still be overridden by setting + * randomize_va_space to 2, which will still cause mm->start_brk + * to be arbitrarily shifted + */ + if (mm->start_brk > PAGE_ALIGN(mm->end_data)) + min_brk = mm->start_brk; + else + min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif @@ -588,6 +597,8 @@ again: remove_next = 1 + (end > next->vm_end); } } + vma_adjust_trans_huge(vma, start, end, adjust_next); + /* * When changing only vma->vm_end, we don't really need anon_vma * lock. 
This is a fairly rare case by itself, but the anon_vma @@ -815,6 +826,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, end, prev->vm_pgoff, NULL); if (err) return NULL; + khugepaged_enter_vma_merge(prev); return prev; } @@ -833,6 +845,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, next->vm_pgoff - pglen, NULL); if (err) return NULL; + khugepaged_enter_vma_merge(area); return area; } @@ -1761,6 +1774,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } } vma_unlock_anon_vma(vma); + khugepaged_enter_vma_merge(vma); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ @@ -1808,6 +1822,7 @@ static int expand_downwards(struct vm_area_struct *vma, } } vma_unlock_anon_vma(vma); + khugepaged_enter_vma_merge(vma); return error; } diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 438951d366f2..8d032de4088e 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -100,6 +100,26 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, return young; } +int __mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + int young = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->test_young) { + young = mn->ops->test_young(mn, mm, address); + if (young) + break; + } + } + rcu_read_unlock(); + + return young; +} + void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) { diff --git a/mm/mmzone.c b/mm/mmzone.c index e35bfb82c855..f5b7d1760213 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -87,24 +87,3 @@ int memmap_valid_within(unsigned long pfn, return 1; } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ - -#ifdef CONFIG_SMP -/* Called when a more accurate view of NR_FREE_PAGES is needed */ -unsigned long zone_nr_free_pages(struct zone *zone) -{ - unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES); - - /* - * While kswapd is awake, it is considered the zone is under some - * memory pressure. Under pressure, there is a risk that - * per-cpu-counter-drift will allow the min watermark to be breached - * potentially causing a live-lock. 
While kswapd is awake and - * free pages are low, get a better estimate for free pages - */ - if (nr_free_pages < zone->percpu_drift_mark && - !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) - return zone_page_state_snapshot(zone, NR_FREE_PAGES); - - return nr_free_pages; -} -#endif /* CONFIG_SMP */ diff --git a/mm/mprotect.c b/mm/mprotect.c index 4c5133873097..5a688a2756be 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -78,7 +78,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd, pte_unmap_unlock(pte - 1, ptl); } -static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud, +static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable) { @@ -88,13 +88,21 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + if (pmd_trans_huge(*pmd)) { + if (next - addr != HPAGE_PMD_SIZE) + split_huge_page_pmd(vma->vm_mm, pmd); + else if (change_huge_pmd(vma, pmd, addr, newprot)) + continue; + /* fall through */ + } if (pmd_none_or_clear_bad(pmd)) continue; - change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable); + change_pte_range(vma->vm_mm, pmd, addr, next, newprot, + dirty_accountable); } while (pmd++, addr = next, addr != end); } -static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd, +static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable) { @@ -106,7 +114,8 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable); + change_pmd_range(vma, pud, addr, next, newprot, + dirty_accountable); } while (pud++, addr = next, addr != end); } @@ -126,7 +135,8 @@ static void change_protection(struct vm_area_struct *vma, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable); + change_pud_range(vma, pgd, addr, next, newprot, + dirty_accountable); } while (pgd++, addr = next, addr != end); flush_tlb_range(vma, start, end); } diff --git a/mm/mremap.c b/mm/mremap.c index 563fbdd6293a..9925b6391b80 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -41,13 +41,15 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) return NULL; pmd = pmd_offset(pud, addr); + split_huge_page_pmd(mm, pmd); if (pmd_none_or_clear_bad(pmd)) return NULL; return pmd; } -static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) +static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr) { pgd_t *pgd; pud_t *pud; @@ -62,7 +64,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) if (!pmd) return NULL; - if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr)) + VM_BUG_ON(pmd_trans_huge(*pmd)); + if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr)) return NULL; return pmd; @@ -147,7 +150,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, old_pmd = get_old_pmd(vma->vm_mm, old_addr); if (!old_pmd) continue; - new_pmd = alloc_new_pmd(vma->vm_mm, new_addr); + new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; next = (new_addr + PMD_SIZE) & PMD_MASK; diff --git a/mm/nommu.c b/mm/nommu.c index ef4045d010d5..f59e1424d3db 100644 --- a/mm/nommu.c +++ b/mm/nommu.c 
@@ -127,7 +127,8 @@ unsigned int kobjsize(const void *objp) int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int foll_flags, - struct page **pages, struct vm_area_struct **vmas) + struct page **pages, struct vm_area_struct **vmas, + int *retry) { struct vm_area_struct *vma; unsigned long vm_flags; @@ -185,7 +186,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (force) flags |= FOLL_FORCE; - return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); + return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, + NULL); } EXPORT_SYMBOL(get_user_pages); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b4edfe7ce06c..2cb01f6ec5d0 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -404,15 +404,18 @@ unsigned long determine_dirtyable_memory(void) * - vm.dirty_background_ratio or vm.dirty_background_bytes * - vm.dirty_ratio or vm.dirty_bytes * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and - * runtime tasks. + * real-time tasks. */ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) { unsigned long background; unsigned long dirty; - unsigned long available_memory = determine_dirtyable_memory(); + unsigned long uninitialized_var(available_memory); struct task_struct *tsk; + if (!vm_dirty_bytes || !dirty_background_bytes) + available_memory = determine_dirtyable_memory(); + if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); else @@ -1103,7 +1106,7 @@ EXPORT_SYMBOL(write_one_page); int __set_page_dirty_no_writeback(struct page *page) { if (!PageDirty(page)) - SetPageDirty(page); + return !TestSetPageDirty(page); return 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ff7e15872398..90c1439549fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -357,6 +357,7 @@ void prep_compound_page(struct page *page, unsigned long order) } } +/* update __split_huge_page_refcount if you change this function */ static int destroy_compound_page(struct page *page, unsigned long order) { int i; @@ -426,18 +427,10 @@ static inline void rmv_page_order(struct page *page) * * Assumption: *_mem_map is contiguous at least up to MAX_ORDER */ -static inline struct page * -__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) -{ - unsigned long buddy_idx = page_idx ^ (1 << order); - - return page + (buddy_idx - page_idx); -} - static inline unsigned long -__find_combined_index(unsigned long page_idx, unsigned int order) +__find_buddy_index(unsigned long page_idx, unsigned int order) { - return (page_idx & ~(1 << order)); + return page_idx ^ (1 << order); } /* @@ -448,8 +441,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order) * (c) a page and its buddy have the same order && * (d) a page and its buddy are in the same zone. * - * For recording whether a page is in the buddy system, we use PG_buddy. - * Setting, clearing, and testing PG_buddy is serialized by zone->lock. + * For recording whether a page is in the buddy system, we set ->_mapcount -2. + * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock. * * For recording page's order, we use page_private(page). */ @@ -482,7 +475,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, * as necessary, plus some accounting needed to play nicely with other * parts of the VM system. 
* At each level, we keep a list of pages, which are heads of continuous - * free pages of length of (1 << order) and marked with PG_buddy. Page's + * free pages of length of (1 << order) and marked with _mapcount -2. Page's * order is recorded in page_private(page) field. * So when we are allocating or freeing one, we can derive the state of the * other. That is, if we allocate a small block, and both were @@ -499,6 +492,7 @@ static inline void __free_one_page(struct page *page, { unsigned long page_idx; unsigned long combined_idx; + unsigned long uninitialized_var(buddy_idx); struct page *buddy; if (unlikely(PageCompound(page))) @@ -513,7 +507,8 @@ static inline void __free_one_page(struct page *page, VM_BUG_ON(bad_range(zone, page)); while (order < MAX_ORDER-1) { - buddy = __page_find_buddy(page, page_idx, order); + buddy_idx = __find_buddy_index(page_idx, order); + buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) break; @@ -521,7 +516,7 @@ static inline void __free_one_page(struct page *page, list_del(&buddy->lru); zone->free_area[order].nr_free--; rmv_page_order(buddy); - combined_idx = __find_combined_index(page_idx, order); + combined_idx = buddy_idx & page_idx; page = page + (combined_idx - page_idx); page_idx = combined_idx; order++; @@ -538,9 +533,10 @@ static inline void __free_one_page(struct page *page, */ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; - combined_idx = __find_combined_index(page_idx, order); - higher_page = page + combined_idx - page_idx; - higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1); + combined_idx = buddy_idx & page_idx; + higher_page = page + (combined_idx - page_idx); + buddy_idx = __find_buddy_index(combined_idx, order + 1); + higher_buddy = page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); @@ -651,13 +647,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order) trace_mm_page_free_direct(page, order); kmemcheck_free_shadow(page, order); - for (i = 0; i < (1 << order); i++) { - struct page *pg = page + i; - - if (PageAnon(pg)) - pg->mapping = NULL; - bad += free_pages_check(pg); - } + if (PageAnon(page)) + page->mapping = NULL; + for (i = 0; i < (1 << order); i++) + bad += free_pages_check(page + i); if (bad) return false; @@ -1460,24 +1453,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) #endif /* CONFIG_FAIL_PAGE_ALLOC */ /* - * Return 1 if free pages are above 'mark'. This takes into account the order + * Return true if free pages are above 'mark'. This takes into account the order * of the allocation. 
*/ -int zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags) +static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags, long free_pages) { /* free_pages my go negative - that's OK */ long min = mark; - long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; int o; + free_pages -= (1 << order) + 1; if (alloc_flags & ALLOC_HIGH) min -= min / 2; if (alloc_flags & ALLOC_HARDER) min -= min / 4; if (free_pages <= min + z->lowmem_reserve[classzone_idx]) - return 0; + return false; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ free_pages -= z->free_area[o].nr_free << o; @@ -1486,9 +1479,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, min >>= 1; if (free_pages <= min) - return 0; + return false; } - return 1; + return true; +} + +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags) +{ + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + zone_page_state(z, NR_FREE_PAGES)); +} + +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags) +{ + long free_pages = zone_page_state(z, NR_FREE_PAGES); + + if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) + free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); + + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + free_pages); } #ifdef CONFIG_NUMA @@ -1793,15 +1805,18 @@ static struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int migratetype, unsigned long *did_some_progress) + int migratetype, unsigned long *did_some_progress, + bool sync_migration) { struct page *page; if (!order || compaction_deferred(preferred_zone)) return NULL; + current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, - nodemask); + nodemask, sync_migration); + current->flags &= ~PF_MEMALLOC; if (*did_some_progress != COMPACT_SKIPPED) { /* Page migration frees to the PCP lists but we want merging */ @@ -1837,7 +1852,8 @@ static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int migratetype, unsigned long *did_some_progress) + int migratetype, unsigned long *did_some_progress, + bool sync_migration) { return NULL; } @@ -1852,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, { struct page *page = NULL; struct reclaim_state reclaim_state; - struct task_struct *p = current; bool drained = false; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); - p->flags |= PF_MEMALLOC; + current->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; - p->reclaim_state = &reclaim_state; + current->reclaim_state = &reclaim_state; *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); - p->reclaim_state = NULL; + current->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); - p->flags &= ~PF_MEMALLOC; + current->flags &= ~PF_MEMALLOC; cond_resched(); @@ -1920,19 +1935,19 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, static inline void 
wake_all_kswapd(unsigned int order, struct zonelist *zonelist, - enum zone_type high_zoneidx) + enum zone_type high_zoneidx, + enum zone_type classzone_idx) { struct zoneref *z; struct zone *zone; for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) - wakeup_kswapd(zone, order); + wakeup_kswapd(zone, order, classzone_idx); } static inline int gfp_to_alloc_flags(gfp_t gfp_mask) { - struct task_struct *p = current; int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; const gfp_t wait = gfp_mask & __GFP_WAIT; @@ -1948,18 +1963,23 @@ gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); if (!wait) { - alloc_flags |= ALLOC_HARDER; + /* + * Not worth trying to allocate harder for + * __GFP_NOMEMALLOC even if it can't schedule. + */ + if (!(gfp_mask & __GFP_NOMEMALLOC)) + alloc_flags |= ALLOC_HARDER; /* * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. * See also cpuset_zone_allowed() comment in kernel/cpuset.c. */ alloc_flags &= ~ALLOC_CPUSET; - } else if (unlikely(rt_task(p)) && !in_interrupt()) + } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { if (!in_interrupt() && - ((p->flags & PF_MEMALLOC) || + ((current->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))) alloc_flags |= ALLOC_NO_WATERMARKS; } @@ -1978,7 +1998,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int alloc_flags; unsigned long pages_reclaimed = 0; unsigned long did_some_progress; - struct task_struct *p = current; + bool sync_migration = false; /* * In the slowpath, we sanity check order to avoid ever trying to @@ -2003,7 +2023,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, goto nopage; restart: - wake_all_kswapd(order, zonelist, high_zoneidx); + if (!(gfp_mask & __GFP_NO_KSWAPD)) + wake_all_kswapd(order, zonelist, high_zoneidx, + zone_idx(preferred_zone)); /* * OK, we're below the kswapd watermark and have kicked background @@ -2034,21 +2056,26 @@ rebalance: goto nopage; /* Avoid recursion of direct reclaim */ - if (p->flags & PF_MEMALLOC) + if (current->flags & PF_MEMALLOC) goto nopage; /* Avoid allocations with no watermarks from looping endlessly */ if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) goto nopage; - /* Try direct compaction */ + /* + * Try direct compaction. The first pass is asynchronous. Subsequent + * attempts after direct reclaim are synchronous + */ page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, high_zoneidx, nodemask, alloc_flags, preferred_zone, - migratetype, &did_some_progress); + migratetype, &did_some_progress, + sync_migration); if (page) goto got_pg; + sync_migration = true; /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, @@ -2102,13 +2129,27 @@ rebalance: /* Wait for some write requests to complete then retry */ wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); goto rebalance; + } else { + /* + * High-order allocations do not necessarily loop after + * direct reclaim and reclaim/compaction depends on compaction + * being called after reclaim so call directly if necessary + */ + page = __alloc_pages_direct_compact(gfp_mask, order, + zonelist, high_zoneidx, + nodemask, + alloc_flags, preferred_zone, + migratetype, &did_some_progress, + sync_migration); + if (page) + goto got_pg; } nopage: if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { printk(KERN_WARNING "%s: page allocation failure." 
" order:%d, mode:0x%x\n", - p->comm, order, gfp_mask); + current->comm, order, gfp_mask); dump_stack(); show_mem(); } @@ -2442,7 +2483,7 @@ void show_free_areas(void) " all_unreclaimable? %s" "\n", zone->name, - K(zone_nr_free_pages(zone)), + K(zone_page_state(zone, NR_FREE_PAGES)), K(min_wmark_pages(zone)), K(low_wmark_pages(zone)), K(high_wmark_pages(zone)), @@ -2585,9 +2626,16 @@ static int __parse_numa_zonelist_order(char *s) static __init int setup_numa_zonelist_order(char *s) { - if (s) - return __parse_numa_zonelist_order(s); - return 0; + int ret; + + if (!s) + return 0; + + ret = __parse_numa_zonelist_order(s); + if (ret == 0) + strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); + + return ret; } early_param("numa_zonelist_order", setup_numa_zonelist_order); @@ -4014,7 +4062,7 @@ static void __init setup_usemap(struct pglist_data *pgdat, zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); } #else -static void inline setup_usemap(struct pglist_data *pgdat, +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) {} #endif /* CONFIG_SPARSEMEM */ @@ -5517,7 +5565,6 @@ static struct trace_print_flags pageflag_names[] = { {1UL << PG_swapcache, "swapcache" }, {1UL << PG_mappedtodisk, "mappedtodisk" }, {1UL << PG_reclaim, "reclaim" }, - {1UL << PG_buddy, "buddy" }, {1UL << PG_swapbacked, "swapbacked" }, {1UL << PG_unevictable, "unevictable" }, #ifdef CONFIG_MMU @@ -5565,7 +5612,7 @@ void dump_page(struct page *page) { printk(KERN_ALERT "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", - page, page_count(page), page_mapcount(page), + page, atomic_read(&page->_count), page_mapcount(page), page->mapping, page->index); dump_page_flags(page->flags); } diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 38cc58b8b2b0..7cfa6ae02303 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -34,6 +34,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + split_huge_page_pmd(walk->mm, pmd); if (pmd_none_or_clear_bad(pmd)) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 7d9c1d0ebd3f..ea534960a04b 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -421,7 +421,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void) return NULL; vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, - pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL); + pcpu_nr_groups, pcpu_atom_size); if (!vms) { pcpu_free_chunk(chunk); return NULL; diff --git a/mm/percpu.c b/mm/percpu.c index 3dd4984bdef8..3f930018aa60 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -258,7 +258,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, /* * (Un)populated page region iterators. Iterate over (un)populated - * page regions betwen @start and @end in @chunk. @rs and @re should + * page regions between @start and @end in @chunk. @rs and @re should * be integer variables and will be set to start and end page index of * the current region. */ diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c new file mode 100644 index 000000000000..d030548047e2 --- /dev/null +++ b/mm/pgtable-generic.c @@ -0,0 +1,123 @@ +/* + * mm/pgtable-generic.c + * + * Generic pgtable methods declared in asm-generic/pgtable.h + * + * Copyright (C) 2010 Linus Torvalds + */ + +#include +#include + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +/* + * Only sets the access flags (dirty, accessed, and + * writable). 
Furthermore, we know it always gets set to a "more + * permissive" setting, which allows most architectures to optimize + * this. We return whether the PTE actually changed, which in turn + * instructs the caller to do things like update__mmu_cache. This + * used to be done in the caller, but sparc needs minor faults to + * force that call on sun4c so we changed this macro slightly + */ +int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) +{ + int changed = !pte_same(*ptep, entry); + if (changed) { + set_pte_at(vma->vm_mm, address, ptep, entry); + flush_tlb_page(vma, address); + } + return changed; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + int changed = !pmd_same(*pmdp, entry); + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + if (changed) { + set_pmd_at(vma->vm_mm, address, pmdp, entry); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + return changed; +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ + BUG(); + return 0; +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + int young; + young = ptep_test_and_clear_young(vma, address, ptep); + if (young) + flush_tlb_page(vma, address); + return young; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; +#ifndef CONFIG_TRANSPARENT_HUGEPAGE + BUG(); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + return young; +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep) +{ + pte_t pte; + pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); + flush_tlb_page(vma, address); + return pte; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH +pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd; +#ifndef CONFIG_TRANSPARENT_HUGEPAGE + BUG(); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + return pmd; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH +pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t pmd = pmd_mksplitting(*pmdp); + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + /* tlb flush only to serialize against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ + BUG(); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +} +#endif diff --git a/mm/rmap.c b/mm/rmap.c index 1a8bf76bfd03..f21f4a1d6a1c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -94,7 +94,7 @@ static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) * anonymous pages mapped into it with that anon_vma. 
* * The common case will be that we already have one, but if - * if not we either need to find an adjacent mapping that we + * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. @@ -177,6 +177,10 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_lock(anon_vma); + /* + * It's critical to add new vmas to the tail of the anon_vma, + * see comment in huge_memory.c:__split_huge_page(). + */ list_add_tail(&avc->same_anon_vma, &anon_vma->head); anon_vma_unlock(anon_vma); } @@ -360,7 +364,7 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma) * Returns virtual address or -EFAULT if page's index/offset is not * within the range mapped the @vma. */ -static inline unsigned long +inline unsigned long vma_address(struct page *page, struct vm_area_struct *vma) { pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); @@ -435,6 +439,8 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm, pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return NULL; + if (pmd_trans_huge(*pmd)) + return NULL; pte = pte_offset_map(pmd, address); /* Make a quick check before getting the lock */ @@ -489,35 +495,17 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; - pte_t *pte; - spinlock_t *ptl; int referenced = 0; - pte = page_check_address(page, mm, address, &ptl, 0); - if (!pte) - goto out; - /* * Don't want to elevate referenced for mlocked page that gets this far, * in order that it progresses to try_to_unmap and is moved to the * unevictable list. */ if (vma->vm_flags & VM_LOCKED) { - *mapcount = 1; /* break early from loop */ + *mapcount = 0; /* break early from loop */ *vm_flags |= VM_LOCKED; - goto out_unmap; - } - - if (ptep_clear_flush_young_notify(vma, address, pte)) { - /* - * Don't treat a reference through a sequentially read - * mapping as such. If the page has been used in - * another mapping, we will catch it; if this other - * mapping is already gone, the unmap path will have - * set PG_referenced or activated the page. - */ - if (likely(!VM_SequentialReadHint(vma))) - referenced++; + goto out; } /* Pretend the page is referenced if the task has the @@ -526,9 +514,39 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, rwsem_is_locked(&mm->mmap_sem)) referenced++; -out_unmap: + if (unlikely(PageTransHuge(page))) { + pmd_t *pmd; + + spin_lock(&mm->page_table_lock); + pmd = page_check_address_pmd(page, mm, address, + PAGE_CHECK_ADDRESS_PMD_FLAG); + if (pmd && !pmd_trans_splitting(*pmd) && + pmdp_clear_flush_young_notify(vma, address, pmd)) + referenced++; + spin_unlock(&mm->page_table_lock); + } else { + pte_t *pte; + spinlock_t *ptl; + + pte = page_check_address(page, mm, address, &ptl, 0); + if (!pte) + goto out; + + if (ptep_clear_flush_young_notify(vma, address, pte)) { + /* + * Don't treat a reference through a sequentially read + * mapping as such. If the page has been used in + * another mapping, we will catch it; if this other + * mapping is already gone, the unmap path will have + * set PG_referenced or activated the page. 
+ */ + if (likely(!VM_SequentialReadHint(vma))) + referenced++; + } + pte_unmap_unlock(pte, ptl); + } + (*mapcount)--; - pte_unmap_unlock(pte, ptl); if (referenced) *vm_flags |= vma->vm_flags; @@ -864,8 +882,13 @@ void do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { int first = atomic_inc_and_test(&page->_mapcount); - if (first) - __inc_zone_page_state(page, NR_ANON_PAGES); + if (first) { + if (!PageTransHuge(page)) + __inc_zone_page_state(page, NR_ANON_PAGES); + else + __inc_zone_page_state(page, + NR_ANON_TRANSPARENT_HUGEPAGES); + } if (unlikely(PageKsm(page))) return; @@ -893,7 +916,10 @@ void page_add_new_anon_rmap(struct page *page, VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ - __inc_zone_page_state(page, NR_ANON_PAGES); + if (!PageTransHuge(page)) + __inc_zone_page_state(page, NR_ANON_PAGES); + else + __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); __page_set_anon_rmap(page, vma, address, 1); if (page_evictable(page, vma)) lru_cache_add_lru(page, LRU_ACTIVE_ANON); @@ -911,7 +937,7 @@ void page_add_file_rmap(struct page *page) { if (atomic_inc_and_test(&page->_mapcount)) { __inc_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_update_file_mapped(page, 1); + mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED); } } @@ -946,10 +972,14 @@ void page_remove_rmap(struct page *page) return; if (PageAnon(page)) { mem_cgroup_uncharge_page(page); - __dec_zone_page_state(page, NR_ANON_PAGES); + if (!PageTransHuge(page)) + __dec_zone_page_state(page, NR_ANON_PAGES); + else + __dec_zone_page_state(page, + NR_ANON_TRANSPARENT_HUGEPAGES); } else { __dec_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_update_file_mapped(page, -1); + mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED); } /* * It would be tidy to reset the PageAnon mapping here, @@ -1202,7 +1232,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, return ret; } -static bool is_vma_temporary_stack(struct vm_area_struct *vma) +bool is_vma_temporary_stack(struct vm_area_struct *vma) { int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); @@ -1400,6 +1430,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) int ret; BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); if (unlikely(PageKsm(page))) ret = try_to_unmap_ksm(page, flags); diff --git a/mm/slub.c b/mm/slub.c index 008cd743a36a..c7ef0070dd86 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3636,7 +3636,7 @@ static int list_locations(struct kmem_cache *s, char *buf, len += sprintf(buf + len, "%7ld ", l->count); if (l->addr) - len += sprint_symbol(buf + len, (unsigned long)l->addr); + len += sprintf(buf + len, "%pS", (void *)l->addr); else len += sprintf(buf + len, ""); @@ -3946,12 +3946,9 @@ SLAB_ATTR(min_partial); static ssize_t ctor_show(struct kmem_cache *s, char *buf) { - if (s->ctor) { - int n = sprint_symbol(buf, (unsigned long)s->ctor); - - return n + sprintf(buf + n, "\n"); - } - return 0; + if (!s->ctor) + return 0; + return sprintf(buf, "%pS\n", s->ctor); } SLAB_ATTR_RO(ctor); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 29d6cbffb283..64b984091edb 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -9,7 +9,7 @@ * * However, virtual mappings need a page table and TLBs. Many Linux * architectures already map their physical space using 1-1 mappings - * via TLBs. 
For those arches the virtual memmory map is essentially + * via TLBs. For those arches the virtual memory map is essentially * for free if we use the same page size as the 1-1 mappings. In that * case the overhead consists of a few additional pages that are * allocated to create a view of memory for vmemmap. diff --git a/mm/sparse.c b/mm/sparse.c index 95ac219af379..93250207c5cf 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -671,10 +671,10 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) static void free_map_bootmem(struct page *page, unsigned long nr_pages) { unsigned long maps_section_nr, removing_section_nr, i; - int magic; + unsigned long magic; for (i = 0; i < nr_pages; i++, page++) { - magic = atomic_read(&page->_mapcount); + magic = (unsigned long) page->lru.next; BUG_ON(magic == NODE_INFO); diff --git a/mm/swap.c b/mm/swap.c index 3f4854205b16..bbc1ce9f9460 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -56,17 +56,97 @@ static void __page_cache_release(struct page *page) del_page_from_lru(zone, page); spin_unlock_irqrestore(&zone->lru_lock, flags); } +} + +static void __put_single_page(struct page *page) +{ + __page_cache_release(page); free_hot_cold_page(page, 0); } -static void put_compound_page(struct page *page) +static void __put_compound_page(struct page *page) { - page = compound_head(page); - if (put_page_testzero(page)) { - compound_page_dtor *dtor; + compound_page_dtor *dtor; - dtor = get_compound_page_dtor(page); - (*dtor)(page); + __page_cache_release(page); + dtor = get_compound_page_dtor(page); + (*dtor)(page); +} + +static void put_compound_page(struct page *page) +{ + if (unlikely(PageTail(page))) { + /* __split_huge_page_refcount can run under us */ + struct page *page_head = page->first_page; + smp_rmb(); + /* + * If PageTail is still set after smp_rmb() we can be sure + * that the page->first_page we read wasn't a dangling pointer. + * See __split_huge_page_refcount() smp_wmb(). + */ + if (likely(PageTail(page) && get_page_unless_zero(page_head))) { + unsigned long flags; + /* + * Verify that our page_head wasn't converted + * to a a regular page before we got a + * reference on it. + */ + if (unlikely(!PageHead(page_head))) { + /* PageHead is cleared after PageTail */ + smp_rmb(); + VM_BUG_ON(PageTail(page)); + goto out_put_head; + } + /* + * Only run compound_lock on a valid PageHead, + * after having it pinned with + * get_page_unless_zero() above. + */ + smp_mb(); + /* page_head wasn't a dangling pointer */ + flags = compound_lock_irqsave(page_head); + if (unlikely(!PageTail(page))) { + /* __split_huge_page_refcount run before us */ + compound_unlock_irqrestore(page_head, flags); + VM_BUG_ON(PageHead(page_head)); + out_put_head: + if (put_page_testzero(page_head)) + __put_single_page(page_head); + out_put_single: + if (put_page_testzero(page)) + __put_single_page(page); + return; + } + VM_BUG_ON(page_head != page->first_page); + /* + * We can release the refcount taken by + * get_page_unless_zero now that + * split_huge_page_refcount is blocked on the + * compound_lock. 
+ */ + if (put_page_testzero(page_head)) + VM_BUG_ON(1); + /* __split_huge_page_refcount will wait now */ + VM_BUG_ON(atomic_read(&page->_count) <= 0); + atomic_dec(&page->_count); + VM_BUG_ON(atomic_read(&page_head->_count) <= 0); + compound_unlock_irqrestore(page_head, flags); + if (put_page_testzero(page_head)) { + if (PageHead(page_head)) + __put_compound_page(page_head); + else + __put_single_page(page_head); + } + } else { + /* page_head is a dangling pointer */ + VM_BUG_ON(PageTail(page)); + goto out_put_single; + } + } else if (put_page_testzero(page)) { + if (PageHead(page)) + __put_compound_page(page); + else + __put_single_page(page); } } @@ -75,7 +155,7 @@ void put_page(struct page *page) if (unlikely(PageCompound(page))) put_compound_page(page); else if (put_page_testzero(page)) - __page_cache_release(page); + __put_single_page(page); } EXPORT_SYMBOL(put_page); @@ -98,15 +178,13 @@ void put_pages_list(struct list_head *pages) } EXPORT_SYMBOL(put_pages_list); -/* - * pagevec_move_tail() must be called with IRQ disabled. - * Otherwise this may cause nasty races. - */ -static void pagevec_move_tail(struct pagevec *pvec) +static void pagevec_lru_move_fn(struct pagevec *pvec, + void (*move_fn)(struct page *page, void *arg), + void *arg) { int i; - int pgmoved = 0; struct zone *zone = NULL; + unsigned long flags = 0; for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; @@ -114,29 +192,49 @@ static void pagevec_move_tail(struct pagevec *pvec) if (pagezone != zone) { if (zone) - spin_unlock(&zone->lru_lock); + spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; - spin_lock(&zone->lru_lock); - } - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - int lru = page_lru_base_type(page); - list_move_tail(&page->lru, &zone->lru[lru].list); - pgmoved++; + spin_lock_irqsave(&zone->lru_lock, flags); } + + (*move_fn)(page, arg); } if (zone) - spin_unlock(&zone->lru_lock); - __count_vm_events(PGROTATED, pgmoved); - release_pages(pvec->pages, pvec->nr, pvec->cold); + spin_unlock_irqrestore(&zone->lru_lock, flags); + release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); pagevec_reinit(pvec); } +static void pagevec_move_tail_fn(struct page *page, void *arg) +{ + int *pgmoved = arg; + struct zone *zone = page_zone(page); + + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { + int lru = page_lru_base_type(page); + list_move_tail(&page->lru, &zone->lru[lru].list); + (*pgmoved)++; + } +} + +/* + * pagevec_move_tail() must be called with IRQ disabled. + * Otherwise this may cause nasty races. + */ +static void pagevec_move_tail(struct pagevec *pvec) +{ + int pgmoved = 0; + + pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); + __count_vm_events(PGROTATED, pgmoved); +} + /* * Writeback is about to end against a page which has been marked for immediate * reclaim. If it still appears to be reclaimable, move it to the tail of the * inactive list. */ -void rotate_reclaimable_page(struct page *page) +void rotate_reclaimable_page(struct page *page) { if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && !PageUnevictable(page) && PageLRU(page)) { @@ -173,27 +271,94 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page, } /* - * FIXME: speed this up? + * A page will go to active list either by activate_page or putback_lru_page. + * In the activate_page case, the page hasn't active bit set. 
The page might + * not in LRU list because it's isolated before it gets a chance to be moved to + * active list. The window is small because pagevec just stores several pages. + * For such case, we do nothing for such page. + * In the putback_lru_page case, the page isn't in lru list but has active + * bit set */ -void activate_page(struct page *page) +static void __activate_page(struct page *page, void *arg) { struct zone *zone = page_zone(page); + int file = page_is_file_cache(page); + int lru = page_lru_base_type(page); + bool putback = !PageLRU(page); - spin_lock_irq(&zone->lru_lock); - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - int file = page_is_file_cache(page); - int lru = page_lru_base_type(page); + /* The page is isolated before it's moved to active list */ + if (!PageLRU(page) && !PageActive(page)) + return; + if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page)) + return; + + if (!putback) del_page_from_lru_list(zone, page, lru); + else + SetPageLRU(page); - SetPageActive(page); - lru += LRU_ACTIVE; - add_page_to_lru_list(zone, page, lru); - __count_vm_event(PGACTIVATE); + SetPageActive(page); + lru += LRU_ACTIVE; + add_page_to_lru_list(zone, page, lru); - update_page_reclaim_stat(zone, page, file, 1); + if (putback) + return; + __count_vm_event(PGACTIVATE); + update_page_reclaim_stat(zone, page, file, 1); +} + +#ifdef CONFIG_SMP +static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); + +static void activate_page_drain(int cpu) +{ + struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); + + if (pagevec_count(pvec)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +} + +void activate_page(struct page *page) +{ + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { + struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); + put_cpu_var(activate_page_pvecs); + } +} + +/* Caller should hold zone->lru_lock */ +int putback_active_lru_page(struct zone *zone, struct page *page) +{ + struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); + + if (!pagevec_add(pvec, page)) { + spin_unlock_irq(&zone->lru_lock); + pagevec_lru_move_fn(pvec, __activate_page, NULL); + spin_lock_irq(&zone->lru_lock); } + put_cpu_var(activate_page_pvecs); + return 1; +} + +#else +static inline void activate_page_drain(int cpu) +{ +} + +void activate_page(struct page *page) +{ + struct zone *zone = page_zone(page); + + spin_lock_irq(&zone->lru_lock); + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) + __activate_page(page, NULL); spin_unlock_irq(&zone->lru_lock); } +#endif /* * Mark a page as having seen activity. 
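
[Editor's note, not part of the patch] The mm/swap.c hunks above introduce a generic helper, pagevec_lru_move_fn(), and a per-CPU pagevec (activate_page_pvecs), so that activate_page(), which previously took zone->lru_lock for every single page, instead queues pages and performs the LRU moves in batches, taking the lock once per run of pages belonging to the same zone. The sketch below is only a minimal user-space illustration of that batch-then-drain pattern under stated assumptions: struct batch, BATCH_SIZE, batch_add(), batch_drain() and activate_one() are names invented for this note, not kernel APIs; BATCH_SIZE stands in for PAGEVEC_SIZE (14 in this kernel series).

#include <stdio.h>

#define BATCH_SIZE 14   /* stand-in for PAGEVEC_SIZE */

struct batch {
    int nr;
    int items[BATCH_SIZE];
};

/* Mirrors pagevec_add(): store one item, return remaining space (0 = full). */
static int batch_add(struct batch *b, int item)
{
    b->items[b->nr++] = item;
    return BATCH_SIZE - b->nr;
}

/*
 * Stand-in for pagevec_lru_move_fn(): do the per-item work for a whole
 * batch at once (in the kernel this is where zone->lru_lock is taken),
 * then reset the batch the way pagevec_reinit() does.
 */
static void batch_drain(struct batch *b, void (*move_fn)(int item))
{
    int i;

    for (i = 0; i < b->nr; i++)
        move_fn(b->items[i]);
    b->nr = 0;
}

static void activate_one(int page)
{
    printf("activate page %d\n", page);
}

int main(void)
{
    struct batch pvec = { 0 };
    int page;

    for (page = 0; page < 40; page++)
        if (!batch_add(&pvec, page))    /* drain only when the batch fills */
            batch_drain(&pvec, activate_one);

    batch_drain(&pvec, activate_one);   /* flush the final partial batch */
    return 0;
}

In the patch itself the same shape appears as activate_page() feeding activate_page_pvecs through pagevec_add(), with activate_page_drain() (called from drain_cpu_pagevecs() in the next hunk) and pagevec_lru_move_fn() doing the locked, batched LRU work.
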
@@ -292,6 +457,7 @@ static void drain_cpu_pagevecs(int cpu) pagevec_move_tail(pvec); local_irq_restore(flags); } + activate_page_drain(cpu); } void lru_add_drain(void) @@ -399,44 +565,70 @@ void __pagevec_release(struct pagevec *pvec) EXPORT_SYMBOL(__pagevec_release); +/* used by __split_huge_page_refcount() */ +void lru_add_page_tail(struct zone* zone, + struct page *page, struct page *page_tail) +{ + int active; + enum lru_list lru; + const int file = 0; + struct list_head *head; + + VM_BUG_ON(!PageHead(page)); + VM_BUG_ON(PageCompound(page_tail)); + VM_BUG_ON(PageLRU(page_tail)); + VM_BUG_ON(!spin_is_locked(&zone->lru_lock)); + + SetPageLRU(page_tail); + + if (page_evictable(page_tail, NULL)) { + if (PageActive(page)) { + SetPageActive(page_tail); + active = 1; + lru = LRU_ACTIVE_ANON; + } else { + active = 0; + lru = LRU_INACTIVE_ANON; + } + update_page_reclaim_stat(zone, page_tail, file, active); + if (likely(PageLRU(page))) + head = page->lru.prev; + else + head = &zone->lru[lru].list; + __add_page_to_lru_list(zone, page_tail, lru, head); + } else { + SetPageUnevictable(page_tail); + add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE); + } +} + +static void ____pagevec_lru_add_fn(struct page *page, void *arg) +{ + enum lru_list lru = (enum lru_list)arg; + struct zone *zone = page_zone(page); + int file = is_file_lru(lru); + int active = is_active_lru(lru); + + VM_BUG_ON(PageActive(page)); + VM_BUG_ON(PageUnevictable(page)); + VM_BUG_ON(PageLRU(page)); + + SetPageLRU(page); + if (active) + SetPageActive(page); + update_page_reclaim_stat(zone, page, file, active); + add_page_to_lru_list(zone, page, lru); +} + /* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. */ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { - int i; - struct zone *zone = NULL; - VM_BUG_ON(is_unevictable_lru(lru)); - for (i = 0; i < pagevec_count(pvec); i++) { - struct page *page = pvec->pages[i]; - struct zone *pagezone = page_zone(page); - int file; - int active; - - if (pagezone != zone) { - if (zone) - spin_unlock_irq(&zone->lru_lock); - zone = pagezone; - spin_lock_irq(&zone->lru_lock); - } - VM_BUG_ON(PageActive(page)); - VM_BUG_ON(PageUnevictable(page)); - VM_BUG_ON(PageLRU(page)); - SetPageLRU(page); - active = is_active_lru(lru); - file = is_file_lru(lru); - if (active) - SetPageActive(page); - update_page_reclaim_stat(zone, page, file, active); - add_page_to_lru_list(zone, page, lru); - } - if (zone) - spin_unlock_irq(&zone->lru_lock); - release_pages(pvec->pages, pvec->nr, pvec->cold); - pagevec_reinit(pvec); + pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru); } EXPORT_SYMBOL(____pagevec_lru_add); diff --git a/mm/swap_state.c b/mm/swap_state.c index e10f5833167f..5c8cfabbc9bc 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -157,6 +157,12 @@ int add_to_swap(struct page *page) if (!entry.val) return 0; + if (unlikely(PageTransHuge(page))) + if (unlikely(split_huge_page(page))) { + swapcache_free(entry, NULL); + return 0; + } + /* * Radix-tree node allocations from PF_MEMALLOC contexts could * completely exhaust the page allocator. 
__GFP_NOMEMALLOC diff --git a/mm/swapfile.c b/mm/swapfile.c index 67ddaaf98c74..07a458d72fa8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -964,6 +964,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); + if (unlikely(pmd_trans_huge(*pmd))) + continue; if (pmd_none_or_clear_bad(pmd)) continue; ret = unuse_pte_range(vma, pmd, addr, next, entry, page); @@ -1677,7 +1679,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) if (S_ISBLK(inode->i_mode)) { struct block_device *bdev = I_BDEV(inode); set_blocksize(bdev, p->old_block_size); - bd_release(bdev); + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); } else { mutex_lock(&inode->i_mutex); inode->i_flags &= ~S_SWAPFILE; @@ -1939,7 +1941,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -EINVAL; if (S_ISBLK(inode->i_mode)) { bdev = I_BDEV(inode); - error = bd_claim(bdev, sys_swapon); + error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, + sys_swapon); if (error < 0) { bdev = NULL; error = -EINVAL; @@ -2136,7 +2139,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) bad_swap: if (bdev) { set_blocksize(bdev, p->old_block_size); - bd_release(bdev); + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); } destroy_swap_extents(p); swap_cgroup_swapoff(type); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index eb5cc7d00c5a..f9b166732e70 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -748,7 +748,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, VMALLOC_START, VMALLOC_END, node, gfp_mask); - if (unlikely(IS_ERR(va))) { + if (IS_ERR(va)) { kfree(vb); return ERR_CAST(va); } @@ -1175,6 +1175,7 @@ void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { vunmap_page_range(addr, addr + size); } +EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); /** * unmap_kernel_range - unmap kernel VM area and flush cache and TLB @@ -1315,13 +1316,6 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, -1, GFP_KERNEL, caller); } -struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, - int node, gfp_t gfp_mask) -{ - return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, - node, gfp_mask, __builtin_return_address(0)); -} - static struct vm_struct *find_vm_area(const void *addr) { struct vmap_area *va; @@ -1537,25 +1531,12 @@ fail: return NULL; } -void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) -{ - void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1, - __builtin_return_address(0)); - - /* - * A ref_count = 3 is needed because the vm_struct and vmap_area - * structures allocated in the __get_vm_area_node() function contain - * references to the virtual address of the vmalloc'ed block. - */ - kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask); - - return addr; -} - /** - * __vmalloc_node - allocate virtually contiguous memory + * __vmalloc_node_range - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment + * @start: vm area range start + * @end: vm area range end * @gfp_mask: flags for the page level allocator * @prot: protection mask for the allocated pages * @node: node to use for allocation or -1 @@ -1565,9 +1546,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) * allocator with @gfp_mask flags. 
Map them into contiguous * kernel virtual space, using a pagetable protection of @prot. */ -static void *__vmalloc_node(unsigned long size, unsigned long align, - gfp_t gfp_mask, pgprot_t prot, - int node, void *caller) +void *__vmalloc_node_range(unsigned long size, unsigned long align, + unsigned long start, unsigned long end, gfp_t gfp_mask, + pgprot_t prot, int node, void *caller) { struct vm_struct *area; void *addr; @@ -1577,8 +1558,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align, if (!size || (size >> PAGE_SHIFT) > totalram_pages) return NULL; - area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START, - VMALLOC_END, node, gfp_mask, caller); + area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node, + gfp_mask, caller); if (!area) return NULL; @@ -1595,6 +1576,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align, return addr; } +/** + * __vmalloc_node - allocate virtually contiguous memory + * @size: allocation size + * @align: desired alignment + * @gfp_mask: flags for the page level allocator + * @prot: protection mask for the allocated pages + * @node: node to use for allocation or -1 + * @caller: caller's return address + * + * Allocate enough pages to cover @size from the page level + * allocator with @gfp_mask flags. Map them into contiguous + * kernel virtual space, using a pagetable protection of @prot. + */ +static void *__vmalloc_node(unsigned long size, unsigned long align, + gfp_t gfp_mask, pgprot_t prot, + int node, void *caller) +{ + return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, + gfp_mask, prot, node, caller); +} + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) { return __vmalloc_node(size, 1, gfp_mask, prot, -1, @@ -2203,17 +2205,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext, * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this - * @gfp_mask: allocation mask * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates - * congruent vmalloc areas for it. These areas tend to be scattered - * pretty far, distance between two areas easily going up to - * gigabytes. To avoid interacting with regular vmallocs, these areas - * are allocated from top. + * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to + * be scattered pretty far, distance between two areas easily going up + * to gigabytes. To avoid interacting with regular vmallocs, these + * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. 
It * does everything top-down and scans areas from the end looking for @@ -2224,7 +2225,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext, */ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, - size_t align, gfp_t gfp_mask) + size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); @@ -2234,8 +2235,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, unsigned long base, start, end, last_end; bool purged = false; - gfp_mask &= GFP_RECLAIM_MASK; - /* verify parameters and allocate data structures */ BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { @@ -2268,14 +2267,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, return NULL; } - vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask); - vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask); + vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL); + vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL); if (!vas || !vms) goto err_free; for (area = 0; area < nr_vms; area++) { - vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask); - vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask); + vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); + vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } @@ -2456,13 +2455,8 @@ static int s_show(struct seq_file *m, void *p) seq_printf(m, "0x%p-0x%p %7ld", v->addr, v->addr + v->size, v->size); - if (v->caller) { - char buff[KSYM_SYMBOL_LEN]; - - seq_putc(m, ' '); - sprint_symbol(buff, (unsigned long)v->caller); - seq_puts(m, buff); - } + if (v->caller) + seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); diff --git a/mm/vmscan.c b/mm/vmscan.c index 9ca587c69274..99999a9b2b0b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -51,11 +53,23 @@ #define CREATE_TRACE_POINTS #include -enum lumpy_mode { - LUMPY_MODE_NONE, - LUMPY_MODE_ASYNC, - LUMPY_MODE_SYNC, -}; +/* + * reclaim_mode determines how the inactive list is shrunk + * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages + * RECLAIM_MODE_ASYNC: Do not block + * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback + * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference + * page from the LRU and reclaim all pages within a + * naturally aligned range + * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of + * order-0 pages and then compact the zone + */ +typedef unsigned __bitwise__ reclaim_mode_t; +#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u) +#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u) +#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u) +#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u) +#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u) struct scan_control { /* Incremented by the number of inactive pages that were scanned */ @@ -88,7 +102,7 @@ struct scan_control { * Intend to reclaim enough continuous memory rather than reclaim * enough amount of memory. i.e, mode for high order allocation. 
*/ - enum lumpy_mode lumpy_reclaim_mode; + reclaim_mode_t reclaim_mode; /* Which cgroup do we reclaim from */ struct mem_cgroup *mem_cgroup; @@ -271,34 +285,37 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, return ret; } -static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, +static void set_reclaim_mode(int priority, struct scan_control *sc, bool sync) { - enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC; + reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC; /* - * Some reclaim have alredy been failed. No worth to try synchronous - * lumpy reclaim. + * Initially assume we are entering either lumpy reclaim or + * reclaim/compaction.Depending on the order, we will either set the + * sync mode or just reclaim order-0 pages later. */ - if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE) - return; + if (COMPACTION_BUILD) + sc->reclaim_mode = RECLAIM_MODE_COMPACTION; + else + sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM; /* - * If we need a large contiguous chunk of memory, or have - * trouble getting a small set of contiguous pages, we - * will reclaim both active and inactive pages. + * Avoid using lumpy reclaim or reclaim/compaction if possible by + * restricting when its set to either costly allocations or when + * under memory pressure */ if (sc->order > PAGE_ALLOC_COSTLY_ORDER) - sc->lumpy_reclaim_mode = mode; + sc->reclaim_mode |= syncmode; else if (sc->order && priority < DEF_PRIORITY - 2) - sc->lumpy_reclaim_mode = mode; + sc->reclaim_mode |= syncmode; else - sc->lumpy_reclaim_mode = LUMPY_MODE_NONE; + sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; } -static void disable_lumpy_reclaim_mode(struct scan_control *sc) +static void reset_reclaim_mode(struct scan_control *sc) { - sc->lumpy_reclaim_mode = LUMPY_MODE_NONE; + sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; } static inline int is_page_cache_freeable(struct page *page) @@ -429,7 +446,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, * first attempt to free a range of pages fails. */ if (PageWriteback(page) && - sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC) + (sc->reclaim_mode & RECLAIM_MODE_SYNC)) wait_on_page_writeback(page); if (!PageWriteback(page)) { @@ -437,7 +454,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, ClearPageReclaim(page); } trace_mm_vmscan_writepage(page, - trace_reclaim_flags(page, sc->lumpy_reclaim_mode)); + trace_reclaim_flags(page, sc->reclaim_mode)); inc_zone_page_state(page, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } @@ -622,7 +639,7 @@ static enum page_references page_check_references(struct page *page, referenced_page = TestClearPageReferenced(page); /* Lumpy reclaim - ignore references */ - if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE) + if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) return PAGEREF_RECLAIM; /* @@ -739,7 +756,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * for any page for which writeback has already * started. 
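To illustrate the switch from the three-state lumpy enum to a bitmask (sketch only; the helper below is hypothetical and not part of the patch): after set_reclaim_mode(priority, sc, true) for a costly-order request on a kernel built with compaction, sc->reclaim_mode holds RECLAIM_MODE_COMPACTION | RECLAIM_MODE_SYNC, so callers now test individual properties with single bit checks rather than comparing against LUMPY_MODE_* values, as the pageout() hunk above does.

/* Illustrative helper only -- not part of the patch. */
static bool example_may_wait_on_writeback(struct scan_control *sc)
{
	/* old test: sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC */
	return sc->reclaim_mode & RECLAIM_MODE_SYNC;
}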
*/ - if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC && + if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) && may_enter_fs) wait_on_page_writeback(page); else { @@ -895,7 +912,7 @@ cull_mlocked: try_to_free_swap(page); unlock_page(page); putback_lru_page(page); - disable_lumpy_reclaim_mode(sc); + reset_reclaim_mode(sc); continue; activate_locked: @@ -908,7 +925,7 @@ activate_locked: keep_locked: unlock_page(page); keep: - disable_lumpy_reclaim_mode(sc); + reset_reclaim_mode(sc); keep_lumpy: list_add(&page->lru, &ret_pages); VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); @@ -1028,7 +1045,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, case 0: list_move(&page->lru, dst); mem_cgroup_del_lru(page); - nr_taken++; + nr_taken += hpage_nr_pages(page); break; case -EBUSY: @@ -1086,7 +1103,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, if (__isolate_lru_page(cursor_page, mode, file) == 0) { list_move(&cursor_page->lru, dst); mem_cgroup_del_lru(cursor_page); - nr_taken++; + nr_taken += hpage_nr_pages(page); nr_lumpy_taken++; if (PageDirty(cursor_page)) nr_lumpy_dirty++; @@ -1141,14 +1158,15 @@ static unsigned long clear_active_flags(struct list_head *page_list, struct page *page; list_for_each_entry(page, page_list, lru) { + int numpages = hpage_nr_pages(page); lru = page_lru_base_type(page); if (PageActive(page)) { lru += LRU_ACTIVE; ClearPageActive(page); - nr_active++; + nr_active += numpages; } if (count) - count[lru]++; + count[lru] += numpages; } return nr_active; @@ -1253,13 +1271,16 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc, spin_lock_irq(&zone->lru_lock); continue; } - SetPageLRU(page); lru = page_lru(page); - add_page_to_lru_list(zone, page, lru); if (is_active_lru(lru)) { int file = is_file_lru(lru); - reclaim_stat->recent_rotated[file]++; + int numpages = hpage_nr_pages(page); + reclaim_stat->recent_rotated[file] += numpages; + if (putback_active_lru_page(zone, page)) + continue; } + SetPageLRU(page); + add_page_to_lru_list(zone, page, lru); if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); __pagevec_release(&pvec); @@ -1324,7 +1345,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken, return false; /* Only stall on lumpy reclaim */ - if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE) + if (sc->reclaim_mode & RECLAIM_MODE_SINGLE) return false; /* If we have relaimed everything on the isolated list, no stall */ @@ -1368,15 +1389,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, return SWAP_CLUSTER_MAX; } - set_lumpy_reclaim_mode(priority, sc, false); + set_reclaim_mode(priority, sc, false); lru_add_drain(); spin_lock_irq(&zone->lru_lock); if (scanning_global_lru(sc)) { nr_taken = isolate_pages_global(nr_to_scan, &page_list, &nr_scanned, sc->order, - sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? - ISOLATE_INACTIVE : ISOLATE_BOTH, + sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? + ISOLATE_BOTH : ISOLATE_INACTIVE, zone, 0, file); zone->pages_scanned += nr_scanned; if (current_is_kswapd()) @@ -1388,8 +1409,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, } else { nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list, &nr_scanned, sc->order, - sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? - ISOLATE_INACTIVE : ISOLATE_BOTH, + sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? 
+ ISOLATE_BOTH : ISOLATE_INACTIVE, zone, sc->mem_cgroup, 0, file); /* @@ -1411,7 +1432,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, /* Check if we should syncronously wait for writeback */ if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { - set_lumpy_reclaim_mode(priority, sc, true); + set_reclaim_mode(priority, sc, true); nr_reclaimed += shrink_page_list(&page_list, zone, sc); } @@ -1426,7 +1447,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, zone_idx(zone), nr_scanned, nr_reclaimed, priority, - trace_shrink_flags(file, sc->lumpy_reclaim_mode)); + trace_shrink_flags(file, sc->reclaim_mode)); return nr_reclaimed; } @@ -1466,7 +1487,7 @@ static void move_active_pages_to_lru(struct zone *zone, list_move(&page->lru, &zone->lru[lru].list); mem_cgroup_add_lru_list(page, lru); - pgmoved++; + pgmoved += hpage_nr_pages(page); if (!pagevec_add(&pvec, page) || list_empty(list)) { spin_unlock_irq(&zone->lru_lock); @@ -1534,7 +1555,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, } if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { - nr_rotated++; + nr_rotated += hpage_nr_pages(page); /* * Identify referenced, file-backed active pages and * give them one more trip around the active list. So @@ -1804,6 +1825,57 @@ out: } } +/* + * Reclaim/compaction depends on a number of pages being freed. To avoid + * disruption to the system, a small number of order-0 pages continue to be + * rotated and reclaimed in the normal fashion. However, by the time we get + * back to the allocator and call try_to_compact_zone(), we ensure that + * there are enough free pages for it to be likely successful + */ +static inline bool should_continue_reclaim(struct zone *zone, + unsigned long nr_reclaimed, + unsigned long nr_scanned, + struct scan_control *sc) +{ + unsigned long pages_for_compaction; + unsigned long inactive_lru_pages; + + /* If not in reclaim/compaction mode, stop */ + if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) + return false; + + /* + * If we failed to reclaim and have scanned the full list, stop. + * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far + * faster but obviously would be less likely to succeed + * allocation. If this is desirable, use GFP_REPEAT to decide + * if both reclaimed and scanned should be checked or just + * reclaimed + */ + if (!nr_reclaimed && !nr_scanned) + return false; + + /* + * If we have not reclaimed enough pages for compaction and the + * inactive lists are large enough, continue reclaiming + */ + pages_for_compaction = (2UL << sc->order); + inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) + + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); + if (sc->nr_reclaimed < pages_for_compaction && + inactive_lru_pages > pages_for_compaction) + return true; + + /* If compaction would go ahead or the allocation would succeed, stop */ + switch (compaction_suitable(zone, sc->order)) { + case COMPACT_PARTIAL: + case COMPACT_CONTINUE: + return false; + default: + return true; + } +} + /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
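To put rough numbers on should_continue_reclaim() above (illustrative values only): for a hypothetical order-5 request, pages_for_compaction = 2UL << 5 = 64, so while the scan has reclaimed fewer than 64 pages in total and the zone's inactive anon plus inactive file lists still hold more than 64 pages, reclaim keeps going; otherwise, once compaction_suitable() reports COMPACT_PARTIAL or COMPACT_CONTINUE, reclaim stops and compaction is expected to take over.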
*/ @@ -1813,9 +1885,12 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long nr[NR_LRU_LISTS]; unsigned long nr_to_scan; enum lru_list l; - unsigned long nr_reclaimed = sc->nr_reclaimed; + unsigned long nr_reclaimed; unsigned long nr_to_reclaim = sc->nr_to_reclaim; + unsigned long nr_scanned = sc->nr_scanned; +restart: + nr_reclaimed = 0; get_scan_count(zone, sc, nr, priority); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || @@ -1841,8 +1916,7 @@ static void shrink_zone(int priority, struct zone *zone, if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) break; } - - sc->nr_reclaimed = nr_reclaimed; + sc->nr_reclaimed += nr_reclaimed; /* * Even if we did not try to evict anon pages at all, we want to @@ -1851,6 +1925,11 @@ static void shrink_zone(int priority, struct zone *zone, if (inactive_anon_is_low(zone, sc)) shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); + /* reclaim/compaction might need reclaim to continue */ + if (should_continue_reclaim(zone, nr_reclaimed, + sc->nr_scanned - nr_scanned, sc)) + goto restart; + throttle_vm_writeout(sc->gfp_mask); } @@ -2124,38 +2203,87 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, } #endif +/* + * pgdat_balanced is used when checking if a node is balanced for high-order + * allocations. Only zones that meet watermarks and are in a zone allowed + * by the callers classzone_idx are added to balanced_pages. The total of + * balanced pages must be at least 25% of the zones allowed by classzone_idx + * for the node to be considered balanced. Forcing all zones to be balanced + * for high orders can cause excessive reclaim when there are imbalanced zones. + * The choice of 25% is due to + * o a 16M DMA zone that is balanced will not balance a zone on any + * reasonable sized machine + * o On all other machines, the top zone must be at least a reasonable + * precentage of the middle zones. For example, on 32-bit x86, highmem + * would need to be at least 256M for it to be balance a whole node. + * Similarly, on x86-64 the Normal zone would need to be at least 1G + * to balance a node on its own. These seemed like reasonable ratios. + */ +static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, + int classzone_idx) +{ + unsigned long present_pages = 0; + int i; + + for (i = 0; i <= classzone_idx; i++) + present_pages += pgdat->node_zones[i].present_pages; + + return balanced_pages > (present_pages >> 2); +} + /* is kswapd sleeping prematurely? */ -static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) +static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, + int classzone_idx) { int i; + unsigned long balanced = 0; + bool all_zones_ok = true; /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ if (remaining) - return 1; + return true; - /* If after HZ/10, a zone is below the high mark, it's premature */ + /* Check the watermark levels */ for (i = 0; i < pgdat->nr_zones; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; - if (zone->all_unreclaimable) + /* + * balance_pgdat() skips over all_unreclaimable after + * DEF_PRIORITY. 
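A worked example of the 25% rule in pgdat_balanced() above (numbers chosen only for illustration): if the zones at or below the caller's classzone_idx together hold 1,000,000 present pages, pgdat_balanced() returns true once the zones meeting their high watermark account for more than 1,000,000 >> 2 = 250,000 of them, so a small unbalanced zone such as a 16M DMA zone can no longer keep kswapd reclaiming for a high-order request on its own.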
Effectively, it considers them balanced so + * they must be considered balanced here as well if kswapd + * is to sleep + */ + if (zone->all_unreclaimable) { + balanced += zone->present_pages; continue; + } - if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), - 0, 0)) - return 1; + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), + classzone_idx, 0)) + all_zones_ok = false; + else + balanced += zone->present_pages; } - return 0; + /* + * For high-order requests, the balanced zones must contain at least + * 25% of the nodes pages for kswapd to sleep. For order-0, all zones + * must be balanced + */ + if (order) + return pgdat_balanced(pgdat, balanced, classzone_idx); + else + return !all_zones_ok; } /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). * - * Returns the number of pages which were actually freed. + * Returns the final order kswapd was reclaiming at * * There is special handling here for zones which are full of pinned pages. * This can happen if the pages are all mlocked, or if they are all used by @@ -2172,11 +2300,14 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) * interoperates with the page allocator fallback scheme to ensure that aging * of pages is balanced across the zones. */ -static unsigned long balance_pgdat(pg_data_t *pgdat, int order) +static unsigned long balance_pgdat(pg_data_t *pgdat, int order, + int *classzone_idx) { int all_zones_ok; + unsigned long balanced; int priority; int i; + int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long total_scanned; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { @@ -2199,7 +2330,6 @@ loop_again: count_vm_event(PAGEOUTRUN); for (priority = DEF_PRIORITY; priority >= 0; priority--) { - int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; int has_under_min_watermark_zone = 0; @@ -2208,6 +2338,7 @@ loop_again: disable_swap_token(); all_zones_ok = 1; + balanced = 0; /* * Scan in the highmem->dma direction for the highest @@ -2230,9 +2361,10 @@ loop_again: shrink_active_list(SWAP_CLUSTER_MAX, zone, &sc, priority, 0); - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 0, 0)) { end_zone = i; + *classzone_idx = i; break; } } @@ -2255,6 +2387,7 @@ loop_again: * cause too much scanning of the lower zones. */ for (i = 0; i <= end_zone; i++) { + int compaction; struct zone *zone = pgdat->node_zones + i; int nr_slab; @@ -2276,7 +2409,7 @@ loop_again: * We put equal pressure on every zone, unless one * zone has way too many pages free already. 
*/ - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, 8*high_wmark_pages(zone), end_zone, 0)) shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; @@ -2284,9 +2417,26 @@ loop_again: lru_pages); sc.nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; + + compaction = 0; + if (order && + zone_watermark_ok(zone, 0, + high_wmark_pages(zone), + end_zone, 0) && + !zone_watermark_ok(zone, order, + high_wmark_pages(zone), + end_zone, 0)) { + compact_zone_order(zone, + order, + sc.gfp_mask, false, + COMPACT_MODE_KSWAPD); + compaction = 1; + } + if (zone->all_unreclaimable) continue; - if (nr_slab == 0 && !zone_reclaimable(zone)) + if (!compaction && nr_slab == 0 && + !zone_reclaimable(zone)) zone->all_unreclaimable = 1; /* * If we've done a decent amount of scanning and @@ -2297,7 +2447,7 @@ loop_again: total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) sc.may_writepage = 1; - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), end_zone, 0)) { all_zones_ok = 0; /* @@ -2305,7 +2455,7 @@ loop_again: * means that we have a GFP_ATOMIC allocation * failure risk. Hurry up! */ - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, min_wmark_pages(zone), end_zone, 0)) has_under_min_watermark_zone = 1; } else { @@ -2317,10 +2467,12 @@ loop_again: * spectulatively avoid congestion waits */ zone_clear_flag(zone, ZONE_CONGESTED); + if (i <= *classzone_idx) + balanced += zone->present_pages; } } - if (all_zones_ok) + if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) break; /* kswapd: all done */ /* * OK, kswapd is getting into trouble. Take a nap, then take @@ -2343,7 +2495,13 @@ loop_again: break; } out: - if (!all_zones_ok) { + + /* + * order-0: All zones must meet high watermark for a balanced node + * high-order: Balanced zones must make up at least 25% of the node + * for the node to be balanced + */ + if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { cond_resched(); try_to_freeze(); @@ -2368,7 +2526,88 @@ out: goto loop_again; } - return sc.nr_reclaimed; + /* + * If kswapd was reclaiming at a higher order, it has the option of + * sleeping without all zones being balanced. Before it does, it must + * ensure that the watermarks for order-0 on *all* zones are met and + * that the congestion flags are cleared. The congestion flag must + * be cleared as kswapd is the only mechanism that clears the flag + * and it is potentially going to sleep here. + */ + if (order) { + for (i = 0; i <= end_zone; i++) { + struct zone *zone = pgdat->node_zones + i; + + if (!populated_zone(zone)) + continue; + + if (zone->all_unreclaimable && priority != DEF_PRIORITY) + continue; + + /* Confirm the zone is balanced for order-0 */ + if (!zone_watermark_ok(zone, 0, + high_wmark_pages(zone), 0, 0)) { + order = sc.order = 0; + goto loop_again; + } + + /* If balanced, clear the congested flag */ + zone_clear_flag(zone, ZONE_CONGESTED); + } + } + + /* + * Return the order we were reclaiming at so sleeping_prematurely() + * makes a decision on the order we were last reclaiming at. 
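In other words (an illustrative reading of the balance_pgdat() hunk above, not new behaviour beyond what the diff adds): when kswapd is working on a high-order request and a zone passes the order-0 high-watermark check but fails the same check at the requested order, the shortage is contiguity rather than free memory, so kswapd invokes compact_zone_order() on that zone instead of only reclaiming further, and a zone that was handed to compaction in this pass is not marked all_unreclaimable.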
However, + * if another caller entered the allocator slow path while kswapd + * was awake, order will remain at the higher level + */ + *classzone_idx = end_zone; + return order; +} + +static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) +{ + long remaining = 0; + DEFINE_WAIT(wait); + + if (freezing(current) || kthread_should_stop()) + return; + + prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); + + /* Try to sleep for a short interval */ + if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { + remaining = schedule_timeout(HZ/10); + finish_wait(&pgdat->kswapd_wait, &wait); + prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); + } + + /* + * After a short sleep, check if it was a premature sleep. If not, then + * go fully to sleep until explicitly woken up. + */ + if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { + trace_mm_vmscan_kswapd_sleep(pgdat->node_id); + + /* + * vmstat counters are not perfectly accurate and the estimated + * value for counters such as NR_FREE_PAGES can deviate from the + * true value by nr_online_cpus * threshold. To avoid the zone + * watermarks being breached while under pressure, we reduce the + * per-cpu vmstat threshold while kswapd is awake and restore + * them before going back to sleep. + */ + set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); + schedule(); + set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); + } else { + if (remaining) + count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); + else + count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); + } + finish_wait(&pgdat->kswapd_wait, &wait); } /* @@ -2387,9 +2626,10 @@ out: static int kswapd(void *p) { unsigned long order; + int classzone_idx; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; - DEFINE_WAIT(wait); + struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; @@ -2417,49 +2657,30 @@ static int kswapd(void *p) set_freezable(); order = 0; + classzone_idx = MAX_NR_ZONES - 1; for ( ; ; ) { unsigned long new_order; + int new_classzone_idx; int ret; - prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); new_order = pgdat->kswapd_max_order; + new_classzone_idx = pgdat->classzone_idx; pgdat->kswapd_max_order = 0; - if (order < new_order) { + pgdat->classzone_idx = MAX_NR_ZONES - 1; + if (order < new_order || classzone_idx > new_classzone_idx) { /* * Don't sleep if someone wants a larger 'order' - * allocation + * allocation or has tigher zone constraints */ order = new_order; + classzone_idx = new_classzone_idx; } else { - if (!freezing(current) && !kthread_should_stop()) { - long remaining = 0; - - /* Try to sleep for a short interval */ - if (!sleeping_prematurely(pgdat, order, remaining)) { - remaining = schedule_timeout(HZ/10); - finish_wait(&pgdat->kswapd_wait, &wait); - prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); - } - - /* - * After a short sleep, check if it was a - * premature sleep. 
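To make the vmstat drift concrete (example figures only, not taken from the patch): with 16 online CPUs and a per-cpu stat threshold of 125, the global NR_FREE_PAGES estimate can trail the true value by up to 16 * 125 = 2000 pages, roughly 8MB with 4KiB pages, which is easily enough to hide a breached min watermark. Hence kswapd_try_to_sleep() runs the node's zones at the tighter calculate_pressure_threshold() while kswapd is awake and only restores calculate_normal_threshold() immediately before going to sleep.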
If not, then go fully - * to sleep until explicitly woken up - */ - if (!sleeping_prematurely(pgdat, order, remaining)) { - trace_mm_vmscan_kswapd_sleep(pgdat->node_id); - schedule(); - } else { - if (remaining) - count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); - else - count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); - } - } - + kswapd_try_to_sleep(pgdat, order, classzone_idx); order = pgdat->kswapd_max_order; + classzone_idx = pgdat->classzone_idx; + pgdat->kswapd_max_order = 0; + pgdat->classzone_idx = MAX_NR_ZONES - 1; } - finish_wait(&pgdat->kswapd_wait, &wait); ret = try_to_freeze(); if (kthread_should_stop()) @@ -2471,7 +2692,7 @@ static int kswapd(void *p) */ if (!ret) { trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); - balance_pgdat(pgdat, order); + order = balance_pgdat(pgdat, order, &classzone_idx); } } return 0; @@ -2480,23 +2701,26 @@ static int kswapd(void *p) /* * A zone is low on free memory, so wake its kswapd task to service it. */ -void wakeup_kswapd(struct zone *zone, int order) +void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; if (!populated_zone(zone)) return; - pgdat = zone->zone_pgdat; - if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) - return; - if (pgdat->kswapd_max_order < order) - pgdat->kswapd_max_order = order; - trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) return; + pgdat = zone->zone_pgdat; + if (pgdat->kswapd_max_order < order) { + pgdat->kswapd_max_order = order; + pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); + } if (!waitqueue_active(&pgdat->kswapd_wait)) return; + if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) + return; + + trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); wake_up_interruptible(&pgdat->kswapd_wait); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 312d728976f1..0c3b5048773e 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -83,7 +83,31 @@ EXPORT_SYMBOL(vm_stat); #ifdef CONFIG_SMP -static int calculate_threshold(struct zone *zone) +int calculate_pressure_threshold(struct zone *zone) +{ + int threshold; + int watermark_distance; + + /* + * As vmstats are not up to date, there is drift between the estimated + * and real values. For high thresholds and a high number of CPUs, it + * is possible for the min watermark to be breached while the estimated + * value looks fine. 
The pressure threshold is a reduced value such + * that even the maximum amount of drift will not accidentally breach + * the min watermark + */ + watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); + threshold = max(1, (int)(watermark_distance / num_online_cpus())); + + /* + * Maximum threshold is 125 + */ + threshold = min(125, threshold); + + return threshold; +} + +int calculate_normal_threshold(struct zone *zone) { int threshold; int mem; /* memory in 128 MB units */ @@ -142,7 +166,7 @@ static void refresh_zone_stat_thresholds(void) for_each_populated_zone(zone) { unsigned long max_drift, tolerate_drift; - threshold = calculate_threshold(zone); + threshold = calculate_normal_threshold(zone); for_each_online_cpu(cpu) per_cpu_ptr(zone->pageset, cpu)->stat_threshold @@ -161,6 +185,26 @@ static void refresh_zone_stat_thresholds(void) } } +void set_pgdat_percpu_threshold(pg_data_t *pgdat, + int (*calculate_pressure)(struct zone *)) +{ + struct zone *zone; + int cpu; + int threshold; + int i; + + for (i = 0; i < pgdat->nr_zones; i++) { + zone = &pgdat->node_zones[i]; + if (!zone->percpu_drift_mark) + continue; + + threshold = (*calculate_pressure)(zone); + for_each_possible_cpu(cpu) + per_cpu_ptr(zone->pageset, cpu)->stat_threshold + = threshold; + } +} + /* * For use when we know that interrupts are disabled. */ @@ -836,6 +880,7 @@ static const char * const vmstat_text[] = { "numa_local", "numa_other", #endif + "nr_anon_transparent_hugepages", "nr_dirty_threshold", "nr_dirty_background_threshold", @@ -911,7 +956,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n scanned %lu" "\n spanned %lu" "\n present %lu", - zone_nr_free_pages(zone), + zone_page_state(zone, NR_FREE_PAGES), min_wmark_pages(zone), low_wmark_pages(zone), high_wmark_pages(zone), diff --git a/net/Kconfig b/net/Kconfig index ad0aafe903f8..72840626284b 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -253,7 +253,9 @@ config NET_TCPPROBE what was just said, you don't need it: say N. Documentation on how to use TCP connection probing can be found - at http://linux-net.osdl.org/index.php/TcpProbe + at: + + http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe To compile this code as a module, choose M here: the module will be called tcp_probe. diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index bb86d2932394..6da5daeebab7 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, ax25_cb *ax25; int err = 0; - memset(fsa, 0, sizeof(fsa)); + memset(fsa, 0, sizeof(*fsa)); lock_sock(sk); ax25 = ax25_sk(sk); diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 815ef8826796..0a1b53bce76d 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -1,5 +1,6 @@ #include +#include /* * Robert Jenkin's hash function. 
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len) return -1; } } +EXPORT_SYMBOL(ceph_str_hash); const char *ceph_str_hash_name(int type) { @@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type) return "unknown"; } } +EXPORT_SYMBOL(ceph_str_hash_name); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b6ff4a1519ab..dff633d62e5b 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -96,7 +96,7 @@ struct workqueue_struct *ceph_msgr_wq; int ceph_msgr_init(void) { - ceph_msgr_wq = create_workqueue("ceph-msgr"); + ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0); if (!ceph_msgr_wq) { pr_err("msgr_init failed to create workqueue\n"); return -ENOMEM; @@ -1920,20 +1920,6 @@ bad_tag: /* * Atomically queue work on a connection. Bump @con reference to * avoid races with connection teardown. - * - * There is some trickery going on with QUEUED and BUSY because we - * only want a _single_ thread operating on each connection at any - * point in time, but we want to use all available CPUs. - * - * The worker thread only proceeds if it can atomically set BUSY. It - * clears QUEUED and does it's thing. When it thinks it's done, it - * clears BUSY, then rechecks QUEUED.. if it's set again, it loops - * (tries again to set BUSY). - * - * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we - * try to queue work. If that fails (work is already queued, or BUSY) - * we give up (work also already being done or is queued) but leave QUEUED - * set so that the worker thread will loop if necessary. */ static void queue_con(struct ceph_connection *con) { @@ -1948,11 +1934,7 @@ static void queue_con(struct ceph_connection *con) return; } - set_bit(QUEUED, &con->state); - if (test_bit(BUSY, &con->state)) { - dout("queue_con %p - already BUSY\n", con); - con->ops->put(con); - } else if (!queue_work(ceph_msgr_wq, &con->work.work)) { + if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { dout("queue_con %p - already queued\n", con); con->ops->put(con); } else { @@ -1967,15 +1949,6 @@ static void con_work(struct work_struct *work) { struct ceph_connection *con = container_of(work, struct ceph_connection, work.work); - int backoff = 0; - -more: - if (test_and_set_bit(BUSY, &con->state) != 0) { - dout("con_work %p BUSY already set\n", con); - goto out; - } - dout("con_work %p start, clearing QUEUED\n", con); - clear_bit(QUEUED, &con->state); mutex_lock(&con->mutex); @@ -1994,28 +1967,13 @@ more: try_read(con) < 0 || try_write(con) < 0) { mutex_unlock(&con->mutex); - backoff = 1; ceph_fault(con); /* error/fault path */ goto done_unlocked; } done: mutex_unlock(&con->mutex); - done_unlocked: - clear_bit(BUSY, &con->state); - dout("con->state=%lu\n", con->state); - if (test_bit(QUEUED, &con->state)) { - if (!backoff || test_bit(OPENING, &con->state)) { - dout("con_work %p QUEUED reset, looping\n", con); - goto more; - } - dout("con_work %p QUEUED reset, but just faulted\n", con); - clear_bit(QUEUED, &con->state); - } - dout("con_work %p done\n", con); - -out: con->ops->put(con); } diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index d73f3f6efa36..71603ac3dff5 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) goto bad; } err = __decode_pool(p, end, pi); - if (err < 0) + if (err < 0) { + kfree(pi); goto bad; + } __insert_pg_pool(&map->pg_pools, pi); } diff --git a/net/core/dev.c b/net/core/dev.c index a3ef808b5e36..54277df0f735 100644 --- a/net/core/dev.c +++ 
b/net/core/dev.c @@ -5523,34 +5523,6 @@ void netdev_run_todo(void) } } -/** - * dev_txq_stats_fold - fold tx_queues stats - * @dev: device to get statistics from - * @stats: struct rtnl_link_stats64 to hold results - */ -void dev_txq_stats_fold(const struct net_device *dev, - struct rtnl_link_stats64 *stats) -{ - u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0; - unsigned int i; - struct netdev_queue *txq; - - for (i = 0; i < dev->num_tx_queues; i++) { - txq = netdev_get_tx_queue(dev, i); - spin_lock_bh(&txq->_xmit_lock); - tx_bytes += txq->tx_bytes; - tx_packets += txq->tx_packets; - tx_dropped += txq->tx_dropped; - spin_unlock_bh(&txq->_xmit_lock); - } - if (tx_bytes || tx_packets || tx_dropped) { - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_packets; - stats->tx_dropped = tx_dropped; - } -} -EXPORT_SYMBOL(dev_txq_stats_fold); - /* Convert net_device_stats to rtnl_link_stats64. They have the same * fields in the same order, with only the type differing. */ @@ -5594,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); } else { netdev_stats_to_stats64(storage, &dev->stats); - dev_txq_stats_fold(dev, storage); } storage->rx_dropped += atomic_long_read(&dev->rx_dropped); return storage; @@ -6218,7 +6189,7 @@ static void __net_exit default_device_exit(struct net *net) static void __net_exit default_device_exit_batch(struct list_head *net_list) { /* At exit all network devices most be removed from a network - * namespace. Do this in the reverse order of registeration. + * namespace. Do this in the reverse order of registration. * Do this across as many network namespaces as possible to * improve batching efficiency. */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 19d6c21220fd..d31bb36ae0dc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -380,6 +380,8 @@ static void skb_release_head_state(struct sk_buff *skb) } #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb->nfct); +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED nf_conntrack_put_reasm(skb->nfct_reasm); #endif #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/core/sock.c b/net/core/sock.c index a658aeb6d554..7dfed792434d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , - "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , + "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { @@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-27" , "slock-28" , "slock-AF_CAN" , "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , - "slock-AF_IEEE802154", "slock-AF_CAIF" , + "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { @@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-27" , "clock-28" , "clock-AF_CAN" , "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , - "clock-AF_IEEE802154", "clock-AF_CAIF" , + "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , 
"clock-AF_MAX" }; diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig index ad6dffd9070e..b75968a04017 100644 --- a/net/dccp/Kconfig +++ b/net/dccp/Kconfig @@ -49,7 +49,9 @@ config NET_DCCPPROBE what was just said, you don't need it: say N. Documentation on how to use DCCP connection probing can be found - at http://linux-net.osdl.org/index.php/DccpProbe + at: + + http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe To compile this code as a module, choose M here: the module will be called dccp_probe. diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 0ba15633c418..0dcaa903e00e 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1130,7 +1130,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err) /* * This processes a device up event. We only start up * the loopback device & ethernet devices with correct - * MAC addreses automatically. Others must be started + * MAC addresses automatically. Others must be started * specifically. * * FIXME: How should we configure the loopback address ? If we could dispense diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index f9d7ac924f15..44d2b42fda56 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -351,7 +351,7 @@ EXPORT_SYMBOL(ether_setup); * @sizeof_priv: Size of additional driver-private structure to be allocated * for this Ethernet device * @txqs: The number of TX queues this device has. - * @txqs: The number of RX queues this device has. + * @rxqs: The number of RX queues this device has. * * Fill in the fields of the device structure with Ethernet-generic * values. Basically does everything except registering the device. diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 9e95d7fb6d5a..a5a1050595d1 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -432,7 +432,9 @@ config INET_DIAG ---help--- Support for INET (TCP, DCCP, etc) socket monitoring interface used by native Linux tools such as ss. ss is included in iproute2, currently - downloadable at . + downloadable at: + + http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2 If unsure, say Y. diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index dc7c096ddfef..406f320336e6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1350,7 +1350,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, return 0; } -/* Intialize TSO state of a skb. +/* Initialize TSO state of a skb. * This must be invoked the first time we consider transmitting * SKB onto the wire. 
*/ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 059a3de647db..978e80e2c4a8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -300,7 +300,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; } - /* Reproduce AF_INET checks to make the bindings consitant */ + /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); if (!sysctl_ip_nonlocal_bind && diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 94b5bf132b2e..5f8d242be3f3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb) goto drop; } + if (skb->pkt_type != PACKET_HOST) + goto drop; + skb_forward_csum(skb); /* diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index 99abfb53bab9..97c5b21b9674 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -19,13 +19,15 @@ #include #include +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include #include #include #include #include -#include #include +#endif +#include #include static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, @@ -33,8 +35,10 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, { u16 zone = NF_CT_DEFAULT_ZONE; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) if (skb->nfct) zone = nf_ct_zone((struct nf_conn *)skb->nfct); +#endif #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && @@ -56,9 +60,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum, { struct sk_buff *reasm; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) /* Previously seen (loopback)? */ if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) return NF_ACCEPT; +#endif reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); /* queued */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 5cb8d3027b18..2b7eef37875c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -972,7 +972,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, free: kfree_skb(skb2); out: - return err; + /* this avoids a loop in nfnetlink. */ + return err == -EAGAIN ? -ENOBUFS : err; } #ifdef CONFIG_NF_NAT_NEEDED diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 0b9bb2085ce4..74c064c0dfdd 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void) goto error_call_jar; } - rxrpc_workqueue = create_workqueue("krxrpcd"); + rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); if (!rxrpc_workqueue) { printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); goto error_work_queue; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a36270a994d7..f04d4a484d53 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -24,7 +24,7 @@ menuconfig NET_SCHED To administer these schedulers, you'll need the user-level utilities from the package iproute2+tc at . That package also contains some documentation; for more, check out - . + . 
This Quality of Service (QoS) support will enable you to use Differentiated Services (diffserv) and Resource Reservation Protocol diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index af9360d1f6eb..84ce48eadff4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -59,6 +59,10 @@ struct teql_master struct net_device *dev; struct Qdisc *slaves; struct list_head master_list; + unsigned long tx_bytes; + unsigned long tx_packets; + unsigned long tx_errors; + unsigned long tx_dropped; }; struct teql_sched_data @@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb, static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) { struct teql_master *master = netdev_priv(dev); - struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); struct Qdisc *start, *q; int busy; int nores; @@ -314,8 +317,8 @@ restart: __netif_tx_unlock(slave_txq); master->slaves = NEXT_SLAVE(q); netif_wake_queue(dev); - txq->tx_packets++; - txq->tx_bytes += length; + master->tx_packets++; + master->tx_bytes += length; return NETDEV_TX_OK; } __netif_tx_unlock(slave_txq); @@ -342,10 +345,10 @@ restart: netif_stop_queue(dev); return NETDEV_TX_BUSY; } - dev->stats.tx_errors++; + master->tx_errors++; drop: - txq->tx_dropped++; + master->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev) return 0; } +static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct teql_master *m = netdev_priv(dev); + + stats->tx_packets = m->tx_packets; + stats->tx_bytes = m->tx_bytes; + stats->tx_errors = m->tx_errors; + stats->tx_dropped = m->tx_dropped; + return stats; +} + static int teql_master_mtu(struct net_device *dev, int new_mtu) { struct teql_master *m = netdev_priv(dev); @@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = { .ndo_open = teql_master_open, .ndo_stop = teql_master_close, .ndo_start_xmit = teql_master_xmit, + .ndo_get_stats64 = teql_master_stats64, .ndo_change_mtu = teql_master_mtu, }; diff --git a/net/socket.c b/net/socket.c index ccc576a6a508..ac2219f90d5d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -306,20 +306,6 @@ static const struct super_operations sockfs_ops = { .statfs = simple_statfs, }; -static struct dentry *sockfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC); -} - -static struct vfsmount *sock_mnt __read_mostly; - -static struct file_system_type sock_fs_type = { - .name = "sockfs", - .mount = sockfs_mount, - .kill_sb = kill_anon_super, -}; - /* * sockfs_dname() is called from d_path(). */ @@ -333,6 +319,21 @@ static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; +static struct dentry *sockfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, "socket:", &sockfs_ops, + &sockfs_dentry_operations, SOCKFS_MAGIC); +} + +static struct vfsmount *sock_mnt __read_mostly; + +static struct file_system_type sock_fs_type = { + .name = "sockfs", + .mount = sockfs_mount, + .kill_sb = kill_anon_super, +}; + /* * Obtains the first available file descriptor and sets it up for use. 
* @@ -368,7 +369,6 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) } path.mnt = mntget(sock_mnt); - d_set_d_op(path.dentry, &sockfs_dentry_operations); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 75ee993ea057..9576f35ab701 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) ms_usage = 13; break; default: - return EINVAL;; + return -EINVAL; } salt[0] = (ms_usage >> 0) & 0xff; salt[1] = (ms_usage >> 8) & 0xff; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index dec2a6fc7c12..bcdae78fdfc6 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b) #define RSI_HASHBITS 6 #define RSI_HASHMAX (1<expiry_time = expiry; head->last_refresh = seconds_since_boot(); + smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ set_bit(CACHE_VALID, &head->flags); } @@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head /* entry is valid */ if (test_bit(CACHE_NEGATIVE, &h->flags)) return -ENOENT; - else + else { + /* + * In combination with write barrier in + * sunrpc_cache_update, ensures that anyone + * using the cache entry after this sees the + * updated contents: + */ + smp_rmb(); return 0; + } } } +static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) +{ + int rv; + + write_lock(&detail->hash_lock); + rv = cache_is_valid(detail, h); + if (rv != -EAGAIN) { + write_unlock(&detail->hash_lock); + return rv; + } + set_bit(CACHE_NEGATIVE, &h->flags); + cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(h, detail); + return -ENOENT; +} + /* * This is the generic cache management routine for all * the authentication caches. @@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail, case -EINVAL: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); - if (rv == -EAGAIN) { - set_bit(CACHE_NEGATIVE, &h->flags); - cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); - cache_fresh_unlocked(h, detail); - rv = -ENOENT; - } + rv = try_to_negate_entry(detail, h); break; - case -EAGAIN: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); @@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail, } if (rv == -EAGAIN) { - cache_defer_req(rqstp, h); - if (!test_bit(CACHE_PENDING, &h->flags)) { - /* Request is not deferred */ + if (!cache_defer_req(rqstp, h)) { + /* + * Request was not deferred; handle it as best + * we can ourselves: + */ rv = cache_is_valid(detail, h); if (rv == -EAGAIN) rv = -ETIMEDOUT; @@ -618,18 +640,19 @@ static void cache_limit_defers(void) discard->revisit(discard, 1); } -static void cache_defer_req(struct cache_req *req, struct cache_head *item) +/* Return true if and only if a deferred request is queued. 
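The smp_wmb()/smp_rmb() pair added to the sunrpc cache above follows the usual publish/consume pattern. Below is a minimal sketch of that pattern with a hypothetical struct and helpers (not taken from the patch), showing which accesses each barrier orders: the writer makes the payload visible before setting the valid bit, and the reader orders its flag test before reading the payload.

/* Illustrative sketch only -- not part of the sunrpc code. */
struct example_entry {
	unsigned long	flags;		/* bit 0 acts as the "valid" bit */
	int		payload;
};

static void example_publish(struct example_entry *e, int value)
{
	e->payload = value;
	smp_wmb();			/* payload visible before the flag */
	set_bit(0, &e->flags);
}

static int example_read(struct example_entry *e)
{
	if (!test_bit(0, &e->flags))
		return -EAGAIN;
	smp_rmb();			/* flag observed before reading payload */
	return e->payload;
}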
*/ +static bool cache_defer_req(struct cache_req *req, struct cache_head *item) { struct cache_deferred_req *dreq; if (req->thread_wait) { cache_wait_req(req, item); if (!test_bit(CACHE_PENDING, &item->flags)) - return; + return false; } dreq = req->defer(req); if (dreq == NULL) - return; + return false; setup_deferral(dreq, item, 1); if (!test_bit(CACHE_PENDING, &item->flags)) /* Bit could have been cleared before we managed to @@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) cache_revisit_request(item); cache_limit_defers(); + return true; } static void cache_revisit_request(struct cache_head *item) diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 0e659c665a8d..08e05a8ce025 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1001,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) rqstp->rq_splice_ok = 1; /* Will be turned off only when NFSv4 Sessions are used */ rqstp->rq_usedeferral = 1; + rqstp->rq_dropme = false; /* Setup reply header */ rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); @@ -1102,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); /* Encode reply */ - if (*statp == rpc_drop_reply) { + if (rqstp->rq_dropme) { if (procp->pc_release) procp->pc_release(rqstp, NULL, rqstp->rq_resp); goto dropit; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 3f2c5559ca1a..ab86b7927f84 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -13,6 +13,7 @@ #include #include #include +#include #define RPCDBG_FACILITY RPCDBG_SVCXPRT @@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref) if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) svcauth_unix_info_release(xprt); put_net(xprt->xpt_net); + /* See comment on corresponding get in xs_setup_bc_tcp(): */ + if (xprt->xpt_bc_xprt) + xprt_put(xprt->xpt_bc_xprt); xprt->xpt_ops->xpo_free(xprt); module_put(owner); } @@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) list_del(&rqstp->rq_list); } +static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) +{ + if (xprt->xpt_flags & ((1<xpt_flags & ((1<xpt_ops->xpo_has_wspace(xprt); + return false; +} + /* * Queue up a transport with data pending. If there are idle nfsd * processes, wake 'em up. 
@@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) struct svc_rqst *rqstp; int cpu; - if (!(xprt->xpt_flags & - ((1<xpt_pool != NULL); - xprt->xpt_pool = pool; - - /* Handle pending connection */ - if (test_bit(XPT_CONN, &xprt->xpt_flags)) - goto process; - - /* Handle close in-progress */ - if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) - goto process; - - /* Check if we have space to reply to a request */ - if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { - /* Don't enqueue while not enough space for reply */ - dprintk("svc: no write space, transport %p not enqueued\n", - xprt); - xprt->xpt_pool = NULL; - clear_bit(XPT_BUSY, &xprt->xpt_flags); - goto out_unlock; - } - process: if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, @@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); pool->sp_stats.threads_woken++; - BUG_ON(xprt->xpt_pool != pool); wake_up(&rqstp->rq_wait); } else { dprintk("svc: transport %p put into queue\n", xprt); list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); pool->sp_stats.sockets_queued++; - BUG_ON(xprt->xpt_pool != pool); } out_unlock: @@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) void svc_xprt_received(struct svc_xprt *xprt) { BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); - xprt->xpt_pool = NULL; /* As soon as we clear busy, the xprt could be closed and * 'put', so we need a reference to call svc_xprt_enqueue with: */ @@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { dprintk("svc_recv: found XPT_CLOSE\n"); svc_delete_xprt(xprt); - } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { + /* Leave XPT_BUSY set on the dead xprt: */ + goto out; + } + if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { struct svc_xprt *newxpt; newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) { @@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) spin_unlock_bh(&serv->sv_lock); svc_xprt_received(newxpt); } - svc_xprt_received(xprt); - } else { + } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", rqstp, pool->sp_id, xprt, atomic_read(&xprt->xpt_ref.refcount)); rqstp->rq_deferred = svc_deferred_dequeue(xprt); - if (rqstp->rq_deferred) { - svc_xprt_received(xprt); + if (rqstp->rq_deferred) len = svc_deferred_recv(rqstp); - } else { + else len = xprt->xpt_ops->xpo_recvfrom(rqstp); - svc_xprt_received(xprt); - } dprintk("svc: got len=%d\n", len); } + svc_xprt_received(xprt); /* No data, incomplete (TCP) read, or accept() */ - if (len == 0 || len == -EAGAIN) { - rqstp->rq_res.len = 0; - svc_xprt_release(rqstp); - return -EAGAIN; - } + if (len == 0 || len == -EAGAIN) + goto out; + clear_bit(XPT_OLD, &xprt->xpt_flags); rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); @@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (serv->sv_stats) serv->sv_stats->netcnt++; return len; +out: + rqstp->rq_res.len = 0; + svc_xprt_release(rqstp); + return -EAGAIN; } EXPORT_SYMBOL_GPL(svc_recv); @@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt) if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) /* someone else will have to effect the close */ return; - + /* + * We expect svc_close_xprt() to work even when no threads are + * running (e.g., while configuring the server before starting + * any threads), so if the transport isn't 
busy, we delete + * it ourself: + */ svc_delete_xprt(xprt); } EXPORT_SYMBOL_GPL(svc_close_xprt); @@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list) struct svc_xprt *xprt; struct svc_xprt *tmp; + /* + * The server is shutting down, and no more threads are running. + * svc_xprt_enqueue() might still be running, but at worst it + * will re-add the xprt to sp_sockets, which will soon get + * freed. So we don't bother with any more locking, and don't + * leave the close to the (nonexistent) server threads: + */ list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { set_bit(XPT_CLOSE, &xprt->xpt_flags); - if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { - /* Waiting to be processed, but no threads left, - * So just remove it from the waiting list - */ - list_del_init(&xprt->xpt_ready); - clear_bit(XPT_BUSY, &xprt->xpt_flags); - } - svc_close_xprt(xprt); + svc_delete_xprt(xprt); } } @@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) } svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt; + rqstp->rq_dropme = true; dr->handle.revisit = svc_revisit; return &dr->handle; @@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) return NULL; spin_lock(&xprt->xpt_lock); - clear_bit(XPT_DEFERRED, &xprt->xpt_flags); if (!list_empty(&xprt->xpt_deferred)) { dr = list_entry(xprt->xpt_deferred.next, struct svc_deferred_req, handle.recent); list_del_init(&dr->handle.recent); - set_bit(XPT_DEFERRED, &xprt->xpt_flags); - } + } else + clear_bit(XPT_DEFERRED, &xprt->xpt_flags); spin_unlock(&xprt->xpt_lock); return dr; } diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 4e9393c24687..7963569fc04f 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister); #define DN_HASHBITS 6 #define DN_HASHMAX (1<h.flavour = &svcauth_unix; +#ifdef CONFIG_NFSD_DEPRECATED new->addr_changes = 0; +#endif /* CONFIG_NFSD_DEPRECATED */ rv = auth_domain_lookup(name, &new->h); } } @@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom) */ #define IP_HASHBITS 8 #define IP_HASHMAX (1<m_client->h.ref); new->m_client = item->m_client; +#ifdef CONFIG_NFSD_DEPRECATED new->m_add_change = item->m_add_change; +#endif /* CONFIG_NFSD_DEPRECATED */ } static struct cache_head *ip_map_alloc(void) { @@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, ip.h.flags = 0; if (!udom) set_bit(CACHE_NEGATIVE, &ip.h.flags); +#ifdef CONFIG_NFSD_DEPRECATED else { ip.m_add_change = udom->addr_changes; /* if this is from the legacy set_client system call, @@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, if (expiry == NEVER) ip.m_add_change++; } +#endif /* CONFIG_NFSD_DEPRECATED */ ip.h.expiry_time = expiry; ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, hash_str(ipm->m_class, IP_HASHBITS) ^ @@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm, return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); } +#ifdef CONFIG_NFSD_DEPRECATED int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) { struct unix_domain *udom; @@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) return NULL; if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { - if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0) - 
auth_domain_put(&ipm->m_client->h); + sunrpc_invalidate(&ipm->h, sn->ip_map_cache); rv = NULL; } else { rv = &ipm->m_client->h; @@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) return rv; } EXPORT_SYMBOL_GPL(auth_unix_lookup); +#endif /* CONFIG_NFSD_DEPRECATED */ void svcauth_unix_purge(void) { @@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt) */ #define GID_HASHBITS 8 #define GID_HASHMAX (1<sk_xprt); + } else len += onelen; } spin_unlock_bh(&serv->sv_lock); - if (closesk) + if (closesk) { /* Should unregister with portmap, but you cannot * unregister just one protocol... */ svc_close_xprt(&closesk->sk_xprt); - else if (toclose) + svc_xprt_put(&closesk->sk_xprt); + } else if (toclose) return -ENOENT; return len; } @@ -992,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, vec[0] = rqstp->rq_arg.head[0]; } else { /* REPLY */ - if (svsk->sk_bc_xprt) - req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); + struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; + + if (bc_xprt) + req = xprt_lookup_rqst(bc_xprt, xid); if (!req) { printk(KERN_NOTICE "%s: Got unrecognized reply: " - "calldir 0x%x sk_bc_xprt %p xid %08x\n", + "calldir 0x%x xpt_bc_xprt %p xid %08x\n", __func__, ntohl(calldir), - svsk->sk_bc_xprt, xid); + bc_xprt, xid); vec[0] = rqstp->rq_arg.head[0]; goto out; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4c8f18aff7c3..856274d7e85c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) xprt = kzalloc(size, GFP_KERNEL); if (xprt == NULL) goto out; + kref_init(&xprt->kref); xprt->max_reqs = max_req; xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); @@ -1101,8 +1102,10 @@ found: -PTR_ERR(xprt)); return xprt; } + if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state)) + /* ->setup returned a pre-initialized xprt: */ + return xprt; - kref_init(&xprt->kref); spin_lock_init(&xprt->transport_lock); spin_lock_init(&xprt->reserve_lock); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 96549df836ee..c431f5a57960 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) struct svc_sock *bc_sock; struct rpc_xprt *ret; + if (args->bc_xprt->xpt_bc_xprt) { + /* + * This server connection already has a backchannel + * export; we can't create a new one, as we wouldn't be + * able to match replies based on xid any more. 
So, + * reuse the already-existing one: + */ + return args->bc_xprt->xpt_bc_xprt; + } xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); if (IS_ERR(xprt)) return xprt; @@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) xprt->reestablish_timeout = 0; xprt->idle_timeout = 0; - /* - * The backchannel uses the same socket connection as the - * forechannel - */ - xprt->bc_xprt = args->bc_xprt; - bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); - bc_sock->sk_bc_xprt = xprt; - transport->sock = bc_sock->sk_sock; - transport->inet = bc_sock->sk_sk; - xprt->ops = &bc_tcp_ops; switch (addr->sa_family) { @@ -2406,6 +2405,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) xprt->address_strings[RPC_DISPLAY_PORT], xprt->address_strings[RPC_DISPLAY_PROTO]); + /* + * Once we've associated a backchannel xprt with a connection, + * we want to keep it around as long as long as the connection + * lasts, in case we need to start using it for a backchannel + * again; this reference won't be dropped until bc_xprt is + * destroyed. + */ + xprt_get(xprt); + args->bc_xprt->xpt_bc_xprt = xprt; + xprt->bc_xprt = args->bc_xprt; + bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); + transport->sock = bc_sock->sk_sock; + transport->inet = bc_sock->sk_sk; + /* * Since we don't want connections for the backchannel, we set * the xprt status to connected @@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) if (try_module_get(THIS_MODULE)) return xprt; + xprt_put(xprt); ret = ERR_PTR(-EINVAL); out_err: xprt_free(xprt); diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 396da16aabf8..1c702ca8aac8 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -262,6 +262,34 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) +# XZ +# --------------------------------------------------------------------------- +# Use xzkern to compress the kernel image and xzmisc to compress other things. +# +# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage +# of the kernel decompressor. A BCJ filter is used if it is available for +# the target architecture. xzkern also appends uncompressed size of the data +# using size_append. The .xz format has the size information available at +# the end of the file too, but it's in more complex format and it's good to +# avoid changing the part of the boot code that reads the uncompressed size. +# Note that the bytes added by size_append will make the xz tool think that +# the file is corrupt. This is expected. +# +# xzmisc doesn't use size_append, so it can be used to create normal .xz +# files. xzmisc uses smaller LZMA2 dictionary than xzkern, because a very +# big dictionary would increase the memory usage too much in the multi-call +# decompression mode. A BCJ filter isn't used either. 
+quiet_cmd_xzkern = XZKERN $@ +cmd_xzkern = (cat $(filter-out FORCE,$^) | \ + sh $(srctree)/scripts/xz_wrap.sh && \ + $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + (rm -f $@ ; false) + +quiet_cmd_xzmisc = XZMISC $@ +cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ + xz --check=crc32 --lzma2=dict=1MiB) > $@ || \ + (rm -f $@ ; false) + # misc stuff # --------------------------------------------------------------------------- quote:=" diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e3c7fc0dca38..4c0383da1c9a 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -859,7 +859,7 @@ sub annotate_values { $av_preprocessor = 0; } - } elsif ($cur =~ /^(\(\s*$Type\s*)\)/) { + } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') { print "CAST($1)\n" if ($dbg_values > 1); push(@av_paren_type, $type); $type = 'C'; @@ -2743,6 +2743,11 @@ sub process { WARN("plain inline is preferred over $1\n" . $herecurr); } +# Check for __attribute__ packed, prefer __packed + if ($line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) { + WARN("__packed is preferred over __attribute__((packed))\n" . $herecurr); + } + # check for sizeof(&) if ($line =~ /\bsizeof\s*\(\s*\&/) { WARN("sizeof(& should be avoided\n" . $herecurr); @@ -2785,10 +2790,15 @@ sub process { } # check for pointless casting of kmalloc return - if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) { + if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) { WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr); } +# check for multiple semicolons + if ($line =~ /;\s*;\s*$/) { + WARN("Statements terminations use 1 semicolon\n" . $herecurr); + } + # check for gcc specific __FUNCTION__ if ($line =~ /__FUNCTION__/) { WARN("__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr); @@ -2892,6 +2902,11 @@ sub process { ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr); } } + + if ($line =~ /debugfs_create_file.*S_IWUGO/ || + $line =~ /DEVICE_ATTR.*S_IWUGO/ ) { + WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . 
$herecurr); + } } # If we have no input at all, then there is nothing to report on diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index 5958fffb2114..55caecdad995 100644 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh @@ -243,6 +243,8 @@ case "$arg" in echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f" echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f" echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f" + echo "$output_file" | grep -q "\.xz$" && \ + compr="xz --check=crc32 --lzma2=dict=1MiB" echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f" echo "$output_file" | grep -q "\.cpio$" && compr="cat" shift diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index d21ec3a89603..139e0fff8e31 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl @@ -13,7 +13,7 @@ use strict; my $P = $0; -my $V = '0.26-beta6'; +my $V = '0.26'; use Getopt::Long qw(:config no_auto_abbrev); @@ -40,7 +40,7 @@ my $email_use_mailmap = 1; my $output_multiline = 1; my $output_separator = ", "; my $output_roles = 0; -my $output_rolestats = 0; +my $output_rolestats = 1; my $scm = 0; my $web = 0; my $subsystem = 0; @@ -494,6 +494,40 @@ if ($web) { exit($exit); +sub range_is_maintained { + my ($start, $end) = @_; + + for (my $i = $start; $i < $end; $i++) { + my $line = $typevalue[$i]; + if ($line =~ m/^(\C):\s*(.*)/) { + my $type = $1; + my $value = $2; + if ($type eq 'S') { + if ($value =~ /(maintain|support)/i) { + return 1; + } + } + } + } + return 0; +} + +sub range_has_maintainer { + my ($start, $end) = @_; + + for (my $i = $start; $i < $end; $i++) { + my $line = $typevalue[$i]; + if ($line =~ m/^(\C):\s*(.*)/) { + my $type = $1; + my $value = $2; + if ($type eq 'M') { + return 1; + } + } + } + return 0; +} + sub get_maintainers { %email_hash_name = (); %email_hash_address = (); @@ -556,7 +590,9 @@ sub get_maintainers { my $file_pd = ($file =~ tr@/@@); $value_pd++ if (substr($value,-1,1) ne "/"); $value_pd = -1 if ($value =~ /^\.\*/); - if ($value_pd >= $file_pd) { + if ($value_pd >= $file_pd && + range_is_maintained($start, $end) && + range_has_maintainer($start, $end)) { $exact_pattern_match_hash{$file} = 1; } if ($pattern_depth == 0 || @@ -720,7 +756,8 @@ Other options: --help => show this help information Default options: - [--email --git --m --n --l --multiline --pattern-depth=0 --remove-duplicates] + [--email --nogit --git-fallback --m --n --l --multiline -pattern-depth=0 + --remove-duplicates --rolestats] Notes: Using "-f directory" may give unexpected results: diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 97d2259ae999..e8fba959fffb 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1615,7 +1615,7 @@ static void section_rel(const char *modname, struct elf_info *elf, * A module includes a number of sections that are discarded * either when loaded or when used as built-in. * For loaded modules all functions marked __init and all data - * marked __initdata will be discarded when the module has been intialized. + * marked __initdata will be discarded when the module has been initialized. * Likewise for modules used built-in the sections marked __exit * are discarded because __exit marked function are supposed to be called * only when a module is unloaded which never happens for built-in modules. 
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh new file mode 100644 index 000000000000..17a5798c29da --- /dev/null +++ b/scripts/xz_wrap.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# +# This is a wrapper for xz to compress the kernel image using appropriate +# compression options depending on the architecture. +# +# Author: Lasse Collin +# +# This file has been put into the public domain. +# You can do whatever you want with this file. +# + +BCJ= +LZMA2OPTS= + +case $ARCH in + x86|x86_64) BCJ=--x86 ;; + powerpc) BCJ=--powerpc ;; + ia64) BCJ=--ia64; LZMA2OPTS=pb=4 ;; + arm) BCJ=--arm ;; + sparc) BCJ=--sparc ;; +esac + +exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h index 19ba16e8aacd..a4a863997bd5 100644 --- a/security/apparmor/include/match.h +++ b/security/apparmor/include/match.h @@ -28,7 +28,7 @@ * The format used for transition tables is based on the GNU flex table * file format (--tables-file option; see Table File Format in the flex * info pages and the flex sources for documentation). The magic number - * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because + * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used * slightly differently (see the apparmor-parser package). */ diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c index a351dd0a09c7..2b50cbe6aca9 100644 --- a/sound/ac97_bus.c +++ b/sound/ac97_bus.c @@ -19,8 +19,8 @@ /* * Let drivers decide whether they want to support given codec from their - * probe method. Drivers have direct access to the struct snd_ac97 structure and may - * decide based on the id field amongst other things. + * probe method. Drivers have direct access to the struct snd_ac97 + * structure and may decide based on the id field amongst other things. 
*/ static int ac97_bus_match(struct device *dev, struct device_driver *drv) { diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c index 91852e49910e..3687a6cc9881 100644 --- a/sound/aoa/codecs/onyx.c +++ b/sound/aoa/codecs/onyx.c @@ -1114,7 +1114,6 @@ static int onyx_i2c_remove(struct i2c_client *client) of_node_put(onyx->codec.node); if (onyx->codec_info) kfree(onyx->codec_info); - i2c_set_clientdata(client, onyx); kfree(onyx); return 0; } diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c index de8e03afa97b..faa317490545 100644 --- a/sound/aoa/core/gpio-feature.c +++ b/sound/aoa/core/gpio-feature.c @@ -287,10 +287,9 @@ static void ftr_gpio_exit(struct gpio_runtime *rt) free_irq(linein_detect_irq, &rt->line_in_notify); if (rt->line_out_notify.gpio_private) free_irq(lineout_detect_irq, &rt->line_out_notify); - cancel_delayed_work(&rt->headphone_notify.work); - cancel_delayed_work(&rt->line_in_notify.work); - cancel_delayed_work(&rt->line_out_notify.work); - flush_scheduled_work(); + cancel_delayed_work_sync(&rt->headphone_notify.work); + cancel_delayed_work_sync(&rt->line_in_notify.work); + cancel_delayed_work_sync(&rt->line_out_notify.work); mutex_destroy(&rt->headphone_notify.mutex); mutex_destroy(&rt->line_in_notify.mutex); mutex_destroy(&rt->line_out_notify.mutex); diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c index 7e267c9379bc..c8d8a1a6f964 100644 --- a/sound/aoa/core/gpio-pmf.c +++ b/sound/aoa/core/gpio-pmf.c @@ -107,10 +107,9 @@ static void pmf_gpio_exit(struct gpio_runtime *rt) /* make sure no work is pending before freeing * all things */ - cancel_delayed_work(&rt->headphone_notify.work); - cancel_delayed_work(&rt->line_in_notify.work); - cancel_delayed_work(&rt->line_out_notify.work); - flush_scheduled_work(); + cancel_delayed_work_sync(&rt->headphone_notify.work); + cancel_delayed_work_sync(&rt->line_in_notify.work); + cancel_delayed_work_sync(&rt->line_out_notify.work); mutex_destroy(&rt->headphone_notify.mutex); mutex_destroy(&rt->line_in_notify.mutex); diff --git a/sound/core/control.c b/sound/core/control.c index 45a818002d99..9ce00ed20fba 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1488,7 +1488,7 @@ int snd_ctl_create(struct snd_card *card) } /* - * Frequently used control callbacks + * Frequently used control callbacks/helpers */ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) @@ -1513,3 +1513,29 @@ int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol, } EXPORT_SYMBOL(snd_ctl_boolean_stereo_info); + +/** + * snd_ctl_enum_info - fills the info structure for an enumerated control + * @info: the structure to be filled + * @channels: the number of the control's channels; often one + * @items: the number of control values; also the size of @names + * @names: an array containing the names of all control values + * + * Sets all required fields in @info to their appropriate values. + * If the control's accessibility is not the default (readable and writable), + * the caller has to fill @info->access. 
+ */ +int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels, + unsigned int items, const char *const names[]) +{ + info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; + info->count = channels; + info->value.enumerated.items = items; + if (info->value.enumerated.item >= items) + info->value.enumerated.item = items - 1; + strlcpy(info->value.enumerated.name, + names[info->value.enumerated.item], + sizeof(info->value.enumerated.name)); + return 0; +} +EXPORT_SYMBOL(snd_ctl_enum_info); diff --git a/sound/core/init.c b/sound/core/init.c index 57b792e2439a..3e65da21a08c 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -642,7 +642,7 @@ static struct device_attribute card_number_attrs = * external accesses. Thus, you should call this function at the end * of the initialization of the card. * - * Returns zero otherwise a negative error code if the registrain failed. + * Returns zero otherwise a negative error code if the registration failed. */ int snd_card_register(struct snd_card *card) { diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index b753ec661fcf..a2e4eb324699 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -453,8 +453,10 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm, } else { *params = *save; max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir); - if (max < 0) + if (max < 0) { + kfree(save); return max; + } last = 1; } _end: diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 11446a1506da..a82e3756a72d 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -373,6 +373,27 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, (unsigned long)new_hw_ptr, (unsigned long)runtime->hw_ptr_base); } + + if (runtime->no_period_wakeup) { + /* + * Without regular period interrupts, we have to check + * the elapsed time to detect xruns. 
+ */ + jdelta = jiffies - runtime->hw_ptr_jiffies; + if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) + goto no_delta_check; + hdelta = jdelta - delta * HZ / runtime->rate; + while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) { + delta += runtime->buffer_size; + hw_base += runtime->buffer_size; + if (hw_base >= runtime->boundary) + hw_base = 0; + new_hw_ptr = hw_base + pos; + hdelta -= runtime->hw_ptr_buffer_jiffies; + } + goto no_delta_check; + } + /* something must be really wrong */ if (delta >= runtime->buffer_size + runtime->period_size) { hw_ptr_error(substream, @@ -442,6 +463,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, (long)old_hw_ptr); } + no_delta_check: if (runtime->status->hw_ptr == new_hw_ptr) return 0; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index e82c1f97d99e..4be45e7be8ad 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -422,6 +422,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, runtime->info = params->info; runtime->rate_num = params->rate_num; runtime->rate_den = params->rate_den; + runtime->no_period_wakeup = + (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) && + (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP); bits = snd_pcm_format_physical_width(runtime->format); runtime->sample_bits = bits; @@ -984,7 +987,7 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) if (push) snd_pcm_update_hw_ptr(substream); /* The jiffies check in snd_pcm_update_hw_ptr*() is done by - * a delta betwen the current jiffies, this gives a large enough + * a delta between the current jiffies, this gives a large enough * delta, effectively to skip the check once. */ substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000; diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c index bf09a5ad1865..119fddb6fc99 100644 --- a/sound/core/seq/seq.c +++ b/sound/core/seq/seq.c @@ -32,6 +32,7 @@ #include "seq_timer.h" #include "seq_system.h" #include "seq_info.h" +#include #include #if defined(CONFIG_SND_SEQ_DUMMY_MODULE) @@ -73,6 +74,9 @@ MODULE_PARM_DESC(seq_default_timer_subdevice, "The default timer subdevice numbe module_param(seq_default_timer_resolution, int, 0644); MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz."); +MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_SEQUENCER); +MODULE_ALIAS("devname:snd/seq"); + /* * INIT PART */ diff --git a/sound/core/sound.c b/sound/core/sound.c index 66691fe437e6..1c7a3efe1778 100644 --- a/sound/core/sound.c +++ b/sound/core/sound.c @@ -188,14 +188,22 @@ static const struct file_operations snd_fops = }; #ifdef CONFIG_SND_DYNAMIC_MINORS -static int snd_find_free_minor(void) +static int snd_find_free_minor(int type) { int minor; + /* static minors for module auto loading */ + if (type == SNDRV_DEVICE_TYPE_SEQUENCER) + return SNDRV_MINOR_SEQUENCER; + if (type == SNDRV_DEVICE_TYPE_TIMER) + return SNDRV_MINOR_TIMER; + for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) { - /* skip minors still used statically for autoloading devices */ - if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL || - minor == SNDRV_MINOR_SEQUENCER) + /* skip static minors still used for module auto loading */ + if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL) + continue; + if (minor == SNDRV_MINOR_SEQUENCER || + minor == SNDRV_MINOR_TIMER) continue; if (!snd_minors[minor]) return minor; @@ -269,7 +277,7 @@ int snd_register_device_for_dev(int type, struct snd_card *card, int dev, preg->private_data = 
private_data; mutex_lock(&sound_mutex); #ifdef CONFIG_SND_DYNAMIC_MINORS - minor = snd_find_free_minor(); + minor = snd_find_free_minor(type); #else minor = snd_kernel_minor(type, card, dev); if (minor >= 0 && snd_minors[minor]) diff --git a/sound/core/timer.c b/sound/core/timer.c index 13afb60999b9..ed016329e911 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -34,8 +34,8 @@ #include #include -#if defined(CONFIG_SND_HPET) || defined(CONFIG_SND_HPET_MODULE) -#define DEFAULT_TIMER_LIMIT 3 +#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE) +#define DEFAULT_TIMER_LIMIT 4 #elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE) #define DEFAULT_TIMER_LIMIT 2 #else @@ -52,6 +52,9 @@ MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); +MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); +MODULE_ALIAS("devname:snd/timer"); + struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* enhanced read with timestamps and events */ diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c index a1282c1c0591..5cfcb908c430 100644 --- a/sound/drivers/ml403-ac97cr.c +++ b/sound/drivers/ml403-ac97cr.c @@ -1143,8 +1143,8 @@ snd_ml403_ac97cr_create(struct snd_card *card, struct platform_device *pfdev, (resource->start) + 1); if (ml403_ac97cr->port == NULL) { snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": " - "unable to remap memory region (%x to %x)\n", - resource->start, resource->end); + "unable to remap memory region (%pR)\n", + resource); snd_ml403_ac97cr_free(ml403_ac97cr); return -EBUSY; } diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c index 971a84a4fa77..c424d329f806 100644 --- a/sound/i2c/other/ak4113.c +++ b/sound/i2c/other/ak4113.c @@ -57,8 +57,7 @@ static void snd_ak4113_free(struct ak4113 *chip) { chip->init = 1; /* don't schedule new work */ mb(); - cancel_delayed_work(&chip->work); - flush_scheduled_work(); + cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -141,7 +140,7 @@ void snd_ak4113_reinit(struct ak4113 *chip) { chip->init = 1; mb(); - flush_scheduled_work(); + flush_delayed_work_sync(&chip->work); ak4113_init_regs(chip); /* bring up statistics / event queing */ chip->init = 0; diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c index 0341451f814c..d9fb537b0b94 100644 --- a/sound/i2c/other/ak4114.c +++ b/sound/i2c/other/ak4114.c @@ -67,8 +67,7 @@ static void snd_ak4114_free(struct ak4114 *chip) { chip->init = 1; /* don't schedule new work */ mb(); - cancel_delayed_work(&chip->work); - flush_scheduled_work(); + cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -154,7 +153,7 @@ void snd_ak4114_reinit(struct ak4114 *chip) { chip->init = 1; mb(); - flush_scheduled_work(); + flush_delayed_work_sync(&chip->work); ak4114_init_regs(chip); /* bring up statistics / event queing */ chip->init = 0; diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c index 265abcce9dba..9b915e27b5bd 100644 --- a/sound/isa/opl3sa2.c +++ b/sound/isa/opl3sa2.c @@ -264,7 +264,7 @@ static int __devinit snd_opl3sa2_detect(struct snd_card *card) snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1); return -ENODEV; } - /* try if the MIC register is accesible */ + /* try if the MIC register is accessible */ tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC); snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a); if (((tmp1 = 
snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) { diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index 12e34653b8a8..9823d59d7ad7 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig @@ -209,7 +209,7 @@ config SND_OXYGEN_LIB tristate config SND_OXYGEN - tristate "C-Media 8788 (Oxygen)" + tristate "C-Media 8786, 8787, 8788 (Oxygen)" select SND_OXYGEN_LIB select SND_PCM select SND_MPU401_UART @@ -217,13 +217,18 @@ config SND_OXYGEN Say Y here to include support for sound cards based on the C-Media CMI8788 (Oxygen HD Audio) chip: * Asound A-8788 + * Asus Xonar DG * AuzenTech X-Meridian + * AuzenTech X-Meridian 2G * Bgears b-Enspirer * Club3D Theatron DTS * HT-Omega Claro (plus) * HT-Omega Claro halo (XT) + * Kuroutoshikou CMI8787-HG2PCI * Razer Barracuda AC-1 * Sondigo Inferno + * TempoTec/MediaTek HiFier Fantasia + * TempoTec/MediaTek HiFier Serenade To compile this driver as a module, choose M here: the module will be called snd-oxygen. @@ -578,18 +583,6 @@ config SND_HDSPM To compile this driver as a module, choose M here: the module will be called snd-hdspm. -config SND_HIFIER - tristate "TempoTec HiFier Fantasia" - select SND_OXYGEN_LIB - select SND_PCM - select SND_MPU401_UART - help - Say Y here to include support for the MediaTek/TempoTec HiFier - Fantasia sound card. - - To compile this driver as a module, choose M here: the module - will be called snd-hifier. - config SND_ICE1712 tristate "ICEnsemble ICE1712 (Envy24)" select SND_MPU401_UART @@ -826,8 +819,8 @@ config SND_VIRTUOSO Say Y here to include support for sound cards based on the Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), and Essence STX. - Support for the HDAV1.3 (Deluxe) is incomplete; for the - HDAV1.3 Slim and Xense, missing. + Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental; + for the Xense, missing. To compile this driver as a module, choose M here: the module will be called snd-virtuoso. diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index a7630e9edf8a..0fc614ce16c1 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1014,8 +1014,7 @@ static int snd_ac97_free(struct snd_ac97 *ac97) { if (ac97) { #ifdef CONFIG_SND_AC97_POWER_SAVE - cancel_delayed_work(&ac97->power_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&ac97->power_work); #endif snd_ac97_proc_done(ac97); if (ac97->bus) @@ -2456,8 +2455,7 @@ void snd_ac97_suspend(struct snd_ac97 *ac97) if (ac97->build_ops->suspend) ac97->build_ops->suspend(ac97); #ifdef CONFIG_SND_AC97_POWER_SAVE - cancel_delayed_work(&ac97->power_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&ac97->power_work); #endif snd_ac97_powerdown(ac97); } diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index 2f3cacbd5528..6117595fc075 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c @@ -1,6 +1,6 @@ /* * azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168). - * Copyright (C) 2002, 2005 - 2009 by Andreas Mohr + * Copyright (C) 2002, 2005 - 2010 by Andreas Mohr * * Framework borrowed from Bart Hartgers's als4000.c. * Driver developed on PCI168 AP(W) version (PCI rev. 
10, subsystem ID 1801), @@ -175,6 +175,7 @@ #include #include +#include /* WARN_ONCE */ #include #include #include @@ -201,14 +202,15 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}"); /* === Debug settings === Further diagnostic functionality than the settings below - does not need to be provided, since one can easily write a bash script + does not need to be provided, since one can easily write a POSIX shell script to dump the card's I/O ports (those listed in lspci -v -v): - function dump() + dump() { local descr=$1; local addr=$2; local count=$3 echo "${descr}: ${count} @ ${addr}:" - dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C + dd if=/dev/port skip=`printf %d ${addr}` count=${count} bs=1 \ + 2>/dev/null| hexdump -C } and then use something like "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8", @@ -216,14 +218,14 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}"); possibly within a "while true; do ... sleep 1; done" loop. Tweaking ports could be done using VALSTRING="`printf "%02x" $value`" - printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null + printf "\x""$VALSTRING"|dd of=/dev/port seek=`printf %d ${addr}` bs=1 \ + 2>/dev/null */ #define DEBUG_MISC 0 #define DEBUG_CALLS 0 #define DEBUG_MIXER 0 #define DEBUG_CODEC 0 -#define DEBUG_IO 0 #define DEBUG_TIMER 0 #define DEBUG_GAME 0 #define DEBUG_PM 0 @@ -291,19 +293,23 @@ static int seqtimer_scaling = 128; module_param(seqtimer_scaling, int, 0444); MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128."); -struct snd_azf3328_codec_data { - unsigned long io_base; - struct snd_pcm_substream *substream; - bool running; - const char *name; -}; - enum snd_azf3328_codec_type { + /* warning: fixed indices (also used for bitmask checks!) */ AZF_CODEC_PLAYBACK = 0, AZF_CODEC_CAPTURE = 1, AZF_CODEC_I2S_OUT = 2, }; +struct snd_azf3328_codec_data { + unsigned long io_base; /* keep first! (avoid offset calc) */ + unsigned int dma_base; /* helper to avoid an indirection in hotpath */ + spinlock_t *lock; /* TODO: convert to our own per-codec lock member */ + struct snd_pcm_substream *substream; + bool running; + enum snd_azf3328_codec_type type; + const char *name; +}; + struct snd_azf3328 { /* often-used fields towards beginning, then grouped */ @@ -362,6 +368,9 @@ MODULE_DEVICE_TABLE(pci, snd_azf3328_ids); static int snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set) { + /* Well, strictly spoken, the inb/outb sequence isn't atomic + and would need locking. However we currently don't care + since it potentially complicates matters. */ u8 prev = inb(reg), new; new = (do_set) ? 
(prev|mask) : (prev & ~mask); @@ -413,6 +422,21 @@ snd_azf3328_codec_outl(const struct snd_azf3328_codec_data *codec, outl(value, codec->io_base + reg); } +static inline void +snd_azf3328_codec_outl_multi(const struct snd_azf3328_codec_data *codec, + unsigned reg, const void *buffer, int count +) +{ + unsigned long addr = codec->io_base + reg; + if (count) { + const u32 *buf = buffer; + do { + outl(*buf++, addr); + addr += 4; + } while (--count); + } +} + static inline u32 snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg) { @@ -943,38 +967,43 @@ snd_azf3328_hw_free(struct snd_pcm_substream *substream) } static void -snd_azf3328_codec_setfmt(struct snd_azf3328 *chip, - enum snd_azf3328_codec_type codec_type, +snd_azf3328_codec_setfmt(struct snd_azf3328_codec_data *codec, enum azf_freq_t bitrate, unsigned int format_width, unsigned int channels ) { unsigned long flags; - const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; u16 val = 0xff00; + u8 freq = 0; snd_azf3328_dbgcallenter(); switch (bitrate) { - case AZF_FREQ_4000: val |= SOUNDFORMAT_FREQ_SUSPECTED_4000; break; - case AZF_FREQ_4800: val |= SOUNDFORMAT_FREQ_SUSPECTED_4800; break; - case AZF_FREQ_5512: - /* the AZF3328 names it "5510" for some strange reason */ - val |= SOUNDFORMAT_FREQ_5510; break; - case AZF_FREQ_6620: val |= SOUNDFORMAT_FREQ_6620; break; - case AZF_FREQ_8000: val |= SOUNDFORMAT_FREQ_8000; break; - case AZF_FREQ_9600: val |= SOUNDFORMAT_FREQ_9600; break; - case AZF_FREQ_11025: val |= SOUNDFORMAT_FREQ_11025; break; - case AZF_FREQ_13240: val |= SOUNDFORMAT_FREQ_SUSPECTED_13240; break; - case AZF_FREQ_16000: val |= SOUNDFORMAT_FREQ_16000; break; - case AZF_FREQ_22050: val |= SOUNDFORMAT_FREQ_22050; break; - case AZF_FREQ_32000: val |= SOUNDFORMAT_FREQ_32000; break; +#define AZF_FMT_XLATE(in_freq, out_bits) \ + do { \ + case AZF_FREQ_ ## in_freq: \ + freq = SOUNDFORMAT_FREQ_ ## out_bits; \ + break; \ + } while (0); + AZF_FMT_XLATE(4000, SUSPECTED_4000) + AZF_FMT_XLATE(4800, SUSPECTED_4800) + /* the AZF3328 names it "5510" for some strange reason: */ + AZF_FMT_XLATE(5512, 5510) + AZF_FMT_XLATE(6620, 6620) + AZF_FMT_XLATE(8000, 8000) + AZF_FMT_XLATE(9600, 9600) + AZF_FMT_XLATE(11025, 11025) + AZF_FMT_XLATE(13240, SUSPECTED_13240) + AZF_FMT_XLATE(16000, 16000) + AZF_FMT_XLATE(22050, 22050) + AZF_FMT_XLATE(32000, 32000) default: snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate); /* fall-through */ - case AZF_FREQ_44100: val |= SOUNDFORMAT_FREQ_44100; break; - case AZF_FREQ_48000: val |= SOUNDFORMAT_FREQ_48000; break; - case AZF_FREQ_66200: val |= SOUNDFORMAT_FREQ_SUSPECTED_66200; break; + AZF_FMT_XLATE(44100, 44100) + AZF_FMT_XLATE(48000, 48000) + AZF_FMT_XLATE(66200, SUSPECTED_66200) +#undef AZF_FMT_XLATE } /* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */ /* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */ @@ -986,13 +1015,15 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip, /* val = 0xff0d; 41m23.135s (5523,600Hz; -> 5512Hz???) */ /* val = 0xff0e; 28m30.777s (8017Hz; -> 8000Hz???) 
*/ + val |= freq; + if (channels == 2) val |= SOUNDFORMAT_FLAG_2CHANNELS; if (format_width == 16) val |= SOUNDFORMAT_FLAG_16BIT; - spin_lock_irqsave(&chip->reg_lock, flags); + spin_lock_irqsave(codec->lock, flags); /* set bitrate/format */ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val); @@ -1004,7 +1035,8 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip, * (FIXME: yes, it works, but what exactly am I doing here?? :) * FIXME: does this have some side effects for full-duplex * or other dramatic side effects? */ - if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */ + /* do it for non-capture codecs only */ + if (codec->type != AZF_CODEC_CAPTURE) snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) | DMA_RUN_SOMETHING1 | @@ -1014,20 +1046,19 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip, DMA_SOMETHING_ELSE ); - spin_unlock_irqrestore(&chip->reg_lock, flags); + spin_unlock_irqrestore(codec->lock, flags); snd_azf3328_dbgcallleave(); } static inline void -snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip, - enum snd_azf3328_codec_type codec_type +snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328_codec_data *codec ) { /* choose lowest frequency for low power consumption. * While this will cause louder noise due to rather coarse frequency, * it should never matter since output should always * get disabled properly when idle anyway. */ - snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1); + snd_azf3328_codec_setfmt(codec, AZF_FREQ_4000, 8, 1); } static void @@ -1101,69 +1132,87 @@ snd_azf3328_ctrl_codec_activity(struct snd_azf3328 *chip, /* ...and adjust clock, too * (reduce noise and power consumption) */ if (!enable) - snd_azf3328_codec_setfmt_lowpower( - chip, - codec_type - ); + snd_azf3328_codec_setfmt_lowpower(codec); codec->running = enable; } } static void -snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip, - enum snd_azf3328_codec_type codec_type, +snd_azf3328_codec_setdmaa(struct snd_azf3328_codec_data *codec, unsigned long addr, - unsigned int count, - unsigned int size + unsigned int period_bytes, + unsigned int buffer_bytes ) { - const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; snd_azf3328_dbgcallenter(); + WARN_ONCE(period_bytes & 1, "odd period length!?\n"); + WARN_ONCE(buffer_bytes != 2 * period_bytes, + "missed our input expectations! %u vs. %u\n", + buffer_bytes, period_bytes); if (!codec->running) { /* AZF3328 uses a two buffer pointer DMA transfer approach */ - unsigned long flags, addr_area2; + unsigned long flags; /* width 32bit (prevent overflow): */ - u32 count_areas, lengths; + u32 area_length; + struct codec_setup_io { + u32 dma_start_1; + u32 dma_start_2; + u32 dma_lengths; + } __attribute__((packed)) setup_io; + + area_length = buffer_bytes/2; + + setup_io.dma_start_1 = addr; + setup_io.dma_start_2 = addr+area_length; - count_areas = size/2; - addr_area2 = addr+count_areas; - snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", - addr, count_areas, addr_area2, count_areas); + snd_azf3328_dbgcodec( + "setdma: buffers %08x[%u] / %08x[%u], %u, %u\n", + setup_io.dma_start_1, area_length, + setup_io.dma_start_2, area_length, + period_bytes, buffer_bytes); - count_areas--; /* max. index */ + /* Hmm, are we really supposed to decrement this by 1?? + Most definitely certainly not: configuring full length does + work properly (i.e. likely better), and BTW we + violated possibly differing frame sizes with this... 
+ + area_length--; |* max. index *| + */ /* build combined I/O buffer length word */ - lengths = (count_areas << 16) | (count_areas); - spin_lock_irqsave(&chip->reg_lock, flags); - snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr); - snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2, - addr_area2); - snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS, - lengths); - spin_unlock_irqrestore(&chip->reg_lock, flags); + setup_io.dma_lengths = (area_length << 16) | (area_length); + + spin_lock_irqsave(codec->lock, flags); + snd_azf3328_codec_outl_multi( + codec, IDX_IO_CODEC_DMA_START_1, &setup_io, 3 + ); + spin_unlock_irqrestore(codec->lock, flags); } snd_azf3328_dbgcallleave(); } static int -snd_azf3328_codec_prepare(struct snd_pcm_substream *substream) +snd_azf3328_pcm_prepare(struct snd_pcm_substream *substream) { -#if 0 - struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_azf3328_codec_data *codec = runtime->private_data; +#if 0 unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); #endif snd_azf3328_dbgcallenter(); + + codec->dma_base = runtime->dma_addr; + #if 0 - snd_azf3328_codec_setfmt(chip, AZF_CODEC_..., + snd_azf3328_codec_setfmt(codec, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels); - snd_azf3328_codec_setdmaa(chip, AZF_CODEC_..., + snd_azf3328_codec_setdmaa(codec, runtime->dma_addr, count, size); #endif snd_azf3328_dbgcallleave(); @@ -1171,24 +1220,23 @@ snd_azf3328_codec_prepare(struct snd_pcm_substream *substream) } static int -snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, - struct snd_pcm_substream *substream, int cmd) +snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); - const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_azf3328_codec_data *codec = runtime->private_data; int result = 0; u16 flags1; bool previously_muted = 0; - bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type); + bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type); - snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd); + snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd); switch (cmd) { case SNDRV_PCM_TRIGGER_START: snd_azf3328_dbgcodec("START %s\n", codec->name); - if (is_playback_codec) { + if (is_main_mixer_playback_codec) { /* mute WaveOut (avoid clicking during setup) */ previously_muted = snd_azf3328_mixer_set_mute( @@ -1196,12 +1244,12 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, ); } - snd_azf3328_codec_setfmt(chip, codec_type, + snd_azf3328_codec_setfmt(codec, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels); - spin_lock(&chip->reg_lock); + spin_lock(codec->lock); /* first, remember current value: */ flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS); @@ -1211,14 +1259,14 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, /* FIXME: clear interrupts or what??? 
*/ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff); - spin_unlock(&chip->reg_lock); + spin_unlock(codec->lock); - snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr, + snd_azf3328_codec_setdmaa(codec, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream) ); - spin_lock(&chip->reg_lock); + spin_lock(codec->lock); #ifdef WIN9X /* FIXME: enable playback/recording??? */ flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2; @@ -1242,10 +1290,10 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, DMA_EPILOGUE_SOMETHING | DMA_SOMETHING_ELSE); #endif - spin_unlock(&chip->reg_lock); - snd_azf3328_ctrl_codec_activity(chip, codec_type, 1); + spin_unlock(codec->lock); + snd_azf3328_ctrl_codec_activity(chip, codec->type, 1); - if (is_playback_codec) { + if (is_main_mixer_playback_codec) { /* now unmute WaveOut */ if (!previously_muted) snd_azf3328_mixer_set_mute( @@ -1258,19 +1306,19 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, case SNDRV_PCM_TRIGGER_RESUME: snd_azf3328_dbgcodec("RESUME %s\n", codec->name); /* resume codec if we were active */ - spin_lock(&chip->reg_lock); + spin_lock(codec->lock); if (codec->running) snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, snd_azf3328_codec_inw( codec, IDX_IO_CODEC_DMA_FLAGS ) | DMA_RESUME ); - spin_unlock(&chip->reg_lock); + spin_unlock(codec->lock); break; case SNDRV_PCM_TRIGGER_STOP: snd_azf3328_dbgcodec("STOP %s\n", codec->name); - if (is_playback_codec) { + if (is_main_mixer_playback_codec) { /* mute WaveOut (avoid clicking during setup) */ previously_muted = snd_azf3328_mixer_set_mute( @@ -1278,7 +1326,7 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, ); } - spin_lock(&chip->reg_lock); + spin_lock(codec->lock); /* first, remember current value: */ flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS); @@ -1293,10 +1341,10 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, flags1 &= ~DMA_RUN_SOMETHING1; snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1); - spin_unlock(&chip->reg_lock); - snd_azf3328_ctrl_codec_activity(chip, codec_type, 0); + spin_unlock(codec->lock); + snd_azf3328_ctrl_codec_activity(chip, codec->type, 0); - if (is_playback_codec) { + if (is_main_mixer_playback_codec) { /* now unmute WaveOut */ if (!previously_muted) snd_azf3328_mixer_set_mute( @@ -1330,67 +1378,29 @@ snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type, return result; } -static int -snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd) -{ - return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd); -} - -static int -snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd) -{ - return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd); -} - -static int -snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd) -{ - return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd); -} - static snd_pcm_uframes_t -snd_azf3328_codec_pointer(struct snd_pcm_substream *substream, - enum snd_azf3328_codec_type codec_type +snd_azf3328_pcm_pointer(struct snd_pcm_substream *substream ) { - const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); - const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; - unsigned long bufptr, result; + const struct snd_azf3328_codec_data *codec = + substream->runtime->private_data; + unsigned long result; snd_pcm_uframes_t frmres; -#ifdef QUERY_HARDWARE - 
bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1); -#else - bufptr = substream->runtime->dma_addr; -#endif result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS); /* calculate offset */ - result -= bufptr; +#ifdef QUERY_HARDWARE + result -= snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1); +#else + result -= codec->dma_base; +#endif frmres = bytes_to_frames( substream->runtime, result); - snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n", - codec->name, result, frmres); + snd_azf3328_dbgcodec("%08li %s @ 0x%8lx, frames %8ld\n", + jiffies, codec->name, result, frmres); return frmres; } -static snd_pcm_uframes_t -snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream) -{ - return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK); -} - -static snd_pcm_uframes_t -snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream) -{ - return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE); -} - -static snd_pcm_uframes_t -snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream) -{ - return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT); -} - /******************************************************************/ #ifdef SUPPORT_GAMEPORT @@ -1532,7 +1542,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport, } } - /* trigger next axes sampling, to be evaluated the next time we + /* trigger next sampling of axes, to be evaluated the next time we * enter this function */ /* for some very, very strange reason we cannot enable @@ -1624,29 +1634,29 @@ snd_azf3328_irq_log_unknown_type(u8 which) } static inline void -snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status) +snd_azf3328_pcm_interrupt(const struct snd_azf3328_codec_data *first_codec, + u8 status +) { u8 which; enum snd_azf3328_codec_type codec_type; - const struct snd_azf3328_codec_data *codec; + const struct snd_azf3328_codec_data *codec = first_codec; for (codec_type = AZF_CODEC_PLAYBACK; codec_type <= AZF_CODEC_I2S_OUT; - ++codec_type) { + ++codec_type, ++codec) { /* skip codec if there's no interrupt for it */ if (!(status & (1 << codec_type))) continue; - codec = &chip->codecs[codec_type]; - - spin_lock(&chip->reg_lock); + spin_lock(codec->lock); which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE); /* ack all IRQ types immediately */ snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which); - spin_unlock(&chip->reg_lock); + spin_unlock(codec->lock); - if ((chip->pcm[codec_type]) && (codec->substream)) { + if (codec->substream) { snd_pcm_period_elapsed(codec->substream); snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n", codec->name, @@ -1701,7 +1711,7 @@ snd_azf3328_interrupt(int irq, void *dev_id) } if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT)) - snd_azf3328_codec_interrupt(chip, status); + snd_azf3328_pcm_interrupt(chip->codecs, status); if (status & IRQ_GAMEPORT) snd_azf3328_gameport_interrupt(chip); @@ -1789,101 +1799,85 @@ snd_azf3328_pcm_open(struct snd_pcm_substream *substream, { struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; snd_azf3328_dbgcallenter(); - chip->codecs[codec_type].substream = substream; + codec->substream = substream; /* same parameters for all our codecs - at least we think so... 
*/ runtime->hw = snd_azf3328_hardware; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &snd_azf3328_hw_constraints_rates); + runtime->private_data = codec; snd_azf3328_dbgcallleave(); return 0; } static int -snd_azf3328_playback_open(struct snd_pcm_substream *substream) +snd_azf3328_pcm_playback_open(struct snd_pcm_substream *substream) { return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK); } static int -snd_azf3328_capture_open(struct snd_pcm_substream *substream) +snd_azf3328_pcm_capture_open(struct snd_pcm_substream *substream) { return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE); } static int -snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream) +snd_azf3328_pcm_i2s_out_open(struct snd_pcm_substream *substream) { return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT); } static int -snd_azf3328_pcm_close(struct snd_pcm_substream *substream, - enum snd_azf3328_codec_type codec_type +snd_azf3328_pcm_close(struct snd_pcm_substream *substream ) { - struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); + struct snd_azf3328_codec_data *codec = + substream->runtime->private_data; snd_azf3328_dbgcallenter(); - chip->codecs[codec_type].substream = NULL; + codec->substream = NULL; snd_azf3328_dbgcallleave(); return 0; } -static int -snd_azf3328_playback_close(struct snd_pcm_substream *substream) -{ - return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK); -} - -static int -snd_azf3328_capture_close(struct snd_pcm_substream *substream) -{ - return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE); -} - -static int -snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream) -{ - return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT); -} - /******************************************************************/ static struct snd_pcm_ops snd_azf3328_playback_ops = { - .open = snd_azf3328_playback_open, - .close = snd_azf3328_playback_close, + .open = snd_azf3328_pcm_playback_open, + .close = snd_azf3328_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_azf3328_hw_params, .hw_free = snd_azf3328_hw_free, - .prepare = snd_azf3328_codec_prepare, - .trigger = snd_azf3328_codec_playback_trigger, - .pointer = snd_azf3328_codec_playback_pointer + .prepare = snd_azf3328_pcm_prepare, + .trigger = snd_azf3328_pcm_trigger, + .pointer = snd_azf3328_pcm_pointer }; static struct snd_pcm_ops snd_azf3328_capture_ops = { - .open = snd_azf3328_capture_open, - .close = snd_azf3328_capture_close, + .open = snd_azf3328_pcm_capture_open, + .close = snd_azf3328_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_azf3328_hw_params, .hw_free = snd_azf3328_hw_free, - .prepare = snd_azf3328_codec_prepare, - .trigger = snd_azf3328_codec_capture_trigger, - .pointer = snd_azf3328_codec_capture_pointer + .prepare = snd_azf3328_pcm_prepare, + .trigger = snd_azf3328_pcm_trigger, + .pointer = snd_azf3328_pcm_pointer }; static struct snd_pcm_ops snd_azf3328_i2s_out_ops = { - .open = snd_azf3328_i2s_out_open, - .close = snd_azf3328_i2s_out_close, + .open = snd_azf3328_pcm_i2s_out_open, + .close = snd_azf3328_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_azf3328_hw_params, .hw_free = snd_azf3328_hw_free, - .prepare = snd_azf3328_codec_prepare, - .trigger = snd_azf3328_codec_i2s_out_trigger, - .pointer = snd_azf3328_codec_i2s_out_pointer + .prepare = snd_azf3328_pcm_prepare, + .trigger = snd_azf3328_pcm_trigger, + .pointer = snd_azf3328_pcm_pointer }; static int __devinit @@ -1966,7 +1960,7 @@ snd_azf3328_timer_start(struct snd_timer *timer) 
snd_azf3328_dbgtimer("delay was too low (%d)!\n", delay); delay = 49; /* minimum time is 49 ticks */ } - snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay); + snd_azf3328_dbgtimer("setting timer countdown value %d\n", delay); delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE; spin_lock_irqsave(&chip->reg_lock, flags); snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay); @@ -2180,6 +2174,7 @@ snd_azf3328_create(struct snd_card *card, }; u8 dma_init; enum snd_azf3328_codec_type codec_type; + struct snd_azf3328_codec_data *codec_setup; *rchip = NULL; @@ -2217,15 +2212,23 @@ snd_azf3328_create(struct snd_card *card, chip->opl3_io = pci_resource_start(pci, 3); chip->mixer_io = pci_resource_start(pci, 4); - chip->codecs[AZF_CODEC_PLAYBACK].io_base = - chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK; - chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK"; - chip->codecs[AZF_CODEC_CAPTURE].io_base = - chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE; - chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE"; - chip->codecs[AZF_CODEC_I2S_OUT].io_base = - chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT; - chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT"; + codec_setup = &chip->codecs[AZF_CODEC_PLAYBACK]; + codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK; + codec_setup->lock = &chip->reg_lock; + codec_setup->type = AZF_CODEC_PLAYBACK; + codec_setup->name = "PLAYBACK"; + + codec_setup = &chip->codecs[AZF_CODEC_CAPTURE]; + codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE; + codec_setup->lock = &chip->reg_lock; + codec_setup->type = AZF_CODEC_CAPTURE; + codec_setup->name = "CAPTURE"; + + codec_setup = &chip->codecs[AZF_CODEC_I2S_OUT]; + codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT; + codec_setup->lock = &chip->reg_lock; + codec_setup->type = AZF_CODEC_I2S_OUT; + codec_setup->name = "I2S_OUT"; if (request_irq(pci->irq, snd_azf3328_interrupt, IRQF_SHARED, card->shortname, chip)) { @@ -2257,15 +2260,15 @@ snd_azf3328_create(struct snd_card *card, struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type]; - /* shutdown codecs to save power */ + /* shutdown codecs to reduce power / noise */ /* have ...ctrl_codec_activity() act properly */ codec->running = 1; snd_azf3328_ctrl_codec_activity(chip, codec_type, 0); - spin_lock_irq(&chip->reg_lock); + spin_lock_irq(codec->lock); snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS, dma_init); - spin_unlock_irq(&chip->reg_lock); + spin_unlock_irq(codec->lock); } snd_card_set_dev(card, &pci->dev); @@ -2419,6 +2422,7 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state) snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + /* same pcm object for playback/capture */ snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]); snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]); diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c index 37e1b5df5ab8..2958a05b5293 100644 --- a/sound/pci/bt87x.c +++ b/sound/pci/bt87x.c @@ -637,15 +637,9 @@ static struct snd_kcontrol_new snd_bt87x_capture_boost = { static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { - static char *texts[3] = {"TV Tuner", "FM", "Mic/Line"}; + static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"}; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 3; - if (info->value.enumerated.item > 2) - info->value.enumerated.item = 2; - strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]); - return 0; + return 
snd_ctl_enum_info(info, 1, 3, texts); } static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol, diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h index f19c11077255..fc53b9bca26d 100644 --- a/sound/pci/ca0106/ca0106.h +++ b/sound/pci/ca0106/ca0106.h @@ -188,7 +188,7 @@ #define PLAYBACK_LIST_PTR 0x02 /* Pointer to the current period being played */ /* PTR[5:0], Default: 0x0 */ #define PLAYBACK_UNKNOWN3 0x03 /* Not used ?? */ -#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA addresss */ +#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA address */ /* DMA[31:0], Default: 0x0 */ #define PLAYBACK_PERIOD_SIZE 0x05 /* Playback period size. win2000 uses 0x04000000 */ /* SIZE[31:16], Default: 0x0 */ diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index d2d12c08f937..01b49388fafd 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -1082,7 +1082,7 @@ snd_ca0106_pcm_pointer_capture(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ca0106_pcm *epcm = runtime->private_data; snd_pcm_uframes_t ptr, ptr1, ptr2 = 0; - int channel = channel=epcm->channel_id; + int channel = epcm->channel_id; if (!epcm->running) return 0; diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 329968edca9b..b5bb036ef73c 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c @@ -2507,14 +2507,12 @@ static int snd_cmipci_line_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct cmipci *cm = snd_kcontrol_chip(kcontrol); - static char *texts[3] = { "Line-In", "Rear Output", "Bass Output" }; - uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - uinfo->count = 1; - uinfo->value.enumerated.items = cm->chip_version >= 39 ? 3 : 2; - if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) - uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; - strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); - return 0; + static const char *const texts[3] = { + "Line-In", "Rear Output", "Bass Output" + }; + + return snd_ctl_enum_info(uinfo, 1, + cm->chip_version >= 39 ? 
3 : 2, texts); } static inline unsigned int get_line_in_mode(struct cmipci *cm) @@ -2564,14 +2562,9 @@ static int snd_cmipci_line_in_mode_put(struct snd_kcontrol *kcontrol, static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { - static char *texts[2] = { "Mic-In", "Center/LFE Output" }; - uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - uinfo->count = 1; - uinfo->value.enumerated.items = 2; - if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) - uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; - strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); - return 0; + static const char *const texts[2] = { "Mic-In", "Center/LFE Output" }; + + return snd_ctl_enum_info(uinfo, 1, 2, texts); } static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol, diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c index a3301cc4ab82..185b00088320 100644 --- a/sound/pci/cs5535audio/cs5535audio_pm.c +++ b/sound/pci/cs5535audio/cs5535audio_pm.c @@ -90,12 +90,7 @@ int snd_cs5535audio_resume(struct pci_dev *pci) int i; pci_set_power_state(pci, PCI_D0); - if (pci_restore_state(pci) < 0) { - printk(KERN_ERR "cs5535audio: pci_restore_state failed, " - "disabling device\n"); - snd_card_disconnect(card); - return -EIO; - } + pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "cs5535audio: pci_enable_device failed, " "disabling device\n"); diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index df47f738098d..0c701e4ec8a5 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ b/sound/pci/emu10k1/emu10k1x.c @@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable, "Enable the EMU10K1X soundcard."); */ #define PLAYBACK_LIST_SIZE 0x01 /* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000 */ #define PLAYBACK_LIST_PTR 0x02 /* Pointer to the current period being played */ -#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA addresss */ +#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA address */ #define PLAYBACK_PERIOD_SIZE 0x05 /* Playback period size */ #define PLAYBACK_POINTER 0x06 /* Playback period pointer. Sample currently in DAC */ #define PLAYBACK_UNKNOWN1 0x07 diff --git a/sound/pci/emu10k1/p16v.h b/sound/pci/emu10k1/p16v.h index 153214940336..00f4817533b1 100644 --- a/sound/pci/emu10k1/p16v.h +++ b/sound/pci/emu10k1/p16v.h @@ -96,7 +96,7 @@ #define PLAYBACK_LIST_SIZE 0x01 /* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000 */ #define PLAYBACK_LIST_PTR 0x02 /* Pointer to the current period being played */ #define PLAYBACK_UNKNOWN3 0x03 /* Not used */ -#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA addresss */ +#define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA address */ #define PLAYBACK_PERIOD_SIZE 0x05 /* Playback period size. win2000 uses 0x04000000 */ #define PLAYBACK_POINTER 0x06 /* Playback period pointer. 
Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */ #define PLAYBACK_FIFO_END_ADDRESS 0x07 /* Playback FIFO end address */ diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index 23a58f0d6cb9..7c17f45d876d 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -220,7 +220,7 @@ MODULE_PARM_DESC(joystick, "Enable joystick."); #define RINGB_EN_2CODEC 0x0020 #define RINGB_SING_BIT_DUAL 0x0040 -/* ****Port Adresses**** */ +/* ****Port Addresses**** */ /* Write & Read */ #define ESM_INDEX 0x02 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 98b6d02a36c9..05e5ec88c2d9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4571,6 +4571,9 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, } memset(cfg->hp_pins + cfg->hp_outs, 0, sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs)); + if (!cfg->hp_outs) + cfg->line_out_type = AUTO_PIN_HP_OUT; + } /* sort by sequence */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index a1c4008af891..d3d18be483e1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1235,7 +1235,8 @@ static int azx_setup_periods(struct azx *chip, pos_adj = 0; } else { ofs = setup_bdle(substream, azx_dev, - &bdl, ofs, pos_adj, 1); + &bdl, ofs, pos_adj, + !substream->runtime->no_period_wakeup); if (ofs < 0) goto error; } @@ -1247,7 +1248,8 @@ static int azx_setup_periods(struct azx *chip, period_bytes - pos_adj, 0); else ofs = setup_bdle(substream, azx_dev, &bdl, ofs, - period_bytes, 1); + period_bytes, + !substream->runtime->no_period_wakeup); if (ofs < 0) goto error; } @@ -1515,7 +1517,8 @@ static struct snd_pcm_hardware azx_pcm_hw = { /* No full-resume yet implemented */ /* SNDRV_PCM_INFO_RESUME |*/ SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_SYNC_START), + SNDRV_PCM_INFO_SYNC_START | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index f7ff3f7ccb8e..46780670162b 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -666,7 +666,7 @@ static struct snd_kcontrol_new ad1986a_mixers[] = { HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), @@ -729,7 +729,7 @@ static struct snd_kcontrol_new ad1986a_laptop_mixers[] = { HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT), /* HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */ @@ -775,7 +775,7 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = { HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x0f, 
0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT), { @@ -1358,7 +1358,7 @@ static struct snd_kcontrol_new ad1983_mixers[] = { HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x0c, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT), { @@ -1515,8 +1515,8 @@ static struct snd_kcontrol_new ad1981_mixers[] = { HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT), { @@ -1726,8 +1726,8 @@ static struct snd_kcontrol_new ad1981_hp_mixers[] = { HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT), #endif - HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x18, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT), { @@ -1774,7 +1774,7 @@ static struct snd_kcontrol_new ad1981_thinkpad_mixers[] = { HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT), { @@ -2160,8 +2160,8 @@ static struct snd_kcontrol_new ad1988_6stack_mixers2[] = { HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT), { } /* end */ }; @@ -2203,8 +2203,8 @@ static struct snd_kcontrol_new ad1988_3stack_mixers2[] = { HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Channel Mode", @@ -2232,7 +2232,7 @@ static struct snd_kcontrol_new ad1988_laptop_mixers[] = { HDA_CODEC_VOLUME("Analog Mix Playback Volume", 
0x21, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x39, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, @@ -2902,7 +2902,7 @@ static int new_analog_input(struct ad198x_spec *spec, hda_nid_t pin, idx = ad1988_pin_idx(pin); bnid = ad1988_boost_nids[idx]; if (bnid) { - sprintf(name, "%s Boost", ctlname); + sprintf(name, "%s Boost Volume", ctlname); return add_control(spec, AD_CTL_WIDGET_VOL, name, HDA_COMPOSE_AMP_VAL(bnid, 3, idx, HDA_OUTPUT)); @@ -3300,8 +3300,8 @@ static struct snd_kcontrol_new ad1884_base_mixers[] = { HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x15, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT), @@ -3499,9 +3499,9 @@ static struct snd_kcontrol_new ad1984_thinkpad_mixers[] = { HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT), HDA_CODEC_VOLUME("Docking Mic Playback Volume", 0x20, 0x04, HDA_INPUT), HDA_CODEC_MUTE("Docking Mic Playback Switch", 0x20, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Docking Mic Boost", 0x25, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT), @@ -3560,8 +3560,8 @@ static struct snd_kcontrol_new ad1984_dell_desktop_mixers[] = { HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT), HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Line-In Boost", 0x15, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT), @@ -3745,9 +3745,9 @@ static struct snd_kcontrol_new ad1884a_base_mixers[] = { HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Line Boost", 0x15, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), 
HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT), @@ -3888,9 +3888,9 @@ static struct snd_kcontrol_new ad1884a_laptop_mixers[] = { HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT), HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), { } /* end */ @@ -4126,8 +4126,8 @@ static struct snd_kcontrol_new ad1984a_thinkpad_mixers[] = { HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), { @@ -4255,8 +4255,8 @@ static struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = { HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT), { } /* end */ }; @@ -4494,9 +4494,9 @@ static struct snd_kcontrol_new ad1882_base_mixers[] = { HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Line-In Boost", 0x3a, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT), @@ -4547,7 +4547,7 @@ static struct snd_kcontrol_new ad1882a_loopback_mixers[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT), - HDA_CODEC_VOLUME("Digital Mic Boost", 0x1f, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT), { } /* end */ }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 76bd58a0e2b6..e96581fcdbdb 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -869,16 +869,16 @@ static void cxt5045_hp_unsol_event(struct 
hda_codec *codec, } static struct snd_kcontrol_new cxt5045_mixers[] = { - HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x01, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x02, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x02, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT), + HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x2, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x2, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x2, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x2, HDA_INPUT), HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, @@ -910,16 +910,16 @@ static struct snd_kcontrol_new cxt5045_benq_mixers[] = { }; static struct snd_kcontrol_new cxt5045_mixers_hp530[] = { - HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x02, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x02, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x01, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x01, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT), + HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x2, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x2, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x2, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x1, HDA_INPUT), HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, @@ -947,7 +947,7 @@ static struct hda_verb cxt5045_init_verbs[] = { {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x1a, AC_VERB_SET_CONNECT_SEL,0x1}, {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17}, @@ -960,7 +960,7 @@ static struct hda_verb cxt5045_init_verbs[] = { }; static struct hda_verb cxt5045_benq_init_verbs[] = { - /* Int Mic, Mic */ + /* Internal Mic, Mic */ {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 }, {0x14, 
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 }, /* Line In,HP, Amp */ @@ -973,7 +973,7 @@ static struct hda_verb cxt5045_benq_init_verbs[] = { {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x1a, AC_VERB_SET_CONNECT_SEL, 0x1}, {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17}, @@ -1376,7 +1376,7 @@ static void cxt5047_hp_unsol_event(struct hda_codec *codec, static struct snd_kcontrol_new cxt5047_base_mixers[] = { HDA_CODEC_VOLUME("Mic Playback Volume", 0x19, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x19, 0x02, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x1a, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x03, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x12, 0x03, HDA_INPUT), HDA_CODEC_VOLUME("PCM Volume", 0x10, 0x00, HDA_OUTPUT), @@ -1796,8 +1796,8 @@ static struct snd_kcontrol_new cxt5051_playback_mixers[] = { static struct snd_kcontrol_new cxt5051_capture_mixers[] = { HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT), - HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT), + HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("Docking Mic Volume", 0x15, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Docking Mic Switch", 0x15, 0x00, HDA_INPUT), {} @@ -1806,8 +1806,8 @@ static struct snd_kcontrol_new cxt5051_capture_mixers[] = { static struct snd_kcontrol_new cxt5051_hp_mixers[] = { HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("External Mic Volume", 0x15, 0x00, HDA_INPUT), - HDA_CODEC_MUTE("External Mic Switch", 0x15, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Volume", 0x15, 0x00, HDA_INPUT), + HDA_CODEC_MUTE("Mic Switch", 0x15, 0x00, HDA_INPUT), {} }; @@ -1826,8 +1826,8 @@ static struct snd_kcontrol_new cxt5051_f700_mixers[] = { static struct snd_kcontrol_new cxt5051_toshiba_mixers[] = { HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT), - HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT), + HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT), {} }; @@ -1847,7 +1847,7 @@ static struct hda_verb cxt5051_init_verbs[] = { {0x16, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44}, {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44}, {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44}, @@ -1874,7 +1874,7 @@ static struct hda_verb cxt5051_hp_dv6736_init_verbs[] = { {0x16, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44}, {0x14, AC_VERB_SET_CONNECT_SEL, 0x1}, /* SPDIF route: PCM */ @@ -1904,7 +1904,7 @@ static 
struct hda_verb cxt5051_lenovo_x200_init_verbs[] = { {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44}, {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44}, {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44}, @@ -1932,7 +1932,7 @@ static struct hda_verb cxt5051_f700_init_verbs[] = { {0x16, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, - /* Record selector: Int mic */ + /* Record selector: Internal mic */ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44}, {0x14, AC_VERB_SET_CONNECT_SEL, 0x1}, /* SPDIF route: PCM */ @@ -2111,6 +2111,11 @@ static struct hda_channel_mode cxt5066_modes[1] = { { 2, NULL }, }; +#define HP_PRESENT_PORT_A (1 << 0) +#define HP_PRESENT_PORT_D (1 << 1) +#define hp_port_a_present(spec) ((spec)->hp_present & HP_PRESENT_PORT_A) +#define hp_port_d_present(spec) ((spec)->hp_present & HP_PRESENT_PORT_D) + static void cxt5066_update_speaker(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; @@ -2120,24 +2125,20 @@ static void cxt5066_update_speaker(struct hda_codec *codec) spec->hp_present, spec->cur_eapd); /* Port A (HP) */ - pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0; + pinctl = (hp_port_a_present(spec) && spec->cur_eapd) ? PIN_HP : 0; snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); /* Port D (HP/LO) */ - if (spec->dell_automute) { - /* DELL AIO Port Rule: PortA> PortD> IntSpk */ - pinctl = (!(spec->hp_present & 1) && spec->cur_eapd) - ? PIN_OUT : 0; - } else if (spec->thinkpad) { - if (spec->cur_eapd) - pinctl = spec->port_d_mode; - /* Mute dock line-out if Port A (laptop HP) is present */ - if (spec->hp_present& 1) + pinctl = spec->cur_eapd ? spec->port_d_mode : 0; + if (spec->dell_automute || spec->thinkpad) { + /* Mute if Port A is connected */ + if (hp_port_a_present(spec)) pinctl = 0; } else { - pinctl = ((spec->hp_present & 2) && spec->cur_eapd) - ? spec->port_d_mode : 0; + /* Thinkpad/Dell doesn't give pin-D status */ + if (!hp_port_d_present(spec)) + pinctl = 0; } snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); @@ -2379,8 +2380,8 @@ static void cxt5066_hp_automute(struct hda_codec *codec) /* Port D */ portD = snd_hda_jack_detect(codec, 0x1c); - spec->hp_present = !!(portA); - spec->hp_present |= portD ? 2 : 0; + spec->hp_present = portA ? HP_PRESENT_PORT_A : 0; + spec->hp_present |= portD ? 
HP_PRESENT_PORT_D : 0; snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n", portA, portD, spec->hp_present); cxt5066_update_speaker(codec); @@ -2728,7 +2729,7 @@ static struct snd_kcontrol_new cxt5066_mixers[] = { static struct snd_kcontrol_new cxt5066_vostro_mixers[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, - .name = "Int Mic Boost Capture Enum", + .name = "Internal Mic Boost Capture Enum", .info = cxt5066_mic_boost_mux_enum_info, .get = cxt5066_mic_boost_mux_enum_get, .put = cxt5066_mic_boost_mux_enum_put, @@ -2954,7 +2955,7 @@ static struct hda_verb cxt5066_init_verbs_ideapad[] = { {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, /* internal microphone */ - {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */ + {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */ /* EAPD */ {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */ @@ -3009,7 +3010,7 @@ static struct hda_verb cxt5066_init_verbs_thinkpad[] = { {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, /* internal microphone */ - {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */ + {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */ /* EAPD */ {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */ @@ -3097,6 +3098,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), @@ -3108,16 +3110,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { CXT5066_LAPTOP), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), - SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), - SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), - SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), + SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ {} }; @@ -3422,6 +3417,9 @@ static void cx_auto_hp_automute(struct hda_codec *codec) AC_VERB_SET_PIN_WIDGET_CONTROL, present ? 
0 : PIN_OUT); } + for (i = 0; !present && i < cfg->line_outs; i++) + if (snd_hda_jack_detect(codec, cfg->line_out_pins[i])) + present = 1; for (i = 0; i < cfg->speaker_outs; i++) { snd_hda_codec_write(codec, cfg->speaker_pins[i], 0, AC_VERB_SET_PIN_WIDGET_CONTROL, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 31df7747990d..f29b97b5de8f 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -31,10 +31,15 @@ #include #include #include +#include #include #include "hda_codec.h" #include "hda_local.h" +static bool static_hdmi_pcm; +module_param(static_hdmi_pcm, bool, 0644); +MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); + /* * The HDMI/DisplayPort configuration can be highly dynamic. A graphics device * could support two independent pipes, each of them can be connected to one or @@ -827,7 +832,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, *codec_pars = *hinfo; eld = &spec->sink_eld[idx]; - if (eld->sad_count > 0) { + if (!static_hdmi_pcm && eld->eld_valid && eld->sad_count > 0) { hdmi_eld_update_pcm_info(eld, hinfo, codec_pars); if (hinfo->channels_min > hinfo->channels_max || !hinfo->rates || !hinfo->formats) @@ -904,23 +909,28 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) spec->pin[spec->num_pins] = pin_nid; spec->num_pins++; - /* - * It is assumed that converter nodes come first in the node list and - * hence have been registered and usable now. - */ return hdmi_read_pin_conn(codec, pin_nid); } static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t nid) { + int i, found_pin = 0; struct hdmi_spec *spec = codec->spec; - if (spec->num_cvts >= MAX_HDMI_CVTS) { - snd_printk(KERN_WARNING - "HDMI: no space for converter %d\n", nid); - return -E2BIG; + for (i = 0; i < spec->num_pins; i++) + if (nid == spec->pin_cvt[i]) { + found_pin = 1; + break; + } + + if (!found_pin) { + snd_printdd("HDMI: Skipping node %d (no connection)\n", nid); + return -EINVAL; } + if (snd_BUG_ON(spec->num_cvts >= MAX_HDMI_CVTS)) + return -E2BIG; + spec->cvt[spec->num_cvts] = nid; spec->num_cvts++; @@ -931,6 +941,8 @@ static int hdmi_parse_codec(struct hda_codec *codec) { hda_nid_t nid; int i, nodes; + int num_tmp_cvts = 0; + hda_nid_t tmp_cvt[MAX_HDMI_CVTS]; nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid); if (!nid || nodes < 0) { @@ -941,6 +953,7 @@ static int hdmi_parse_codec(struct hda_codec *codec) for (i = 0; i < nodes; i++, nid++) { unsigned int caps; unsigned int type; + unsigned int config; caps = snd_hda_param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP); type = get_wcaps_type(caps); @@ -950,17 +963,32 @@ static int hdmi_parse_codec(struct hda_codec *codec) switch (type) { case AC_WID_AUD_OUT: - hdmi_add_cvt(codec, nid); + if (num_tmp_cvts >= MAX_HDMI_CVTS) { + snd_printk(KERN_WARNING + "HDMI: no space for converter %d\n", nid); + continue; + } + tmp_cvt[num_tmp_cvts] = nid; + num_tmp_cvts++; break; case AC_WID_PIN: caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP); if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP))) continue; + + config = snd_hda_codec_read(codec, nid, 0, + AC_VERB_GET_CONFIG_DEFAULT, 0); + if (get_defcfg_connect(config) == AC_JACK_PORT_NONE) + continue; + hdmi_add_pin(codec, nid); break; } } + for (i = 0; i < num_tmp_cvts; i++) + hdmi_add_cvt(codec, tmp_cvt[i]); + /* * G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event * can be lost and presence sense verb will become inaccurate if the @@ -1165,11 +1193,53 @@ static int nvhdmi_7x_init(struct hda_codec 
*codec) return 0; } +static unsigned int channels_2_6_8[] = { + 2, 6, 8 +}; + +static unsigned int channels_2_8[] = { + 2, 8 +}; + +static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = { + .count = ARRAY_SIZE(channels_2_6_8), + .list = channels_2_6_8, + .mask = 0, +}; + +static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = { + .count = ARRAY_SIZE(channels_2_8), + .list = channels_2_8, + .mask = 0, +}; + static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hdmi_spec *spec = codec->spec; + struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL; + + switch (codec->preset->id) { + case 0x10de0002: + case 0x10de0003: + case 0x10de0005: + case 0x10de0006: + hw_constraints_channels = &hw_constraints_2_8_channels; + break; + case 0x10de0007: + hw_constraints_channels = &hw_constraints_2_6_8_channels; + break; + default: + break; + } + + if (hw_constraints_channels != NULL) { + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_CHANNELS, + hw_constraints_channels); + } + return snd_hda_multi_out_dig_open(codec, &spec->multiout); } @@ -1532,7 +1602,7 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x1002793c, .name = "RS600 HDMI", .patch = patch_atihdmi }, { .id = 0x10027919, .name = "RS600 HDMI", .patch = patch_atihdmi }, { .id = 0x1002791a, .name = "RS690/780 HDMI", .patch = patch_atihdmi }, -{ .id = 0x1002aa01, .name = "R6xx HDMI", .patch = patch_atihdmi }, +{ .id = 0x1002aa01, .name = "R6xx HDMI", .patch = patch_generic_hdmi }, { .id = 0x10951390, .name = "SiI1390 HDMI", .patch = patch_generic_hdmi }, { .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_generic_hdmi }, { .id = 0x17e80047, .name = "Chrontel HDMI", .patch = patch_generic_hdmi }, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 552a09e9211f..51c08edd7563 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -231,7 +231,6 @@ enum { ALC888_ACER_ASPIRE_8930G, ALC888_ACER_ASPIRE_7730G, ALC883_MEDION, - ALC883_MEDION_MD2, ALC883_MEDION_WIM2160, ALC883_LAPTOP_EAPD, ALC883_LENOVO_101E_2ch, @@ -1678,29 +1677,32 @@ struct alc_pincfg { u32 val; }; +struct alc_model_fixup { + const int id; + const char *name; +}; + struct alc_fixup { unsigned int sku; const struct alc_pincfg *pins; const struct hda_verb *verbs; + void (*func)(struct hda_codec *codec, const struct alc_fixup *fix, + int pre_init); }; -static void alc_pick_fixup(struct hda_codec *codec, - const struct snd_pci_quirk *quirk, - const struct alc_fixup *fix, - int pre_init) +static void __alc_pick_fixup(struct hda_codec *codec, + const struct alc_fixup *fix, + const char *modelname, + int pre_init) { const struct alc_pincfg *cfg; struct alc_spec *spec; - quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); - if (!quirk) - return; - fix += quirk->value; cfg = fix->pins; if (pre_init && fix->sku) { #ifdef CONFIG_SND_DEBUG_VERBOSE snd_printdd(KERN_INFO "hda_codec: %s: Apply sku override for %s\n", - codec->chip_name, quirk->name); + codec->chip_name, modelname); #endif spec = codec->spec; spec->cdefine.sku_cfg = fix->sku; @@ -1709,7 +1711,7 @@ static void alc_pick_fixup(struct hda_codec *codec, if (pre_init && cfg) { #ifdef CONFIG_SND_DEBUG_VERBOSE snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n", - codec->chip_name, quirk->name); + codec->chip_name, modelname); #endif for (; cfg->nid; cfg++) snd_hda_codec_set_pincfg(codec, cfg->nid, 
cfg->val); @@ -1717,10 +1719,53 @@ static void alc_pick_fixup(struct hda_codec *codec, if (!pre_init && fix->verbs) { #ifdef CONFIG_SND_DEBUG_VERBOSE snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n", - codec->chip_name, quirk->name); + codec->chip_name, modelname); #endif add_verb(codec->spec, fix->verbs); } + if (fix->func) { +#ifdef CONFIG_SND_DEBUG_VERBOSE + snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-func for %s\n", + codec->chip_name, modelname); +#endif + fix->func(codec, fix, pre_init); + } +} + +static void alc_pick_fixup(struct hda_codec *codec, + const struct snd_pci_quirk *quirk, + const struct alc_fixup *fix, + int pre_init) +{ + quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); + if (quirk) { + fix += quirk->value; +#ifdef CONFIG_SND_DEBUG_VERBOSE + __alc_pick_fixup(codec, fix, quirk->name, pre_init); +#else + __alc_pick_fixup(codec, fix, NULL, pre_init); +#endif + } +} + +static void alc_pick_fixup_model(struct hda_codec *codec, + const struct alc_model_fixup *models, + const struct snd_pci_quirk *quirk, + const struct alc_fixup *fix, + int pre_init) +{ + if (codec->modelname && models) { + while (models->name) { + if (!strcmp(codec->modelname, models->name)) { + fix += models->id; + break; + } + models++; + } + __alc_pick_fixup(codec, fix, codec->modelname, pre_init); + } else { + alc_pick_fixup(codec, quirk, fix, pre_init); + } } static int alc_read_coef_idx(struct hda_codec *codec, @@ -1981,6 +2026,7 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, + {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2}, { } }; @@ -2120,17 +2166,17 @@ static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = { { .num_items = 5, .items = { - { "Ext Mic", 0x0 }, + { "Mic", 0x0 }, { "Line In", 0x2 }, { "CD", 0x4 }, { "Input Mix", 0xa }, - { "Int Mic", 0xb }, + { "Internal Mic", 0xb }, }, }, { .num_items = 4, .items = { - { "Ext Mic", 0x0 }, + { "Mic", 0x0 }, { "Line In", 0x2 }, { "CD", 0x4 }, { "Input Mix", 0xa }, @@ -2187,7 +2233,7 @@ static struct snd_kcontrol_new alc888_base_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -2205,7 +2251,7 @@ static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -2796,10 +2842,10 @@ static struct snd_kcontrol_new alc880_fujitsu_mixer[] = { HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback 
Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -3307,7 +3353,7 @@ static struct hda_verb alc880_beep_init_verbs[] = { }; /* auto-toggle front mic */ -static void alc880_uniwill_mic_automute(struct hda_codec *codec) +static void alc88x_simple_mic_automute(struct hda_codec *codec) { unsigned int present; unsigned char bits; @@ -3329,7 +3375,7 @@ static void alc880_uniwill_setup(struct hda_codec *codec) static void alc880_uniwill_init_hook(struct hda_codec *codec) { alc_automute_amp(codec); - alc880_uniwill_mic_automute(codec); + alc88x_simple_mic_automute(codec); } static void alc880_uniwill_unsol_event(struct hda_codec *codec, @@ -3340,7 +3386,7 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec, */ switch (res >> 28) { case ALC880_MIC_EVENT: - alc880_uniwill_mic_automute(codec); + alc88x_simple_mic_automute(codec); break; default: alc_automute_amp_unsol_event(codec, res); @@ -5023,6 +5069,25 @@ static int alc880_auto_fill_dac_nids(struct alc_spec *spec, return 0; } +static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg, + bool can_be_master) +{ + if (!cfg->hp_outs && !cfg->speaker_outs && can_be_master) + return "Master"; + + switch (cfg->line_out_type) { + case AUTO_PIN_SPEAKER_OUT: + return "Speaker"; + case AUTO_PIN_HP_OUT: + return "Headphone"; + default: + if (cfg->line_outs == 1) + return "PCM"; + break; + } + return NULL; +} + /* add playback controls from the parsed DAC table */ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, const struct auto_pin_cfg *cfg) @@ -5030,6 +5095,7 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, static const char *chname[4] = { "Front", "Surround", NULL /*CLFE*/, "Side" }; + const char *pfx = alc_get_line_out_pfx(cfg, false); hda_nid_t nid; int i, err; @@ -5037,7 +5103,7 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, if (!spec->multiout.dac_nids[i]) continue; nid = alc880_idx_to_mixer(alc880_dac_to_idx(spec->multiout.dac_nids[i])); - if (i == 2) { + if (!pfx && i == 2) { /* Center/LFE */ err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, "Center", @@ -5064,18 +5130,17 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, if (err < 0) return err; } else { - const char *pfx; - if (cfg->line_outs == 1 && - cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) - pfx = "Speaker"; - else - pfx = chname[i]; - err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, + const char *name = pfx; + if (!name) + name = chname[i]; + err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, + name, i, HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT)); if (err < 0) return err; - err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx, + err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, + name, i, HDA_COMPOSE_AMP_VAL(nid, 3, 2, HDA_INPUT)); if (err < 0) @@ -5155,7 +5220,8 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec, { struct alc_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->private_imux[0]; - int i, err, idx, type, type_idx = 0; + int i, err, idx, type_idx = 0; + const char *prev_label = NULL; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t pin; @@ -5165,12 +5231,13 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec, if (!alc_is_input_pin(codec, pin)) continue; - type = 
cfg->inputs[i].type; - if (i > 0 && type == cfg->inputs[i - 1].type) + label = hda_get_autocfg_input_label(codec, cfg, i); + if (prev_label && !strcmp(label, prev_label)) type_idx++; else type_idx = 0; - label = hda_get_autocfg_input_label(codec, cfg, i); + prev_label = label; + if (mixer) { idx = get_connection_index(codec, mixer, pin); if (idx >= 0) { @@ -7406,7 +7473,7 @@ static struct hda_input_mux alc883_lenovo_nb0763_capture_source = { .num_items = 4, .items = { { "Mic", 0x0 }, - { "Int Mic", 0x1 }, + { "Internal Mic", 0x1 }, { "Line", 0x2 }, { "CD", 0x4 }, }, @@ -7416,7 +7483,7 @@ static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = { .num_items = 2, .items = { { "Mic", 0x0 }, - { "Int Mic", 0x1 }, + { "Internal Mic", 0x1 }, }, }; @@ -7851,10 +7918,10 @@ static struct snd_kcontrol_new alc882_base_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -7878,8 +7945,8 @@ static struct snd_kcontrol_new alc885_mbp3_mixer[] = { HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT), HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT), { } /* end */ }; @@ -7896,8 +7963,8 @@ static struct snd_kcontrol_new alc885_mb5_mixer[] = { HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x19, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0x00, HDA_INPUT), { } /* end */ }; @@ -7912,7 +7979,7 @@ static struct snd_kcontrol_new alc885_macmini3_mixer[] = { HDA_BIND_MUTE ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x07, HDA_INPUT), HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT), - HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT), { } /* end */ }; @@ -7931,7 +7998,7 @@ static struct snd_kcontrol_new alc882_w2jc_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -7946,10 +8013,10 @@ static struct snd_kcontrol_new alc882_targa_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), 
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; @@ -7969,7 +8036,7 @@ static struct snd_kcontrol_new alc882_asus_a7j_mixer[] = { HDA_CODEC_MUTE("Mobile Line Playback Switch", 0x0b, 0x03, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), { } /* end */ }; @@ -7982,7 +8049,7 @@ static struct snd_kcontrol_new alc882_asus_a7m_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -8763,10 +8830,10 @@ static struct snd_kcontrol_new alc883_mitac_mixer[] = { HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8777,11 +8844,11 @@ static struct snd_kcontrol_new alc883_clevo_m720_mixer[] = { HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8791,11 +8858,11 @@ static struct snd_kcontrol_new alc883_2ch_fujitsu_pi2515_mixer[] = { HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 
0x19, 0, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8808,10 +8875,10 @@ static struct snd_kcontrol_new alc883_3ST_2ch_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8831,10 +8898,10 @@ static struct snd_kcontrol_new alc883_3ST_6ch_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8855,10 +8922,10 @@ static struct snd_kcontrol_new alc883_3ST_6ch_intel_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -8879,10 +8946,10 @@ static struct snd_kcontrol_new alc885_8ch_intel_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x1b, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -8902,10 +8969,10 @@ static struct snd_kcontrol_new alc883_fivestack_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, 
HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8926,7 +8993,7 @@ static struct snd_kcontrol_new alc883_targa_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -8939,20 +9006,20 @@ static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = { HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; static struct snd_kcontrol_new alc883_targa_8ch_mixer[] = { HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8963,7 +9030,7 @@ static struct snd_kcontrol_new alc883_lenovo_101e_2ch_mixer[] = { HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -8976,21 +9043,8 @@ static struct snd_kcontrol_new alc883_lenovo_nb0763_mixer[] = { HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), - { } /* end */ -}; - -static struct snd_kcontrol_new alc883_medion_md2_mixer[] = { - HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), - HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), - HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("CD Playback 
Volume", 0x0b, 0x04, HDA_INPUT), - HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), - HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -9037,7 +9091,7 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -9050,7 +9104,7 @@ static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -9072,10 +9126,10 @@ static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -9096,8 +9150,8 @@ static struct snd_kcontrol_new alc889A_mb31_mixer[] = { HDA_CODEC_MUTE("Enable Headphones", 0x15, 0x00, HDA_OUTPUT), HDA_CODEC_MUTE_MONO("Enable LFE", 0x16, 2, 0x00, HDA_OUTPUT), /* Boost mixers */ - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT), - HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT), + HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT), /* Input mixers */ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT), @@ -9111,7 +9165,7 @@ static struct snd_kcontrol_new alc883_vaiott_mixer[] = { HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -9141,7 +9195,7 @@ static struct snd_kcontrol_new alc883_asus_eee1601_mixer[] = { HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 
0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), { } /* end */ }; @@ -9182,16 +9236,6 @@ static void alc883_mitac_setup(struct hda_codec *codec) spec->autocfg.speaker_pins[1] = 0x17; } -/* auto-toggle front mic */ -/* -static void alc883_mitac_mic_automute(struct hda_codec *codec) -{ - unsigned char bits = snd_hda_jack_detect(codec, 0x18) ? HDA_AMP_MUTE : 0; - - snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits); -} -*/ - static struct hda_verb alc883_mitac_verbs[] = { /* HP */ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, @@ -9435,18 +9479,8 @@ static void alc883_lenovo_ms7195_unsol_event(struct hda_codec *codec, alc888_lenovo_ms7195_rca_automute(codec); } -static struct hda_verb alc883_medion_md2_verbs[] = { - {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, - {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, - - {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, - - {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, - { } /* end */ -}; - /* toggle speaker-output according to the hp-jack state */ -static void alc883_medion_md2_setup(struct hda_codec *codec) +static void alc883_lenovo_nb0763_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -9458,15 +9492,6 @@ static void alc883_medion_md2_setup(struct hda_codec *codec) #define alc883_targa_init_hook alc882_targa_init_hook #define alc883_targa_unsol_event alc882_targa_unsol_event -static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) -{ - unsigned int present; - - present = snd_hda_jack_detect(codec, 0x18); - snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, - HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0); -} - static void alc883_clevo_m720_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -9478,7 +9503,7 @@ static void alc883_clevo_m720_setup(struct hda_codec *codec) static void alc883_clevo_m720_init_hook(struct hda_codec *codec) { alc_automute_amp(codec); - alc883_clevo_m720_mic_automute(codec); + alc88x_simple_mic_automute(codec); } static void alc883_clevo_m720_unsol_event(struct hda_codec *codec, @@ -9486,7 +9511,7 @@ static void alc883_clevo_m720_unsol_event(struct hda_codec *codec, { switch (res >> 26) { case ALC880_MIC_EVENT: - alc883_clevo_m720_mic_automute(codec); + alc88x_simple_mic_automute(codec); break; default: alc_automute_amp_unsol_event(codec, res); @@ -9731,7 +9756,6 @@ static const char *alc882_models[ALC882_MODEL_LAST] = { [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g", [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", [ALC883_MEDION] = "medion", - [ALC883_MEDION_MD2] = "medion-md2", [ALC883_MEDION_WIM2160] = "medion-wim2160", [ALC883_LAPTOP_EAPD] = "laptop-eapd", [ALC883_LENOVO_101E_2ch] = "lenovo-101e", @@ -10379,19 +10403,6 @@ static struct alc_config_preset alc882_presets[] = { .channel_mode = alc883_sixstack_modes, .input_mux = &alc883_capture_source, }, - [ALC883_MEDION_MD2] = { - .mixers = { alc883_medion_md2_mixer}, - .init_verbs = { alc883_init_verbs, alc883_medion_md2_verbs}, - .num_dacs = ARRAY_SIZE(alc883_dac_nids), - .dac_nids = alc883_dac_nids, - .dig_out_nid = ALC883_DIGOUT_NID, - .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), - .channel_mode = alc883_3ST_2ch_modes, - .input_mux = &alc883_capture_source, - .unsol_event = alc_automute_amp_unsol_event, - .setup = alc883_medion_md2_setup, - .init_hook = alc_automute_amp, - }, [ALC883_MEDION_WIM2160] = { .mixers = { alc883_medion_wim2160_mixer }, .init_verbs = { 
alc883_init_verbs, alc883_medion_wim2160_verbs }, @@ -10468,7 +10479,7 @@ static struct alc_config_preset alc882_presets[] = { .need_dac_fix = 1, .input_mux = &alc883_lenovo_nb0763_capture_source, .unsol_event = alc_automute_amp_unsol_event, - .setup = alc883_medion_md2_setup, + .setup = alc883_lenovo_nb0763_setup, .init_hook = alc_automute_amp, }, [ALC888_LENOVO_MS7195_DIG] = { @@ -10830,25 +10841,30 @@ static int alc_auto_add_mic_boost(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; - int i, err, type; + int i, err; int type_idx = 0; hda_nid_t nid; + const char *prev_label = NULL; for (i = 0; i < cfg->num_inputs; i++) { if (cfg->inputs[i].type > AUTO_PIN_MIC) break; nid = cfg->inputs[i].pin; if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) { - char label[32]; - type = cfg->inputs[i].type; - if (i > 0 && type == cfg->inputs[i - 1].type) + const char *label; + char boost_label[32]; + + label = hda_get_autocfg_input_label(codec, cfg, i); + if (prev_label && !strcmp(label, prev_label)) type_idx++; else type_idx = 0; - snprintf(label, sizeof(label), "%s Boost", - hda_get_autocfg_input_label(codec, cfg, i)); - err = add_control(spec, ALC_CTL_WIDGET_VOL, label, - type_idx, + prev_label = label; + + snprintf(boost_label, sizeof(boost_label), + "%s Boost Volume", label); + err = add_control(spec, ALC_CTL_WIDGET_VOL, + boost_label, type_idx, HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT)); if (err < 0) return err; @@ -10857,6 +10873,9 @@ static int alc_auto_add_mic_boost(struct hda_codec *codec) return 0; } +static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, + const struct auto_pin_cfg *cfg); + /* almost identical with ALC880 parser... */ static int alc882_parse_auto_config(struct hda_codec *codec) { @@ -10874,7 +10893,10 @@ static int alc882_parse_auto_config(struct hda_codec *codec) err = alc880_auto_fill_dac_nids(spec, &spec->autocfg); if (err < 0) return err; - err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg); + if (codec->vendor_id == 0x10ec0887) + err = alc861vd_auto_create_multi_out_ctls(spec, &spec->autocfg); + else + err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg); if (err < 0) return err; err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0], @@ -11090,10 +11112,10 @@ static struct snd_kcontrol_new alc262_base_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT), @@ -11194,10 +11216,10 @@ static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = { HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), 
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), @@ -11219,7 +11241,7 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = { HDA_OUTPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x1a, 0, HDA_INPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), @@ -11230,7 +11252,7 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = { static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = { HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Rear Mic Boost Volume", 0x18, 0, HDA_INPUT), { } /* end */ }; @@ -11250,7 +11272,7 @@ static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = { HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), { } /* end */ }; @@ -11357,10 +11379,10 @@ static struct snd_kcontrol_new alc262_hippo_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT), { } /* end */ }; @@ -11374,10 +11396,10 @@ static struct snd_kcontrol_new alc262_hippo1_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; @@ -11445,10 +11467,10 @@ static struct snd_kcontrol_new alc262_tyan_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, 
HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; @@ -11632,7 +11654,7 @@ static struct snd_kcontrol_new alc262_nec_mixer[] = { HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), @@ -11687,7 +11709,7 @@ static struct hda_input_mux alc262_fujitsu_capture_source = { .num_items = 3, .items = { { "Mic", 0x0 }, - { "Int Mic", 0x1 }, + { "Internal Mic", 0x1 }, { "CD", 0x4 }, }, }; @@ -11839,12 +11861,12 @@ static struct snd_kcontrol_new alc262_fujitsu_mixer[] = { }, HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -11875,12 +11897,12 @@ static struct snd_kcontrol_new alc262_lenovo_3000_mixer[] = { }, HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -11889,10 +11911,10 @@ static struct snd_kcontrol_new alc262_toshiba_rx1_mixer[] = { ALC262_HIPPO_MASTER_SWITCH, HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; @@ -11918,8 +11940,8 @@ static struct snd_kcontrol_new alc262_ultra_mixer[] = { HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Mic 
Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Headphone Mic Boost", 0x15, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Headphone Mic Boost Volume", 0x15, 0, HDA_INPUT), { } /* end */ }; @@ -12089,13 +12111,8 @@ static int alc262_auto_create_multi_out_ctls(struct alc_spec *spec, spec->multiout.dac_nids = spec->private_dac_nids; spec->multiout.dac_nids[0] = 2; - if (!cfg->speaker_pins[0] && !cfg->hp_pins[0]) - pfx = "Master"; - else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) - pfx = "Speaker"; - else if (cfg->line_out_type == AUTO_PIN_HP_OUT) - pfx = "Headphone"; - else + pfx = alc_get_line_out_pfx(cfg, true); + if (!pfx) pfx = "Front"; for (i = 0; i < 2; i++) { err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[i], pfx, i); @@ -12996,9 +13013,9 @@ static struct snd_kcontrol_new alc268_base_mixer[] = { HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT), { } }; @@ -13007,9 +13024,9 @@ static struct snd_kcontrol_new alc268_toshiba_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT), ALC262_HIPPO_MASTER_SWITCH, - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT), { } }; @@ -13113,9 +13130,9 @@ static struct snd_kcontrol_new alc268_acer_mixer[] = { .put = alc268_acer_master_sw_put, .private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT), }, - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT), { } }; @@ -13131,8 +13148,8 @@ static struct snd_kcontrol_new alc268_acer_dmic_mixer[] = { .put = alc268_acer_master_sw_put, .private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT), }, - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT), { } }; @@ -13224,8 +13241,8 @@ static struct snd_kcontrol_new alc268_dell_mixer[] = { HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + 
HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } }; @@ -13258,8 +13275,8 @@ static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = { HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Mic Capture Volume", 0x23, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Mic Capture Switch", 0x23, 2, HDA_OUTPUT), - HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } }; @@ -14082,10 +14099,10 @@ static struct snd_kcontrol_new alc269_base_mixer[] = { HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT), { } /* end */ @@ -14105,10 +14122,10 @@ static struct snd_kcontrol_new alc269_quanta_fl1_mixer[] = { }, HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } }; @@ -14126,13 +14143,13 @@ static struct snd_kcontrol_new alc269_lifebook_mixer[] = { }, HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), - HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x0b, 0x03, HDA_INPUT), HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x0b, 0x03, HDA_INPUT), - HDA_CODEC_VOLUME("Dock Mic Boost", 0x1b, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x1b, 0, HDA_INPUT), { } }; @@ -14162,30 +14179,30 @@ static struct snd_kcontrol_new alc269_asus_mixer[] = { static struct snd_kcontrol_new alc269_laptop_analog_capture_mixer[] = { HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; static struct snd_kcontrol_new alc269_laptop_digital_capture_mixer[] = { HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture 
Switch", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), { } /* end */ }; static struct snd_kcontrol_new alc269vb_laptop_analog_capture_mixer[] = { HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; static struct snd_kcontrol_new alc269vb_laptop_digital_capture_mixer[] = { HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), { } /* end */ }; @@ -14804,12 +14821,23 @@ static int alc269_resume(struct hda_codec *codec) } #endif /* SND_HDA_NEEDS_RESUME */ +static void alc269_fixup_hweq(struct hda_codec *codec, + const struct alc_fixup *fix, int pre_init) +{ + int coef; + + coef = alc_read_coef_idx(codec, 0x1e); + alc_write_coef_idx(codec, 0x1e, coef | 0x80); +} + enum { ALC269_FIXUP_SONY_VAIO, ALC275_FIX_SONY_VAIO_GPIO2, ALC269_FIXUP_DELL_M101Z, ALC269_FIXUP_SKU_IGNORE, ALC269_FIXUP_ASUS_G73JW, + ALC269_FIXUP_LENOVO_EAPD, + ALC275_FIXUP_SONY_HWEQ, }; static const struct alc_fixup alc269_fixups[] = { @@ -14824,6 +14852,7 @@ static const struct alc_fixup alc269_fixups[] = { {0x01, AC_VERB_SET_GPIO_MASK, 0x04}, {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04}, {0x01, AC_VERB_SET_GPIO_DATA, 0x00}, + {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD}, { } } }, @@ -14844,17 +14873,34 @@ static const struct alc_fixup alc269_fixups[] = { { } } }, + [ALC269_FIXUP_LENOVO_EAPD] = { + .verbs = (const struct hda_verb[]) { + {0x14, AC_VERB_SET_EAPD_BTLENABLE, 0}, + {} + } + }, + [ALC275_FIXUP_SONY_HWEQ] = { + .func = alc269_fixup_hweq, + .verbs = (const struct hda_verb[]) { + {0x01, AC_VERB_SET_GPIO_MASK, 0x04}, + {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04}, + {0x01, AC_VERB_SET_GPIO_DATA, 0x00}, + {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD}, + { } + } + } }; static struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), - SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), - SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), + SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), {} }; @@ -15889,13 +15935,16 @@ static int alc861_auto_fill_dac_nids(struct hda_codec *codec, return 0; } -static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx, - hda_nid_t nid, unsigned int chs) +static int __alc861_create_out_sw(struct hda_codec *codec, const char *pfx, + hda_nid_t nid, int idx, unsigned int chs) { - return add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx, + return __add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, 
pfx, idx, HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT)); } +#define alc861_create_out_sw(codec, pfx, nid, chs) \ + __alc861_create_out_sw(codec, pfx, nid, 0, chs) + /* add playback controls from the parsed DAC table */ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) @@ -15904,26 +15953,15 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, static const char *chname[4] = { "Front", "Surround", NULL /*CLFE*/, "Side" }; + const char *pfx = alc_get_line_out_pfx(cfg, true); hda_nid_t nid; int i, err; - if (cfg->line_outs == 1) { - const char *pfx = NULL; - if (!cfg->hp_outs) - pfx = "Master"; - else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) - pfx = "Speaker"; - if (pfx) { - nid = spec->multiout.dac_nids[0]; - return alc861_create_out_sw(codec, pfx, nid, 3); - } - } - for (i = 0; i < cfg->line_outs; i++) { nid = spec->multiout.dac_nids[i]; if (!nid) continue; - if (i == 2) { + if (!pfx && i == 2) { /* Center/LFE */ err = alc861_create_out_sw(codec, "Center", nid, 1); if (err < 0) @@ -15932,7 +15970,10 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, if (err < 0) return err; } else { - err = alc861_create_out_sw(codec, chname[i], nid, 3); + const char *name = pfx; + if (!name) + name = chname[i]; + err = __alc861_create_out_sw(codec, name, nid, i, 3); if (err < 0) return err; } @@ -16404,8 +16445,8 @@ static struct hda_input_mux alc861vd_capture_source = { static struct hda_input_mux alc861vd_dallas_capture_source = { .num_items = 2, .items = { - { "Ext Mic", 0x0 }, - { "Int Mic", 0x1 }, + { "Mic", 0x0 }, + { "Internal Mic", 0x1 }, }, }; @@ -16484,11 +16525,11 @@ static struct snd_kcontrol_new alc861vd_6st_mixer[] = { HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), @@ -16507,11 +16548,11 @@ static struct snd_kcontrol_new alc861vd_3st_mixer[] = { HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), @@ -16531,11 +16572,11 @@ static struct snd_kcontrol_new alc861vd_lenovo_mixer[] = { HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT), HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), 
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), @@ -16546,19 +16587,19 @@ static struct snd_kcontrol_new alc861vd_lenovo_mixer[] = { }; /* Pin assignment: Speaker=0x14, HP = 0x15, - * Ext Mic=0x18, Int Mic = 0x19, CD = 0x1c, PC Beep = 0x1d + * Mic=0x18, Internal Mic = 0x19, CD = 0x1c, PC Beep = 0x1d */ static struct snd_kcontrol_new alc861vd_dallas_mixer[] = { HDA_CODEC_VOLUME("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 2, HDA_INPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Headphone Playback Switch", 0x0d, 2, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -16723,18 +16764,6 @@ static struct hda_verb alc861vd_lenovo_unsol_verbs[] = { {} }; -static void alc861vd_lenovo_mic_automute(struct hda_codec *codec) -{ - unsigned int present; - unsigned char bits; - - present = snd_hda_jack_detect(codec, 0x18); - bits = present ? HDA_AMP_MUTE : 0; - - snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, - HDA_AMP_MUTE, bits); -} - static void alc861vd_lenovo_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -16745,7 +16774,7 @@ static void alc861vd_lenovo_setup(struct hda_codec *codec) static void alc861vd_lenovo_init_hook(struct hda_codec *codec) { alc_automute_amp(codec); - alc861vd_lenovo_mic_automute(codec); + alc88x_simple_mic_automute(codec); } static void alc861vd_lenovo_unsol_event(struct hda_codec *codec, @@ -16753,7 +16782,7 @@ static void alc861vd_lenovo_unsol_event(struct hda_codec *codec, { switch (res >> 26) { case ALC880_MIC_EVENT: - alc861vd_lenovo_mic_automute(codec); + alc88x_simple_mic_automute(codec); break; default: alc_automute_amp_unsol_event(codec, res); @@ -17043,12 +17072,13 @@ static void alc861vd_auto_init_analog_input(struct hda_codec *codec) #define alc861vd_idx_to_mixer_switch(nid) ((nid) + 0x0c) /* add playback controls from the parsed DAC table */ -/* Based on ALC880 version. But ALC861VD has separate, +/* Based on ALC880 version. 
But ALC861VD and ALC887 have separate, * different NIDs for mute/unmute switch and volume control */ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, const struct auto_pin_cfg *cfg) { static const char *chname[4] = {"Front", "Surround", "CLFE", "Side"}; + const char *pfx = alc_get_line_out_pfx(cfg, true); hda_nid_t nid_v, nid_s; int i, err; @@ -17062,7 +17092,7 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, alc880_dac_to_idx( spec->multiout.dac_nids[i])); - if (i == 2) { + if (!pfx && i == 2) { /* Center/LFE */ err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, "Center", @@ -17089,24 +17119,17 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, if (err < 0) return err; } else { - const char *pfx; - if (cfg->line_outs == 1 && - cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) { - if (!cfg->hp_pins) - pfx = "Speaker"; - else - pfx = "PCM"; - } else - pfx = chname[i]; - err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, + const char *name = pfx; + if (!name) + name = chname[i]; + err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, + name, i, HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, HDA_OUTPUT)); if (err < 0) return err; - if (cfg->line_outs == 1 && - cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) - pfx = "Speaker"; - err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx, + err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, + name, i, HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, HDA_INPUT)); if (err < 0) @@ -17570,13 +17593,13 @@ static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = { HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT), ALC262_HIPPO_MASTER_SWITCH, - HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("e-Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -17720,8 +17743,8 @@ static struct snd_kcontrol_new alc663_g71v_mixer[] = { HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -17732,8 +17755,8 @@ static struct snd_kcontrol_new alc663_g50v_mixer[] = { HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), { } /* end */ @@ 
-18566,13 +18589,13 @@ static struct snd_kcontrol_new alc662_ecs_mixer[] = { HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT), ALC262_HIPPO_MASTER_SWITCH, - HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("e-Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic/LineIn Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT), - HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), { } /* end */ }; @@ -18583,13 +18606,13 @@ static struct snd_kcontrol_new alc272_nc10_mixer[] = { HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x21, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT), { } /* end */ }; @@ -19094,20 +19117,24 @@ static int alc662_auto_fill_dac_nids(struct hda_codec *codec, return 0; } -static inline int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx, - hda_nid_t nid, unsigned int chs) +static inline int __alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx, + hda_nid_t nid, int idx, unsigned int chs) { - return add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, + return __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, idx, HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT)); } -static inline int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx, - hda_nid_t nid, unsigned int chs) +static inline int __alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx, + hda_nid_t nid, int idx, unsigned int chs) { - return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, + return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, idx, HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT)); } +#define alc662_add_vol_ctl(spec, pfx, nid, chs) \ + __alc662_add_vol_ctl(spec, pfx, nid, 0, chs) +#define alc662_add_sw_ctl(spec, pfx, nid, chs) \ + __alc662_add_sw_ctl(spec, pfx, nid, 0, chs) #define alc662_add_stereo_vol(spec, pfx, nid) \ alc662_add_vol_ctl(spec, pfx, nid, 3) #define alc662_add_stereo_sw(spec, pfx, nid) \ @@ -19121,6 +19148,7 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, static const char *chname[4] = { "Front", "Surround", NULL /*CLFE*/, "Side" }; + const char *pfx = alc_get_line_out_pfx(cfg, true); hda_nid_t nid, mix; int i, err; @@ -19131,7 +19159,7 @@ 
static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid); if (!mix) continue; - if (i == 2) { + if (!pfx && i == 2) { /* Center/LFE */ err = alc662_add_vol_ctl(spec, "Center", nid, 1); if (err < 0) @@ -19146,22 +19174,13 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, if (err < 0) return err; } else { - const char *pfx; - if (cfg->line_outs == 1 && - cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) { - if (cfg->hp_outs) - pfx = "Speaker"; - else - pfx = "PCM"; - } else - pfx = chname[i]; - err = alc662_add_vol_ctl(spec, pfx, nid, 3); + const char *name = pfx; + if (!name) + name = chname[i]; + err = __alc662_add_vol_ctl(spec, name, nid, i, 3); if (err < 0) return err; - if (cfg->line_outs == 1 && - cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) - pfx = "Speaker"; - err = alc662_add_sw_ctl(spec, pfx, mix, 3); + err = __alc662_add_sw_ctl(spec, name, mix, i, 3); if (err < 0) return err; } @@ -19358,9 +19377,21 @@ static void alc662_auto_init(struct hda_codec *codec) alc_inithook(codec); } +static void alc272_fixup_mario(struct hda_codec *codec, + const struct alc_fixup *fix, int pre_init) { + if (snd_hda_override_amp_caps(codec, 0x2, HDA_OUTPUT, + (0x3b << AC_AMPCAP_OFFSET_SHIFT) | + (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) | + (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) | + (0 << AC_AMPCAP_MUTE_SHIFT))) + printk(KERN_WARNING + "hda_codec: failed to override amp caps for NID 0x2\n"); +} + enum { ALC662_FIXUP_ASPIRE, ALC662_FIXUP_IDEAPAD, + ALC272_FIXUP_MARIO, }; static const struct alc_fixup alc662_fixups[] = { @@ -19376,6 +19407,9 @@ static const struct alc_fixup alc662_fixups[] = { { } } }, + [ALC272_FIXUP_MARIO] = { + .func = alc272_fixup_mario, + } }; static struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -19386,6 +19420,10 @@ static struct snd_pci_quirk alc662_fixup_tbl[] = { {} }; +static const struct alc_model_fixup alc662_fixup_models[] = { + {.id = ALC272_FIXUP_MARIO, .name = "mario"}, + {} +}; static int patch_alc662(struct hda_codec *codec) @@ -19485,7 +19523,8 @@ static int patch_alc662(struct hda_codec *codec) codec->patch_ops = alc_patch_ops; if (board_config == ALC662_AUTO) { spec->init_hook = alc662_auto_init; - alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0); + alc_pick_fixup_model(codec, alc662_fixup_models, + alc662_fixup_tbl, alc662_fixups, 0); } alc_init_jacks(codec); @@ -19612,9 +19651,9 @@ static struct snd_kcontrol_new alc680_base_mixer[] = { HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT), - HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), - HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x12, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_VOLUME("Line In Boost Volume", 0x19, 0, HDA_INPUT), { } }; diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index f03b2ff90496..4ab019d0924e 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -389,6 +389,9 @@ static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = { 0x11, 0x20, 0 }; +#define STAC92HD88XXX_NUM_DMICS STAC92HD83XXX_NUM_DMICS +#define stac92hd88xxx_dmic_nids stac92hd83xxx_dmic_nids + #define STAC92HD87B_NUM_DMICS 1 static hda_nid_t 
stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = { 0x11, 0 @@ -3591,7 +3594,7 @@ static int stac_check_auto_mic(struct hda_codec *codec) if (check_mic_pin(codec, spec->dmic_nids[i], &fixed, &ext, &dock)) return 0; - if (!fixed && !ext && !dock) + if (!fixed || (!ext && !dock)) return 0; /* no input to switch */ if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP)) return 0; /* no unsol support */ @@ -5422,7 +5425,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0); codec->no_trigger_sense = 1; codec->spec = spec; - spec->linear_tone_beep = 1; + spec->linear_tone_beep = 0; codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; spec->digbeep_nid = 0x21; spec->dmic_nids = stac92hd83xxx_dmic_nids; @@ -5462,15 +5465,21 @@ again: spec->num_dmics = stac92xx_connected_ports(codec, stac92hd87b_dmic_nids, STAC92HD87B_NUM_DMICS); - /* Fall through */ + spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); + spec->pin_nids = stac92hd88xxx_pin_nids; + spec->mono_nid = 0; + spec->num_pwrs = 0; + break; case 0x111d7666: case 0x111d7667: case 0x111d7668: case 0x111d7669: + spec->num_dmics = stac92xx_connected_ports(codec, + stac92hd88xxx_dmic_nids, + STAC92HD88XXX_NUM_DMICS); spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); spec->pin_nids = stac92hd88xxx_pin_nids; spec->mono_nid = 0; - spec->digbeep_nid = 0; spec->num_pwrs = 0; break; case 0x111d7604: diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index d1c3f8defc48..7f4852a478a1 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -263,8 +263,7 @@ static void vt1708_stop_hp_work(struct via_spec *spec) return; snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, !spec->vt1708_jack_detectect); - cancel_delayed_work(&spec->vt1708_hp_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&spec->vt1708_hp_work); } diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c index 712c1710f9a2..7b62de089fee 100644 --- a/sound/pci/ice1712/delta.c +++ b/sound/pci/ice1712/delta.c @@ -96,6 +96,11 @@ static unsigned char ap_cs8427_codec_select(struct snd_ice1712 *ice) tmp |= ICE1712_DELTA_AP_CCLK | ICE1712_DELTA_AP_CS_CODEC; tmp &= ~ICE1712_DELTA_AP_CS_DIGITAL; break; + case ICE1712_SUBDEVICE_DELTA66E: + tmp |= ICE1712_DELTA_66E_CCLK | ICE1712_DELTA_66E_CS_CHIP_A | + ICE1712_DELTA_66E_CS_CHIP_B; + tmp &= ~ICE1712_DELTA_66E_CS_CS8427; + break; case ICE1712_SUBDEVICE_VX442: tmp |= ICE1712_VX442_CCLK | ICE1712_VX442_CODEC_CHIP_A | ICE1712_VX442_CODEC_CHIP_B; tmp &= ~ICE1712_VX442_CS_DIGITAL; @@ -119,6 +124,9 @@ static void ap_cs8427_codec_deassert(struct snd_ice1712 *ice, unsigned char tmp) case ICE1712_SUBDEVICE_DELTA410: tmp |= ICE1712_DELTA_AP_CS_DIGITAL; break; + case ICE1712_SUBDEVICE_DELTA66E: + tmp |= ICE1712_DELTA_66E_CS_CS8427; + break; case ICE1712_SUBDEVICE_VX442: tmp |= ICE1712_VX442_CS_DIGITAL; break; @@ -275,6 +283,20 @@ static void delta1010lt_ak4524_lock(struct snd_akm4xxx *ak, int chip) priv->cs_addr = chip << 4; } +/* + * AK4524 on Delta66 rev E to choose the chip address + */ +static void delta66e_ak4524_lock(struct snd_akm4xxx *ak, int chip) +{ + struct snd_ak4xxx_private *priv = (void *)ak->private_value[0]; + struct snd_ice1712 *ice = ak->private_data[0]; + + snd_ice1712_save_gpio_status(ice); + priv->cs_mask = + priv->cs_addr = chip == 0 ? 
ICE1712_DELTA_66E_CS_CHIP_A : + ICE1712_DELTA_66E_CS_CHIP_B; +} + /* * AK4528 on VX442 to choose the chip mask */ @@ -487,6 +509,29 @@ static struct snd_ak4xxx_private akm_delta1010lt_priv __devinitdata = { .mask_flags = 0, }; +static struct snd_akm4xxx akm_delta66e __devinitdata = { + .type = SND_AK4524, + .num_adcs = 4, + .num_dacs = 4, + .ops = { + .lock = delta66e_ak4524_lock, + .set_rate_val = delta_ak4524_set_rate_val + } +}; + +static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = { + .caddr = 2, + .cif = 0, /* the default level of the CIF pin from AK4524 */ + .data_mask = ICE1712_DELTA_66E_DOUT, + .clk_mask = ICE1712_DELTA_66E_CCLK, + .cs_mask = 0, + .cs_addr = 0, /* set later */ + .cs_none = 0, + .add_flags = 0, + .mask_flags = 0, +}; + + static struct snd_akm4xxx akm_delta44 __devinitdata = { .type = SND_AK4524, .num_adcs = 4, @@ -644,9 +689,11 @@ static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice) err = snd_ice1712_akm4xxx_init(ak, &akm_delta44, &akm_delta44_priv, ice); break; case ICE1712_SUBDEVICE_VX442: - case ICE1712_SUBDEVICE_DELTA66E: err = snd_ice1712_akm4xxx_init(ak, &akm_vx442, &akm_vx442_priv, ice); break; + case ICE1712_SUBDEVICE_DELTA66E: + err = snd_ice1712_akm4xxx_init(ak, &akm_delta66e, &akm_delta66e_priv, ice); + break; default: snd_BUG(); return -EINVAL; diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h index 1a0ac6cd6501..11a9c3a76507 100644 --- a/sound/pci/ice1712/delta.h +++ b/sound/pci/ice1712/delta.h @@ -144,6 +144,17 @@ extern struct snd_ice1712_card_info snd_ice1712_delta_cards[]; #define ICE1712_DELTA_1010LT_CS_NONE 0x50 /* nothing */ #define ICE1712_DELTA_1010LT_WORDCLOCK 0x80 /* sample clock source: 0 = Word Clock Input, 1 = S/PDIF Input ??? */ +/* M-Audio Delta 66 rev. E definitions. + * Newer revisions of Delta 66 have CS8427 over SPI for + * S/PDIF transceiver instead of CS8404/CS8414. 
*/
+/* 0x01 = DFS */
+#define ICE1712_DELTA_66E_CCLK 0x02 /* SPI clock */
+#define ICE1712_DELTA_66E_DIN 0x04 /* data input */
+#define ICE1712_DELTA_66E_DOUT 0x08 /* data output */
+#define ICE1712_DELTA_66E_CS_CS8427 0x10 /* chip select, low = CS8427 */
+#define ICE1712_DELTA_66E_CS_CHIP_A 0x20 /* AK4524 #0 */
+#define ICE1712_DELTA_66E_CS_CHIP_B 0x40 /* AK4524 #1 */
+
/* Digigram VX442 definitions */
#define ICE1712_VX442_CCLK 0x02 /* SPI clock */
#define ICE1712_VX442_DIN 0x04 /* data input */
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index acd8f15f7bff..0f8726551fde 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,10 +1,8 @@
snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
-snd-hifier-objs := hifier.o
-snd-oxygen-objs := oxygen.o
+snd-oxygen-objs := oxygen.o xonar_dg.o
snd-virtuoso-objs := virtuoso.o xonar_lib.o \
xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
obj-$(CONFIG_SND_OXYGEN_LIB) += snd-oxygen-lib.o
-obj-$(CONFIG_SND_HIFIER) += snd-hifier.o
obj-$(CONFIG_SND_OXYGEN) += snd-oxygen.o
obj-$(CONFIG_SND_VIRTUOSO) += snd-virtuoso.o
diff --git a/sound/pci/oxygen/cs4245.h b/sound/pci/oxygen/cs4245.h
new file mode 100644
index 000000000000..5e0197e07dd1
--- /dev/null
+++ b/sound/pci/oxygen/cs4245.h
@@ -0,0 +1,107 @@
+#define CS4245_CHIP_ID 0x01
+#define CS4245_POWER_CTRL 0x02
+#define CS4245_DAC_CTRL_1 0x03
+#define CS4245_ADC_CTRL 0x04
+#define CS4245_MCLK_FREQ 0x05
+#define CS4245_SIGNAL_SEL 0x06
+#define CS4245_PGA_B_CTRL 0x07
+#define CS4245_PGA_A_CTRL 0x08
+#define CS4245_ANALOG_IN 0x09
+#define CS4245_DAC_A_CTRL 0x0a
+#define CS4245_DAC_B_CTRL 0x0b
+#define CS4245_DAC_CTRL_2 0x0c
+#define CS4245_INT_STATUS 0x0d
+#define CS4245_INT_MASK 0x0e
+#define CS4245_INT_MODE_MSB 0x0f
+#define CS4245_INT_MODE_LSB 0x10
+
+/* Chip ID */
+#define CS4245_CHIP_PART_MASK 0xf0
+#define CS4245_CHIP_REV_MASK 0x0f
+
+/* Power Control */
+#define CS4245_FREEZE 0x80
+#define CS4245_PDN_MIC 0x08
+#define CS4245_PDN_ADC 0x04
+#define CS4245_PDN_DAC 0x02
+#define CS4245_PDN 0x01
+
+/* DAC Control */
+#define CS4245_DAC_FM_MASK 0xc0
+#define CS4245_DAC_FM_SINGLE 0x00
+#define CS4245_DAC_FM_DOUBLE 0x40
+#define CS4245_DAC_FM_QUAD 0x80
+#define CS4245_DAC_DIF_MASK 0x30
+#define CS4245_DAC_DIF_LJUST 0x00
+#define CS4245_DAC_DIF_I2S 0x10
+#define CS4245_DAC_DIF_RJUST_16 0x20
+#define CS4245_DAC_DIF_RJUST_24 0x30
+#define CS4245_RESERVED_1 0x08
+#define CS4245_MUTE_DAC 0x04
+#define CS4245_DEEMPH 0x02
+#define CS4245_DAC_MASTER 0x01
+
+/* ADC Control */
+#define CS4245_ADC_FM_MASK 0xc0
+#define CS4245_ADC_FM_SINGLE 0x00
+#define CS4245_ADC_FM_DOUBLE 0x40
+#define CS4245_ADC_FM_QUAD 0x80
+#define CS4245_ADC_DIF_MASK 0x10
+#define CS4245_ADC_DIF_LJUST 0x00
+#define CS4245_ADC_DIF_I2S 0x10
+#define CS4245_MUTE_ADC 0x04
+#define CS4245_HPF_FREEZE 0x02
+#define CS4245_ADC_MASTER 0x01
+
+/* MCLK Frequency */
+#define CS4245_MCLK1_MASK 0x70
+#define CS4245_MCLK1_SHIFT 4
+#define CS4245_MCLK2_MASK 0x07
+#define CS4245_MCLK2_SHIFT 0
+#define CS4245_MCLK_1 0
+#define CS4245_MCLK_1_5 1
+#define CS4245_MCLK_2 2
+#define CS4245_MCLK_3 3
+#define CS4245_MCLK_4 4
+
+/* Signal Selection */
+#define CS4245_A_OUT_SEL_MASK 0x60
+#define CS4245_A_OUT_SEL_HIZ 0x00
+#define CS4245_A_OUT_SEL_DAC 0x20
+#define CS4245_A_OUT_SEL_PGA 0x40
+#define CS4245_LOOP 0x02
+#define CS4245_ASYNCH 0x01
+
+/* Channel B/A PGA Control */
+#define CS4245_PGA_GAIN_MASK 0x3f
+
+/* ADC Input Control */
+#define CS4245_PGA_SOFT 0x10
+#define CS4245_PGA_ZERO 0x08
+#define CS4245_SEL_MASK 0x07 +#define CS4245_SEL_MIC 0x00 +#define CS4245_SEL_INPUT_1 0x01 +#define CS4245_SEL_INPUT_2 0x02 +#define CS4245_SEL_INPUT_3 0x03 +#define CS4245_SEL_INPUT_4 0x04 +#define CS4245_SEL_INPUT_5 0x05 +#define CS4245_SEL_INPUT_6 0x06 + +/* DAC Channel A/B Volume Control */ +#define CS4245_VOL_MASK 0xff + +/* DAC Control 2 */ +#define CS4245_DAC_SOFT 0x80 +#define CS4245_DAC_ZERO 0x40 +#define CS4245_INVERT_DAC 0x20 +#define CS4245_INT_ACTIVE_HIGH 0x01 + +/* Interrupt Status/Mask/Mode */ +#define CS4245_ADC_CLK_ERR 0x08 +#define CS4245_DAC_CLK_ERR 0x04 +#define CS4245_ADC_OVFL 0x02 +#define CS4245_ADC_UNDRFL 0x01 + + +#define CS4245_SPI_ADDRESS (0x9e << 16) +#define CS4245_SPI_WRITE (0 << 16) diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c deleted file mode 100644 index 5a87d683691f..000000000000 --- a/sound/pci/oxygen/hifier.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * C-Media CMI8788 driver for the MediaTek/TempoTec HiFier Fantasia - * - * Copyright (c) Clemens Ladisch - * - * - * This driver is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2. - * - * This driver is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this driver; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * CMI8788: - * - * SPI 0 -> AK4396 - */ - -#include -#include -#include -#include -#include -#include -#include -#include "oxygen.h" -#include "ak4396.h" - -MODULE_AUTHOR("Clemens Ladisch "); -MODULE_DESCRIPTION("TempoTec HiFier driver"); -MODULE_LICENSE("GPL v2"); - -static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; -static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; -static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; - -module_param_array(index, int, NULL, 0444); -MODULE_PARM_DESC(index, "card index"); -module_param_array(id, charp, NULL, 0444); -MODULE_PARM_DESC(id, "ID string"); -module_param_array(enable, bool, NULL, 0444); -MODULE_PARM_DESC(enable, "enable card"); - -static DEFINE_PCI_DEVICE_TABLE(hifier_ids) = { - { OXYGEN_PCI_SUBID(0x14c3, 0x1710) }, - { OXYGEN_PCI_SUBID(0x14c3, 0x1711) }, - { OXYGEN_PCI_SUBID_BROKEN_EEPROM }, - { } -}; -MODULE_DEVICE_TABLE(pci, hifier_ids); - -struct hifier_data { - u8 ak4396_regs[5]; -}; - -static void ak4396_write(struct oxygen *chip, u8 reg, u8 value) -{ - struct hifier_data *data = chip->model_data; - - oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | - OXYGEN_SPI_DATA_LENGTH_2 | - OXYGEN_SPI_CLOCK_160 | - (0 << OXYGEN_SPI_CODEC_SHIFT) | - OXYGEN_SPI_CEN_LATCH_CLOCK_HI, - AK4396_WRITE | (reg << 8) | value); - data->ak4396_regs[reg] = value; -} - -static void ak4396_write_cached(struct oxygen *chip, u8 reg, u8 value) -{ - struct hifier_data *data = chip->model_data; - - if (value != data->ak4396_regs[reg]) - ak4396_write(chip, reg, value); -} - -static void hifier_registers_init(struct oxygen *chip) -{ - struct hifier_data *data = chip->model_data; - - ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN); - ak4396_write(chip, AK4396_CONTROL_2, - data->ak4396_regs[AK4396_CONTROL_2]); - ak4396_write(chip, AK4396_CONTROL_3, AK4396_PCM); - ak4396_write(chip, AK4396_LCH_ATT, chip->dac_volume[0]); - 
ak4396_write(chip, AK4396_RCH_ATT, chip->dac_volume[1]); -} - -static void hifier_init(struct oxygen *chip) -{ - struct hifier_data *data = chip->model_data; - - data->ak4396_regs[AK4396_CONTROL_2] = - AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL; - hifier_registers_init(chip); - - snd_component_add(chip->card, "AK4396"); - snd_component_add(chip->card, "CS5340"); -} - -static void hifier_cleanup(struct oxygen *chip) -{ -} - -static void hifier_resume(struct oxygen *chip) -{ - hifier_registers_init(chip); -} - -static void set_ak4396_params(struct oxygen *chip, - struct snd_pcm_hw_params *params) -{ - struct hifier_data *data = chip->model_data; - u8 value; - - value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_DFS_MASK; - if (params_rate(params) <= 54000) - value |= AK4396_DFS_NORMAL; - else if (params_rate(params) <= 108000) - value |= AK4396_DFS_DOUBLE; - else - value |= AK4396_DFS_QUAD; - - msleep(1); /* wait for the new MCLK to become stable */ - - if (value != data->ak4396_regs[AK4396_CONTROL_2]) { - ak4396_write(chip, AK4396_CONTROL_1, - AK4396_DIF_24_MSB); - ak4396_write(chip, AK4396_CONTROL_2, value); - ak4396_write(chip, AK4396_CONTROL_1, - AK4396_DIF_24_MSB | AK4396_RSTN); - } -} - -static void update_ak4396_volume(struct oxygen *chip) -{ - ak4396_write_cached(chip, AK4396_LCH_ATT, chip->dac_volume[0]); - ak4396_write_cached(chip, AK4396_RCH_ATT, chip->dac_volume[1]); -} - -static void update_ak4396_mute(struct oxygen *chip) -{ - struct hifier_data *data = chip->model_data; - u8 value; - - value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_SMUTE; - if (chip->dac_mute) - value |= AK4396_SMUTE; - ak4396_write_cached(chip, AK4396_CONTROL_2, value); -} - -static void set_cs5340_params(struct oxygen *chip, - struct snd_pcm_hw_params *params) -{ -} - -static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0); - -static const struct oxygen_model model_hifier = { - .shortname = "C-Media CMI8787", - .longname = "C-Media Oxygen HD Audio", - .chip = "CMI8788", - .init = hifier_init, - .cleanup = hifier_cleanup, - .resume = hifier_resume, - .get_i2s_mclk = oxygen_default_i2s_mclk, - .set_dac_params = set_ak4396_params, - .set_adc_params = set_cs5340_params, - .update_dac_volume = update_ak4396_volume, - .update_dac_mute = update_ak4396_mute, - .dac_tlv = ak4396_db_scale, - .model_data_size = sizeof(struct hifier_data), - .device_config = PLAYBACK_0_TO_I2S | - PLAYBACK_1_TO_SPDIF | - CAPTURE_0_FROM_I2S_1, - .dac_channels = 2, - .dac_volume_min = 0, - .dac_volume_max = 255, - .function_flags = OXYGEN_FUNCTION_SPI, - .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, - .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, -}; - -static int __devinit get_hifier_model(struct oxygen *chip, - const struct pci_device_id *id) -{ - chip->model = model_hifier; - return 0; -} - -static int __devinit hifier_probe(struct pci_dev *pci, - const struct pci_device_id *pci_id) -{ - static int dev; - int err; - - if (dev >= SNDRV_CARDS) - return -ENODEV; - if (!enable[dev]) { - ++dev; - return -ENOENT; - } - err = oxygen_pci_probe(pci, index[dev], id[dev], THIS_MODULE, - hifier_ids, get_hifier_model); - if (err >= 0) - ++dev; - return err; -} - -static struct pci_driver hifier_driver = { - .name = "CMI8787HiFier", - .id_table = hifier_ids, - .probe = hifier_probe, - .remove = __devexit_p(oxygen_pci_remove), -#ifdef CONFIG_PM - .suspend = oxygen_pci_suspend, - .resume = oxygen_pci_resume, -#endif -}; - -static int __init alsa_card_hifier_init(void) -{ - return pci_register_driver(&hifier_driver); -} - 
-static void __exit alsa_card_hifier_exit(void) -{ - pci_unregister_driver(&hifier_driver); -} - -module_init(alsa_card_hifier_init) -module_exit(alsa_card_hifier_exit) diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c index 98a8eb3c92f7..d7e8ddd9a67b 100644 --- a/sound/pci/oxygen/oxygen.c +++ b/sound/pci/oxygen/oxygen.c @@ -20,19 +20,32 @@ /* * CMI8788: * - * SPI 0 -> 1st AK4396 (front) - * SPI 1 -> 2nd AK4396 (surround) - * SPI 2 -> 3rd AK4396 (center/LFE) - * SPI 3 -> WM8785 - * SPI 4 -> 4th AK4396 (back) + * SPI 0 -> 1st AK4396 (front) + * SPI 1 -> 2nd AK4396 (surround) + * SPI 2 -> 3rd AK4396 (center/LFE) + * SPI 3 -> WM8785 + * SPI 4 -> 4th AK4396 (back) * - * GPIO 0 -> DFS0 of AK5385 - * GPIO 1 -> DFS1 of AK5385 - * GPIO 8 -> enable headphone amplifier on HT-Omega models + * GPIO 0 -> DFS0 of AK5385 + * GPIO 1 -> DFS1 of AK5385 + * + * X-Meridian models: + * GPIO 4 -> enable extension S/PDIF input + * GPIO 6 -> enable on-board S/PDIF input + * + * Claro models: + * GPIO 6 -> S/PDIF from optical (0) or coaxial (1) input + * GPIO 8 -> enable headphone amplifier * * CM9780: * - * GPO 0 -> route line-in (0) or AC97 output (1) to ADC input + * LINE_OUT -> input of ADC + * + * AUX_IN <- aux + * CD_IN <- CD + * MIC_IN <- mic + * + * GPO 0 -> route line-in (0) or AC97 output (1) to ADC input */ #include @@ -41,18 +54,22 @@ #include #include #include +#include #include #include #include #include #include "oxygen.h" +#include "xonar_dg.h" #include "ak4396.h" #include "wm8785.h" MODULE_AUTHOR("Clemens Ladisch "); MODULE_DESCRIPTION("C-Media CMI8788 driver"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8788}}"); +MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}" + ",{C-Media,CMI8787}" + ",{C-Media,CMI8788}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; @@ -66,24 +83,46 @@ module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "enable card"); enum { - MODEL_CMEDIA_REF, /* C-Media's reference design */ - MODEL_MERIDIAN, /* AuzenTech X-Meridian */ - MODEL_CLARO, /* HT-Omega Claro */ - MODEL_CLARO_HALO, /* HT-Omega Claro halo */ + MODEL_CMEDIA_REF, + MODEL_MERIDIAN, + MODEL_MERIDIAN_2G, + MODEL_CLARO, + MODEL_CLARO_HALO, + MODEL_FANTASIA, + MODEL_SERENADE, + MODEL_2CH_OUTPUT, + MODEL_HG2PCI, + MODEL_XONAR_DG, }; static DEFINE_PCI_DEVICE_TABLE(oxygen_ids) = { + /* C-Media's reference design */ { OXYGEN_PCI_SUBID(0x10b0, 0x0216), .driver_data = MODEL_CMEDIA_REF }, + { OXYGEN_PCI_SUBID(0x10b0, 0x0217), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x10b0, 0x0218), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x10b0, 0x0219), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x13f6, 0x0001), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x13f6, 0x0010), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x13f6, 0x8788), .driver_data = MODEL_CMEDIA_REF }, - { OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x147a, 0xa017), .driver_data = MODEL_CMEDIA_REF }, { OXYGEN_PCI_SUBID(0x1a58, 0x0910), .driver_data = MODEL_CMEDIA_REF }, + /* Asus Xonar DG */ + { OXYGEN_PCI_SUBID(0x1043, 0x8467), .driver_data = MODEL_XONAR_DG }, + /* PCI 2.0 HD Audio */ + { OXYGEN_PCI_SUBID(0x13f6, 0x8782), .driver_data = MODEL_2CH_OUTPUT }, + /* Kuroutoshikou CMI8787-HG2PCI */ + { OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_HG2PCI }, + /* TempoTec HiFier Fantasia */ + { OXYGEN_PCI_SUBID(0x14c3, 0x1710), .driver_data = MODEL_FANTASIA }, + /* 
TempoTec HiFier Serenade */ + { OXYGEN_PCI_SUBID(0x14c3, 0x1711), .driver_data = MODEL_SERENADE }, + /* AuzenTech X-Meridian */ { OXYGEN_PCI_SUBID(0x415a, 0x5431), .driver_data = MODEL_MERIDIAN }, + /* AuzenTech X-Meridian 2G */ + { OXYGEN_PCI_SUBID(0x5431, 0x017a), .driver_data = MODEL_MERIDIAN_2G }, + /* HT-Omega Claro */ { OXYGEN_PCI_SUBID(0x7284, 0x9761), .driver_data = MODEL_CLARO }, + /* HT-Omega Claro halo */ { OXYGEN_PCI_SUBID(0x7284, 0x9781), .driver_data = MODEL_CLARO_HALO }, { } }; @@ -95,9 +134,15 @@ MODULE_DEVICE_TABLE(pci, oxygen_ids); #define GPIO_AK5385_DFS_DOUBLE 0x0001 #define GPIO_AK5385_DFS_QUAD 0x0002 +#define GPIO_MERIDIAN_DIG_MASK 0x0050 +#define GPIO_MERIDIAN_DIG_EXT 0x0010 +#define GPIO_MERIDIAN_DIG_BOARD 0x0040 + +#define GPIO_CLARO_DIG_COAX 0x0040 #define GPIO_CLARO_HP 0x0100 struct generic_data { + unsigned int dacs; u8 ak4396_regs[4][5]; u16 wm8785_regs[3]; }; @@ -148,7 +193,7 @@ static void ak4396_registers_init(struct oxygen *chip) struct generic_data *data = chip->model_data; unsigned int i; - for (i = 0; i < 4; ++i) { + for (i = 0; i < data->dacs; ++i) { ak4396_write(chip, i, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN); ak4396_write(chip, i, AK4396_CONTROL_2, @@ -166,6 +211,7 @@ static void ak4396_init(struct oxygen *chip) { struct generic_data *data = chip->model_data; + data->dacs = chip->model.dac_channels_pcm / 2; data->ak4396_regs[0][AK4396_CONTROL_2] = AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL; ak4396_registers_init(chip); @@ -207,6 +253,10 @@ static void generic_init(struct oxygen *chip) static void meridian_init(struct oxygen *chip) { + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, + GPIO_MERIDIAN_DIG_MASK); + oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, + GPIO_MERIDIAN_DIG_BOARD, GPIO_MERIDIAN_DIG_MASK); ak4396_init(chip); ak5385_init(chip); } @@ -220,6 +270,8 @@ static void claro_enable_hp(struct oxygen *chip) static void claro_init(struct oxygen *chip) { + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX); + oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX); ak4396_init(chip); wm8785_init(chip); claro_enable_hp(chip); @@ -227,11 +279,24 @@ static void claro_init(struct oxygen *chip) static void claro_halo_init(struct oxygen *chip) { + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX); + oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX); ak4396_init(chip); ak5385_init(chip); claro_enable_hp(chip); } +static void fantasia_init(struct oxygen *chip) +{ + ak4396_init(chip); + snd_component_add(chip->card, "CS5340"); +} + +static void stereo_output_init(struct oxygen *chip) +{ + ak4396_init(chip); +} + static void generic_cleanup(struct oxygen *chip) { } @@ -268,6 +333,11 @@ static void claro_resume(struct oxygen *chip) claro_enable_hp(chip); } +static void stereo_resume(struct oxygen *chip) +{ + ak4396_registers_init(chip); +} + static void set_ak4396_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { @@ -286,7 +356,7 @@ static void set_ak4396_params(struct oxygen *chip, msleep(1); /* wait for the new MCLK to become stable */ if (value != data->ak4396_regs[0][AK4396_CONTROL_2]) { - for (i = 0; i < 4; ++i) { + for (i = 0; i < data->dacs; ++i) { ak4396_write(chip, i, AK4396_CONTROL_1, AK4396_DIF_24_MSB); ak4396_write(chip, i, AK4396_CONTROL_2, value); @@ -298,9 +368,10 @@ static void set_ak4396_params(struct oxygen *chip, static void update_ak4396_volume(struct oxygen *chip) { + struct generic_data *data = chip->model_data; unsigned int i; - for (i = 0; i < 4; ++i) 
{ + for (i = 0; i < data->dacs; ++i) { ak4396_write_cached(chip, i, AK4396_LCH_ATT, chip->dac_volume[i * 2]); ak4396_write_cached(chip, i, AK4396_RCH_ATT, @@ -317,7 +388,7 @@ static void update_ak4396_mute(struct oxygen *chip) value = data->ak4396_regs[0][AK4396_CONTROL_2] & ~AK4396_SMUTE; if (chip->dac_mute) value |= AK4396_SMUTE; - for (i = 0; i < 4; ++i) + for (i = 0; i < data->dacs; ++i) ak4396_write_cached(chip, i, AK4396_CONTROL_2, value); } @@ -356,6 +427,10 @@ static void set_ak5385_params(struct oxygen *chip, value, GPIO_AK5385_DFS_MASK); } +static void set_no_params(struct oxygen *chip, struct snd_pcm_hw_params *params) +{ +} + static int rolloff_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { @@ -363,13 +438,7 @@ static int rolloff_info(struct snd_kcontrol *ctl, "Sharp Roll-off", "Slow Roll-off" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int rolloff_get(struct snd_kcontrol *ctl, @@ -400,7 +469,7 @@ static int rolloff_put(struct snd_kcontrol *ctl, reg &= ~AK4396_SLOW; changed = reg != data->ak4396_regs[0][AK4396_CONTROL_2]; if (changed) { - for (i = 0; i < 4; ++i) + for (i = 0; i < data->dacs; ++i) ak4396_write(chip, i, AK4396_CONTROL_2, reg); } mutex_unlock(&chip->mutex); @@ -421,13 +490,7 @@ static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) "None", "High-pass Filter" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) @@ -466,6 +529,100 @@ static const struct snd_kcontrol_new hpf_control = { .put = hpf_put, }; +static int meridian_dig_source_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[2] = { "On-board", "Extension" }; + + return snd_ctl_enum_info(info, 1, 2, names); +} + +static int claro_dig_source_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[2] = { "Optical", "Coaxial" }; + + return snd_ctl_enum_info(info, 1, 2, names); +} + +static int meridian_dig_source_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + + value->value.enumerated.item[0] = + !!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & + GPIO_MERIDIAN_DIG_EXT); + return 0; +} + +static int claro_dig_source_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + + value->value.enumerated.item[0] = + !!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & + GPIO_CLARO_DIG_COAX); + return 0; +} + +static int meridian_dig_source_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + u16 old_reg, new_reg; + int changed; + + mutex_lock(&chip->mutex); + old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA); + new_reg = old_reg & ~GPIO_MERIDIAN_DIG_MASK; + if (value->value.enumerated.item[0] == 0) + new_reg |= GPIO_MERIDIAN_DIG_BOARD; + else + new_reg |= GPIO_MERIDIAN_DIG_EXT; + changed = new_reg != old_reg; + 
if (changed) + oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg); + mutex_unlock(&chip->mutex); + return changed; +} + +static int claro_dig_source_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + u16 old_reg, new_reg; + int changed; + + mutex_lock(&chip->mutex); + old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA); + new_reg = old_reg & ~GPIO_CLARO_DIG_COAX; + if (value->value.enumerated.item[0]) + new_reg |= GPIO_CLARO_DIG_COAX; + changed = new_reg != old_reg; + if (changed) + oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg); + mutex_unlock(&chip->mutex); + return changed; +} + +static const struct snd_kcontrol_new meridian_dig_source_control = { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "IEC958 Source Capture Enum", + .info = meridian_dig_source_info, + .get = meridian_dig_source_get, + .put = meridian_dig_source_put, +}; + +static const struct snd_kcontrol_new claro_dig_source_control = { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "IEC958 Source Capture Enum", + .info = claro_dig_source_info, + .get = claro_dig_source_get, + .put = claro_dig_source_put, +}; + static int generic_mixer_init(struct oxygen *chip) { return snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip)); @@ -484,6 +641,81 @@ static int generic_wm8785_mixer_init(struct oxygen *chip) return 0; } +static int meridian_mixer_init(struct oxygen *chip) +{ + int err; + + err = generic_mixer_init(chip); + if (err < 0) + return err; + err = snd_ctl_add(chip->card, + snd_ctl_new1(&meridian_dig_source_control, chip)); + if (err < 0) + return err; + return 0; +} + +static int claro_mixer_init(struct oxygen *chip) +{ + int err; + + err = generic_wm8785_mixer_init(chip); + if (err < 0) + return err; + err = snd_ctl_add(chip->card, + snd_ctl_new1(&claro_dig_source_control, chip)); + if (err < 0) + return err; + return 0; +} + +static int claro_halo_mixer_init(struct oxygen *chip) +{ + int err; + + err = generic_mixer_init(chip); + if (err < 0) + return err; + err = snd_ctl_add(chip->card, + snd_ctl_new1(&claro_dig_source_control, chip)); + if (err < 0) + return err; + return 0; +} + +static void dump_ak4396_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct generic_data *data = chip->model_data; + unsigned int dac, i; + + for (dac = 0; dac < data->dacs; ++dac) { + snd_iprintf(buffer, "\nAK4396 %u:", dac + 1); + for (i = 0; i < 5; ++i) + snd_iprintf(buffer, " %02x", data->ak4396_regs[dac][i]); + } + snd_iprintf(buffer, "\n"); +} + +static void dump_wm8785_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct generic_data *data = chip->model_data; + unsigned int i; + + snd_iprintf(buffer, "\nWM8785:"); + for (i = 0; i < 3; ++i) + snd_iprintf(buffer, " %03x", data->wm8785_regs[i]); + snd_iprintf(buffer, "\n"); +} + +static void dump_oxygen_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + dump_ak4396_registers(chip, buffer); + dump_wm8785_registers(chip, buffer); +} + static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0); static const struct oxygen_model model_generic = { @@ -494,11 +726,11 @@ static const struct oxygen_model model_generic = { .mixer_init = generic_wm8785_mixer_init, .cleanup = generic_cleanup, .resume = generic_resume, - .get_i2s_mclk = oxygen_default_i2s_mclk, .set_dac_params = set_ak4396_params, .set_adc_params = set_wm8785_params, .update_dac_volume = update_ak4396_volume, .update_dac_mute = update_ak4396_mute, + .dump_registers = 
dump_oxygen_registers, .dac_tlv = ak4396_db_scale, .model_data_size = sizeof(struct generic_data), .device_config = PLAYBACK_0_TO_I2S | @@ -508,11 +740,14 @@ static const struct oxygen_model model_generic = { CAPTURE_1_FROM_SPDIF | CAPTURE_2_FROM_AC97_1 | AC97_CD_INPUT, - .dac_channels = 8, + .dac_channels_pcm = 8, + .dac_channels_mixer = 8, .dac_volume_min = 0, .dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_SPI | OXYGEN_FUNCTION_ENABLE_SPI_4_5, + .dac_mclks = OXYGEN_MCLKS(256, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; @@ -520,42 +755,87 @@ static const struct oxygen_model model_generic = { static int __devinit get_oxygen_model(struct oxygen *chip, const struct pci_device_id *id) { + static const char *const names[] = { + [MODEL_MERIDIAN] = "AuzenTech X-Meridian", + [MODEL_MERIDIAN_2G] = "AuzenTech X-Meridian 2G", + [MODEL_CLARO] = "HT-Omega Claro", + [MODEL_CLARO_HALO] = "HT-Omega Claro halo", + [MODEL_FANTASIA] = "TempoTec HiFier Fantasia", + [MODEL_SERENADE] = "TempoTec HiFier Serenade", + [MODEL_HG2PCI] = "CMI8787-HG2PCI", + }; + chip->model = model_generic; switch (id->driver_data) { case MODEL_MERIDIAN: + case MODEL_MERIDIAN_2G: chip->model.init = meridian_init; - chip->model.mixer_init = generic_mixer_init; + chip->model.mixer_init = meridian_mixer_init; chip->model.resume = meridian_resume; chip->model.set_adc_params = set_ak5385_params; + chip->model.dump_registers = dump_ak4396_registers; chip->model.device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2 | CAPTURE_1_FROM_SPDIF; + if (id->driver_data == MODEL_MERIDIAN) + chip->model.device_config |= AC97_CD_INPUT; break; case MODEL_CLARO: chip->model.init = claro_init; + chip->model.mixer_init = claro_mixer_init; chip->model.cleanup = claro_cleanup; chip->model.suspend = claro_suspend; chip->model.resume = claro_resume; break; case MODEL_CLARO_HALO: chip->model.init = claro_halo_init; - chip->model.mixer_init = generic_mixer_init; + chip->model.mixer_init = claro_halo_mixer_init; chip->model.cleanup = claro_cleanup; chip->model.suspend = claro_suspend; chip->model.resume = claro_resume; chip->model.set_adc_params = set_ak5385_params; + chip->model.dump_registers = dump_ak4396_registers; chip->model.device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2 | CAPTURE_1_FROM_SPDIF; break; + case MODEL_FANTASIA: + case MODEL_SERENADE: + case MODEL_2CH_OUTPUT: + case MODEL_HG2PCI: + chip->model.shortname = "C-Media CMI8787"; + chip->model.chip = "CMI8787"; + if (id->driver_data == MODEL_FANTASIA) + chip->model.init = fantasia_init; + else + chip->model.init = stereo_output_init; + chip->model.resume = stereo_resume; + chip->model.mixer_init = generic_mixer_init; + chip->model.set_adc_params = set_no_params; + chip->model.dump_registers = dump_ak4396_registers; + chip->model.device_config = PLAYBACK_0_TO_I2S | + PLAYBACK_1_TO_SPDIF; + if (id->driver_data == MODEL_FANTASIA) { + chip->model.device_config |= CAPTURE_0_FROM_I2S_1; + chip->model.adc_mclks = OXYGEN_MCLKS(256, 128, 128); + } + chip->model.dac_channels_pcm = 2; + chip->model.dac_channels_mixer = 2; + break; + case MODEL_XONAR_DG: + chip->model = model_xonar_dg; + break; } if (id->driver_data == MODEL_MERIDIAN || + id->driver_data == MODEL_MERIDIAN_2G || id->driver_data == MODEL_CLARO_HALO) { chip->model.misc_flags = OXYGEN_MISC_MIDI; chip->model.device_config |= MIDI_OUTPUT | MIDI_INPUT; } + if (id->driver_data < 
ARRAY_SIZE(names) && names[id->driver_data]) + chip->model.shortname = names[id->driver_data]; return 0; } diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h index 7d5222caa0a9..c2ae63d17cd2 100644 --- a/sound/pci/oxygen/oxygen.h +++ b/sound/pci/oxygen/oxygen.h @@ -16,6 +16,10 @@ #define PCM_AC97 5 #define PCM_COUNT 6 +#define OXYGEN_MCLKS(f_single, f_double, f_quad) ((MCLK_##f_single << 0) | \ + (MCLK_##f_double << 2) | \ + (MCLK_##f_quad << 4)) + #define OXYGEN_IO_SIZE 0x100 #define OXYGEN_EEPROM_ID 0x434d /* "CM" */ @@ -35,6 +39,7 @@ #define MIDI_OUTPUT 0x0800 #define MIDI_INPUT 0x1000 #define AC97_CD_INPUT 0x2000 +#define AC97_FMIC_SWITCH 0x4000 enum { CONTROL_SPDIF_PCM, @@ -65,6 +70,7 @@ struct snd_pcm_hardware; struct snd_pcm_hw_params; struct snd_kcontrol_new; struct snd_rawmidi; +struct snd_info_buffer; struct oxygen; struct oxygen_model { @@ -79,8 +85,6 @@ struct oxygen_model { void (*resume)(struct oxygen *chip); void (*pcm_hardware_filter)(unsigned int channel, struct snd_pcm_hardware *hardware); - unsigned int (*get_i2s_mclk)(struct oxygen *chip, unsigned int channel, - struct snd_pcm_hw_params *hw_params); void (*set_dac_params)(struct oxygen *chip, struct snd_pcm_hw_params *params); void (*set_adc_params)(struct oxygen *chip, @@ -92,15 +96,19 @@ struct oxygen_model { void (*uart_input)(struct oxygen *chip); void (*ac97_switch)(struct oxygen *chip, unsigned int reg, unsigned int mute); + void (*dump_registers)(struct oxygen *chip, + struct snd_info_buffer *buffer); const unsigned int *dac_tlv; - unsigned long private_data; size_t model_data_size; unsigned int device_config; - u8 dac_channels; + u8 dac_channels_pcm; + u8 dac_channels_mixer; u8 dac_volume_min; u8 dac_volume_max; u8 misc_flags; u8 function_flags; + u8 dac_mclks; + u8 adc_mclks; u16 dac_i2s_format; u16 adc_i2s_format; }; @@ -121,7 +129,6 @@ struct oxygen { u8 pcm_running; u8 dac_routing; u8 spdif_playback_enable; - u8 revision; u8 has_ac97_0; u8 has_ac97_1; u32 spdif_bits; @@ -167,8 +174,6 @@ void oxygen_update_spdif_source(struct oxygen *chip); /* oxygen_pcm.c */ int oxygen_pcm_init(struct oxygen *chip); -unsigned int oxygen_default_i2s_mclk(struct oxygen *chip, unsigned int channel, - struct snd_pcm_hw_params *hw_params); /* oxygen_io.c */ diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c index 09b2b2a36df5..f5164b1e1c80 100644 --- a/sound/pci/oxygen/oxygen_io.c +++ b/sound/pci/oxygen/oxygen_io.c @@ -197,11 +197,11 @@ void oxygen_write_spi(struct oxygen *chip, u8 control, unsigned int data) { unsigned int count; - /* should not need more than 7.68 us (24 * 320 ns) */ + /* should not need more than 30.72 us (24 * 1.28 us) */ count = 10; while ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) & OXYGEN_SPI_BUSY) && count > 0) { - udelay(1); + udelay(4); --count; } diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index e5ebe56fb0c5..70b739816fcc 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c @@ -202,7 +202,13 @@ static void oxygen_proc_read(struct snd_info_entry *entry, struct oxygen *chip = entry->private_data; int i, j; - snd_iprintf(buffer, "CMI8788\n\n"); + switch (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_PACKAGE_ID_MASK) { + case OXYGEN_PACKAGE_ID_8786: i = '6'; break; + case OXYGEN_PACKAGE_ID_8787: i = '7'; break; + case OXYGEN_PACKAGE_ID_8788: i = '8'; break; + default: i = '?'; break; + } + snd_iprintf(buffer, "CMI878%c:\n", i); for (i = 0; i < OXYGEN_IO_SIZE; i += 0x10) { snd_iprintf(buffer, "%02x:", i); for (j = 0; 
j < 0x10; ++j) @@ -212,7 +218,7 @@ static void oxygen_proc_read(struct snd_info_entry *entry, if (mutex_lock_interruptible(&chip->mutex) < 0) return; if (chip->has_ac97_0) { - snd_iprintf(buffer, "\nAC97\n"); + snd_iprintf(buffer, "\nAC97:\n"); for (i = 0; i < 0x80; i += 0x10) { snd_iprintf(buffer, "%02x:", i); for (j = 0; j < 0x10; j += 2) @@ -222,7 +228,7 @@ static void oxygen_proc_read(struct snd_info_entry *entry, } } if (chip->has_ac97_1) { - snd_iprintf(buffer, "\nAC97 2\n"); + snd_iprintf(buffer, "\nAC97 2:\n"); for (i = 0; i < 0x80; i += 0x10) { snd_iprintf(buffer, "%02x:", i); for (j = 0; j < 0x10; j += 2) @@ -232,13 +238,15 @@ static void oxygen_proc_read(struct snd_info_entry *entry, } } mutex_unlock(&chip->mutex); + if (chip->model.dump_registers) + chip->model.dump_registers(chip, buffer); } static void oxygen_proc_init(struct oxygen *chip) { struct snd_info_entry *entry; - if (!snd_card_proc_new(chip->card, "cmi8788", &entry)) + if (!snd_card_proc_new(chip->card, "oxygen", &entry)) snd_info_set_text_ops(entry, chip, oxygen_proc_read); } #else @@ -262,7 +270,7 @@ oxygen_search_pci_id(struct oxygen *chip, const struct pci_device_id ids[]) */ subdevice = oxygen_read_eeprom(chip, 2); /* use default ID if EEPROM is missing */ - if (subdevice == 0xffff) + if (subdevice == 0xffff && oxygen_read_eeprom(chip, 1) == 0xffff) subdevice = 0x8788; /* * We use only the subsystem device ID for searching because it is @@ -364,12 +372,7 @@ static void oxygen_init(struct oxygen *chip) (IEC958_AES1_CON_PCM_CODER << OXYGEN_SPDIF_CATEGORY_SHIFT); chip->spdif_pcm_bits = chip->spdif_bits; - if (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2) - chip->revision = 2; - else - chip->revision = 1; - - if (chip->revision == 1) + if (!(oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2)) oxygen_set_bits8(chip, OXYGEN_MISC, OXYGEN_MISC_PCI_MEM_W_1_CLOCK); @@ -406,28 +409,40 @@ static void oxygen_init(struct oxygen *chip) (OXYGEN_FORMAT_16 << OXYGEN_MULTICH_FORMAT_SHIFT)); oxygen_write8(chip, OXYGEN_REC_CHANNELS, OXYGEN_REC_CHANNELS_2_2_2); oxygen_write16(chip, OXYGEN_I2S_MULTICH_FORMAT, - OXYGEN_RATE_48000 | chip->model.dac_i2s_format | - OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 | - OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64); + OXYGEN_RATE_48000 | + chip->model.dac_i2s_format | + OXYGEN_I2S_MCLK(chip->model.dac_mclks) | + OXYGEN_I2S_BITS_16 | + OXYGEN_I2S_MASTER | + OXYGEN_I2S_BCLK_64); if (chip->model.device_config & CAPTURE_0_FROM_I2S_1) oxygen_write16(chip, OXYGEN_I2S_A_FORMAT, - OXYGEN_RATE_48000 | chip->model.adc_i2s_format | - OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 | - OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64); + OXYGEN_RATE_48000 | + chip->model.adc_i2s_format | + OXYGEN_I2S_MCLK(chip->model.adc_mclks) | + OXYGEN_I2S_BITS_16 | + OXYGEN_I2S_MASTER | + OXYGEN_I2S_BCLK_64); else oxygen_write16(chip, OXYGEN_I2S_A_FORMAT, - OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK); + OXYGEN_I2S_MASTER | + OXYGEN_I2S_MUTE_MCLK); if (chip->model.device_config & (CAPTURE_0_FROM_I2S_2 | CAPTURE_2_FROM_I2S_2)) oxygen_write16(chip, OXYGEN_I2S_B_FORMAT, - OXYGEN_RATE_48000 | chip->model.adc_i2s_format | - OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 | - OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64); + OXYGEN_RATE_48000 | + chip->model.adc_i2s_format | + OXYGEN_I2S_MCLK(chip->model.adc_mclks) | + OXYGEN_I2S_BITS_16 | + OXYGEN_I2S_MASTER | + OXYGEN_I2S_BCLK_64); else oxygen_write16(chip, OXYGEN_I2S_B_FORMAT, - OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK); + OXYGEN_I2S_MASTER | + OXYGEN_I2S_MUTE_MCLK); oxygen_write16(chip, 
OXYGEN_I2S_C_FORMAT, - OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK); + OXYGEN_I2S_MASTER | + OXYGEN_I2S_MUTE_MCLK); oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL, OXYGEN_SPDIF_OUT_ENABLE | OXYGEN_SPDIF_LOOPBACK); @@ -557,7 +572,8 @@ static void oxygen_card_free(struct snd_card *card) oxygen_shutdown(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); - flush_scheduled_work(); + flush_work_sync(&chip->spdif_input_bits_work); + flush_work_sync(&chip->gpio_work); chip->model.cleanup(chip); kfree(chip->model_data); mutex_destroy(&chip->mutex); @@ -648,8 +664,8 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id, strcpy(card->driver, chip->model.chip); strcpy(card->shortname, chip->model.shortname); - sprintf(card->longname, "%s (rev %u) at %#lx, irq %i", - chip->model.longname, chip->revision, chip->addr, chip->irq); + sprintf(card->longname, "%s at %#lx, irq %i", + chip->model.longname, chip->addr, chip->irq); strcpy(card->mixername, chip->model.chip); snd_component_add(card, chip->model.chip); @@ -733,7 +749,8 @@ int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state) spin_unlock_irq(&chip->reg_lock); synchronize_irq(chip->irq); - flush_scheduled_work(); + flush_work_sync(&chip->spdif_input_bits_work); + flush_work_sync(&chip->gpio_work); chip->interrupt_mask = saved_interrupt_mask; pci_disable_device(pci); diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c index 2849b36f5f7e..9bff14d5895d 100644 --- a/sound/pci/oxygen/oxygen_mixer.c +++ b/sound/pci/oxygen/oxygen_mixer.c @@ -31,7 +31,7 @@ static int dac_volume_info(struct snd_kcontrol *ctl, struct oxygen *chip = ctl->private_data; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - info->count = chip->model.dac_channels; + info->count = chip->model.dac_channels_mixer; info->value.integer.min = chip->model.dac_volume_min; info->value.integer.max = chip->model.dac_volume_max; return 0; @@ -44,7 +44,7 @@ static int dac_volume_get(struct snd_kcontrol *ctl, unsigned int i; mutex_lock(&chip->mutex); - for (i = 0; i < chip->model.dac_channels; ++i) + for (i = 0; i < chip->model.dac_channels_mixer; ++i) value->value.integer.value[i] = chip->dac_volume[i]; mutex_unlock(&chip->mutex); return 0; @@ -59,7 +59,7 @@ static int dac_volume_put(struct snd_kcontrol *ctl, changed = 0; mutex_lock(&chip->mutex); - for (i = 0; i < chip->model.dac_channels; ++i) + for (i = 0; i < chip->model.dac_channels_mixer; ++i) if (value->value.integer.value[i] != chip->dac_volume[i]) { chip->dac_volume[i] = value->value.integer.value[i]; changed = 1; @@ -97,6 +97,16 @@ static int dac_mute_put(struct snd_kcontrol *ctl, return changed; } +static unsigned int upmix_item_count(struct oxygen *chip) +{ + if (chip->model.dac_channels_pcm < 8) + return 2; + else if (chip->model.update_center_lfe_mix) + return 5; + else + return 3; +} + static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[5] = { @@ -107,15 +117,9 @@ static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) "Front+Surround+Center/LFE+Back", }; struct oxygen *chip = ctl->private_data; - unsigned int count = chip->model.update_center_lfe_mix ? 
5 : 3; + unsigned int count = upmix_item_count(chip); - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = count; - if (info->value.enumerated.item >= count) - info->value.enumerated.item = count - 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, count, names); } static int upmix_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) @@ -188,7 +192,7 @@ void oxygen_update_dac_routing(struct oxygen *chip) static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; - unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3; + unsigned int count = upmix_item_count(chip); int changed; if (value->value.enumerated.item[0] >= count) @@ -430,30 +434,31 @@ static int spdif_input_default_get(struct snd_kcontrol *ctl, return 0; } -static int spdif_loopback_get(struct snd_kcontrol *ctl, - struct snd_ctl_elem_value *value) +static int spdif_bit_switch_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; + u32 bit = ctl->private_value; value->value.integer.value[0] = - !!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) - & OXYGEN_SPDIF_LOOPBACK); + !!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & bit); return 0; } -static int spdif_loopback_put(struct snd_kcontrol *ctl, - struct snd_ctl_elem_value *value) +static int spdif_bit_switch_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; + u32 bit = ctl->private_value; u32 oldreg, newreg; int changed; spin_lock_irq(&chip->reg_lock); oldreg = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL); if (value->value.integer.value[0]) - newreg = oldreg | OXYGEN_SPDIF_LOOPBACK; + newreg = oldreg | bit; else - newreg = oldreg & ~OXYGEN_SPDIF_LOOPBACK; + newreg = oldreg & ~bit; changed = newreg != oldreg; if (changed) oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, newreg); @@ -644,6 +649,46 @@ static int ac97_volume_put(struct snd_kcontrol *ctl, return change; } +static int mic_fmic_source_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[] = { "Mic Jack", "Front Panel" }; + + return snd_ctl_enum_info(info, 1, 2, names); +} + +static int mic_fmic_source_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + + mutex_lock(&chip->mutex); + value->value.enumerated.item[0] = + !!(oxygen_read_ac97(chip, 0, CM9780_JACK) & CM9780_FMIC2MIC); + mutex_unlock(&chip->mutex); + return 0; +} + +static int mic_fmic_source_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + u16 oldreg, newreg; + int change; + + mutex_lock(&chip->mutex); + oldreg = oxygen_read_ac97(chip, 0, CM9780_JACK); + if (value->value.enumerated.item[0]) + newreg = oldreg | CM9780_FMIC2MIC; + else + newreg = oldreg & ~CM9780_FMIC2MIC; + change = newreg != oldreg; + if (change) + oxygen_write_ac97(chip, 0, CM9780_JACK, newreg); + mutex_unlock(&chip->mutex); + return change; +} + static int ac97_fp_rec_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { @@ -791,8 +836,17 @@ static const struct snd_kcontrol_new spdif_input_controls[] = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Loopback ", NONE, SWITCH), .info = snd_ctl_boolean_mono_info, - .get = spdif_loopback_get, - .put = spdif_loopback_put, + .get = 
spdif_bit_switch_get, + .put = spdif_bit_switch_put, + .private_value = OXYGEN_SPDIF_LOOPBACK, + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = SNDRV_CTL_NAME_IEC958("Validity Check ",CAPTURE,SWITCH), + .info = snd_ctl_boolean_mono_info, + .get = spdif_bit_switch_get, + .put = spdif_bit_switch_put, + .private_value = OXYGEN_SPDIF_SPDVALID, }, }; @@ -908,6 +962,13 @@ static const struct snd_kcontrol_new ac97_controls[] = { AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0), AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1), AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0), + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Mic Source Capture Enum", + .info = mic_fmic_source_info, + .get = mic_fmic_source_get, + .put = mic_fmic_source_put, + }, AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1), AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1), AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1), @@ -970,7 +1031,10 @@ static int add_controls(struct oxygen *chip, continue; } if (!strcmp(template.name, "Stereo Upmixing") && - chip->model.dac_channels == 2) + chip->model.dac_channels_pcm == 2) + continue; + if (!strcmp(template.name, "Mic Source Capture Enum") && + !(chip->model.device_config & AC97_FMIC_SWITCH)) continue; if (!strncmp(template.name, "CD Capture ", 11) && !(chip->model.device_config & AC97_CD_INPUT)) diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c index 814667442eb0..d5533e34ece9 100644 --- a/sound/pci/oxygen/oxygen_pcm.c +++ b/sound/pci/oxygen/oxygen_pcm.c @@ -39,7 +39,8 @@ static const struct snd_pcm_hardware oxygen_stereo_hardware = { SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_SYNC_START, + SNDRV_PCM_INFO_SYNC_START | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, .rates = SNDRV_PCM_RATE_32000 | @@ -65,7 +66,8 @@ static const struct snd_pcm_hardware oxygen_multichannel_hardware = { SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_SYNC_START, + SNDRV_PCM_INFO_SYNC_START | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, .rates = SNDRV_PCM_RATE_32000 | @@ -91,7 +93,8 @@ static const struct snd_pcm_hardware oxygen_ac97_hardware = { SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_SYNC_START, + SNDRV_PCM_INFO_SYNC_START | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, @@ -140,7 +143,7 @@ static int oxygen_open(struct snd_pcm_substream *substream, runtime->hw.rate_min = 44100; break; case PCM_MULTICH: - runtime->hw.channels_max = chip->model.dac_channels; + runtime->hw.channels_max = chip->model.dac_channels_pcm; break; } if (chip->model.pcm_hardware_filter) @@ -271,17 +274,6 @@ static unsigned int oxygen_rate(struct snd_pcm_hw_params *hw_params) } } -unsigned int oxygen_default_i2s_mclk(struct oxygen *chip, - unsigned int channel, - struct snd_pcm_hw_params *hw_params) -{ - if (params_rate(hw_params) <= 96000) - return OXYGEN_I2S_MCLK_256; - else - return OXYGEN_I2S_MCLK_128; -} -EXPORT_SYMBOL(oxygen_default_i2s_mclk); - static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params) { if (params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE) @@ -341,6 +333,26 @@ static int oxygen_hw_params(struct snd_pcm_substream *substream, return 0; } +static u16 get_mclk(struct oxygen *chip, unsigned int channel, + 
struct snd_pcm_hw_params *params) +{ + unsigned int mclks, shift; + + if (channel == PCM_MULTICH) + mclks = chip->model.dac_mclks; + else + mclks = chip->model.adc_mclks; + + if (params_rate(params) <= 48000) + shift = 0; + else if (params_rate(params) <= 96000) + shift = 2; + else + shift = 4; + + return OXYGEN_I2S_MCLK(mclks >> shift); +} + static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { @@ -357,8 +369,8 @@ static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream, OXYGEN_REC_FORMAT_A_MASK); oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, oxygen_rate(hw_params) | - chip->model.get_i2s_mclk(chip, PCM_A, hw_params) | chip->model.adc_i2s_format | + get_mclk(chip, PCM_A, hw_params) | oxygen_i2s_bits(hw_params), OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_FORMAT_MASK | @@ -393,9 +405,8 @@ static int oxygen_rec_b_hw_params(struct snd_pcm_substream *substream, if (!is_ac97) oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT, oxygen_rate(hw_params) | - chip->model.get_i2s_mclk(chip, PCM_B, - hw_params) | chip->model.adc_i2s_format | + get_mclk(chip, PCM_B, hw_params) | oxygen_i2s_bits(hw_params), OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_FORMAT_MASK | @@ -476,8 +487,7 @@ static int oxygen_multich_hw_params(struct snd_pcm_substream *substream, oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT, oxygen_rate(hw_params) | chip->model.dac_i2s_format | - chip->model.get_i2s_mclk(chip, PCM_MULTICH, - hw_params) | + get_mclk(chip, PCM_MULTICH, hw_params) | oxygen_i2s_bits(hw_params), OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_FORMAT_MASK | @@ -530,7 +540,10 @@ static int oxygen_prepare(struct snd_pcm_substream *substream) oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask); oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask); - chip->interrupt_mask |= channel_mask; + if (substream->runtime->no_period_wakeup) + chip->interrupt_mask &= ~channel_mask; + else + chip->interrupt_mask |= channel_mask; oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask); spin_unlock_irq(&chip->reg_lock); return 0; diff --git a/sound/pci/oxygen/oxygen_regs.h b/sound/pci/oxygen/oxygen_regs.h index 4dcd41b78258..63dc7a0ab555 100644 --- a/sound/pci/oxygen/oxygen_regs.h +++ b/sound/pci/oxygen/oxygen_regs.h @@ -139,9 +139,11 @@ #define OXYGEN_I2S_FORMAT_I2S 0x0000 #define OXYGEN_I2S_FORMAT_LJUST 0x0008 #define OXYGEN_I2S_MCLK_MASK 0x0030 /* MCLK/LRCK */ -#define OXYGEN_I2S_MCLK_128 0x0000 -#define OXYGEN_I2S_MCLK_256 0x0010 -#define OXYGEN_I2S_MCLK_512 0x0020 +#define OXYGEN_I2S_MCLK_SHIFT 4 +#define MCLK_128 0 +#define MCLK_256 1 +#define MCLK_512 2 +#define OXYGEN_I2S_MCLK(f) (((f) & 3) << OXYGEN_I2S_MCLK_SHIFT) #define OXYGEN_I2S_BITS_MASK 0x00c0 #define OXYGEN_I2S_BITS_16 0x0000 #define OXYGEN_I2S_BITS_20 0x0040 @@ -238,11 +240,11 @@ #define OXYGEN_SPI_DATA_LENGTH_MASK 0x02 #define OXYGEN_SPI_DATA_LENGTH_2 0x00 #define OXYGEN_SPI_DATA_LENGTH_3 0x02 -#define OXYGEN_SPI_CLOCK_MASK 0xc0 +#define OXYGEN_SPI_CLOCK_MASK 0x0c #define OXYGEN_SPI_CLOCK_160 0x00 /* ns */ -#define OXYGEN_SPI_CLOCK_320 0x40 -#define OXYGEN_SPI_CLOCK_640 0x80 -#define OXYGEN_SPI_CLOCK_1280 0xc0 +#define OXYGEN_SPI_CLOCK_320 0x04 +#define OXYGEN_SPI_CLOCK_640 0x08 +#define OXYGEN_SPI_CLOCK_1280 0x0c #define OXYGEN_SPI_CODEC_MASK 0x70 /* 0..5 */ #define OXYGEN_SPI_CODEC_SHIFT 4 #define OXYGEN_SPI_CEN_MASK 0x80 diff --git a/sound/pci/oxygen/xonar.h b/sound/pci/oxygen/xonar.h index b35343b0a9a5..0434c207e811 100644 --- a/sound/pci/oxygen/xonar.h +++ b/sound/pci/oxygen/xonar.h @@ -24,6 +24,8 @@ 
void xonar_init_ext_power(struct oxygen *chip); void xonar_init_cs53x1(struct oxygen *chip); void xonar_set_cs53x1_params(struct oxygen *chip, struct snd_pcm_hw_params *params); + +#define XONAR_GPIO_BIT_INVERT (1 << 16) int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value); int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl, diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c index aa27c31049af..9f72d424969c 100644 --- a/sound/pci/oxygen/xonar_cs43xx.c +++ b/sound/pci/oxygen/xonar_cs43xx.c @@ -22,29 +22,28 @@ * * CMI8788: * - * I²C <-> CS4398 (front) - * <-> CS4362A (surround, center/LFE, back) + * I²C <-> CS4398 (addr 1001111) (front) + * <-> CS4362A (addr 0011000) (surround, center/LFE, back) * - * GPI 0 <- external power present (DX only) + * GPI 0 <- external power present (DX only) * - * GPIO 0 -> enable output to speakers - * GPIO 1 -> enable front panel I/O - * GPIO 2 -> M0 of CS5361 - * GPIO 3 -> M1 of CS5361 - * GPIO 8 -> route input jack to line-in (0) or mic-in (1) + * GPIO 0 -> enable output to speakers + * GPIO 1 -> route output to front panel + * GPIO 2 -> M0 of CS5361 + * GPIO 3 -> M1 of CS5361 + * GPIO 6 -> ? + * GPIO 7 -> ? + * GPIO 8 -> route input jack to line-in (0) or mic-in (1) * - * CS4398: - * - * AD0 <- 1 - * AD1 <- 1 + * CM9780: * - * CS4362A: + * LINE_OUT -> input of ADC * - * AD0 <- 0 + * AUX_IN <- aux + * MIC_IN <- mic + * FMIC_IN <- front mic * - * CM9780: - * - * GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input + * GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input */ #include @@ -63,6 +62,7 @@ #define GPI_EXT_POWER 0x01 #define GPIO_D1_OUTPUT_ENABLE 0x0001 #define GPIO_D1_FRONT_PANEL 0x0002 +#define GPIO_D1_MAGIC 0x00c0 #define GPIO_D1_INPUT_ROUTE 0x0100 #define I2C_DEVICE_CS4398 0x9e /* 10011, AD1=1, AD0=1, /W=0 */ @@ -169,12 +169,12 @@ static void xonar_d1_init(struct oxygen *chip) cs43xx_registers_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, - GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE); + GPIO_D1_FRONT_PANEL | + GPIO_D1_MAGIC | + GPIO_D1_INPUT_ROUTE); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE); - oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC); - xonar_init_cs53x1(chip); xonar_enable_output(chip); @@ -284,7 +284,7 @@ static void update_cs43xx_center_lfe_mix(struct oxygen *chip, bool mixed) static const struct snd_kcontrol_new front_panel_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, - .name = "Front Panel Switch", + .name = "Front Panel Playback Switch", .info = snd_ctl_boolean_mono_info, .get = xonar_gpio_bit_switch_get, .put = xonar_gpio_bit_switch_put, @@ -298,13 +298,7 @@ static int rolloff_info(struct snd_kcontrol *ctl, "Fast Roll-off", "Slow Roll-off" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int rolloff_get(struct snd_kcontrol *ctl, @@ -380,6 +374,30 @@ static int xonar_d1_mixer_init(struct oxygen *chip) return 0; } +static void dump_cs4362a_registers(struct xonar_cs43xx *data, + struct snd_info_buffer *buffer) +{ + unsigned int i; + + snd_iprintf(buffer, "\nCS4362A:"); + for (i = 1; i <= 14; ++i) + snd_iprintf(buffer, " %02x", data->cs4362a_regs[i]); + snd_iprintf(buffer, "\n"); +} + +static void 
dump_d1_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct xonar_cs43xx *data = chip->model_data; + unsigned int i; + + snd_iprintf(buffer, "\nCS4398: 7?"); + for (i = 2; i <= 8; ++i) + snd_iprintf(buffer, " %02x", data->cs4398_regs[i]); + snd_iprintf(buffer, "\n"); + dump_cs4362a_registers(data, buffer); +} + static const struct oxygen_model model_xonar_d1 = { .longname = "Asus Virtuoso 100", .chip = "AV200", @@ -388,22 +406,26 @@ static const struct oxygen_model model_xonar_d1 = { .cleanup = xonar_d1_cleanup, .suspend = xonar_d1_suspend, .resume = xonar_d1_resume, - .get_i2s_mclk = oxygen_default_i2s_mclk, .set_dac_params = set_cs43xx_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_cs43xx_volume, .update_dac_mute = update_cs43xx_mute, .update_center_lfe_mix = update_cs43xx_center_lfe_mix, .ac97_switch = xonar_d1_line_mic_ac97_switch, + .dump_registers = dump_d1_registers, .dac_tlv = cs4362a_db_scale, .model_data_size = sizeof(struct xonar_cs43xx), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | - CAPTURE_0_FROM_I2S_2, - .dac_channels = 8, + CAPTURE_0_FROM_I2S_2 | + AC97_FMIC_SWITCH, + .dac_channels_pcm = 8, + .dac_channels_mixer = 8, .dac_volume_min = 127 - 60, .dac_volume_max = 127, .function_flags = OXYGEN_FUNCTION_2WIRE, + .dac_mclks = OXYGEN_MCLKS(256, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 128, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c new file mode 100644 index 000000000000..e4de0b8d087a --- /dev/null +++ b/sound/pci/oxygen/xonar_dg.c @@ -0,0 +1,572 @@ +/* + * card driver for the Xonar DG + * + * Copyright (c) Clemens Ladisch + * + * + * This driver is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2. + * + * This driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this driver; if not, see . + */ + +/* + * Xonar DG + * -------- + * + * CMI8788: + * + * SPI 0 -> CS4245 + * + * GPIO 3 <- ? 
+ * GPIO 4 <- headphone detect + * GPIO 5 -> route input jack to line-in (0) or mic-in (1) + * GPIO 6 -> route input jack to line-in (0) or mic-in (1) + * GPIO 7 -> enable rear headphone amp + * GPIO 8 -> enable output to speakers + * + * CS4245: + * + * input 1 <- aux + * input 2 <- front mic + * input 4 <- line/mic + * aux out -> front panel headphones + */ + +#include +#include +#include +#include +#include +#include +#include +#include "oxygen.h" +#include "xonar_dg.h" +#include "cs4245.h" + +#define GPIO_MAGIC 0x0008 +#define GPIO_HP_DETECT 0x0010 +#define GPIO_INPUT_ROUTE 0x0060 +#define GPIO_HP_REAR 0x0080 +#define GPIO_OUTPUT_ENABLE 0x0100 + +struct dg { + unsigned int output_sel; + s8 input_vol[4][2]; + unsigned int input_sel; + u8 hp_vol_att; + u8 cs4245_regs[0x11]; +}; + +static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value) +{ + struct dg *data = chip->model_data; + + oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | + OXYGEN_SPI_DATA_LENGTH_3 | + OXYGEN_SPI_CLOCK_1280 | + (0 << OXYGEN_SPI_CODEC_SHIFT) | + OXYGEN_SPI_CEN_LATCH_CLOCK_HI, + CS4245_SPI_ADDRESS | + CS4245_SPI_WRITE | + (value << 8) | reg); + data->cs4245_regs[reg] = value; +} + +static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value) +{ + struct dg *data = chip->model_data; + + if (value != data->cs4245_regs[reg]) + cs4245_write(chip, reg, value); +} + +static void cs4245_registers_init(struct oxygen *chip) +{ + struct dg *data = chip->model_data; + + cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN); + cs4245_write(chip, CS4245_DAC_CTRL_1, + data->cs4245_regs[CS4245_DAC_CTRL_1]); + cs4245_write(chip, CS4245_ADC_CTRL, + data->cs4245_regs[CS4245_ADC_CTRL]); + cs4245_write(chip, CS4245_SIGNAL_SEL, + data->cs4245_regs[CS4245_SIGNAL_SEL]); + cs4245_write(chip, CS4245_PGA_B_CTRL, + data->cs4245_regs[CS4245_PGA_B_CTRL]); + cs4245_write(chip, CS4245_PGA_A_CTRL, + data->cs4245_regs[CS4245_PGA_A_CTRL]); + cs4245_write(chip, CS4245_ANALOG_IN, + data->cs4245_regs[CS4245_ANALOG_IN]); + cs4245_write(chip, CS4245_DAC_A_CTRL, + data->cs4245_regs[CS4245_DAC_A_CTRL]); + cs4245_write(chip, CS4245_DAC_B_CTRL, + data->cs4245_regs[CS4245_DAC_B_CTRL]); + cs4245_write(chip, CS4245_DAC_CTRL_2, + CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC); + cs4245_write(chip, CS4245_INT_MASK, 0); + cs4245_write(chip, CS4245_POWER_CTRL, 0); +} + +static void cs4245_init(struct oxygen *chip) +{ + struct dg *data = chip->model_data; + + data->cs4245_regs[CS4245_DAC_CTRL_1] = + CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST; + data->cs4245_regs[CS4245_ADC_CTRL] = + CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST; + data->cs4245_regs[CS4245_SIGNAL_SEL] = + CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH; + data->cs4245_regs[CS4245_PGA_B_CTRL] = 0; + data->cs4245_regs[CS4245_PGA_A_CTRL] = 0; + data->cs4245_regs[CS4245_ANALOG_IN] = + CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4; + data->cs4245_regs[CS4245_DAC_A_CTRL] = 0; + data->cs4245_regs[CS4245_DAC_B_CTRL] = 0; + cs4245_registers_init(chip); + snd_component_add(chip->card, "CS4245"); +} + +static void dg_output_enable(struct oxygen *chip) +{ + msleep(2500); + oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE); +} + +static void dg_init(struct oxygen *chip) +{ + struct dg *data = chip->model_data; + + data->output_sel = 0; + data->input_sel = 3; + data->hp_vol_att = 2 * 16; + + cs4245_init(chip); + + oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, + GPIO_MAGIC | GPIO_HP_DETECT); + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, + GPIO_INPUT_ROUTE | 
GPIO_HP_REAR | GPIO_OUTPUT_ENABLE); + oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, + GPIO_INPUT_ROUTE | GPIO_HP_REAR); + dg_output_enable(chip); +} + +static void dg_cleanup(struct oxygen *chip) +{ + oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE); +} + +static void dg_suspend(struct oxygen *chip) +{ + dg_cleanup(chip); +} + +static void dg_resume(struct oxygen *chip) +{ + cs4245_registers_init(chip); + dg_output_enable(chip); +} + +static void set_cs4245_dac_params(struct oxygen *chip, + struct snd_pcm_hw_params *params) +{ + struct dg *data = chip->model_data; + u8 value; + + value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK; + if (params_rate(params) <= 50000) + value |= CS4245_DAC_FM_SINGLE; + else if (params_rate(params) <= 100000) + value |= CS4245_DAC_FM_DOUBLE; + else + value |= CS4245_DAC_FM_QUAD; + cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value); +} + +static void set_cs4245_adc_params(struct oxygen *chip, + struct snd_pcm_hw_params *params) +{ + struct dg *data = chip->model_data; + u8 value; + + value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK; + if (params_rate(params) <= 50000) + value |= CS4245_ADC_FM_SINGLE; + else if (params_rate(params) <= 100000) + value |= CS4245_ADC_FM_DOUBLE; + else + value |= CS4245_ADC_FM_QUAD; + cs4245_write_cached(chip, CS4245_ADC_CTRL, value); +} + +static int output_switch_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[3] = { + "Speakers", "Headphones", "FP Headphones" + }; + + return snd_ctl_enum_info(info, 1, 3, names); +} + +static int output_switch_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + + mutex_lock(&chip->mutex); + value->value.enumerated.item[0] = data->output_sel; + mutex_unlock(&chip->mutex); + return 0; +} + +static int output_switch_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + u8 reg; + int changed; + + if (value->value.enumerated.item[0] > 2) + return -EINVAL; + + mutex_lock(&chip->mutex); + changed = value->value.enumerated.item[0] != data->output_sel; + if (changed) { + data->output_sel = value->value.enumerated.item[0]; + + reg = data->cs4245_regs[CS4245_SIGNAL_SEL] & + ~CS4245_A_OUT_SEL_MASK; + reg |= data->output_sel == 2 ? + CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ; + cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg); + + cs4245_write_cached(chip, CS4245_DAC_A_CTRL, + data->output_sel ? data->hp_vol_att : 0); + cs4245_write_cached(chip, CS4245_DAC_B_CTRL, + data->output_sel ? data->hp_vol_att : 0); + + oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, + data->output_sel == 1 ? 
GPIO_HP_REAR : 0, + GPIO_HP_REAR); + } + mutex_unlock(&chip->mutex); + return changed; +} + +static int hp_volume_offset_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[3] = { + "< 64 ohms", "64-150 ohms", "150-300 ohms" + }; + + return snd_ctl_enum_info(info, 1, 3, names); +} + +static int hp_volume_offset_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + + mutex_lock(&chip->mutex); + if (data->hp_vol_att > 2 * 7) + value->value.enumerated.item[0] = 0; + else if (data->hp_vol_att > 0) + value->value.enumerated.item[0] = 1; + else + value->value.enumerated.item[0] = 2; + mutex_unlock(&chip->mutex); + return 0; +} + +static int hp_volume_offset_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + static const s8 atts[3] = { 2 * 16, 2 * 7, 0 }; + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + s8 att; + int changed; + + if (value->value.enumerated.item[0] > 2) + return -EINVAL; + att = atts[value->value.enumerated.item[0]]; + mutex_lock(&chip->mutex); + changed = att != data->hp_vol_att; + if (changed) { + data->hp_vol_att = att; + if (data->output_sel) { + cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att); + cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att); + } + } + mutex_unlock(&chip->mutex); + return changed; +} + +static int input_vol_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; + info->count = 2; + info->value.integer.min = 2 * -12; + info->value.integer.max = 2 * 12; + return 0; +} + +static int input_vol_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + unsigned int idx = ctl->private_value; + + mutex_lock(&chip->mutex); + value->value.integer.value[0] = data->input_vol[idx][0]; + value->value.integer.value[1] = data->input_vol[idx][1]; + mutex_unlock(&chip->mutex); + return 0; +} + +static int input_vol_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + unsigned int idx = ctl->private_value; + int changed = 0; + + if (value->value.integer.value[0] < 2 * -12 || + value->value.integer.value[0] > 2 * 12 || + value->value.integer.value[1] < 2 * -12 || + value->value.integer.value[1] > 2 * 12) + return -EINVAL; + mutex_lock(&chip->mutex); + changed = data->input_vol[idx][0] != value->value.integer.value[0] || + data->input_vol[idx][1] != value->value.integer.value[1]; + if (changed) { + data->input_vol[idx][0] = value->value.integer.value[0]; + data->input_vol[idx][1] = value->value.integer.value[1]; + if (idx == data->input_sel) { + cs4245_write_cached(chip, CS4245_PGA_A_CTRL, + data->input_vol[idx][0]); + cs4245_write_cached(chip, CS4245_PGA_B_CTRL, + data->input_vol[idx][1]); + } + } + mutex_unlock(&chip->mutex); + return changed; +} + +static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0); + +static int input_sel_info(struct snd_kcontrol *ctl, + struct snd_ctl_elem_info *info) +{ + static const char *const names[4] = { + "Mic", "Aux", "Front Mic", "Line" + }; + + return snd_ctl_enum_info(info, 1, 4, names); +} + +static int input_sel_get(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + + 
mutex_lock(&chip->mutex); + value->value.enumerated.item[0] = data->input_sel; + mutex_unlock(&chip->mutex); + return 0; +} + +static int input_sel_put(struct snd_kcontrol *ctl, + struct snd_ctl_elem_value *value) +{ + static const u8 sel_values[4] = { + CS4245_SEL_MIC, + CS4245_SEL_INPUT_1, + CS4245_SEL_INPUT_2, + CS4245_SEL_INPUT_4 + }; + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + int changed; + + if (value->value.enumerated.item[0] > 3) + return -EINVAL; + + mutex_lock(&chip->mutex); + changed = value->value.enumerated.item[0] != data->input_sel; + if (changed) { + data->input_sel = value->value.enumerated.item[0]; + + cs4245_write(chip, CS4245_ANALOG_IN, + (data->cs4245_regs[CS4245_ANALOG_IN] & + ~CS4245_SEL_MASK) | + sel_values[data->input_sel]); + + cs4245_write_cached(chip, CS4245_PGA_A_CTRL, + data->input_vol[data->input_sel][0]); + cs4245_write_cached(chip, CS4245_PGA_B_CTRL, + data->input_vol[data->input_sel][1]); + + oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, + data->input_sel ? 0 : GPIO_INPUT_ROUTE, + GPIO_INPUT_ROUTE); + } + mutex_unlock(&chip->mutex); + return changed; +} + +static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) +{ + static const char *const names[2] = { "Active", "Frozen" }; + + return snd_ctl_enum_info(info, 1, 2, names); +} + +static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + + value->value.enumerated.item[0] = + !!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE); + return 0; +} + +static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) +{ + struct oxygen *chip = ctl->private_data; + struct dg *data = chip->model_data; + u8 reg; + int changed; + + mutex_lock(&chip->mutex); + reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE; + if (value->value.enumerated.item[0]) + reg |= CS4245_HPF_FREEZE; + changed = reg != data->cs4245_regs[CS4245_ADC_CTRL]; + if (changed) + cs4245_write(chip, CS4245_ADC_CTRL, reg); + mutex_unlock(&chip->mutex); + return changed; +} + +#define INPUT_VOLUME(xname, index) { \ + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ + .name = xname, \ + .info = input_vol_info, \ + .get = input_vol_get, \ + .put = input_vol_put, \ + .tlv = { .p = cs4245_pga_db_scale }, \ + .private_value = index, \ +} +static const struct snd_kcontrol_new dg_controls[] = { + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Analog Output Playback Enum", + .info = output_switch_info, + .get = output_switch_get, + .put = output_switch_put, + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Headphones Impedance Playback Enum", + .info = hp_volume_offset_info, + .get = hp_volume_offset_get, + .put = hp_volume_offset_put, + }, + INPUT_VOLUME("Mic Capture Volume", 0), + INPUT_VOLUME("Aux Capture Volume", 1), + INPUT_VOLUME("Front Mic Capture Volume", 2), + INPUT_VOLUME("Line Capture Volume", 3), + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Capture Source", + .info = input_sel_info, + .get = input_sel_get, + .put = input_sel_put, + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "ADC High-pass Filter Capture Enum", + .info = hpf_info, + .get = hpf_get, + .put = hpf_put, + }, +}; + +static int dg_control_filter(struct snd_kcontrol_new *template) +{ + if (!strncmp(template->name, "Master Playback ", 16)) + return 1; + return 0; +} + +static int dg_mixer_init(struct oxygen *chip) +{ + unsigned int i; + int err; + + for (i = 0; i < 
ARRAY_SIZE(dg_controls); ++i) { + err = snd_ctl_add(chip->card, + snd_ctl_new1(&dg_controls[i], chip)); + if (err < 0) + return err; + } + return 0; +} + +static void dump_cs4245_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct dg *data = chip->model_data; + unsigned int i; + + snd_iprintf(buffer, "\nCS4245:"); + for (i = 1; i <= 0x10; ++i) + snd_iprintf(buffer, " %02x", data->cs4245_regs[i]); + snd_iprintf(buffer, "\n"); +} + +struct oxygen_model model_xonar_dg = { + .shortname = "Xonar DG", + .longname = "C-Media Oxygen HD Audio", + .chip = "CMI8786", + .init = dg_init, + .control_filter = dg_control_filter, + .mixer_init = dg_mixer_init, + .cleanup = dg_cleanup, + .suspend = dg_suspend, + .resume = dg_resume, + .set_dac_params = set_cs4245_dac_params, + .set_adc_params = set_cs4245_adc_params, + .dump_registers = dump_cs4245_registers, + .model_data_size = sizeof(struct dg), + .device_config = PLAYBACK_0_TO_I2S | + PLAYBACK_1_TO_SPDIF | + CAPTURE_0_FROM_I2S_2, + .dac_channels_pcm = 6, + .dac_channels_mixer = 0, + .function_flags = OXYGEN_FUNCTION_SPI, + .dac_mclks = OXYGEN_MCLKS(256, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 128, 128), + .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, + .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, +}; diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h new file mode 100644 index 000000000000..5688d78609a9 --- /dev/null +++ b/sound/pci/oxygen/xonar_dg.h @@ -0,0 +1,8 @@ +#ifndef XONAR_DG_H_INCLUDED +#define XONAR_DG_H_INCLUDED + +#include "oxygen.h" + +extern struct oxygen_model model_xonar_dg; + +#endif diff --git a/sound/pci/oxygen/xonar_hdmi.c b/sound/pci/oxygen/xonar_hdmi.c index b12db1f1cea9..136dac6a3964 100644 --- a/sound/pci/oxygen/xonar_hdmi.c +++ b/sound/pci/oxygen/xonar_hdmi.c @@ -1,5 +1,5 @@ /* - * helper functions for HDMI models (Xonar HDAV1.3) + * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim) * * Copyright (c) Clemens Ladisch * diff --git a/sound/pci/oxygen/xonar_lib.c b/sound/pci/oxygen/xonar_lib.c index b3ff71316653..0ebe7f5916f9 100644 --- a/sound/pci/oxygen/xonar_lib.c +++ b/sound/pci/oxygen/xonar_lib.c @@ -104,9 +104,10 @@ int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl, { struct oxygen *chip = ctl->private_data; u16 bit = ctl->private_value; + bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT; value->value.integer.value[0] = - !!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit); + !!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit) ^ invert; return 0; } @@ -115,12 +116,13 @@ int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl, { struct oxygen *chip = ctl->private_data; u16 bit = ctl->private_value; + bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT; u16 old_bits, new_bits; int changed; spin_lock_irq(&chip->reg_lock); old_bits = oxygen_read16(chip, OXYGEN_GPIO_DATA); - if (value->value.integer.value[0]) + if (!!value->value.integer.value[0] ^ invert) new_bits = old_bits | bit; else new_bits = old_bits & ~bit; diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c index d491fd6c0be2..54cad38ec30a 100644 --- a/sound/pci/oxygen/xonar_pcm179x.c +++ b/sound/pci/oxygen/xonar_pcm179x.c @@ -22,20 +22,26 @@ * * CMI8788: * - * SPI 0 -> 1st PCM1796 (front) - * SPI 1 -> 2nd PCM1796 (surround) - * SPI 2 -> 3rd PCM1796 (center/LFE) - * SPI 4 -> 4th PCM1796 (back) + * SPI 0 -> 1st PCM1796 (front) + * SPI 1 -> 2nd PCM1796 (surround) + * SPI 2 -> 3rd PCM1796 (center/LFE) + * SPI 4 -> 4th PCM1796 (back) * - * GPIO 2 -> M0 of CS5381 - * GPIO 3 -> M1 of 
CS5381 - * GPIO 5 <- external power present (D2X only) - * GPIO 7 -> ALT - * GPIO 8 -> enable output to speakers + * GPIO 2 -> M0 of CS5381 + * GPIO 3 -> M1 of CS5381 + * GPIO 5 <- external power present (D2X only) + * GPIO 7 -> ALT + * GPIO 8 -> enable output to speakers * * CM9780: * - * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input + * LINE_OUT -> input of ADC + * + * AUX_IN <- aux + * VIDEO_IN <- CD + * FMIC_IN <- mic + * + * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input */ /* @@ -44,52 +50,53 @@ * * CMI8788: * - * I²C <-> PCM1796 (front) + * I²C <-> PCM1796 (addr 1001100) (front) * - * GPI 0 <- external power present + * GPI 0 <- external power present * - * GPIO 0 -> enable output to speakers - * GPIO 2 -> M0 of CS5381 - * GPIO 3 -> M1 of CS5381 - * GPIO 8 -> route input jack to line-in (0) or mic-in (1) + * GPIO 0 -> enable HDMI (0) or speaker (1) output + * GPIO 2 -> M0 of CS5381 + * GPIO 3 -> M1 of CS5381 + * GPIO 4 <- daughterboard detection + * GPIO 5 <- daughterboard detection + * GPIO 6 -> ? + * GPIO 7 -> ? + * GPIO 8 -> route input jack to line-in (0) or mic-in (1) * - * TXD -> HDMI controller - * RXD <- HDMI controller - * - * PCM1796 front: AD1,0 <- 0,0 + * UART <-> HDMI controller * * CM9780: * - * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input + * LINE_OUT -> input of ADC + * + * AUX_IN <- aux + * CD_IN <- CD + * MIC_IN <- mic + * + * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input * * no daughterboard * ---------------- * - * GPIO 4 <- 1 + * GPIO 4 <- 1 * * H6 daughterboard * ---------------- * - * GPIO 4 <- 0 - * GPIO 5 <- 0 - * - * I²C <-> PCM1796 (surround) - * <-> PCM1796 (center/LFE) - * <-> PCM1796 (back) + * GPIO 4 <- 0 + * GPIO 5 <- 0 * - * PCM1796 surround: AD1,0 <- 0,1 - * PCM1796 center/LFE: AD1,0 <- 1,0 - * PCM1796 back: AD1,0 <- 1,1 + * I²C <-> PCM1796 (addr 1001101) (surround) + * <-> PCM1796 (addr 1001110) (center/LFE) + * <-> PCM1796 (addr 1001111) (back) * * unknown daughterboard * --------------------- * - * GPIO 4 <- 0 - * GPIO 5 <- 1 - * - * I²C <-> CS4362A (surround, center/LFE, back) + * GPIO 4 <- 0 + * GPIO 5 <- 1 * - * CS4362A: AD0 <- 0 + * I²C <-> CS4362A (addr 0011000) (surround, center/LFE, back) */ /* @@ -98,32 +105,35 @@ * * CMI8788: * - * I²C <-> PCM1792A - * <-> CS2000 (ST only) + * I²C <-> PCM1792A (addr 1001100) + * <-> CS2000 (addr 1001110) (ST only) * - * ADC1 MCLK -> REF_CLK of CS2000 (ST only) + * ADC1 MCLK -> REF_CLK of CS2000 (ST only) * - * GPI 0 <- external power present (STX only) + * GPI 0 <- external power present (STX only) * - * GPIO 0 -> enable output to speakers - * GPIO 1 -> route HP to front panel (0) or rear jack (1) - * GPIO 2 -> M0 of CS5381 - * GPIO 3 -> M1 of CS5381 - * GPIO 7 -> route output to speaker jacks (0) or HP (1) - * GPIO 8 -> route input jack to line-in (0) or mic-in (1) + * GPIO 0 -> enable output to speakers + * GPIO 1 -> route HP to front panel (0) or rear jack (1) + * GPIO 2 -> M0 of CS5381 + * GPIO 3 -> M1 of CS5381 + * GPIO 4 <- daughterboard detection + * GPIO 5 <- daughterboard detection + * GPIO 6 -> ? 
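
The wiring comments above list the codec I²C addresses as 7-bit binary values (for example, the front PCM1796 at 1001100). On the bus, and in the 8-bit device bytes such drivers typically define, the 7-bit address is shifted left by one with the R/W bit in bit 0, so 1001100 with /W = 0 becomes 0x98. A small stand-alone illustration of the conversion (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int addr7 = 0x4c;			/* 1001100: front PCM1796 */
	unsigned int write_byte = (addr7 << 1) | 0;	/* R/W bit = 0 for a write */

	printf("7-bit 0x%02x -> 8-bit write byte 0x%02x\n", addr7, write_byte);
	/* prints: 7-bit 0x4c -> 8-bit write byte 0x98 */
	return 0;
}

The same rule gives 0x9a, 0x9c and 0x9e for the surround, center/LFE and back DACs listed for the H6 daughterboard.
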
+ * GPIO 7 -> route output to speaker jacks (0) or HP (1) + * GPIO 8 -> route input jack to line-in (0) or mic-in (1) * * PCM1792A: * - * AD1,0 <- 0,0 - * SCK <- CLK_OUT of CS2000 (ST only) + * SCK <- CLK_OUT of CS2000 (ST only) * - * CS2000: + * CM9780: * - * AD0 <- 0 + * LINE_OUT -> input of ADC * - * CM9780: + * AUX_IN <- aux + * MIC_IN <- mic * - * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input + * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input * * H6 daughterboard * ---------------- @@ -133,15 +143,39 @@ */ /* - * Xonar HDAV1.3 Slim - * ------------------ + * Xonar Xense + * ----------- * * CMI8788: * - * GPIO 1 -> enable output + * I²C <-> PCM1796 (addr 1001100) (front) + * <-> CS4362A (addr 0011000) (surround, center/LFE, back) + * <-> CS2000 (addr 1001110) + * + * ADC1 MCLK -> REF_CLK of CS2000 + * + * GPI 0 <- external power present + * + * GPIO 0 -> enable output + * GPIO 1 -> route HP to front panel (0) or rear jack (1) + * GPIO 2 -> M0 of CS5381 + * GPIO 3 -> M1 of CS5381 + * GPIO 4 -> enable output + * GPIO 5 -> enable output + * GPIO 6 -> ? + * GPIO 7 -> route output to HP (0) or speaker (1) + * GPIO 8 -> route input jack to mic-in (0) or line-in (1) * - * TXD -> HDMI controller - * RXD <- HDMI controller + * CM9780: + * + * LINE_OUT -> input of ADC + * + * AUX_IN <- aux + * VIDEO_IN <- ? + * FMIC_IN <- mic + * + * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input + * GPO 1 -> route mic-in from input jack (0) or front panel header (1) */ #include @@ -150,6 +184,7 @@ #include #include #include +#include #include #include #include @@ -167,12 +202,14 @@ #define GPIO_INPUT_ROUTE 0x0100 #define GPIO_HDAV_OUTPUT_ENABLE 0x0001 +#define GPIO_HDAV_MAGIC 0x00c0 #define GPIO_DB_MASK 0x0030 #define GPIO_DB_H6 0x0000 #define GPIO_ST_OUTPUT_ENABLE 0x0001 #define GPIO_ST_HP_REAR 0x0002 +#define GPIO_ST_MAGIC 0x0040 #define GPIO_ST_HP 0x0080 #define I2C_DEVICE_PCM1796(i) (0x98 + ((i) << 1)) /* 10011, ii, /W=0 */ @@ -186,11 +223,12 @@ struct xonar_pcm179x { unsigned int dacs; u8 pcm1796_regs[4][5]; unsigned int current_rate; - bool os_128; + bool h6; bool hp_active; s8 hp_gain_offset; bool has_cs2000; - u8 cs2000_fun_cfg_1; + u8 cs2000_regs[0x1f]; + bool broken_i2c; }; struct xonar_hdav { @@ -249,16 +287,14 @@ static void cs2000_write(struct oxygen *chip, u8 reg, u8 value) struct xonar_pcm179x *data = chip->model_data; oxygen_write_i2c(chip, I2C_DEVICE_CS2000, reg, value); - if (reg == CS2000_FUN_CFG_1) - data->cs2000_fun_cfg_1 = value; + data->cs2000_regs[reg] = value; } static void cs2000_write_cached(struct oxygen *chip, u8 reg, u8 value) { struct xonar_pcm179x *data = chip->model_data; - if (reg != CS2000_FUN_CFG_1 || - value != data->cs2000_fun_cfg_1) + if (value != data->cs2000_regs[reg]) cs2000_write(chip, reg, value); } @@ -268,6 +304,7 @@ static void pcm1796_registers_init(struct oxygen *chip) unsigned int i; s8 gain_offset; + msleep(1); gain_offset = data->hp_active ? 
data->hp_gain_offset : 0; for (i = 0; i < data->dacs; ++i) { /* set ATLD before ATL/ATR */ @@ -282,6 +319,7 @@ static void pcm1796_registers_init(struct oxygen *chip) pcm1796_write(chip, i, 20, data->pcm1796_regs[0][20 - PCM1796_REG_BASE]); pcm1796_write(chip, i, 21, 0); + gain_offset = 0; } } @@ -290,10 +328,11 @@ static void pcm1796_init(struct oxygen *chip) struct xonar_pcm179x *data = chip->model_data; data->pcm1796_regs[0][18 - PCM1796_REG_BASE] = PCM1796_MUTE | - PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD; + PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD; data->pcm1796_regs[0][19 - PCM1796_REG_BASE] = PCM1796_FLT_SHARP | PCM1796_ATS_1; - data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = PCM1796_OS_64; + data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = + data->h6 ? PCM1796_OS_64 : PCM1796_OS_128; pcm1796_registers_init(chip); data->current_rate = 48000; } @@ -339,18 +378,20 @@ static void xonar_hdav_init(struct oxygen *chip) oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS, OXYGEN_2WIRE_LENGTH_8 | OXYGEN_2WIRE_INTERRUPT_MASK | - OXYGEN_2WIRE_SPEED_FAST); + OXYGEN_2WIRE_SPEED_STANDARD); data->pcm179x.generic.anti_pop_delay = 100; data->pcm179x.generic.output_enable_bit = GPIO_HDAV_OUTPUT_ENABLE; data->pcm179x.generic.ext_power_reg = OXYGEN_GPI_DATA; data->pcm179x.generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK; data->pcm179x.generic.ext_power_bit = GPI_EXT_POWER; - data->pcm179x.dacs = chip->model.private_data ? 4 : 1; + data->pcm179x.dacs = chip->model.dac_channels_mixer / 2; + data->pcm179x.h6 = chip->model.dac_channels_mixer > 2; pcm1796_init(chip); - oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE); + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, + GPIO_HDAV_MAGIC | GPIO_INPUT_ROUTE); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE); xonar_init_cs53x1(chip); @@ -367,7 +408,7 @@ static void xonar_st_init_i2c(struct oxygen *chip) oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS, OXYGEN_2WIRE_LENGTH_8 | OXYGEN_2WIRE_INTERRUPT_MASK | - OXYGEN_2WIRE_SPEED_FAST); + OXYGEN_2WIRE_SPEED_STANDARD); } static void xonar_st_init_common(struct oxygen *chip) @@ -375,13 +416,14 @@ static void xonar_st_init_common(struct oxygen *chip) struct xonar_pcm179x *data = chip->model_data; data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE; - data->dacs = chip->model.private_data ? 
4 : 1; + data->dacs = chip->model.dac_channels_mixer / 2; data->hp_gain_offset = 2*-18; pcm1796_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, - GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP); + GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | + GPIO_ST_MAGIC | GPIO_ST_HP); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP); @@ -410,9 +452,11 @@ static void cs2000_registers_init(struct oxygen *chip) cs2000_write(chip, CS2000_RATIO_0 + 1, 0x10); cs2000_write(chip, CS2000_RATIO_0 + 2, 0x00); cs2000_write(chip, CS2000_RATIO_0 + 3, 0x00); - cs2000_write(chip, CS2000_FUN_CFG_1, data->cs2000_fun_cfg_1); + cs2000_write(chip, CS2000_FUN_CFG_1, + data->cs2000_regs[CS2000_FUN_CFG_1]); cs2000_write(chip, CS2000_FUN_CFG_2, 0); cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_EN_DEV_CFG_2); + msleep(3); /* PLL lock delay */ } static void xonar_st_init(struct oxygen *chip) @@ -420,13 +464,18 @@ static void xonar_st_init(struct oxygen *chip) struct xonar_pcm179x *data = chip->model_data; data->generic.anti_pop_delay = 100; + data->h6 = chip->model.dac_channels_mixer > 2; data->has_cs2000 = 1; - data->cs2000_fun_cfg_1 = CS2000_REF_CLK_DIV_1; + data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1; + data->broken_i2c = true; oxygen_write16(chip, OXYGEN_I2S_A_FORMAT, - OXYGEN_RATE_48000 | OXYGEN_I2S_FORMAT_I2S | - OXYGEN_I2S_MCLK_128 | OXYGEN_I2S_BITS_16 | - OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64); + OXYGEN_RATE_48000 | + OXYGEN_I2S_FORMAT_I2S | + OXYGEN_I2S_MCLK(data->h6 ? MCLK_256 : MCLK_512) | + OXYGEN_I2S_BITS_16 | + OXYGEN_I2S_MASTER | + OXYGEN_I2S_BCLK_64); xonar_st_init_i2c(chip); cs2000_registers_init(chip); @@ -507,44 +556,16 @@ static void xonar_st_resume(struct oxygen *chip) xonar_stx_resume(chip); } -static unsigned int mclk_from_rate(struct oxygen *chip, unsigned int rate) -{ - struct xonar_pcm179x *data = chip->model_data; - - if (rate <= 32000) - return OXYGEN_I2S_MCLK_512; - else if (rate <= 48000 && data->os_128) - return OXYGEN_I2S_MCLK_512; - else if (rate <= 96000) - return OXYGEN_I2S_MCLK_256; - else - return OXYGEN_I2S_MCLK_128; -} - -static unsigned int get_pcm1796_i2s_mclk(struct oxygen *chip, - unsigned int channel, - struct snd_pcm_hw_params *params) -{ - if (channel == PCM_MULTICH) - return mclk_from_rate(chip, params_rate(params)); - else - return oxygen_default_i2s_mclk(chip, channel, params); -} - static void update_pcm1796_oversampling(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; unsigned int i; u8 reg; - if (data->current_rate <= 32000) + if (data->current_rate <= 48000 && !data->h6) reg = PCM1796_OS_128; - else if (data->current_rate <= 48000 && data->os_128) - reg = PCM1796_OS_128; - else if (data->current_rate <= 96000 || data->os_128) - reg = PCM1796_OS_64; else - reg = PCM1796_OS_32; + reg = PCM1796_OS_64; for (i = 0; i < data->dacs; ++i) pcm1796_write_cached(chip, i, 20, reg); } @@ -554,6 +575,7 @@ static void set_pcm1796_params(struct oxygen *chip, { struct xonar_pcm179x *data = chip->model_data; + msleep(1); data->current_rate = params_rate(params); update_pcm1796_oversampling(chip); } @@ -570,6 +592,7 @@ static void update_pcm1796_volume(struct oxygen *chip) + gain_offset); pcm1796_write_cached(chip, i, 17, chip->dac_volume[i * 2 + 1] + gain_offset); + gain_offset = 0; } } @@ -579,7 +602,7 @@ static void update_pcm1796_mute(struct oxygen *chip) unsigned int i; u8 value; - value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD; + value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | 
PCM1796_ATLD; if (chip->dac_mute) value |= PCM1796_MUTE; for (i = 0; i < data->dacs; ++i) @@ -592,45 +615,35 @@ static void update_cs2000_rate(struct oxygen *chip, unsigned int rate) u8 rate_mclk, reg; switch (rate) { - /* XXX Why is the I2S A MCLK half the actual I2S MCLK? */ case 32000: - rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256; - break; - case 44100: - if (data->os_128) - rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; - else - rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_128; - break; - default: /* 48000 */ - if (data->os_128) - rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; - else - rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_128; - break; case 64000: - rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256; + rate_mclk = OXYGEN_RATE_32000; break; + case 44100: case 88200: - rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; - break; - case 96000: - rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; - break; case 176400: - rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; + rate_mclk = OXYGEN_RATE_44100; break; + default: + case 48000: + case 96000: case 192000: - rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; + rate_mclk = OXYGEN_RATE_48000; break; } - oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk, - OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK); - if ((rate_mclk & OXYGEN_I2S_MCLK_MASK) <= OXYGEN_I2S_MCLK_128) + + if (rate <= 96000 && (rate > 48000 || data->h6)) { + rate_mclk |= OXYGEN_I2S_MCLK(MCLK_256); reg = CS2000_REF_CLK_DIV_1; - else + } else { + rate_mclk |= OXYGEN_I2S_MCLK(MCLK_512); reg = CS2000_REF_CLK_DIV_2; + } + + oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk, + OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK); cs2000_write_cached(chip, CS2000_FUN_CFG_1, reg); + msleep(3); /* PLL lock delay */ } static void set_st_params(struct oxygen *chip, @@ -665,13 +678,7 @@ static int rolloff_info(struct snd_kcontrol *ctl, "Sharp Roll-off", "Slow Roll-off" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int rolloff_get(struct snd_kcontrol *ctl, @@ -719,57 +726,13 @@ static const struct snd_kcontrol_new rolloff_control = { .put = rolloff_put, }; -static int os_128_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) -{ - static const char *const names[2] = { "64x", "128x" }; - - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; -} - -static int os_128_get(struct snd_kcontrol *ctl, - struct snd_ctl_elem_value *value) -{ - struct oxygen *chip = ctl->private_data; - struct xonar_pcm179x *data = chip->model_data; - - value->value.enumerated.item[0] = data->os_128; - return 0; -} - -static int os_128_put(struct snd_kcontrol *ctl, - struct snd_ctl_elem_value *value) -{ - struct oxygen *chip = ctl->private_data; - struct xonar_pcm179x *data = chip->model_data; - int changed; - - mutex_lock(&chip->mutex); - changed = value->value.enumerated.item[0] != data->os_128; - if (changed) { - data->os_128 = value->value.enumerated.item[0]; - if (data->has_cs2000) - update_cs2000_rate(chip, data->current_rate); - oxygen_write16_masked(chip, 
OXYGEN_I2S_MULTICH_FORMAT, - mclk_from_rate(chip, data->current_rate), - OXYGEN_I2S_MCLK_MASK); - update_pcm1796_oversampling(chip); - } - mutex_unlock(&chip->mutex); - return changed; -} - -static const struct snd_kcontrol_new os_128_control = { +static const struct snd_kcontrol_new hdav_hdmi_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, - .name = "DAC Oversampling Playback Enum", - .info = os_128_info, - .get = os_128_get, - .put = os_128_put, + .name = "HDMI Playback Switch", + .info = snd_ctl_boolean_mono_info, + .get = xonar_gpio_bit_switch_get, + .put = xonar_gpio_bit_switch_put, + .private_value = GPIO_HDAV_OUTPUT_ENABLE | XONAR_GPIO_BIT_INVERT, }; static int st_output_switch_info(struct snd_kcontrol *ctl, @@ -779,13 +742,7 @@ static int st_output_switch_info(struct snd_kcontrol *ctl, "Speakers", "Headphones", "FP Headphones" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 3; - if (info->value.enumerated.item >= 3) - info->value.enumerated.item = 2; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 3, names); } static int st_output_switch_get(struct snd_kcontrol *ctl, @@ -840,13 +797,7 @@ static int st_hp_volume_offset_info(struct snd_kcontrol *ctl, "< 64 ohms", "64-300 ohms", "300-600 ohms" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 3; - if (info->value.enumerated.item > 2) - info->value.enumerated.item = 2; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 3, names); } static int st_hp_volume_offset_get(struct snd_kcontrol *ctl, @@ -928,16 +879,25 @@ static int xonar_d2_control_filter(struct snd_kcontrol_new *template) return 0; } +static int xonar_st_h6_control_filter(struct snd_kcontrol_new *template) +{ + if (!strncmp(template->name, "Master Playback ", 16)) + /* no volume/mute, as I²C to the third DAC does not work */ + return 1; + return 0; +} + static int add_pcm1796_controls(struct oxygen *chip) { + struct xonar_pcm179x *data = chip->model_data; int err; - err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip)); - if (err < 0) - return err; - err = snd_ctl_add(chip->card, snd_ctl_new1(&os_128_control, chip)); - if (err < 0) - return err; + if (!data->broken_i2c) { + err = snd_ctl_add(chip->card, + snd_ctl_new1(&rolloff_control, chip)); + if (err < 0) + return err; + } return 0; } @@ -956,7 +916,15 @@ static int xonar_d2_mixer_init(struct oxygen *chip) static int xonar_hdav_mixer_init(struct oxygen *chip) { - return add_pcm1796_controls(chip); + int err; + + err = snd_ctl_add(chip->card, snd_ctl_new1(&hdav_hdmi_control, chip)); + if (err < 0) + return err; + err = add_pcm1796_controls(chip); + if (err < 0) + return err; + return 0; } static int xonar_st_mixer_init(struct oxygen *chip) @@ -976,6 +944,45 @@ static int xonar_st_mixer_init(struct oxygen *chip) return 0; } +static void dump_pcm1796_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct xonar_pcm179x *data = chip->model_data; + unsigned int dac, i; + + for (dac = 0; dac < data->dacs; ++dac) { + snd_iprintf(buffer, "\nPCM1796 %u:", dac + 1); + for (i = 0; i < 5; ++i) + snd_iprintf(buffer, " %02x", + data->pcm1796_regs[dac][i]); + } + snd_iprintf(buffer, "\n"); +} + +static void dump_cs2000_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct xonar_pcm179x *data = chip->model_data; + unsigned int i; + 
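
The "HDMI Playback Switch" added above reuses the generic xonar_gpio_bit_switch_get/put callbacks; private_value packs the GPIO mask in its low 16 bits together with XONAR_GPIO_BIT_INVERT, so the same pair of callbacks can also expose a bit whose logical sense is opposite to the GPIO level. A reduced sketch of that logic, using a shadow variable and a hypothetical MY_GPIO_INVERT flag in place of the real GPIO data register and constant:

#include <linux/types.h>
#include <sound/core.h>
#include <sound/control.h>

#define MY_GPIO_INVERT	0x10000		/* hypothetical flag above the 16-bit GPIO mask */

static u16 my_gpio_shadow;		/* stands in for the real GPIO data register */

static int my_gpio_switch_get(struct snd_kcontrol *ctl,
			      struct snd_ctl_elem_value *value)
{
	u16 bit = ctl->private_value;
	bool invert = ctl->private_value & MY_GPIO_INVERT;

	/* report "on" when the pin level XORed with the invert flag is set */
	value->value.integer.value[0] = !!(my_gpio_shadow & bit) ^ invert;
	return 0;
}

static int my_gpio_switch_put(struct snd_kcontrol *ctl,
			      struct snd_ctl_elem_value *value)
{
	u16 bit = ctl->private_value;
	bool invert = ctl->private_value & MY_GPIO_INVERT;
	u16 old_bits = my_gpio_shadow, new_bits;

	if (!!value->value.integer.value[0] ^ invert)
		new_bits = old_bits | bit;
	else
		new_bits = old_bits & ~bit;
	my_gpio_shadow = new_bits;
	return new_bits != old_bits;	/* 1 = value changed */
}

The real callbacks do the same thing against OXYGEN_GPIO_DATA under the register spinlock; the sketch drops the locking and hardware access to show only the bit/invert arithmetic.
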
+ if (data->has_cs2000) { + snd_iprintf(buffer, "\nCS2000:\n00: "); + for (i = 1; i < 0x10; ++i) + snd_iprintf(buffer, " %02x", data->cs2000_regs[i]); + snd_iprintf(buffer, "\n10:"); + for (i = 0x10; i < 0x1f; ++i) + snd_iprintf(buffer, " %02x", data->cs2000_regs[i]); + snd_iprintf(buffer, "\n"); + } +} + +static void dump_st_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + dump_pcm1796_registers(chip, buffer); + dump_cs2000_registers(chip, buffer); +} + static const struct oxygen_model model_xonar_d2 = { .longname = "Asus Virtuoso 200", .chip = "AV200", @@ -985,11 +992,11 @@ static const struct oxygen_model model_xonar_d2 = { .cleanup = xonar_d2_cleanup, .suspend = xonar_d2_suspend, .resume = xonar_d2_resume, - .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_pcm1796_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_pcm1796_volume, .update_dac_mute = update_pcm1796_mute, + .dump_registers = dump_pcm1796_registers, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_pcm179x), .device_config = PLAYBACK_0_TO_I2S | @@ -999,13 +1006,16 @@ static const struct oxygen_model model_xonar_d2 = { MIDI_OUTPUT | MIDI_INPUT | AC97_CD_INPUT, - .dac_channels = 8, + .dac_channels_pcm = 8, + .dac_channels_mixer = 8, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .misc_flags = OXYGEN_MISC_MIDI, .function_flags = OXYGEN_FUNCTION_SPI | OXYGEN_FUNCTION_ENABLE_SPI_4_5, - .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, + .dac_mclks = OXYGEN_MCLKS(512, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 128, 128), + .dac_i2s_format = OXYGEN_I2S_FORMAT_I2S, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; @@ -1018,25 +1028,28 @@ static const struct oxygen_model model_xonar_hdav = { .suspend = xonar_hdav_suspend, .resume = xonar_hdav_resume, .pcm_hardware_filter = xonar_hdmi_pcm_hardware_filter, - .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_hdav_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_pcm1796_volume, .update_dac_mute = update_pcm1796_mute, .uart_input = xonar_hdmi_uart_input, .ac97_switch = xonar_line_mic_ac97_switch, + .dump_registers = dump_pcm1796_registers, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_hdav), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2 | CAPTURE_1_FROM_SPDIF, - .dac_channels = 8, + .dac_channels_pcm = 8, + .dac_channels_mixer = 2, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .misc_flags = OXYGEN_MISC_MIDI, .function_flags = OXYGEN_FUNCTION_2WIRE, - .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, + .dac_mclks = OXYGEN_MCLKS(512, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 128, 128), + .dac_i2s_format = OXYGEN_I2S_FORMAT_I2S, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; @@ -1048,22 +1061,26 @@ static const struct oxygen_model model_xonar_st = { .cleanup = xonar_st_cleanup, .suspend = xonar_st_suspend, .resume = xonar_st_resume, - .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_st_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_pcm1796_volume, .update_dac_mute = update_pcm1796_mute, .ac97_switch = xonar_line_mic_ac97_switch, + .dump_registers = dump_st_registers, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_pcm179x), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | - CAPTURE_0_FROM_I2S_2, - .dac_channels = 2, + CAPTURE_0_FROM_I2S_2 | + AC97_FMIC_SWITCH, + .dac_channels_pcm = 2, + .dac_channels_mixer = 2, .dac_volume_min = 255 - 2*60, 
.dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_2WIRE, - .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, + .dac_mclks = OXYGEN_MCLKS(512, 128, 128), + .adc_mclks = OXYGEN_MCLKS(256, 128, 128), + .dac_i2s_format = OXYGEN_I2S_FORMAT_I2S, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; @@ -1089,7 +1106,8 @@ int __devinit get_xonar_pcm179x_model(struct oxygen *chip, break; case GPIO_DB_H6: chip->model.shortname = "Xonar HDAV1.3+H6"; - chip->model.private_data = 1; + chip->model.dac_channels_mixer = 8; + chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128); break; } break; @@ -1102,8 +1120,10 @@ int __devinit get_xonar_pcm179x_model(struct oxygen *chip, break; case GPIO_DB_H6: chip->model.shortname = "Xonar ST+H6"; - chip->model.dac_channels = 8; - chip->model.private_data = 1; + chip->model.control_filter = xonar_st_h6_control_filter; + chip->model.dac_channels_pcm = 8; + chip->model.dac_channels_mixer = 8; + chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128); break; } break; @@ -1114,9 +1134,6 @@ int __devinit get_xonar_pcm179x_model(struct oxygen *chip, chip->model.resume = xonar_stx_resume; chip->model.set_dac_params = set_pcm1796_params; break; - case 0x835e: - snd_printk(KERN_ERR "the HDAV1.3 Slim is not supported\n"); - return -ENODEV; default: return -EINVAL; } diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c index 200f7601276f..42d1ab136217 100644 --- a/sound/pci/oxygen/xonar_wm87x6.c +++ b/sound/pci/oxygen/xonar_wm87x6.c @@ -1,5 +1,5 @@ /* - * card driver for models with WM8776/WM8766 DACs (Xonar DS) + * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim) * * Copyright (c) Clemens Ladisch * @@ -22,26 +22,48 @@ * * CMI8788: * - * SPI 0 -> WM8766 (surround, center/LFE, back) - * SPI 1 -> WM8776 (front, input) + * SPI 0 -> WM8766 (surround, center/LFE, back) + * SPI 1 -> WM8776 (front, input) * - * GPIO 4 <- headphone detect, 0 = plugged - * GPIO 6 -> route input jack to mic-in (0) or line-in (1) - * GPIO 7 -> enable output to front L/R speaker channels - * GPIO 8 -> enable output to other speaker channels and front panel headphone + * GPIO 4 <- headphone detect, 0 = plugged + * GPIO 6 -> route input jack to mic-in (0) or line-in (1) + * GPIO 7 -> enable output to front L/R speaker channels + * GPIO 8 -> enable output to other speaker channels and front panel headphone * - * WM8766: + * WM8776: * - * input 1 <- line - * input 2 <- mic - * input 3 <- front mic - * input 4 <- aux + * input 1 <- line + * input 2 <- mic + * input 3 <- front mic + * input 4 <- aux + */ + +/* + * Xonar HDAV1.3 Slim + * ------------------ + * + * CMI8788: + * + * I²C <-> WM8776 (addr 0011010) + * + * GPIO 0 -> disable HDMI output + * GPIO 1 -> enable HP output + * GPIO 6 -> firmware EEPROM I²C clock + * GPIO 7 <-> firmware EEPROM I²C data + * + * UART <-> HDMI controller + * + * WM8776: + * + * input 1 <- mic + * input 2 <- aux */ #include #include #include #include +#include #include #include #include @@ -55,6 +77,13 @@ #define GPIO_DS_OUTPUT_FRONTLR 0x0080 #define GPIO_DS_OUTPUT_ENABLE 0x0100 +#define GPIO_SLIM_HDMI_DISABLE 0x0001 +#define GPIO_SLIM_OUTPUT_ENABLE 0x0002 +#define GPIO_SLIM_FIRMWARE_CLK 0x0040 +#define GPIO_SLIM_FIRMWARE_DATA 0x0080 + +#define I2C_DEVICE_WM8776 0x34 /* 001101, 0, /W=0 */ + #define LC_CONTROL_LIMITER 0x40000000 #define LC_CONTROL_ALC 0x20000000 @@ -66,19 +95,37 @@ struct xonar_wm87x6 { struct snd_kcontrol *mic_adcmux_control; struct snd_kcontrol *lc_controls[13]; struct snd_jack *hp_jack; + struct xonar_hdmi hdmi; }; -static 
void wm8776_write(struct oxygen *chip, - unsigned int reg, unsigned int value) +static void wm8776_write_spi(struct oxygen *chip, + unsigned int reg, unsigned int value) { - struct xonar_wm87x6 *data = chip->model_data; - oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (1 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_LO, (reg << 9) | value); +} + +static void wm8776_write_i2c(struct oxygen *chip, + unsigned int reg, unsigned int value) +{ + oxygen_write_i2c(chip, I2C_DEVICE_WM8776, + (reg << 1) | (value >> 8), value); +} + +static void wm8776_write(struct oxygen *chip, + unsigned int reg, unsigned int value) +{ + struct xonar_wm87x6 *data = chip->model_data; + + if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) == + OXYGEN_FUNCTION_SPI) + wm8776_write_spi(chip, reg, value); + else + wm8776_write_i2c(chip, reg, value); if (reg < ARRAY_SIZE(data->wm8776_regs)) { if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER) value &= ~WM8776_UPDATE; @@ -245,17 +292,50 @@ static void xonar_ds_init(struct oxygen *chip) snd_component_add(chip->card, "WM8766"); } +static void xonar_hdav_slim_init(struct oxygen *chip) +{ + struct xonar_wm87x6 *data = chip->model_data; + + data->generic.anti_pop_delay = 300; + data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE; + + wm8776_init(chip); + + oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, + GPIO_SLIM_HDMI_DISABLE | + GPIO_SLIM_FIRMWARE_CLK | + GPIO_SLIM_FIRMWARE_DATA); + + xonar_hdmi_init(chip, &data->hdmi); + xonar_enable_output(chip); + + snd_component_add(chip->card, "WM8776"); +} + static void xonar_ds_cleanup(struct oxygen *chip) { xonar_disable_output(chip); wm8776_write(chip, WM8776_RESET, 0); } +static void xonar_hdav_slim_cleanup(struct oxygen *chip) +{ + xonar_hdmi_cleanup(chip); + xonar_disable_output(chip); + wm8776_write(chip, WM8776_RESET, 0); + msleep(2); +} + static void xonar_ds_suspend(struct oxygen *chip) { xonar_ds_cleanup(chip); } +static void xonar_hdav_slim_suspend(struct oxygen *chip) +{ + xonar_hdav_slim_cleanup(chip); +} + static void xonar_ds_resume(struct oxygen *chip) { wm8776_registers_init(chip); @@ -264,6 +344,15 @@ static void xonar_ds_resume(struct oxygen *chip) xonar_ds_handle_hp_jack(chip); } +static void xonar_hdav_slim_resume(struct oxygen *chip) +{ + struct xonar_wm87x6 *data = chip->model_data; + + wm8776_registers_init(chip); + xonar_hdmi_resume(chip, &data->hdmi); + xonar_enable_output(chip); +} + static void wm8776_adc_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { @@ -278,6 +367,13 @@ static void wm8776_adc_hardware_filter(unsigned int channel, } } +static void xonar_hdav_slim_hardware_filter(unsigned int channel, + struct snd_pcm_hardware *hardware) +{ + wm8776_adc_hardware_filter(channel, hardware); + xonar_hdmi_pcm_hardware_filter(channel, hardware); +} + static void set_wm87x6_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { @@ -294,6 +390,14 @@ static void set_wm8776_adc_params(struct oxygen *chip, wm8776_write_cached(chip, WM8776_MSTRCTRL, reg); } +static void set_hdav_slim_dac_params(struct oxygen *chip, + struct snd_pcm_hw_params *params) +{ + struct xonar_wm87x6 *data = chip->model_data; + + xonar_set_hdmi_params(chip, &data->hdmi, params); +} + static void update_wm8776_volume(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; @@ -473,11 +577,6 @@ static int wm8776_field_enum_info(struct snd_kcontrol *ctl, const char *const *names; max = (ctl->private_value >> 12) & 0xf; 
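
The wm8776_write() wrapper above picks the transport at run time: boards that wire the WM8776 to SPI (the Xonar DS) go through the SPI helper, while the HDAV1.3 Slim reaches the same codec over I²C, and in either case the value ends up mirrored in wm8776_regs so that wm8776_write_cached() can skip writes that would change nothing. A generic sketch of that bus-dispatch-plus-shadow-cache pattern, with hypothetical my_* transports in place of the oxygen SPI/two-wire routines:

#include <linux/types.h>
#include <linux/kernel.h>

enum my_bus { MY_BUS_SPI, MY_BUS_I2C };

struct my_codec {
	enum my_bus bus;		/* chosen once, per board variant */
	u16 regs[0x17];			/* shadow copies of the codec registers */
};

/* hypothetical low-level transports; the driver uses the controller's
 * SPI and two-wire write routines here */
static void my_spi_write(unsigned int reg, unsigned int value) { }
static void my_i2c_write(unsigned int reg, unsigned int value) { }

static void my_codec_write(struct my_codec *codec,
			   unsigned int reg, unsigned int value)
{
	if (codec->bus == MY_BUS_SPI)
		my_spi_write(reg, value);
	else
		my_i2c_write(reg, value);
	if (reg < ARRAY_SIZE(codec->regs))
		codec->regs[reg] = value;	/* keep the shadow cache in sync */
}

static void my_codec_write_cached(struct my_codec *codec,
				  unsigned int reg, unsigned int value)
{
	/* skip the bus transaction when the shadow copy already matches */
	if (reg < ARRAY_SIZE(codec->regs) && codec->regs[reg] == value)
		return;
	my_codec_write(codec, reg, value);
}

Caching also feeds the new dump_registers callbacks, which print the shadow copies rather than reading the codec back over the bus.
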
- info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = max + 1; - if (info->value.enumerated.item > max) - info->value.enumerated.item = max; switch ((ctl->private_value >> 24) & 0x1f) { case WM8776_ALCCTRL2: names = hld; @@ -501,8 +600,7 @@ static int wm8776_field_enum_info(struct snd_kcontrol *ctl, default: return -ENXIO; } - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, max + 1, names); } static int wm8776_field_volume_info(struct snd_kcontrol *ctl, @@ -759,13 +857,8 @@ static int wm8776_level_control_info(struct snd_kcontrol *ctl, static const char *const names[3] = { "None", "Peak Limiter", "Automatic Level Control" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 3; - if (info->value.enumerated.item >= 3) - info->value.enumerated.item = 2; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + + return snd_ctl_enum_info(info, 1, 3, names); } static int wm8776_level_control_get(struct snd_kcontrol *ctl, @@ -851,13 +944,7 @@ static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) "None", "High-pass Filter" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item >= 2) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) @@ -985,6 +1072,53 @@ static const struct snd_kcontrol_new ds_controls[] = { .private_value = 0, }, }; +static const struct snd_kcontrol_new hdav_slim_controls[] = { + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "HDMI Playback Switch", + .info = snd_ctl_boolean_mono_info, + .get = xonar_gpio_bit_switch_get, + .put = xonar_gpio_bit_switch_put, + .private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT, + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Headphone Playback Volume", + .info = wm8776_hp_vol_info, + .get = wm8776_hp_vol_get, + .put = wm8776_hp_vol_put, + .tlv = { .p = wm8776_hp_db_scale }, + }, + WM8776_BIT_SWITCH("Headphone Playback Switch", + WM8776_PWRDOWN, WM8776_HPPD, 1, 0), + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Input Capture Volume", + .info = wm8776_input_vol_info, + .get = wm8776_input_vol_get, + .put = wm8776_input_vol_put, + .tlv = { .p = wm8776_adc_db_scale }, + }, + WM8776_BIT_SWITCH("Mic Capture Switch", + WM8776_ADCMUX, 1 << 0, 0, 0), + WM8776_BIT_SWITCH("Aux Capture Switch", + WM8776_ADCMUX, 1 << 1, 0, 0), + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "ADC Filter Capture Enum", + .info = hpf_info, + .get = hpf_get, + .put = hpf_put, + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Level Control Capture Enum", + .info = wm8776_level_control_info, + .get = wm8776_level_control_get, + .put = wm8776_level_control_put, + .private_value = 0, + }, +}; static const struct snd_kcontrol_new lc_controls[] = { WM8776_FIELD_CTL_VOLUME("Limiter Threshold", WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf, @@ -1028,6 +1162,26 @@ static const struct snd_kcontrol_new lc_controls[] = { LC_CONTROL_ALC, wm8776_ngth_db_scale), }; +static int add_lc_controls(struct oxygen *chip) +{ + struct xonar_wm87x6 *data = chip->model_data; + unsigned int i; + struct snd_kcontrol *ctl; + int err; + + BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != 
ARRAY_SIZE(data->lc_controls)); + for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { + ctl = snd_ctl_new1(&lc_controls[i], chip); + if (!ctl) + return -ENOMEM; + err = snd_ctl_add(chip->card, ctl); + if (err < 0) + return err; + data->lc_controls[i] = ctl; + } + return 0; +} + static int xonar_ds_mixer_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; @@ -1049,17 +1203,54 @@ static int xonar_ds_mixer_init(struct oxygen *chip) } if (!data->line_adcmux_control || !data->mic_adcmux_control) return -ENXIO; - BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); - for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { - ctl = snd_ctl_new1(&lc_controls[i], chip); + + return add_lc_controls(chip); +} + +static int xonar_hdav_slim_mixer_init(struct oxygen *chip) +{ + unsigned int i; + struct snd_kcontrol *ctl; + int err; + + for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) { + ctl = snd_ctl_new1(&hdav_slim_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; - data->lc_controls[i] = ctl; } - return 0; + + return add_lc_controls(chip); +} + +static void dump_wm8776_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct xonar_wm87x6 *data = chip->model_data; + unsigned int i; + + snd_iprintf(buffer, "\nWM8776:\n00:"); + for (i = 0; i < 0x10; ++i) + snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); + snd_iprintf(buffer, "\n10:"); + for (i = 0x10; i < 0x17; ++i) + snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); + snd_iprintf(buffer, "\n"); +} + +static void dump_wm87x6_registers(struct oxygen *chip, + struct snd_info_buffer *buffer) +{ + struct xonar_wm87x6 *data = chip->model_data; + unsigned int i; + + dump_wm8776_registers(chip, buffer); + snd_iprintf(buffer, "\nWM8766:\n00:"); + for (i = 0; i < 0x10; ++i) + snd_iprintf(buffer, " %03x", data->wm8766_regs[i]); + snd_iprintf(buffer, "\n"); } static const struct oxygen_model model_xonar_ds = { @@ -1072,22 +1263,57 @@ static const struct oxygen_model model_xonar_ds = { .suspend = xonar_ds_suspend, .resume = xonar_ds_resume, .pcm_hardware_filter = wm8776_adc_hardware_filter, - .get_i2s_mclk = oxygen_default_i2s_mclk, .set_dac_params = set_wm87x6_dac_params, .set_adc_params = set_wm8776_adc_params, .update_dac_volume = update_wm87x6_volume, .update_dac_mute = update_wm87x6_mute, .update_center_lfe_mix = update_wm8766_center_lfe_mix, .gpio_changed = xonar_ds_gpio_changed, + .dump_registers = dump_wm87x6_registers, .dac_tlv = wm87x6_dac_db_scale, .model_data_size = sizeof(struct xonar_wm87x6), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_1, - .dac_channels = 8, + .dac_channels_pcm = 8, + .dac_channels_mixer = 8, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_SPI, + .dac_mclks = OXYGEN_MCLKS(256, 256, 128), + .adc_mclks = OXYGEN_MCLKS(256, 256, 128), + .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, + .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, +}; + +static const struct oxygen_model model_xonar_hdav_slim = { + .shortname = "Xonar HDAV1.3 Slim", + .longname = "Asus Virtuoso 200", + .chip = "AV200", + .init = xonar_hdav_slim_init, + .mixer_init = xonar_hdav_slim_mixer_init, + .cleanup = xonar_hdav_slim_cleanup, + .suspend = xonar_hdav_slim_suspend, + .resume = xonar_hdav_slim_resume, + .pcm_hardware_filter = xonar_hdav_slim_hardware_filter, + .set_dac_params = set_hdav_slim_dac_params, + .set_adc_params = set_wm8776_adc_params, + .update_dac_volume = update_wm8776_volume, + 
.update_dac_mute = update_wm8776_mute, + .uart_input = xonar_hdmi_uart_input, + .dump_registers = dump_wm8776_registers, + .dac_tlv = wm87x6_dac_db_scale, + .model_data_size = sizeof(struct xonar_wm87x6), + .device_config = PLAYBACK_0_TO_I2S | + PLAYBACK_1_TO_SPDIF | + CAPTURE_0_FROM_I2S_1, + .dac_channels_pcm = 8, + .dac_channels_mixer = 2, + .dac_volume_min = 255 - 2*60, + .dac_volume_max = 255, + .function_flags = OXYGEN_FUNCTION_2WIRE, + .dac_mclks = OXYGEN_MCLKS(256, 256, 128), + .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; @@ -1099,6 +1325,9 @@ int __devinit get_xonar_wm87x6_model(struct oxygen *chip, case 0x838e: chip->model = model_xonar_ds; break; + case 0x835e: + chip->model = model_xonar_hdav_slim; + break; default: return -EINVAL; } diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 0b720cf7783e..2d8332416c83 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -60,6 +60,7 @@ MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP}," "{RME HDSP-9652}," "{RME HDSP-9632}}"); #ifdef HDSP_FW_LOADER +MODULE_FIRMWARE("rpm_firmware.bin"); MODULE_FIRMWARE("multiface_firmware.bin"); MODULE_FIRMWARE("multiface_firmware_rev11.bin"); MODULE_FIRMWARE("digiface_firmware.bin"); @@ -81,6 +82,7 @@ MODULE_FIRMWARE("digiface_firmware_rev11.bin"); #define H9632_SS_CHANNELS 12 #define H9632_DS_CHANNELS 8 #define H9632_QS_CHANNELS 4 +#define RPM_CHANNELS 6 /* Write registers. These are defined as byte-offsets from the iobase value. */ @@ -191,6 +193,25 @@ MODULE_FIRMWARE("digiface_firmware_rev11.bin"); #define HDSP_PhoneGain1 (1<<30) #define HDSP_QuadSpeed (1<<31) +/* RPM uses some of the registers for special purposes */ +#define HDSP_RPM_Inp12 0x04A00 +#define HDSP_RPM_Inp12_Phon_6dB 0x00800 /* Dolby */ +#define HDSP_RPM_Inp12_Phon_0dB 0x00000 /* .. */ +#define HDSP_RPM_Inp12_Phon_n6dB 0x04000 /* inp_0 */ +#define HDSP_RPM_Inp12_Line_0dB 0x04200 /* Dolby+PRO */ +#define HDSP_RPM_Inp12_Line_n6dB 0x00200 /* PRO */ + +#define HDSP_RPM_Inp34 0x32000 +#define HDSP_RPM_Inp34_Phon_6dB 0x20000 /* SyncRef1 */ +#define HDSP_RPM_Inp34_Phon_0dB 0x00000 /* .. 
*/ +#define HDSP_RPM_Inp34_Phon_n6dB 0x02000 /* SyncRef2 */ +#define HDSP_RPM_Inp34_Line_0dB 0x30000 /* SyncRef1+SyncRef0 */ +#define HDSP_RPM_Inp34_Line_n6dB 0x10000 /* SyncRef0 */ + +#define HDSP_RPM_Bypass 0x01000 + +#define HDSP_RPM_Disconnect 0x00001 + #define HDSP_ADGainMask (HDSP_ADGain0|HDSP_ADGain1) #define HDSP_ADGainMinus10dBV HDSP_ADGainMask #define HDSP_ADGainPlus4dBu (HDSP_ADGain0) @@ -450,7 +471,7 @@ struct hdsp { u32 creg_spdif; u32 creg_spdif_stream; int clock_source_locked; - char *card_name; /* digiface/multiface */ + char *card_name; /* digiface/multiface/rpm */ enum HDSP_IO_Type io_type; /* ditto, but for code use */ unsigned short firmware_rev; unsigned short state; /* stores state bits */ @@ -612,6 +633,7 @@ static int hdsp_playback_to_output_key (struct hdsp *hdsp, int in, int out) switch (hdsp->io_type) { case Multiface: case Digiface: + case RPM: default: if (hdsp->firmware_rev == 0xa) return (64 * out) + (32 + (in)); @@ -629,6 +651,7 @@ static int hdsp_input_to_output_key (struct hdsp *hdsp, int in, int out) switch (hdsp->io_type) { case Multiface: case Digiface: + case RPM: default: if (hdsp->firmware_rev == 0xa) return (64 * out) + in; @@ -655,7 +678,7 @@ static int hdsp_check_for_iobox (struct hdsp *hdsp) { if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return 0; if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_ConfigError) { - snd_printk ("Hammerfall-DSP: no Digiface or Multiface connected!\n"); + snd_printk("Hammerfall-DSP: no IO box connected!\n"); hdsp->state &= ~HDSP_FirmwareLoaded; return -EIO; } @@ -680,7 +703,7 @@ static int hdsp_wait_for_iobox(struct hdsp *hdsp, unsigned int loops, } } - snd_printk("Hammerfall-DSP: no Digiface or Multiface connected!\n"); + snd_printk("Hammerfall-DSP: no IO box connected!\n"); hdsp->state &= ~HDSP_FirmwareLoaded; return -EIO; } @@ -752,17 +775,21 @@ static int hdsp_get_iobox_version (struct hdsp *hdsp) hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD); hdsp_write (hdsp, HDSP_fifoData, 0); - if (hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT)) { - hdsp->io_type = Multiface; - hdsp_write (hdsp, HDSP_control2Reg, HDSP_VERSION_BIT); - hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD); - hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT); + if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT)) { + hdsp_write(hdsp, HDSP_control2Reg, HDSP_VERSION_BIT); + hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD); + if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT)) + hdsp->io_type = RPM; + else + hdsp->io_type = Multiface; } else { hdsp->io_type = Digiface; } } else { /* firmware was already loaded, get iobox type */ - if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1) + if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2) + hdsp->io_type = RPM; + else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1) hdsp->io_type = Multiface; else hdsp->io_type = Digiface; @@ -1184,6 +1211,7 @@ static int hdsp_set_rate(struct hdsp *hdsp, int rate, int called_internally) hdsp->channel_map = channel_map_ds; } else { switch (hdsp->io_type) { + case RPM: case Multiface: hdsp->channel_map = channel_map_mf_ss; break; @@ -3231,6 +3259,318 @@ HDSP_PRECISE_POINTER("Precise Pointer", 0), HDSP_USE_MIDI_TASKLET("Use Midi Tasklet", 0), }; + +static int hdsp_rpm_input12(struct hdsp *hdsp) +{ + switch (hdsp->control_register & HDSP_RPM_Inp12) { + case HDSP_RPM_Inp12_Phon_6dB: + return 0; + case HDSP_RPM_Inp12_Phon_n6dB: + return 2; + case HDSP_RPM_Inp12_Line_0dB: + return 3; + case HDSP_RPM_Inp12_Line_n6dB: + return 4; + } + return 1; +} + + +static int 
snd_hdsp_get_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + + ucontrol->value.enumerated.item[0] = hdsp_rpm_input12(hdsp); + return 0; +} + + +static int hdsp_set_rpm_input12(struct hdsp *hdsp, int mode) +{ + hdsp->control_register &= ~HDSP_RPM_Inp12; + switch (mode) { + case 0: + hdsp->control_register |= HDSP_RPM_Inp12_Phon_6dB; + break; + case 1: + break; + case 2: + hdsp->control_register |= HDSP_RPM_Inp12_Phon_n6dB; + break; + case 3: + hdsp->control_register |= HDSP_RPM_Inp12_Line_0dB; + break; + case 4: + hdsp->control_register |= HDSP_RPM_Inp12_Line_n6dB; + break; + default: + return -1; + } + + hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register); + return 0; +} + + +static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + int change; + int val; + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; + val = ucontrol->value.enumerated.item[0]; + if (val < 0) + val = 0; + if (val > 4) + val = 4; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_rpm_input12(hdsp)) + change = (hdsp_set_rpm_input12(hdsp, val) == 0) ? 1 : 0; + else + change = 0; + spin_unlock_irq(&hdsp->lock); + return change; +} + + +static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) +{ + static char *texts[] = {"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"}; + + uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; + uinfo->count = 1; + uinfo->value.enumerated.items = 5; + if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) + uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; + strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); + return 0; +} + + +static int hdsp_rpm_input34(struct hdsp *hdsp) +{ + switch (hdsp->control_register & HDSP_RPM_Inp34) { + case HDSP_RPM_Inp34_Phon_6dB: + return 0; + case HDSP_RPM_Inp34_Phon_n6dB: + return 2; + case HDSP_RPM_Inp34_Line_0dB: + return 3; + case HDSP_RPM_Inp34_Line_n6dB: + return 4; + } + return 1; +} + + +static int snd_hdsp_get_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + + ucontrol->value.enumerated.item[0] = hdsp_rpm_input34(hdsp); + return 0; +} + + +static int hdsp_set_rpm_input34(struct hdsp *hdsp, int mode) +{ + hdsp->control_register &= ~HDSP_RPM_Inp34; + switch (mode) { + case 0: + hdsp->control_register |= HDSP_RPM_Inp34_Phon_6dB; + break; + case 1: + break; + case 2: + hdsp->control_register |= HDSP_RPM_Inp34_Phon_n6dB; + break; + case 3: + hdsp->control_register |= HDSP_RPM_Inp34_Line_0dB; + break; + case 4: + hdsp->control_register |= HDSP_RPM_Inp34_Line_n6dB; + break; + default: + return -1; + } + + hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register); + return 0; +} + + +static int snd_hdsp_put_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + int change; + int val; + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; + val = ucontrol->value.enumerated.item[0]; + if (val < 0) + val = 0; + if (val > 4) + val = 4; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_rpm_input34(hdsp)) + change = (hdsp_set_rpm_input34(hdsp, val) == 0) ? 
1 : 0; + else + change = 0; + spin_unlock_irq(&hdsp->lock); + return change; +} + + +/* RPM Bypass switch */ +static int hdsp_rpm_bypass(struct hdsp *hdsp) +{ + return (hdsp->control_register & HDSP_RPM_Bypass) ? 1 : 0; +} + + +static int snd_hdsp_get_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + + ucontrol->value.integer.value[0] = hdsp_rpm_bypass(hdsp); + return 0; +} + + +static int hdsp_set_rpm_bypass(struct hdsp *hdsp, int on) +{ + if (on) + hdsp->control_register |= HDSP_RPM_Bypass; + else + hdsp->control_register &= ~HDSP_RPM_Bypass; + hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register); + return 0; +} + + +static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + int change; + unsigned int val; + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; + val = ucontrol->value.integer.value[0] & 1; + spin_lock_irq(&hdsp->lock); + change = (int)val != hdsp_rpm_bypass(hdsp); + hdsp_set_rpm_bypass(hdsp, val); + spin_unlock_irq(&hdsp->lock); + return change; +} + + +static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) +{ + static char *texts[] = {"On", "Off"}; + + uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; + uinfo->count = 1; + uinfo->value.enumerated.items = 2; + if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) + uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; + strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); + return 0; +} + + +/* RPM Disconnect switch */ +static int hdsp_rpm_disconnect(struct hdsp *hdsp) +{ + return (hdsp->control_register & HDSP_RPM_Disconnect) ? 
1 : 0; +} + + +static int snd_hdsp_get_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + + ucontrol->value.integer.value[0] = hdsp_rpm_disconnect(hdsp); + return 0; +} + + +static int hdsp_set_rpm_disconnect(struct hdsp *hdsp, int on) +{ + if (on) + hdsp->control_register |= HDSP_RPM_Disconnect; + else + hdsp->control_register &= ~HDSP_RPM_Disconnect; + hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register); + return 0; +} + + +static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) +{ + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + int change; + unsigned int val; + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; + val = ucontrol->value.integer.value[0] & 1; + spin_lock_irq(&hdsp->lock); + change = (int)val != hdsp_rpm_disconnect(hdsp); + hdsp_set_rpm_disconnect(hdsp, val); + spin_unlock_irq(&hdsp->lock); + return change; +} + +static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) +{ + static char *texts[] = {"On", "Off"}; + + uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; + uinfo->count = 1; + uinfo->value.enumerated.items = 2; + if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) + uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; + strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); + return 0; +} + +static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = { + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "RPM Bypass", + .get = snd_hdsp_get_rpm_bypass, + .put = snd_hdsp_put_rpm_bypass, + .info = snd_hdsp_info_rpm_bypass + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "RPM Disconnect", + .get = snd_hdsp_get_rpm_disconnect, + .put = snd_hdsp_put_rpm_disconnect, + .info = snd_hdsp_info_rpm_disconnect + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Input 1/2", + .get = snd_hdsp_get_rpm_input12, + .put = snd_hdsp_put_rpm_input12, + .info = snd_hdsp_info_rpm_input + }, + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Input 3/4", + .get = snd_hdsp_get_rpm_input34, + .put = snd_hdsp_put_rpm_input34, + .info = snd_hdsp_info_rpm_input + }, + HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0), + HDSP_MIXER("Mixer", 0) +}; + static struct snd_kcontrol_new snd_hdsp_96xx_aeb = HDSP_AEB("Analog Extension Board", 0); static struct snd_kcontrol_new snd_hdsp_adat_sync_check = HDSP_ADAT_SYNC_CHECK; @@ -3240,6 +3580,16 @@ static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp) int err; struct snd_kcontrol *kctl; + if (hdsp->io_type == RPM) { + /* RPM Bypass, Disconnect and Input switches */ + for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_rpm_controls); idx++) { + err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_rpm_controls[idx], hdsp)); + if (err < 0) + return err; + } + return 0; + } + for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) { if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0) return err; @@ -3459,48 +3809,102 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) snd_iprintf(buffer, "\n"); - switch (hdsp_spdif_in(hdsp)) { - case HDSP_SPDIFIN_OPTICAL: - snd_iprintf(buffer, "IEC958 input: Optical\n"); - break; - case HDSP_SPDIFIN_COAXIAL: - snd_iprintf(buffer, "IEC958 input: Coaxial\n"); - break; - case HDSP_SPDIFIN_INTERNAL: - snd_iprintf(buffer, "IEC958 input: Internal\n"); - break; - case HDSP_SPDIFIN_AES: - 
snd_iprintf(buffer, "IEC958 input: AES\n"); - break; - default: - snd_iprintf(buffer, "IEC958 input: ???\n"); - break; + if (hdsp->io_type != RPM) { + switch (hdsp_spdif_in(hdsp)) { + case HDSP_SPDIFIN_OPTICAL: + snd_iprintf(buffer, "IEC958 input: Optical\n"); + break; + case HDSP_SPDIFIN_COAXIAL: + snd_iprintf(buffer, "IEC958 input: Coaxial\n"); + break; + case HDSP_SPDIFIN_INTERNAL: + snd_iprintf(buffer, "IEC958 input: Internal\n"); + break; + case HDSP_SPDIFIN_AES: + snd_iprintf(buffer, "IEC958 input: AES\n"); + break; + default: + snd_iprintf(buffer, "IEC958 input: ???\n"); + break; + } } - if (hdsp->control_register & HDSP_SPDIFOpticalOut) - snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n"); - else - snd_iprintf(buffer, "IEC958 output: Coaxial only\n"); + if (RPM == hdsp->io_type) { + if (hdsp->control_register & HDSP_RPM_Bypass) + snd_iprintf(buffer, "RPM Bypass: disabled\n"); + else + snd_iprintf(buffer, "RPM Bypass: enabled\n"); + if (hdsp->control_register & HDSP_RPM_Disconnect) + snd_iprintf(buffer, "RPM disconnected\n"); + else + snd_iprintf(buffer, "RPM connected\n"); - if (hdsp->control_register & HDSP_SPDIFProfessional) - snd_iprintf(buffer, "IEC958 quality: Professional\n"); - else - snd_iprintf(buffer, "IEC958 quality: Consumer\n"); + switch (hdsp->control_register & HDSP_RPM_Inp12) { + case HDSP_RPM_Inp12_Phon_6dB: + snd_iprintf(buffer, "Input 1/2: Phono, 6dB\n"); + break; + case HDSP_RPM_Inp12_Phon_0dB: + snd_iprintf(buffer, "Input 1/2: Phono, 0dB\n"); + break; + case HDSP_RPM_Inp12_Phon_n6dB: + snd_iprintf(buffer, "Input 1/2: Phono, -6dB\n"); + break; + case HDSP_RPM_Inp12_Line_0dB: + snd_iprintf(buffer, "Input 1/2: Line, 0dB\n"); + break; + case HDSP_RPM_Inp12_Line_n6dB: + snd_iprintf(buffer, "Input 1/2: Line, -6dB\n"); + break; + default: + snd_iprintf(buffer, "Input 1/2: ???\n"); + } - if (hdsp->control_register & HDSP_SPDIFEmphasis) - snd_iprintf(buffer, "IEC958 emphasis: on\n"); - else - snd_iprintf(buffer, "IEC958 emphasis: off\n"); + switch (hdsp->control_register & HDSP_RPM_Inp34) { + case HDSP_RPM_Inp34_Phon_6dB: + snd_iprintf(buffer, "Input 3/4: Phono, 6dB\n"); + break; + case HDSP_RPM_Inp34_Phon_0dB: + snd_iprintf(buffer, "Input 3/4: Phono, 0dB\n"); + break; + case HDSP_RPM_Inp34_Phon_n6dB: + snd_iprintf(buffer, "Input 3/4: Phono, -6dB\n"); + break; + case HDSP_RPM_Inp34_Line_0dB: + snd_iprintf(buffer, "Input 3/4: Line, 0dB\n"); + break; + case HDSP_RPM_Inp34_Line_n6dB: + snd_iprintf(buffer, "Input 3/4: Line, -6dB\n"); + break; + default: + snd_iprintf(buffer, "Input 3/4: ???\n"); + } - if (hdsp->control_register & HDSP_SPDIFNonAudio) - snd_iprintf(buffer, "IEC958 NonAudio: on\n"); - else - snd_iprintf(buffer, "IEC958 NonAudio: off\n"); - if ((x = hdsp_spdif_sample_rate (hdsp)) != 0) - snd_iprintf (buffer, "IEC958 sample rate: %d\n", x); - else - snd_iprintf (buffer, "IEC958 sample rate: Error flag set\n"); + } else { + if (hdsp->control_register & HDSP_SPDIFOpticalOut) + snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n"); + else + snd_iprintf(buffer, "IEC958 output: Coaxial only\n"); + + if (hdsp->control_register & HDSP_SPDIFProfessional) + snd_iprintf(buffer, "IEC958 quality: Professional\n"); + else + snd_iprintf(buffer, "IEC958 quality: Consumer\n"); + + if (hdsp->control_register & HDSP_SPDIFEmphasis) + snd_iprintf(buffer, "IEC958 emphasis: on\n"); + else + snd_iprintf(buffer, "IEC958 emphasis: off\n"); + if (hdsp->control_register & HDSP_SPDIFNonAudio) + snd_iprintf(buffer, "IEC958 NonAudio: on\n"); + else + snd_iprintf(buffer, "IEC958 
NonAudio: off\n"); + x = hdsp_spdif_sample_rate(hdsp); + if (x != 0) + snd_iprintf(buffer, "IEC958 sample rate: %d\n", x); + else + snd_iprintf(buffer, "IEC958 sample rate: Error flag set\n"); + } snd_iprintf(buffer, "\n"); /* Sync Check */ @@ -3765,7 +4169,7 @@ static irqreturn_t snd_hdsp_interrupt(int irq, void *dev_id) snd_hdsp_midi_input_read (&hdsp->midi[0]); } } - if (hdsp->io_type != Multiface && hdsp->io_type != H9632 && midi1 && midi1status) { + if (hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632 && midi1 && midi1status) { if (hdsp->use_midi_tasklet) { /* we disable interrupts for this input until processing is done */ hdsp->control_register &= ~HDSP_Midi1InterruptEnable; @@ -4093,7 +4497,7 @@ static struct snd_pcm_hardware snd_hdsp_playback_subinfo = SNDRV_PCM_RATE_96000), .rate_min = 32000, .rate_max = 96000, - .channels_min = 14, + .channels_min = 6, .channels_max = HDSP_MAX_CHANNELS, .buffer_bytes_max = HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS, .period_bytes_min = (64 * 4) * 10, @@ -4122,7 +4526,7 @@ static struct snd_pcm_hardware snd_hdsp_capture_subinfo = SNDRV_PCM_RATE_96000), .rate_min = 32000, .rate_max = 96000, - .channels_min = 14, + .channels_min = 5, .channels_max = HDSP_MAX_CHANNELS, .buffer_bytes_max = HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS, .period_bytes_min = (64 * 4) * 10, @@ -4357,10 +4761,12 @@ static int snd_hdsp_playback_open(struct snd_pcm_substream *substream) snd_hdsp_hw_rule_rate_out_channels, hdsp, SNDRV_PCM_HW_PARAM_CHANNELS, -1); - hdsp->creg_spdif_stream = hdsp->creg_spdif; - hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE | - SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id); + if (RPM != hdsp->io_type) { + hdsp->creg_spdif_stream = hdsp->creg_spdif; + hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE | + SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id); + } return 0; } @@ -4375,9 +4781,11 @@ static int snd_hdsp_playback_release(struct snd_pcm_substream *substream) spin_unlock_irq(&hdsp->lock); - hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE | - SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id); + if (RPM != hdsp->io_type) { + hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE | + SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id); + } return 0; } @@ -4616,7 +5024,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne if (hdsp->io_type != H9632) info.adatsync_sync_check = (unsigned char)hdsp_adatsync_sync_check(hdsp); info.spdif_sync_check = (unsigned char)hdsp_spdif_sync_check(hdsp); - for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != H9632) ? 3 : 1); ++i) + for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632) ? 
3 : 1); ++i) info.adat_sync_check[i] = (unsigned char)hdsp_adat_sync_check(hdsp, i); info.spdif_in = (unsigned char)hdsp_spdif_in(hdsp); info.spdif_out = (unsigned char)hdsp_spdif_out(hdsp); @@ -4636,6 +5044,9 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne info.phone_gain = (unsigned char)hdsp_phone_gain(hdsp); info.xlr_breakout_cable = (unsigned char)hdsp_xlr_breakout_cable(hdsp); + } else if (hdsp->io_type == RPM) { + info.da_gain = (unsigned char) hdsp_rpm_input12(hdsp); + info.ad_gain = (unsigned char) hdsp_rpm_input34(hdsp); } if (hdsp->io_type == H9632 || hdsp->io_type == H9652) info.analog_extension_board = (unsigned char)hdsp_aeb(hdsp); @@ -4844,6 +5255,14 @@ static void snd_hdsp_initialize_channels(struct hdsp *hdsp) hdsp->ds_in_channels = hdsp->ds_out_channels = MULTIFACE_DS_CHANNELS; break; + case RPM: + hdsp->card_name = "RME Hammerfall DSP + RPM"; + hdsp->ss_in_channels = RPM_CHANNELS-1; + hdsp->ss_out_channels = RPM_CHANNELS; + hdsp->ds_in_channels = RPM_CHANNELS-1; + hdsp->ds_out_channels = RPM_CHANNELS; + break; + default: /* should never get here */ break; @@ -4930,6 +5349,9 @@ static int hdsp_request_fw_loader(struct hdsp *hdsp) /* caution: max length of firmware filename is 30! */ switch (hdsp->io_type) { + case RPM: + fwfile = "rpm_firmware.bin"; + break; case Multiface: if (hdsp->firmware_rev == 0xa) fwfile = "multiface_firmware.bin"; @@ -5100,7 +5522,9 @@ static int __devinit snd_hdsp_create(struct snd_card *card, return 0; } else { snd_printk(KERN_INFO "Hammerfall-DSP: Firmware already present, initializing card.\n"); - if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1) + if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2) + hdsp->io_type = RPM; + else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1) hdsp->io_type = Multiface; else hdsp->io_type = Digiface; diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 0c98ef9156d8..f5eadfc0672a 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -487,7 +487,7 @@ struct hdspm { struct snd_kcontrol *playback_mixer_ctls[HDSPM_MAX_CHANNELS]; /* but input to much, so not used */ struct snd_kcontrol *input_mixer_ctls[HDSPM_MAX_CHANNELS]; - /* full mixer accessable over mixer ioctl or hwdep-device */ + /* full mixer accessible over mixer ioctl or hwdep-device */ struct hdspm_mixer *mixer; }; @@ -550,7 +550,7 @@ static inline int HDSPM_bit2freq(int n) return bit2freq_tab[n]; } -/* Write/read to/from HDSPM with Adresses in Bytes +/* Write/read to/from HDSPM with Addresses in Bytes not words but only 32Bit writes are allowed */ static inline void hdspm_write(struct hdspm * hdspm, unsigned int reg, @@ -2908,7 +2908,7 @@ static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm /* Channel playback mixer as default control Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, - thats too * big for any alsamixer they are accesible via special + thats too * big for any alsamixer they are accessible via special IOCTL on hwdep and the mixer 2dimensional mixer control */ diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c index 5518371db13f..c94c051ad0c8 100644 --- a/sound/pci/ymfpci/ymfpci_main.c +++ b/sound/pci/ymfpci/ymfpci_main.c @@ -1389,15 +1389,9 @@ static struct snd_kcontrol_new snd_ymfpci_spdif_stream __devinitdata = static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { - static char *texts[3] = {"AC'97", "IEC958", "ZV 
Port"}; - - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 3; - if (info->value.enumerated.item > 2) - info->value.enumerated.item = 2; - strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]); - return 0; + static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"}; + + return snd_ctl_enum_info(info, 1, 3, texts); } static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index 581a670e8261..edce8a27e3ee 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -51,7 +51,7 @@ static struct snd_ps3_card_info the_card; static int snd_ps3_start_delay = CONFIG_SND_PS3_DEFAULT_START_DELAY; module_param_named(start_delay, snd_ps3_start_delay, uint, 0644); -MODULE_PARM_DESC(start_delay, "time to insert silent data in milisec"); +MODULE_PARM_DESC(start_delay, "time to insert silent data in ms"); static int index = SNDRV_DEFAULT_IDX1; static char *id = SNDRV_DEFAULT_STR1; diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 3e598e756e54..a3efc52a34da 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -20,6 +20,21 @@ menuconfig SND_SOC if SND_SOC +config SND_SOC_CACHE_LZO + bool "Support LZO compression for register caches" + select LZO_COMPRESS + select LZO_DECOMPRESS + ---help--- + Select this to enable LZO compression for register caches. + This will allow machine or CODEC drivers to compress register + caches in memory, reducing the memory consumption at the + expense of performance. If this is not present and is used + the system will fall back to uncompressed caches. + + Usually it is safe to disable this option, where cache + compression in used the rbtree option will typically perform + better. 
+ config SND_SOC_AC97_BUS bool @@ -36,7 +51,7 @@ source "sound/soc/nuc900/Kconfig" source "sound/soc/omap/Kconfig" source "sound/soc/kirkwood/Kconfig" source "sound/soc/pxa/Kconfig" -source "sound/soc/s3c24xx/Kconfig" +source "sound/soc/samsung/Kconfig" source "sound/soc/s6000/Kconfig" source "sound/soc/sh/Kconfig" source "sound/soc/txx9/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index eb183443eee4..ce913bf5213c 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_SND_SOC) += nuc900/ obj-$(CONFIG_SND_SOC) += omap/ obj-$(CONFIG_SND_SOC) += kirkwood/ obj-$(CONFIG_SND_SOC) += pxa/ -obj-$(CONFIG_SND_SOC) += s3c24xx/ +obj-$(CONFIG_SND_SOC) += samsung/ obj-$(CONFIG_SND_SOC) += s6000/ obj-$(CONFIG_SND_SOC) += sh/ obj-$(CONFIG_SND_SOC) += txx9/ diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c index 5f4e59f4461c..1aac2f4dbcf6 100644 --- a/sound/soc/atmel/playpaq_wm8510.c +++ b/sound/soc/atmel/playpaq_wm8510.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -318,27 +317,28 @@ static const struct snd_soc_dapm_route intercon[] = { static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int i; /* * Add DAPM widgets */ for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++) - snd_soc_dapm_new_control(codec, &playpaq_dapm_widgets[i]); + snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]); /* * Setup audio path interconnects */ - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); /* always connected pins */ - snd_soc_dapm_enable_pin(codec, "Int Mic"); - snd_soc_dapm_enable_pin(codec, "Ext Spk"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_enable_pin(dapm, "Int Mic"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); + snd_soc_dapm_sync(dapm); diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index e521ada80542..af3c73053ee4 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include @@ -140,6 +139,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dai *codec_dai = rtd->codec_dai; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; printk(KERN_DEBUG @@ -154,25 +154,25 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd) } /* Add specific widgets */ - snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets, + snd_soc_dapm_new_controls(dapm, at91sam9g20ek_dapm_widgets, ARRAY_SIZE(at91sam9g20ek_dapm_widgets)); /* Set up specific audio path interconnects */ - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); /* not connected */ - snd_soc_dapm_nc_pin(codec, "RLINEIN"); - snd_soc_dapm_nc_pin(codec, "LLINEIN"); + snd_soc_dapm_nc_pin(dapm, "RLINEIN"); + snd_soc_dapm_nc_pin(dapm, "LLINEIN"); #ifdef ENABLE_MIC_INPUT - snd_soc_dapm_enable_pin(codec, "Int Mic"); + snd_soc_dapm_enable_pin(dapm, "Int Mic"); #else - snd_soc_dapm_nc_pin(codec, "Int Mic"); + snd_soc_dapm_nc_pin(dapm, "Int Mic"); #endif /* always connected */ - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git 
a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c index 86e0f8586dc3..da2208e06b0d 100644 --- a/sound/soc/atmel/snd-soc-afeb9260.c +++ b/sound/soc/atmel/snd-soc-afeb9260.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include @@ -105,19 +104,20 @@ static const struct snd_soc_dapm_route audio_map[] = { static int afeb9260_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; /* Add afeb9260 specific widgets */ - snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets, + snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets, ARRAY_SIZE(tlv320aic23_dapm_widgets)); /* Set up afeb9260 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Line In"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c index b62fcd33e586..cb99f04abe88 100644 --- a/sound/soc/au1x/db1200.c +++ b/sound/soc/au1x/db1200.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c index 2394bff2b655..83012da9dfc2 100644 --- a/sound/soc/blackfin/bf5xx-ad1836.c +++ b/sound/soc/blackfin/bf5xx-ad1836.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c index e4a625317a1a..d3ccb926b5e4 100644 --- a/sound/soc/blackfin/bf5xx-ad193x.c +++ b/sound/soc/blackfin/bf5xx-ad193x.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c index 900ced54ac79..732fb8bad076 100644 --- a/sound/soc/blackfin/bf5xx-ad73311.c +++ b/sound/soc/blackfin/bf5xx-ad73311.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c index 36f2769eb912..e902b24c1856 100644 --- a/sound/soc/blackfin/bf5xx-ssm2602.c +++ b/sound/soc/blackfin/bf5xx-ssm2602.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c index 01d19e9f53f9..06b6981b8d6d 100644 --- a/sound/soc/codecs/88pm860x-codec.c +++ b/sound/soc/codecs/88pm860x-codec.c @@ -19,10 +19,10 @@ #include #include #include -#include #include #include #include +#include #include "88pm860x-codec.h" @@ -146,7 +146,6 @@ struct pm860x_priv { int irq[4]; unsigned char name[4][MAX_NAME_LEN]; - unsigned char reg_cache[REG_CACHE_SIZE]; }; /* -9450dB to 0dB in 150dB steps ( mute instead of -9450dB) */ @@ -1172,7 +1171,7 @@ static int pm860x_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Enable Audio PLL & Audio section */ data = AUDIO_PLL | AUDIO_SECTION_RESET | AUDIO_SECTION_ON; @@ -1185,7 +1184,7 @@ static int pm860x_set_bias_level(struct snd_soc_codec *codec, 
pm860x_set_bits(codec->control_data, REG_MISC2, data, 0); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1263,6 +1262,12 @@ static irqreturn_t pm860x_codec_handler(int irq, void *data) mask = pm860x->det.hs_shrt | pm860x->det.hook_det | pm860x->det.lo_shrt | pm860x->det.hp_det; +#ifndef CONFIG_SND_SOC_88PM860X_MODULE + if (status & (HEADSET_STATUS | MIC_STATUS | SHORT_HS1 | SHORT_HS2 | + SHORT_LO1 | SHORT_LO2)) + trace_snd_soc_jack_irq(dev_name(pm860x->codec->dev)); +#endif + if ((pm860x->det.hp_det & SND_JACK_HEADPHONE) && (status & HEADSET_STATUS)) report |= SND_JACK_HEADPHONE; @@ -1346,6 +1351,7 @@ EXPORT_SYMBOL_GPL(pm860x_mic_jack_detect); static int pm860x_probe(struct snd_soc_codec *codec) { struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int i, ret; pm860x->codec = codec; @@ -1358,7 +1364,7 @@ static int pm860x_probe(struct snd_soc_codec *codec) pm860x->name[i], pm860x); if (ret < 0) { dev_err(codec->dev, "Failed to request IRQ!\n"); - goto out_irq; + goto out; } } @@ -1369,22 +1375,20 @@ static int pm860x_probe(struct snd_soc_codec *codec) if (ret < 0) { dev_err(codec->dev, "Failed to fill register cache: %d\n", ret); - goto out_codec; + goto out; } snd_soc_add_controls(codec, pm860x_snd_controls, ARRAY_SIZE(pm860x_snd_controls)); - snd_soc_dapm_new_controls(codec, pm860x_dapm_widgets, + snd_soc_dapm_new_controls(dapm, pm860x_dapm_widgets, ARRAY_SIZE(pm860x_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; -out_codec: - i = 3; -out_irq: - for (; i >= 0; i--) +out: + while (--i >= 0) free_irq(pm860x->irq[i], pm860x); - return -EINVAL; + return ret; } static int pm860x_remove(struct snd_soc_codec *codec) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 3b5690d28b8b..883a312bb293 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -22,6 +22,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_AK4535 if I2C select SND_SOC_AK4642 if I2C select SND_SOC_AK4671 if I2C + select SND_SOC_ALC5623 if I2C select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC select SND_SOC_CS42L51 if I2C select SND_SOC_CS4270 if I2C @@ -54,9 +55,11 @@ config SND_SOC_ALL_CODECS select SND_SOC_WM8727 select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI + select SND_SOC_WM8737 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8741 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI + select SND_SOC_WM8770 if SPI_MASTER select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8804 if SND_SOC_I2C_AND_SPI select SND_SOC_WM8900 if I2C @@ -75,6 +78,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_WM8990 if I2C select SND_SOC_WM8993 if I2C select SND_SOC_WM8994 if MFD_WM8994 + select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI select SND_SOC_WM9081 if I2C select SND_SOC_WM9090 if I2C select SND_SOC_WM9705 if SND_SOC_AC97_BUS @@ -130,6 +134,9 @@ config SND_SOC_AK4642 config SND_SOC_AK4671 tristate +config SND_SOC_ALC5623 + tristate + config SND_SOC_CQ0093VC tristate @@ -160,6 +167,9 @@ config SND_SOC_L3 config SND_SOC_DA7210 tristate +config SND_SOC_DMIC + tristate + config SND_SOC_MAX98088 tristate @@ -231,6 +241,9 @@ config SND_SOC_WM8728 config SND_SOC_WM8731 tristate +config SND_SOC_WM8737 + tristate + config SND_SOC_WM8741 tristate @@ -240,6 +253,9 @@ config SND_SOC_WM8750 config 
SND_SOC_WM8753 tristate +config SND_SOC_WM8770 + tristate + config SND_SOC_WM8776 tristate @@ -294,6 +310,9 @@ config SND_SOC_WM8993 config SND_SOC_WM8994 tristate +config SND_SOC_WM8995 + tristate + config SND_SOC_WM9081 tristate @@ -318,3 +337,4 @@ config SND_SOC_WM2000 config SND_SOC_WM9090 tristate + diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index f67a2d6f7a46..579af9c4f128 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -14,9 +14,11 @@ snd-soc-cs42l51-objs := cs42l51.o snd-soc-cs4270-objs := cs4270.o snd-soc-cx20442-objs := cx20442.o snd-soc-da7210-objs := da7210.o +snd-soc-dmic-objs := dmic.o snd-soc-l3-objs := l3.o snd-soc-max98088-objs := max98088.o snd-soc-pcm3008-objs := pcm3008.o +snd-soc-alc5623-objs := alc5623.o snd-soc-spdif-objs := spdif_transciever.o snd-soc-ssm2602-objs := ssm2602.o snd-soc-stac9766-objs := stac9766.o @@ -38,9 +40,11 @@ snd-soc-wm8711-objs := wm8711.o snd-soc-wm8727-objs := wm8727.o snd-soc-wm8728-objs := wm8728.o snd-soc-wm8731-objs := wm8731.o +snd-soc-wm8737-objs := wm8737.o snd-soc-wm8741-objs := wm8741.o snd-soc-wm8750-objs := wm8750.o snd-soc-wm8753-objs := wm8753.o +snd-soc-wm8770-objs := wm8770.o snd-soc-wm8776-objs := wm8776.o snd-soc-wm8804-objs := wm8804.o snd-soc-wm8900-objs := wm8900.o @@ -58,7 +62,8 @@ snd-soc-wm8985-objs := wm8985.o snd-soc-wm8988-objs := wm8988.o snd-soc-wm8990-objs := wm8990.o snd-soc-wm8993-objs := wm8993.o -snd-soc-wm8994-objs := wm8994.o +snd-soc-wm8994-objs := wm8994.o wm8994-tables.o +snd-soc-wm8995-objs := wm8995.o snd-soc-wm9081-objs := wm9081.o snd-soc-wm9705-objs := wm9705.o snd-soc-wm9712-objs := wm9712.o @@ -88,10 +93,12 @@ obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o +obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o obj-$(CONFIG_SND_SOC_MAX98088) += snd-soc-max98088.o obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o +obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o @@ -113,9 +120,11 @@ obj-$(CONFIG_SND_SOC_WM8711) += snd-soc-wm8711.o obj-$(CONFIG_SND_SOC_WM8727) += snd-soc-wm8727.o obj-$(CONFIG_SND_SOC_WM8728) += snd-soc-wm8728.o obj-$(CONFIG_SND_SOC_WM8731) += snd-soc-wm8731.o +obj-$(CONFIG_SND_SOC_WM8737) += snd-soc-wm8737.o obj-$(CONFIG_SND_SOC_WM8741) += snd-soc-wm8741.o obj-$(CONFIG_SND_SOC_WM8750) += snd-soc-wm8750.o obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o +obj-$(CONFIG_SND_SOC_WM8770) += snd-soc-wm8770.o obj-$(CONFIG_SND_SOC_WM8776) += snd-soc-wm8776.o obj-$(CONFIG_SND_SOC_WM8804) += snd-soc-wm8804.o obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o @@ -134,6 +143,7 @@ obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o obj-$(CONFIG_SND_SOC_WM8994) += snd-soc-wm8994.o +obj-$(CONFIG_SND_SOC_WM8995) += snd-soc-wm8995.o obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c index d272534c8f84..ab63d52e36e1 100644 --- a/sound/soc/codecs/ad1836.c +++ b/sound/soc/codecs/ad1836.c @@ -27,7 +27,6 @@ 
#include #include #include -#include #include #include "ad1836.h" @@ -220,6 +219,7 @@ static struct snd_soc_dai_driver ad1836_dai = { static int ad1836_probe(struct snd_soc_codec *codec) { struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret = 0; codec->control_data = ad1836->control_data; @@ -227,7 +227,6 @@ static int ad1836_probe(struct snd_soc_codec *codec) if (ret < 0) { dev_err(codec->dev, "failed to set cache I/O: %d\n", ret); - kfree(ad1836); return ret; } @@ -252,9 +251,9 @@ static int ad1836_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, ad1836_snd_controls, ARRAY_SIZE(ad1836_snd_controls)); - snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets, + snd_soc_dapm_new_controls(dapm, ad1836_dapm_widgets, ARRAY_SIZE(ad1836_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths)); + snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths)); return ret; } diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c index fa2834c91b9f..da46479bfcfa 100644 --- a/sound/soc/codecs/ad193x.c +++ b/sound/soc/codecs/ad193x.c @@ -19,12 +19,10 @@ #include #include #include -#include #include "ad193x.h" /* codec private data */ struct ad193x_priv { - u8 reg_cache[AD193X_NUM_REGS]; enum snd_soc_control_type bus_type; void *control_data; int sysclk; @@ -353,6 +351,7 @@ static struct snd_soc_dai_driver ad193x_dai = { static int ad193x_probe(struct snd_soc_codec *codec) { struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; codec->control_data = ad193x->control_data; @@ -363,7 +362,6 @@ static int ad193x_probe(struct snd_soc_codec *codec) if (ret < 0) { dev_err(codec->dev, "failed to set cache I/O: %d\n", ret); - kfree(ad193x); return ret; } @@ -385,9 +383,9 @@ static int ad193x_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, ad193x_snd_controls, ARRAY_SIZE(ad193x_snd_controls)); - snd_soc_dapm_new_controls(codec, ad193x_dapm_widgets, + snd_soc_dapm_new_controls(dapm, ad193x_dapm_widgets, ARRAY_SIZE(ad193x_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths)); + snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths)); return ret; } diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c index 410ccd5d41cd..34cb51ef2156 100644 --- a/sound/soc/codecs/ad1980.c +++ b/sound/soc/codecs/ad1980.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "ad1980.h" diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c index cd88c8f32a38..8b38739c88f8 100644 --- a/sound/soc/codecs/ak4535.c +++ b/sound/soc/codecs/ak4535.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include "ak4535.h" @@ -290,10 +289,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int ak4535_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, ak4535_dapm_widgets, - ARRAY_SIZE(ak4535_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, ak4535_dapm_widgets, + ARRAY_SIZE(ak4535_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -366,9 +366,9 @@ static int ak4535_set_dai_fmt(struct snd_soc_dai *codec_dai, static int ak4535_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; - u16 mute_reg = 
ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf; + u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC); if (!mute) - ak4535_write(codec, AK4535_DAC, mute_reg); + ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20); else ak4535_write(codec, AK4535_DAC, mute_reg | 0x20); return 0; @@ -381,11 +381,11 @@ static int ak4535_set_bias_level(struct snd_soc_codec *codec, switch (level) { case SND_SOC_BIAS_ON: - mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf; - ak4535_write(codec, AK4535_DAC, mute_reg); + mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC); + ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20); break; case SND_SOC_BIAS_PREPARE: - mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf; + mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC); ak4535_write(codec, AK4535_DAC, mute_reg | 0x20); break; case SND_SOC_BIAS_STANDBY: @@ -399,7 +399,7 @@ static int ak4535_set_bias_level(struct snd_soc_codec *codec, ak4535_write(codec, AK4535_PM1, i & (~0x80)); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c index 90c90b7f4a2e..f00eba313dfd 100644 --- a/sound/soc/codecs/ak4642.c +++ b/sound/soc/codecs/ak4642.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c index 24f5f49bb9d2..2ec75abfa3e9 100644 --- a/sound/soc/codecs/ak4671.c +++ b/sound/soc/codecs/ak4671.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -28,7 +27,6 @@ struct ak4671_priv { enum snd_soc_control_type control_type; void *control_data; - u8 reg_cache[AK4671_CACHEREGNUM]; }; /* ak4671 register cache & default register settings */ @@ -437,10 +435,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int ak4671_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, ak4671_dapm_widgets, - ARRAY_SIZE(ak4671_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, ak4671_dapm_widgets, + ARRAY_SIZE(ak4671_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -602,7 +601,7 @@ static int ak4671_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c new file mode 100644 index 000000000000..4f377c9e868d --- /dev/null +++ b/sound/soc/codecs/alc5623.c @@ -0,0 +1,1117 @@ +/* + * alc5623.c -- alc562[123] ALSA Soc Audio driver + * + * Copyright 2008 Realtek Microelectronics + * Author: flove Ethan + * + * Copyright 2010 Arnaud Patard + * + * + * Based on WM8753.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "alc5623.h" + +static int caps_charge = 2000; +module_param(caps_charge, int, 0); +MODULE_PARM_DESC(caps_charge, "ALC5623 cap charge time (msecs)"); + +/* codec private data */ +struct alc5623_priv { + enum snd_soc_control_type control_type; + void *control_data; + struct mutex mutex; + u8 id; + unsigned int sysclk; + u16 reg_cache[ALC5623_VENDOR_ID2+2]; + unsigned int add_ctrl; + unsigned int jack_det_ctrl; +}; + +static void alc5623_fill_cache(struct snd_soc_codec *codec) +{ + int i, step = codec->driver->reg_cache_step; + u16 *cache = codec->reg_cache; + + /* not really efficient ... */ + for (i = 0 ; i < codec->driver->reg_cache_size ; i += step) + cache[i] = codec->hw_read(codec, i); +} + +static inline int alc5623_reset(struct snd_soc_codec *codec) +{ + return snd_soc_write(codec, ALC5623_RESET, 0); +} + +static int amp_mixer_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + /* to power-on/off class-d amp generators/speaker */ + /* need to write to 'index-46h' register : */ + /* so write index num (here 0x46) to reg 0x6a */ + /* and then 0xffff/0 to reg 0x6c */ + snd_soc_write(w->codec, ALC5623_HID_CTRL_INDEX, 0x46); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0xFFFF); + break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0); + break; + } + + return 0; +} + +/* + * ALC5623 Controls + */ + +static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0); +static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0); +static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0); +static const unsigned int boost_tlv[] = { + TLV_DB_RANGE_HEAD(3), + 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), + 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), + 2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0), +}; +static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0); + +static const struct snd_kcontrol_new rt5621_vol_snd_controls[] = { + SOC_DOUBLE_TLV("Speaker Playback Volume", + ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Speaker Playback Switch", + ALC5623_SPK_OUT_VOL, 15, 7, 1, 1), + SOC_DOUBLE_TLV("Headphone Playback Volume", + ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Headphone Playback Switch", + ALC5623_HP_OUT_VOL, 15, 7, 1, 1), +}; + +static const struct snd_kcontrol_new rt5622_vol_snd_controls[] = { + SOC_DOUBLE_TLV("Speaker Playback Volume", + ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Speaker Playback Switch", + ALC5623_SPK_OUT_VOL, 15, 7, 1, 1), + SOC_DOUBLE_TLV("Line Playback Volume", + ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Line Playback Switch", + ALC5623_HP_OUT_VOL, 15, 7, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_vol_snd_controls[] = { + SOC_DOUBLE_TLV("Line Playback Volume", + ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Line Playback Switch", + ALC5623_SPK_OUT_VOL, 15, 7, 1, 1), + SOC_DOUBLE_TLV("Headphone Playback Volume", + ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Headphone Playback Switch", + ALC5623_HP_OUT_VOL, 15, 7, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_snd_controls[] = { + SOC_DOUBLE_TLV("Auxout Playback Volume", + ALC5623_MONO_AUX_OUT_VOL, 8, 0, 31, 1, hp_tlv), + SOC_DOUBLE("Auxout Playback Switch", + ALC5623_MONO_AUX_OUT_VOL, 15, 7, 1, 1), + SOC_DOUBLE_TLV("PCM Playback Volume", + ALC5623_STEREO_DAC_VOL, 
8, 0, 31, 1, vol_tlv), + SOC_DOUBLE_TLV("AuxI Capture Volume", + ALC5623_AUXIN_VOL, 8, 0, 31, 1, vol_tlv), + SOC_DOUBLE_TLV("LineIn Capture Volume", + ALC5623_LINE_IN_VOL, 8, 0, 31, 1, vol_tlv), + SOC_SINGLE_TLV("Mic1 Capture Volume", + ALC5623_MIC_VOL, 8, 31, 1, vol_tlv), + SOC_SINGLE_TLV("Mic2 Capture Volume", + ALC5623_MIC_VOL, 0, 31, 1, vol_tlv), + SOC_DOUBLE_TLV("Rec Capture Volume", + ALC5623_ADC_REC_GAIN, 7, 0, 31, 0, adc_rec_tlv), + SOC_SINGLE_TLV("Mic 1 Boost Volume", + ALC5623_MIC_CTRL, 10, 2, 0, boost_tlv), + SOC_SINGLE_TLV("Mic 2 Boost Volume", + ALC5623_MIC_CTRL, 8, 2, 0, boost_tlv), + SOC_SINGLE_TLV("Digital Boost Volume", + ALC5623_ADD_CTRL_REG, 4, 3, 0, dig_tlv), +}; + +/* + * DAPM Controls + */ +static const struct snd_kcontrol_new alc5623_hp_mixer_controls[] = { +SOC_DAPM_SINGLE("LI2HP Playback Switch", ALC5623_LINE_IN_VOL, 15, 1, 1), +SOC_DAPM_SINGLE("AUXI2HP Playback Switch", ALC5623_AUXIN_VOL, 15, 1, 1), +SOC_DAPM_SINGLE("MIC12HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 15, 1, 1), +SOC_DAPM_SINGLE("MIC22HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 7, 1, 1), +SOC_DAPM_SINGLE("DAC2HP Playback Switch", ALC5623_STEREO_DAC_VOL, 15, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_hpl_mixer_controls[] = { +SOC_DAPM_SINGLE("ADC2HP_L Playback Switch", ALC5623_ADC_REC_GAIN, 15, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_hpr_mixer_controls[] = { +SOC_DAPM_SINGLE("ADC2HP_R Playback Switch", ALC5623_ADC_REC_GAIN, 14, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_mono_mixer_controls[] = { +SOC_DAPM_SINGLE("ADC2MONO_L Playback Switch", ALC5623_ADC_REC_GAIN, 13, 1, 1), +SOC_DAPM_SINGLE("ADC2MONO_R Playback Switch", ALC5623_ADC_REC_GAIN, 12, 1, 1), +SOC_DAPM_SINGLE("LI2MONO Playback Switch", ALC5623_LINE_IN_VOL, 13, 1, 1), +SOC_DAPM_SINGLE("AUXI2MONO Playback Switch", ALC5623_AUXIN_VOL, 13, 1, 1), +SOC_DAPM_SINGLE("MIC12MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 13, 1, 1), +SOC_DAPM_SINGLE("MIC22MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 5, 1, 1), +SOC_DAPM_SINGLE("DAC2MONO Playback Switch", ALC5623_STEREO_DAC_VOL, 13, 1, 1), +}; + +static const struct snd_kcontrol_new alc5623_speaker_mixer_controls[] = { +SOC_DAPM_SINGLE("LI2SPK Playback Switch", ALC5623_LINE_IN_VOL, 14, 1, 1), +SOC_DAPM_SINGLE("AUXI2SPK Playback Switch", ALC5623_AUXIN_VOL, 14, 1, 1), +SOC_DAPM_SINGLE("MIC12SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 14, 1, 1), +SOC_DAPM_SINGLE("MIC22SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 6, 1, 1), +SOC_DAPM_SINGLE("DAC2SPK Playback Switch", ALC5623_STEREO_DAC_VOL, 14, 1, 1), +}; + +/* Left Record Mixer */ +static const struct snd_kcontrol_new alc5623_captureL_mixer_controls[] = { +SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 14, 1, 1), +SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 13, 1, 1), +SOC_DAPM_SINGLE("LineInL Capture Switch", ALC5623_ADC_REC_MIXER, 12, 1, 1), +SOC_DAPM_SINGLE("Left AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 11, 1, 1), +SOC_DAPM_SINGLE("HPMixerL Capture Switch", ALC5623_ADC_REC_MIXER, 10, 1, 1), +SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 9, 1, 1), +SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 8, 1, 1), +}; + +/* Right Record Mixer */ +static const struct snd_kcontrol_new alc5623_captureR_mixer_controls[] = { +SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 6, 1, 1), +SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 5, 1, 1), +SOC_DAPM_SINGLE("LineInR Capture Switch", 
ALC5623_ADC_REC_MIXER, 4, 1, 1), +SOC_DAPM_SINGLE("Right AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 3, 1, 1), +SOC_DAPM_SINGLE("HPMixerR Capture Switch", ALC5623_ADC_REC_MIXER, 2, 1, 1), +SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 1, 1, 1), +SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 0, 1, 1), +}; + +static const char *alc5623_spk_n_sour_sel[] = { + "RN/-R", "RP/+R", "LN/-R", "Vmid" }; +static const char *alc5623_hpl_out_input_sel[] = { + "Vmid", "HP Left Mix"}; +static const char *alc5623_hpr_out_input_sel[] = { + "Vmid", "HP Right Mix"}; +static const char *alc5623_spkout_input_sel[] = { + "Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"}; +static const char *alc5623_aux_out_input_sel[] = { + "Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"}; + +/* auxout output mux */ +static const struct soc_enum alc5623_aux_out_input_enum = +SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 6, 4, alc5623_aux_out_input_sel); +static const struct snd_kcontrol_new alc5623_auxout_mux_controls = +SOC_DAPM_ENUM("Route", alc5623_aux_out_input_enum); + +/* speaker output mux */ +static const struct soc_enum alc5623_spkout_input_enum = +SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 10, 4, alc5623_spkout_input_sel); +static const struct snd_kcontrol_new alc5623_spkout_mux_controls = +SOC_DAPM_ENUM("Route", alc5623_spkout_input_enum); + +/* headphone left output mux */ +static const struct soc_enum alc5623_hpl_out_input_enum = +SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 9, 2, alc5623_hpl_out_input_sel); +static const struct snd_kcontrol_new alc5623_hpl_out_mux_controls = +SOC_DAPM_ENUM("Route", alc5623_hpl_out_input_enum); + +/* headphone right output mux */ +static const struct soc_enum alc5623_hpr_out_input_enum = +SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 8, 2, alc5623_hpr_out_input_sel); +static const struct snd_kcontrol_new alc5623_hpr_out_mux_controls = +SOC_DAPM_ENUM("Route", alc5623_hpr_out_input_enum); + +/* speaker output N select */ +static const struct soc_enum alc5623_spk_n_sour_enum = +SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 14, 4, alc5623_spk_n_sour_sel); +static const struct snd_kcontrol_new alc5623_spkoutn_mux_controls = +SOC_DAPM_ENUM("Route", alc5623_spk_n_sour_enum); + +static const struct snd_soc_dapm_widget alc5623_dapm_widgets[] = { +/* Muxes */ +SND_SOC_DAPM_MUX("AuxOut Mux", SND_SOC_NOPM, 0, 0, + &alc5623_auxout_mux_controls), +SND_SOC_DAPM_MUX("SpeakerOut Mux", SND_SOC_NOPM, 0, 0, + &alc5623_spkout_mux_controls), +SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, + &alc5623_hpl_out_mux_controls), +SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, + &alc5623_hpr_out_mux_controls), +SND_SOC_DAPM_MUX("SpeakerOut N Mux", SND_SOC_NOPM, 0, 0, + &alc5623_spkoutn_mux_controls), + +/* output mixers */ +SND_SOC_DAPM_MIXER("HP Mix", SND_SOC_NOPM, 0, 0, + &alc5623_hp_mixer_controls[0], + ARRAY_SIZE(alc5623_hp_mixer_controls)), +SND_SOC_DAPM_MIXER("HPR Mix", ALC5623_PWR_MANAG_ADD2, 4, 0, + &alc5623_hpr_mixer_controls[0], + ARRAY_SIZE(alc5623_hpr_mixer_controls)), +SND_SOC_DAPM_MIXER("HPL Mix", ALC5623_PWR_MANAG_ADD2, 5, 0, + &alc5623_hpl_mixer_controls[0], + ARRAY_SIZE(alc5623_hpl_mixer_controls)), +SND_SOC_DAPM_MIXER("HPOut Mix", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_MIXER("Mono Mix", ALC5623_PWR_MANAG_ADD2, 2, 0, + &alc5623_mono_mixer_controls[0], + ARRAY_SIZE(alc5623_mono_mixer_controls)), +SND_SOC_DAPM_MIXER("Speaker Mix", ALC5623_PWR_MANAG_ADD2, 3, 0, + &alc5623_speaker_mixer_controls[0], + 
ARRAY_SIZE(alc5623_speaker_mixer_controls)), + +/* input mixers */ +SND_SOC_DAPM_MIXER("Left Capture Mix", ALC5623_PWR_MANAG_ADD2, 1, 0, + &alc5623_captureL_mixer_controls[0], + ARRAY_SIZE(alc5623_captureL_mixer_controls)), +SND_SOC_DAPM_MIXER("Right Capture Mix", ALC5623_PWR_MANAG_ADD2, 0, 0, + &alc5623_captureR_mixer_controls[0], + ARRAY_SIZE(alc5623_captureR_mixer_controls)), + +SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback", + ALC5623_PWR_MANAG_ADD2, 9, 0), +SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", + ALC5623_PWR_MANAG_ADD2, 8, 0), +SND_SOC_DAPM_MIXER("I2S Mix", ALC5623_PWR_MANAG_ADD1, 15, 0, NULL, 0), +SND_SOC_DAPM_MIXER("AuxI Mix", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_MIXER("Line Mix", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture", + ALC5623_PWR_MANAG_ADD2, 7, 0), +SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture", + ALC5623_PWR_MANAG_ADD2, 6, 0), +SND_SOC_DAPM_PGA("Left Headphone", ALC5623_PWR_MANAG_ADD3, 10, 0, NULL, 0), +SND_SOC_DAPM_PGA("Right Headphone", ALC5623_PWR_MANAG_ADD3, 9, 0, NULL, 0), +SND_SOC_DAPM_PGA("SpeakerOut", ALC5623_PWR_MANAG_ADD3, 12, 0, NULL, 0), +SND_SOC_DAPM_PGA("Left AuxOut", ALC5623_PWR_MANAG_ADD3, 14, 0, NULL, 0), +SND_SOC_DAPM_PGA("Right AuxOut", ALC5623_PWR_MANAG_ADD3, 13, 0, NULL, 0), +SND_SOC_DAPM_PGA("Left LineIn", ALC5623_PWR_MANAG_ADD3, 7, 0, NULL, 0), +SND_SOC_DAPM_PGA("Right LineIn", ALC5623_PWR_MANAG_ADD3, 6, 0, NULL, 0), +SND_SOC_DAPM_PGA("Left AuxI", ALC5623_PWR_MANAG_ADD3, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA("Right AuxI", ALC5623_PWR_MANAG_ADD3, 4, 0, NULL, 0), +SND_SOC_DAPM_PGA("MIC1 PGA", ALC5623_PWR_MANAG_ADD3, 3, 0, NULL, 0), +SND_SOC_DAPM_PGA("MIC2 PGA", ALC5623_PWR_MANAG_ADD3, 2, 0, NULL, 0), +SND_SOC_DAPM_PGA("MIC1 Pre Amp", ALC5623_PWR_MANAG_ADD3, 1, 0, NULL, 0), +SND_SOC_DAPM_PGA("MIC2 Pre Amp", ALC5623_PWR_MANAG_ADD3, 0, 0, NULL, 0), +SND_SOC_DAPM_MICBIAS("Mic Bias1", ALC5623_PWR_MANAG_ADD1, 11, 0), + +SND_SOC_DAPM_OUTPUT("AUXOUTL"), +SND_SOC_DAPM_OUTPUT("AUXOUTR"), +SND_SOC_DAPM_OUTPUT("HPL"), +SND_SOC_DAPM_OUTPUT("HPR"), +SND_SOC_DAPM_OUTPUT("SPKOUT"), +SND_SOC_DAPM_OUTPUT("SPKOUTN"), +SND_SOC_DAPM_INPUT("LINEINL"), +SND_SOC_DAPM_INPUT("LINEINR"), +SND_SOC_DAPM_INPUT("AUXINL"), +SND_SOC_DAPM_INPUT("AUXINR"), +SND_SOC_DAPM_INPUT("MIC1"), +SND_SOC_DAPM_INPUT("MIC2"), +SND_SOC_DAPM_VMID("Vmid"), +}; + +static const char *alc5623_amp_names[] = {"AB Amp", "D Amp"}; +static const struct soc_enum alc5623_amp_enum = + SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 13, 2, alc5623_amp_names); +static const struct snd_kcontrol_new alc5623_amp_mux_controls = + SOC_DAPM_ENUM("Route", alc5623_amp_enum); + +static const struct snd_soc_dapm_widget alc5623_dapm_amp_widgets[] = { +SND_SOC_DAPM_PGA_E("D Amp", ALC5623_PWR_MANAG_ADD2, 14, 0, NULL, 0, + amp_mixer_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_PGA("AB Amp", ALC5623_PWR_MANAG_ADD2, 15, 0, NULL, 0), +SND_SOC_DAPM_MUX("AB-D Amp Mux", SND_SOC_NOPM, 0, 0, + &alc5623_amp_mux_controls), +}; + +static const struct snd_soc_dapm_route intercon[] = { + /* virtual mixer - mixes left & right channels */ + {"I2S Mix", NULL, "Left DAC"}, + {"I2S Mix", NULL, "Right DAC"}, + {"Line Mix", NULL, "Right LineIn"}, + {"Line Mix", NULL, "Left LineIn"}, + {"AuxI Mix", NULL, "Left AuxI"}, + {"AuxI Mix", NULL, "Right AuxI"}, + {"AUXOUTL", NULL, "Left AuxOut"}, + {"AUXOUTR", NULL, "Right AuxOut"}, + + /* HP mixer */ + {"HPL Mix", "ADC2HP_L Playback Switch", "Left Capture Mix"}, + {"HPL Mix", NULL, "HP Mix"}, + {"HPR Mix", "ADC2HP_R Playback 
Switch", "Right Capture Mix"}, + {"HPR Mix", NULL, "HP Mix"}, + {"HP Mix", "LI2HP Playback Switch", "Line Mix"}, + {"HP Mix", "AUXI2HP Playback Switch", "AuxI Mix"}, + {"HP Mix", "MIC12HP Playback Switch", "MIC1 PGA"}, + {"HP Mix", "MIC22HP Playback Switch", "MIC2 PGA"}, + {"HP Mix", "DAC2HP Playback Switch", "I2S Mix"}, + + /* speaker mixer */ + {"Speaker Mix", "LI2SPK Playback Switch", "Line Mix"}, + {"Speaker Mix", "AUXI2SPK Playback Switch", "AuxI Mix"}, + {"Speaker Mix", "MIC12SPK Playback Switch", "MIC1 PGA"}, + {"Speaker Mix", "MIC22SPK Playback Switch", "MIC2 PGA"}, + {"Speaker Mix", "DAC2SPK Playback Switch", "I2S Mix"}, + + /* mono mixer */ + {"Mono Mix", "ADC2MONO_L Playback Switch", "Left Capture Mix"}, + {"Mono Mix", "ADC2MONO_R Playback Switch", "Right Capture Mix"}, + {"Mono Mix", "LI2MONO Playback Switch", "Line Mix"}, + {"Mono Mix", "AUXI2MONO Playback Switch", "AuxI Mix"}, + {"Mono Mix", "MIC12MONO Playback Switch", "MIC1 PGA"}, + {"Mono Mix", "MIC22MONO Playback Switch", "MIC2 PGA"}, + {"Mono Mix", "DAC2MONO Playback Switch", "I2S Mix"}, + + /* Left record mixer */ + {"Left Capture Mix", "LineInL Capture Switch", "LINEINL"}, + {"Left Capture Mix", "Left AuxI Capture Switch", "AUXINL"}, + {"Left Capture Mix", "Mic1 Capture Switch", "MIC1 Pre Amp"}, + {"Left Capture Mix", "Mic2 Capture Switch", "MIC2 Pre Amp"}, + {"Left Capture Mix", "HPMixerL Capture Switch", "HPL Mix"}, + {"Left Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"}, + {"Left Capture Mix", "MonoMixer Capture Switch", "Mono Mix"}, + + /*Right record mixer */ + {"Right Capture Mix", "LineInR Capture Switch", "LINEINR"}, + {"Right Capture Mix", "Right AuxI Capture Switch", "AUXINR"}, + {"Right Capture Mix", "Mic1 Capture Switch", "MIC1 Pre Amp"}, + {"Right Capture Mix", "Mic2 Capture Switch", "MIC2 Pre Amp"}, + {"Right Capture Mix", "HPMixerR Capture Switch", "HPR Mix"}, + {"Right Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"}, + {"Right Capture Mix", "MonoMixer Capture Switch", "Mono Mix"}, + + /* headphone left mux */ + {"Left Headphone Mux", "HP Left Mix", "HPL Mix"}, + {"Left Headphone Mux", "Vmid", "Vmid"}, + + /* headphone right mux */ + {"Right Headphone Mux", "HP Right Mix", "HPR Mix"}, + {"Right Headphone Mux", "Vmid", "Vmid"}, + + /* speaker out mux */ + {"SpeakerOut Mux", "Vmid", "Vmid"}, + {"SpeakerOut Mux", "HPOut Mix", "HPOut Mix"}, + {"SpeakerOut Mux", "Speaker Mix", "Speaker Mix"}, + {"SpeakerOut Mux", "Mono Mix", "Mono Mix"}, + + /* Mono/Aux Out mux */ + {"AuxOut Mux", "Vmid", "Vmid"}, + {"AuxOut Mux", "HPOut Mix", "HPOut Mix"}, + {"AuxOut Mux", "Speaker Mix", "Speaker Mix"}, + {"AuxOut Mux", "Mono Mix", "Mono Mix"}, + + /* output pga */ + {"HPL", NULL, "Left Headphone"}, + {"Left Headphone", NULL, "Left Headphone Mux"}, + {"HPR", NULL, "Right Headphone"}, + {"Right Headphone", NULL, "Right Headphone Mux"}, + {"Left AuxOut", NULL, "AuxOut Mux"}, + {"Right AuxOut", NULL, "AuxOut Mux"}, + + /* input pga */ + {"Left LineIn", NULL, "LINEINL"}, + {"Right LineIn", NULL, "LINEINR"}, + {"Left AuxI", NULL, "AUXINL"}, + {"Right AuxI", NULL, "AUXINR"}, + {"MIC1 Pre Amp", NULL, "MIC1"}, + {"MIC2 Pre Amp", NULL, "MIC2"}, + {"MIC1 PGA", NULL, "MIC1 Pre Amp"}, + {"MIC2 PGA", NULL, "MIC2 Pre Amp"}, + + /* left ADC */ + {"Left ADC", NULL, "Left Capture Mix"}, + + /* right ADC */ + {"Right ADC", NULL, "Right Capture Mix"}, + + {"SpeakerOut N Mux", "RN/-R", "SpeakerOut"}, + {"SpeakerOut N Mux", "RP/+R", "SpeakerOut"}, + {"SpeakerOut N Mux", "LN/-R", "SpeakerOut"}, + {"SpeakerOut N Mux", "Vmid", 
"Vmid"}, + + {"SPKOUT", NULL, "SpeakerOut"}, + {"SPKOUTN", NULL, "SpeakerOut N Mux"}, +}; + +static const struct snd_soc_dapm_route intercon_spk[] = { + {"SpeakerOut", NULL, "SpeakerOut Mux"}, +}; + +static const struct snd_soc_dapm_route intercon_amp_spk[] = { + {"AB Amp", NULL, "SpeakerOut Mux"}, + {"D Amp", NULL, "SpeakerOut Mux"}, + {"AB-D Amp Mux", "AB Amp", "AB Amp"}, + {"AB-D Amp Mux", "D Amp", "D Amp"}, + {"SpeakerOut", NULL, "AB-D Amp Mux"}, +}; + +/* PLL divisors */ +struct _pll_div { + u32 pll_in; + u32 pll_out; + u16 regvalue; +}; + +/* Note : pll code from original alc5623 driver. Not sure of how good it is */ +/* usefull only for master mode */ +static const struct _pll_div codec_master_pll_div[] = { + + { 2048000, 8192000, 0x0ea0}, + { 3686400, 8192000, 0x4e27}, + { 12000000, 8192000, 0x456b}, + { 13000000, 8192000, 0x495f}, + { 13100000, 8192000, 0x0320}, + { 2048000, 11289600, 0xf637}, + { 3686400, 11289600, 0x2f22}, + { 12000000, 11289600, 0x3e2f}, + { 13000000, 11289600, 0x4d5b}, + { 13100000, 11289600, 0x363b}, + { 2048000, 16384000, 0x1ea0}, + { 3686400, 16384000, 0x9e27}, + { 12000000, 16384000, 0x452b}, + { 13000000, 16384000, 0x542f}, + { 13100000, 16384000, 0x03a0}, + { 2048000, 16934400, 0xe625}, + { 3686400, 16934400, 0x9126}, + { 12000000, 16934400, 0x4d2c}, + { 13000000, 16934400, 0x742f}, + { 13100000, 16934400, 0x3c27}, + { 2048000, 22579200, 0x2aa0}, + { 3686400, 22579200, 0x2f20}, + { 12000000, 22579200, 0x7e2f}, + { 13000000, 22579200, 0x742f}, + { 13100000, 22579200, 0x3c27}, + { 2048000, 24576000, 0x2ea0}, + { 3686400, 24576000, 0xee27}, + { 12000000, 24576000, 0x2915}, + { 13000000, 24576000, 0x772e}, + { 13100000, 24576000, 0x0d20}, +}; + +static const struct _pll_div codec_slave_pll_div[] = { + + { 1024000, 16384000, 0x3ea0}, + { 1411200, 22579200, 0x3ea0}, + { 1536000, 24576000, 0x3ea0}, + { 2048000, 16384000, 0x1ea0}, + { 2822400, 22579200, 0x1ea0}, + { 3072000, 24576000, 0x1ea0}, + +}; + +static int alc5623_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, + int source, unsigned int freq_in, unsigned int freq_out) +{ + int i; + struct snd_soc_codec *codec = codec_dai->codec; + int gbl_clk = 0, pll_div = 0; + u16 reg; + + if (pll_id < ALC5623_PLL_FR_MCLK || pll_id > ALC5623_PLL_FR_BCK) + return -ENODEV; + + /* Disable PLL power */ + snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2, + ALC5623_PWR_ADD2_PLL, + 0); + + /* pll is not used in slave mode */ + reg = snd_soc_read(codec, ALC5623_DAI_CONTROL); + if (reg & ALC5623_DAI_SDP_SLAVE_MODE) + return 0; + + if (!freq_in || !freq_out) + return 0; + + switch (pll_id) { + case ALC5623_PLL_FR_MCLK: + for (i = 0; i < ARRAY_SIZE(codec_master_pll_div); i++) { + if (codec_master_pll_div[i].pll_in == freq_in + && codec_master_pll_div[i].pll_out == freq_out) { + /* PLL source from MCLK */ + pll_div = codec_master_pll_div[i].regvalue; + break; + } + } + break; + case ALC5623_PLL_FR_BCK: + for (i = 0; i < ARRAY_SIZE(codec_slave_pll_div); i++) { + if (codec_slave_pll_div[i].pll_in == freq_in + && codec_slave_pll_div[i].pll_out == freq_out) { + /* PLL source from Bitclk */ + gbl_clk = ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK; + pll_div = codec_slave_pll_div[i].regvalue; + break; + } + } + break; + default: + return -EINVAL; + } + + if (!pll_div) + return -EINVAL; + + snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk); + snd_soc_write(codec, ALC5623_PLL_CTRL, pll_div); + snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2, + ALC5623_PWR_ADD2_PLL, + ALC5623_PWR_ADD2_PLL); + gbl_clk |= 
ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL; + snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk); + + return 0; +} + +struct _coeff_div { + u16 fs; + u16 regvalue; +}; + +/* codec hifi mclk (after PLL) clock divider coefficients */ +/* values inspired from column BCLK=32Fs of Appendix A table */ +static const struct _coeff_div coeff_div[] = { + {256*8, 0x3a69}, + {384*8, 0x3c6b}, + {256*4, 0x2a69}, + {384*4, 0x2c6b}, + {256*2, 0x1a69}, + {384*2, 0x1c6b}, + {256*1, 0x0a69}, + {384*1, 0x0c6b}, +}; + +static int get_coeff(struct snd_soc_codec *codec, int rate) +{ + struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec); + int i; + + for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { + if (coeff_div[i].fs * rate == alc5623->sysclk) + return i; + } + return -EINVAL; +} + +/* + * Clock after PLL and dividers + */ +static int alc5623_set_dai_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_codec *codec = codec_dai->codec; + struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec); + + switch (freq) { + case 8192000: + case 11289600: + case 12288000: + case 16384000: + case 16934400: + case 18432000: + case 22579200: + case 24576000: + alc5623->sysclk = freq; + return 0; + } + return -EINVAL; +} + +static int alc5623_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_codec *codec = codec_dai->codec; + u16 iface = 0; + + /* set master/slave audio interface */ + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + iface = ALC5623_DAI_SDP_MASTER_MODE; + break; + case SND_SOC_DAIFMT_CBS_CFS: + iface = ALC5623_DAI_SDP_SLAVE_MODE; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + iface |= ALC5623_DAI_I2S_DF_I2S; + break; + case SND_SOC_DAIFMT_RIGHT_J: + iface |= ALC5623_DAI_I2S_DF_RIGHT; + break; + case SND_SOC_DAIFMT_LEFT_J: + iface |= ALC5623_DAI_I2S_DF_LEFT; + break; + case SND_SOC_DAIFMT_DSP_A: + iface |= ALC5623_DAI_I2S_DF_PCM; + break; + case SND_SOC_DAIFMT_DSP_B: + iface |= ALC5623_DAI_I2S_DF_PCM | ALC5623_DAI_I2S_PCM_MODE; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_IF: + iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL; + break; + case SND_SOC_DAIFMT_IB_NF: + iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL; + break; + case SND_SOC_DAIFMT_NB_IF: + break; + default: + return -EINVAL; + } + + return snd_soc_write(codec, ALC5623_DAI_CONTROL, iface); +} + +static int alc5623_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_codec *codec = rtd->codec; + struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec); + int coeff, rate; + u16 iface; + + iface = snd_soc_read(codec, ALC5623_DAI_CONTROL); + iface &= ~ALC5623_DAI_I2S_DL_MASK; + + /* bit size */ + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + iface |= ALC5623_DAI_I2S_DL_16; + break; + case SNDRV_PCM_FORMAT_S20_3LE: + iface |= ALC5623_DAI_I2S_DL_20; + break; + case SNDRV_PCM_FORMAT_S24_LE: + iface |= ALC5623_DAI_I2S_DL_24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + iface |= ALC5623_DAI_I2S_DL_32; + break; + default: + return -EINVAL; + } + + /* set iface & srate */ + snd_soc_write(codec, ALC5623_DAI_CONTROL, iface); + rate = params_rate(params); + coeff = 
get_coeff(codec, rate); + if (coeff < 0) + return -EINVAL; + + coeff = coeff_div[coeff].regvalue; + dev_dbg(codec->dev, "%s: sysclk=%d,rate=%d,coeff=0x%04x\n", + __func__, alc5623->sysclk, rate, coeff); + snd_soc_write(codec, ALC5623_STEREO_AD_DA_CLK_CTRL, coeff); + + return 0; +} + +static int alc5623_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_codec *codec = dai->codec; + u16 hp_mute = ALC5623_MISC_M_DAC_L_INPUT | ALC5623_MISC_M_DAC_R_INPUT; + u16 mute_reg = snd_soc_read(codec, ALC5623_MISC_CTRL) & ~hp_mute; + + if (mute) + mute_reg |= hp_mute; + + return snd_soc_write(codec, ALC5623_MISC_CTRL, mute_reg); +} + +#define ALC5623_ADD2_POWER_EN (ALC5623_PWR_ADD2_VREF \ + | ALC5623_PWR_ADD2_DAC_REF_CIR) + +#define ALC5623_ADD3_POWER_EN (ALC5623_PWR_ADD3_MAIN_BIAS \ + | ALC5623_PWR_ADD3_MIC1_BOOST_AD) + +#define ALC5623_ADD1_POWER_EN \ + (ALC5623_PWR_ADD1_SHORT_CURR_DET_EN | ALC5623_PWR_ADD1_SOFTGEN_EN \ + | ALC5623_PWR_ADD1_DEPOP_BUF_HP | ALC5623_PWR_ADD1_HP_OUT_AMP \ + | ALC5623_PWR_ADD1_HP_OUT_ENH_AMP) + +#define ALC5623_ADD1_POWER_EN_5622 \ + (ALC5623_PWR_ADD1_SHORT_CURR_DET_EN \ + | ALC5623_PWR_ADD1_HP_OUT_AMP) + +static void enable_power_depop(struct snd_soc_codec *codec) +{ + struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec); + + snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD1, + ALC5623_PWR_ADD1_SOFTGEN_EN, + ALC5623_PWR_ADD1_SOFTGEN_EN); + + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, ALC5623_ADD3_POWER_EN); + + snd_soc_update_bits(codec, ALC5623_MISC_CTRL, + ALC5623_MISC_HP_DEPOP_MODE2_EN, + ALC5623_MISC_HP_DEPOP_MODE2_EN); + + msleep(500); + + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, ALC5623_ADD2_POWER_EN); + + /* avoid writing '1' into 5622 reserved bits */ + if (alc5623->id == 0x22) + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, + ALC5623_ADD1_POWER_EN_5622); + else + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, + ALC5623_ADD1_POWER_EN); + + /* disable HP Depop2 */ + snd_soc_update_bits(codec, ALC5623_MISC_CTRL, + ALC5623_MISC_HP_DEPOP_MODE2_EN, + 0); + +} + +static int alc5623_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + switch (level) { + case SND_SOC_BIAS_ON: + enable_power_depop(codec); + break; + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: + /* everything off except vref/vmid, */ + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, + ALC5623_PWR_ADD2_VREF); + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, + ALC5623_PWR_ADD3_MAIN_BIAS); + break; + case SND_SOC_BIAS_OFF: + /* everything off, dac mute, inactive */ + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, 0); + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, 0); + snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, 0); + break; + } + codec->dapm.bias_level = level; + return 0; +} + +#define ALC5623_FORMATS (SNDRV_PCM_FMTBIT_S16_LE \ + | SNDRV_PCM_FMTBIT_S24_LE \ + | SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_ops alc5623_dai_ops = { + .hw_params = alc5623_pcm_hw_params, + .digital_mute = alc5623_mute, + .set_fmt = alc5623_set_dai_fmt, + .set_sysclk = alc5623_set_dai_sysclk, + .set_pll = alc5623_set_dai_pll, +}; + +static struct snd_soc_dai_driver alc5623_dai = { + .name = "alc5623-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 1, + .channels_max = 2, + .rate_min = 8000, + .rate_max = 48000, + .rates = SNDRV_PCM_RATE_8000_48000, + .formats = ALC5623_FORMATS,}, + .capture = { + .stream_name = "Capture", + .channels_min = 1, + .channels_max = 2, + .rate_min = 8000, + .rate_max = 48000, + .rates = 
SNDRV_PCM_RATE_8000_48000, + .formats = ALC5623_FORMATS,}, + + .ops = &alc5623_dai_ops, +}; + +static int alc5623_suspend(struct snd_soc_codec *codec, pm_message_t mesg) +{ + alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static int alc5623_resume(struct snd_soc_codec *codec) +{ + int i, step = codec->driver->reg_cache_step; + u16 *cache = codec->reg_cache; + + /* Sync reg_cache with the hardware */ + for (i = 2 ; i < codec->driver->reg_cache_size ; i += step) + snd_soc_write(codec, i, cache[i]); + + alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + + /* charge alc5623 caps */ + if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) { + alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + codec->dapm.bias_level = SND_SOC_BIAS_ON; + alc5623_set_bias_level(codec, codec->dapm.bias_level); + } + + return 0; +} + +static int alc5623_probe(struct snd_soc_codec *codec) +{ + struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; + int ret; + + ret = snd_soc_codec_set_cache_io(codec, 8, 16, alc5623->control_type); + if (ret < 0) { + dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); + return ret; + } + + alc5623_reset(codec); + alc5623_fill_cache(codec); + + /* power on device */ + alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + + if (alc5623->add_ctrl) { + snd_soc_write(codec, ALC5623_ADD_CTRL_REG, + alc5623->add_ctrl); + } + + if (alc5623->jack_det_ctrl) { + snd_soc_write(codec, ALC5623_JACK_DET_CTRL, + alc5623->jack_det_ctrl); + } + + switch (alc5623->id) { + case 0x21: + snd_soc_add_controls(codec, rt5621_vol_snd_controls, + ARRAY_SIZE(rt5621_vol_snd_controls)); + break; + case 0x22: + snd_soc_add_controls(codec, rt5622_vol_snd_controls, + ARRAY_SIZE(rt5622_vol_snd_controls)); + break; + case 0x23: + snd_soc_add_controls(codec, alc5623_vol_snd_controls, + ARRAY_SIZE(alc5623_vol_snd_controls)); + break; + default: + return -EINVAL; + } + + snd_soc_add_controls(codec, alc5623_snd_controls, + ARRAY_SIZE(alc5623_snd_controls)); + + snd_soc_dapm_new_controls(dapm, alc5623_dapm_widgets, + ARRAY_SIZE(alc5623_dapm_widgets)); + + /* set up audio path interconnects */ + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); + + switch (alc5623->id) { + case 0x21: + case 0x22: + snd_soc_dapm_new_controls(dapm, alc5623_dapm_amp_widgets, + ARRAY_SIZE(alc5623_dapm_amp_widgets)); + snd_soc_dapm_add_routes(dapm, intercon_amp_spk, + ARRAY_SIZE(intercon_amp_spk)); + break; + case 0x23: + snd_soc_dapm_add_routes(dapm, intercon_spk, + ARRAY_SIZE(intercon_spk)); + break; + default: + return -EINVAL; + } + + return ret; +} + +/* power down chip */ +static int alc5623_remove(struct snd_soc_codec *codec) +{ + alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static struct snd_soc_codec_driver soc_codec_device_alc5623 = { + .probe = alc5623_probe, + .remove = alc5623_remove, + .suspend = alc5623_suspend, + .resume = alc5623_resume, + .set_bias_level = alc5623_set_bias_level, + .reg_cache_size = ALC5623_VENDOR_ID2+2, + .reg_word_size = sizeof(u16), + .reg_cache_step = 2, +}; + +/* + * ALC5623 2 wire address is determined by A1 pin + * state during powerup. 
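The coeff_div[]/get_coeff() lookup earlier in this file is the whole of the codec clocking policy: set_sysclk() only records one of the frequencies in its switch, and hw_params() then searches for the table entry whose fs multiple times the stream rate equals that sysclk, writing the matching value to ALC5623_STEREO_AD_DA_CLK_CTRL. A stand-alone sketch of that selection (plain user-space C; only the table values are taken from the driver, the helper name is made up):

#include <stdio.h>

struct coeff_div { unsigned int fs; unsigned int regvalue; };

/* same table as in the driver: fs multiple -> clock control register value */
static const struct coeff_div coeff_div[] = {
        {256 * 8, 0x3a69}, {384 * 8, 0x3c6b},
        {256 * 4, 0x2a69}, {384 * 4, 0x2c6b},
        {256 * 2, 0x1a69}, {384 * 2, 0x1c6b},
        {256 * 1, 0x0a69}, {384 * 1, 0x0c6b},
};

/* return the register value for a sysclk/rate pair, or -1 if the
 * combination is not one the table supports */
static int pick_clk_ctrl(unsigned int sysclk, unsigned int rate)
{
        for (unsigned int i = 0; i < sizeof(coeff_div) / sizeof(coeff_div[0]); i++)
                if (coeff_div[i].fs * rate == sysclk)
                        return coeff_div[i].regvalue;
        return -1;
}

int main(void)
{
        /* 24.576 MHz sysclk driving a 48 kHz stream is 512*fs -> 0x1a69 */
        int reg = pick_clk_ctrl(24576000, 48000);

        if (reg < 0)
                printf("unsupported sysclk/rate combination\n");
        else
                printf("ALC5623_STEREO_AD_DA_CLK_CTRL = 0x%04x\n", reg);
        return 0;
}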
+ * low = 0x1a + * high = 0x1b + */ +static int alc5623_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct alc5623_platform_data *pdata; + struct alc5623_priv *alc5623; + int ret, vid1, vid2; + + vid1 = i2c_smbus_read_word_data(client, ALC5623_VENDOR_ID1); + if (vid1 < 0) { + dev_err(&client->dev, "failed to read I2C\n"); + return -EIO; + } + vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8); + + vid2 = i2c_smbus_read_byte_data(client, ALC5623_VENDOR_ID2); + if (vid2 < 0) { + dev_err(&client->dev, "failed to read I2C\n"); + return -EIO; + } + + if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) { + dev_err(&client->dev, "unknown or wrong codec\n"); + dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n", + 0x10ec, id->driver_data, + vid1, vid2); + return -ENODEV; + } + + dev_dbg(&client->dev, "Found codec id : alc56%02x\n", vid2); + + alc5623 = kzalloc(sizeof(struct alc5623_priv), GFP_KERNEL); + if (alc5623 == NULL) + return -ENOMEM; + + pdata = client->dev.platform_data; + if (pdata) { + alc5623->add_ctrl = pdata->add_ctrl; + alc5623->jack_det_ctrl = pdata->jack_det_ctrl; + } + + alc5623->id = vid2; + switch (alc5623->id) { + case 0x21: + alc5623_dai.name = "alc5621-hifi"; + break; + case 0x22: + alc5623_dai.name = "alc5622-hifi"; + break; + case 0x23: + alc5623_dai.name = "alc5623-hifi"; + break; + default: + kfree(alc5623); + return -EINVAL; + } + + i2c_set_clientdata(client, alc5623); + alc5623->control_data = client; + alc5623->control_type = SND_SOC_I2C; + mutex_init(&alc5623->mutex); + + ret = snd_soc_register_codec(&client->dev, + &soc_codec_device_alc5623, &alc5623_dai, 1); + if (ret != 0) { + dev_err(&client->dev, "Failed to register codec: %d\n", ret); + kfree(alc5623); + } + + return ret; +} + +static int alc5623_i2c_remove(struct i2c_client *client) +{ + struct alc5623_priv *alc5623 = i2c_get_clientdata(client); + + snd_soc_unregister_codec(&client->dev); + kfree(alc5623); + return 0; +} + +static const struct i2c_device_id alc5623_i2c_table[] = { + {"alc5621", 0x21}, + {"alc5622", 0x22}, + {"alc5623", 0x23}, + {} +}; +MODULE_DEVICE_TABLE(i2c, alc5623_i2c_table); + +/* i2c codec control layer */ +static struct i2c_driver alc5623_i2c_driver = { + .driver = { + .name = "alc562x-codec", + .owner = THIS_MODULE, + }, + .probe = alc5623_i2c_probe, + .remove = __devexit_p(alc5623_i2c_remove), + .id_table = alc5623_i2c_table, +}; + +static int __init alc5623_modinit(void) +{ + int ret; + + ret = i2c_add_driver(&alc5623_i2c_driver); + if (ret != 0) { + printk(KERN_ERR "%s: can't add i2c driver", __func__); + return ret; + } + + return ret; +} +module_init(alc5623_modinit); + +static void __exit alc5623_modexit(void) +{ + i2c_del_driver(&alc5623_i2c_driver); +} +module_exit(alc5623_modexit); + +MODULE_DESCRIPTION("ASoC alc5621/2/3 driver"); +MODULE_AUTHOR("Arnaud Patard "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/alc5623.h b/sound/soc/codecs/alc5623.h new file mode 100644 index 000000000000..f3d68260d425 --- /dev/null +++ b/sound/soc/codecs/alc5623.h @@ -0,0 +1,161 @@ +/* + * alc5623.h -- alc562[123] ALSA Soc Audio driver + * + * Copyright 2008 Realtek Microelectronics + * Copyright 2010 Arnaud Patard + * + * Author: flove + * Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef _ALC5623_H +#define _ALC5623_H + +#define ALC5623_RESET 0x00 +/* 5621 5622 5623 */ +/* speaker output vol 2 2 */ +/* line output vol 4 2 */ +/* HP output vol 4 0 4 */ +#define ALC5623_SPK_OUT_VOL 0x02 +#define ALC5623_HP_OUT_VOL 0x04 +#define ALC5623_MONO_AUX_OUT_VOL 0x06 +#define ALC5623_AUXIN_VOL 0x08 +#define ALC5623_LINE_IN_VOL 0x0A +#define ALC5623_STEREO_DAC_VOL 0x0C +#define ALC5623_MIC_VOL 0x0E +#define ALC5623_MIC_ROUTING_CTRL 0x10 +#define ALC5623_ADC_REC_GAIN 0x12 +#define ALC5623_ADC_REC_MIXER 0x14 +#define ALC5623_SOFT_VOL_CTRL_TIME 0x16 +/* ALC5623_OUTPUT_MIXER_CTRL : */ +/* same remark as for reg 2 line vs speaker */ +#define ALC5623_OUTPUT_MIXER_CTRL 0x1C +#define ALC5623_MIC_CTRL 0x22 + +#define ALC5623_DAI_CONTROL 0x34 +#define ALC5623_DAI_SDP_MASTER_MODE (0 << 15) +#define ALC5623_DAI_SDP_SLAVE_MODE (1 << 15) +#define ALC5623_DAI_I2S_PCM_MODE (1 << 14) +#define ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL (1 << 7) +#define ALC5623_DAI_ADC_DATA_L_R_SWAP (1 << 5) +#define ALC5623_DAI_DAC_DATA_L_R_SWAP (1 << 4) +#define ALC5623_DAI_I2S_DL_MASK (3 << 2) +#define ALC5623_DAI_I2S_DL_32 (3 << 2) +#define ALC5623_DAI_I2S_DL_24 (2 << 2) +#define ALC5623_DAI_I2S_DL_20 (1 << 2) +#define ALC5623_DAI_I2S_DL_16 (0 << 2) +#define ALC5623_DAI_I2S_DF_PCM (3 << 0) +#define ALC5623_DAI_I2S_DF_LEFT (2 << 0) +#define ALC5623_DAI_I2S_DF_RIGHT (1 << 0) +#define ALC5623_DAI_I2S_DF_I2S (0 << 0) + +#define ALC5623_STEREO_AD_DA_CLK_CTRL 0x36 +#define ALC5623_COMPANDING_CTRL 0x38 + +#define ALC5623_PWR_MANAG_ADD1 0x3A +#define ALC5623_PWR_ADD1_MAIN_I2S_EN (1 << 15) +#define ALC5623_PWR_ADD1_ZC_DET_PD_EN (1 << 14) +#define ALC5623_PWR_ADD1_MIC1_BIAS_EN (1 << 11) +#define ALC5623_PWR_ADD1_SHORT_CURR_DET_EN (1 << 10) +#define ALC5623_PWR_ADD1_SOFTGEN_EN (1 << 8) /* rsvd on 5622 */ +#define ALC5623_PWR_ADD1_DEPOP_BUF_HP (1 << 6) /* rsvd on 5622 */ +#define ALC5623_PWR_ADD1_HP_OUT_AMP (1 << 5) +#define ALC5623_PWR_ADD1_HP_OUT_ENH_AMP (1 << 4) /* rsvd on 5622 */ +#define ALC5623_PWR_ADD1_DEPOP_BUF_AUX (1 << 2) +#define ALC5623_PWR_ADD1_AUX_OUT_AMP (1 << 1) +#define ALC5623_PWR_ADD1_AUX_OUT_ENH_AMP (1 << 0) /* rsvd on 5622 */ + +#define ALC5623_PWR_MANAG_ADD2 0x3C +#define ALC5623_PWR_ADD2_LINEOUT (1 << 15) /* rt5623 */ +#define ALC5623_PWR_ADD2_CLASS_AB (1 << 15) /* rt5621 */ +#define ALC5623_PWR_ADD2_CLASS_D (1 << 14) /* rt5621 */ +#define ALC5623_PWR_ADD2_VREF (1 << 13) +#define ALC5623_PWR_ADD2_PLL (1 << 12) +#define ALC5623_PWR_ADD2_DAC_REF_CIR (1 << 10) +#define ALC5623_PWR_ADD2_L_DAC_CLK (1 << 9) +#define ALC5623_PWR_ADD2_R_DAC_CLK (1 << 8) +#define ALC5623_PWR_ADD2_L_ADC_CLK_GAIN (1 << 7) +#define ALC5623_PWR_ADD2_R_ADC_CLK_GAIN (1 << 6) +#define ALC5623_PWR_ADD2_L_HP_MIXER (1 << 5) +#define ALC5623_PWR_ADD2_R_HP_MIXER (1 << 4) +#define ALC5623_PWR_ADD2_SPK_MIXER (1 << 3) +#define ALC5623_PWR_ADD2_MONO_MIXER (1 << 2) +#define ALC5623_PWR_ADD2_L_ADC_REC_MIXER (1 << 1) +#define ALC5623_PWR_ADD2_R_ADC_REC_MIXER (1 << 0) + +#define ALC5623_PWR_MANAG_ADD3 0x3E +#define ALC5623_PWR_ADD3_MAIN_BIAS (1 << 15) +#define ALC5623_PWR_ADD3_AUXOUT_L_VOL_AMP (1 << 14) +#define ALC5623_PWR_ADD3_AUXOUT_R_VOL_AMP (1 << 13) +#define ALC5623_PWR_ADD3_SPK_OUT (1 << 12) +#define ALC5623_PWR_ADD3_HP_L_OUT_VOL (1 << 10) +#define ALC5623_PWR_ADD3_HP_R_OUT_VOL (1 << 9) +#define ALC5623_PWR_ADD3_LINEIN_L_VOL (1 << 7) +#define ALC5623_PWR_ADD3_LINEIN_R_VOL (1 << 6) +#define ALC5623_PWR_ADD3_AUXIN_L_VOL (1 << 5) +#define ALC5623_PWR_ADD3_AUXIN_R_VOL (1 << 4) +#define ALC5623_PWR_ADD3_MIC1_FUN_CTRL (1 << 3) +#define 
ALC5623_PWR_ADD3_MIC2_FUN_CTRL (1 << 2) +#define ALC5623_PWR_ADD3_MIC1_BOOST_AD (1 << 1) +#define ALC5623_PWR_ADD3_MIC2_BOOST_AD (1 << 0) + +#define ALC5623_ADD_CTRL_REG 0x40 + +#define ALC5623_GLOBAL_CLK_CTRL_REG 0x42 +#define ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL (1 << 15) +#define ALC5623_GBL_CLK_SYS_SOUR_SEL_MCLK (0 << 15) +#define ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK (1 << 14) +#define ALC5623_GBL_CLK_PLL_SOUR_SEL_MCLK (0 << 14) +#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV8 (3 << 1) +#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV4 (2 << 1) +#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV2 (1 << 1) +#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV1 (0 << 1) +#define ALC5623_GBL_CLK_PLL_PRE_DIV2 (1 << 0) +#define ALC5623_GBL_CLK_PLL_PRE_DIV1 (0 << 0) + +#define ALC5623_PLL_CTRL 0x44 +#define ALC5623_PLL_CTRL_N_VAL(n) (((n)&0xff) << 8) +#define ALC5623_PLL_CTRL_K_VAL(k) (((k)&0x7) << 4) +#define ALC5623_PLL_CTRL_M_VAL(m) ((m)&0xf) + +#define ALC5623_GPIO_OUTPUT_PIN_CTRL 0x4A +#define ALC5623_GPIO_PIN_CONFIG 0x4C +#define ALC5623_GPIO_PIN_POLARITY 0x4E +#define ALC5623_GPIO_PIN_STICKY 0x50 +#define ALC5623_GPIO_PIN_WAKEUP 0x52 +#define ALC5623_GPIO_PIN_STATUS 0x54 +#define ALC5623_GPIO_PIN_SHARING 0x56 +#define ALC5623_OVER_CURR_STATUS 0x58 +#define ALC5623_JACK_DET_CTRL 0x5A + +#define ALC5623_MISC_CTRL 0x5E +#define ALC5623_MISC_DISABLE_FAST_VREG (1 << 15) +#define ALC5623_MISC_SPK_CLASS_AB_OC_PD (1 << 13) /* 5621 */ +#define ALC5623_MISC_SPK_CLASS_AB_OC_DET (1 << 12) /* 5621 */ +#define ALC5623_MISC_HP_DEPOP_MODE3_EN (1 << 10) +#define ALC5623_MISC_HP_DEPOP_MODE2_EN (1 << 9) +#define ALC5623_MISC_HP_DEPOP_MODE1_EN (1 << 8) +#define ALC5623_MISC_AUXOUT_DEPOP_MODE3_EN (1 << 6) +#define ALC5623_MISC_AUXOUT_DEPOP_MODE2_EN (1 << 5) +#define ALC5623_MISC_AUXOUT_DEPOP_MODE1_EN (1 << 4) +#define ALC5623_MISC_M_DAC_L_INPUT (1 << 3) +#define ALC5623_MISC_M_DAC_R_INPUT (1 << 2) +#define ALC5623_MISC_IRQOUT_INV_CTRL (1 << 0) + +#define ALC5623_PSEDUEO_SPATIAL_CTRL 0x60 +#define ALC5623_EQ_CTRL 0x62 +#define ALC5623_EQ_MODE_ENABLE 0x66 +#define ALC5623_AVC_CTRL 0x68 +#define ALC5623_HID_CTRL_INDEX 0x6A +#define ALC5623_HID_CTRL_DATA 0x6C +#define ALC5623_VENDOR_ID1 0x7C +#define ALC5623_VENDOR_ID2 0x7E + +#define ALC5623_PLL_FR_MCLK 0 +#define ALC5623_PLL_FR_BCK 1 +#endif diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c index 823643932dde..46dbfd067f79 100644 --- a/sound/soc/codecs/cq93vc.c +++ b/sound/soc/codecs/cq93vc.c @@ -36,8 +36,6 @@ #include #include #include -#include -#include #include #include @@ -116,7 +114,7 @@ static int cq93vc_set_bias_level(struct snd_soc_codec *codec, DAVINCI_VC_REG12_POWER_ALL_OFF); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index 6d4bdc609ac8..8b51245f2318 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c @@ -106,6 +106,21 @@ #define CS4270_MUTE_DAC_A 0x01 #define CS4270_MUTE_DAC_B 0x02 +/* Power-on default values for the registers + * + * This array contains the power-on default values of the registers, with the + * exception of the "CHIPID" register (01h). The lower four bits of that + * register contain the hardware revision, so it is treated as volatile. + * + * Also note that on the CS4270, the first readable register is 1, but ASoC + * assumes the first register is 0. Therfore, the array must have an entry for + * register 0, but we use cs4270_reg_is_readable() to tell ASoC that it can't + * be read. 
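The register-0 handling described above works because the generic ASoC cache added below spans CS4270_LASTREG + 1 entries while the readable/volatile callbacks exclude register 0. In isolation the two predicates behave as follows (stand-alone user-space C; the CS4270_FIRSTREG/LASTREG/CHIPID values here are assumptions for illustration, not taken from this patch):

#include <stdio.h>

/* register map bounds -- assumed values, for illustration only */
#define CS4270_FIRSTREG 0x01
#define CS4270_LASTREG  0x08
#define CS4270_CHIPID   0x01    /* lower nibble holds the hw revision */

static int reg_is_readable(unsigned int reg)
{
        return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG);
}

static int reg_is_volatile(unsigned int reg)
{
        /* unreadable registers are treated as volatile */
        if (reg < CS4270_FIRSTREG || reg > CS4270_LASTREG)
                return 1;
        return reg == CS4270_CHIPID;
}

int main(void)
{
        /* register 0 comes out unreadable and volatile, matching the comment above */
        for (unsigned int reg = 0; reg <= CS4270_LASTREG; reg++)
                printf("reg 0x%02x: readable=%d volatile=%d\n",
                       reg, reg_is_readable(reg), reg_is_volatile(reg));
        return 0;
}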
+ */ +static const u8 cs4270_default_reg_cache[CS4270_LASTREG + 1] = { + 0x00, 0x00, 0x00, 0x30, 0x00, 0x60, 0x20, 0x00, 0x00 +}; + static const char *supply_names[] = { "va", "vd", "vlc" }; @@ -114,7 +129,6 @@ static const char *supply_names[] = { struct cs4270_private { enum snd_soc_control_type control_type; void *control_data; - u8 reg_cache[CS4270_NUMREGS]; unsigned int mclk; /* Input frequency of the MCLK pin */ unsigned int mode; /* The mode (I2S or left-justified) */ unsigned int slave_mode; @@ -179,6 +193,20 @@ static struct cs4270_mode_ratios cs4270_mode_ratios[] = { /* The number of MCLK/LRCK ratios supported by the CS4270 */ #define NUM_MCLK_RATIOS ARRAY_SIZE(cs4270_mode_ratios) +static int cs4270_reg_is_readable(unsigned int reg) +{ + return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG); +} + +static int cs4270_reg_is_volatile(unsigned int reg) +{ + /* Unreadable registers are considered volatile */ + if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG)) + return 1; + + return reg == CS4270_CHIPID; +} + /** * cs4270_set_dai_sysclk - determine the CS4270 samples rates. * @codec_dai: the codec DAI @@ -263,97 +291,6 @@ static int cs4270_set_dai_fmt(struct snd_soc_dai *codec_dai, return ret; } -/** - * cs4270_fill_cache - pre-fill the CS4270 register cache. - * @codec: the codec for this CS4270 - * - * This function fills in the CS4270 register cache by reading the register - * values from the hardware. - * - * This CS4270 registers are cached to avoid excessive I2C I/O operations. - * After the initial read to pre-fill the cache, the CS4270 never updates - * the register values, so we won't have a cache coherency problem. - * - * We use the auto-increment feature of the CS4270 to read all registers in - * one shot. - */ -static int cs4270_fill_cache(struct snd_soc_codec *codec) -{ - u8 *cache = codec->reg_cache; - struct i2c_client *i2c_client = codec->control_data; - s32 length; - - length = i2c_smbus_read_i2c_block_data(i2c_client, - CS4270_FIRSTREG | CS4270_I2C_INCR, CS4270_NUMREGS, cache); - - if (length != CS4270_NUMREGS) { - dev_err(codec->dev, "i2c read failure, addr=0x%x\n", - i2c_client->addr); - return -EIO; - } - - return 0; -} - -/** - * cs4270_read_reg_cache - read from the CS4270 register cache. - * @codec: the codec for this CS4270 - * @reg: the register to read - * - * This function returns the value for a given register. It reads only from - * the register cache, not the hardware itself. - * - * This CS4270 registers are cached to avoid excessive I2C I/O operations. - * After the initial read to pre-fill the cache, the CS4270 never updates - * the register values, so we won't have a cache coherency problem. - */ -static unsigned int cs4270_read_reg_cache(struct snd_soc_codec *codec, - unsigned int reg) -{ - u8 *cache = codec->reg_cache; - - if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG)) - return -EIO; - - return cache[reg - CS4270_FIRSTREG]; -} - -/** - * cs4270_i2c_write - write to a CS4270 register via the I2C bus. - * @codec: the codec for this CS4270 - * @reg: the register to write - * @value: the value to write to the register - * - * This function writes the given value to the given CS4270 register, and - * also updates the register cache. - * - * Note that we don't use the hw_write function pointer of snd_soc_codec. - * That's because it's too clunky: the hw_write_t prototype does not match - * i2c_smbus_write_byte_data(), and it's just another layer of overhead. 
- */ -static int cs4270_i2c_write(struct snd_soc_codec *codec, unsigned int reg, - unsigned int value) -{ - u8 *cache = codec->reg_cache; - - if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG)) - return -EIO; - - /* Only perform an I2C operation if the new value is different */ - if (cache[reg - CS4270_FIRSTREG] != value) { - struct i2c_client *client = codec->control_data; - if (i2c_smbus_write_byte_data(client, reg, value)) { - dev_err(codec->dev, "i2c write failed\n"); - return -EIO; - } - - /* We've written to the hardware, so update the cache */ - cache[reg - CS4270_FIRSTREG] = value; - } - - return 0; -} - /** * cs4270_hw_params - program the CS4270 with the given hardware parameters. * @substream: the audio stream @@ -551,15 +488,16 @@ static struct snd_soc_dai_driver cs4270_dai = { static int cs4270_probe(struct snd_soc_codec *codec) { struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec); - int i, ret, reg; + int i, ret; codec->control_data = cs4270->control_data; - /* The I2C interface is set up, so pre-fill our register cache */ - - ret = cs4270_fill_cache(codec); + /* Tell ASoC what kind of I/O to use to read the registers. ASoC will + * then do the I2C transactions itself. + */ + ret = snd_soc_codec_set_cache_io(codec, 8, 8, cs4270->control_type); if (ret < 0) { - dev_err(codec->dev, "failed to fill register cache\n"); + dev_err(codec->dev, "failed to set cache I/O (ret=%i)\n", ret); return ret; } @@ -568,10 +506,7 @@ static int cs4270_probe(struct snd_soc_codec *codec) * this feature disabled by default. An application (e.g. alsactl) can * re-enabled it by using the controls. */ - - reg = cs4270_read_reg_cache(codec, CS4270_MUTE); - reg &= ~CS4270_MUTE_AUTO; - ret = cs4270_i2c_write(codec, CS4270_MUTE, reg); + ret = snd_soc_update_bits(codec, CS4270_MUTE, CS4270_MUTE_AUTO, 0); if (ret < 0) { dev_err(codec->dev, "i2c write failed\n"); return ret; @@ -582,10 +517,8 @@ static int cs4270_probe(struct snd_soc_codec *codec) * playback has started. An application (e.g. alsactl) can * re-enabled it by using the controls. */ - - reg = cs4270_read_reg_cache(codec, CS4270_TRANS); - reg &= ~(CS4270_TRANS_SOFT | CS4270_TRANS_ZERO); - ret = cs4270_i2c_write(codec, CS4270_TRANS, reg); + ret = snd_soc_update_bits(codec, CS4270_TRANS, + CS4270_TRANS_SOFT | CS4270_TRANS_ZERO, 0); if (ret < 0) { dev_err(codec->dev, "i2c write failed\n"); return ret; @@ -708,15 +641,16 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec) * Assign this variable to the codec_dev field of the machine driver's * snd_soc_device structure. 
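With the private cache helpers gone, the probe hunks above lean on snd_soc_update_bits() for the auto-mute and soft-ramp defaults. What that relies on is a plain read-modify-write that skips the bus write when the masked bits already hold the requested value; roughly (a sketch, not the ASoC implementation; the register number and default below are illustrative):

#include <stdio.h>

static unsigned int regs[16];   /* stand-in for a cached register file */

/* clear 'mask' bits, set 'value' bits; return 1 if the register changed,
 * 0 if the write could be skipped */
static int update_bits(unsigned int reg, unsigned int mask, unsigned int value)
{
        unsigned int old = regs[reg];
        unsigned int newval = (old & ~mask) | (value & mask);

        if (newval == old)
                return 0;
        regs[reg] = newval;     /* a real driver would also write the hardware here */
        return 1;
}

int main(void)
{
        regs[6] = 0x20;                          /* illustrative power-on default */
        printf("%d\n", update_bits(6, 0x20, 0)); /* clears the bit -> 1 */
        printf("%d\n", update_bits(6, 0x20, 0)); /* already clear   -> 0 */
        return 0;
}

Skipping identical writes is what keeps these probe-time defaults from generating needless I2C traffic once the cache already reflects the hardware.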
*/ -static struct snd_soc_codec_driver soc_codec_device_cs4270 = { - .probe = cs4270_probe, - .remove = cs4270_remove, - .suspend = cs4270_soc_suspend, - .resume = cs4270_soc_resume, - .read = cs4270_read_reg_cache, - .write = cs4270_i2c_write, - .reg_cache_size = CS4270_NUMREGS, - .reg_word_size = sizeof(u8), +static const struct snd_soc_codec_driver soc_codec_device_cs4270 = { + .probe = cs4270_probe, + .remove = cs4270_remove, + .suspend = cs4270_soc_suspend, + .resume = cs4270_soc_resume, + .volatile_register = cs4270_reg_is_volatile, + .readable_register = cs4270_reg_is_readable, + .reg_cache_size = CS4270_LASTREG + 1, + .reg_word_size = sizeof(u8), + .reg_cache_default = cs4270_default_reg_cache, }; /** diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index cb086eaf4e07..8fb7070108dd 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -47,7 +46,6 @@ struct cs42l51_private { unsigned int mclk; unsigned int audio_mode; /* The mode (I2S or left-justified) */ enum master_slave_mode func; - u8 reg_cache[CS42L51_NUMREGS]; }; #define CS42L51_FORMATS ( \ @@ -519,6 +517,7 @@ static struct snd_soc_dai_driver cs42l51_dai = { static int cs42l51_probe(struct snd_soc_codec *codec) { struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret, reg; codec->control_data = cs42l51->control_data; @@ -550,9 +549,9 @@ static int cs42l51_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, cs42l51_snd_controls, ARRAY_SIZE(cs42l51_snd_controls)); - snd_soc_dapm_new_controls(codec, cs42l51_dapm_widgets, + snd_soc_dapm_new_controls(dapm, cs42l51_dapm_widgets, ARRAY_SIZE(cs42l51_dapm_widgets)); - snd_soc_dapm_add_routes(codec, cs42l51_routes, + snd_soc_dapm_add_routes(dapm, cs42l51_routes, ARRAY_SIZE(cs42l51_routes)); return 0; diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index e8d27c8f9ba3..03d1e860d229 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include "cx20442.h" @@ -26,7 +26,6 @@ struct cx20442_priv { enum snd_soc_control_type control_type; void *control_data; - u8 reg_cache[1]; }; #define CX20442_PM 0x0 @@ -89,10 +88,11 @@ static const struct snd_soc_dapm_route cx20442_audio_map[] = { static int cx20442_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets, - ARRAY_SIZE(cx20442_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, cx20442_audio_map, + snd_soc_dapm_new_controls(dapm, cx20442_dapm_widgets, + ARRAY_SIZE(cx20442_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, cx20442_audio_map, ARRAY_SIZE(cx20442_audio_map)); return 0; @@ -263,7 +263,7 @@ static void v253_close(struct tty_struct *tty) /* Prevent the codec driver from further accessing the modem */ codec->hw_write = NULL; cx20442->control_data = NULL; - codec->pop_time = 0; + codec->card->pop_time = 0; } /* Line discipline .hangup() */ @@ -291,7 +291,7 @@ static void v253_receive(struct tty_struct *tty, /* Set up codec driver access to modem controls */ cx20442->control_data = tty; codec->hw_write = (hw_write_t)tty->ops->write; - codec->pop_time = 1; + codec->card->pop_time = 1; } } @@ -348,7 +348,7 @@ static int cx20442_codec_probe(struct snd_soc_codec *codec) cx20442->control_data = NULL; codec->hw_write = NULL; - codec->pop_time = 0; + 
codec->card->pop_time = 0; return 0; } diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c index 58bb9b994811..92fd9d7a9221 100644 --- a/sound/soc/codecs/da7210.c +++ b/sound/soc/codecs/da7210.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c new file mode 100644 index 000000000000..57e9dac88d38 --- /dev/null +++ b/sound/soc/codecs/dmic.c @@ -0,0 +1,81 @@ +/* + * dmic.c -- SoC audio for Generic Digital MICs + * + * Author: Liam Girdwood + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include + +static struct snd_soc_dai_driver dmic_dai = { + .name = "dmic-hifi", + .capture = { + .stream_name = "Capture", + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .formats = SNDRV_PCM_FMTBIT_S32_LE + | SNDRV_PCM_FMTBIT_S24_LE + | SNDRV_PCM_FMTBIT_S16_LE, + }, +}; + +static struct snd_soc_codec_driver soc_dmic = {}; + +static int __devinit dmic_dev_probe(struct platform_device *pdev) +{ + return snd_soc_register_codec(&pdev->dev, + &soc_dmic, &dmic_dai, 1); +} + +static int __devexit dmic_dev_remove(struct platform_device *pdev) +{ + snd_soc_unregister_codec(&pdev->dev); + return 0; +} + +MODULE_ALIAS("platform:dmic-codec"); + +static struct platform_driver dmic_driver = { + .driver = { + .name = "dmic-codec", + .owner = THIS_MODULE, + }, + .probe = dmic_dev_probe, + .remove = __devexit_p(dmic_dev_remove), +}; + +static int __init dmic_init(void) +{ + return platform_driver_register(&dmic_driver); +} +module_init(dmic_init); + +static void __exit dmic_exit(void) +{ + platform_driver_unregister(&dmic_driver); +} +module_exit(dmic_exit); + +MODULE_DESCRIPTION("Generic DMIC driver"); +MODULE_AUTHOR("Liam Girdwood "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c index 16253ec9b022..f7cd346fd727 100644 --- a/sound/soc/codecs/jz4740.c +++ b/sound/soc/codecs/jz4740.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #define JZ4740_REG_CODEC_1 0x0 @@ -266,7 +265,7 @@ static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: /* The only way to clear the suspend flag is to reset the codec */ - if (codec->bias_level == SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) jz4740_codec_wakeup(codec); mask = JZ4740_CODEC_1_VREF_DISABLE | @@ -288,23 +287,25 @@ static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } static int jz4740_codec_dev_probe(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_update_bits(codec, JZ4740_REG_CODEC_1, JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE); snd_soc_add_controls(codec, jz4740_codec_controls, 
ARRAY_SIZE(jz4740_codec_controls)); - snd_soc_dapm_new_controls(codec, jz4740_codec_dapm_widgets, + snd_soc_dapm_new_controls(dapm, jz4740_codec_dapm_widgets, ARRAY_SIZE(jz4740_codec_dapm_widgets)); - snd_soc_dapm_add_routes(codec, jz4740_codec_dapm_routes, + snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes, ARRAY_SIZE(jz4740_codec_dapm_routes)); snd_soc_dapm_new_widgets(codec); diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index 6447dbb2f123..89498f9ad2e5 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -1229,15 +1228,17 @@ static const struct snd_soc_dapm_route audio_map[] = { static int max98088_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, max98088_dapm_widgets, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_new_controls(dapm, max98088_dapm_widgets, ARRAY_SIZE(max98088_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); snd_soc_add_controls(codec, max98088_snd_controls, ARRAY_SIZE(max98088_snd_controls)); - snd_soc_dapm_new_widgets(codec); + snd_soc_dapm_new_widgets(dapm); return 0; } @@ -1622,7 +1623,7 @@ static int max98088_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) max98088_sync_cache(codec); snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN, @@ -1635,7 +1636,7 @@ static int max98088_set_bias_level(struct snd_soc_codec *codec, codec->cache_sync = 1; break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1957,7 +1958,7 @@ static int max98088_probe(struct snd_soc_codec *codec) return ret; } - /* initalize private data */ + /* initialize private data */ max98088->sysclk = (unsigned)-1; max98088->eq_textcnt = 0; diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 6f38d619bf8a..2727befd158e 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include "ssm2602.h" @@ -207,10 +206,11 @@ static const struct snd_soc_dapm_route audio_conn[] = { static int ssm2602_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, ssm2602_dapm_widgets, - ARRAY_SIZE(ssm2602_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_conn, ARRAY_SIZE(audio_conn)); + snd_soc_dapm_new_controls(dapm, ssm2602_dapm_widgets, + ARRAY_SIZE(ssm2602_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_conn, ARRAY_SIZE(audio_conn)); return 0; } @@ -493,7 +493,7 @@ static int ssm2602_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c index 061f9e5a497b..78b2b50271e2 100644 --- a/sound/soc/codecs/stac9766.c +++ b/sound/soc/codecs/stac9766.c @@ -236,7 +236,7 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec, stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c index e8652b1ae326..54a30ef0ec8b 100644 --- a/sound/soc/codecs/tlv320aic23.c +++ b/sound/soc/codecs/tlv320aic23.c @@ 
-30,7 +30,6 @@ #include #include #include -#include #include #include @@ -391,11 +390,12 @@ static int set_sample_rate_control(struct snd_soc_codec *codec, int mclk, static int tlv320aic23_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets, - ARRAY_SIZE(tlv320aic23_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets, + ARRAY_SIZE(tlv320aic23_dapm_widgets)); /* set up audio path interconnects */ - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -574,7 +574,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec, tlv320aic23_write(codec, TLV320AIC23_PWR, 0xffff); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c index 6b7d71ec0004..e2a7608d3944 100644 --- a/sound/soc/codecs/tlv320aic26.c +++ b/sound/soc/codecs/tlv320aic26.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include "tlv320aic26.h" @@ -31,7 +30,6 @@ MODULE_LICENSE("GPL"); struct aic26 { struct spi_device *spi; struct snd_soc_codec codec; - u16 reg_cache[AIC26_NUM_REGS]; /* shadow registers */ int master; int datfm; int mclk; @@ -355,7 +353,6 @@ static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set); */ static int aic26_probe(struct snd_soc_codec *codec) { - struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec); int ret, err, i, reg; dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n"); @@ -373,7 +370,7 @@ static int aic26_probe(struct snd_soc_codec *codec) aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg); /* Fill register cache */ - for (i = 0; i < ARRAY_SIZE(aic26->reg_cache); i++) + for (i = 0; i < codec->driver->reg_cache_size; i++) aic26_reg_read(codec, i); /* Register the sysfs files for debugging */ diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 77b8f9ae29be..3bedab26892f 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include #include @@ -61,6 +60,8 @@ static const char *aic3x_supply_names[AIC3X_NUM_SUPPLIES] = { "DRVDD", /* ADC Analog and Output Driver Voltage */ }; +static LIST_HEAD(reset_list); + struct aic3x_priv; struct aic3x_disable_nb { @@ -77,6 +78,7 @@ struct aic3x_priv { struct aic3x_setup_data *setup; void *control_data; unsigned int sysclk; + struct list_head list; int master; int gpio_reset; int power; @@ -183,7 +185,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol, if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) { /* find dapm widget path assoc with kcontrol */ - list_for_each_entry(path, &widget->codec->dapm_paths, list) { + list_for_each_entry(path, &widget->dapm->card->paths, list) { if (path->kcontrol != kcontrol) continue; @@ -199,7 +201,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol, } if (found) - snd_soc_dapm_sync(widget->codec); + snd_soc_dapm_sync(widget->dapm); } ret = snd_soc_update_bits(widget->codec, reg, val_mask, val); @@ -788,17 +790,19 @@ static const struct snd_soc_dapm_route intercon_3007[] = { static int aic3x_add_widgets(struct snd_soc_codec *codec) { struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, 
aic3x_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets, ARRAY_SIZE(aic3x_dapm_widgets)); /* set up audio path interconnects */ - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); if (aic3x->model == AIC3X_MODEL_3007) { - snd_soc_dapm_new_controls(codec, aic3007_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets, ARRAY_SIZE(aic3007_dapm_widgets)); - snd_soc_dapm_add_routes(codec, intercon_3007, ARRAY_SIZE(intercon_3007)); + snd_soc_dapm_add_routes(dapm, intercon_3007, + ARRAY_SIZE(intercon_3007)); } return 0; @@ -1075,7 +1079,7 @@ static int aic3x_regulator_event(struct notifier_block *nb, * Put codec to reset and require cache sync as at least one * of the supplies was disabled */ - if (aic3x->gpio_reset >= 0) + if (gpio_is_valid(aic3x->gpio_reset)) gpio_set_value(aic3x->gpio_reset, 0); aic3x->codec->cache_sync = 1; } @@ -1102,7 +1106,7 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power) if (!codec->cache_sync) goto out; - if (aic3x->gpio_reset >= 0) { + if (gpio_is_valid(aic3x->gpio_reset)) { udelay(1); gpio_set_value(aic3x->gpio_reset, 1); } @@ -1135,7 +1139,7 @@ static int aic3x_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: - if (codec->bias_level == SND_SOC_BIAS_STANDBY && + if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY && aic3x->master) { /* enable pll */ reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG); @@ -1146,7 +1150,7 @@ static int aic3x_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: if (!aic3x->power) aic3x_set_power(codec, 1); - if (codec->bias_level == SND_SOC_BIAS_PREPARE && + if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE && aic3x->master) { /* disable pll */ reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG); @@ -1159,7 +1163,7 @@ static int aic3x_set_bias_level(struct snd_soc_codec *codec, aic3x_set_power(codec, 0); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1344,14 +1348,28 @@ static int aic3x_init(struct snd_soc_codec *codec) return 0; } +static bool aic3x_is_shared_reset(struct aic3x_priv *aic3x) +{ + struct aic3x_priv *a; + + list_for_each_entry(a, &reset_list, list) { + if (gpio_is_valid(aic3x->gpio_reset) && + aic3x->gpio_reset == a->gpio_reset) + return true; + } + + return false; +} + static int aic3x_probe(struct snd_soc_codec *codec) { struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec); int ret, i; + INIT_LIST_HEAD(&aic3x->list); codec->control_data = aic3x->control_data; aic3x->codec = codec; - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type); if (ret != 0) { @@ -1359,7 +1377,8 @@ static int aic3x_probe(struct snd_soc_codec *codec) return ret; } - if (aic3x->gpio_reset >= 0) { + if (gpio_is_valid(aic3x->gpio_reset) && + !aic3x_is_shared_reset(aic3x)) { ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset"); if (ret != 0) goto err_gpio; @@ -1405,6 +1424,7 @@ static int aic3x_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, &aic3x_classd_amp_gain_ctrl, 1); aic3x_add_widgets(codec); + list_add(&aic3x->list, &reset_list); return 0; @@ -1414,10 +1434,10 @@ err_notif: &aic3x->disable_nb[i].nb); regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies); err_get: - if (aic3x->gpio_reset >= 0) + if (gpio_is_valid(aic3x->gpio_reset) && + !aic3x_is_shared_reset(aic3x)) gpio_free(aic3x->gpio_reset); err_gpio: 
- kfree(aic3x); return ret; } @@ -1427,7 +1447,9 @@ static int aic3x_remove(struct snd_soc_codec *codec) int i; aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF); - if (aic3x->gpio_reset >= 0) { + list_del(&aic3x->list); + if (gpio_is_valid(aic3x->gpio_reset) && + !aic3x_is_shared_reset(aic3x)) { gpio_set_value(aic3x->gpio_reset, 0); gpio_free(aic3x->gpio_reset); } @@ -1523,21 +1545,6 @@ static struct i2c_driver aic3x_i2c_driver = { .remove = aic3x_i2c_remove, .id_table = aic3x_i2c_id, }; - -static inline void aic3x_i2c_init(void) -{ - int ret; - - ret = i2c_add_driver(&aic3x_i2c_driver); - if (ret) - printk(KERN_ERR "%s: error regsitering i2c driver, %d\n", - __func__, ret); -} - -static inline void aic3x_i2c_exit(void) -{ - i2c_del_driver(&aic3x_i2c_driver); -} #endif static int __init aic3x_modinit(void) diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c index c5ab8c805771..71d7be8ac488 100644 --- a/sound/soc/codecs/tlv320dac33.c +++ b/sound/soc/codecs/tlv320dac33.c @@ -36,21 +36,21 @@ #include #include #include -#include #include #include #include #include "tlv320dac33.h" -#define DAC33_BUFFER_SIZE_BYTES 24576 /* bytes, 12288 16 bit words, - * 6144 stereo */ -#define DAC33_BUFFER_SIZE_SAMPLES 6144 - -#define NSAMPLE_MAX 5700 - -#define MODE7_LTHR 10 -#define MODE7_UTHR (DAC33_BUFFER_SIZE_SAMPLES - 10) +/* + * The internal FIFO is 24576 bytes long + * It can be configured to hold 16bit or 24bit samples + * In 16bit configuration the FIFO can hold 6144 stereo samples + * In 24bit configuration the FIFO can hold 4096 stereo samples + */ +#define DAC33_FIFO_SIZE_16BIT 6144 +#define DAC33_FIFO_SIZE_24BIT 4096 +#define DAC33_MODE7_MARGIN 10 /* Safety margin for FIFO in Mode7 */ #define BURST_BASEFREQ_HZ 49152000 @@ -100,16 +100,11 @@ struct tlv320dac33_priv { unsigned int refclk; unsigned int alarm_threshold; /* set to be half of LATENCY_TIME_MS */ - unsigned int nsample_min; /* nsample should not be lower than - * this */ - unsigned int nsample_max; /* nsample should not be higher than - * this */ enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */ + unsigned int fifo_size; /* Size of the FIFO in samples */ unsigned int nsample; /* burst read amount from host */ int mode1_latency; /* latency caused by the i2c writes in * us */ - int auto_fifo_config; /* Configure the FIFO based on the - * period size */ u8 burst_bclkdiv; /* BCLK divider value in burst mode */ unsigned int burst_rate; /* Interface speed in Burst modes */ @@ -303,7 +298,6 @@ static void dac33_init_chip(struct snd_soc_codec *codec) if (unlikely(!dac33->chip_power)) return; - /* 44-46: DAC Control Registers */ /* A : DAC sample rate Fsref/1.5 */ dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0)); /* B : DAC src=normal, not muted */ @@ -316,8 +310,6 @@ static void dac33_init_chip(struct snd_soc_codec *codec) clock source = internal osc (?) 
*/ dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN); - dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB); - /* Restore only selected registers (gains mostly) */ dac33_write(codec, DAC33_LDAC_DIG_VOL_CTRL, dac33_read_reg_cache(codec, DAC33_LDAC_DIG_VOL_CTRL)); @@ -328,6 +320,10 @@ static void dac33_init_chip(struct snd_soc_codec *codec) dac33_read_reg_cache(codec, DAC33_LINEL_TO_LLO_VOL)); dac33_write(codec, DAC33_LINER_TO_RLO_VOL, dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL)); + + dac33_write(codec, DAC33_OUT_AMP_CTRL, + dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL)); + } static inline int dac33_read_id(struct snd_soc_codec *codec) @@ -357,6 +353,21 @@ static inline void dac33_soft_power(struct snd_soc_codec *codec, int power) dac33_write(codec, DAC33_PWR_CTRL, reg); } +static inline void dac33_disable_digital(struct snd_soc_codec *codec) +{ + u8 reg; + + /* Stop the DAI clock */ + reg = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B); + reg &= ~DAC33_BCLKON; + dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, reg); + + /* Power down the Oscillator, and DACs */ + reg = dac33_read_reg_cache(codec, DAC33_PWR_CTRL); + reg &= ~(DAC33_OSCPDNB | DAC33_DACRPDNB | DAC33_DACLPDNB); + dac33_write(codec, DAC33_PWR_CTRL, reg); +} + static int dac33_hard_power(struct snd_soc_codec *codec, int power) { struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); @@ -405,7 +416,7 @@ exit: return ret; } -static int playback_event(struct snd_soc_dapm_widget *w, +static int dac33_playback_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec); @@ -417,77 +428,13 @@ static int playback_event(struct snd_soc_dapm_widget *w, dac33_prepare_chip(dac33->substream); } break; + case SND_SOC_DAPM_POST_PMD: + dac33_disable_digital(w->codec); + break; } return 0; } -static int dac33_get_nsample(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = dac33->nsample; - - return 0; -} - -static int dac33_set_nsample(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); - int ret = 0; - - if (dac33->nsample == ucontrol->value.integer.value[0]) - return 0; - - if (ucontrol->value.integer.value[0] < dac33->nsample_min || - ucontrol->value.integer.value[0] > dac33->nsample_max) { - ret = -EINVAL; - } else { - dac33->nsample = ucontrol->value.integer.value[0]; - /* Re calculate the burst time */ - dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate, - dac33->nsample); - } - - return ret; -} - -static int dac33_get_uthr(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = dac33->uthr; - - return 0; -} - -static int dac33_set_uthr(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); - int ret = 0; - - if (dac33->substream) - return -EBUSY; - - if (dac33->uthr == ucontrol->value.integer.value[0]) - return 0; - - if 
(ucontrol->value.integer.value[0] < (MODE7_LTHR + 10) || - ucontrol->value.integer.value[0] > MODE7_UTHR) - ret = -EINVAL; - else - dac33->uthr = ucontrol->value.integer.value[0]; - - return ret; -} - static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -572,13 +519,6 @@ static const struct snd_kcontrol_new dac33_mode_snd_controls[] = { dac33_get_fifo_mode, dac33_set_fifo_mode), }; -static const struct snd_kcontrol_new dac33_fifo_snd_controls[] = { - SOC_SINGLE_EXT("nSample", 0, 0, 5900, 0, - dac33_get_nsample, dac33_set_nsample), - SOC_SINGLE_EXT("UTHR", 0, 0, MODE7_UTHR, 0, - dac33_get_uthr, dac33_set_uthr), -}; - /* Analog bypass */ static const struct snd_kcontrol_new dac33_dapm_abypassl_control = SOC_DAPM_SINGLE("Switch", DAC33_LINEL_TO_LLO_VOL, 7, 1, 1); @@ -586,6 +526,25 @@ static const struct snd_kcontrol_new dac33_dapm_abypassl_control = static const struct snd_kcontrol_new dac33_dapm_abypassr_control = SOC_DAPM_SINGLE("Switch", DAC33_LINER_TO_RLO_VOL, 7, 1, 1); +/* LOP L/R invert selection */ +static const char *dac33_lr_lom_texts[] = {"DAC", "LOP"}; + +static const struct soc_enum dac33_left_lom_enum = + SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 3, + ARRAY_SIZE(dac33_lr_lom_texts), + dac33_lr_lom_texts); + +static const struct snd_kcontrol_new dac33_dapm_left_lom_control = +SOC_DAPM_ENUM("Route", dac33_left_lom_enum); + +static const struct soc_enum dac33_right_lom_enum = + SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 2, + ARRAY_SIZE(dac33_lr_lom_texts), + dac33_lr_lom_texts); + +static const struct snd_kcontrol_new dac33_dapm_right_lom_control = +SOC_DAPM_ENUM("Route", dac33_right_lom_enum); + static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = { SND_SOC_DAPM_OUTPUT("LEFT_LO"), SND_SOC_DAPM_OUTPUT("RIGHT_LO"), @@ -593,8 +552,8 @@ static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = { SND_SOC_DAPM_INPUT("LINEL"), SND_SOC_DAPM_INPUT("LINER"), - SND_SOC_DAPM_DAC("DACL", "Left Playback", DAC33_LDAC_PWR_CTRL, 2, 0), - SND_SOC_DAPM_DAC("DACR", "Right Playback", DAC33_RDAC_PWR_CTRL, 2, 0), + SND_SOC_DAPM_DAC("DACL", "Left Playback", SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_DAC("DACR", "Right Playback", SND_SOC_NOPM, 0, 0), /* Analog bypass */ SND_SOC_DAPM_SWITCH("Analog Left Bypass", SND_SOC_NOPM, 0, 0, @@ -602,12 +561,30 @@ static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = { SND_SOC_DAPM_SWITCH("Analog Right Bypass", SND_SOC_NOPM, 0, 0, &dac33_dapm_abypassr_control), - SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amp Power", + SND_SOC_DAPM_MUX("Left LOM Inverted From", SND_SOC_NOPM, 0, 0, + &dac33_dapm_left_lom_control), + SND_SOC_DAPM_MUX("Right LOM Inverted From", SND_SOC_NOPM, 0, 0, + &dac33_dapm_right_lom_control), + /* + * For DAPM path, when only the anlog bypass path is enabled, and the + * LOP inverted from the corresponding DAC side. + * This is needed, so we can attach the DAC power supply in this case. 
+ */ + SND_SOC_DAPM_PGA("Left Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("Right Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0), + + SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amplifier", DAC33_OUT_AMP_PWR_CTRL, 6, 3, 3, 0), - SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amp Power", + SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amplifier", DAC33_OUT_AMP_PWR_CTRL, 4, 3, 3, 0), - SND_SOC_DAPM_PRE("Prepare Playback", playback_event), + SND_SOC_DAPM_SUPPLY("Left DAC Power", + DAC33_LDAC_PWR_CTRL, 2, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Right DAC Power", + DAC33_RDAC_PWR_CTRL, 2, 0, NULL, 0), + + SND_SOC_DAPM_PRE("Pre Playback", dac33_playback_event), + SND_SOC_DAPM_POST("Post Playback", dac33_playback_event), }; static const struct snd_soc_dapm_route audio_map[] = { @@ -615,24 +592,39 @@ static const struct snd_soc_dapm_route audio_map[] = { {"Analog Left Bypass", "Switch", "LINEL"}, {"Analog Right Bypass", "Switch", "LINER"}, - {"Output Left Amp Power", NULL, "DACL"}, - {"Output Right Amp Power", NULL, "DACR"}, + {"Output Left Amplifier", NULL, "DACL"}, + {"Output Right Amplifier", NULL, "DACR"}, - {"Output Left Amp Power", NULL, "Analog Left Bypass"}, - {"Output Right Amp Power", NULL, "Analog Right Bypass"}, + {"Left Bypass PGA", NULL, "Analog Left Bypass"}, + {"Right Bypass PGA", NULL, "Analog Right Bypass"}, + + {"Left LOM Inverted From", "DAC", "Left Bypass PGA"}, + {"Right LOM Inverted From", "DAC", "Right Bypass PGA"}, + {"Left LOM Inverted From", "LOP", "Analog Left Bypass"}, + {"Right LOM Inverted From", "LOP", "Analog Right Bypass"}, + + {"Output Left Amplifier", NULL, "Left LOM Inverted From"}, + {"Output Right Amplifier", NULL, "Right LOM Inverted From"}, + + {"DACL", NULL, "Left DAC Power"}, + {"DACR", NULL, "Right DAC Power"}, + + {"Left Bypass PGA", NULL, "Left DAC Power"}, + {"Right Bypass PGA", NULL, "Right DAC Power"}, /* output */ - {"LEFT_LO", NULL, "Output Left Amp Power"}, - {"RIGHT_LO", NULL, "Output Right Amp Power"}, + {"LEFT_LO", NULL, "Output Left Amplifier"}, + {"RIGHT_LO", NULL, "Output Right Amplifier"}, }; static int dac33_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, dac33_dapm_widgets, - ARRAY_SIZE(dac33_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_dapm_new_controls(dapm, dac33_dapm_widgets, + ARRAY_SIZE(dac33_dapm_widgets)); /* set up audio path interconnects */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -640,16 +632,18 @@ static int dac33_add_widgets(struct snd_soc_codec *codec) static int dac33_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { + struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); int ret; switch (level) { case SND_SOC_BIAS_ON: - dac33_soft_power(codec, 1); + if (!dac33->substream) + dac33_soft_power(codec, 1); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Coming from OFF, switch on the codec */ ret = dac33_hard_power(codec, 1); if (ret != 0) @@ -660,14 +654,14 @@ static int dac33_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_OFF: /* Do not power off, when the codec is already off */ - if (codec->bias_level == SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) return 0; ret = dac33_hard_power(codec, 0); if (ret != 0) return ret; break; } - 
codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -705,7 +699,7 @@ static inline void dac33_prefill_handler(struct tlv320dac33_priv *dac33) spin_unlock_irq(&dac33->lock); dac33_write16(codec, DAC33_PREFILL_MSB, - DAC33_THRREG(MODE7_LTHR)); + DAC33_THRREG(DAC33_MODE7_MARGIN)); /* Enable Upper Threshold IRQ */ dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MUT); @@ -815,6 +809,8 @@ static int dac33_startup(struct snd_pcm_substream *substream, /* Stream started, save the substream pointer */ dac33->substream = substream; + snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24); + return 0; } @@ -826,18 +822,17 @@ static void dac33_shutdown(struct snd_pcm_substream *substream, struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); dac33->substream = NULL; - - /* Reset the nSample restrictions */ - dac33->nsample_min = 0; - dac33->nsample_max = NSAMPLE_MAX; } +#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \ + (BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample) static int dac33_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; + struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); /* Check parameters for validity */ switch (params_rate(params)) { @@ -852,6 +847,12 @@ static int dac33_hw_params(struct snd_pcm_substream *substream, switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: + dac33->fifo_size = DAC33_FIFO_SIZE_16BIT; + dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 32); + break; + case SNDRV_PCM_FORMAT_S32_LE: + dac33->fifo_size = DAC33_FIFO_SIZE_24BIT; + dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 64); break; default: dev_err(codec->dev, "unsupported format %d\n", @@ -906,6 +907,9 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream) aictrl_a |= (DAC33_NCYCL_16 | DAC33_WLEN_16); fifoctrl_a |= DAC33_WIDTH; break; + case SNDRV_PCM_FORMAT_S32_LE: + aictrl_a |= (DAC33_NCYCL_32 | DAC33_WLEN_24); + break; default: dev_err(codec->dev, "unsupported format %d\n", substream->runtime->format); @@ -1040,7 +1044,10 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream) dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, dac33->burst_bclkdiv); else - dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32); + if (substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE) + dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32); + else + dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 16); switch (dac33->fifo_mode) { case DAC33_FIFO_MODE1: @@ -1053,7 +1060,8 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream) * at the bottom, and also at the top of the FIFO */ dac33_write16(codec, DAC33_UTHR_MSB, DAC33_THRREG(dac33->uthr)); - dac33_write16(codec, DAC33_LTHR_MSB, DAC33_THRREG(MODE7_LTHR)); + dac33_write16(codec, DAC33_LTHR_MSB, + DAC33_THRREG(DAC33_MODE7_MARGIN)); break; default: break; @@ -1082,42 +1090,21 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream) /* Number of samples under i2c latency */ dac33->alarm_threshold = US_TO_SAMPLES(rate, dac33->mode1_latency); - nsample_limit = DAC33_BUFFER_SIZE_SAMPLES - - dac33->alarm_threshold; - - if (dac33->auto_fifo_config) { - if (period_size <= dac33->alarm_threshold) - /* - * Configure nSamaple to number of periods, - * which covers the latency requironment. 
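The fifo_size/burst_rate rework above is easier to follow with the numbers written out: the 24576-byte FIFO holds 6144 stereo samples at 16 bits and 4096 at 24 bits, and CALC_BURST_RATE() is just the 49.152 MHz burst base clock divided by the BCLK divider and by the BCLKs per stereo sample (32 for S16_LE, 64 for S32_LE, per the hw_params hunk). A stand-alone check (user-space C; the bclkdiv of 8 is a hypothetical platform value):

#include <stdio.h>

#define FIFO_BYTES              24576
#define BURST_BASEFREQ_HZ       49152000

/* BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample, as in CALC_BURST_RATE() */
static unsigned int burst_rate(unsigned int bclkdiv, unsigned int bclk_per_sample)
{
        return BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample;
}

int main(void)
{
        /* 16-bit stereo: 2 channels * 2 bytes -> 6144 samples */
        printf("16-bit fifo: %d samples\n", FIFO_BYTES / (2 * 2));
        /* 24-bit samples stored in 3 bytes: 2 channels * 3 bytes -> 4096 samples */
        printf("24-bit fifo: %d samples\n", FIFO_BYTES / (2 * 3));

        /* example burst rates for a hypothetical bclkdiv of 8 */
        printf("burst rate, 16-bit slots: %u Hz\n", burst_rate(8, 32));
        printf("burst rate, 32-bit slots: %u Hz\n", burst_rate(8, 64));
        return 0;
}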
- */ - dac33->nsample = period_size * - ((dac33->alarm_threshold / period_size) + - (dac33->alarm_threshold % period_size ? - 1 : 0)); - else if (period_size > nsample_limit) - dac33->nsample = nsample_limit; - else - dac33->nsample = period_size; - } else { - /* nSample time shall not be shorter than i2c latency */ - dac33->nsample_min = dac33->alarm_threshold; + nsample_limit = dac33->fifo_size - dac33->alarm_threshold; + + if (period_size <= dac33->alarm_threshold) /* - * nSample should not be bigger than alsa buffer minus - * size of one period to avoid overruns + * Configure nSamaple to number of periods, + * which covers the latency requironment. */ - dac33->nsample_max = substream->runtime->buffer_size - - period_size; - - if (dac33->nsample_max > nsample_limit) - dac33->nsample_max = nsample_limit; - - /* Correct the nSample if it is outside of the ranges */ - if (dac33->nsample < dac33->nsample_min) - dac33->nsample = dac33->nsample_min; - if (dac33->nsample > dac33->nsample_max) - dac33->nsample = dac33->nsample_max; - } + dac33->nsample = period_size * + ((dac33->alarm_threshold / period_size) + + (dac33->alarm_threshold % period_size ? + 1 : 0)); + else if (period_size > nsample_limit) + dac33->nsample = nsample_limit; + else + dac33->nsample = period_size; dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate, dac33->nsample); @@ -1125,19 +1112,16 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream) dac33->t_stamp2 = 0; break; case DAC33_FIFO_MODE7: - if (dac33->auto_fifo_config) { - dac33->uthr = UTHR_FROM_PERIOD_SIZE( - period_size, - rate, - dac33->burst_rate) + 9; - if (dac33->uthr > MODE7_UTHR) - dac33->uthr = MODE7_UTHR; - if (dac33->uthr < (MODE7_LTHR + 10)) - dac33->uthr = (MODE7_LTHR + 10); - } + dac33->uthr = UTHR_FROM_PERIOD_SIZE(period_size, rate, + dac33->burst_rate) + 9; + if (dac33->uthr > (dac33->fifo_size - DAC33_MODE7_MARGIN)) + dac33->uthr = dac33->fifo_size - DAC33_MODE7_MARGIN; + if (dac33->uthr < (DAC33_MODE7_MARGIN + 10)) + dac33->uthr = (DAC33_MODE7_MARGIN + 10); + dac33->mode7_us_to_lthr = SAMPLES_TO_US(substream->runtime->rate, - dac33->uthr - MODE7_LTHR + 1); + dac33->uthr - DAC33_MODE7_MARGIN + 1); dac33->t_stamp1 = 0; break; default: @@ -1255,8 +1239,8 @@ static snd_pcm_sframes_t dac33_dai_delay( samples += (samples_in - samples_out); if (likely(samples > 0)) - delay = samples > DAC33_BUFFER_SIZE_SAMPLES ? - DAC33_BUFFER_SIZE_SAMPLES : samples; + delay = samples > dac33->fifo_size ? 
+ dac33->fifo_size : samples; else delay = 0; } @@ -1308,7 +1292,7 @@ static snd_pcm_sframes_t dac33_dai_delay( samples_in = US_TO_SAMPLES( dac33->burst_rate, time_delta); - delay = MODE7_LTHR + samples_in - samples_out; + delay = DAC33_MODE7_MARGIN + samples_in - samples_out; if (unlikely(delay > uthr)) delay = uthr; @@ -1415,7 +1399,7 @@ static int dac33_soc_probe(struct snd_soc_codec *codec) codec->control_data = dac33->control_data; codec->hw_write = (hw_write_t) i2c_master_send; - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; dac33->codec = codec; /* Read the tlv320dac33 ID registers */ @@ -1459,14 +1443,10 @@ static int dac33_soc_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, dac33_snd_controls, ARRAY_SIZE(dac33_snd_controls)); /* Only add the FIFO controls, if we have valid IRQ number */ - if (dac33->irq >= 0) { + if (dac33->irq >= 0) snd_soc_add_controls(codec, dac33_mode_snd_controls, ARRAY_SIZE(dac33_mode_snd_controls)); - /* FIFO usage controls only, if autoio config is not selected */ - if (!dac33->auto_fifo_config) - snd_soc_add_controls(codec, dac33_fifo_snd_controls, - ARRAY_SIZE(dac33_fifo_snd_controls)); - } + dac33_add_widgets(codec); err_power: @@ -1515,7 +1495,7 @@ static struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = { #define DAC33_RATES (SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000) -#define DAC33_FORMATS SNDRV_PCM_FMTBIT_S16_LE +#define DAC33_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_ops dac33_dai_ops = { .startup = dac33_startup, @@ -1563,17 +1543,11 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client, dac33->power_gpio = pdata->power_gpio; dac33->burst_bclkdiv = pdata->burst_bclkdiv; - /* Pre calculate the burst rate */ - dac33->burst_rate = BURST_BASEFREQ_HZ / dac33->burst_bclkdiv / 32; dac33->keep_bclk = pdata->keep_bclk; - dac33->auto_fifo_config = pdata->auto_fifo_config; dac33->mode1_latency = pdata->mode1_latency; if (!dac33->mode1_latency) dac33->mode1_latency = 10000; /* 10ms */ dac33->irq = client->irq; - dac33->nsample = NSAMPLE_MAX; - dac33->nsample_max = NSAMPLE_MAX; - dac33->uthr = MODE7_UTHR; /* Disable FIFO use by default */ dac33->fifo_mode = DAC33_FIFO_BYPASS; diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c index d2c243095673..1f1ac8110bef 100644 --- a/sound/soc/codecs/tpa6130a2.c +++ b/sound/soc/codecs/tpa6130a2.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include "tpa6130a2.h" @@ -42,7 +41,7 @@ struct tpa6130a2_data { unsigned char regs[TPA6130A2_CACHEREGNUM]; struct regulator *supply; int power_gpio; - unsigned char power_state; + u8 power_state:1; enum tpa_model id; }; @@ -117,7 +116,7 @@ static int tpa6130a2_initialize(void) return ret; } -static int tpa6130a2_power(int power) +static int tpa6130a2_power(u8 power) { struct tpa6130a2_data *data; u8 val; @@ -127,17 +126,19 @@ static int tpa6130a2_power(int power) data = i2c_get_clientdata(tpa6130a2_client); mutex_lock(&data->mutex); - if (power && !data->power_state) { - /* Power on */ - if (data->power_gpio >= 0) - gpio_set_value(data->power_gpio, 1); + if (power == data->power_state) + goto exit; + if (power) { ret = regulator_enable(data->supply); if (ret != 0) { dev_err(&tpa6130a2_client->dev, "Failed to enable supply: %d\n", ret); goto exit; } + /* Power on */ + if (data->power_gpio >= 0) + gpio_set_value(data->power_gpio, 1); data->power_state = 1; ret = tpa6130a2_initialize(); @@ -150,12 +151,7 @@ static int tpa6130a2_power(int power) 
data->power_state = 0; goto exit; } - - /* Clear SWS */ - val = tpa6130a2_read(TPA6130A2_REG_CONTROL); - val &= ~TPA6130A2_SWS; - tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); - } else if (!power && data->power_state) { + } else { /* set SWS */ val = tpa6130a2_read(TPA6130A2_REG_CONTROL); val |= TPA6130A2_SWS; @@ -300,6 +296,7 @@ static void tpa6130a2_channel_enable(u8 channel, int enable) /* Enable amplifier */ val = tpa6130a2_read(TPA6130A2_REG_CONTROL); val |= channel; + val &= ~TPA6130A2_SWS; tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); /* Unmute channel */ @@ -320,72 +317,24 @@ static void tpa6130a2_channel_enable(u8 channel, int enable) } } -static int tpa6130a2_left_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - switch (event) { - case SND_SOC_DAPM_POST_PMU: - tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 1); - break; - case SND_SOC_DAPM_POST_PMD: - tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 0); - break; - } - return 0; -} - -static int tpa6130a2_right_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - switch (event) { - case SND_SOC_DAPM_POST_PMU: - tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 1); - break; - case SND_SOC_DAPM_POST_PMD: - tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 0); - break; - } - return 0; -} - -static int tpa6130a2_supply_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) +int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable) { int ret = 0; - - switch (event) { - case SND_SOC_DAPM_POST_PMU: + if (enable) { ret = tpa6130a2_power(1); - break; - case SND_SOC_DAPM_POST_PMD: + if (ret < 0) + return ret; + tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L, + 1); + } else { + tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L, + 0); ret = tpa6130a2_power(0); - break; } + return ret; } - -static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = { - SND_SOC_DAPM_PGA_E("TPA6130A2 Left", SND_SOC_NOPM, - 0, 0, NULL, 0, tpa6130a2_left_event, - SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("TPA6130A2 Right", SND_SOC_NOPM, - 0, 0, NULL, 0, tpa6130a2_right_event, - SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_SUPPLY("TPA6130A2 Enable", SND_SOC_NOPM, - 0, 0, tpa6130a2_supply_event, - SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), - /* Outputs */ - SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Left"), - SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Right"), -}; - -static const struct snd_soc_dapm_route audio_map[] = { - {"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Left"}, - {"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Right"}, - - {"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Enable"}, - {"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Enable"}, -}; +EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable); int tpa6130a2_add_controls(struct snd_soc_codec *codec) { @@ -396,18 +345,12 @@ int tpa6130a2_add_controls(struct snd_soc_codec *codec) data = i2c_get_clientdata(tpa6130a2_client); - snd_soc_dapm_new_controls(codec, tpa6130a2_dapm_widgets, - ARRAY_SIZE(tpa6130a2_dapm_widgets)); - - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); - if (data->id == TPA6140A2) return snd_soc_add_controls(codec, tpa6140a2_controls, ARRAY_SIZE(tpa6140a2_controls)); else return snd_soc_add_controls(codec, tpa6130a2_controls, ARRAY_SIZE(tpa6130a2_controls)); - } EXPORT_SYMBOL_GPL(tpa6130a2_add_controls); diff --git a/sound/soc/codecs/tpa6130a2.h b/sound/soc/codecs/tpa6130a2.h index 57e867fd86d1..5df49c8756b2 
100644 --- a/sound/soc/codecs/tpa6130a2.h +++ b/sound/soc/codecs/tpa6130a2.h @@ -57,5 +57,6 @@ #define TPA6130A2_VERSION_MASK (0x0f) extern int tpa6130a2_add_controls(struct snd_soc_codec *codec); +extern int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable); #endif /* __TPA6130A2_H__ */ diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index cbebec6ba1ba..e4d464b937d6 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -233,6 +232,16 @@ static int twl4030_write(struct snd_soc_codec *codec, return 0; } +static inline void twl4030_wait_ms(int time) +{ + if (time < 60) { + time *= 1000; + usleep_range(time, time + 500); + } else { + msleep(time); + } +} + static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable) { struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); @@ -338,10 +347,14 @@ static void twl4030_init_chip(struct snd_soc_codec *codec) twl4030_write(codec, TWL4030_REG_ANAMICL, reg | TWL4030_CNCL_OFFSET_START); - /* wait for offset cancellation to complete */ + /* + * Wait for offset cancellation to complete. + * Since this takes a while, do not slam the i2c. + * Start polling the status after ~20ms. + */ + msleep(20); do { - /* this takes a little while, so don't slam i2c */ - udelay(2000); + usleep_range(1000, 2000); twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte, TWL4030_REG_ANAMICL); } while ((i++ < 100) && @@ -725,9 +738,12 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp) /* Base values for ramp delay calculation: 2^19 - 2^26 */ unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864}; + unsigned int delay; hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET); hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET); + delay = (ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] / + twl4030->sysclk) + 1; /* Enable external mute control, this dramatically reduces * the pop-noise */ @@ -751,16 +767,14 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp) hs_pop |= TWL4030_RAMP_EN; twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop); /* Wait ramp delay time + 1, so the VMID can settle */ - mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] / - twl4030->sysclk) + 1); + twl4030_wait_ms(delay); } else { /* Headset ramp-down _not_ according to * the TRM, but in a way that it is working */ hs_pop &= ~TWL4030_RAMP_EN; twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop); /* Wait ramp delay time + 1, so the VMID can settle */ - mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] / - twl4030->sysclk) + 1); + twl4030_wait_ms(delay); /* Bypass the reg_cache to mute the headset */ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, hs_gain & (~0x0f), @@ -835,7 +849,7 @@ static int digimic_event(struct snd_soc_dapm_widget *w, struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec); if (twl4030->digimic_delay) - mdelay(twl4030->digimic_delay); + twl4030_wait_ms(twl4030->digimic_delay); return 0; } @@ -1621,10 +1635,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int twl4030_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, twl4030_dapm_widgets, - ARRAY_SIZE(twl4030_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, twl4030_dapm_widgets, + ARRAY_SIZE(twl4030_dapm_widgets)); + 
snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -1638,14 +1653,14 @@ static int twl4030_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) twl4030_codec_enable(codec, 1); break; case SND_SOC_BIAS_OFF: twl4030_codec_enable(codec, 0); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1709,6 +1724,7 @@ static int twl4030_startup(struct snd_pcm_substream *substream, struct snd_soc_codec *codec = rtd->codec; struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); + snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24); if (twl4030->master_substream) { twl4030->slave_substream = substream; /* The DAI has one configuration for playback and capture, so @@ -1833,7 +1849,7 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream, case SNDRV_PCM_FORMAT_S16_LE: format |= TWL4030_DATA_WIDTH_16S_16W; break; - case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S32_LE: format |= TWL4030_DATA_WIDTH_32S_24W; break; default: @@ -2166,7 +2182,7 @@ static int twl4030_voice_set_tristate(struct snd_soc_dai *dai, int tristate) } #define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000) -#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE) +#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_ops twl4030_dai_hifi_ops = { .startup = twl4030_startup, @@ -2245,7 +2261,7 @@ static int twl4030_soc_probe(struct snd_soc_codec *codec) snd_soc_codec_set_drvdata(codec, twl4030); /* Set the defaults, and power up the codec */ twl4030->sysclk = twl4030_codec_get_mclk() / 1000; - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; twl4030_init_chip(codec); @@ -2257,9 +2273,12 @@ static int twl4030_soc_probe(struct snd_soc_codec *codec) static int twl4030_soc_remove(struct snd_soc_codec *codec) { + struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); + /* Reset registers to their chip default before leaving */ twl4030_reset_registers(codec); twl4030_set_bias_level(codec, SND_SOC_BIAS_OFF); + kfree(twl4030); return 0; } @@ -2291,10 +2310,7 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev) static int __devexit twl4030_codec_remove(struct platform_device *pdev) { - struct twl4030_priv *twl4030 = dev_get_drvdata(&pdev->dev); - snd_soc_unregister_codec(&pdev->dev); - kfree(twl4030); return 0; } diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c index 10f6e5214511..4bbf1b15a493 100644 --- a/sound/soc/codecs/twl6040.c +++ b/sound/soc/codecs/twl6040.c @@ -34,14 +34,46 @@ #include #include #include -#include #include #include #include "twl6040.h" -#define TWL6040_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) -#define TWL6040_FORMATS (SNDRV_PCM_FMTBIT_S32_LE) +#define TWL6040_RATES SNDRV_PCM_RATE_8000_96000 +#define TWL6040_FORMATS (SNDRV_PCM_FMTBIT_S32_LE) + +#define TWL6040_OUTHS_0dB 0x00 +#define TWL6040_OUTHS_M30dB 0x0F +#define TWL6040_OUTHF_0dB 0x03 +#define TWL6040_OUTHF_M52dB 0x1D + +#define TWL6040_RAMP_NONE 0 +#define TWL6040_RAMP_UP 1 +#define TWL6040_RAMP_DOWN 2 + +#define TWL6040_HSL_VOL_MASK 0x0F +#define TWL6040_HSL_VOL_SHIFT 0 +#define TWL6040_HSR_VOL_MASK 0xF0 +#define TWL6040_HSR_VOL_SHIFT 4 +#define TWL6040_HF_VOL_MASK 0x1F +#define TWL6040_HF_VOL_SHIFT 0 + +struct twl6040_output { + u16 active; + u16 left_vol; + u16 right_vol; + u16 left_step; + 
u16 right_step; + unsigned int step_delay; + u16 ramp; + u16 mute; + struct completion ramp_done; +}; + +struct twl6040_jack_data { + struct snd_soc_jack *jack; + int report; +}; /* codec private data */ struct twl6040_data { @@ -53,6 +85,17 @@ struct twl6040_data { unsigned int sysclk; struct snd_pcm_hw_constraint_list *sysclk_constraints; struct completion ready; + struct twl6040_jack_data hs_jack; + struct snd_soc_codec *codec; + struct workqueue_struct *workqueue; + struct delayed_work delayed_work; + struct mutex mutex; + struct twl6040_output headset; + struct twl6040_output handsfree; + struct workqueue_struct *hf_workqueue; + struct workqueue_struct *hs_workqueue; + struct delayed_work hs_delayed_work; + struct delayed_work hf_delayed_work; }; /* @@ -201,7 +244,7 @@ static int twl6040_read_reg_volatile(struct snd_soc_codec *codec, if (reg >= TWL6040_CACHEREGNUM) return -EIO; - twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &value, reg); + twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &value, reg); twl6040_write_reg_cache(codec, reg, value); return value; @@ -217,7 +260,7 @@ static int twl6040_write(struct snd_soc_codec *codec, return -EIO; twl6040_write_reg_cache(codec, reg, value); - return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg); + return twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, value, reg); } static void twl6040_init_vio_regs(struct snd_soc_codec *codec) @@ -254,6 +297,305 @@ static void twl6040_init_vdd_regs(struct snd_soc_codec *codec) } } +/* + * Ramp HS PGA volume to minimise pops at stream startup and shutdown. + */ +static inline int twl6040_hs_ramp_step(struct snd_soc_codec *codec, + unsigned int left_step, unsigned int right_step) +{ + + struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *headset = &priv->headset; + int left_complete = 0, right_complete = 0; + u8 reg, val; + + /* left channel */ + left_step = (left_step > 0xF) ? 0xF : left_step; + reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN); + val = (~reg & TWL6040_HSL_VOL_MASK); + + if (headset->ramp == TWL6040_RAMP_UP) { + /* ramp step up */ + if (val < headset->left_vol) { + val += left_step; + reg &= ~TWL6040_HSL_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HSGAIN, + (reg | (~val & TWL6040_HSL_VOL_MASK))); + } else { + left_complete = 1; + } + } else if (headset->ramp == TWL6040_RAMP_DOWN) { + /* ramp step down */ + if (val > 0x0) { + val -= left_step; + reg &= ~TWL6040_HSL_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HSGAIN, reg | + (~val & TWL6040_HSL_VOL_MASK)); + } else { + left_complete = 1; + } + } + + /* right channel */ + right_step = (right_step > 0xF) ? 0xF : right_step; + reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN); + val = (~reg & TWL6040_HSR_VOL_MASK) >> TWL6040_HSR_VOL_SHIFT; + + if (headset->ramp == TWL6040_RAMP_UP) { + /* ramp step up */ + if (val < headset->right_vol) { + val += right_step; + reg &= ~TWL6040_HSR_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HSGAIN, + (reg | (~val << TWL6040_HSR_VOL_SHIFT))); + } else { + right_complete = 1; + } + } else if (headset->ramp == TWL6040_RAMP_DOWN) { + /* ramp step down */ + if (val > 0x0) { + val -= right_step; + reg &= ~TWL6040_HSR_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HSGAIN, + reg | (~val << TWL6040_HSR_VOL_SHIFT)); + } else { + right_complete = 1; + } + } + + return left_complete & right_complete; +} + +/* + * Ramp HF PGA volume to minimise pops at stream startup and shutdown. 
+ */ +static inline int twl6040_hf_ramp_step(struct snd_soc_codec *codec, + unsigned int left_step, unsigned int right_step) +{ + struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *handsfree = &priv->handsfree; + int left_complete = 0, right_complete = 0; + u16 reg, val; + + /* left channel */ + left_step = (left_step > 0x1D) ? 0x1D : left_step; + reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFLGAIN); + reg = 0x1D - reg; + val = (reg & TWL6040_HF_VOL_MASK); + if (handsfree->ramp == TWL6040_RAMP_UP) { + /* ramp step up */ + if (val < handsfree->left_vol) { + val += left_step; + reg &= ~TWL6040_HF_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HFLGAIN, + reg | (0x1D - val)); + } else { + left_complete = 1; + } + } else if (handsfree->ramp == TWL6040_RAMP_DOWN) { + /* ramp step down */ + if (val > 0) { + val -= left_step; + reg &= ~TWL6040_HF_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HFLGAIN, + reg | (0x1D - val)); + } else { + left_complete = 1; + } + } + + /* right channel */ + right_step = (right_step > 0x1D) ? 0x1D : right_step; + reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFRGAIN); + reg = 0x1D - reg; + val = (reg & TWL6040_HF_VOL_MASK); + if (handsfree->ramp == TWL6040_RAMP_UP) { + /* ramp step up */ + if (val < handsfree->right_vol) { + val += right_step; + reg &= ~TWL6040_HF_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HFRGAIN, + reg | (0x1D - val)); + } else { + right_complete = 1; + } + } else if (handsfree->ramp == TWL6040_RAMP_DOWN) { + /* ramp step down */ + if (val > 0) { + val -= right_step; + reg &= ~TWL6040_HF_VOL_MASK; + twl6040_write(codec, TWL6040_REG_HFRGAIN, + reg | (0x1D - val)); + } + } + + return left_complete & right_complete; +} + +/* + * This work ramps both output PGAs at stream start/stop time to + * minimise pop associated with DAPM power switching. + */ +static void twl6040_pga_hs_work(struct work_struct *work) +{ + struct twl6040_data *priv = + container_of(work, struct twl6040_data, hs_delayed_work.work); + struct snd_soc_codec *codec = priv->codec; + struct twl6040_output *headset = &priv->headset; + unsigned int delay = headset->step_delay; + int i, headset_complete; + + /* do we need to ramp at all ? */ + if (headset->ramp == TWL6040_RAMP_NONE) + return; + + /* HS PGA volumes have 4 bits of resolution to ramp */ + for (i = 0; i <= 16; i++) { + headset_complete = 1; + if (headset->ramp != TWL6040_RAMP_NONE) + headset_complete = twl6040_hs_ramp_step(codec, + headset->left_step, + headset->right_step); + + /* ramp finished ? */ + if (headset_complete) + break; + + /* + * TODO: tune: delay is longer over 0dB + * as increases are larger. + */ + if (i >= 8) + schedule_timeout_interruptible(msecs_to_jiffies(delay + + (delay >> 1))); + else + schedule_timeout_interruptible(msecs_to_jiffies(delay)); + } + + if (headset->ramp == TWL6040_RAMP_DOWN) { + headset->active = 0; + complete(&headset->ramp_done); + } else { + headset->active = 1; + } + headset->ramp = TWL6040_RAMP_NONE; +} + +static void twl6040_pga_hf_work(struct work_struct *work) +{ + struct twl6040_data *priv = + container_of(work, struct twl6040_data, hf_delayed_work.work); + struct snd_soc_codec *codec = priv->codec; + struct twl6040_output *handsfree = &priv->handsfree; + unsigned int delay = handsfree->step_delay; + int i, handsfree_complete; + + /* do we need to ramp at all ? 
*/ + if (handsfree->ramp == TWL6040_RAMP_NONE) + return; + + /* HF PGA volumes have 5 bits of resolution to ramp */ + for (i = 0; i <= 32; i++) { + handsfree_complete = 1; + if (handsfree->ramp != TWL6040_RAMP_NONE) + handsfree_complete = twl6040_hf_ramp_step(codec, + handsfree->left_step, + handsfree->right_step); + + /* ramp finished ? */ + if (handsfree_complete) + break; + + /* + * TODO: tune: delay is longer over 0dB + * as increases are larger. + */ + if (i >= 16) + schedule_timeout_interruptible(msecs_to_jiffies(delay + + (delay >> 1))); + else + schedule_timeout_interruptible(msecs_to_jiffies(delay)); + } + + + if (handsfree->ramp == TWL6040_RAMP_DOWN) { + handsfree->active = 0; + complete(&handsfree->ramp_done); + } else + handsfree->active = 1; + handsfree->ramp = TWL6040_RAMP_NONE; +} + +static int pga_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *out; + struct delayed_work *work; + struct workqueue_struct *queue; + + switch (w->shift) { + case 2: + case 3: + out = &priv->headset; + work = &priv->hs_delayed_work; + queue = priv->hs_workqueue; + out->step_delay = 5; /* 5 ms between volume ramp steps */ + break; + case 4: + out = &priv->handsfree; + work = &priv->hf_delayed_work; + queue = priv->hf_workqueue; + out->step_delay = 5; /* 5 ms between volume ramp steps */ + if (SND_SOC_DAPM_EVENT_ON(event)) + priv->non_lp++; + else + priv->non_lp--; + break; + default: + return -1; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + if (out->active) + break; + + /* don't use volume ramp for power-up */ + out->left_step = out->left_vol; + out->right_step = out->right_vol; + + if (!delayed_work_pending(work)) { + out->ramp = TWL6040_RAMP_UP; + queue_delayed_work(queue, work, + msecs_to_jiffies(1)); + } + break; + + case SND_SOC_DAPM_PRE_PMD: + if (!out->active) + break; + + if (!delayed_work_pending(work)) { + /* use volume ramp for power-down */ + out->left_step = 1; + out->right_step = 1; + out->ramp = TWL6040_RAMP_DOWN; + INIT_COMPLETION(out->ramp_done); + + queue_delayed_work(queue, work, + msecs_to_jiffies(1)); + + wait_for_completion_timeout(&out->ramp_done, + msecs_to_jiffies(2000)); + } + break; + } + + return 0; +} + /* twl6040 codec manual power-up sequence */ static void twl6040_power_up(struct snd_soc_codec *codec) { @@ -382,6 +724,47 @@ static int twl6040_power_mode_event(struct snd_soc_dapm_widget *w, return 0; } +void twl6040_hs_jack_report(struct snd_soc_codec *codec, + struct snd_soc_jack *jack, int report) +{ + struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); + int status; + + mutex_lock(&priv->mutex); + + /* Sync status */ + status = twl6040_read_reg_volatile(codec, TWL6040_REG_STATUS); + if (status & TWL6040_PLUGCOMP) + snd_soc_jack_report(jack, report, report); + else + snd_soc_jack_report(jack, 0, report); + + mutex_unlock(&priv->mutex); +} + +void twl6040_hs_jack_detect(struct snd_soc_codec *codec, + struct snd_soc_jack *jack, int report) +{ + struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_jack_data *hs_jack = &priv->hs_jack; + + hs_jack->jack = jack; + hs_jack->report = report; + + twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report); +} +EXPORT_SYMBOL_GPL(twl6040_hs_jack_detect); + +static void twl6040_accessory_work(struct work_struct *work) +{ + struct twl6040_data *priv = container_of(work, + struct twl6040_data, delayed_work.work); + 
struct snd_soc_codec *codec = priv->codec; + struct twl6040_jack_data *hs_jack = &priv->hs_jack; + + twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report); +} + /* audio interrupt handler */ static irqreturn_t twl6040_naudint_handler(int irq, void *data) { @@ -389,33 +772,180 @@ static irqreturn_t twl6040_naudint_handler(int irq, void *data) struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); u8 intid; - twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID); + twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID); - switch (intid) { - case TWL6040_THINT: + if (intid & TWL6040_THINT) dev_alert(codec->dev, "die temp over-limit detection\n"); + + if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT)) + queue_delayed_work(priv->workqueue, &priv->delayed_work, + msecs_to_jiffies(200)); + + if (intid & TWL6040_HOOKINT) + dev_info(codec->dev, "hook detection\n"); + + if (intid & TWL6040_HFINT) + dev_alert(codec->dev, "hf drivers over current detection\n"); + + if (intid & TWL6040_VIBINT) + dev_alert(codec->dev, "vib drivers over current detection\n"); + + if (intid & TWL6040_READYINT) + complete(&priv->ready); + + return IRQ_HANDLED; +} + +static int twl6040_put_volsw(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *out = NULL; + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + int ret; + unsigned int reg = mc->reg; + + /* For HS and HF we shadow the values and only actually write + * them out when active in order to ensure the amplifier comes on + * as quietly as possible. */ + switch (reg) { + case TWL6040_REG_HSGAIN: + out = &twl6040_priv->headset; break; - case TWL6040_PLUGINT: - case TWL6040_UNPLUGINT: - case TWL6040_HOOKINT: + default: break; - case TWL6040_HFINT: - dev_alert(codec->dev, "hf drivers over current detection\n"); + } + + if (out) { + out->left_vol = ucontrol->value.integer.value[0]; + out->right_vol = ucontrol->value.integer.value[1]; + if (!out->active) + return 1; + } + + ret = snd_soc_put_volsw(kcontrol, ucontrol); + if (ret < 0) + return ret; + + return 1; +} + +static int twl6040_get_volsw(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *out = &twl6040_priv->headset; + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + unsigned int reg = mc->reg; + + switch (reg) { + case TWL6040_REG_HSGAIN: + out = &twl6040_priv->headset; + ucontrol->value.integer.value[0] = out->left_vol; + ucontrol->value.integer.value[1] = out->right_vol; + return 0; + + default: break; - case TWL6040_VIBINT: - dev_alert(codec->dev, "vib drivers over current detection\n"); + } + + return snd_soc_get_volsw(kcontrol, ucontrol); +} + +static int twl6040_put_volsw_2r_vu(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *out = NULL; + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + int ret; + unsigned int reg = mc->reg; + + /* For HS and HF we shadow the values and only actually write + * them out when 
active in order to ensure the amplifier comes on + * as quietly as possible. */ + switch (reg) { + case TWL6040_REG_HFLGAIN: + case TWL6040_REG_HFRGAIN: + out = &twl6040_priv->handsfree; break; - case TWL6040_READYINT: - complete(&priv->ready); + default: break; + } + + if (out) { + out->left_vol = ucontrol->value.integer.value[0]; + out->right_vol = ucontrol->value.integer.value[1]; + if (!out->active) + return 1; + } + + ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); + if (ret < 0) + return ret; + + return 1; +} + +static int twl6040_get_volsw_2r(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); + struct twl6040_output *out = &twl6040_priv->handsfree; + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + unsigned int reg = mc->reg; + + /* If these are cached registers use the cache */ + switch (reg) { + case TWL6040_REG_HFLGAIN: + case TWL6040_REG_HFRGAIN: + out = &twl6040_priv->handsfree; + ucontrol->value.integer.value[0] = out->left_vol; + ucontrol->value.integer.value[1] = out->right_vol; + return 0; + default: - dev_err(codec->dev, "unknown audio interrupt %d\n", intid); break; } - return IRQ_HANDLED; + return snd_soc_get_volsw_2r(kcontrol, ucontrol); } +/* double control with volume update */ +#define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\ + xinvert, tlv_array)\ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ + SNDRV_CTL_ELEM_ACCESS_READWRITE,\ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw, .get = twl6040_get_volsw, \ + .put = twl6040_put_volsw, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .shift = shift_left, .rshift = shift_right,\ + .max = xmax, .platform_max = xmax, .invert = xinvert} } + +/* double control with volume update */ +#define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\ + xinvert, tlv_array)\ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ + SNDRV_CTL_ELEM_ACCESS_READWRITE | \ + SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw_2r, \ + .get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = reg_left, .rreg = reg_right, .shift = xshift, \ + .rshift = xshift, .max = xmax, .invert = xinvert}, } + /* * MICATT volume control: * from -6 to 0 dB in 6 dB steps @@ -424,9 +954,15 @@ static DECLARE_TLV_DB_SCALE(mic_preamp_tlv, -600, 600, 0); /* * MICGAIN volume control: - * from 6 to 30 dB in 6 dB steps + * from -6 to 30 dB in 6 dB steps */ -static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0); +static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -600, 600, 0); + +/* + * AFMGAIN volume control: + * from 18 to 24 dB in 6 dB steps + */ +static DECLARE_TLV_DB_SCALE(afm_amp_tlv, 1800, 600, 0); /* * HSGAIN volume control: @@ -455,8 +991,30 @@ static const char *twl6040_amicr_texts[] = {"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"}; static const struct soc_enum twl6040_enum[] = { - SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 3, twl6040_amicl_texts), - SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 3, twl6040_amicr_texts), + SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 4, twl6040_amicl_texts), + SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 4, twl6040_amicr_texts), +}; + +static const char 
*twl6040_hs_texts[] = { + "Off", "HS DAC", "Line-In amp" +}; + +static const struct soc_enum twl6040_hs_enum[] = { + SOC_ENUM_SINGLE(TWL6040_REG_HSLCTL, 5, ARRAY_SIZE(twl6040_hs_texts), + twl6040_hs_texts), + SOC_ENUM_SINGLE(TWL6040_REG_HSRCTL, 5, ARRAY_SIZE(twl6040_hs_texts), + twl6040_hs_texts), +}; + +static const char *twl6040_hf_texts[] = { + "Off", "HF DAC", "Line-In amp" +}; + +static const struct soc_enum twl6040_hf_enum[] = { + SOC_ENUM_SINGLE(TWL6040_REG_HFLCTL, 2, ARRAY_SIZE(twl6040_hf_texts), + twl6040_hf_texts), + SOC_ENUM_SINGLE(TWL6040_REG_HFRCTL, 2, ARRAY_SIZE(twl6040_hf_texts), + twl6040_hf_texts), }; static const struct snd_kcontrol_new amicl_control = @@ -466,18 +1024,18 @@ static const struct snd_kcontrol_new amicr_control = SOC_DAPM_ENUM("Route", twl6040_enum[1]); /* Headset DAC playback switches */ -static const struct snd_kcontrol_new hsdacl_switch_controls = - SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 5, 1, 0); +static const struct snd_kcontrol_new hsl_mux_controls = + SOC_DAPM_ENUM("Route", twl6040_hs_enum[0]); -static const struct snd_kcontrol_new hsdacr_switch_controls = - SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 5, 1, 0); +static const struct snd_kcontrol_new hsr_mux_controls = + SOC_DAPM_ENUM("Route", twl6040_hs_enum[1]); /* Handsfree DAC playback switches */ -static const struct snd_kcontrol_new hfdacl_switch_controls = - SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 2, 1, 0); +static const struct snd_kcontrol_new hfl_mux_controls = + SOC_DAPM_ENUM("Route", twl6040_hf_enum[0]); -static const struct snd_kcontrol_new hfdacr_switch_controls = - SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 2, 1, 0); +static const struct snd_kcontrol_new hfr_mux_controls = + SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]); static const struct snd_kcontrol_new ep_driver_switch_controls = SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0); @@ -489,10 +1047,14 @@ static const struct snd_kcontrol_new twl6040_snd_controls[] = { SOC_DOUBLE_TLV("Capture Volume", TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv), + /* AFM gains */ + SOC_DOUBLE_TLV("Aux FM Volume", + TWL6040_REG_LINEGAIN, 0, 4, 0xF, 0, afm_amp_tlv), + /* Playback gains */ - SOC_DOUBLE_TLV("Headset Playback Volume", + SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume", TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv), - SOC_DOUBLE_R_TLV("Handsfree Playback Volume", + SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume", TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv), SOC_SINGLE_TLV("Earphone Playback Volume", TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv), @@ -525,6 +1087,12 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = { SND_SOC_DAPM_PGA("MicAmpR", TWL6040_REG_MICRCTL, 0, 0, NULL, 0), + /* Auxiliary FM PGAs */ + SND_SOC_DAPM_PGA("AFMAmpL", + TWL6040_REG_MICLCTL, 1, 0, NULL, 0), + SND_SOC_DAPM_PGA("AFMAmpR", + TWL6040_REG_MICRCTL, 1, 0, NULL, 0), + /* ADCs */ SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture", TWL6040_REG_MICLCTL, 2, 0), @@ -559,29 +1127,33 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = { twl6040_power_mode_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - /* Analog playback switches */ - SND_SOC_DAPM_SWITCH("HSDAC Left Playback", - SND_SOC_NOPM, 0, 0, &hsdacl_switch_controls), - SND_SOC_DAPM_SWITCH("HSDAC Right Playback", - SND_SOC_NOPM, 0, 0, &hsdacr_switch_controls), - SND_SOC_DAPM_SWITCH("HFDAC Left Playback", - SND_SOC_NOPM, 0, 0, &hfdacl_switch_controls), - SND_SOC_DAPM_SWITCH("HFDAC Right Playback", - SND_SOC_NOPM, 0, 0, &hfdacr_switch_controls), + 
SND_SOC_DAPM_MUX("HF Left Playback", + SND_SOC_NOPM, 0, 0, &hfl_mux_controls), + SND_SOC_DAPM_MUX("HF Right Playback", + SND_SOC_NOPM, 0, 0, &hfr_mux_controls), + /* Analog playback Muxes */ + SND_SOC_DAPM_MUX("HS Left Playback", + SND_SOC_NOPM, 0, 0, &hsl_mux_controls), + SND_SOC_DAPM_MUX("HS Right Playback", + SND_SOC_NOPM, 0, 0, &hsr_mux_controls), /* Analog playback drivers */ - SND_SOC_DAPM_PGA_E("Handsfree Left Driver", + SND_SOC_DAPM_OUT_DRV_E("Handsfree Left Driver", TWL6040_REG_HFLCTL, 4, 0, NULL, 0, - twl6040_power_mode_event, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("Handsfree Right Driver", + pga_event, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_OUT_DRV_E("Handsfree Right Driver", TWL6040_REG_HFRCTL, 4, 0, NULL, 0, - twl6040_power_mode_event, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA("Headset Left Driver", - TWL6040_REG_HSLCTL, 2, 0, NULL, 0), - SND_SOC_DAPM_PGA("Headset Right Driver", - TWL6040_REG_HSRCTL, 2, 0, NULL, 0), + pga_event, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_OUT_DRV_E("Headset Left Driver", + TWL6040_REG_HSLCTL, 2, 0, NULL, 0, + pga_event, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_OUT_DRV_E("Headset Right Driver", + TWL6040_REG_HSRCTL, 2, 0, NULL, 0, + pga_event, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_SWITCH_E("Earphone Driver", SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls, twl6040_power_mode_event, @@ -611,12 +1183,18 @@ static const struct snd_soc_dapm_route intercon[] = { {"ADC Left", NULL, "MicAmpL"}, {"ADC Right", NULL, "MicAmpR"}, - /* Headset playback path */ - {"HSDAC Left Playback", "Switch", "HSDAC Left"}, - {"HSDAC Right Playback", "Switch", "HSDAC Right"}, + /* AFM path */ + {"AFMAmpL", "NULL", "AFML"}, + {"AFMAmpR", "NULL", "AFMR"}, + + {"HS Left Playback", "HS DAC", "HSDAC Left"}, + {"HS Left Playback", "Line-In amp", "AFMAmpL"}, - {"Headset Left Driver", NULL, "HSDAC Left Playback"}, - {"Headset Right Driver", NULL, "HSDAC Right Playback"}, + {"HS Right Playback", "HS DAC", "HSDAC Right"}, + {"HS Right Playback", "Line-In amp", "AFMAmpR"}, + + {"Headset Left Driver", "NULL", "HS Left Playback"}, + {"Headset Right Driver", "NULL", "HS Right Playback"}, {"HSOL", NULL, "Headset Left Driver"}, {"HSOR", NULL, "Headset Right Driver"}, @@ -625,12 +1203,14 @@ static const struct snd_soc_dapm_route intercon[] = { {"Earphone Driver", "Switch", "HSDAC Left"}, {"EP", NULL, "Earphone Driver"}, - /* Handsfree playback path */ - {"HFDAC Left Playback", "Switch", "HFDAC Left"}, - {"HFDAC Right Playback", "Switch", "HFDAC Right"}, + {"HF Left Playback", "HF DAC", "HFDAC Left"}, + {"HF Left Playback", "Line-In amp", "AFMAmpL"}, + + {"HF Right Playback", "HF DAC", "HFDAC Right"}, + {"HF Right Playback", "Line-In amp", "AFMAmpR"}, - {"HFDAC Left PGA", NULL, "HFDAC Left Playback"}, - {"HFDAC Right PGA", NULL, "HFDAC Right Playback"}, + {"HFDAC Left PGA", NULL, "HF Left Playback"}, + {"HFDAC Right PGA", NULL, "HF Right Playback"}, {"Handsfree Left Driver", "Switch", "HFDAC Left PGA"}, {"Handsfree Right Driver", "Switch", "HFDAC Right PGA"}, @@ -641,12 +1221,12 @@ static const struct snd_soc_dapm_route intercon[] = { static int twl6040_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, twl6040_dapm_widgets, - ARRAY_SIZE(twl6040_dapm_widgets)); - - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_widgets(codec); 
+ snd_soc_dapm_new_controls(dapm, twl6040_dapm_widgets, + ARRAY_SIZE(twl6040_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_widgets(dapm); return 0; } @@ -659,10 +1239,10 @@ static int twl6040_power_up_completion(struct snd_soc_codec *codec, u8 intid; time_left = wait_for_completion_timeout(&priv->ready, - msecs_to_jiffies(48)); + msecs_to_jiffies(144)); if (!time_left) { - twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, + twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID); if (!(intid & TWL6040_READYINT)) { dev_err(codec->dev, "timeout waiting for READYINT\n"); @@ -713,6 +1293,15 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec, /* initialize vdd/vss registers with reg_cache */ twl6040_init_vdd_regs(codec); + + /* Set external boost GPO */ + twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02); + + /* Set initial minimal gain values */ + twl6040_write(codec, TWL6040_REG_HSGAIN, 0xFF); + twl6040_write(codec, TWL6040_REG_EARCTL, 0x1E); + twl6040_write(codec, TWL6040_REG_HFLGAIN, 0x1D); + twl6040_write(codec, TWL6040_REG_HFRGAIN, 0x1D); break; case SND_SOC_BIAS_OFF: if (!priv->codec_powered) @@ -739,7 +1328,7 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -772,23 +1361,6 @@ static int twl6040_startup(struct snd_pcm_substream *substream, struct snd_soc_codec *codec = rtd->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); - if (!priv->sysclk) { - dev_err(codec->dev, - "no mclk configured, call set_sysclk() on init\n"); - return -EINVAL; - } - - /* - * capture is not supported at 17.64 MHz, - * it's reserved for headset low-power playback scenario - */ - if ((priv->sysclk == 17640000) && substream->stream) { - dev_err(codec->dev, - "capture mode is not supported at %dHz\n", - priv->sysclk); - return -EINVAL; - } - snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, priv->sysclk_constraints); @@ -814,10 +1386,17 @@ static int twl6040_hw_params(struct snd_pcm_substream *substream, rate = params_rate(params); switch (rate) { + case 11250: + case 22500: + case 44100: case 88200: lppllctl |= TWL6040_LPLLFIN; priv->sysclk = 17640000; break; + case 8000: + case 16000: + case 32000: + case 48000: case 96000: lppllctl &= ~TWL6040_LPLLFIN; priv->sysclk = 19200000; @@ -832,31 +1411,37 @@ static int twl6040_hw_params(struct snd_pcm_substream *substream, return 0; } -static int twl6040_trigger(struct snd_pcm_substream *substream, - int cmd, struct snd_soc_dai *dai) +static int twl6040_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - /* - * low-power playback mode is restricted - * for headset path only - */ - if ((priv->sysclk == 17640000) && priv->non_lp) { + if (!priv->sysclk) { + dev_err(codec->dev, + "no mclk configured, call set_sysclk() on init\n"); + return -EINVAL; + } + + /* + * capture is not supported at 17.64 MHz, + * it's reserved for headset low-power playback scenario + */ + if ((priv->sysclk == 17640000) && + substream->stream == SNDRV_PCM_STREAM_CAPTURE) { + dev_err(codec->dev, + "capture mode is not supported at %dHz\n", + priv->sysclk); + return -EINVAL; + } + + if ((priv->sysclk == 17640000) && 
priv->non_lp) { dev_err(codec->dev, "some enabled paths aren't supported at %dHz\n", priv->sysclk); return -EPERM; - } - break; - default: - break; } - return 0; } @@ -970,7 +1555,7 @@ static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai, static struct snd_soc_dai_ops twl6040_dai_ops = { .startup = twl6040_startup, .hw_params = twl6040_hw_params, - .trigger = twl6040_trigger, + .prepare = twl6040_prepare, .set_sysclk = twl6040_set_dai_sysclk, }; @@ -1004,6 +1589,7 @@ static int twl6040_suspend(struct snd_soc_codec *codec, pm_message_t state) static int twl6040_resume(struct snd_soc_codec *codec) { twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level); return 0; } @@ -1018,24 +1604,41 @@ static int twl6040_probe(struct snd_soc_codec *codec) struct twl6040_data *priv; int audpwron, naudint; int ret = 0; + u8 icrev, intmr = TWL6040_ALLINT_MSK; priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL); if (priv == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, priv); - if (twl_codec) { + priv->codec = codec; + + twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &icrev, TWL6040_REG_ASICREV); + + if (twl_codec && (icrev > 0)) audpwron = twl_codec->audpwron_gpio; - naudint = twl_codec->naudint_irq; - } else { + else audpwron = -EINVAL; + + if (twl_codec) + naudint = twl_codec->naudint_irq; + else naudint = 0; - } priv->audpwron = audpwron; priv->naudint = naudint; + priv->workqueue = create_singlethread_workqueue("twl6040-codec"); + + if (!priv->workqueue) + goto work_err; + + INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work); + + mutex_init(&priv->mutex); init_completion(&priv->ready); + init_completion(&priv->headset.ramp_done); + init_completion(&priv->handsfree.ramp_done); if (gpio_is_valid(audpwron)) { ret = gpio_request(audpwron, "audpwron"); @@ -1047,7 +1650,14 @@ static int twl6040_probe(struct snd_soc_codec *codec) goto gpio2_err; priv->codec_powered = 0; + + /* enable only codec ready interrupt */ + intmr &= ~(TWL6040_READYMSK | TWL6040_PLUGMSK); + + /* reset interrupt status to allow correct power up sequence */ + twl6040_read_reg_volatile(codec, TWL6040_REG_INTID); } + twl6040_write(codec, TWL6040_REG_INTMR, intmr); if (naudint) { /* audio interrupt */ @@ -1057,25 +1667,29 @@ static int twl6040_probe(struct snd_soc_codec *codec) "twl6040_codec", codec); if (ret) goto gpio2_err; - } else { - if (gpio_is_valid(audpwron)) { - /* enable only codec ready interrupt */ - twl6040_write_reg_cache(codec, TWL6040_REG_INTMR, - ~TWL6040_READYMSK & TWL6040_ALLINT_MSK); - } else { - /* no interrupts at all */ - twl6040_write_reg_cache(codec, TWL6040_REG_INTMR, - TWL6040_ALLINT_MSK); - } } /* init vio registers */ twl6040_init_vio_regs(codec); + priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf"); + if (priv->hf_workqueue == NULL) { + ret = -ENOMEM; + goto irq_err; + } + priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs"); + if (priv->hs_workqueue == NULL) { + ret = -ENOMEM; + goto wq_err; + } + + INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work); + INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work); + /* power on device */ ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY); if (ret) - goto irq_err; + goto bias_err; snd_soc_add_controls(codec, twl6040_snd_controls, ARRAY_SIZE(twl6040_snd_controls)); @@ -1083,6 +1697,10 @@ static int twl6040_probe(struct snd_soc_codec *codec) return 0; +bias_err: + destroy_workqueue(priv->hs_workqueue); +wq_err: + 
destroy_workqueue(priv->hf_workqueue); irq_err: if (naudint) free_irq(naudint, codec); @@ -1090,6 +1708,8 @@ gpio2_err: if (gpio_is_valid(audpwron)) gpio_free(audpwron); gpio1_err: + destroy_workqueue(priv->workqueue); +work_err: kfree(priv); return ret; } @@ -1108,6 +1728,9 @@ static int twl6040_remove(struct snd_soc_codec *codec) if (naudint) free_irq(naudint, codec); + destroy_workqueue(priv->workqueue); + destroy_workqueue(priv->hf_workqueue); + destroy_workqueue(priv->hs_workqueue); kfree(priv); return 0; diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h index f7c77fa58a3c..23aeed0963e6 100644 --- a/sound/soc/codecs/twl6040.h +++ b/sound/soc/codecs/twl6040.h @@ -79,6 +79,7 @@ /* INTMR (0x04) fields */ +#define TWL6040_PLUGMSK 0x02 #define TWL6040_READYMSK 0x40 #define TWL6040_ALLINT_MSK 0x7B @@ -135,4 +136,11 @@ #define TWL6040_HPPLL_ID 1 #define TWL6040_LPPLL_ID 2 +/* STATUS (0x2E) fields */ + +#define TWL6040_PLUGCOMP 0x02 + +void twl6040_hs_jack_detect(struct snd_soc_codec *codec, + struct snd_soc_jack *jack, int report); + #endif /* End of __TWL6040_H__ */ diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 464f0cfa4c7a..e76847a9438b 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -389,7 +388,7 @@ static int uda134x_set_bias_level(struct snd_soc_codec *codec, pd->power(0); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c index 0c6c725736c6..c5ca8cfea60f 100644 --- a/sound/soc/codecs/uda1380.c +++ b/sound/soc/codecs/uda1380.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -36,7 +35,6 @@ /* codec private data */ struct uda1380_priv { struct snd_soc_codec *codec; - u16 reg_cache[UDA1380_CACHEREGNUM]; unsigned int dac_clk; struct work_struct work; void *control_data; @@ -414,10 +412,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int uda1380_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets, - ARRAY_SIZE(uda1380_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets, + ARRAY_SIZE(uda1380_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -603,7 +602,7 @@ static int uda1380_set_bias_level(struct snd_soc_codec *codec, int reg; struct uda1380_platform_data *pdata = codec->dev->platform_data; - if (codec->bias_level == level) + if (codec->dapm.bias_level == level) return 0; switch (level) { @@ -613,7 +612,7 @@ static int uda1380_set_bias_level(struct snd_soc_codec *codec, uda1380_write(codec, UDA1380_PM, R02_PON_BIAS | pm); break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { if (gpio_is_valid(pdata->gpio_power)) { gpio_set_value(pdata->gpio_power, 1); mdelay(1); @@ -636,7 +635,7 @@ static int uda1380_set_bias_level(struct snd_soc_codec *codec, for (reg = UDA1380_MVOL; reg < UDA1380_CACHEREGNUM; reg++) set_bit(reg - 0x10, &uda1380_cache_dirty); } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c index 0c47c788ccdf..d3ffa2f0122a 100644 --- a/sound/soc/codecs/wl1273.c +++ b/sound/soc/codecs/wl1273.c @@ 
-25,8 +25,7 @@ #include #include #include -#include -#include +#include #include #include "wl1273.h" diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 4bcd168794e1..80ddf4fd23db 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -705,6 +704,7 @@ static const struct snd_soc_dapm_route audio_map[] = { /* Called from the machine driver */ int wm2000_add_controls(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; if (!wm2000_i2c) { @@ -712,12 +712,12 @@ int wm2000_add_controls(struct snd_soc_codec *codec) return -ENODEV; } - ret = snd_soc_dapm_new_controls(codec, wm2000_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, wm2000_dapm_widgets, ARRAY_SIZE(wm2000_dapm_widgets)); if (ret < 0) return ret; - ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); if (ret < 0) return ret; diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index 7611add7f8c3..6d6dc9efe914 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c @@ -24,9 +24,9 @@ #include #include #include -#include #include #include +#include #include "wm8350.h" @@ -54,6 +54,7 @@ struct wm8350_output { struct wm8350_jack_data { struct snd_soc_jack *jack; + struct delayed_work work; int report; int short_report; }; @@ -230,8 +231,9 @@ static inline int wm8350_out2_ramp_step(struct snd_soc_codec *codec) */ static void wm8350_pga_work(struct work_struct *work) { - struct snd_soc_codec *codec = - container_of(work, struct snd_soc_codec, delayed_work.work); + struct snd_soc_dapm_context *dapm = + container_of(work, struct snd_soc_dapm_context, delayed_work.work); + struct snd_soc_codec *codec = dapm->codec; struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec); struct wm8350_output *out1 = &wm8350_data->out1, *out2 = &wm8350_data->out2; @@ -302,8 +304,8 @@ static int pga_event(struct snd_soc_dapm_widget *w, out->ramp = WM8350_RAMP_UP; out->active = 1; - if (!delayed_work_pending(&codec->delayed_work)) - schedule_delayed_work(&codec->delayed_work, + if (!delayed_work_pending(&codec->dapm.delayed_work)) + schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1)); break; @@ -311,8 +313,8 @@ static int pga_event(struct snd_soc_dapm_widget *w, out->ramp = WM8350_RAMP_DOWN; out->active = 0; - if (!delayed_work_pending(&codec->delayed_work)) - schedule_delayed_work(&codec->delayed_work, + if (!delayed_work_pending(&codec->dapm.delayed_work)) + schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1)); break; } @@ -786,9 +788,10 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8350_add_widgets(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; - ret = snd_soc_dapm_new_controls(codec, + ret = snd_soc_dapm_new_controls(dapm, wm8350_dapm_widgets, ARRAY_SIZE(wm8350_dapm_widgets)); if (ret != 0) { @@ -797,7 +800,7 @@ static int wm8350_add_widgets(struct snd_soc_codec *codec) } /* set up audio paths */ - ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); if (ret != 0) { dev_err(codec->dev, "DAPM route register failed\n"); return ret; @@ -1184,7 +1187,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == 
SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies); if (ret != 0) @@ -1317,7 +1320,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, priv->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1334,45 +1337,69 @@ static int wm8350_resume(struct snd_soc_codec *codec) return 0; } -static irqreturn_t wm8350_hp_jack_handler(int irq, void *data) +static void wm8350_hp_work(struct wm8350_data *priv, + struct wm8350_jack_data *jack, + u16 mask) { - struct wm8350_data *priv = data; struct wm8350 *wm8350 = priv->codec.control_data; u16 reg; int report; - int mask; + + reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS); + if (reg & mask) + report = jack->report; + else + report = 0; + + snd_soc_jack_report(jack->jack, report, jack->report); + +} + +static void wm8350_hpl_work(struct work_struct *work) +{ + struct wm8350_data *priv = + container_of(work, struct wm8350_data, hpl.work.work); + + wm8350_hp_work(priv, &priv->hpl, WM8350_JACK_L_LVL); +} + +static void wm8350_hpr_work(struct work_struct *work) +{ + struct wm8350_data *priv = + container_of(work, struct wm8350_data, hpr.work.work); + + wm8350_hp_work(priv, &priv->hpr, WM8350_JACK_R_LVL); +} + +static irqreturn_t wm8350_hp_jack_handler(int irq, void *data) +{ + struct wm8350_data *priv = data; + struct wm8350 *wm8350 = priv->codec.control_data; struct wm8350_jack_data *jack = NULL; switch (irq - wm8350->irq_base) { case WM8350_IRQ_CODEC_JCK_DET_L: +#ifndef CONFIG_SND_SOC_WM8350_MODULE + trace_snd_soc_jack_irq("WM8350 HPL"); +#endif jack = &priv->hpl; - mask = WM8350_JACK_L_LVL; break; case WM8350_IRQ_CODEC_JCK_DET_R: +#ifndef CONFIG_SND_SOC_WM8350_MODULE + trace_snd_soc_jack_irq("WM8350 HPR"); +#endif jack = &priv->hpr; - mask = WM8350_JACK_R_LVL; break; default: BUG(); } - if (!jack->jack) { - dev_warn(wm8350->dev, "Jack interrupt called with no jack\n"); - return IRQ_NONE; - } - - /* Debounce */ - msleep(200); - - reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS); - if (reg & mask) - report = jack->report; - else - report = 0; + if (device_may_wakeup(wm8350->dev)) + pm_wakeup_event(wm8350->dev, 250); - snd_soc_jack_report(jack->jack, report, jack->report); + schedule_delayed_work(&jack->work, 200); return IRQ_HANDLED; } @@ -1436,6 +1463,10 @@ static irqreturn_t wm8350_mic_handler(int irq, void *data) u16 reg; int report = 0; +#ifndef CONFIG_SND_SOC_WM8350_MODULE + trace_snd_soc_jack_irq("WM8350 mic"); +#endif + reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS); if (reg & WM8350_JACK_MICSCD_LVL) report |= priv->mic.short_report; @@ -1550,7 +1581,9 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec) /* Put the codec into reset if it wasn't already */ wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA); - INIT_DELAYED_WORK(&codec->delayed_work, wm8350_pga_work); + INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8350_pga_work); + INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work); + INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work); /* Enable the codec */ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA); @@ -1626,7 +1659,6 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec) { struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec); struct wm8350 *wm8350 = dev_get_platdata(codec->dev); - int ret; wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, WM8350_JDL_ENA | WM8350_JDR_ENA); @@ -1641,15 +1673,12 @@ static int 
wm8350_codec_remove(struct snd_soc_codec *codec) priv->hpr.jack = NULL; priv->mic.jack = NULL; - /* cancel any work waiting to be queued. */ - ret = cancel_delayed_work(&codec->delayed_work); + cancel_delayed_work_sync(&priv->hpl.work); + cancel_delayed_work_sync(&priv->hpr.work); /* if there was any work waiting then we run it now and * wait for its completion */ - if (ret) { - schedule_delayed_work(&codec->delayed_work, 0); - flush_scheduled_work(); - } + flush_delayed_work_sync(&codec->dapm.delayed_work); wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF); diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c index 850299786e02..3c3bc079167e 100644 --- a/sound/soc/codecs/wm8400.c +++ b/sound/soc/codecs/wm8400.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -911,10 +910,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8400_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8400_dapm_widgets, - ARRAY_SIZE(wm8400_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8400_dapm_widgets, + ARRAY_SIZE(wm8400_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -1219,7 +1219,7 @@ static int wm8400_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(power), &power[0]); if (ret != 0) { @@ -1306,7 +1306,7 @@ static int wm8400_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c index 8f107095760e..db0dced74843 100644 --- a/sound/soc/codecs/wm8510.c +++ b/sound/soc/codecs/wm8510.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include "wm8510.h" @@ -216,10 +215,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8510_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8510_dapm_widgets, - ARRAY_SIZE(wm8510_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8510_dapm_widgets, + ARRAY_SIZE(wm8510_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -478,7 +478,7 @@ static int wm8510_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: power1 |= WM8510_POWER1_BIASEN | WM8510_POWER1_BUFIOEN; - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Initial cap charge at VMID 5k */ snd_soc_write(codec, WM8510_POWER1, power1 | 0x3); mdelay(100); @@ -495,7 +495,7 @@ static int wm8510_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c index deca79ea2b4b..5eb2f501ce32 100644 --- a/sound/soc/codecs/wm8523.c +++ b/sound/soc/codecs/wm8523.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -109,10 +108,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8523_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets, - 
ARRAY_SIZE(wm8523_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8523_dapm_widgets, + ARRAY_SIZE(wm8523_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -327,7 +327,7 @@ static int wm8523_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { @@ -366,7 +366,7 @@ static int wm8523_set_bias_level(struct snd_soc_codec *codec, wm8523->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c index 8725d4e75431..8f6b5ee6645b 100644 --- a/sound/soc/codecs/wm8580.c +++ b/sound/soc/codecs/wm8580.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -191,7 +190,6 @@ static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = { struct wm8580_priv { enum snd_soc_control_type control_type; struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES]; - u16 reg_cache[WM8580_MAX_REGISTER + 1]; struct pll_state a; struct pll_state b; int sysclk[2]; @@ -302,10 +300,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8580_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets, - ARRAY_SIZE(wm8580_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets, + ARRAY_SIZE(wm8580_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -507,13 +506,13 @@ static int wm8580_paif_hw_params(struct snd_pcm_substream *substream, } /* Look up the SYSCLK ratio; accept only exact matches */ - ratio = wm8580->sysclk[dai->id] / params_rate(params); + ratio = wm8580->sysclk[dai->driver->id] / params_rate(params); for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++) if (ratio == wm8580_sysclk_ratios[i]) break; if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) { dev_err(codec->dev, "Invalid clock ratio %d/%d\n", - wm8580->sysclk[dai->id], params_rate(params)); + wm8580->sysclk[dai->driver->id], params_rate(params)); return -EINVAL; } paifa |= i; @@ -716,7 +715,7 @@ static int wm8580_set_sysclk(struct snd_soc_dai *dai, int clk_id, switch (clk_id) { case WM8580_CLKSRC_ADCMCLK: - if (dai->id != WM8580_DAI_PAIFTX) + if (dai->driver->id != WM8580_DAI_PAIFTX) return -EINVAL; sel = 0 << sel_shift; break; @@ -735,7 +734,7 @@ static int wm8580_set_sysclk(struct snd_soc_dai *dai, int clk_id, } /* We really should validate PLL settings but not yet */ - wm8580->sysclk[dai->id] = freq; + wm8580->sysclk[dai->driver->id] = freq; return snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel); } @@ -767,7 +766,7 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Power up and get individual control of the DACs */ reg = snd_soc_read(codec, WM8580_PWRDN1); reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD); @@ -785,7 +784,7 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN); break; 
} - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -905,7 +904,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8580 = { .set_bias_level = wm8580_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8580_reg), .reg_word_size = sizeof(u16), - .reg_cache_default = &wm8580_reg, + .reg_cache_default = wm8580_reg, }; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c index 54fbd76c8bca..97c30382d3ff 100644 --- a/sound/soc/codecs/wm8711.c +++ b/sound/soc/codecs/wm8711.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include @@ -34,7 +33,6 @@ /* codec private data */ struct wm8711_priv { enum snd_soc_control_type bus_type; - u16 reg_cache[WM8711_CACHEREGNUM]; unsigned int sysclk; }; @@ -93,10 +91,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8711_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8711_dapm_widgets, - ARRAY_SIZE(wm8711_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8711_dapm_widgets, + ARRAY_SIZE(wm8711_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -318,7 +317,7 @@ static int wm8711_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8711_PWR, 0xffff); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c index 075f35e4f4cb..736b0352d0a7 100644 --- a/sound/soc/codecs/wm8728.c +++ b/sound/soc/codecs/wm8728.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -73,10 +72,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8728_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8728_dapm_widgets, - ARRAY_SIZE(wm8728_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8728_dapm_widgets, + ARRAY_SIZE(wm8728_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -180,7 +180,7 @@ static int wm8728_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Power everything up... 
*/ reg = snd_soc_read(codec, WM8728_DACCTL); snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4); @@ -197,7 +197,7 @@ static int wm8728_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8728_DACCTL, reg | 0x4); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index e725c09a3e79..0a67c31b2663 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -44,9 +43,10 @@ static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = { struct wm8731_priv { enum snd_soc_control_type control_type; struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES]; - u16 reg_cache[WM8731_CACHEREGNUM]; unsigned int sysclk; int sysclk_type; + int playback_fs; + bool deemph; }; @@ -65,16 +65,79 @@ static const u16 wm8731_reg[WM8731_CACHEREGNUM] = { #define wm8731_reset(c) snd_soc_write(c, WM8731_RESET, 0) static const char *wm8731_input_select[] = {"Line In", "Mic"}; -static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"}; -static const struct soc_enum wm8731_enum[] = { - SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select), - SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph), -}; +static const struct soc_enum wm8731_insel_enum = + SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select); + +static int wm8731_deemph[] = { 0, 32000, 44100, 48000 }; + +static int wm8731_set_deemph(struct snd_soc_codec *codec) +{ + struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); + int val, i, best; + + /* If we're using deemphasis select the nearest available sample + * rate. + */ + if (wm8731->deemph) { + best = 1; + for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) { + if (abs(wm8731_deemph[i] - wm8731->playback_fs) < + abs(wm8731_deemph[best] - wm8731->playback_fs)) + best = i; + } + + val = best << 1; + } else { + best = 0; + val = 0; + } + + dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n", + best, wm8731_deemph[best]); + + return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val); +} + +static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = wm8731->deemph; + + return 0; +} + +static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); + int deemph = ucontrol->value.enumerated.item[0]; + int ret = 0; + + if (deemph > 1) + return -EINVAL; + + mutex_lock(&codec->mutex); + if (wm8731->deemph != deemph) { + wm8731->deemph = deemph; + + wm8731_set_deemph(codec); + + ret = 1; + } + mutex_unlock(&codec->mutex); + + return ret; +} static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0); static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); +static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0); static const struct snd_kcontrol_new wm8731_snd_controls[] = { @@ -87,7 +150,7 @@ SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0, in_tlv), SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1), -SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0), +SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, 
mic_tlv), SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1), SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1, @@ -96,7 +159,8 @@ SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1, SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1), SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0), -SOC_ENUM("Playback De-emphasis", wm8731_enum[1]), +SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0, + wm8731_get_deemph, wm8731_put_deemph), }; /* Output Mixer */ @@ -108,7 +172,7 @@ SOC_DAPM_SINGLE("HiFi Playback Switch", WM8731_APANA, 4, 1, 0), /* Input mux */ static const struct snd_kcontrol_new wm8731_input_mux_controls = -SOC_DAPM_ENUM("Input Select", wm8731_enum[0]); +SOC_DAPM_ENUM("Input Select", wm8731_insel_enum); static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0), @@ -165,10 +229,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8731_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets, - ARRAY_SIZE(wm8731_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets, + ARRAY_SIZE(wm8731_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -239,6 +304,8 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream, u16 srate = (coeff_div[i].sr << 2) | (coeff_div[i].bosr << 1) | coeff_div[i].usb; + wm8731->playback_fs = params_rate(params); + snd_soc_write(codec, WM8731_SRATE, srate); /* bit size */ @@ -253,6 +320,8 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream, break; } + wm8731_set_deemph(codec); + snd_soc_write(codec, WM8731_IFACE, iface); return 0; } @@ -319,7 +388,7 @@ static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai, return -EINVAL; } - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(&codec->dapm); return 0; } @@ -399,7 +468,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); if (ret != 0) @@ -428,7 +497,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec, wm8731->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -542,7 +611,6 @@ err_regulator_enable: err_regulator_get: regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); - kfree(wm8731); return ret; } diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c new file mode 100644 index 000000000000..30c67d06a904 --- /dev/null +++ b/sound/soc/codecs/wm8737.c @@ -0,0 +1,754 @@ +/* + * wm8737.c -- WM8737 ALSA SoC Audio driver + * + * Copyright 2010 Wolfson Microelectronics plc + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wm8737.h" + +#define WM8737_NUM_SUPPLIES 4 +static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = { + "DCVDD", + "DBVDD", + "AVDD", + "MVDD", +}; + +/* codec private data */ +struct wm8737_priv { + enum snd_soc_control_type control_type; + struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES]; + unsigned int mclk; +}; + +static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = { + 0x00C3, /* R0 - Left PGA volume */ + 0x00C3, /* R1 - Right PGA volume */ + 0x0007, /* R2 - AUDIO path L */ + 0x0007, /* R3 - AUDIO path R */ + 0x0000, /* R4 - 3D Enhance */ + 0x0000, /* R5 - ADC Control */ + 0x0000, /* R6 - Power Management */ + 0x000A, /* R7 - Audio Format */ + 0x0000, /* R8 - Clocking */ + 0x000F, /* R9 - MIC Preamp Control */ + 0x0003, /* R10 - Misc Bias Control */ + 0x0000, /* R11 - Noise Gate */ + 0x007C, /* R12 - ALC1 */ + 0x0000, /* R13 - ALC2 */ + 0x0032, /* R14 - ALC3 */ +}; + +static int wm8737_reset(struct snd_soc_codec *codec) +{ + return snd_soc_write(codec, WM8737_RESET, 0); +} + +static const unsigned int micboost_tlv[] = { + TLV_DB_RANGE_HEAD(4), + 0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0), + 1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0), + 2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0), + 3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0), +}; +static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1); +static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0); +static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0); +static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0); +static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0); + +static const char *micbias_enum_text[] = { + "25%", + "50%", + "75%", + "100%", +}; + +static const struct soc_enum micbias_enum = + SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text); + +static const char *low_cutoff_text[] = { + "Low", "High" +}; + +static const struct soc_enum low_3d = + SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text); + +static const char *high_cutoff_text[] = { + "High", "Low" +}; + +static const struct soc_enum high_3d = + SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text); + +static const char *alc_fn_text[] = { + "Disabled", "Right", "Left", "Stereo" +}; + +static const struct soc_enum alc_fn = + SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text); + +static const char *alc_hold_text[] = { + "0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms", + "170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s", + "10.916s", "21.832s", "43.691s" +}; + +static const struct soc_enum alc_hold = + SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text); + +static const char *alc_atk_text[] = { + "8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", + "1.075s", "2.15s", "4.3s", "8.6s" +}; + +static const struct soc_enum alc_atk = + SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text); + +static const char *alc_dcy_text[] = { + "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s", + "4.3s", "8.6s", "17.2s", "34.41s" +}; + +static const struct soc_enum alc_dcy = + SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text); + +static const struct snd_kcontrol_new wm8737_snd_controls[] = { +SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R, + 6, 3, 0, micboost_tlv), +SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R, + 4, 1, 0), +SOC_DOUBLE("Mic ZC 
Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R, + 3, 1, 0), + +SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME, + WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv), +SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R, + 2, 1, 0), + +SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0), + +SOC_ENUM("Mic PGA Bias", micbias_enum), +SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0), +SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1), +SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0), + +SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0), +SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0), +SOC_ENUM("3D Low Cut-off", low_3d), +SOC_ENUM("3D High Cut-off", low_3d), +SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv), + +SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0), +SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0, + ng_tlv), + +SOC_ENUM("ALC", alc_fn), +SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv), +SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv), +SOC_ENUM("ALC Hold Time", alc_hold), +SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0), +SOC_ENUM("ALC Attack Time", alc_atk), +SOC_ENUM("ALC Decay Time", alc_dcy), +}; + +static const char *linsel_text[] = { + "LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC", +}; + +static const struct soc_enum linsel_enum = + SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text); + +static const struct snd_kcontrol_new linsel_mux = + SOC_DAPM_ENUM("LINSEL", linsel_enum); + + +static const char *rinsel_text[] = { + "RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC", +}; + +static const struct soc_enum rinsel_enum = + SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text); + +static const struct snd_kcontrol_new rinsel_mux = + SOC_DAPM_ENUM("RINSEL", rinsel_enum); + +static const char *bypass_text[] = { + "Direct", "Preamp" +}; + +static const struct soc_enum lbypass_enum = + SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text); + +static const struct snd_kcontrol_new lbypass_mux = + SOC_DAPM_ENUM("Left Bypass", lbypass_enum); + + +static const struct soc_enum rbypass_enum = + SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text); + +static const struct snd_kcontrol_new rbypass_mux = + SOC_DAPM_ENUM("Left Bypass", rbypass_enum); + +static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = { +SND_SOC_DAPM_INPUT("LINPUT1"), +SND_SOC_DAPM_INPUT("LINPUT2"), +SND_SOC_DAPM_INPUT("LINPUT3"), +SND_SOC_DAPM_INPUT("RINPUT1"), +SND_SOC_DAPM_INPUT("RINPUT2"), +SND_SOC_DAPM_INPUT("RINPUT3"), +SND_SOC_DAPM_INPUT("LACIN"), +SND_SOC_DAPM_INPUT("RACIN"), + +SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux), +SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux), + +SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux), +SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux), + +SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0), + +SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0), +SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0), + +SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0), +}; + +static const struct snd_soc_dapm_route intercon[] = { + { "LINSEL", "LINPUT1", "LINPUT1" }, + { "LINSEL", "LINPUT2", "LINPUT2" }, + { "LINSEL", "LINPUT3", "LINPUT3" }, + { "LINSEL", "LINPUT1 
DC", "LINPUT1" }, + + { "RINSEL", "RINPUT1", "RINPUT1" }, + { "RINSEL", "RINPUT2", "RINPUT2" }, + { "RINSEL", "RINPUT3", "RINPUT3" }, + { "RINSEL", "RINPUT1 DC", "RINPUT1" }, + + { "Left Preamp Mux", "Preamp", "LINSEL" }, + { "Left Preamp Mux", "Direct", "LACIN" }, + + { "Right Preamp Mux", "Preamp", "RINSEL" }, + { "Right Preamp Mux", "Direct", "RACIN" }, + + { "PGAL", NULL, "Left Preamp Mux" }, + { "PGAR", NULL, "Right Preamp Mux" }, + + { "ADCL", NULL, "PGAL" }, + { "ADCR", NULL, "PGAR" }, + + { "AIF", NULL, "ADCL" }, + { "AIF", NULL, "ADCR" }, +}; + +static int wm8737_add_widgets(struct snd_soc_codec *codec) +{ + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets, + ARRAY_SIZE(wm8737_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); + + return 0; +} + +/* codec mclk clock divider coefficients */ +static const struct { + u32 mclk; + u32 rate; + u8 usb; + u8 sr; +} coeff_div[] = { + { 12288000, 8000, 0, 0x4 }, + { 12288000, 12000, 0, 0x8 }, + { 12288000, 16000, 0, 0xa }, + { 12288000, 24000, 0, 0x1c }, + { 12288000, 32000, 0, 0xc }, + { 12288000, 48000, 0, 0 }, + { 12288000, 96000, 0, 0xe }, + + { 11289600, 8000, 0, 0x14 }, + { 11289600, 11025, 0, 0x18 }, + { 11289600, 22050, 0, 0x1a }, + { 11289600, 44100, 0, 0x10 }, + { 11289600, 88200, 0, 0x1e }, + + { 18432000, 8000, 0, 0x5 }, + { 18432000, 12000, 0, 0x9 }, + { 18432000, 16000, 0, 0xb }, + { 18432000, 24000, 0, 0x1b }, + { 18432000, 32000, 0, 0xd }, + { 18432000, 48000, 0, 0x1 }, + { 18432000, 96000, 0, 0x1f }, + + { 16934400, 8000, 0, 0x15 }, + { 16934400, 11025, 0, 0x19 }, + { 16934400, 22050, 0, 0x1b }, + { 16934400, 44100, 0, 0x11 }, + { 16934400, 88200, 0, 0x1f }, + + { 12000000, 8000, 1, 0x4 }, + { 12000000, 11025, 1, 0x19 }, + { 12000000, 12000, 1, 0x8 }, + { 12000000, 16000, 1, 0xa }, + { 12000000, 22050, 1, 0x1b }, + { 12000000, 24000, 1, 0x1c }, + { 12000000, 32000, 1, 0xc }, + { 12000000, 44100, 1, 0x11 }, + { 12000000, 48000, 1, 0x0 }, + { 12000000, 88200, 1, 0x1f }, + { 12000000, 96000, 1, 0xe }, +}; + +static int wm8737_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_codec *codec = rtd->codec; + struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec); + int i; + u16 clocking = 0; + u16 af = 0; + + for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { + if (coeff_div[i].rate != params_rate(params)) + continue; + + if (coeff_div[i].mclk == wm8737->mclk) + break; + + if (coeff_div[i].mclk == wm8737->mclk * 2) { + clocking |= WM8737_CLKDIV2; + break; + } + } + + if (i == ARRAY_SIZE(coeff_div)) { + dev_err(codec->dev, "%dHz MCLK can't support %dHz\n", + wm8737->mclk, params_rate(params)); + return -EINVAL; + } + + clocking |= coeff_div[i].usb | (coeff_div[i].sr << WM8737_SR_SHIFT); + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + break; + case SNDRV_PCM_FORMAT_S20_3LE: + af |= 0x8; + break; + case SNDRV_PCM_FORMAT_S24_LE: + af |= 0x10; + break; + case SNDRV_PCM_FORMAT_S32_LE: + af |= 0x18; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af); + snd_soc_update_bits(codec, WM8737_CLOCKING, + WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK, + clocking); + + return 0; +} + +static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_codec *codec = 
codec_dai->codec; + struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec); + int i; + + for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { + if (freq == coeff_div[i].mclk || + freq == coeff_div[i].mclk * 2) { + wm8737->mclk = freq; + return 0; + } + } + + dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq); + + return -EINVAL; +} + + +static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_codec *codec = codec_dai->codec; + u16 af = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + af |= WM8737_MS; + break; + case SND_SOC_DAIFMT_CBS_CFS: + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + af |= 0x2; + break; + case SND_SOC_DAIFMT_RIGHT_J: + break; + case SND_SOC_DAIFMT_LEFT_J: + af |= 0x1; + break; + case SND_SOC_DAIFMT_DSP_A: + af |= 0x3; + break; + case SND_SOC_DAIFMT_DSP_B: + af |= 0x13; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_NB_IF: + af |= WM8737_LRP; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, + WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af); + + return 0; +} + +static int wm8737_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec); + int ret; + + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + /* VMID at 2*75k */ + snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL, + WM8737_VMIDSEL_MASK, 0); + break; + + case SND_SOC_BIAS_STANDBY: + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { + ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies), + wm8737->supplies); + if (ret != 0) { + dev_err(codec->dev, + "Failed to enable supplies: %d\n", + ret); + return ret; + } + + snd_soc_cache_sync(codec); + + /* Fast VMID ramp at 2*2.5k */ + snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL, + WM8737_VMIDSEL_MASK, 0x4); + + /* Bring VMID up */ + snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT, + WM8737_VMID_MASK | + WM8737_VREF_MASK, + WM8737_VMID_MASK | + WM8737_VREF_MASK); + + msleep(500); + } + + /* VMID at 2*300k */ + snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL, + WM8737_VMIDSEL_MASK, 2); + + break; + + case SND_SOC_BIAS_OFF: + snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT, + WM8737_VMID_MASK | WM8737_VREF_MASK, 0); + + regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), + wm8737->supplies); + break; + } + + codec->dapm.bias_level = level; + return 0; +} + +#define WM8737_RATES SNDRV_PCM_RATE_8000_96000 + +#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_ops wm8737_dai_ops = { + .hw_params = wm8737_hw_params, + .set_sysclk = wm8737_set_dai_sysclk, + .set_fmt = wm8737_set_dai_fmt, +}; + +static struct snd_soc_dai_driver wm8737_dai = { + .name = "wm8737", + .capture = { + .stream_name = "Capture", + .channels_min = 2, /* Mono modes not yet supported */ + .channels_max = 2, + .rates = WM8737_RATES, + .formats = WM8737_FORMATS, + }, + .ops = &wm8737_dai_ops, +}; + +#ifdef CONFIG_PM +static int wm8737_suspend(struct snd_soc_codec *codec, pm_message_t state) +{ + wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static int wm8737_resume(struct snd_soc_codec *codec) +{ + wm8737_set_bias_level(codec, 
SND_SOC_BIAS_STANDBY); + return 0; +} +#else +#define wm8737_suspend NULL +#define wm8737_resume NULL +#endif + +static int wm8737_probe(struct snd_soc_codec *codec) +{ + struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec); + int ret, i; + + ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type); + if (ret != 0) { + dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++) + wm8737->supplies[i].supply = wm8737_supply_names[i]; + + ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies), + wm8737->supplies); + if (ret != 0) { + dev_err(codec->dev, "Failed to request supplies: %d\n", ret); + return ret; + } + + ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies), + wm8737->supplies); + if (ret != 0) { + dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); + goto err_get; + } + + ret = wm8737_reset(codec); + if (ret < 0) { + dev_err(codec->dev, "Failed to issue reset\n"); + goto err_enable; + } + + snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU, + WM8737_LVU); + snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU, + WM8737_RVU); + + wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + + /* Bias level configuration will have done an extra enable */ + regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies); + + snd_soc_add_controls(codec, wm8737_snd_controls, + ARRAY_SIZE(wm8737_snd_controls)); + wm8737_add_widgets(codec); + + return 0; + +err_enable: + regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies); +err_get: + regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies); + + return ret; +} + +static int wm8737_remove(struct snd_soc_codec *codec) +{ + struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec); + + wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF); + regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies); + return 0; +} + +static struct snd_soc_codec_driver soc_codec_dev_wm8737 = { + .probe = wm8737_probe, + .remove = wm8737_remove, + .suspend = wm8737_suspend, + .resume = wm8737_resume, + .set_bias_level = wm8737_set_bias_level, + + .reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */ + .reg_word_size = sizeof(u16), + .reg_cache_default = wm8737_reg, +}; + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +static __devinit int wm8737_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct wm8737_priv *wm8737; + int ret; + + wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL); + if (wm8737 == NULL) + return -ENOMEM; + + i2c_set_clientdata(i2c, wm8737); + wm8737->control_type = SND_SOC_I2C; + + ret = snd_soc_register_codec(&i2c->dev, + &soc_codec_dev_wm8737, &wm8737_dai, 1); + if (ret < 0) + kfree(wm8737); + return ret; + +} + +static __devexit int wm8737_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static const struct i2c_device_id wm8737_i2c_id[] = { + { "wm8737", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id); + +static struct i2c_driver wm8737_i2c_driver = { + .driver = { + .name = "wm8737", + .owner = THIS_MODULE, + }, + .probe = wm8737_i2c_probe, + .remove = __devexit_p(wm8737_i2c_remove), + .id_table = wm8737_i2c_id, +}; +#endif + +#if defined(CONFIG_SPI_MASTER) +static int __devinit wm8737_spi_probe(struct spi_device *spi) +{ + struct wm8737_priv *wm8737; + int ret; + + wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL); 
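The WM8737 probe sequence above relies on regulator reference counting: probe enables the supplies itself so it can reset and configure the chip, the transition to SND_SOC_BIAS_STANDBY then takes a second reference inside the bias level callback, and the "Bias level configuration will have done an extra enable" comment marks where probe drops its own reference again. A minimal sketch of that balance, reusing the driver's own helpers and omitting the error handling present in the real probe:

/*
 * Regulators are reference counted, so the enable taken directly by
 * probe and the enable taken by the STANDBY bias transition are
 * independent references; dropping the probe reference leaves the
 * supplies powered via the bias-level reference.
 */
static int example_probe_supplies(struct snd_soc_codec *codec,
				  struct regulator_bulk_data *supplies,
				  int num_supplies)
{
	int ret;

	/* Reference 1: held only while probe touches the hardware */
	ret = regulator_bulk_enable(num_supplies, supplies);
	if (ret != 0)
		return ret;

	/* ... reset the chip, set volume update bits, etc. ... */

	/* Reference 2: taken inside wm8737_set_bias_level() */
	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* Drop reference 1; the device stays up on reference 2 */
	regulator_bulk_disable(num_supplies, supplies);

	return 0;
}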
+ if (wm8737 == NULL) + return -ENOMEM; + + wm8737->control_type = SND_SOC_SPI; + spi_set_drvdata(spi, wm8737); + + ret = snd_soc_register_codec(&spi->dev, + &soc_codec_dev_wm8737, &wm8737_dai, 1); + if (ret < 0) + kfree(wm8737); + return ret; +} + +static int __devexit wm8737_spi_remove(struct spi_device *spi) +{ + snd_soc_unregister_codec(&spi->dev); + kfree(spi_get_drvdata(spi)); + return 0; +} + +static struct spi_driver wm8737_spi_driver = { + .driver = { + .name = "wm8737", + .owner = THIS_MODULE, + }, + .probe = wm8737_spi_probe, + .remove = __devexit_p(wm8737_spi_remove), +}; +#endif /* CONFIG_SPI_MASTER */ + +static int __init wm8737_modinit(void) +{ + int ret; +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + ret = i2c_add_driver(&wm8737_i2c_driver); + if (ret != 0) { + printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n", + ret); + } +#endif +#if defined(CONFIG_SPI_MASTER) + ret = spi_register_driver(&wm8737_spi_driver); + if (ret != 0) { + printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n", + ret); + } +#endif + return 0; +} +module_init(wm8737_modinit); + +static void __exit wm8737_exit(void) +{ +#if defined(CONFIG_SPI_MASTER) + spi_unregister_driver(&wm8737_spi_driver); +#endif +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + i2c_del_driver(&wm8737_i2c_driver); +#endif +} +module_exit(wm8737_exit); + +MODULE_DESCRIPTION("ASoC WM8737 driver"); +MODULE_AUTHOR("Mark Brown "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/wm8737.h b/sound/soc/codecs/wm8737.h new file mode 100644 index 000000000000..23d14c8ff6e7 --- /dev/null +++ b/sound/soc/codecs/wm8737.h @@ -0,0 +1,322 @@ +#ifndef _WM8737_H +#define _WM8737_H + +/* + * wm8737.c -- WM8523 ALSA SoC Audio driver + * + * Copyright 2010 Wolfson Microelectronics plc + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Register values. + */ +#define WM8737_LEFT_PGA_VOLUME 0x00 +#define WM8737_RIGHT_PGA_VOLUME 0x01 +#define WM8737_AUDIO_PATH_L 0x02 +#define WM8737_AUDIO_PATH_R 0x03 +#define WM8737_3D_ENHANCE 0x04 +#define WM8737_ADC_CONTROL 0x05 +#define WM8737_POWER_MANAGEMENT 0x06 +#define WM8737_AUDIO_FORMAT 0x07 +#define WM8737_CLOCKING 0x08 +#define WM8737_MIC_PREAMP_CONTROL 0x09 +#define WM8737_MISC_BIAS_CONTROL 0x0A +#define WM8737_NOISE_GATE 0x0B +#define WM8737_ALC1 0x0C +#define WM8737_ALC2 0x0D +#define WM8737_ALC3 0x0E +#define WM8737_RESET 0x0F + +#define WM8737_REGISTER_COUNT 16 +#define WM8737_MAX_REGISTER 0x0F + +/* + * Field Definitions. 
+ */ + +/* + * R0 (0x00) - Left PGA volume + */ +#define WM8737_LVU 0x0100 /* LVU */ +#define WM8737_LVU_MASK 0x0100 /* LVU */ +#define WM8737_LVU_SHIFT 8 /* LVU */ +#define WM8737_LVU_WIDTH 1 /* LVU */ +#define WM8737_LINVOL_MASK 0x00FF /* LINVOL - [7:0] */ +#define WM8737_LINVOL_SHIFT 0 /* LINVOL - [7:0] */ +#define WM8737_LINVOL_WIDTH 8 /* LINVOL - [7:0] */ + +/* + * R1 (0x01) - Right PGA volume + */ +#define WM8737_RVU 0x0100 /* RVU */ +#define WM8737_RVU_MASK 0x0100 /* RVU */ +#define WM8737_RVU_SHIFT 8 /* RVU */ +#define WM8737_RVU_WIDTH 1 /* RVU */ +#define WM8737_RINVOL_MASK 0x00FF /* RINVOL - [7:0] */ +#define WM8737_RINVOL_SHIFT 0 /* RINVOL - [7:0] */ +#define WM8737_RINVOL_WIDTH 8 /* RINVOL - [7:0] */ + +/* + * R2 (0x02) - AUDIO path L + */ +#define WM8737_LINSEL_MASK 0x0180 /* LINSEL - [8:7] */ +#define WM8737_LINSEL_SHIFT 7 /* LINSEL - [8:7] */ +#define WM8737_LINSEL_WIDTH 2 /* LINSEL - [8:7] */ +#define WM8737_LMICBOOST_MASK 0x0060 /* LMICBOOST - [6:5] */ +#define WM8737_LMICBOOST_SHIFT 5 /* LMICBOOST - [6:5] */ +#define WM8737_LMICBOOST_WIDTH 2 /* LMICBOOST - [6:5] */ +#define WM8737_LMBE 0x0010 /* LMBE */ +#define WM8737_LMBE_MASK 0x0010 /* LMBE */ +#define WM8737_LMBE_SHIFT 4 /* LMBE */ +#define WM8737_LMBE_WIDTH 1 /* LMBE */ +#define WM8737_LMZC 0x0008 /* LMZC */ +#define WM8737_LMZC_MASK 0x0008 /* LMZC */ +#define WM8737_LMZC_SHIFT 3 /* LMZC */ +#define WM8737_LMZC_WIDTH 1 /* LMZC */ +#define WM8737_LPZC 0x0004 /* LPZC */ +#define WM8737_LPZC_MASK 0x0004 /* LPZC */ +#define WM8737_LPZC_SHIFT 2 /* LPZC */ +#define WM8737_LPZC_WIDTH 1 /* LPZC */ +#define WM8737_LZCTO_MASK 0x0003 /* LZCTO - [1:0] */ +#define WM8737_LZCTO_SHIFT 0 /* LZCTO - [1:0] */ +#define WM8737_LZCTO_WIDTH 2 /* LZCTO - [1:0] */ + +/* + * R3 (0x03) - AUDIO path R + */ +#define WM8737_RINSEL_MASK 0x0180 /* RINSEL - [8:7] */ +#define WM8737_RINSEL_SHIFT 7 /* RINSEL - [8:7] */ +#define WM8737_RINSEL_WIDTH 2 /* RINSEL - [8:7] */ +#define WM8737_RMICBOOST_MASK 0x0060 /* RMICBOOST - [6:5] */ +#define WM8737_RMICBOOST_SHIFT 5 /* RMICBOOST - [6:5] */ +#define WM8737_RMICBOOST_WIDTH 2 /* RMICBOOST - [6:5] */ +#define WM8737_RMBE 0x0010 /* RMBE */ +#define WM8737_RMBE_MASK 0x0010 /* RMBE */ +#define WM8737_RMBE_SHIFT 4 /* RMBE */ +#define WM8737_RMBE_WIDTH 1 /* RMBE */ +#define WM8737_RMZC 0x0008 /* RMZC */ +#define WM8737_RMZC_MASK 0x0008 /* RMZC */ +#define WM8737_RMZC_SHIFT 3 /* RMZC */ +#define WM8737_RMZC_WIDTH 1 /* RMZC */ +#define WM8737_RPZC 0x0004 /* RPZC */ +#define WM8737_RPZC_MASK 0x0004 /* RPZC */ +#define WM8737_RPZC_SHIFT 2 /* RPZC */ +#define WM8737_RPZC_WIDTH 1 /* RPZC */ +#define WM8737_RZCTO_MASK 0x0003 /* RZCTO - [1:0] */ +#define WM8737_RZCTO_SHIFT 0 /* RZCTO - [1:0] */ +#define WM8737_RZCTO_WIDTH 2 /* RZCTO - [1:0] */ + +/* + * R4 (0x04) - 3D Enhance + */ +#define WM8737_DIV2 0x0080 /* DIV2 */ +#define WM8737_DIV2_MASK 0x0080 /* DIV2 */ +#define WM8737_DIV2_SHIFT 7 /* DIV2 */ +#define WM8737_DIV2_WIDTH 1 /* DIV2 */ +#define WM8737_3DLC 0x0040 /* 3DLC */ +#define WM8737_3DLC_MASK 0x0040 /* 3DLC */ +#define WM8737_3DLC_SHIFT 6 /* 3DLC */ +#define WM8737_3DLC_WIDTH 1 /* 3DLC */ +#define WM8737_3DUC 0x0020 /* 3DUC */ +#define WM8737_3DUC_MASK 0x0020 /* 3DUC */ +#define WM8737_3DUC_SHIFT 5 /* 3DUC */ +#define WM8737_3DUC_WIDTH 1 /* 3DUC */ +#define WM8737_3DDEPTH_MASK 0x001E /* 3DDEPTH - [4:1] */ +#define WM8737_3DDEPTH_SHIFT 1 /* 3DDEPTH - [4:1] */ +#define WM8737_3DDEPTH_WIDTH 4 /* 3DDEPTH - [4:1] */ +#define WM8737_3DE 0x0001 /* 3DE */ +#define WM8737_3DE_MASK 0x0001 /* 3DE */ +#define 
WM8737_3DE_SHIFT 0 /* 3DE */ +#define WM8737_3DE_WIDTH 1 /* 3DE */ + +/* + * R5 (0x05) - ADC Control + */ +#define WM8737_MONOMIX_MASK 0x0180 /* MONOMIX - [8:7] */ +#define WM8737_MONOMIX_SHIFT 7 /* MONOMIX - [8:7] */ +#define WM8737_MONOMIX_WIDTH 2 /* MONOMIX - [8:7] */ +#define WM8737_POLARITY_MASK 0x0060 /* POLARITY - [6:5] */ +#define WM8737_POLARITY_SHIFT 5 /* POLARITY - [6:5] */ +#define WM8737_POLARITY_WIDTH 2 /* POLARITY - [6:5] */ +#define WM8737_HPOR 0x0010 /* HPOR */ +#define WM8737_HPOR_MASK 0x0010 /* HPOR */ +#define WM8737_HPOR_SHIFT 4 /* HPOR */ +#define WM8737_HPOR_WIDTH 1 /* HPOR */ +#define WM8737_LP 0x0004 /* LP */ +#define WM8737_LP_MASK 0x0004 /* LP */ +#define WM8737_LP_SHIFT 2 /* LP */ +#define WM8737_LP_WIDTH 1 /* LP */ +#define WM8737_MONOUT 0x0002 /* MONOUT */ +#define WM8737_MONOUT_MASK 0x0002 /* MONOUT */ +#define WM8737_MONOUT_SHIFT 1 /* MONOUT */ +#define WM8737_MONOUT_WIDTH 1 /* MONOUT */ +#define WM8737_ADCHPD 0x0001 /* ADCHPD */ +#define WM8737_ADCHPD_MASK 0x0001 /* ADCHPD */ +#define WM8737_ADCHPD_SHIFT 0 /* ADCHPD */ +#define WM8737_ADCHPD_WIDTH 1 /* ADCHPD */ + +/* + * R6 (0x06) - Power Management + */ +#define WM8737_VMID 0x0100 /* VMID */ +#define WM8737_VMID_MASK 0x0100 /* VMID */ +#define WM8737_VMID_SHIFT 8 /* VMID */ +#define WM8737_VMID_WIDTH 1 /* VMID */ +#define WM8737_VREF 0x0080 /* VREF */ +#define WM8737_VREF_MASK 0x0080 /* VREF */ +#define WM8737_VREF_SHIFT 7 /* VREF */ +#define WM8737_VREF_WIDTH 1 /* VREF */ +#define WM8737_AI 0x0040 /* AI */ +#define WM8737_AI_MASK 0x0040 /* AI */ +#define WM8737_AI_SHIFT 6 /* AI */ +#define WM8737_AI_WIDTH 1 /* AI */ +#define WM8737_PGL 0x0020 /* PGL */ +#define WM8737_PGL_MASK 0x0020 /* PGL */ +#define WM8737_PGL_SHIFT 5 /* PGL */ +#define WM8737_PGL_WIDTH 1 /* PGL */ +#define WM8737_PGR 0x0010 /* PGR */ +#define WM8737_PGR_MASK 0x0010 /* PGR */ +#define WM8737_PGR_SHIFT 4 /* PGR */ +#define WM8737_PGR_WIDTH 1 /* PGR */ +#define WM8737_ADL 0x0008 /* ADL */ +#define WM8737_ADL_MASK 0x0008 /* ADL */ +#define WM8737_ADL_SHIFT 3 /* ADL */ +#define WM8737_ADL_WIDTH 1 /* ADL */ +#define WM8737_ADR 0x0004 /* ADR */ +#define WM8737_ADR_MASK 0x0004 /* ADR */ +#define WM8737_ADR_SHIFT 2 /* ADR */ +#define WM8737_ADR_WIDTH 1 /* ADR */ +#define WM8737_MICBIAS_MASK 0x0003 /* MICBIAS - [1:0] */ +#define WM8737_MICBIAS_SHIFT 0 /* MICBIAS - [1:0] */ +#define WM8737_MICBIAS_WIDTH 2 /* MICBIAS - [1:0] */ + +/* + * R7 (0x07) - Audio Format + */ +#define WM8737_SDODIS 0x0080 /* SDODIS */ +#define WM8737_SDODIS_MASK 0x0080 /* SDODIS */ +#define WM8737_SDODIS_SHIFT 7 /* SDODIS */ +#define WM8737_SDODIS_WIDTH 1 /* SDODIS */ +#define WM8737_MS 0x0040 /* MS */ +#define WM8737_MS_MASK 0x0040 /* MS */ +#define WM8737_MS_SHIFT 6 /* MS */ +#define WM8737_MS_WIDTH 1 /* MS */ +#define WM8737_LRP 0x0010 /* LRP */ +#define WM8737_LRP_MASK 0x0010 /* LRP */ +#define WM8737_LRP_SHIFT 4 /* LRP */ +#define WM8737_LRP_WIDTH 1 /* LRP */ +#define WM8737_WL_MASK 0x000C /* WL - [3:2] */ +#define WM8737_WL_SHIFT 2 /* WL - [3:2] */ +#define WM8737_WL_WIDTH 2 /* WL - [3:2] */ +#define WM8737_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */ +#define WM8737_FORMAT_SHIFT 0 /* FORMAT - [1:0] */ +#define WM8737_FORMAT_WIDTH 2 /* FORMAT - [1:0] */ + +/* + * R8 (0x08) - Clocking + */ +#define WM8737_AUTODETECT 0x0080 /* AUTODETECT */ +#define WM8737_AUTODETECT_MASK 0x0080 /* AUTODETECT */ +#define WM8737_AUTODETECT_SHIFT 7 /* AUTODETECT */ +#define WM8737_AUTODETECT_WIDTH 1 /* AUTODETECT */ +#define WM8737_CLKDIV2 0x0040 /* CLKDIV2 */ +#define WM8737_CLKDIV2_MASK 
0x0040 /* CLKDIV2 */ +#define WM8737_CLKDIV2_SHIFT 6 /* CLKDIV2 */ +#define WM8737_CLKDIV2_WIDTH 1 /* CLKDIV2 */ +#define WM8737_SR_MASK 0x003E /* SR - [5:1] */ +#define WM8737_SR_SHIFT 1 /* SR - [5:1] */ +#define WM8737_SR_WIDTH 5 /* SR - [5:1] */ +#define WM8737_USB_MODE 0x0001 /* USB MODE */ +#define WM8737_USB_MODE_MASK 0x0001 /* USB MODE */ +#define WM8737_USB_MODE_SHIFT 0 /* USB MODE */ +#define WM8737_USB_MODE_WIDTH 1 /* USB MODE */ + +/* + * R9 (0x09) - MIC Preamp Control + */ +#define WM8737_RBYPEN 0x0008 /* RBYPEN */ +#define WM8737_RBYPEN_MASK 0x0008 /* RBYPEN */ +#define WM8737_RBYPEN_SHIFT 3 /* RBYPEN */ +#define WM8737_RBYPEN_WIDTH 1 /* RBYPEN */ +#define WM8737_LBYPEN 0x0004 /* LBYPEN */ +#define WM8737_LBYPEN_MASK 0x0004 /* LBYPEN */ +#define WM8737_LBYPEN_SHIFT 2 /* LBYPEN */ +#define WM8737_LBYPEN_WIDTH 1 /* LBYPEN */ +#define WM8737_MBCTRL_MASK 0x0003 /* MBCTRL - [1:0] */ +#define WM8737_MBCTRL_SHIFT 0 /* MBCTRL - [1:0] */ +#define WM8737_MBCTRL_WIDTH 2 /* MBCTRL - [1:0] */ + +/* + * R10 (0x0A) - Misc Bias Control + */ +#define WM8737_VMIDSEL_MASK 0x000C /* VMIDSEL - [3:2] */ +#define WM8737_VMIDSEL_SHIFT 2 /* VMIDSEL - [3:2] */ +#define WM8737_VMIDSEL_WIDTH 2 /* VMIDSEL - [3:2] */ +#define WM8737_LINPUT1_DC_BIAS_ENABLE 0x0002 /* LINPUT1 DC BIAS ENABLE */ +#define WM8737_LINPUT1_DC_BIAS_ENABLE_MASK 0x0002 /* LINPUT1 DC BIAS ENABLE */ +#define WM8737_LINPUT1_DC_BIAS_ENABLE_SHIFT 1 /* LINPUT1 DC BIAS ENABLE */ +#define WM8737_LINPUT1_DC_BIAS_ENABLE_WIDTH 1 /* LINPUT1 DC BIAS ENABLE */ +#define WM8737_RINPUT1_DC_BIAS_ENABLE 0x0001 /* RINPUT1 DC BIAS ENABLE */ +#define WM8737_RINPUT1_DC_BIAS_ENABLE_MASK 0x0001 /* RINPUT1 DC BIAS ENABLE */ +#define WM8737_RINPUT1_DC_BIAS_ENABLE_SHIFT 0 /* RINPUT1 DC BIAS ENABLE */ +#define WM8737_RINPUT1_DC_BIAS_ENABLE_WIDTH 1 /* RINPUT1 DC BIAS ENABLE */ + +/* + * R11 (0x0B) - Noise Gate + */ +#define WM8737_NGTH_MASK 0x001C /* NGTH - [4:2] */ +#define WM8737_NGTH_SHIFT 2 /* NGTH - [4:2] */ +#define WM8737_NGTH_WIDTH 3 /* NGTH - [4:2] */ +#define WM8737_NGAT 0x0001 /* NGAT */ +#define WM8737_NGAT_MASK 0x0001 /* NGAT */ +#define WM8737_NGAT_SHIFT 0 /* NGAT */ +#define WM8737_NGAT_WIDTH 1 /* NGAT */ + +/* + * R12 (0x0C) - ALC1 + */ +#define WM8737_ALCSEL_MASK 0x0180 /* ALCSEL - [8:7] */ +#define WM8737_ALCSEL_SHIFT 7 /* ALCSEL - [8:7] */ +#define WM8737_ALCSEL_WIDTH 2 /* ALCSEL - [8:7] */ +#define WM8737_MAX_GAIN_MASK 0x0070 /* MAX GAIN - [6:4] */ +#define WM8737_MAX_GAIN_SHIFT 4 /* MAX GAIN - [6:4] */ +#define WM8737_MAX_GAIN_WIDTH 3 /* MAX GAIN - [6:4] */ +#define WM8737_ALCL_MASK 0x000F /* ALCL - [3:0] */ +#define WM8737_ALCL_SHIFT 0 /* ALCL - [3:0] */ +#define WM8737_ALCL_WIDTH 4 /* ALCL - [3:0] */ + +/* + * R13 (0x0D) - ALC2 + */ +#define WM8737_ALCZCE 0x0010 /* ALCZCE */ +#define WM8737_ALCZCE_MASK 0x0010 /* ALCZCE */ +#define WM8737_ALCZCE_SHIFT 4 /* ALCZCE */ +#define WM8737_ALCZCE_WIDTH 1 /* ALCZCE */ +#define WM8737_HLD_MASK 0x000F /* HLD - [3:0] */ +#define WM8737_HLD_SHIFT 0 /* HLD - [3:0] */ +#define WM8737_HLD_WIDTH 4 /* HLD - [3:0] */ + +/* + * R14 (0x0E) - ALC3 + */ +#define WM8737_DCY_MASK 0x00F0 /* DCY - [7:4] */ +#define WM8737_DCY_SHIFT 4 /* DCY - [7:4] */ +#define WM8737_DCY_WIDTH 4 /* DCY - [7:4] */ +#define WM8737_ATK_MASK 0x000F /* ATK - [3:0] */ +#define WM8737_ATK_SHIFT 0 /* ATK - [3:0] */ +#define WM8737_ATK_WIDTH 4 /* ATK - [3:0] */ + +/* + * R15 (0x0F) - Reset + */ +#define WM8737_RESET_MASK 0x01FF /* RESET - [8:0] */ +#define WM8737_RESET_SHIFT 0 /* RESET - [8:0] */ +#define WM8737_RESET_WIDTH 9 /* RESET - [8:0] 
*/ + +#endif diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c index aea60ef8aba7..494f2d31d75b 100644 --- a/sound/soc/codecs/wm8741.c +++ b/sound/soc/codecs/wm8741.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -94,10 +93,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8741_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8741_dapm_widgets, - ARRAY_SIZE(wm8741_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8741_dapm_widgets, + ARRAY_SIZE(wm8741_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -455,7 +455,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8741 = { .resume = wm8741_resume, .reg_cache_size = ARRAY_SIZE(wm8741_reg_defaults), .reg_word_size = sizeof(u16), - .reg_cache_default = &wm8741_reg_defaults, + .reg_cache_default = wm8741_reg_defaults, }; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c index 6c924cd2cfd4..38f38fddd190 100644 --- a/sound/soc/codecs/wm8750.c +++ b/sound/soc/codecs/wm8750.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include "wm8750.h" @@ -53,7 +52,6 @@ static const u16 wm8750_reg[] = { struct wm8750_priv { unsigned int sysclk; enum snd_soc_control_type control_type; - u16 reg_cache[ARRAY_SIZE(wm8750_reg)]; }; #define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0) @@ -399,10 +397,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8750_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets, - ARRAY_SIZE(wm8750_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, + ARRAY_SIZE(wm8750_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -615,7 +614,7 @@ static int wm8750_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Set VMID to 5k */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1); @@ -630,7 +629,7 @@ static int wm8750_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8750_PWR1, 0x0001); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index 87caae59e939..79b02ae125c5 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include @@ -623,10 +622,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8753_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, - ARRAY_SIZE(wm8753_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets, + ARRAY_SIZE(wm8753_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -1245,7 +1245,7 @@ static int wm8753_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8753_PWR1, 0x0001); break; } - 
codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1435,9 +1435,11 @@ static void wm8753_set_dai_mode(struct snd_soc_codec *codec, static void wm8753_work(struct work_struct *work) { - struct snd_soc_codec *codec = - container_of(work, struct snd_soc_codec, delayed_work.work); - wm8753_set_bias_level(codec, codec->bias_level); + struct snd_soc_dapm_context *dapm = + container_of(work, struct snd_soc_dapm_context, + delayed_work.work); + struct snd_soc_codec *codec = dapm->codec; + wm8753_set_bias_level(codec, dapm->bias_level); } static int wm8753_suspend(struct snd_soc_codec *codec, pm_message_t state) @@ -1466,41 +1468,22 @@ static int wm8753_resume(struct snd_soc_codec *codec) wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* charge wm8753 caps */ - if (codec->suspend_bias_level == SND_SOC_BIAS_ON) { + if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) { wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE); - codec->bias_level = SND_SOC_BIAS_ON; - schedule_delayed_work(&codec->delayed_work, + codec->dapm.bias_level = SND_SOC_BIAS_ON; + schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(caps_charge)); } return 0; } -/* - * This function forces any delayed work to be queued and run. - */ -static int run_delayed_work(struct delayed_work *dwork) -{ - int ret; - - /* cancel any work waiting to be queued. */ - ret = cancel_delayed_work(dwork); - - /* if there was any work waiting then we run it now and - * wait for it's completion */ - if (ret) { - schedule_delayed_work(dwork, 0); - flush_scheduled_work(); - } - return ret; -} - static int wm8753_probe(struct snd_soc_codec *codec) { struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); int ret; - INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work); + INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8753_work); ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8753->control_type); if (ret < 0) { @@ -1519,7 +1502,7 @@ static int wm8753_probe(struct snd_soc_codec *codec) /* charge output caps */ wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE); - schedule_delayed_work(&codec->delayed_work, + schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(caps_charge)); /* set the update bits */ @@ -1544,7 +1527,7 @@ static int wm8753_probe(struct snd_soc_codec *codec) /* power down chip */ static int wm8753_remove(struct snd_soc_codec *codec) { - run_delayed_work(&codec->delayed_work); + flush_delayed_work_sync(&codec->dapm.delayed_work); wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c new file mode 100644 index 000000000000..19b92baa9e8c --- /dev/null +++ b/sound/soc/codecs/wm8770.c @@ -0,0 +1,749 @@ +/* + * wm8770.c -- WM8770 ALSA SoC Audio driver + * + * Copyright 2010 Wolfson Microelectronics plc + * + * Author: Dimitris Papastamos + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wm8770.h" + +#define WM8770_NUM_SUPPLIES 3 +static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = { + "AVDD1", + "AVDD2", + "DVDD" +}; + +static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = { + 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0, 0x90, 0, + 0, 0x22, 0x22, 0x3e, + 0xc, 0xc, 0x100, 0x189, + 0x189, 0x8770 +}; + +struct wm8770_priv { + enum snd_soc_control_type control_type; + struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES]; + struct notifier_block disable_nb[WM8770_NUM_SUPPLIES]; + struct snd_soc_codec *codec; + int sysclk; +}; + +static int vout12supply_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event); +static int vout34supply_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event); + +/* + * We can't use the same notifier block for more than one supply and + * there's no way I can see to get from a callback to the caller + * except container_of(). + */ +#define WM8770_REGULATOR_EVENT(n) \ +static int wm8770_regulator_event_##n(struct notifier_block *nb, \ + unsigned long event, void *data) \ +{ \ + struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \ + disable_nb[n]); \ + if (event & REGULATOR_EVENT_DISABLE) { \ + wm8770->codec->cache_sync = 1; \ + } \ + return 0; \ +} + +WM8770_REGULATOR_EVENT(0) +WM8770_REGULATOR_EVENT(1) +WM8770_REGULATOR_EVENT(2) + +static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0); +static const DECLARE_TLV_DB_SCALE(dac_dig_tlv, -12750, 50, 1); +static const DECLARE_TLV_DB_SCALE(dac_alg_tlv, -12700, 100, 1); + +static const char *dac_phase_text[][2] = { + { "DAC1 Normal", "DAC1 Inverted" }, + { "DAC2 Normal", "DAC2 Inverted" }, + { "DAC3 Normal", "DAC3 Inverted" }, + { "DAC4 Normal", "DAC4 Inverted" }, +}; + +static const struct soc_enum dac_phase[] = { + SOC_ENUM_DOUBLE(WM8770_DACPHASE, 0, 1, 2, dac_phase_text[0]), + SOC_ENUM_DOUBLE(WM8770_DACPHASE, 2, 3, 2, dac_phase_text[1]), + SOC_ENUM_DOUBLE(WM8770_DACPHASE, 4, 5, 2, dac_phase_text[2]), + SOC_ENUM_DOUBLE(WM8770_DACPHASE, 6, 7, 2, dac_phase_text[3]), +}; + +static const struct snd_kcontrol_new wm8770_snd_controls[] = { + /* global DAC playback controls */ + SOC_SINGLE_TLV("DAC Playback Volume", WM8770_MSDIGVOL, 0, 255, 0, + dac_dig_tlv), + SOC_SINGLE("DAC Playback Switch", WM8770_DACMUTE, 4, 1, 1), + SOC_SINGLE("DAC Playback ZC Switch", WM8770_DACCTRL1, 0, 1, 0), + + /* global VOUT playback controls */ + SOC_SINGLE_TLV("VOUT Playback Volume", WM8770_MSALGVOL, 0, 127, 0, + dac_alg_tlv), + SOC_SINGLE("VOUT Playback ZC Switch", WM8770_MSALGVOL, 7, 1, 0), + + /* VOUT1/2/3/4 specific controls */ + SOC_DOUBLE_R_TLV("VOUT1 Playback Volume", WM8770_VOUT1LVOL, + WM8770_VOUT1RVOL, 0, 127, 0, dac_alg_tlv), + SOC_DOUBLE_R("VOUT1 Playback ZC Switch", WM8770_VOUT1LVOL, + WM8770_VOUT1RVOL, 7, 1, 0), + SOC_DOUBLE_R_TLV("VOUT2 Playback Volume", WM8770_VOUT2LVOL, + WM8770_VOUT2RVOL, 0, 127, 0, dac_alg_tlv), + SOC_DOUBLE_R("VOUT2 Playback ZC Switch", WM8770_VOUT2LVOL, + WM8770_VOUT2RVOL, 7, 1, 0), + SOC_DOUBLE_R_TLV("VOUT3 Playback Volume", WM8770_VOUT3LVOL, + WM8770_VOUT3RVOL, 0, 127, 0, dac_alg_tlv), + SOC_DOUBLE_R("VOUT3 Playback ZC Switch", WM8770_VOUT3LVOL, + WM8770_VOUT3RVOL, 7, 1, 0), + SOC_DOUBLE_R_TLV("VOUT4 Playback Volume", WM8770_VOUT4LVOL, + WM8770_VOUT4RVOL, 0, 127, 0, 
dac_alg_tlv), + SOC_DOUBLE_R("VOUT4 Playback ZC Switch", WM8770_VOUT4LVOL, + WM8770_VOUT4RVOL, 7, 1, 0), + + /* DAC1/2/3/4 specific controls */ + SOC_DOUBLE_R_TLV("DAC1 Playback Volume", WM8770_DAC1LVOL, + WM8770_DAC1RVOL, 0, 255, 0, dac_dig_tlv), + SOC_SINGLE("DAC1 Deemphasis Switch", WM8770_DACCTRL2, 0, 1, 0), + SOC_ENUM("DAC1 Phase", dac_phase[0]), + SOC_DOUBLE_R_TLV("DAC2 Playback Volume", WM8770_DAC2LVOL, + WM8770_DAC2RVOL, 0, 255, 0, dac_dig_tlv), + SOC_SINGLE("DAC2 Deemphasis Switch", WM8770_DACCTRL2, 1, 1, 0), + SOC_ENUM("DAC2 Phase", dac_phase[1]), + SOC_DOUBLE_R_TLV("DAC3 Playback Volume", WM8770_DAC3LVOL, + WM8770_DAC3RVOL, 0, 255, 0, dac_dig_tlv), + SOC_SINGLE("DAC3 Deemphasis Switch", WM8770_DACCTRL2, 2, 1, 0), + SOC_ENUM("DAC3 Phase", dac_phase[2]), + SOC_DOUBLE_R_TLV("DAC4 Playback Volume", WM8770_DAC4LVOL, + WM8770_DAC4RVOL, 0, 255, 0, dac_dig_tlv), + SOC_SINGLE("DAC4 Deemphasis Switch", WM8770_DACCTRL2, 3, 1, 0), + SOC_ENUM("DAC4 Phase", dac_phase[3]), + + /* ADC specific controls */ + SOC_DOUBLE_R_TLV("Capture Volume", WM8770_ADCLCTRL, WM8770_ADCRCTRL, + 0, 31, 0, adc_tlv), + SOC_DOUBLE_R("Capture Switch", WM8770_ADCLCTRL, WM8770_ADCRCTRL, + 5, 1, 1), + + /* other controls */ + SOC_SINGLE("ADC 128x Oversampling Switch", WM8770_MSTRCTRL, 3, 1, 0), + SOC_SINGLE("ADC Highpass Filter Switch", WM8770_IFACECTRL, 8, 1, 1) +}; + +static const char *ain_text[] = { + "AIN1", "AIN2", "AIN3", "AIN4", + "AIN5", "AIN6", "AIN7", "AIN8" +}; + +static const struct soc_enum ain_enum = + SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text); + +static const struct snd_kcontrol_new ain_mux = + SOC_DAPM_ENUM("Capture Mux", ain_enum); + +static const struct snd_kcontrol_new vout1_mix_controls[] = { + SOC_DAPM_SINGLE("DAC1 Switch", WM8770_OUTMUX1, 0, 1, 0), + SOC_DAPM_SINGLE("AUX1 Switch", WM8770_OUTMUX1, 1, 1, 0), + SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 2, 1, 0) +}; + +static const struct snd_kcontrol_new vout2_mix_controls[] = { + SOC_DAPM_SINGLE("DAC2 Switch", WM8770_OUTMUX1, 3, 1, 0), + SOC_DAPM_SINGLE("AUX2 Switch", WM8770_OUTMUX1, 4, 1, 0), + SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 5, 1, 0) +}; + +static const struct snd_kcontrol_new vout3_mix_controls[] = { + SOC_DAPM_SINGLE("DAC3 Switch", WM8770_OUTMUX2, 0, 1, 0), + SOC_DAPM_SINGLE("AUX3 Switch", WM8770_OUTMUX2, 1, 1, 0), + SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 2, 1, 0) +}; + +static const struct snd_kcontrol_new vout4_mix_controls[] = { + SOC_DAPM_SINGLE("DAC4 Switch", WM8770_OUTMUX2, 3, 1, 0), + SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 4, 1, 0) +}; + +static const struct snd_soc_dapm_widget wm8770_dapm_widgets[] = { + SND_SOC_DAPM_INPUT("AUX1"), + SND_SOC_DAPM_INPUT("AUX2"), + SND_SOC_DAPM_INPUT("AUX3"), + + SND_SOC_DAPM_INPUT("AIN1"), + SND_SOC_DAPM_INPUT("AIN2"), + SND_SOC_DAPM_INPUT("AIN3"), + SND_SOC_DAPM_INPUT("AIN4"), + SND_SOC_DAPM_INPUT("AIN5"), + SND_SOC_DAPM_INPUT("AIN6"), + SND_SOC_DAPM_INPUT("AIN7"), + SND_SOC_DAPM_INPUT("AIN8"), + + SND_SOC_DAPM_MUX("Capture Mux", WM8770_ADCMUX, 8, 1, &ain_mux), + + SND_SOC_DAPM_ADC("ADC", "Capture", WM8770_PWDNCTRL, 1, 1), + + SND_SOC_DAPM_DAC("DAC1", "Playback", WM8770_PWDNCTRL, 2, 1), + SND_SOC_DAPM_DAC("DAC2", "Playback", WM8770_PWDNCTRL, 3, 1), + SND_SOC_DAPM_DAC("DAC3", "Playback", WM8770_PWDNCTRL, 4, 1), + SND_SOC_DAPM_DAC("DAC4", "Playback", WM8770_PWDNCTRL, 5, 1), + + SND_SOC_DAPM_SUPPLY("VOUT12 Supply", SND_SOC_NOPM, 0, 0, + vout12supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY("VOUT34 Supply", SND_SOC_NOPM, 
0, 0, + vout34supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_MIXER("VOUT1 Mixer", SND_SOC_NOPM, 0, 0, + vout1_mix_controls, ARRAY_SIZE(vout1_mix_controls)), + SND_SOC_DAPM_MIXER("VOUT2 Mixer", SND_SOC_NOPM, 0, 0, + vout2_mix_controls, ARRAY_SIZE(vout2_mix_controls)), + SND_SOC_DAPM_MIXER("VOUT3 Mixer", SND_SOC_NOPM, 0, 0, + vout3_mix_controls, ARRAY_SIZE(vout3_mix_controls)), + SND_SOC_DAPM_MIXER("VOUT4 Mixer", SND_SOC_NOPM, 0, 0, + vout4_mix_controls, ARRAY_SIZE(vout4_mix_controls)), + + SND_SOC_DAPM_OUTPUT("VOUT1"), + SND_SOC_DAPM_OUTPUT("VOUT2"), + SND_SOC_DAPM_OUTPUT("VOUT3"), + SND_SOC_DAPM_OUTPUT("VOUT4") +}; + +static const struct snd_soc_dapm_route wm8770_intercon[] = { + { "Capture Mux", "AIN1", "AIN1" }, + { "Capture Mux", "AIN2", "AIN2" }, + { "Capture Mux", "AIN3", "AIN3" }, + { "Capture Mux", "AIN4", "AIN4" }, + { "Capture Mux", "AIN5", "AIN5" }, + { "Capture Mux", "AIN6", "AIN6" }, + { "Capture Mux", "AIN7", "AIN7" }, + { "Capture Mux", "AIN8", "AIN8" }, + + { "ADC", NULL, "Capture Mux" }, + + { "VOUT1 Mixer", NULL, "VOUT12 Supply" }, + { "VOUT1 Mixer", "DAC1 Switch", "DAC1" }, + { "VOUT1 Mixer", "AUX1 Switch", "AUX1" }, + { "VOUT1 Mixer", "Bypass Switch", "Capture Mux" }, + + { "VOUT2 Mixer", NULL, "VOUT12 Supply" }, + { "VOUT2 Mixer", "DAC2 Switch", "DAC2" }, + { "VOUT2 Mixer", "AUX2 Switch", "AUX2" }, + { "VOUT2 Mixer", "Bypass Switch", "Capture Mux" }, + + { "VOUT3 Mixer", NULL, "VOUT34 Supply" }, + { "VOUT3 Mixer", "DAC3 Switch", "DAC3" }, + { "VOUT3 Mixer", "AUX3 Switch", "AUX3" }, + { "VOUT3 Mixer", "Bypass Switch", "Capture Mux" }, + + { "VOUT4 Mixer", NULL, "VOUT34 Supply" }, + { "VOUT4 Mixer", "DAC4 Switch", "DAC4" }, + { "VOUT4 Mixer", "Bypass Switch", "Capture Mux" }, + + { "VOUT1", NULL, "VOUT1 Mixer" }, + { "VOUT2", NULL, "VOUT2 Mixer" }, + { "VOUT3", NULL, "VOUT3 Mixer" }, + { "VOUT4", NULL, "VOUT4 Mixer" } +}; + +static int vout12supply_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec; + + codec = w->codec; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0); + break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0x180); + break; + } + + return 0; +} + +static int vout34supply_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec; + + codec = w->codec; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0); + break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0x180); + break; + } + + return 0; +} + +static int wm8770_reset(struct snd_soc_codec *codec) +{ + return snd_soc_write(codec, WM8770_RESET, 0); +} + +static int wm8770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct snd_soc_codec *codec; + int iface, master; + + codec = dai->codec; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + master = 0x100; + break; + case SND_SOC_DAIFMT_CBS_CFS: + master = 0; + break; + default: + return -EINVAL; + } + + iface = 0; + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + iface |= 0x2; + break; + case SND_SOC_DAIFMT_RIGHT_J: + break; + case SND_SOC_DAIFMT_LEFT_J: + iface |= 0x1; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_IF: + iface |= 0xc; + break; + case 
SND_SOC_DAIFMT_IB_NF: + iface |= 0x8; + break; + case SND_SOC_DAIFMT_NB_IF: + iface |= 0x4; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, WM8770_IFACECTRL, 0xf, iface); + snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x100, master); + + return 0; +} + +static const int mclk_ratios[] = { + 128, + 192, + 256, + 384, + 512, + 768 +}; + +static int wm8770_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec; + struct wm8770_priv *wm8770; + int i; + int iface; + int shift; + int ratio; + + codec = dai->codec; + wm8770 = snd_soc_codec_get_drvdata(codec); + + iface = 0; + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + break; + case SNDRV_PCM_FORMAT_S20_3LE: + iface |= 0x10; + break; + case SNDRV_PCM_FORMAT_S24_LE: + iface |= 0x20; + break; + case SNDRV_PCM_FORMAT_S32_LE: + iface |= 0x30; + break; + } + + switch (substream->stream) { + case SNDRV_PCM_STREAM_PLAYBACK: + i = 0; + shift = 4; + break; + case SNDRV_PCM_STREAM_CAPTURE: + i = 2; + shift = 0; + break; + default: + return -EINVAL; + } + + /* Only need to set MCLK/LRCLK ratio if we're master */ + if (snd_soc_read(codec, WM8770_MSTRCTRL) & 0x100) { + for (; i < ARRAY_SIZE(mclk_ratios); ++i) { + ratio = wm8770->sysclk / params_rate(params); + if (ratio == mclk_ratios[i]) + break; + } + + if (i == ARRAY_SIZE(mclk_ratios)) { + dev_err(codec->dev, + "Unable to configure MCLK ratio %d/%d\n", + wm8770->sysclk, params_rate(params)); + return -EINVAL; + } + + dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]); + + snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x7 << shift, + i << shift); + } + + snd_soc_update_bits(codec, WM8770_IFACECTRL, 0x30, iface); + + return 0; +} + +static int wm8770_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_codec *codec; + + codec = dai->codec; + return snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, + !!mute << 4); +} + +static int wm8770_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_codec *codec; + struct wm8770_priv *wm8770; + + codec = dai->codec; + wm8770 = snd_soc_codec_get_drvdata(codec); + wm8770->sysclk = freq; + return 0; +} + +static void wm8770_sync_cache(struct snd_soc_codec *codec) +{ + int i; + u16 *cache; + + if (!codec->cache_sync) + return; + + codec->cache_only = 0; + cache = codec->reg_cache; + for (i = 0; i < codec->driver->reg_cache_size; i++) { + if (i == WM8770_RESET || cache[i] == wm8770_reg_defs[i]) + continue; + snd_soc_write(codec, i, cache[i]); + } + codec->cache_sync = 0; +} + +static int wm8770_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + int ret; + struct wm8770_priv *wm8770; + + wm8770 = snd_soc_codec_get_drvdata(codec); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { + ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies), + wm8770->supplies); + if (ret) { + dev_err(codec->dev, + "Failed to enable supplies: %d\n", + ret); + return ret; + } + wm8770_sync_cache(codec); + /* global powerup */ + snd_soc_write(codec, WM8770_PWDNCTRL, 0); + } + break; + case SND_SOC_BIAS_OFF: + /* global powerdown */ + snd_soc_write(codec, WM8770_PWDNCTRL, 1); + regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), + wm8770->supplies); + break; + } + + codec->dapm.bias_level = level; + return 0; +} + +#define WM8770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | 
SNDRV_PCM_FMTBIT_S20_3LE | \ + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_ops wm8770_dai_ops = { + .digital_mute = wm8770_mute, + .hw_params = wm8770_hw_params, + .set_fmt = wm8770_set_fmt, + .set_sysclk = wm8770_set_sysclk, +}; + +static struct snd_soc_dai_driver wm8770_dai = { + .name = "wm8770-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = WM8770_FORMATS + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = WM8770_FORMATS + }, + .ops = &wm8770_dai_ops, + .symmetric_rates = 1 +}; + +#ifdef CONFIG_PM +static int wm8770_suspend(struct snd_soc_codec *codec, pm_message_t state) +{ + wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static int wm8770_resume(struct snd_soc_codec *codec) +{ + wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + return 0; +} +#else +#define wm8770_suspend NULL +#define wm8770_resume NULL +#endif + +static int wm8770_probe(struct snd_soc_codec *codec) +{ + struct wm8770_priv *wm8770; + int ret; + int i; + + wm8770 = snd_soc_codec_get_drvdata(codec); + wm8770->codec = codec; + + codec->dapm.idle_bias_off = 1; + + ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8770->control_type); + if (ret < 0) { + dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) + wm8770->supplies[i].supply = wm8770_supply_names[i]; + + ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8770->supplies), + wm8770->supplies); + if (ret) { + dev_err(codec->dev, "Failed to request supplies: %d\n", ret); + return ret; + } + + wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0; + wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1; + wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2; + + /* This should really be moved into the regulator core */ + for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) { + ret = regulator_register_notifier(wm8770->supplies[i].consumer, + &wm8770->disable_nb[i]); + if (ret) { + dev_err(codec->dev, + "Failed to register regulator notifier: %d\n", + ret); + } + } + + ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies), + wm8770->supplies); + if (ret) { + dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); + goto err_reg_get; + } + + ret = wm8770_reset(codec); + if (ret < 0) { + dev_err(codec->dev, "Failed to issue reset: %d\n", ret); + goto err_reg_enable; + } + + wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + + /* latch the volume update bits */ + snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_VOUT1RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_VOUT2RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_VOUT3RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_VOUT4RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_DAC1RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_DAC2RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_DAC3RVOL, 0x100, 0x100); + snd_soc_update_bits(codec, WM8770_DAC4RVOL, 0x100, 0x100); + + /* mute all DACs */ + snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10); + + snd_soc_add_controls(codec, wm8770_snd_controls, + ARRAY_SIZE(wm8770_snd_controls)); + snd_soc_dapm_new_controls(&codec->dapm, wm8770_dapm_widgets, + 
ARRAY_SIZE(wm8770_dapm_widgets));
+ snd_soc_dapm_add_routes(&codec->dapm, wm8770_intercon,
+ ARRAY_SIZE(wm8770_intercon));
+ return 0;
+
+err_reg_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+err_reg_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+ return ret;
+}
+
+static int wm8770_remove(struct snd_soc_codec *codec)
+{
+ struct wm8770_priv *wm8770;
+ int i;
+
+ wm8770 = snd_soc_codec_get_drvdata(codec);
+ wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
+ regulator_unregister_notifier(wm8770->supplies[i].consumer,
+ &wm8770->disable_nb[i]);
+ regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
+ .probe = wm8770_probe,
+ .remove = wm8770_remove,
+ .suspend = wm8770_suspend,
+ .resume = wm8770_resume,
+ .set_bias_level = wm8770_set_bias_level,
+ .reg_cache_size = ARRAY_SIZE(wm8770_reg_defs),
+ .reg_word_size = sizeof (u16),
+ .reg_cache_default = wm8770_reg_defs
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8770_spi_probe(struct spi_device *spi)
+{
+ struct wm8770_priv *wm8770;
+ int ret;
+
+ wm8770 = kzalloc(sizeof(struct wm8770_priv), GFP_KERNEL);
+ if (!wm8770)
+ return -ENOMEM;
+
+ wm8770->control_type = SND_SOC_SPI;
+ spi_set_drvdata(spi, wm8770);
+
+ ret = snd_soc_register_codec(&spi->dev,
+ &soc_codec_dev_wm8770, &wm8770_dai, 1);
+ if (ret < 0)
+ kfree(wm8770);
+ return ret;
+}
+
+static int __devexit wm8770_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_codec(&spi->dev);
+ kfree(spi_get_drvdata(spi));
+ return 0;
+}
+
+static struct spi_driver wm8770_spi_driver = {
+ .driver = {
+ .name = "wm8770",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8770_spi_probe,
+ .remove = __devexit_p(wm8770_spi_remove)
+};
+#endif
+
+static int __init wm8770_modinit(void)
+{
+ int ret = 0;
+
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&wm8770_spi_driver);
+ if (ret) {
+ printk(KERN_ERR "Failed to register wm8770 SPI driver: %d\n",
+ ret);
+ }
+#endif
+ return ret;
+}
+module_init(wm8770_modinit);
+
+static void __exit wm8770_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+ spi_unregister_driver(&wm8770_spi_driver);
+#endif
+}
+module_exit(wm8770_exit);
+
+MODULE_DESCRIPTION("ASoC WM8770 driver");
+MODULE_AUTHOR("Dimitris Papastamos ");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8770.h b/sound/soc/codecs/wm8770.h
new file mode 100644
index 000000000000..5f1b3bda6cc8
--- /dev/null
+++ b/sound/soc/codecs/wm8770.h
@@ -0,0 +1,51 @@
+/*
+ * wm8770.h -- WM8770 ASoC driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8770_H
+#define _WM8770_H
+
+/* Registers */
+#define WM8770_VOUT1LVOL 0
+#define WM8770_VOUT1RVOL 0x1
+#define WM8770_VOUT2LVOL 0x2
+#define WM8770_VOUT2RVOL 0x3
+#define WM8770_VOUT3LVOL 0x4
+#define WM8770_VOUT3RVOL 0x5
+#define WM8770_VOUT4LVOL 0x6
+#define WM8770_VOUT4RVOL 0x7
+#define WM8770_MSALGVOL 0x8
+#define WM8770_DAC1LVOL 0x9
+#define WM8770_DAC1RVOL 0xa
+#define WM8770_DAC2LVOL 0xb
+#define WM8770_DAC2RVOL 0xc
+#define WM8770_DAC3LVOL 0xd
+#define WM8770_DAC3RVOL 0xe
+#define WM8770_DAC4LVOL 0xf
+#define WM8770_DAC4RVOL 0x10
+#define WM8770_MSDIGVOL 0x11
+#define WM8770_DACPHASE 0x12
+#define WM8770_DACCTRL1 0x13
+#define WM8770_DACMUTE 0x14
+#define WM8770_DACCTRL2 0x15
+#define WM8770_IFACECTRL 0x16
+#define WM8770_MSTRCTRL 0x17
+#define WM8770_PWDNCTRL 0x18
+#define WM8770_ADCLCTRL 0x19
+#define WM8770_ADCRCTRL 0x1a
+#define WM8770_ADCMUX 0x1b
+#define WM8770_OUTMUX1 0x1c
+#define WM8770_OUTMUX2 0x1d
+#define WM8770_RESET 0x31
+
+#define WM8770_CACHEREGNUM 0x20
+
+#endif
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 0132a27140ae..8e7953b1b790 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -306,7 +305,7 @@ static int wm8776_set_bias_level(struct snd_soc_codec *codec,
 case SND_SOC_BIAS_PREPARE:
 break;
 case SND_SOC_BIAS_STANDBY:
- if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 /* Disable the global powerdown; DAPM does the rest */
 snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
 }
@@ -317,7 +316,7 @@ static int wm8776_set_bias_level(struct snd_soc_codec *codec,
 break;
 }
- codec->bias_level = level;
+ codec->dapm.bias_level = level;
 return 0;
 }
@@ -404,6 +403,7 @@ static int wm8776_resume(struct snd_soc_codec *codec)
 static int wm8776_probe(struct snd_soc_codec *codec)
 {
 struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
 int ret = 0;
 ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8776->control_type);
@@ -427,9 +427,9 @@ static int wm8776_probe(struct snd_soc_codec *codec)
 snd_soc_add_controls(codec, wm8776_snd_controls,
 ARRAY_SIZE(wm8776_snd_controls));
- snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
+ snd_soc_dapm_new_controls(dapm, wm8776_dapm_widgets,
 ARRAY_SIZE(wm8776_dapm_widgets));
- snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+ snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 return ret;
 }
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 4599e8e95aa2..6dae1b40c9f7 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -23,7 +23,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -515,7 +514,7 @@ static int wm8804_set_bias_level(struct snd_soc_codec *codec,
 snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0);
 break;
 case SND_SOC_BIAS_STANDBY:
- if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
 wm8804->supplies);
 if (ret) {
@@ -537,7 +536,7 @@
 break;
 }
- codec->bias_level = level;
+ codec->dapm.bias_level = level;
 return 0;
 }
@@ -581,7 +580,7 @@ static int wm8804_probe(struct snd_soc_codec *codec)
 wm8804 = snd_soc_codec_get_drvdata(codec);
 wm8804->codec = codec;
- codec->idle_bias_off = 1;
+ codec->dapm.idle_bias_off = 1;
 ret = snd_soc_codec_set_cache_io(codec, 8, 8,
wm8804->control_type); if (ret < 0) { diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c index aca4b1ea10bb..cd0959926d12 100644 --- a/sound/soc/codecs/wm8900.c +++ b/sound/soc/codecs/wm8900.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include @@ -140,7 +139,6 @@ struct wm8900_priv { enum snd_soc_control_type control_type; - u16 reg_cache[WM8900_MAXREG]; u32 fll_in; /* FLL input frequency */ u32 fll_out; /* FLL output frequency */ @@ -611,10 +609,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8900_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets, - ARRAY_SIZE(wm8900_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8900_dapm_widgets, + ARRAY_SIZE(wm8900_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -1051,7 +1050,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: /* Charge capacitors if initial power up */ - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* STARTUP_BIAS_ENA on */ snd_soc_write(codec, WM8900_REG_POWER1, WM8900_REG_POWER1_STARTUP_BIAS_ENA); @@ -1119,7 +1118,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec, WM8900_REG_POWER2_SYSCLK_ENA); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 622b60238a82..987476a5895f 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -29,9 +29,9 @@ #include #include #include -#include #include #include +#include #include "wm8903.h" @@ -214,15 +214,14 @@ static u16 wm8903_reg_defaults[] = { struct wm8903_priv { - u16 reg_cache[ARRAY_SIZE(wm8903_reg_defaults)]; - int sysclk; int irq; - /* Reference counts */ + int fs; + int deemph; + + /* Reference count */ int class_w_users; - int playback_active; - int capture_active; struct completion wseq; @@ -231,9 +230,6 @@ struct wm8903_priv { int mic_short; int mic_last_report; int mic_delay; - - struct snd_pcm_substream *master_substream; - struct snd_pcm_substream *slave_substream; }; static int wm8903_volatile_register(unsigned int reg) @@ -463,6 +459,72 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol, .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) } +static int wm8903_deemph[] = { 0, 32000, 44100, 48000 }; + +static int wm8903_set_deemph(struct snd_soc_codec *codec) +{ + struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); + int val, i, best; + + /* If we're using deemphasis select the nearest available sample + * rate. 
+ */ + if (wm8903->deemph) { + best = 1; + for (i = 2; i < ARRAY_SIZE(wm8903_deemph); i++) { + if (abs(wm8903_deemph[i] - wm8903->fs) < + abs(wm8903_deemph[best] - wm8903->fs)) + best = i; + } + + val = best << WM8903_DEEMPH_SHIFT; + } else { + best = 0; + val = 0; + } + + dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n", + best, wm8903_deemph[best]); + + return snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1, + WM8903_DEEMPH_MASK, val); +} + +static int wm8903_get_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = wm8903->deemph; + + return 0; +} + +static int wm8903_put_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); + int deemph = ucontrol->value.enumerated.item[0]; + int ret = 0; + + if (deemph > 1) + return -EINVAL; + + mutex_lock(&codec->mutex); + if (wm8903->deemph != deemph) { + wm8903->deemph = deemph; + + wm8903_set_deemph(codec); + + ret = 1; + } + mutex_unlock(&codec->mutex); + + return ret; +} + /* ALSA can only do steps of .01dB */ static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); @@ -475,6 +537,23 @@ static const DECLARE_TLV_DB_SCALE(drc_tlv_min, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(drc_tlv_max, 1200, 600, 0); static const DECLARE_TLV_DB_SCALE(drc_tlv_startup, -300, 50, 0); +static const char *hpf_mode_text[] = { + "Hi-fi", "Voice 1", "Voice 2", "Voice 3" +}; + +static const struct soc_enum hpf_mode = + SOC_ENUM_SINGLE(WM8903_ADC_DIGITAL_0, 5, 4, hpf_mode_text); + +static const char *osr_text[] = { + "Low power", "High performance" +}; + +static const struct soc_enum adc_osr = + SOC_ENUM_SINGLE(WM8903_ANALOGUE_ADC_0, 0, 2, osr_text); + +static const struct soc_enum dac_osr = + SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 0, 2, osr_text); + static const char *drc_slope_text[] = { "1", "1/2", "1/4", "1/8", "1/16", "0" }; @@ -537,13 +616,6 @@ static const char *mute_mode_text[] = { static const struct soc_enum mute_mode = SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 9, 2, mute_mode_text); -static const char *dac_deemphasis_text[] = { - "Disabled", "32kHz", "44.1kHz", "48kHz" -}; - -static const struct soc_enum dac_deemphasis = - SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 1, 4, dac_deemphasis_text); - static const char *companding_text[] = { "ulaw", "alaw" }; @@ -613,6 +685,9 @@ SOC_SINGLE("Right Input PGA Common Mode Switch", WM8903_ANALOGUE_RIGHT_INPUT_1, 6, 1, 0), /* ADCs */ +SOC_ENUM("ADC OSR", adc_osr), +SOC_SINGLE("HPF Switch", WM8903_ADC_DIGITAL_0, 4, 1, 0), +SOC_ENUM("HPF Mode", hpf_mode), SOC_SINGLE("DRC Switch", WM8903_DRC_0, 15, 1, 0), SOC_ENUM("DRC Compressor Slope R0", drc_slope_r0), SOC_ENUM("DRC Compressor Slope R1", drc_slope_r1), @@ -642,14 +717,16 @@ SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8903_DAC_DIGITAL_0, 4, 8, 12, 0, digital_sidetone_tlv), /* DAC */ +SOC_ENUM("DAC OSR", dac_osr), SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT, WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), SOC_ENUM("DAC Soft Mute Rate", soft_mute), SOC_ENUM("DAC Mute Mode", mute_mode), SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0), -SOC_ENUM("DAC De-emphasis", dac_deemphasis), SOC_ENUM("DAC Companding Mode", dac_companding), SOC_SINGLE("DAC Companding Switch", 
WM8903_AUDIO_INTERFACE_0, 1, 1, 0), +SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0, + wm8903_get_deemph, wm8903_put_deemph), /* Headphones */ SOC_DOUBLE_R("Headphone Switch", @@ -923,10 +1000,11 @@ static const struct snd_soc_dapm_route intercon[] = { static int wm8903_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8903_dapm_widgets, - ARRAY_SIZE(wm8903_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_new_controls(dapm, wm8903_dapm_widgets, + ARRAY_SIZE(wm8903_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); return 0; } @@ -934,7 +1012,7 @@ static int wm8903_add_widgets(struct snd_soc_codec *codec) static int wm8903_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { - u16 reg, reg2; + u16 reg; switch (level) { case SND_SOC_BIAS_ON: @@ -946,7 +1024,7 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { snd_soc_write(codec, WM8903_CLOCK_RATES_2, WM8903_CLK_SYS_ENA); @@ -958,23 +1036,15 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec, wm8903_run_sequence(codec, 0); wm8903_sync_reg_cache(codec, codec->reg_cache); - /* Enable low impedence charge pump output */ - reg = snd_soc_read(codec, - WM8903_CONTROL_INTERFACE_TEST_1); - snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1, - reg | WM8903_TEST_KEY); - reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1); - snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1, - reg2 | WM8903_CP_SW_KELVIN_MODE_MASK); - snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1, - reg); - /* By default no bypass paths are enabled so * enable Class W support. */ dev_dbg(codec->dev, "Enabling Class W\n"); - snd_soc_write(codec, WM8903_CLASS_W_0, reg | - WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V); + snd_soc_update_bits(codec, WM8903_CLASS_W_0, + WM8903_CP_DYN_FREQ | + WM8903_CP_DYN_V, + WM8903_CP_DYN_FREQ | + WM8903_CP_DYN_V); } reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0); @@ -991,7 +1061,7 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1222,58 +1292,6 @@ static struct { { 0, 0 }, }; -static int wm8903_startup(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_codec *codec = rtd->codec; - struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - struct snd_pcm_runtime *master_runtime; - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - wm8903->playback_active++; - else - wm8903->capture_active++; - - /* The DAI has shared clocks so if we already have a playback or - * capture going then constrain this substream to match it. 
- */ - if (wm8903->master_substream) { - master_runtime = wm8903->master_substream->runtime; - - dev_dbg(codec->dev, "Constraining to %d bits\n", - master_runtime->sample_bits); - - snd_pcm_hw_constraint_minmax(substream->runtime, - SNDRV_PCM_HW_PARAM_SAMPLE_BITS, - master_runtime->sample_bits, - master_runtime->sample_bits); - - wm8903->slave_substream = substream; - } else - wm8903->master_substream = substream; - - return 0; -} - -static void wm8903_shutdown(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_codec *codec = rtd->codec; - struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - wm8903->playback_active--; - else - wm8903->capture_active--; - - if (wm8903->master_substream == substream) - wm8903->master_substream = wm8903->slave_substream; - - wm8903->slave_substream = NULL; -} - static int wm8903_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) @@ -1298,11 +1316,6 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream, u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1); u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1); - if (substream == wm8903->slave_substream) { - dev_dbg(codec->dev, "Ignoring hw_params for slave substream\n"); - return 0; - } - /* Enable sloping stopband filter for low sample rates */ if (fs <= 24000) dac_digital1 |= WM8903_DAC_SB_FILT; @@ -1320,19 +1333,6 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream, } } - /* Constraints should stop us hitting this but let's make sure */ - if (wm8903->capture_active) - switch (sample_rates[dsp_config].rate) { - case 88200: - case 96000: - dev_err(codec->dev, "%dHz unsupported by ADC\n", - fs); - return -EINVAL; - - default: - break; - } - dev_dbg(codec->dev, "DSP fs = %dHz\n", sample_rates[dsp_config].rate); clock1 &= ~WM8903_SAMPLE_RATE_MASK; clock1 |= sample_rates[dsp_config].value; @@ -1428,6 +1428,9 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream, aif2 |= bclk_divs[bclk_div].div; aif3 |= bclk / fs; + wm8903->fs = params_rate(params); + wm8903_set_deemph(codec); + snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0); snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1); snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1); @@ -1521,6 +1524,11 @@ static irqreturn_t wm8903_irq(int irq, void *data) mic_report = wm8903->mic_last_report; int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1); +#ifndef CONFIG_SND_SOC_WM8903_MODULE + if (int_val & (WM8903_MICSHRT_EINT | WM8903_MICDET_EINT)) + trace_snd_soc_jack_irq(dev_name(codec->dev)); +#endif + if (int_val & WM8903_MICSHRT_EINT) { dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol); @@ -1571,8 +1579,6 @@ static irqreturn_t wm8903_irq(int irq, void *data) SNDRV_PCM_FMTBIT_S24_LE) static struct snd_soc_dai_ops wm8903_dai_ops = { - .startup = wm8903_startup, - .shutdown = wm8903_shutdown, .hw_params = wm8903_hw_params, .digital_mute = wm8903_digital_mute, .set_fmt = wm8903_set_dai_fmt, diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h index 996435e681e5..e8490f3edd03 100644 --- a/sound/soc/codecs/wm8903.h +++ b/sound/soc/codecs/wm8903.h @@ -19,10 +19,6 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, int det, int shrt); -#define WM8903_MCLK_DIV_2 1 -#define WM8903_CLK_SYS 2 -#define WM8903_BCLK 3 -#define WM8903_LRCLK 4 /* * 
Register values. @@ -98,8 +94,6 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, #define WM8903_INTERRUPT_STATUS_1_MASK 0x7A #define WM8903_INTERRUPT_POLARITY_1 0x7B #define WM8903_INTERRUPT_CONTROL 0x7E -#define WM8903_CONTROL_INTERFACE_TEST_1 0x81 -#define WM8903_CHARGE_PUMP_TEST_1 0x95 #define WM8903_CLOCK_RATE_TEST_4 0xA4 #define WM8903_ANALOGUE_OUTPUT_BIAS_0 0xAC @@ -1205,25 +1199,6 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, #define WM8903_IRQ_POL_SHIFT 0 /* IRQ_POL */ #define WM8903_IRQ_POL_WIDTH 1 /* IRQ_POL */ -/* - * R129 (0x81) - Control Interface Test 1 - */ -#define WM8903_USER_KEY 0x0002 /* USER_KEY */ -#define WM8903_USER_KEY_MASK 0x0002 /* USER_KEY */ -#define WM8903_USER_KEY_SHIFT 1 /* USER_KEY */ -#define WM8903_USER_KEY_WIDTH 1 /* USER_KEY */ -#define WM8903_TEST_KEY 0x0001 /* TEST_KEY */ -#define WM8903_TEST_KEY_MASK 0x0001 /* TEST_KEY */ -#define WM8903_TEST_KEY_SHIFT 0 /* TEST_KEY */ -#define WM8903_TEST_KEY_WIDTH 1 /* TEST_KEY */ - -/* - * R149 (0x95) - Charge Pump Test 1 - */ -#define WM8903_CP_SW_KELVIN_MODE_MASK 0x0006 /* CP_SW_KELVIN_MODE - [2:1] */ -#define WM8903_CP_SW_KELVIN_MODE_SHIFT 1 /* CP_SW_KELVIN_MODE - [2:1] */ -#define WM8903_CP_SW_KELVIN_MODE_WIDTH 2 /* CP_SW_KELVIN_MODE - [2:1] */ - /* * R164 (0xA4) - Clock Rate Test 4 */ diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 1ec12eff0620..9de44a4c05c0 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -1427,10 +1426,11 @@ static const struct snd_soc_dapm_route wm8912_intercon[] = { static int wm8904_add_widgets(struct snd_soc_codec *codec) { struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8904_core_dapm_widgets, ARRAY_SIZE(wm8904_core_dapm_widgets)); - snd_soc_dapm_add_routes(codec, core_intercon, + snd_soc_dapm_add_routes(dapm, core_intercon, ARRAY_SIZE(core_intercon)); switch (wm8904->devtype) { @@ -1442,20 +1442,20 @@ static int wm8904_add_widgets(struct snd_soc_codec *codec) snd_soc_add_controls(codec, wm8904_snd_controls, ARRAY_SIZE(wm8904_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8904_adc_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8904_adc_dapm_widgets, ARRAY_SIZE(wm8904_adc_dapm_widgets)); - snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets, ARRAY_SIZE(wm8904_dac_dapm_widgets)); - snd_soc_dapm_new_controls(codec, wm8904_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8904_dapm_widgets, ARRAY_SIZE(wm8904_dapm_widgets)); - snd_soc_dapm_add_routes(codec, core_intercon, + snd_soc_dapm_add_routes(dapm, core_intercon, ARRAY_SIZE(core_intercon)); - snd_soc_dapm_add_routes(codec, adc_intercon, + snd_soc_dapm_add_routes(dapm, adc_intercon, ARRAY_SIZE(adc_intercon)); - snd_soc_dapm_add_routes(codec, dac_intercon, + snd_soc_dapm_add_routes(dapm, dac_intercon, ARRAY_SIZE(dac_intercon)); - snd_soc_dapm_add_routes(codec, wm8904_intercon, + snd_soc_dapm_add_routes(dapm, wm8904_intercon, ARRAY_SIZE(wm8904_intercon)); break; @@ -1463,17 +1463,17 @@ static int wm8904_add_widgets(struct snd_soc_codec *codec) snd_soc_add_controls(codec, wm8904_dac_snd_controls, ARRAY_SIZE(wm8904_dac_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets, 
ARRAY_SIZE(wm8904_dac_dapm_widgets)); - snd_soc_dapm_add_routes(codec, dac_intercon, + snd_soc_dapm_add_routes(dapm, dac_intercon, ARRAY_SIZE(dac_intercon)); - snd_soc_dapm_add_routes(codec, wm8912_intercon, + snd_soc_dapm_add_routes(dapm, wm8912_intercon, ARRAY_SIZE(wm8912_intercon)); break; } - snd_soc_dapm_new_widgets(codec); + snd_soc_dapm_new_widgets(dapm); return 0; } @@ -1589,7 +1589,7 @@ static int wm8904_hw_params(struct snd_pcm_substream *substream, - wm8904->fs); for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) { cur_val = abs((wm8904->sysclk_rate / - clk_sys_rates[i].ratio) - wm8904->fs);; + clk_sys_rates[i].ratio) - wm8904->fs); if (cur_val < best_val) { best = i; best_val = cur_val; @@ -2138,7 +2138,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies); if (ret != 0) { @@ -2197,7 +2197,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec, wm8904->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -2373,7 +2373,7 @@ static int wm8904_probe(struct snd_soc_codec *codec) int ret, i; codec->cache_sync = 1; - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; switch (wm8904->devtype) { case WM8904: diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c index 23086e2c976a..25580e3ee7c4 100644 --- a/sound/soc/codecs/wm8940.c +++ b/sound/soc/codecs/wm8940.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include @@ -43,7 +42,6 @@ struct wm8940_priv { unsigned int sysclk; - u16 reg_cache[WM8940_CACHEREGNUM]; enum snd_soc_control_type control_type; void *control_data; }; @@ -291,13 +289,14 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8940_add_widgets(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; - ret = snd_soc_dapm_new_controls(codec, wm8940_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets, ARRAY_SIZE(wm8940_dapm_widgets)); if (ret) goto error_ret; - ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); if (ret) goto error_ret; @@ -735,7 +734,6 @@ static int wm8940_probe(struct snd_soc_codec *codec) return ret; return ret; -; } static int wm8940_remove(struct snd_soc_codec *codec) diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c index 2ac35b0be86a..7167dfc96aa7 100644 --- a/sound/soc/codecs/wm8955.c +++ b/sound/soc/codecs/wm8955.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -576,13 +575,14 @@ static const struct snd_soc_dapm_route wm8955_intercon[] = { static int wm8955_add_widgets(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_add_controls(codec, wm8955_snd_controls, ARRAY_SIZE(wm8955_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8955_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8955_dapm_widgets, ARRAY_SIZE(wm8955_dapm_widgets)); - - snd_soc_dapm_add_routes(codec, wm8955_intercon, + snd_soc_dapm_add_routes(dapm, wm8955_intercon, ARRAY_SIZE(wm8955_intercon)); return 0; @@ -786,7 +786,7 @@ static int wm8955_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == 
SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies), wm8955->supplies); if (ret != 0) { @@ -850,7 +850,7 @@ static int wm8955_set_bias_level(struct snd_soc_codec *codec, wm8955->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index ff6ff2f529d2..4393394b7bc1 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -72,7 +71,6 @@ static const u16 wm8960_reg[WM8960_CACHEREGNUM] = { }; struct wm8960_priv { - u16 reg_cache[WM8960_CACHEREGNUM]; enum snd_soc_control_type control_type; void *control_data; int (*set_bias_level)(struct snd_soc_codec *, @@ -389,27 +387,28 @@ static int wm8960_add_widgets(struct snd_soc_codec *codec) { struct wm8960_data *pdata = codec->dev->platform_data; struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; struct snd_soc_dapm_widget *w; - snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets, ARRAY_SIZE(wm8960_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths)); + snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths)); /* In capless mode OUT3 is used to provide VMID for the * headphone outputs, otherwise it is used as a mono mixer. */ if (pdata && pdata->capless) { - snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_capless, + snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_capless, ARRAY_SIZE(wm8960_dapm_widgets_capless)); - snd_soc_dapm_add_routes(codec, audio_paths_capless, + snd_soc_dapm_add_routes(dapm, audio_paths_capless, ARRAY_SIZE(audio_paths_capless)); } else { - snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_out3, + snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_out3, ARRAY_SIZE(wm8960_dapm_widgets_out3)); - snd_soc_dapm_add_routes(codec, audio_paths_out3, + snd_soc_dapm_add_routes(dapm, audio_paths_out3, ARRAY_SIZE(audio_paths_out3)); } @@ -418,7 +417,9 @@ static int wm8960_add_widgets(struct snd_soc_codec *codec) * list each time to find the desired power state do so now * and save the result. 
*/ - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &codec->card->widgets, list) { + if (w->dapm != &codec->dapm) + continue; if (strcmp(w->name, "LOUT1 PGA") == 0) wm8960->lout1 = w; if (strcmp(w->name, "ROUT1 PGA") == 0) @@ -573,7 +574,7 @@ static int wm8960_set_bias_level_out3(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Enable anti-pop features */ snd_soc_write(codec, WM8960_APOP1, WM8960_POBCTRL | WM8960_SOFT_ST | @@ -611,7 +612,7 @@ static int wm8960_set_bias_level_out3(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -627,7 +628,7 @@ static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_PREPARE: - switch (codec->bias_level) { + switch (codec->dapm.bias_level) { case SND_SOC_BIAS_STANDBY: /* Enable anti pop mode */ snd_soc_update_bits(codec, WM8960_APOP1, @@ -682,7 +683,7 @@ static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - switch (codec->bias_level) { + switch (codec->dapm.bias_level) { case SND_SOC_BIAS_PREPARE: /* Disable HP discharge */ snd_soc_update_bits(codec, WM8960_APOP2, @@ -706,7 +707,7 @@ static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c index 8340485c9851..55252e7d02c9 100644 --- a/sound/soc/codecs/wm8961.c +++ b/sound/soc/codecs/wm8961.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -290,7 +289,6 @@ static u16 wm8961_reg_defaults[] = { struct wm8961_priv { enum snd_soc_control_type control_type; int sysclk; - u16 reg_cache[WM8961_MAX_REGISTER]; }; static int wm8961_volatile_register(unsigned int reg) @@ -882,7 +880,7 @@ static int wm8961_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_PREPARE: - if (codec->bias_level == SND_SOC_BIAS_STANDBY) { + if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) { /* Enable bias generation */ reg = snd_soc_read(codec, WM8961_ANTI_POP); reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN; @@ -897,7 +895,7 @@ static int wm8961_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_PREPARE) { + if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE) { /* VREF off */ reg = snd_soc_read(codec, WM8961_PWR_MGMT_1); reg &= ~WM8961_VREF; @@ -919,7 +917,7 @@ static int wm8961_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -959,6 +957,7 @@ static struct snd_soc_dai_driver wm8961_dai = { static int wm8961_probe(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret = 0; u16 reg; @@ -1024,9 +1023,9 @@ static int wm8961_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, wm8961_snd_controls, ARRAY_SIZE(wm8961_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8961_dapm_widgets, ARRAY_SIZE(wm8961_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths)); + snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths)); return 0; } diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 7c421cc837bd..b9cb1fcf8c92 100644 --- 
a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -29,10 +29,10 @@ #include #include #include -#include #include #include #include +#include #include "wm8962.h" @@ -1956,7 +1956,7 @@ static int wm8962_readable_register(unsigned int reg) static int wm8962_reset(struct snd_soc_codec *codec) { - return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0); + return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243); } static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0); @@ -2677,6 +2677,7 @@ static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = { static int wm8962_add_widgets(struct snd_soc_codec *codec) { struct wm8962_pdata *pdata = dev_get_platdata(codec->dev); + struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_add_controls(codec, wm8962_snd_controls, ARRAY_SIZE(wm8962_snd_controls)); @@ -2688,26 +2689,26 @@ static int wm8962_add_widgets(struct snd_soc_codec *codec) ARRAY_SIZE(wm8962_spk_stereo_controls)); - snd_soc_dapm_new_controls(codec, wm8962_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8962_dapm_widgets, ARRAY_SIZE(wm8962_dapm_widgets)); if (pdata && pdata->spk_mono) - snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_mono_widgets, + snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_mono_widgets, ARRAY_SIZE(wm8962_dapm_spk_mono_widgets)); else - snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_stereo_widgets, + snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_stereo_widgets, ARRAY_SIZE(wm8962_dapm_spk_stereo_widgets)); - snd_soc_dapm_add_routes(codec, wm8962_intercon, + snd_soc_dapm_add_routes(dapm, wm8962_intercon, ARRAY_SIZE(wm8962_intercon)); if (pdata && pdata->spk_mono) - snd_soc_dapm_add_routes(codec, wm8962_spk_mono_intercon, + snd_soc_dapm_add_routes(dapm, wm8962_spk_mono_intercon, ARRAY_SIZE(wm8962_spk_mono_intercon)); else - snd_soc_dapm_add_routes(codec, wm8962_spk_stereo_intercon, + snd_soc_dapm_add_routes(dapm, wm8962_spk_stereo_intercon, ARRAY_SIZE(wm8962_spk_stereo_intercon)); - snd_soc_dapm_disable_pin(codec, "Beep"); + snd_soc_dapm_disable_pin(dapm, "Beep"); return 0; } @@ -2814,7 +2815,7 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec, struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); int ret; - if (level == codec->bias_level) + if (level == codec->dapm.bias_level) return 0; switch (level) { @@ -2828,7 +2829,7 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); if (ret != 0) { @@ -2878,7 +2879,7 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec, wm8962->supplies); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -3348,6 +3349,12 @@ static irqreturn_t wm8962_irq(int irq, void *data) if (active & (WM8962_MICSCD_EINT | WM8962_MICD_EINT)) { dev_dbg(codec->dev, "Microphone event detected\n"); +#ifndef CONFIG_SND_SOC_WM8962_MODULE + trace_snd_soc_jack_irq(dev_name(codec->dev)); +#endif + + pm_wakeup_event(codec->dev, 300); + schedule_delayed_work(&wm8962->mic_work, msecs_to_jiffies(250)); } @@ -3433,6 +3440,7 @@ static void wm8962_beep_work(struct work_struct *work) struct wm8962_priv *wm8962 = container_of(work, struct wm8962_priv, beep_work); struct snd_soc_codec *codec = wm8962->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int i; int reg = 0; int best = 0; @@ -3449,16 +3457,16 @@ static void wm8962_beep_work(struct 
work_struct *work) reg = WM8962_BEEP_ENA | (best << WM8962_BEEP_RATE_SHIFT); - snd_soc_dapm_enable_pin(codec, "Beep"); + snd_soc_dapm_enable_pin(dapm, "Beep"); } else { dev_dbg(codec->dev, "Disabling beep\n"); - snd_soc_dapm_disable_pin(codec, "Beep"); + snd_soc_dapm_disable_pin(dapm, "Beep"); } snd_soc_update_bits(codec, WM8962_BEEP_GENERATOR_1, WM8962_BEEP_ENA | WM8962_BEEP_RATE_MASK, reg); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } /* For usability define a way of injecting beep events for the device - @@ -3706,7 +3714,7 @@ static int wm8962_probe(struct snd_soc_codec *codec) INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work); codec->cache_sync = 1; - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C); if (ret != 0) { @@ -3865,7 +3873,6 @@ err_enable: err_get: regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); err: - kfree(wm8962); return ret; } diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c index 9f18db6e167c..572bb80627a4 100644 --- a/sound/soc/codecs/wm8971.c +++ b/sound/soc/codecs/wm8971.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include "wm8971.h" @@ -333,10 +332,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8971_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8971_dapm_widgets, - ARRAY_SIZE(wm8971_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8971_dapm_widgets, + ARRAY_SIZE(wm8971_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -553,7 +553,7 @@ static int wm8971_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8971_PWR1, 0x0001); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -590,9 +590,11 @@ static struct snd_soc_dai_driver wm8971_dai = { static void wm8971_work(struct work_struct *work) { - struct snd_soc_codec *codec = - container_of(work, struct snd_soc_codec, delayed_work.work); - wm8971_set_bias_level(codec, codec->bias_level); + struct snd_soc_dapm_context *dapm = + container_of(work, struct snd_soc_dapm_context, + delayed_work.work); + struct snd_soc_codec *codec = dapm->codec; + wm8971_set_bias_level(codec, codec->dapm.bias_level); } static int wm8971_suspend(struct snd_soc_codec *codec, pm_message_t state) @@ -620,11 +622,11 @@ static int wm8971_resume(struct snd_soc_codec *codec) wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* charge wm8971 caps */ - if (codec->suspend_bias_level == SND_SOC_BIAS_ON) { + if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) { reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e; snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0); - codec->bias_level = SND_SOC_BIAS_ON; - queue_delayed_work(wm8971_workq, &codec->delayed_work, + codec->dapm.bias_level = SND_SOC_BIAS_ON; + queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work, msecs_to_jiffies(1000)); } @@ -643,7 +645,7 @@ static int wm8971_probe(struct snd_soc_codec *codec) return ret; } - INIT_DELAYED_WORK(&codec->delayed_work, wm8971_work); + INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work); wm8971_workq = create_workqueue("wm8971"); if (wm8971_workq == NULL) return -ENOMEM; @@ -653,8 +655,8 @@ static int wm8971_probe(struct snd_soc_codec *codec) /* charge output caps - set vmid to 5k for quick power up */ reg = snd_soc_read(codec, 
WM8971_PWR1) & 0xfe3e; snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0); - codec->bias_level = SND_SOC_BIAS_STANDBY; - queue_delayed_work(wm8971_workq, &codec->delayed_work, + codec->dapm.bias_level = SND_SOC_BIAS_STANDBY; + queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work, msecs_to_jiffies(1000)); /* set the update bits */ diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c index b4363f6d19b3..ca646a822444 100644 --- a/sound/soc/codecs/wm8974.c +++ b/sound/soc/codecs/wm8974.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -52,7 +51,6 @@ static const u16 wm8974_reg[WM8974_CACHEREGNUM] = { struct wm8974_priv { enum snd_soc_control_type control_type; - u16 reg_cache[WM8974_CACHEREGNUM]; }; #define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0) @@ -274,10 +272,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8974_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets, - ARRAY_SIZE(wm8974_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm8974_dapm_widgets, + ARRAY_SIZE(wm8974_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -530,7 +529,7 @@ static int wm8974_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN; - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Initial cap charge at VMID 5k */ snd_soc_write(codec, WM8974_POWER1, power1 | 0x3); mdelay(100); @@ -547,7 +546,7 @@ static int wm8974_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 13b979a71a7c..4bbc3442703f 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -60,7 +59,6 @@ struct wm8978_priv { unsigned int f_opclk; int mclk_idx; enum wm8978_sysclk_src sysclk; - u16 reg_cache[WM8978_CACHEREGNUM]; }; static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"}; @@ -355,11 +353,12 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8978_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8978_dapm_widgets, - ARRAY_SIZE(wm8978_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_dapm_new_controls(dapm, wm8978_dapm_widgets, + ARRAY_SIZE(wm8978_dapm_widgets)); /* set up the WM8978 audio map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -837,7 +836,7 @@ static int wm8978_set_bias_level(struct snd_soc_codec *codec, /* bit 3: enable bias, bit 2: enable I/O tie off buffer */ power1 |= 0xc; - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Initial cap charge at VMID 5k */ snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1, power1 | 0x3); @@ -857,7 +856,7 @@ static int wm8978_set_bias_level(struct snd_soc_codec *codec, dev_dbg(codec->dev, "%s: %d, %x\n", __func__, level, power1); - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c index 
fd2e7cca1228..bae510acdec8 100644 --- a/sound/soc/codecs/wm8985.c +++ b/sound/soc/codecs/wm8985.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -533,10 +532,11 @@ static int eqmode_put(struct snd_kcontrol *kcontrol, static int wm8985_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8985_dapm_widgets, - ARRAY_SIZE(wm8985_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, + snd_soc_dapm_new_controls(dapm, wm8985_dapm_widgets, + ARRAY_SIZE(wm8985_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -879,7 +879,7 @@ static int wm8985_set_bias_level(struct snd_soc_codec *codec, 1 << WM8985_VMIDSEL_SHIFT); break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8985->supplies), wm8985->supplies); if (ret) { @@ -939,7 +939,7 @@ static int wm8985_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c index d7f259711970..d7170f1381aa 100644 --- a/sound/soc/codecs/wm8988.c +++ b/sound/soc/codecs/wm8988.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include "wm8988.h" @@ -54,7 +53,6 @@ struct wm8988_priv { unsigned int sysclk; enum snd_soc_control_type control_type; struct snd_pcm_hw_constraint_list *sysclk_constraints; - u16 reg_cache[WM8988_NUM_REG]; }; @@ -677,7 +675,7 @@ static int wm8988_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* VREF, VMID=2x5k */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1); @@ -693,7 +691,7 @@ static int wm8988_set_bias_level(struct snd_soc_codec *codec, snd_soc_write(codec, WM8988_PWR1, 0x0000); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -759,6 +757,7 @@ static int wm8988_resume(struct snd_soc_codec *codec) static int wm8988_probe(struct snd_soc_codec *codec) { struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret = 0; u16 reg; @@ -790,9 +789,9 @@ static int wm8988_probe(struct snd_soc_codec *codec) snd_soc_add_controls(codec, wm8988_snd_controls, ARRAY_SIZE(wm8988_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8988_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8988_dapm_widgets, ARRAY_SIZE(wm8988_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c index 264828e4e67c..5c87a634fc04 100644 --- a/sound/soc/codecs/wm8990.c +++ b/sound/soc/codecs/wm8990.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -914,11 +913,12 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm8990_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm8990_dapm_widgets, - ARRAY_SIZE(wm8990_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; + snd_soc_dapm_new_controls(dapm, wm8990_dapm_widgets, + ARRAY_SIZE(wm8990_dapm_widgets)); /* set up the WM8990 audio map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + 
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -1170,7 +1170,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Enable all output discharge bits */ snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE | WM8990_DIS_RLINE | WM8990_DIS_OUT3 | @@ -1266,7 +1266,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c index 589e3fa24734..18c0d9ce7c32 100644 --- a/sound/soc/codecs/wm8993.c +++ b/sound/soc/codecs/wm8993.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -226,7 +225,6 @@ static struct { struct wm8993_priv { struct wm_hubs_data hubs_data; - u16 reg_cache[WM8993_REGISTER_COUNT]; struct regulator_bulk_data supplies[WM8993_NUM_SUPPLIES]; struct wm8993_platform_data pdata; enum snd_soc_control_type control_type; @@ -735,6 +733,7 @@ static int class_w_put(struct snd_kcontrol *kcontrol, 0); } wm8993->class_w_users++; + wm8993->hubs_data.class_w = true; } /* Implement the change */ @@ -751,6 +750,7 @@ static int class_w_put(struct snd_kcontrol *kcontrol, WM8993_CP_DYN_V); } wm8993->class_w_users--; + wm8993->hubs_data.class_w = false; } dev_dbg(codec->dev, "Indirect DAC use count now %d\n", @@ -968,7 +968,7 @@ static int wm8993_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8993->supplies), wm8993->supplies); if (ret != 0) @@ -1029,6 +1029,12 @@ static int wm8993_set_bias_level(struct snd_soc_codec *codec, WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA, 0); + snd_soc_update_bits(codec, WM8993_ANTIPOP2, + WM8993_STARTUP_BIAS_ENA | + WM8993_VMID_BUF_ENA | + WM8993_VMID_RAMP_MASK | + WM8993_BIAS_SRC, 0); + #ifdef CONFIG_REGULATOR /* Post 2.6.34 we will be able to get a callback when * the regulators are disabled which we can use but @@ -1043,7 +1049,7 @@ static int wm8993_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1225,7 +1231,7 @@ static int wm8993_hw_params(struct snd_pcm_substream *substream, - wm8993->fs); for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) { cur_val = abs((wm8993->sysclk_rate / - clk_sys_rates[i].ratio) - wm8993->fs);; + clk_sys_rates[i].ratio) - wm8993->fs); if (cur_val < best_val) { best = i; best_val = cur_val; @@ -1422,6 +1428,7 @@ static struct snd_soc_dai_driver wm8993_dai = { static int wm8993_probe(struct snd_soc_codec *codec) { struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret, i, val; wm8993->hubs_data.hp_startup_mode = 1; @@ -1503,11 +1510,11 @@ static int wm8993_probe(struct snd_soc_codec *codec) ARRAY_SIZE(wm8993_eq_controls)); } - snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8993_dapm_widgets, ARRAY_SIZE(wm8993_dapm_widgets)); wm_hubs_add_analogue_controls(codec); - snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes)); + snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes)); wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff, wm8993->pdata.lineout2_diff); diff --git a/sound/soc/codecs/wm8994-tables.c 
b/sound/soc/codecs/wm8994-tables.c new file mode 100644 index 000000000000..68e9b024dd48 --- /dev/null +++ b/sound/soc/codecs/wm8994-tables.c @@ -0,0 +1,3147 @@ +#include "wm8994.h" + +const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = { + { 0xFFFF, 0xFFFF }, /* R0 - Software Reset */ + { 0x3B37, 0x3B37 }, /* R1 - Power Management (1) */ + { 0x6BF0, 0x6BF0 }, /* R2 - Power Management (2) */ + { 0x3FF0, 0x3FF0 }, /* R3 - Power Management (3) */ + { 0x3F3F, 0x3F3F }, /* R4 - Power Management (4) */ + { 0x3F0F, 0x3F0F }, /* R5 - Power Management (5) */ + { 0x003F, 0x003F }, /* R6 - Power Management (6) */ + { 0x0000, 0x0000 }, /* R7 */ + { 0x0000, 0x0000 }, /* R8 */ + { 0x0000, 0x0000 }, /* R9 */ + { 0x0000, 0x0000 }, /* R10 */ + { 0x0000, 0x0000 }, /* R11 */ + { 0x0000, 0x0000 }, /* R12 */ + { 0x0000, 0x0000 }, /* R13 */ + { 0x0000, 0x0000 }, /* R14 */ + { 0x0000, 0x0000 }, /* R15 */ + { 0x0000, 0x0000 }, /* R16 */ + { 0x0000, 0x0000 }, /* R17 */ + { 0x0000, 0x0000 }, /* R18 */ + { 0x0000, 0x0000 }, /* R19 */ + { 0x0000, 0x0000 }, /* R20 */ + { 0x01C0, 0x01C0 }, /* R21 - Input Mixer (1) */ + { 0x0000, 0x0000 }, /* R22 */ + { 0x0000, 0x0000 }, /* R23 */ + { 0x00DF, 0x01DF }, /* R24 - Left Line Input 1&2 Volume */ + { 0x00DF, 0x01DF }, /* R25 - Left Line Input 3&4 Volume */ + { 0x00DF, 0x01DF }, /* R26 - Right Line Input 1&2 Volume */ + { 0x00DF, 0x01DF }, /* R27 - Right Line Input 3&4 Volume */ + { 0x00FF, 0x01FF }, /* R28 - Left Output Volume */ + { 0x00FF, 0x01FF }, /* R29 - Right Output Volume */ + { 0x0077, 0x0077 }, /* R30 - Line Outputs Volume */ + { 0x0030, 0x0030 }, /* R31 - HPOUT2 Volume */ + { 0x00FF, 0x01FF }, /* R32 - Left OPGA Volume */ + { 0x00FF, 0x01FF }, /* R33 - Right OPGA Volume */ + { 0x007F, 0x007F }, /* R34 - SPKMIXL Attenuation */ + { 0x017F, 0x017F }, /* R35 - SPKMIXR Attenuation */ + { 0x003F, 0x003F }, /* R36 - SPKOUT Mixers */ + { 0x003F, 0x003F }, /* R37 - ClassD */ + { 0x00FF, 0x01FF }, /* R38 - Speaker Volume Left */ + { 0x00FF, 0x01FF }, /* R39 - Speaker Volume Right */ + { 0x00FF, 0x00FF }, /* R40 - Input Mixer (2) */ + { 0x01B7, 0x01B7 }, /* R41 - Input Mixer (3) */ + { 0x01B7, 0x01B7 }, /* R42 - Input Mixer (4) */ + { 0x01C7, 0x01C7 }, /* R43 - Input Mixer (5) */ + { 0x01C7, 0x01C7 }, /* R44 - Input Mixer (6) */ + { 0x01FF, 0x01FF }, /* R45 - Output Mixer (1) */ + { 0x01FF, 0x01FF }, /* R46 - Output Mixer (2) */ + { 0x0FFF, 0x0FFF }, /* R47 - Output Mixer (3) */ + { 0x0FFF, 0x0FFF }, /* R48 - Output Mixer (4) */ + { 0x0FFF, 0x0FFF }, /* R49 - Output Mixer (5) */ + { 0x0FFF, 0x0FFF }, /* R50 - Output Mixer (6) */ + { 0x0038, 0x0038 }, /* R51 - HPOUT2 Mixer */ + { 0x0077, 0x0077 }, /* R52 - Line Mixer (1) */ + { 0x0077, 0x0077 }, /* R53 - Line Mixer (2) */ + { 0x03FF, 0x03FF }, /* R54 - Speaker Mixer */ + { 0x00C1, 0x00C1 }, /* R55 - Additional Control */ + { 0x00F0, 0x00F0 }, /* R56 - AntiPOP (1) */ + { 0x01EF, 0x01EF }, /* R57 - AntiPOP (2) */ + { 0x00FF, 0x00FF }, /* R58 - MICBIAS */ + { 0x000F, 0x000F }, /* R59 - LDO 1 */ + { 0x0007, 0x0007 }, /* R60 - LDO 2 */ + { 0x0000, 0x0000 }, /* R61 */ + { 0x0000, 0x0000 }, /* R62 */ + { 0x0000, 0x0000 }, /* R63 */ + { 0x0000, 0x0000 }, /* R64 */ + { 0x0000, 0x0000 }, /* R65 */ + { 0x0000, 0x0000 }, /* R66 */ + { 0x0000, 0x0000 }, /* R67 */ + { 0x0000, 0x0000 }, /* R68 */ + { 0x0000, 0x0000 }, /* R69 */ + { 0x0000, 0x0000 }, /* R70 */ + { 0x0000, 0x0000 }, /* R71 */ + { 0x0000, 0x0000 }, /* R72 */ + { 0x0000, 0x0000 }, /* R73 */ + { 0x0000, 0x0000 }, /* R74 */ + { 0x0000, 0x0000 }, /* R75 */ + { 
0x8000, 0x8000 }, /* R76 - Charge Pump (1) */ + { 0x0000, 0x0000 }, /* R77 */ + { 0x0000, 0x0000 }, /* R78 */ + { 0x0000, 0x0000 }, /* R79 */ + { 0x0000, 0x0000 }, /* R80 */ + { 0x0301, 0x0301 }, /* R81 - Class W (1) */ + { 0x0000, 0x0000 }, /* R82 */ + { 0x0000, 0x0000 }, /* R83 */ + { 0x333F, 0x333F }, /* R84 - DC Servo (1) */ + { 0x0FEF, 0x0FEF }, /* R85 - DC Servo (2) */ + { 0x0000, 0x0000 }, /* R86 */ + { 0xFFFF, 0xFFFF }, /* R87 - DC Servo (4) */ + { 0x0333, 0x0000 }, /* R88 - DC Servo Readback */ + { 0x0000, 0x0000 }, /* R89 */ + { 0x0000, 0x0000 }, /* R90 */ + { 0x0000, 0x0000 }, /* R91 */ + { 0x0000, 0x0000 }, /* R92 */ + { 0x0000, 0x0000 }, /* R93 */ + { 0x0000, 0x0000 }, /* R94 */ + { 0x0000, 0x0000 }, /* R95 */ + { 0x00EE, 0x00EE }, /* R96 - Analogue HP (1) */ + { 0x0000, 0x0000 }, /* R97 */ + { 0x0000, 0x0000 }, /* R98 */ + { 0x0000, 0x0000 }, /* R99 */ + { 0x0000, 0x0000 }, /* R100 */ + { 0x0000, 0x0000 }, /* R101 */ + { 0x0000, 0x0000 }, /* R102 */ + { 0x0000, 0x0000 }, /* R103 */ + { 0x0000, 0x0000 }, /* R104 */ + { 0x0000, 0x0000 }, /* R105 */ + { 0x0000, 0x0000 }, /* R106 */ + { 0x0000, 0x0000 }, /* R107 */ + { 0x0000, 0x0000 }, /* R108 */ + { 0x0000, 0x0000 }, /* R109 */ + { 0x0000, 0x0000 }, /* R110 */ + { 0x0000, 0x0000 }, /* R111 */ + { 0x0000, 0x0000 }, /* R112 */ + { 0x0000, 0x0000 }, /* R113 */ + { 0x0000, 0x0000 }, /* R114 */ + { 0x0000, 0x0000 }, /* R115 */ + { 0x0000, 0x0000 }, /* R116 */ + { 0x0000, 0x0000 }, /* R117 */ + { 0x0000, 0x0000 }, /* R118 */ + { 0x0000, 0x0000 }, /* R119 */ + { 0x0000, 0x0000 }, /* R120 */ + { 0x0000, 0x0000 }, /* R121 */ + { 0x0000, 0x0000 }, /* R122 */ + { 0x0000, 0x0000 }, /* R123 */ + { 0x0000, 0x0000 }, /* R124 */ + { 0x0000, 0x0000 }, /* R125 */ + { 0x0000, 0x0000 }, /* R126 */ + { 0x0000, 0x0000 }, /* R127 */ + { 0x0000, 0x0000 }, /* R128 */ + { 0x0000, 0x0000 }, /* R129 */ + { 0x0000, 0x0000 }, /* R130 */ + { 0x0000, 0x0000 }, /* R131 */ + { 0x0000, 0x0000 }, /* R132 */ + { 0x0000, 0x0000 }, /* R133 */ + { 0x0000, 0x0000 }, /* R134 */ + { 0x0000, 0x0000 }, /* R135 */ + { 0x0000, 0x0000 }, /* R136 */ + { 0x0000, 0x0000 }, /* R137 */ + { 0x0000, 0x0000 }, /* R138 */ + { 0x0000, 0x0000 }, /* R139 */ + { 0x0000, 0x0000 }, /* R140 */ + { 0x0000, 0x0000 }, /* R141 */ + { 0x0000, 0x0000 }, /* R142 */ + { 0x0000, 0x0000 }, /* R143 */ + { 0x0000, 0x0000 }, /* R144 */ + { 0x0000, 0x0000 }, /* R145 */ + { 0x0000, 0x0000 }, /* R146 */ + { 0x0000, 0x0000 }, /* R147 */ + { 0x0000, 0x0000 }, /* R148 */ + { 0x0000, 0x0000 }, /* R149 */ + { 0x0000, 0x0000 }, /* R150 */ + { 0x0000, 0x0000 }, /* R151 */ + { 0x0000, 0x0000 }, /* R152 */ + { 0x0000, 0x0000 }, /* R153 */ + { 0x0000, 0x0000 }, /* R154 */ + { 0x0000, 0x0000 }, /* R155 */ + { 0x0000, 0x0000 }, /* R156 */ + { 0x0000, 0x0000 }, /* R157 */ + { 0x0000, 0x0000 }, /* R158 */ + { 0x0000, 0x0000 }, /* R159 */ + { 0x0000, 0x0000 }, /* R160 */ + { 0x0000, 0x0000 }, /* R161 */ + { 0x0000, 0x0000 }, /* R162 */ + { 0x0000, 0x0000 }, /* R163 */ + { 0x0000, 0x0000 }, /* R164 */ + { 0x0000, 0x0000 }, /* R165 */ + { 0x0000, 0x0000 }, /* R166 */ + { 0x0000, 0x0000 }, /* R167 */ + { 0x0000, 0x0000 }, /* R168 */ + { 0x0000, 0x0000 }, /* R169 */ + { 0x0000, 0x0000 }, /* R170 */ + { 0x0000, 0x0000 }, /* R171 */ + { 0x0000, 0x0000 }, /* R172 */ + { 0x0000, 0x0000 }, /* R173 */ + { 0x0000, 0x0000 }, /* R174 */ + { 0x0000, 0x0000 }, /* R175 */ + { 0x0000, 0x0000 }, /* R176 */ + { 0x0000, 0x0000 }, /* R177 */ + { 0x0000, 0x0000 }, /* R178 */ + { 0x0000, 0x0000 }, /* R179 */ + { 0x0000, 0x0000 }, /* R180 */ + 
{ 0x0000, 0x0000 }, /* R181 */ + { 0x0000, 0x0000 }, /* R182 */ + { 0x0000, 0x0000 }, /* R183 */ + { 0x0000, 0x0000 }, /* R184 */ + { 0x0000, 0x0000 }, /* R185 */ + { 0x0000, 0x0000 }, /* R186 */ + { 0x0000, 0x0000 }, /* R187 */ + { 0x0000, 0x0000 }, /* R188 */ + { 0x0000, 0x0000 }, /* R189 */ + { 0x0000, 0x0000 }, /* R190 */ + { 0x0000, 0x0000 }, /* R191 */ + { 0x0000, 0x0000 }, /* R192 */ + { 0x0000, 0x0000 }, /* R193 */ + { 0x0000, 0x0000 }, /* R194 */ + { 0x0000, 0x0000 }, /* R195 */ + { 0x0000, 0x0000 }, /* R196 */ + { 0x0000, 0x0000 }, /* R197 */ + { 0x0000, 0x0000 }, /* R198 */ + { 0x0000, 0x0000 }, /* R199 */ + { 0x0000, 0x0000 }, /* R200 */ + { 0x0000, 0x0000 }, /* R201 */ + { 0x0000, 0x0000 }, /* R202 */ + { 0x0000, 0x0000 }, /* R203 */ + { 0x0000, 0x0000 }, /* R204 */ + { 0x0000, 0x0000 }, /* R205 */ + { 0x0000, 0x0000 }, /* R206 */ + { 0x0000, 0x0000 }, /* R207 */ + { 0x0000, 0x0000 }, /* R208 */ + { 0x0000, 0x0000 }, /* R209 */ + { 0x0000, 0x0000 }, /* R210 */ + { 0x0000, 0x0000 }, /* R211 */ + { 0x0000, 0x0000 }, /* R212 */ + { 0x0000, 0x0000 }, /* R213 */ + { 0x0000, 0x0000 }, /* R214 */ + { 0x0000, 0x0000 }, /* R215 */ + { 0x0000, 0x0000 }, /* R216 */ + { 0x0000, 0x0000 }, /* R217 */ + { 0x0000, 0x0000 }, /* R218 */ + { 0x0000, 0x0000 }, /* R219 */ + { 0x0000, 0x0000 }, /* R220 */ + { 0x0000, 0x0000 }, /* R221 */ + { 0x0000, 0x0000 }, /* R222 */ + { 0x0000, 0x0000 }, /* R223 */ + { 0x0000, 0x0000 }, /* R224 */ + { 0x0000, 0x0000 }, /* R225 */ + { 0x0000, 0x0000 }, /* R226 */ + { 0x0000, 0x0000 }, /* R227 */ + { 0x0000, 0x0000 }, /* R228 */ + { 0x0000, 0x0000 }, /* R229 */ + { 0x0000, 0x0000 }, /* R230 */ + { 0x0000, 0x0000 }, /* R231 */ + { 0x0000, 0x0000 }, /* R232 */ + { 0x0000, 0x0000 }, /* R233 */ + { 0x0000, 0x0000 }, /* R234 */ + { 0x0000, 0x0000 }, /* R235 */ + { 0x0000, 0x0000 }, /* R236 */ + { 0x0000, 0x0000 }, /* R237 */ + { 0x0000, 0x0000 }, /* R238 */ + { 0x0000, 0x0000 }, /* R239 */ + { 0x0000, 0x0000 }, /* R240 */ + { 0x0000, 0x0000 }, /* R241 */ + { 0x0000, 0x0000 }, /* R242 */ + { 0x0000, 0x0000 }, /* R243 */ + { 0x0000, 0x0000 }, /* R244 */ + { 0x0000, 0x0000 }, /* R245 */ + { 0x0000, 0x0000 }, /* R246 */ + { 0x0000, 0x0000 }, /* R247 */ + { 0x0000, 0x0000 }, /* R248 */ + { 0x0000, 0x0000 }, /* R249 */ + { 0x0000, 0x0000 }, /* R250 */ + { 0x0000, 0x0000 }, /* R251 */ + { 0x0000, 0x0000 }, /* R252 */ + { 0x0000, 0x0000 }, /* R253 */ + { 0x0000, 0x0000 }, /* R254 */ + { 0x0000, 0x0000 }, /* R255 */ + { 0x000F, 0x0000 }, /* R256 - Chip Revision */ + { 0x0074, 0x0074 }, /* R257 - Control Interface */ + { 0x0000, 0x0000 }, /* R258 */ + { 0x0000, 0x0000 }, /* R259 */ + { 0x0000, 0x0000 }, /* R260 */ + { 0x0000, 0x0000 }, /* R261 */ + { 0x0000, 0x0000 }, /* R262 */ + { 0x0000, 0x0000 }, /* R263 */ + { 0x0000, 0x0000 }, /* R264 */ + { 0x0000, 0x0000 }, /* R265 */ + { 0x0000, 0x0000 }, /* R266 */ + { 0x0000, 0x0000 }, /* R267 */ + { 0x0000, 0x0000 }, /* R268 */ + { 0x0000, 0x0000 }, /* R269 */ + { 0x0000, 0x0000 }, /* R270 */ + { 0x0000, 0x0000 }, /* R271 */ + { 0x807F, 0x837F }, /* R272 - Write Sequencer Ctrl (1) */ + { 0x017F, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */ + { 0x0000, 0x0000 }, /* R274 */ + { 0x0000, 0x0000 }, /* R275 */ + { 0x0000, 0x0000 }, /* R276 */ + { 0x0000, 0x0000 }, /* R277 */ + { 0x0000, 0x0000 }, /* R278 */ + { 0x0000, 0x0000 }, /* R279 */ + { 0x0000, 0x0000 }, /* R280 */ + { 0x0000, 0x0000 }, /* R281 */ + { 0x0000, 0x0000 }, /* R282 */ + { 0x0000, 0x0000 }, /* R283 */ + { 0x0000, 0x0000 }, /* R284 */ + { 0x0000, 0x0000 }, /* R285 */ + 
{ 0x0000, 0x0000 }, /* R286 */ + { 0x0000, 0x0000 }, /* R287 */ + { 0x0000, 0x0000 }, /* R288 */ + { 0x0000, 0x0000 }, /* R289 */ + { 0x0000, 0x0000 }, /* R290 */ + { 0x0000, 0x0000 }, /* R291 */ + { 0x0000, 0x0000 }, /* R292 */ + { 0x0000, 0x0000 }, /* R293 */ + { 0x0000, 0x0000 }, /* R294 */ + { 0x0000, 0x0000 }, /* R295 */ + { 0x0000, 0x0000 }, /* R296 */ + { 0x0000, 0x0000 }, /* R297 */ + { 0x0000, 0x0000 }, /* R298 */ + { 0x0000, 0x0000 }, /* R299 */ + { 0x0000, 0x0000 }, /* R300 */ + { 0x0000, 0x0000 }, /* R301 */ + { 0x0000, 0x0000 }, /* R302 */ + { 0x0000, 0x0000 }, /* R303 */ + { 0x0000, 0x0000 }, /* R304 */ + { 0x0000, 0x0000 }, /* R305 */ + { 0x0000, 0x0000 }, /* R306 */ + { 0x0000, 0x0000 }, /* R307 */ + { 0x0000, 0x0000 }, /* R308 */ + { 0x0000, 0x0000 }, /* R309 */ + { 0x0000, 0x0000 }, /* R310 */ + { 0x0000, 0x0000 }, /* R311 */ + { 0x0000, 0x0000 }, /* R312 */ + { 0x0000, 0x0000 }, /* R313 */ + { 0x0000, 0x0000 }, /* R314 */ + { 0x0000, 0x0000 }, /* R315 */ + { 0x0000, 0x0000 }, /* R316 */ + { 0x0000, 0x0000 }, /* R317 */ + { 0x0000, 0x0000 }, /* R318 */ + { 0x0000, 0x0000 }, /* R319 */ + { 0x0000, 0x0000 }, /* R320 */ + { 0x0000, 0x0000 }, /* R321 */ + { 0x0000, 0x0000 }, /* R322 */ + { 0x0000, 0x0000 }, /* R323 */ + { 0x0000, 0x0000 }, /* R324 */ + { 0x0000, 0x0000 }, /* R325 */ + { 0x0000, 0x0000 }, /* R326 */ + { 0x0000, 0x0000 }, /* R327 */ + { 0x0000, 0x0000 }, /* R328 */ + { 0x0000, 0x0000 }, /* R329 */ + { 0x0000, 0x0000 }, /* R330 */ + { 0x0000, 0x0000 }, /* R331 */ + { 0x0000, 0x0000 }, /* R332 */ + { 0x0000, 0x0000 }, /* R333 */ + { 0x0000, 0x0000 }, /* R334 */ + { 0x0000, 0x0000 }, /* R335 */ + { 0x0000, 0x0000 }, /* R336 */ + { 0x0000, 0x0000 }, /* R337 */ + { 0x0000, 0x0000 }, /* R338 */ + { 0x0000, 0x0000 }, /* R339 */ + { 0x0000, 0x0000 }, /* R340 */ + { 0x0000, 0x0000 }, /* R341 */ + { 0x0000, 0x0000 }, /* R342 */ + { 0x0000, 0x0000 }, /* R343 */ + { 0x0000, 0x0000 }, /* R344 */ + { 0x0000, 0x0000 }, /* R345 */ + { 0x0000, 0x0000 }, /* R346 */ + { 0x0000, 0x0000 }, /* R347 */ + { 0x0000, 0x0000 }, /* R348 */ + { 0x0000, 0x0000 }, /* R349 */ + { 0x0000, 0x0000 }, /* R350 */ + { 0x0000, 0x0000 }, /* R351 */ + { 0x0000, 0x0000 }, /* R352 */ + { 0x0000, 0x0000 }, /* R353 */ + { 0x0000, 0x0000 }, /* R354 */ + { 0x0000, 0x0000 }, /* R355 */ + { 0x0000, 0x0000 }, /* R356 */ + { 0x0000, 0x0000 }, /* R357 */ + { 0x0000, 0x0000 }, /* R358 */ + { 0x0000, 0x0000 }, /* R359 */ + { 0x0000, 0x0000 }, /* R360 */ + { 0x0000, 0x0000 }, /* R361 */ + { 0x0000, 0x0000 }, /* R362 */ + { 0x0000, 0x0000 }, /* R363 */ + { 0x0000, 0x0000 }, /* R364 */ + { 0x0000, 0x0000 }, /* R365 */ + { 0x0000, 0x0000 }, /* R366 */ + { 0x0000, 0x0000 }, /* R367 */ + { 0x0000, 0x0000 }, /* R368 */ + { 0x0000, 0x0000 }, /* R369 */ + { 0x0000, 0x0000 }, /* R370 */ + { 0x0000, 0x0000 }, /* R371 */ + { 0x0000, 0x0000 }, /* R372 */ + { 0x0000, 0x0000 }, /* R373 */ + { 0x0000, 0x0000 }, /* R374 */ + { 0x0000, 0x0000 }, /* R375 */ + { 0x0000, 0x0000 }, /* R376 */ + { 0x0000, 0x0000 }, /* R377 */ + { 0x0000, 0x0000 }, /* R378 */ + { 0x0000, 0x0000 }, /* R379 */ + { 0x0000, 0x0000 }, /* R380 */ + { 0x0000, 0x0000 }, /* R381 */ + { 0x0000, 0x0000 }, /* R382 */ + { 0x0000, 0x0000 }, /* R383 */ + { 0x0000, 0x0000 }, /* R384 */ + { 0x0000, 0x0000 }, /* R385 */ + { 0x0000, 0x0000 }, /* R386 */ + { 0x0000, 0x0000 }, /* R387 */ + { 0x0000, 0x0000 }, /* R388 */ + { 0x0000, 0x0000 }, /* R389 */ + { 0x0000, 0x0000 }, /* R390 */ + { 0x0000, 0x0000 }, /* R391 */ + { 0x0000, 0x0000 }, /* R392 */ + { 0x0000, 0x0000 }, /* 
R393 */ + { 0x0000, 0x0000 }, /* R394 */ + { 0x0000, 0x0000 }, /* R395 */ + { 0x0000, 0x0000 }, /* R396 */ + { 0x0000, 0x0000 }, /* R397 */ + { 0x0000, 0x0000 }, /* R398 */ + { 0x0000, 0x0000 }, /* R399 */ + { 0x0000, 0x0000 }, /* R400 */ + { 0x0000, 0x0000 }, /* R401 */ + { 0x0000, 0x0000 }, /* R402 */ + { 0x0000, 0x0000 }, /* R403 */ + { 0x0000, 0x0000 }, /* R404 */ + { 0x0000, 0x0000 }, /* R405 */ + { 0x0000, 0x0000 }, /* R406 */ + { 0x0000, 0x0000 }, /* R407 */ + { 0x0000, 0x0000 }, /* R408 */ + { 0x0000, 0x0000 }, /* R409 */ + { 0x0000, 0x0000 }, /* R410 */ + { 0x0000, 0x0000 }, /* R411 */ + { 0x0000, 0x0000 }, /* R412 */ + { 0x0000, 0x0000 }, /* R413 */ + { 0x0000, 0x0000 }, /* R414 */ + { 0x0000, 0x0000 }, /* R415 */ + { 0x0000, 0x0000 }, /* R416 */ + { 0x0000, 0x0000 }, /* R417 */ + { 0x0000, 0x0000 }, /* R418 */ + { 0x0000, 0x0000 }, /* R419 */ + { 0x0000, 0x0000 }, /* R420 */ + { 0x0000, 0x0000 }, /* R421 */ + { 0x0000, 0x0000 }, /* R422 */ + { 0x0000, 0x0000 }, /* R423 */ + { 0x0000, 0x0000 }, /* R424 */ + { 0x0000, 0x0000 }, /* R425 */ + { 0x0000, 0x0000 }, /* R426 */ + { 0x0000, 0x0000 }, /* R427 */ + { 0x0000, 0x0000 }, /* R428 */ + { 0x0000, 0x0000 }, /* R429 */ + { 0x0000, 0x0000 }, /* R430 */ + { 0x0000, 0x0000 }, /* R431 */ + { 0x0000, 0x0000 }, /* R432 */ + { 0x0000, 0x0000 }, /* R433 */ + { 0x0000, 0x0000 }, /* R434 */ + { 0x0000, 0x0000 }, /* R435 */ + { 0x0000, 0x0000 }, /* R436 */ + { 0x0000, 0x0000 }, /* R437 */ + { 0x0000, 0x0000 }, /* R438 */ + { 0x0000, 0x0000 }, /* R439 */ + { 0x0000, 0x0000 }, /* R440 */ + { 0x0000, 0x0000 }, /* R441 */ + { 0x0000, 0x0000 }, /* R442 */ + { 0x0000, 0x0000 }, /* R443 */ + { 0x0000, 0x0000 }, /* R444 */ + { 0x0000, 0x0000 }, /* R445 */ + { 0x0000, 0x0000 }, /* R446 */ + { 0x0000, 0x0000 }, /* R447 */ + { 0x0000, 0x0000 }, /* R448 */ + { 0x0000, 0x0000 }, /* R449 */ + { 0x0000, 0x0000 }, /* R450 */ + { 0x0000, 0x0000 }, /* R451 */ + { 0x0000, 0x0000 }, /* R452 */ + { 0x0000, 0x0000 }, /* R453 */ + { 0x0000, 0x0000 }, /* R454 */ + { 0x0000, 0x0000 }, /* R455 */ + { 0x0000, 0x0000 }, /* R456 */ + { 0x0000, 0x0000 }, /* R457 */ + { 0x0000, 0x0000 }, /* R458 */ + { 0x0000, 0x0000 }, /* R459 */ + { 0x0000, 0x0000 }, /* R460 */ + { 0x0000, 0x0000 }, /* R461 */ + { 0x0000, 0x0000 }, /* R462 */ + { 0x0000, 0x0000 }, /* R463 */ + { 0x0000, 0x0000 }, /* R464 */ + { 0x0000, 0x0000 }, /* R465 */ + { 0x0000, 0x0000 }, /* R466 */ + { 0x0000, 0x0000 }, /* R467 */ + { 0x0000, 0x0000 }, /* R468 */ + { 0x0000, 0x0000 }, /* R469 */ + { 0x0000, 0x0000 }, /* R470 */ + { 0x0000, 0x0000 }, /* R471 */ + { 0x0000, 0x0000 }, /* R472 */ + { 0x0000, 0x0000 }, /* R473 */ + { 0x0000, 0x0000 }, /* R474 */ + { 0x0000, 0x0000 }, /* R475 */ + { 0x0000, 0x0000 }, /* R476 */ + { 0x0000, 0x0000 }, /* R477 */ + { 0x0000, 0x0000 }, /* R478 */ + { 0x0000, 0x0000 }, /* R479 */ + { 0x0000, 0x0000 }, /* R480 */ + { 0x0000, 0x0000 }, /* R481 */ + { 0x0000, 0x0000 }, /* R482 */ + { 0x0000, 0x0000 }, /* R483 */ + { 0x0000, 0x0000 }, /* R484 */ + { 0x0000, 0x0000 }, /* R485 */ + { 0x0000, 0x0000 }, /* R486 */ + { 0x0000, 0x0000 }, /* R487 */ + { 0x0000, 0x0000 }, /* R488 */ + { 0x0000, 0x0000 }, /* R489 */ + { 0x0000, 0x0000 }, /* R490 */ + { 0x0000, 0x0000 }, /* R491 */ + { 0x0000, 0x0000 }, /* R492 */ + { 0x0000, 0x0000 }, /* R493 */ + { 0x0000, 0x0000 }, /* R494 */ + { 0x0000, 0x0000 }, /* R495 */ + { 0x0000, 0x0000 }, /* R496 */ + { 0x0000, 0x0000 }, /* R497 */ + { 0x0000, 0x0000 }, /* R498 */ + { 0x0000, 0x0000 }, /* R499 */ + { 0x0000, 0x0000 }, /* R500 */ + { 0x0000, 
0x0000 }, /* R501 */ + { 0x0000, 0x0000 }, /* R502 */ + { 0x0000, 0x0000 }, /* R503 */ + { 0x0000, 0x0000 }, /* R504 */ + { 0x0000, 0x0000 }, /* R505 */ + { 0x0000, 0x0000 }, /* R506 */ + { 0x0000, 0x0000 }, /* R507 */ + { 0x0000, 0x0000 }, /* R508 */ + { 0x0000, 0x0000 }, /* R509 */ + { 0x0000, 0x0000 }, /* R510 */ + { 0x0000, 0x0000 }, /* R511 */ + { 0x001F, 0x001F }, /* R512 - AIF1 Clocking (1) */ + { 0x003F, 0x003F }, /* R513 - AIF1 Clocking (2) */ + { 0x0000, 0x0000 }, /* R514 */ + { 0x0000, 0x0000 }, /* R515 */ + { 0x001F, 0x001F }, /* R516 - AIF2 Clocking (1) */ + { 0x003F, 0x003F }, /* R517 - AIF2 Clocking (2) */ + { 0x0000, 0x0000 }, /* R518 */ + { 0x0000, 0x0000 }, /* R519 */ + { 0x001F, 0x001F }, /* R520 - Clocking (1) */ + { 0x0777, 0x0777 }, /* R521 - Clocking (2) */ + { 0x0000, 0x0000 }, /* R522 */ + { 0x0000, 0x0000 }, /* R523 */ + { 0x0000, 0x0000 }, /* R524 */ + { 0x0000, 0x0000 }, /* R525 */ + { 0x0000, 0x0000 }, /* R526 */ + { 0x0000, 0x0000 }, /* R527 */ + { 0x00FF, 0x00FF }, /* R528 - AIF1 Rate */ + { 0x00FF, 0x00FF }, /* R529 - AIF2 Rate */ + { 0x000F, 0x0000 }, /* R530 - Rate Status */ + { 0x0000, 0x0000 }, /* R531 */ + { 0x0000, 0x0000 }, /* R532 */ + { 0x0000, 0x0000 }, /* R533 */ + { 0x0000, 0x0000 }, /* R534 */ + { 0x0000, 0x0000 }, /* R535 */ + { 0x0000, 0x0000 }, /* R536 */ + { 0x0000, 0x0000 }, /* R537 */ + { 0x0000, 0x0000 }, /* R538 */ + { 0x0000, 0x0000 }, /* R539 */ + { 0x0000, 0x0000 }, /* R540 */ + { 0x0000, 0x0000 }, /* R541 */ + { 0x0000, 0x0000 }, /* R542 */ + { 0x0000, 0x0000 }, /* R543 */ + { 0x0007, 0x0007 }, /* R544 - FLL1 Control (1) */ + { 0x3F77, 0x3F77 }, /* R545 - FLL1 Control (2) */ + { 0xFFFF, 0xFFFF }, /* R546 - FLL1 Control (3) */ + { 0x7FEF, 0x7FEF }, /* R547 - FLL1 Control (4) */ + { 0x1FDB, 0x1FDB }, /* R548 - FLL1 Control (5) */ + { 0x0000, 0x0000 }, /* R549 */ + { 0x0000, 0x0000 }, /* R550 */ + { 0x0000, 0x0000 }, /* R551 */ + { 0x0000, 0x0000 }, /* R552 */ + { 0x0000, 0x0000 }, /* R553 */ + { 0x0000, 0x0000 }, /* R554 */ + { 0x0000, 0x0000 }, /* R555 */ + { 0x0000, 0x0000 }, /* R556 */ + { 0x0000, 0x0000 }, /* R557 */ + { 0x0000, 0x0000 }, /* R558 */ + { 0x0000, 0x0000 }, /* R559 */ + { 0x0000, 0x0000 }, /* R560 */ + { 0x0000, 0x0000 }, /* R561 */ + { 0x0000, 0x0000 }, /* R562 */ + { 0x0000, 0x0000 }, /* R563 */ + { 0x0000, 0x0000 }, /* R564 */ + { 0x0000, 0x0000 }, /* R565 */ + { 0x0000, 0x0000 }, /* R566 */ + { 0x0000, 0x0000 }, /* R567 */ + { 0x0000, 0x0000 }, /* R568 */ + { 0x0000, 0x0000 }, /* R569 */ + { 0x0000, 0x0000 }, /* R570 */ + { 0x0000, 0x0000 }, /* R571 */ + { 0x0000, 0x0000 }, /* R572 */ + { 0x0000, 0x0000 }, /* R573 */ + { 0x0000, 0x0000 }, /* R574 */ + { 0x0000, 0x0000 }, /* R575 */ + { 0x0007, 0x0007 }, /* R576 - FLL2 Control (1) */ + { 0x3F77, 0x3F77 }, /* R577 - FLL2 Control (2) */ + { 0xFFFF, 0xFFFF }, /* R578 - FLL2 Control (3) */ + { 0x7FEF, 0x7FEF }, /* R579 - FLL2 Control (4) */ + { 0x1FDB, 0x1FDB }, /* R580 - FLL2 Control (5) */ + { 0x0000, 0x0000 }, /* R581 */ + { 0x0000, 0x0000 }, /* R582 */ + { 0x0000, 0x0000 }, /* R583 */ + { 0x0000, 0x0000 }, /* R584 */ + { 0x0000, 0x0000 }, /* R585 */ + { 0x0000, 0x0000 }, /* R586 */ + { 0x0000, 0x0000 }, /* R587 */ + { 0x0000, 0x0000 }, /* R588 */ + { 0x0000, 0x0000 }, /* R589 */ + { 0x0000, 0x0000 }, /* R590 */ + { 0x0000, 0x0000 }, /* R591 */ + { 0x0000, 0x0000 }, /* R592 */ + { 0x0000, 0x0000 }, /* R593 */ + { 0x0000, 0x0000 }, /* R594 */ + { 0x0000, 0x0000 }, /* R595 */ + { 0x0000, 0x0000 }, /* R596 */ + { 0x0000, 0x0000 }, /* R597 */ + { 0x0000, 0x0000 }, /* 
R598 */ + { 0x0000, 0x0000 }, /* R599 */ + { 0x0000, 0x0000 }, /* R600 */ + { 0x0000, 0x0000 }, /* R601 */ + { 0x0000, 0x0000 }, /* R602 */ + { 0x0000, 0x0000 }, /* R603 */ + { 0x0000, 0x0000 }, /* R604 */ + { 0x0000, 0x0000 }, /* R605 */ + { 0x0000, 0x0000 }, /* R606 */ + { 0x0000, 0x0000 }, /* R607 */ + { 0x0000, 0x0000 }, /* R608 */ + { 0x0000, 0x0000 }, /* R609 */ + { 0x0000, 0x0000 }, /* R610 */ + { 0x0000, 0x0000 }, /* R611 */ + { 0x0000, 0x0000 }, /* R612 */ + { 0x0000, 0x0000 }, /* R613 */ + { 0x0000, 0x0000 }, /* R614 */ + { 0x0000, 0x0000 }, /* R615 */ + { 0x0000, 0x0000 }, /* R616 */ + { 0x0000, 0x0000 }, /* R617 */ + { 0x0000, 0x0000 }, /* R618 */ + { 0x0000, 0x0000 }, /* R619 */ + { 0x0000, 0x0000 }, /* R620 */ + { 0x0000, 0x0000 }, /* R621 */ + { 0x0000, 0x0000 }, /* R622 */ + { 0x0000, 0x0000 }, /* R623 */ + { 0x0000, 0x0000 }, /* R624 */ + { 0x0000, 0x0000 }, /* R625 */ + { 0x0000, 0x0000 }, /* R626 */ + { 0x0000, 0x0000 }, /* R627 */ + { 0x0000, 0x0000 }, /* R628 */ + { 0x0000, 0x0000 }, /* R629 */ + { 0x0000, 0x0000 }, /* R630 */ + { 0x0000, 0x0000 }, /* R631 */ + { 0x0000, 0x0000 }, /* R632 */ + { 0x0000, 0x0000 }, /* R633 */ + { 0x0000, 0x0000 }, /* R634 */ + { 0x0000, 0x0000 }, /* R635 */ + { 0x0000, 0x0000 }, /* R636 */ + { 0x0000, 0x0000 }, /* R637 */ + { 0x0000, 0x0000 }, /* R638 */ + { 0x0000, 0x0000 }, /* R639 */ + { 0x0000, 0x0000 }, /* R640 */ + { 0x0000, 0x0000 }, /* R641 */ + { 0x0000, 0x0000 }, /* R642 */ + { 0x0000, 0x0000 }, /* R643 */ + { 0x0000, 0x0000 }, /* R644 */ + { 0x0000, 0x0000 }, /* R645 */ + { 0x0000, 0x0000 }, /* R646 */ + { 0x0000, 0x0000 }, /* R647 */ + { 0x0000, 0x0000 }, /* R648 */ + { 0x0000, 0x0000 }, /* R649 */ + { 0x0000, 0x0000 }, /* R650 */ + { 0x0000, 0x0000 }, /* R651 */ + { 0x0000, 0x0000 }, /* R652 */ + { 0x0000, 0x0000 }, /* R653 */ + { 0x0000, 0x0000 }, /* R654 */ + { 0x0000, 0x0000 }, /* R655 */ + { 0x0000, 0x0000 }, /* R656 */ + { 0x0000, 0x0000 }, /* R657 */ + { 0x0000, 0x0000 }, /* R658 */ + { 0x0000, 0x0000 }, /* R659 */ + { 0x0000, 0x0000 }, /* R660 */ + { 0x0000, 0x0000 }, /* R661 */ + { 0x0000, 0x0000 }, /* R662 */ + { 0x0000, 0x0000 }, /* R663 */ + { 0x0000, 0x0000 }, /* R664 */ + { 0x0000, 0x0000 }, /* R665 */ + { 0x0000, 0x0000 }, /* R666 */ + { 0x0000, 0x0000 }, /* R667 */ + { 0x0000, 0x0000 }, /* R668 */ + { 0x0000, 0x0000 }, /* R669 */ + { 0x0000, 0x0000 }, /* R670 */ + { 0x0000, 0x0000 }, /* R671 */ + { 0x0000, 0x0000 }, /* R672 */ + { 0x0000, 0x0000 }, /* R673 */ + { 0x0000, 0x0000 }, /* R674 */ + { 0x0000, 0x0000 }, /* R675 */ + { 0x0000, 0x0000 }, /* R676 */ + { 0x0000, 0x0000 }, /* R677 */ + { 0x0000, 0x0000 }, /* R678 */ + { 0x0000, 0x0000 }, /* R679 */ + { 0x0000, 0x0000 }, /* R680 */ + { 0x0000, 0x0000 }, /* R681 */ + { 0x0000, 0x0000 }, /* R682 */ + { 0x0000, 0x0000 }, /* R683 */ + { 0x0000, 0x0000 }, /* R684 */ + { 0x0000, 0x0000 }, /* R685 */ + { 0x0000, 0x0000 }, /* R686 */ + { 0x0000, 0x0000 }, /* R687 */ + { 0x0000, 0x0000 }, /* R688 */ + { 0x0000, 0x0000 }, /* R689 */ + { 0x0000, 0x0000 }, /* R690 */ + { 0x0000, 0x0000 }, /* R691 */ + { 0x0000, 0x0000 }, /* R692 */ + { 0x0000, 0x0000 }, /* R693 */ + { 0x0000, 0x0000 }, /* R694 */ + { 0x0000, 0x0000 }, /* R695 */ + { 0x0000, 0x0000 }, /* R696 */ + { 0x0000, 0x0000 }, /* R697 */ + { 0x0000, 0x0000 }, /* R698 */ + { 0x0000, 0x0000 }, /* R699 */ + { 0x0000, 0x0000 }, /* R700 */ + { 0x0000, 0x0000 }, /* R701 */ + { 0x0000, 0x0000 }, /* R702 */ + { 0x0000, 0x0000 }, /* R703 */ + { 0x0000, 0x0000 }, /* R704 */ + { 0x0000, 0x0000 }, /* R705 */ + { 0x0000, 
0x0000 }, /* R706 */ + { 0x0000, 0x0000 }, /* R707 */ + { 0x0000, 0x0000 }, /* R708 */ + { 0x0000, 0x0000 }, /* R709 */ + { 0x0000, 0x0000 }, /* R710 */ + { 0x0000, 0x0000 }, /* R711 */ + { 0x0000, 0x0000 }, /* R712 */ + { 0x0000, 0x0000 }, /* R713 */ + { 0x0000, 0x0000 }, /* R714 */ + { 0x0000, 0x0000 }, /* R715 */ + { 0x0000, 0x0000 }, /* R716 */ + { 0x0000, 0x0000 }, /* R717 */ + { 0x0000, 0x0000 }, /* R718 */ + { 0x0000, 0x0000 }, /* R719 */ + { 0x0000, 0x0000 }, /* R720 */ + { 0x0000, 0x0000 }, /* R721 */ + { 0x0000, 0x0000 }, /* R722 */ + { 0x0000, 0x0000 }, /* R723 */ + { 0x0000, 0x0000 }, /* R724 */ + { 0x0000, 0x0000 }, /* R725 */ + { 0x0000, 0x0000 }, /* R726 */ + { 0x0000, 0x0000 }, /* R727 */ + { 0x0000, 0x0000 }, /* R728 */ + { 0x0000, 0x0000 }, /* R729 */ + { 0x0000, 0x0000 }, /* R730 */ + { 0x0000, 0x0000 }, /* R731 */ + { 0x0000, 0x0000 }, /* R732 */ + { 0x0000, 0x0000 }, /* R733 */ + { 0x0000, 0x0000 }, /* R734 */ + { 0x0000, 0x0000 }, /* R735 */ + { 0x0000, 0x0000 }, /* R736 */ + { 0x0000, 0x0000 }, /* R737 */ + { 0x0000, 0x0000 }, /* R738 */ + { 0x0000, 0x0000 }, /* R739 */ + { 0x0000, 0x0000 }, /* R740 */ + { 0x0000, 0x0000 }, /* R741 */ + { 0x0000, 0x0000 }, /* R742 */ + { 0x0000, 0x0000 }, /* R743 */ + { 0x0000, 0x0000 }, /* R744 */ + { 0x0000, 0x0000 }, /* R745 */ + { 0x0000, 0x0000 }, /* R746 */ + { 0x0000, 0x0000 }, /* R747 */ + { 0x0000, 0x0000 }, /* R748 */ + { 0x0000, 0x0000 }, /* R749 */ + { 0x0000, 0x0000 }, /* R750 */ + { 0x0000, 0x0000 }, /* R751 */ + { 0x0000, 0x0000 }, /* R752 */ + { 0x0000, 0x0000 }, /* R753 */ + { 0x0000, 0x0000 }, /* R754 */ + { 0x0000, 0x0000 }, /* R755 */ + { 0x0000, 0x0000 }, /* R756 */ + { 0x0000, 0x0000 }, /* R757 */ + { 0x0000, 0x0000 }, /* R758 */ + { 0x0000, 0x0000 }, /* R759 */ + { 0x0000, 0x0000 }, /* R760 */ + { 0x0000, 0x0000 }, /* R761 */ + { 0x0000, 0x0000 }, /* R762 */ + { 0x0000, 0x0000 }, /* R763 */ + { 0x0000, 0x0000 }, /* R764 */ + { 0x0000, 0x0000 }, /* R765 */ + { 0x0000, 0x0000 }, /* R766 */ + { 0x0000, 0x0000 }, /* R767 */ + { 0xE1F8, 0xE1F8 }, /* R768 - AIF1 Control (1) */ + { 0xCD1F, 0xCD1F }, /* R769 - AIF1 Control (2) */ + { 0xF000, 0xF000 }, /* R770 - AIF1 Master/Slave */ + { 0x01F0, 0x01F0 }, /* R771 - AIF1 BCLK */ + { 0x0FFF, 0x0FFF }, /* R772 - AIF1ADC LRCLK */ + { 0x0FFF, 0x0FFF }, /* R773 - AIF1DAC LRCLK */ + { 0x0003, 0x0003 }, /* R774 - AIF1DAC Data */ + { 0x0003, 0x0003 }, /* R775 - AIF1ADC Data */ + { 0x0000, 0x0000 }, /* R776 */ + { 0x0000, 0x0000 }, /* R777 */ + { 0x0000, 0x0000 }, /* R778 */ + { 0x0000, 0x0000 }, /* R779 */ + { 0x0000, 0x0000 }, /* R780 */ + { 0x0000, 0x0000 }, /* R781 */ + { 0x0000, 0x0000 }, /* R782 */ + { 0x0000, 0x0000 }, /* R783 */ + { 0xF1F8, 0xF1F8 }, /* R784 - AIF2 Control (1) */ + { 0xFD1F, 0xFD1F }, /* R785 - AIF2 Control (2) */ + { 0xF000, 0xF000 }, /* R786 - AIF2 Master/Slave */ + { 0x01F0, 0x01F0 }, /* R787 - AIF2 BCLK */ + { 0x0FFF, 0x0FFF }, /* R788 - AIF2ADC LRCLK */ + { 0x0FFF, 0x0FFF }, /* R789 - AIF2DAC LRCLK */ + { 0x0003, 0x0003 }, /* R790 - AIF2DAC Data */ + { 0x0003, 0x0003 }, /* R791 - AIF2ADC Data */ + { 0x0000, 0x0000 }, /* R792 */ + { 0x0000, 0x0000 }, /* R793 */ + { 0x0000, 0x0000 }, /* R794 */ + { 0x0000, 0x0000 }, /* R795 */ + { 0x0000, 0x0000 }, /* R796 */ + { 0x0000, 0x0000 }, /* R797 */ + { 0x0000, 0x0000 }, /* R798 */ + { 0x0000, 0x0000 }, /* R799 */ + { 0x0000, 0x0000 }, /* R800 */ + { 0x0000, 0x0000 }, /* R801 */ + { 0x0000, 0x0000 }, /* R802 */ + { 0x0000, 0x0000 }, /* R803 */ + { 0x0000, 0x0000 }, /* R804 */ + { 0x0000, 0x0000 }, /* R805 */ + 
{ 0x0000, 0x0000 }, /* R806 */ + { 0x0000, 0x0000 }, /* R807 */ + { 0x0000, 0x0000 }, /* R808 */ + { 0x0000, 0x0000 }, /* R809 */ + { 0x0000, 0x0000 }, /* R810 */ + { 0x0000, 0x0000 }, /* R811 */ + { 0x0000, 0x0000 }, /* R812 */ + { 0x0000, 0x0000 }, /* R813 */ + { 0x0000, 0x0000 }, /* R814 */ + { 0x0000, 0x0000 }, /* R815 */ + { 0x0000, 0x0000 }, /* R816 */ + { 0x0000, 0x0000 }, /* R817 */ + { 0x0000, 0x0000 }, /* R818 */ + { 0x0000, 0x0000 }, /* R819 */ + { 0x0000, 0x0000 }, /* R820 */ + { 0x0000, 0x0000 }, /* R821 */ + { 0x0000, 0x0000 }, /* R822 */ + { 0x0000, 0x0000 }, /* R823 */ + { 0x0000, 0x0000 }, /* R824 */ + { 0x0000, 0x0000 }, /* R825 */ + { 0x0000, 0x0000 }, /* R826 */ + { 0x0000, 0x0000 }, /* R827 */ + { 0x0000, 0x0000 }, /* R828 */ + { 0x0000, 0x0000 }, /* R829 */ + { 0x0000, 0x0000 }, /* R830 */ + { 0x0000, 0x0000 }, /* R831 */ + { 0x0000, 0x0000 }, /* R832 */ + { 0x0000, 0x0000 }, /* R833 */ + { 0x0000, 0x0000 }, /* R834 */ + { 0x0000, 0x0000 }, /* R835 */ + { 0x0000, 0x0000 }, /* R836 */ + { 0x0000, 0x0000 }, /* R837 */ + { 0x0000, 0x0000 }, /* R838 */ + { 0x0000, 0x0000 }, /* R839 */ + { 0x0000, 0x0000 }, /* R840 */ + { 0x0000, 0x0000 }, /* R841 */ + { 0x0000, 0x0000 }, /* R842 */ + { 0x0000, 0x0000 }, /* R843 */ + { 0x0000, 0x0000 }, /* R844 */ + { 0x0000, 0x0000 }, /* R845 */ + { 0x0000, 0x0000 }, /* R846 */ + { 0x0000, 0x0000 }, /* R847 */ + { 0x0000, 0x0000 }, /* R848 */ + { 0x0000, 0x0000 }, /* R849 */ + { 0x0000, 0x0000 }, /* R850 */ + { 0x0000, 0x0000 }, /* R851 */ + { 0x0000, 0x0000 }, /* R852 */ + { 0x0000, 0x0000 }, /* R853 */ + { 0x0000, 0x0000 }, /* R854 */ + { 0x0000, 0x0000 }, /* R855 */ + { 0x0000, 0x0000 }, /* R856 */ + { 0x0000, 0x0000 }, /* R857 */ + { 0x0000, 0x0000 }, /* R858 */ + { 0x0000, 0x0000 }, /* R859 */ + { 0x0000, 0x0000 }, /* R860 */ + { 0x0000, 0x0000 }, /* R861 */ + { 0x0000, 0x0000 }, /* R862 */ + { 0x0000, 0x0000 }, /* R863 */ + { 0x0000, 0x0000 }, /* R864 */ + { 0x0000, 0x0000 }, /* R865 */ + { 0x0000, 0x0000 }, /* R866 */ + { 0x0000, 0x0000 }, /* R867 */ + { 0x0000, 0x0000 }, /* R868 */ + { 0x0000, 0x0000 }, /* R869 */ + { 0x0000, 0x0000 }, /* R870 */ + { 0x0000, 0x0000 }, /* R871 */ + { 0x0000, 0x0000 }, /* R872 */ + { 0x0000, 0x0000 }, /* R873 */ + { 0x0000, 0x0000 }, /* R874 */ + { 0x0000, 0x0000 }, /* R875 */ + { 0x0000, 0x0000 }, /* R876 */ + { 0x0000, 0x0000 }, /* R877 */ + { 0x0000, 0x0000 }, /* R878 */ + { 0x0000, 0x0000 }, /* R879 */ + { 0x0000, 0x0000 }, /* R880 */ + { 0x0000, 0x0000 }, /* R881 */ + { 0x0000, 0x0000 }, /* R882 */ + { 0x0000, 0x0000 }, /* R883 */ + { 0x0000, 0x0000 }, /* R884 */ + { 0x0000, 0x0000 }, /* R885 */ + { 0x0000, 0x0000 }, /* R886 */ + { 0x0000, 0x0000 }, /* R887 */ + { 0x0000, 0x0000 }, /* R888 */ + { 0x0000, 0x0000 }, /* R889 */ + { 0x0000, 0x0000 }, /* R890 */ + { 0x0000, 0x0000 }, /* R891 */ + { 0x0000, 0x0000 }, /* R892 */ + { 0x0000, 0x0000 }, /* R893 */ + { 0x0000, 0x0000 }, /* R894 */ + { 0x0000, 0x0000 }, /* R895 */ + { 0x0000, 0x0000 }, /* R896 */ + { 0x0000, 0x0000 }, /* R897 */ + { 0x0000, 0x0000 }, /* R898 */ + { 0x0000, 0x0000 }, /* R899 */ + { 0x0000, 0x0000 }, /* R900 */ + { 0x0000, 0x0000 }, /* R901 */ + { 0x0000, 0x0000 }, /* R902 */ + { 0x0000, 0x0000 }, /* R903 */ + { 0x0000, 0x0000 }, /* R904 */ + { 0x0000, 0x0000 }, /* R905 */ + { 0x0000, 0x0000 }, /* R906 */ + { 0x0000, 0x0000 }, /* R907 */ + { 0x0000, 0x0000 }, /* R908 */ + { 0x0000, 0x0000 }, /* R909 */ + { 0x0000, 0x0000 }, /* R910 */ + { 0x0000, 0x0000 }, /* R911 */ + { 0x0000, 0x0000 }, /* R912 */ + { 0x0000, 0x0000 }, /* 
R913 */ + { 0x0000, 0x0000 }, /* R914 */ + { 0x0000, 0x0000 }, /* R915 */ + { 0x0000, 0x0000 }, /* R916 */ + { 0x0000, 0x0000 }, /* R917 */ + { 0x0000, 0x0000 }, /* R918 */ + { 0x0000, 0x0000 }, /* R919 */ + { 0x0000, 0x0000 }, /* R920 */ + { 0x0000, 0x0000 }, /* R921 */ + { 0x0000, 0x0000 }, /* R922 */ + { 0x0000, 0x0000 }, /* R923 */ + { 0x0000, 0x0000 }, /* R924 */ + { 0x0000, 0x0000 }, /* R925 */ + { 0x0000, 0x0000 }, /* R926 */ + { 0x0000, 0x0000 }, /* R927 */ + { 0x0000, 0x0000 }, /* R928 */ + { 0x0000, 0x0000 }, /* R929 */ + { 0x0000, 0x0000 }, /* R930 */ + { 0x0000, 0x0000 }, /* R931 */ + { 0x0000, 0x0000 }, /* R932 */ + { 0x0000, 0x0000 }, /* R933 */ + { 0x0000, 0x0000 }, /* R934 */ + { 0x0000, 0x0000 }, /* R935 */ + { 0x0000, 0x0000 }, /* R936 */ + { 0x0000, 0x0000 }, /* R937 */ + { 0x0000, 0x0000 }, /* R938 */ + { 0x0000, 0x0000 }, /* R939 */ + { 0x0000, 0x0000 }, /* R940 */ + { 0x0000, 0x0000 }, /* R941 */ + { 0x0000, 0x0000 }, /* R942 */ + { 0x0000, 0x0000 }, /* R943 */ + { 0x0000, 0x0000 }, /* R944 */ + { 0x0000, 0x0000 }, /* R945 */ + { 0x0000, 0x0000 }, /* R946 */ + { 0x0000, 0x0000 }, /* R947 */ + { 0x0000, 0x0000 }, /* R948 */ + { 0x0000, 0x0000 }, /* R949 */ + { 0x0000, 0x0000 }, /* R950 */ + { 0x0000, 0x0000 }, /* R951 */ + { 0x0000, 0x0000 }, /* R952 */ + { 0x0000, 0x0000 }, /* R953 */ + { 0x0000, 0x0000 }, /* R954 */ + { 0x0000, 0x0000 }, /* R955 */ + { 0x0000, 0x0000 }, /* R956 */ + { 0x0000, 0x0000 }, /* R957 */ + { 0x0000, 0x0000 }, /* R958 */ + { 0x0000, 0x0000 }, /* R959 */ + { 0x0000, 0x0000 }, /* R960 */ + { 0x0000, 0x0000 }, /* R961 */ + { 0x0000, 0x0000 }, /* R962 */ + { 0x0000, 0x0000 }, /* R963 */ + { 0x0000, 0x0000 }, /* R964 */ + { 0x0000, 0x0000 }, /* R965 */ + { 0x0000, 0x0000 }, /* R966 */ + { 0x0000, 0x0000 }, /* R967 */ + { 0x0000, 0x0000 }, /* R968 */ + { 0x0000, 0x0000 }, /* R969 */ + { 0x0000, 0x0000 }, /* R970 */ + { 0x0000, 0x0000 }, /* R971 */ + { 0x0000, 0x0000 }, /* R972 */ + { 0x0000, 0x0000 }, /* R973 */ + { 0x0000, 0x0000 }, /* R974 */ + { 0x0000, 0x0000 }, /* R975 */ + { 0x0000, 0x0000 }, /* R976 */ + { 0x0000, 0x0000 }, /* R977 */ + { 0x0000, 0x0000 }, /* R978 */ + { 0x0000, 0x0000 }, /* R979 */ + { 0x0000, 0x0000 }, /* R980 */ + { 0x0000, 0x0000 }, /* R981 */ + { 0x0000, 0x0000 }, /* R982 */ + { 0x0000, 0x0000 }, /* R983 */ + { 0x0000, 0x0000 }, /* R984 */ + { 0x0000, 0x0000 }, /* R985 */ + { 0x0000, 0x0000 }, /* R986 */ + { 0x0000, 0x0000 }, /* R987 */ + { 0x0000, 0x0000 }, /* R988 */ + { 0x0000, 0x0000 }, /* R989 */ + { 0x0000, 0x0000 }, /* R990 */ + { 0x0000, 0x0000 }, /* R991 */ + { 0x0000, 0x0000 }, /* R992 */ + { 0x0000, 0x0000 }, /* R993 */ + { 0x0000, 0x0000 }, /* R994 */ + { 0x0000, 0x0000 }, /* R995 */ + { 0x0000, 0x0000 }, /* R996 */ + { 0x0000, 0x0000 }, /* R997 */ + { 0x0000, 0x0000 }, /* R998 */ + { 0x0000, 0x0000 }, /* R999 */ + { 0x0000, 0x0000 }, /* R1000 */ + { 0x0000, 0x0000 }, /* R1001 */ + { 0x0000, 0x0000 }, /* R1002 */ + { 0x0000, 0x0000 }, /* R1003 */ + { 0x0000, 0x0000 }, /* R1004 */ + { 0x0000, 0x0000 }, /* R1005 */ + { 0x0000, 0x0000 }, /* R1006 */ + { 0x0000, 0x0000 }, /* R1007 */ + { 0x0000, 0x0000 }, /* R1008 */ + { 0x0000, 0x0000 }, /* R1009 */ + { 0x0000, 0x0000 }, /* R1010 */ + { 0x0000, 0x0000 }, /* R1011 */ + { 0x0000, 0x0000 }, /* R1012 */ + { 0x0000, 0x0000 }, /* R1013 */ + { 0x0000, 0x0000 }, /* R1014 */ + { 0x0000, 0x0000 }, /* R1015 */ + { 0x0000, 0x0000 }, /* R1016 */ + { 0x0000, 0x0000 }, /* R1017 */ + { 0x0000, 0x0000 }, /* R1018 */ + { 0x0000, 0x0000 }, /* R1019 */ + { 0x0000, 0x0000 }, /* 
R1020 */ + { 0x0000, 0x0000 }, /* R1021 */ + { 0x0000, 0x0000 }, /* R1022 */ + { 0x0000, 0x0000 }, /* R1023 */ + { 0x00FF, 0x01FF }, /* R1024 - AIF1 ADC1 Left Volume */ + { 0x00FF, 0x01FF }, /* R1025 - AIF1 ADC1 Right Volume */ + { 0x00FF, 0x01FF }, /* R1026 - AIF1 DAC1 Left Volume */ + { 0x00FF, 0x01FF }, /* R1027 - AIF1 DAC1 Right Volume */ + { 0x00FF, 0x01FF }, /* R1028 - AIF1 ADC2 Left Volume */ + { 0x00FF, 0x01FF }, /* R1029 - AIF1 ADC2 Right Volume */ + { 0x00FF, 0x01FF }, /* R1030 - AIF1 DAC2 Left Volume */ + { 0x00FF, 0x01FF }, /* R1031 - AIF1 DAC2 Right Volume */ + { 0x0000, 0x0000 }, /* R1032 */ + { 0x0000, 0x0000 }, /* R1033 */ + { 0x0000, 0x0000 }, /* R1034 */ + { 0x0000, 0x0000 }, /* R1035 */ + { 0x0000, 0x0000 }, /* R1036 */ + { 0x0000, 0x0000 }, /* R1037 */ + { 0x0000, 0x0000 }, /* R1038 */ + { 0x0000, 0x0000 }, /* R1039 */ + { 0xF800, 0xF800 }, /* R1040 - AIF1 ADC1 Filters */ + { 0x7800, 0x7800 }, /* R1041 - AIF1 ADC2 Filters */ + { 0x0000, 0x0000 }, /* R1042 */ + { 0x0000, 0x0000 }, /* R1043 */ + { 0x0000, 0x0000 }, /* R1044 */ + { 0x0000, 0x0000 }, /* R1045 */ + { 0x0000, 0x0000 }, /* R1046 */ + { 0x0000, 0x0000 }, /* R1047 */ + { 0x0000, 0x0000 }, /* R1048 */ + { 0x0000, 0x0000 }, /* R1049 */ + { 0x0000, 0x0000 }, /* R1050 */ + { 0x0000, 0x0000 }, /* R1051 */ + { 0x0000, 0x0000 }, /* R1052 */ + { 0x0000, 0x0000 }, /* R1053 */ + { 0x0000, 0x0000 }, /* R1054 */ + { 0x0000, 0x0000 }, /* R1055 */ + { 0x02B6, 0x02B6 }, /* R1056 - AIF1 DAC1 Filters (1) */ + { 0x3F00, 0x3F00 }, /* R1057 - AIF1 DAC1 Filters (2) */ + { 0x02B6, 0x02B6 }, /* R1058 - AIF1 DAC2 Filters (1) */ + { 0x3F00, 0x3F00 }, /* R1059 - AIF1 DAC2 Filters (2) */ + { 0x0000, 0x0000 }, /* R1060 */ + { 0x0000, 0x0000 }, /* R1061 */ + { 0x0000, 0x0000 }, /* R1062 */ + { 0x0000, 0x0000 }, /* R1063 */ + { 0x0000, 0x0000 }, /* R1064 */ + { 0x0000, 0x0000 }, /* R1065 */ + { 0x0000, 0x0000 }, /* R1066 */ + { 0x0000, 0x0000 }, /* R1067 */ + { 0x0000, 0x0000 }, /* R1068 */ + { 0x0000, 0x0000 }, /* R1069 */ + { 0x0000, 0x0000 }, /* R1070 */ + { 0x0000, 0x0000 }, /* R1071 */ + { 0x0000, 0x0000 }, /* R1072 */ + { 0x0000, 0x0000 }, /* R1073 */ + { 0x0000, 0x0000 }, /* R1074 */ + { 0x0000, 0x0000 }, /* R1075 */ + { 0x0000, 0x0000 }, /* R1076 */ + { 0x0000, 0x0000 }, /* R1077 */ + { 0x0000, 0x0000 }, /* R1078 */ + { 0x0000, 0x0000 }, /* R1079 */ + { 0x0000, 0x0000 }, /* R1080 */ + { 0x0000, 0x0000 }, /* R1081 */ + { 0x0000, 0x0000 }, /* R1082 */ + { 0x0000, 0x0000 }, /* R1083 */ + { 0x0000, 0x0000 }, /* R1084 */ + { 0x0000, 0x0000 }, /* R1085 */ + { 0x0000, 0x0000 }, /* R1086 */ + { 0x0000, 0x0000 }, /* R1087 */ + { 0xFFFF, 0xFFFF }, /* R1088 - AIF1 DRC1 (1) */ + { 0x1FFF, 0x1FFF }, /* R1089 - AIF1 DRC1 (2) */ + { 0xFFFF, 0xFFFF }, /* R1090 - AIF1 DRC1 (3) */ + { 0x07FF, 0x07FF }, /* R1091 - AIF1 DRC1 (4) */ + { 0x03FF, 0x03FF }, /* R1092 - AIF1 DRC1 (5) */ + { 0x0000, 0x0000 }, /* R1093 */ + { 0x0000, 0x0000 }, /* R1094 */ + { 0x0000, 0x0000 }, /* R1095 */ + { 0x0000, 0x0000 }, /* R1096 */ + { 0x0000, 0x0000 }, /* R1097 */ + { 0x0000, 0x0000 }, /* R1098 */ + { 0x0000, 0x0000 }, /* R1099 */ + { 0x0000, 0x0000 }, /* R1100 */ + { 0x0000, 0x0000 }, /* R1101 */ + { 0x0000, 0x0000 }, /* R1102 */ + { 0x0000, 0x0000 }, /* R1103 */ + { 0xFFFF, 0xFFFF }, /* R1104 - AIF1 DRC2 (1) */ + { 0x1FFF, 0x1FFF }, /* R1105 - AIF1 DRC2 (2) */ + { 0xFFFF, 0xFFFF }, /* R1106 - AIF1 DRC2 (3) */ + { 0x07FF, 0x07FF }, /* R1107 - AIF1 DRC2 (4) */ + { 0x03FF, 0x03FF }, /* R1108 - AIF1 DRC2 (5) */ + { 0x0000, 0x0000 }, /* R1109 */ + { 0x0000, 0x0000 }, /* 
R1110 */ + { 0x0000, 0x0000 }, /* R1111 */ + { 0x0000, 0x0000 }, /* R1112 */ + { 0x0000, 0x0000 }, /* R1113 */ + { 0x0000, 0x0000 }, /* R1114 */ + { 0x0000, 0x0000 }, /* R1115 */ + { 0x0000, 0x0000 }, /* R1116 */ + { 0x0000, 0x0000 }, /* R1117 */ + { 0x0000, 0x0000 }, /* R1118 */ + { 0x0000, 0x0000 }, /* R1119 */ + { 0x0000, 0x0000 }, /* R1120 */ + { 0x0000, 0x0000 }, /* R1121 */ + { 0x0000, 0x0000 }, /* R1122 */ + { 0x0000, 0x0000 }, /* R1123 */ + { 0x0000, 0x0000 }, /* R1124 */ + { 0x0000, 0x0000 }, /* R1125 */ + { 0x0000, 0x0000 }, /* R1126 */ + { 0x0000, 0x0000 }, /* R1127 */ + { 0x0000, 0x0000 }, /* R1128 */ + { 0x0000, 0x0000 }, /* R1129 */ + { 0x0000, 0x0000 }, /* R1130 */ + { 0x0000, 0x0000 }, /* R1131 */ + { 0x0000, 0x0000 }, /* R1132 */ + { 0x0000, 0x0000 }, /* R1133 */ + { 0x0000, 0x0000 }, /* R1134 */ + { 0x0000, 0x0000 }, /* R1135 */ + { 0x0000, 0x0000 }, /* R1136 */ + { 0x0000, 0x0000 }, /* R1137 */ + { 0x0000, 0x0000 }, /* R1138 */ + { 0x0000, 0x0000 }, /* R1139 */ + { 0x0000, 0x0000 }, /* R1140 */ + { 0x0000, 0x0000 }, /* R1141 */ + { 0x0000, 0x0000 }, /* R1142 */ + { 0x0000, 0x0000 }, /* R1143 */ + { 0x0000, 0x0000 }, /* R1144 */ + { 0x0000, 0x0000 }, /* R1145 */ + { 0x0000, 0x0000 }, /* R1146 */ + { 0x0000, 0x0000 }, /* R1147 */ + { 0x0000, 0x0000 }, /* R1148 */ + { 0x0000, 0x0000 }, /* R1149 */ + { 0x0000, 0x0000 }, /* R1150 */ + { 0x0000, 0x0000 }, /* R1151 */ + { 0xFFFF, 0xFFFF }, /* R1152 - AIF1 DAC1 EQ Gains (1) */ + { 0xFFC0, 0xFFC0 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */ + { 0xFFFF, 0xFFFF }, /* R1154 - AIF1 DAC1 EQ Band 1 A */ + { 0xFFFF, 0xFFFF }, /* R1155 - AIF1 DAC1 EQ Band 1 B */ + { 0xFFFF, 0xFFFF }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */ + { 0xFFFF, 0xFFFF }, /* R1157 - AIF1 DAC1 EQ Band 2 A */ + { 0xFFFF, 0xFFFF }, /* R1158 - AIF1 DAC1 EQ Band 2 B */ + { 0xFFFF, 0xFFFF }, /* R1159 - AIF1 DAC1 EQ Band 2 C */ + { 0xFFFF, 0xFFFF }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */ + { 0xFFFF, 0xFFFF }, /* R1161 - AIF1 DAC1 EQ Band 3 A */ + { 0xFFFF, 0xFFFF }, /* R1162 - AIF1 DAC1 EQ Band 3 B */ + { 0xFFFF, 0xFFFF }, /* R1163 - AIF1 DAC1 EQ Band 3 C */ + { 0xFFFF, 0xFFFF }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */ + { 0xFFFF, 0xFFFF }, /* R1165 - AIF1 DAC1 EQ Band 4 A */ + { 0xFFFF, 0xFFFF }, /* R1166 - AIF1 DAC1 EQ Band 4 B */ + { 0xFFFF, 0xFFFF }, /* R1167 - AIF1 DAC1 EQ Band 4 C */ + { 0xFFFF, 0xFFFF }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */ + { 0xFFFF, 0xFFFF }, /* R1169 - AIF1 DAC1 EQ Band 5 A */ + { 0xFFFF, 0xFFFF }, /* R1170 - AIF1 DAC1 EQ Band 5 B */ + { 0xFFFF, 0xFFFF }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */ + { 0x0000, 0x0000 }, /* R1172 */ + { 0x0000, 0x0000 }, /* R1173 */ + { 0x0000, 0x0000 }, /* R1174 */ + { 0x0000, 0x0000 }, /* R1175 */ + { 0x0000, 0x0000 }, /* R1176 */ + { 0x0000, 0x0000 }, /* R1177 */ + { 0x0000, 0x0000 }, /* R1178 */ + { 0x0000, 0x0000 }, /* R1179 */ + { 0x0000, 0x0000 }, /* R1180 */ + { 0x0000, 0x0000 }, /* R1181 */ + { 0x0000, 0x0000 }, /* R1182 */ + { 0x0000, 0x0000 }, /* R1183 */ + { 0xFFFF, 0xFFFF }, /* R1184 - AIF1 DAC2 EQ Gains (1) */ + { 0xFFC0, 0xFFC0 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */ + { 0xFFFF, 0xFFFF }, /* R1186 - AIF1 DAC2 EQ Band 1 A */ + { 0xFFFF, 0xFFFF }, /* R1187 - AIF1 DAC2 EQ Band 1 B */ + { 0xFFFF, 0xFFFF }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */ + { 0xFFFF, 0xFFFF }, /* R1189 - AIF1 DAC2 EQ Band 2 A */ + { 0xFFFF, 0xFFFF }, /* R1190 - AIF1 DAC2 EQ Band 2 B */ + { 0xFFFF, 0xFFFF }, /* R1191 - AIF1 DAC2 EQ Band 2 C */ + { 0xFFFF, 0xFFFF }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */ + { 0xFFFF, 0xFFFF }, /* R1193 - AIF1 DAC2 EQ Band 
3 A */ + { 0xFFFF, 0xFFFF }, /* R1194 - AIF1 DAC2 EQ Band 3 B */ + { 0xFFFF, 0xFFFF }, /* R1195 - AIF1 DAC2 EQ Band 3 C */ + { 0xFFFF, 0xFFFF }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */ + { 0xFFFF, 0xFFFF }, /* R1197 - AIF1 DAC2 EQ Band 4 A */ + { 0xFFFF, 0xFFFF }, /* R1198 - AIF1 DAC2 EQ Band 4 B */ + { 0xFFFF, 0xFFFF }, /* R1199 - AIF1 DAC2 EQ Band 4 C */ + { 0xFFFF, 0xFFFF }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */ + { 0xFFFF, 0xFFFF }, /* R1201 - AIF1 DAC2 EQ Band 5 A */ + { 0xFFFF, 0xFFFF }, /* R1202 - AIF1 DAC2 EQ Band 5 B */ + { 0xFFFF, 0xFFFF }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */ + { 0x0000, 0x0000 }, /* R1204 */ + { 0x0000, 0x0000 }, /* R1205 */ + { 0x0000, 0x0000 }, /* R1206 */ + { 0x0000, 0x0000 }, /* R1207 */ + { 0x0000, 0x0000 }, /* R1208 */ + { 0x0000, 0x0000 }, /* R1209 */ + { 0x0000, 0x0000 }, /* R1210 */ + { 0x0000, 0x0000 }, /* R1211 */ + { 0x0000, 0x0000 }, /* R1212 */ + { 0x0000, 0x0000 }, /* R1213 */ + { 0x0000, 0x0000 }, /* R1214 */ + { 0x0000, 0x0000 }, /* R1215 */ + { 0x0000, 0x0000 }, /* R1216 */ + { 0x0000, 0x0000 }, /* R1217 */ + { 0x0000, 0x0000 }, /* R1218 */ + { 0x0000, 0x0000 }, /* R1219 */ + { 0x0000, 0x0000 }, /* R1220 */ + { 0x0000, 0x0000 }, /* R1221 */ + { 0x0000, 0x0000 }, /* R1222 */ + { 0x0000, 0x0000 }, /* R1223 */ + { 0x0000, 0x0000 }, /* R1224 */ + { 0x0000, 0x0000 }, /* R1225 */ + { 0x0000, 0x0000 }, /* R1226 */ + { 0x0000, 0x0000 }, /* R1227 */ + { 0x0000, 0x0000 }, /* R1228 */ + { 0x0000, 0x0000 }, /* R1229 */ + { 0x0000, 0x0000 }, /* R1230 */ + { 0x0000, 0x0000 }, /* R1231 */ + { 0x0000, 0x0000 }, /* R1232 */ + { 0x0000, 0x0000 }, /* R1233 */ + { 0x0000, 0x0000 }, /* R1234 */ + { 0x0000, 0x0000 }, /* R1235 */ + { 0x0000, 0x0000 }, /* R1236 */ + { 0x0000, 0x0000 }, /* R1237 */ + { 0x0000, 0x0000 }, /* R1238 */ + { 0x0000, 0x0000 }, /* R1239 */ + { 0x0000, 0x0000 }, /* R1240 */ + { 0x0000, 0x0000 }, /* R1241 */ + { 0x0000, 0x0000 }, /* R1242 */ + { 0x0000, 0x0000 }, /* R1243 */ + { 0x0000, 0x0000 }, /* R1244 */ + { 0x0000, 0x0000 }, /* R1245 */ + { 0x0000, 0x0000 }, /* R1246 */ + { 0x0000, 0x0000 }, /* R1247 */ + { 0x0000, 0x0000 }, /* R1248 */ + { 0x0000, 0x0000 }, /* R1249 */ + { 0x0000, 0x0000 }, /* R1250 */ + { 0x0000, 0x0000 }, /* R1251 */ + { 0x0000, 0x0000 }, /* R1252 */ + { 0x0000, 0x0000 }, /* R1253 */ + { 0x0000, 0x0000 }, /* R1254 */ + { 0x0000, 0x0000 }, /* R1255 */ + { 0x0000, 0x0000 }, /* R1256 */ + { 0x0000, 0x0000 }, /* R1257 */ + { 0x0000, 0x0000 }, /* R1258 */ + { 0x0000, 0x0000 }, /* R1259 */ + { 0x0000, 0x0000 }, /* R1260 */ + { 0x0000, 0x0000 }, /* R1261 */ + { 0x0000, 0x0000 }, /* R1262 */ + { 0x0000, 0x0000 }, /* R1263 */ + { 0x0000, 0x0000 }, /* R1264 */ + { 0x0000, 0x0000 }, /* R1265 */ + { 0x0000, 0x0000 }, /* R1266 */ + { 0x0000, 0x0000 }, /* R1267 */ + { 0x0000, 0x0000 }, /* R1268 */ + { 0x0000, 0x0000 }, /* R1269 */ + { 0x0000, 0x0000 }, /* R1270 */ + { 0x0000, 0x0000 }, /* R1271 */ + { 0x0000, 0x0000 }, /* R1272 */ + { 0x0000, 0x0000 }, /* R1273 */ + { 0x0000, 0x0000 }, /* R1274 */ + { 0x0000, 0x0000 }, /* R1275 */ + { 0x0000, 0x0000 }, /* R1276 */ + { 0x0000, 0x0000 }, /* R1277 */ + { 0x0000, 0x0000 }, /* R1278 */ + { 0x0000, 0x0000 }, /* R1279 */ + { 0x00FF, 0x01FF }, /* R1280 - AIF2 ADC Left Volume */ + { 0x00FF, 0x01FF }, /* R1281 - AIF2 ADC Right Volume */ + { 0x00FF, 0x01FF }, /* R1282 - AIF2 DAC Left Volume */ + { 0x00FF, 0x01FF }, /* R1283 - AIF2 DAC Right Volume */ + { 0x0000, 0x0000 }, /* R1284 */ + { 0x0000, 0x0000 }, /* R1285 */ + { 0x0000, 0x0000 }, /* R1286 */ + { 0x0000, 0x0000 }, /* R1287 */ + { 0x0000, 
0x0000 }, /* R1288 */ + { 0x0000, 0x0000 }, /* R1289 */ + { 0x0000, 0x0000 }, /* R1290 */ + { 0x0000, 0x0000 }, /* R1291 */ + { 0x0000, 0x0000 }, /* R1292 */ + { 0x0000, 0x0000 }, /* R1293 */ + { 0x0000, 0x0000 }, /* R1294 */ + { 0x0000, 0x0000 }, /* R1295 */ + { 0xF800, 0xF800 }, /* R1296 - AIF2 ADC Filters */ + { 0x0000, 0x0000 }, /* R1297 */ + { 0x0000, 0x0000 }, /* R1298 */ + { 0x0000, 0x0000 }, /* R1299 */ + { 0x0000, 0x0000 }, /* R1300 */ + { 0x0000, 0x0000 }, /* R1301 */ + { 0x0000, 0x0000 }, /* R1302 */ + { 0x0000, 0x0000 }, /* R1303 */ + { 0x0000, 0x0000 }, /* R1304 */ + { 0x0000, 0x0000 }, /* R1305 */ + { 0x0000, 0x0000 }, /* R1306 */ + { 0x0000, 0x0000 }, /* R1307 */ + { 0x0000, 0x0000 }, /* R1308 */ + { 0x0000, 0x0000 }, /* R1309 */ + { 0x0000, 0x0000 }, /* R1310 */ + { 0x0000, 0x0000 }, /* R1311 */ + { 0x02B6, 0x02B6 }, /* R1312 - AIF2 DAC Filters (1) */ + { 0x3F00, 0x3F00 }, /* R1313 - AIF2 DAC Filters (2) */ + { 0x0000, 0x0000 }, /* R1314 */ + { 0x0000, 0x0000 }, /* R1315 */ + { 0x0000, 0x0000 }, /* R1316 */ + { 0x0000, 0x0000 }, /* R1317 */ + { 0x0000, 0x0000 }, /* R1318 */ + { 0x0000, 0x0000 }, /* R1319 */ + { 0x0000, 0x0000 }, /* R1320 */ + { 0x0000, 0x0000 }, /* R1321 */ + { 0x0000, 0x0000 }, /* R1322 */ + { 0x0000, 0x0000 }, /* R1323 */ + { 0x0000, 0x0000 }, /* R1324 */ + { 0x0000, 0x0000 }, /* R1325 */ + { 0x0000, 0x0000 }, /* R1326 */ + { 0x0000, 0x0000 }, /* R1327 */ + { 0x0000, 0x0000 }, /* R1328 */ + { 0x0000, 0x0000 }, /* R1329 */ + { 0x0000, 0x0000 }, /* R1330 */ + { 0x0000, 0x0000 }, /* R1331 */ + { 0x0000, 0x0000 }, /* R1332 */ + { 0x0000, 0x0000 }, /* R1333 */ + { 0x0000, 0x0000 }, /* R1334 */ + { 0x0000, 0x0000 }, /* R1335 */ + { 0x0000, 0x0000 }, /* R1336 */ + { 0x0000, 0x0000 }, /* R1337 */ + { 0x0000, 0x0000 }, /* R1338 */ + { 0x0000, 0x0000 }, /* R1339 */ + { 0x0000, 0x0000 }, /* R1340 */ + { 0x0000, 0x0000 }, /* R1341 */ + { 0x0000, 0x0000 }, /* R1342 */ + { 0x0000, 0x0000 }, /* R1343 */ + { 0xFFFF, 0xFFFF }, /* R1344 - AIF2 DRC (1) */ + { 0x1FFF, 0x1FFF }, /* R1345 - AIF2 DRC (2) */ + { 0xFFFF, 0xFFFF }, /* R1346 - AIF2 DRC (3) */ + { 0x07FF, 0x07FF }, /* R1347 - AIF2 DRC (4) */ + { 0x03FF, 0x03FF }, /* R1348 - AIF2 DRC (5) */ + { 0x0000, 0x0000 }, /* R1349 */ + { 0x0000, 0x0000 }, /* R1350 */ + { 0x0000, 0x0000 }, /* R1351 */ + { 0x0000, 0x0000 }, /* R1352 */ + { 0x0000, 0x0000 }, /* R1353 */ + { 0x0000, 0x0000 }, /* R1354 */ + { 0x0000, 0x0000 }, /* R1355 */ + { 0x0000, 0x0000 }, /* R1356 */ + { 0x0000, 0x0000 }, /* R1357 */ + { 0x0000, 0x0000 }, /* R1358 */ + { 0x0000, 0x0000 }, /* R1359 */ + { 0x0000, 0x0000 }, /* R1360 */ + { 0x0000, 0x0000 }, /* R1361 */ + { 0x0000, 0x0000 }, /* R1362 */ + { 0x0000, 0x0000 }, /* R1363 */ + { 0x0000, 0x0000 }, /* R1364 */ + { 0x0000, 0x0000 }, /* R1365 */ + { 0x0000, 0x0000 }, /* R1366 */ + { 0x0000, 0x0000 }, /* R1367 */ + { 0x0000, 0x0000 }, /* R1368 */ + { 0x0000, 0x0000 }, /* R1369 */ + { 0x0000, 0x0000 }, /* R1370 */ + { 0x0000, 0x0000 }, /* R1371 */ + { 0x0000, 0x0000 }, /* R1372 */ + { 0x0000, 0x0000 }, /* R1373 */ + { 0x0000, 0x0000 }, /* R1374 */ + { 0x0000, 0x0000 }, /* R1375 */ + { 0x0000, 0x0000 }, /* R1376 */ + { 0x0000, 0x0000 }, /* R1377 */ + { 0x0000, 0x0000 }, /* R1378 */ + { 0x0000, 0x0000 }, /* R1379 */ + { 0x0000, 0x0000 }, /* R1380 */ + { 0x0000, 0x0000 }, /* R1381 */ + { 0x0000, 0x0000 }, /* R1382 */ + { 0x0000, 0x0000 }, /* R1383 */ + { 0x0000, 0x0000 }, /* R1384 */ + { 0x0000, 0x0000 }, /* R1385 */ + { 0x0000, 0x0000 }, /* R1386 */ + { 0x0000, 0x0000 }, /* R1387 */ + { 0x0000, 0x0000 }, /* 
R1388 */ + { 0x0000, 0x0000 }, /* R1389 */ + { 0x0000, 0x0000 }, /* R1390 */ + { 0x0000, 0x0000 }, /* R1391 */ + { 0x0000, 0x0000 }, /* R1392 */ + { 0x0000, 0x0000 }, /* R1393 */ + { 0x0000, 0x0000 }, /* R1394 */ + { 0x0000, 0x0000 }, /* R1395 */ + { 0x0000, 0x0000 }, /* R1396 */ + { 0x0000, 0x0000 }, /* R1397 */ + { 0x0000, 0x0000 }, /* R1398 */ + { 0x0000, 0x0000 }, /* R1399 */ + { 0x0000, 0x0000 }, /* R1400 */ + { 0x0000, 0x0000 }, /* R1401 */ + { 0x0000, 0x0000 }, /* R1402 */ + { 0x0000, 0x0000 }, /* R1403 */ + { 0x0000, 0x0000 }, /* R1404 */ + { 0x0000, 0x0000 }, /* R1405 */ + { 0x0000, 0x0000 }, /* R1406 */ + { 0x0000, 0x0000 }, /* R1407 */ + { 0xFFFF, 0xFFFF }, /* R1408 - AIF2 EQ Gains (1) */ + { 0xFFC0, 0xFFC0 }, /* R1409 - AIF2 EQ Gains (2) */ + { 0xFFFF, 0xFFFF }, /* R1410 - AIF2 EQ Band 1 A */ + { 0xFFFF, 0xFFFF }, /* R1411 - AIF2 EQ Band 1 B */ + { 0xFFFF, 0xFFFF }, /* R1412 - AIF2 EQ Band 1 PG */ + { 0xFFFF, 0xFFFF }, /* R1413 - AIF2 EQ Band 2 A */ + { 0xFFFF, 0xFFFF }, /* R1414 - AIF2 EQ Band 2 B */ + { 0xFFFF, 0xFFFF }, /* R1415 - AIF2 EQ Band 2 C */ + { 0xFFFF, 0xFFFF }, /* R1416 - AIF2 EQ Band 2 PG */ + { 0xFFFF, 0xFFFF }, /* R1417 - AIF2 EQ Band 3 A */ + { 0xFFFF, 0xFFFF }, /* R1418 - AIF2 EQ Band 3 B */ + { 0xFFFF, 0xFFFF }, /* R1419 - AIF2 EQ Band 3 C */ + { 0xFFFF, 0xFFFF }, /* R1420 - AIF2 EQ Band 3 PG */ + { 0xFFFF, 0xFFFF }, /* R1421 - AIF2 EQ Band 4 A */ + { 0xFFFF, 0xFFFF }, /* R1422 - AIF2 EQ Band 4 B */ + { 0xFFFF, 0xFFFF }, /* R1423 - AIF2 EQ Band 4 C */ + { 0xFFFF, 0xFFFF }, /* R1424 - AIF2 EQ Band 4 PG */ + { 0xFFFF, 0xFFFF }, /* R1425 - AIF2 EQ Band 5 A */ + { 0xFFFF, 0xFFFF }, /* R1426 - AIF2 EQ Band 5 B */ + { 0xFFFF, 0xFFFF }, /* R1427 - AIF2 EQ Band 5 PG */ + { 0x0000, 0x0000 }, /* R1428 */ + { 0x0000, 0x0000 }, /* R1429 */ + { 0x0000, 0x0000 }, /* R1430 */ + { 0x0000, 0x0000 }, /* R1431 */ + { 0x0000, 0x0000 }, /* R1432 */ + { 0x0000, 0x0000 }, /* R1433 */ + { 0x0000, 0x0000 }, /* R1434 */ + { 0x0000, 0x0000 }, /* R1435 */ + { 0x0000, 0x0000 }, /* R1436 */ + { 0x0000, 0x0000 }, /* R1437 */ + { 0x0000, 0x0000 }, /* R1438 */ + { 0x0000, 0x0000 }, /* R1439 */ + { 0x0000, 0x0000 }, /* R1440 */ + { 0x0000, 0x0000 }, /* R1441 */ + { 0x0000, 0x0000 }, /* R1442 */ + { 0x0000, 0x0000 }, /* R1443 */ + { 0x0000, 0x0000 }, /* R1444 */ + { 0x0000, 0x0000 }, /* R1445 */ + { 0x0000, 0x0000 }, /* R1446 */ + { 0x0000, 0x0000 }, /* R1447 */ + { 0x0000, 0x0000 }, /* R1448 */ + { 0x0000, 0x0000 }, /* R1449 */ + { 0x0000, 0x0000 }, /* R1450 */ + { 0x0000, 0x0000 }, /* R1451 */ + { 0x0000, 0x0000 }, /* R1452 */ + { 0x0000, 0x0000 }, /* R1453 */ + { 0x0000, 0x0000 }, /* R1454 */ + { 0x0000, 0x0000 }, /* R1455 */ + { 0x0000, 0x0000 }, /* R1456 */ + { 0x0000, 0x0000 }, /* R1457 */ + { 0x0000, 0x0000 }, /* R1458 */ + { 0x0000, 0x0000 }, /* R1459 */ + { 0x0000, 0x0000 }, /* R1460 */ + { 0x0000, 0x0000 }, /* R1461 */ + { 0x0000, 0x0000 }, /* R1462 */ + { 0x0000, 0x0000 }, /* R1463 */ + { 0x0000, 0x0000 }, /* R1464 */ + { 0x0000, 0x0000 }, /* R1465 */ + { 0x0000, 0x0000 }, /* R1466 */ + { 0x0000, 0x0000 }, /* R1467 */ + { 0x0000, 0x0000 }, /* R1468 */ + { 0x0000, 0x0000 }, /* R1469 */ + { 0x0000, 0x0000 }, /* R1470 */ + { 0x0000, 0x0000 }, /* R1471 */ + { 0x0000, 0x0000 }, /* R1472 */ + { 0x0000, 0x0000 }, /* R1473 */ + { 0x0000, 0x0000 }, /* R1474 */ + { 0x0000, 0x0000 }, /* R1475 */ + { 0x0000, 0x0000 }, /* R1476 */ + { 0x0000, 0x0000 }, /* R1477 */ + { 0x0000, 0x0000 }, /* R1478 */ + { 0x0000, 0x0000 }, /* R1479 */ + { 0x0000, 0x0000 }, /* R1480 */ + { 0x0000, 0x0000 }, /* R1481 
*/ + { 0x0000, 0x0000 }, /* R1482 */ + { 0x0000, 0x0000 }, /* R1483 */ + { 0x0000, 0x0000 }, /* R1484 */ + { 0x0000, 0x0000 }, /* R1485 */ + { 0x0000, 0x0000 }, /* R1486 */ + { 0x0000, 0x0000 }, /* R1487 */ + { 0x0000, 0x0000 }, /* R1488 */ + { 0x0000, 0x0000 }, /* R1489 */ + { 0x0000, 0x0000 }, /* R1490 */ + { 0x0000, 0x0000 }, /* R1491 */ + { 0x0000, 0x0000 }, /* R1492 */ + { 0x0000, 0x0000 }, /* R1493 */ + { 0x0000, 0x0000 }, /* R1494 */ + { 0x0000, 0x0000 }, /* R1495 */ + { 0x0000, 0x0000 }, /* R1496 */ + { 0x0000, 0x0000 }, /* R1497 */ + { 0x0000, 0x0000 }, /* R1498 */ + { 0x0000, 0x0000 }, /* R1499 */ + { 0x0000, 0x0000 }, /* R1500 */ + { 0x0000, 0x0000 }, /* R1501 */ + { 0x0000, 0x0000 }, /* R1502 */ + { 0x0000, 0x0000 }, /* R1503 */ + { 0x0000, 0x0000 }, /* R1504 */ + { 0x0000, 0x0000 }, /* R1505 */ + { 0x0000, 0x0000 }, /* R1506 */ + { 0x0000, 0x0000 }, /* R1507 */ + { 0x0000, 0x0000 }, /* R1508 */ + { 0x0000, 0x0000 }, /* R1509 */ + { 0x0000, 0x0000 }, /* R1510 */ + { 0x0000, 0x0000 }, /* R1511 */ + { 0x0000, 0x0000 }, /* R1512 */ + { 0x0000, 0x0000 }, /* R1513 */ + { 0x0000, 0x0000 }, /* R1514 */ + { 0x0000, 0x0000 }, /* R1515 */ + { 0x0000, 0x0000 }, /* R1516 */ + { 0x0000, 0x0000 }, /* R1517 */ + { 0x0000, 0x0000 }, /* R1518 */ + { 0x0000, 0x0000 }, /* R1519 */ + { 0x0000, 0x0000 }, /* R1520 */ + { 0x0000, 0x0000 }, /* R1521 */ + { 0x0000, 0x0000 }, /* R1522 */ + { 0x0000, 0x0000 }, /* R1523 */ + { 0x0000, 0x0000 }, /* R1524 */ + { 0x0000, 0x0000 }, /* R1525 */ + { 0x0000, 0x0000 }, /* R1526 */ + { 0x0000, 0x0000 }, /* R1527 */ + { 0x0000, 0x0000 }, /* R1528 */ + { 0x0000, 0x0000 }, /* R1529 */ + { 0x0000, 0x0000 }, /* R1530 */ + { 0x0000, 0x0000 }, /* R1531 */ + { 0x0000, 0x0000 }, /* R1532 */ + { 0x0000, 0x0000 }, /* R1533 */ + { 0x0000, 0x0000 }, /* R1534 */ + { 0x0000, 0x0000 }, /* R1535 */ + { 0x01EF, 0x01EF }, /* R1536 - DAC1 Mixer Volumes */ + { 0x0037, 0x0037 }, /* R1537 - DAC1 Left Mixer Routing */ + { 0x0037, 0x0037 }, /* R1538 - DAC1 Right Mixer Routing */ + { 0x01EF, 0x01EF }, /* R1539 - DAC2 Mixer Volumes */ + { 0x0037, 0x0037 }, /* R1540 - DAC2 Left Mixer Routing */ + { 0x0037, 0x0037 }, /* R1541 - DAC2 Right Mixer Routing */ + { 0x0003, 0x0003 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */ + { 0x0003, 0x0003 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */ + { 0x0003, 0x0003 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */ + { 0x0003, 0x0003 }, /* R1545 - AIF1 ADC2 Right mixer Routing */ + { 0x0000, 0x0000 }, /* R1546 */ + { 0x0000, 0x0000 }, /* R1547 */ + { 0x0000, 0x0000 }, /* R1548 */ + { 0x0000, 0x0000 }, /* R1549 */ + { 0x0000, 0x0000 }, /* R1550 */ + { 0x0000, 0x0000 }, /* R1551 */ + { 0x02FF, 0x03FF }, /* R1552 - DAC1 Left Volume */ + { 0x02FF, 0x03FF }, /* R1553 - DAC1 Right Volume */ + { 0x02FF, 0x03FF }, /* R1554 - DAC2 Left Volume */ + { 0x02FF, 0x03FF }, /* R1555 - DAC2 Right Volume */ + { 0x0003, 0x0003 }, /* R1556 - DAC Softmute */ + { 0x0000, 0x0000 }, /* R1557 */ + { 0x0000, 0x0000 }, /* R1558 */ + { 0x0000, 0x0000 }, /* R1559 */ + { 0x0000, 0x0000 }, /* R1560 */ + { 0x0000, 0x0000 }, /* R1561 */ + { 0x0000, 0x0000 }, /* R1562 */ + { 0x0000, 0x0000 }, /* R1563 */ + { 0x0000, 0x0000 }, /* R1564 */ + { 0x0000, 0x0000 }, /* R1565 */ + { 0x0000, 0x0000 }, /* R1566 */ + { 0x0000, 0x0000 }, /* R1567 */ + { 0x0003, 0x0003 }, /* R1568 - Oversampling */ + { 0x03C3, 0x03C3 }, /* R1569 - Sidetone */ +}; + +const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE] = { + 0x8994, /* R0 - Software Reset */ + 0x0000, /* R1 - Power Management (1) */ + 0x6000, /* 
R2 - Power Management (2) */ + 0x0000, /* R3 - Power Management (3) */ + 0x0000, /* R4 - Power Management (4) */ + 0x0000, /* R5 - Power Management (5) */ + 0x0000, /* R6 - Power Management (6) */ + 0x0000, /* R7 */ + 0x0000, /* R8 */ + 0x0000, /* R9 */ + 0x0000, /* R10 */ + 0x0000, /* R11 */ + 0x0000, /* R12 */ + 0x0000, /* R13 */ + 0x0000, /* R14 */ + 0x0000, /* R15 */ + 0x0000, /* R16 */ + 0x0000, /* R17 */ + 0x0000, /* R18 */ + 0x0000, /* R19 */ + 0x0000, /* R20 */ + 0x0000, /* R21 - Input Mixer (1) */ + 0x0000, /* R22 */ + 0x0000, /* R23 */ + 0x008B, /* R24 - Left Line Input 1&2 Volume */ + 0x008B, /* R25 - Left Line Input 3&4 Volume */ + 0x008B, /* R26 - Right Line Input 1&2 Volume */ + 0x008B, /* R27 - Right Line Input 3&4 Volume */ + 0x006D, /* R28 - Left Output Volume */ + 0x006D, /* R29 - Right Output Volume */ + 0x0066, /* R30 - Line Outputs Volume */ + 0x0020, /* R31 - HPOUT2 Volume */ + 0x0079, /* R32 - Left OPGA Volume */ + 0x0079, /* R33 - Right OPGA Volume */ + 0x0003, /* R34 - SPKMIXL Attenuation */ + 0x0003, /* R35 - SPKMIXR Attenuation */ + 0x0011, /* R36 - SPKOUT Mixers */ + 0x0140, /* R37 - ClassD */ + 0x0079, /* R38 - Speaker Volume Left */ + 0x0079, /* R39 - Speaker Volume Right */ + 0x0000, /* R40 - Input Mixer (2) */ + 0x0000, /* R41 - Input Mixer (3) */ + 0x0000, /* R42 - Input Mixer (4) */ + 0x0000, /* R43 - Input Mixer (5) */ + 0x0000, /* R44 - Input Mixer (6) */ + 0x0000, /* R45 - Output Mixer (1) */ + 0x0000, /* R46 - Output Mixer (2) */ + 0x0000, /* R47 - Output Mixer (3) */ + 0x0000, /* R48 - Output Mixer (4) */ + 0x0000, /* R49 - Output Mixer (5) */ + 0x0000, /* R50 - Output Mixer (6) */ + 0x0000, /* R51 - HPOUT2 Mixer */ + 0x0000, /* R52 - Line Mixer (1) */ + 0x0000, /* R53 - Line Mixer (2) */ + 0x0000, /* R54 - Speaker Mixer */ + 0x0000, /* R55 - Additional Control */ + 0x0000, /* R56 - AntiPOP (1) */ + 0x0000, /* R57 - AntiPOP (2) */ + 0x0000, /* R58 - MICBIAS */ + 0x000D, /* R59 - LDO 1 */ + 0x0003, /* R60 - LDO 2 */ + 0x0000, /* R61 */ + 0x0000, /* R62 */ + 0x0000, /* R63 */ + 0x0000, /* R64 */ + 0x0000, /* R65 */ + 0x0000, /* R66 */ + 0x0000, /* R67 */ + 0x0000, /* R68 */ + 0x0000, /* R69 */ + 0x0000, /* R70 */ + 0x0000, /* R71 */ + 0x0000, /* R72 */ + 0x0000, /* R73 */ + 0x0000, /* R74 */ + 0x0000, /* R75 */ + 0x1F25, /* R76 - Charge Pump (1) */ + 0x0000, /* R77 */ + 0x0000, /* R78 */ + 0x0000, /* R79 */ + 0x0000, /* R80 */ + 0x0004, /* R81 - Class W (1) */ + 0x0000, /* R82 */ + 0x0000, /* R83 */ + 0x0000, /* R84 - DC Servo (1) */ + 0x054A, /* R85 - DC Servo (2) */ + 0x0000, /* R86 */ + 0x0000, /* R87 - DC Servo (4) */ + 0x0000, /* R88 - DC Servo Readback */ + 0x0000, /* R89 */ + 0x0000, /* R90 */ + 0x0000, /* R91 */ + 0x0000, /* R92 */ + 0x0000, /* R93 */ + 0x0000, /* R94 */ + 0x0000, /* R95 */ + 0x0000, /* R96 - Analogue HP (1) */ + 0x0000, /* R97 */ + 0x0000, /* R98 */ + 0x0000, /* R99 */ + 0x0000, /* R100 */ + 0x0000, /* R101 */ + 0x0000, /* R102 */ + 0x0000, /* R103 */ + 0x0000, /* R104 */ + 0x0000, /* R105 */ + 0x0000, /* R106 */ + 0x0000, /* R107 */ + 0x0000, /* R108 */ + 0x0000, /* R109 */ + 0x0000, /* R110 */ + 0x0000, /* R111 */ + 0x0000, /* R112 */ + 0x0000, /* R113 */ + 0x0000, /* R114 */ + 0x0000, /* R115 */ + 0x0000, /* R116 */ + 0x0000, /* R117 */ + 0x0000, /* R118 */ + 0x0000, /* R119 */ + 0x0000, /* R120 */ + 0x0000, /* R121 */ + 0x0000, /* R122 */ + 0x0000, /* R123 */ + 0x0000, /* R124 */ + 0x0000, /* R125 */ + 0x0000, /* R126 */ + 0x0000, /* R127 */ + 0x0000, /* R128 */ + 0x0000, /* R129 */ + 0x0000, /* R130 */ + 0x0000, /* R131 */ 
+ 0x0000, /* R132 */ + 0x0000, /* R133 */ + 0x0000, /* R134 */ + 0x0000, /* R135 */ + 0x0000, /* R136 */ + 0x0000, /* R137 */ + 0x0000, /* R138 */ + 0x0000, /* R139 */ + 0x0000, /* R140 */ + 0x0000, /* R141 */ + 0x0000, /* R142 */ + 0x0000, /* R143 */ + 0x0000, /* R144 */ + 0x0000, /* R145 */ + 0x0000, /* R146 */ + 0x0000, /* R147 */ + 0x0000, /* R148 */ + 0x0000, /* R149 */ + 0x0000, /* R150 */ + 0x0000, /* R151 */ + 0x0000, /* R152 */ + 0x0000, /* R153 */ + 0x0000, /* R154 */ + 0x0000, /* R155 */ + 0x0000, /* R156 */ + 0x0000, /* R157 */ + 0x0000, /* R158 */ + 0x0000, /* R159 */ + 0x0000, /* R160 */ + 0x0000, /* R161 */ + 0x0000, /* R162 */ + 0x0000, /* R163 */ + 0x0000, /* R164 */ + 0x0000, /* R165 */ + 0x0000, /* R166 */ + 0x0000, /* R167 */ + 0x0000, /* R168 */ + 0x0000, /* R169 */ + 0x0000, /* R170 */ + 0x0000, /* R171 */ + 0x0000, /* R172 */ + 0x0000, /* R173 */ + 0x0000, /* R174 */ + 0x0000, /* R175 */ + 0x0000, /* R176 */ + 0x0000, /* R177 */ + 0x0000, /* R178 */ + 0x0000, /* R179 */ + 0x0000, /* R180 */ + 0x0000, /* R181 */ + 0x0000, /* R182 */ + 0x0000, /* R183 */ + 0x0000, /* R184 */ + 0x0000, /* R185 */ + 0x0000, /* R186 */ + 0x0000, /* R187 */ + 0x0000, /* R188 */ + 0x0000, /* R189 */ + 0x0000, /* R190 */ + 0x0000, /* R191 */ + 0x0000, /* R192 */ + 0x0000, /* R193 */ + 0x0000, /* R194 */ + 0x0000, /* R195 */ + 0x0000, /* R196 */ + 0x0000, /* R197 */ + 0x0000, /* R198 */ + 0x0000, /* R199 */ + 0x0000, /* R200 */ + 0x0000, /* R201 */ + 0x0000, /* R202 */ + 0x0000, /* R203 */ + 0x0000, /* R204 */ + 0x0000, /* R205 */ + 0x0000, /* R206 */ + 0x0000, /* R207 */ + 0x0000, /* R208 */ + 0x0000, /* R209 */ + 0x0000, /* R210 */ + 0x0000, /* R211 */ + 0x0000, /* R212 */ + 0x0000, /* R213 */ + 0x0000, /* R214 */ + 0x0000, /* R215 */ + 0x0000, /* R216 */ + 0x0000, /* R217 */ + 0x0000, /* R218 */ + 0x0000, /* R219 */ + 0x0000, /* R220 */ + 0x0000, /* R221 */ + 0x0000, /* R222 */ + 0x0000, /* R223 */ + 0x0000, /* R224 */ + 0x0000, /* R225 */ + 0x0000, /* R226 */ + 0x0000, /* R227 */ + 0x0000, /* R228 */ + 0x0000, /* R229 */ + 0x0000, /* R230 */ + 0x0000, /* R231 */ + 0x0000, /* R232 */ + 0x0000, /* R233 */ + 0x0000, /* R234 */ + 0x0000, /* R235 */ + 0x0000, /* R236 */ + 0x0000, /* R237 */ + 0x0000, /* R238 */ + 0x0000, /* R239 */ + 0x0000, /* R240 */ + 0x0000, /* R241 */ + 0x0000, /* R242 */ + 0x0000, /* R243 */ + 0x0000, /* R244 */ + 0x0000, /* R245 */ + 0x0000, /* R246 */ + 0x0000, /* R247 */ + 0x0000, /* R248 */ + 0x0000, /* R249 */ + 0x0000, /* R250 */ + 0x0000, /* R251 */ + 0x0000, /* R252 */ + 0x0000, /* R253 */ + 0x0000, /* R254 */ + 0x0000, /* R255 */ + 0x0003, /* R256 - Chip Revision */ + 0x8004, /* R257 - Control Interface */ + 0x0000, /* R258 */ + 0x0000, /* R259 */ + 0x0000, /* R260 */ + 0x0000, /* R261 */ + 0x0000, /* R262 */ + 0x0000, /* R263 */ + 0x0000, /* R264 */ + 0x0000, /* R265 */ + 0x0000, /* R266 */ + 0x0000, /* R267 */ + 0x0000, /* R268 */ + 0x0000, /* R269 */ + 0x0000, /* R270 */ + 0x0000, /* R271 */ + 0x0000, /* R272 - Write Sequencer Ctrl (1) */ + 0x0000, /* R273 - Write Sequencer Ctrl (2) */ + 0x0000, /* R274 */ + 0x0000, /* R275 */ + 0x0000, /* R276 */ + 0x0000, /* R277 */ + 0x0000, /* R278 */ + 0x0000, /* R279 */ + 0x0000, /* R280 */ + 0x0000, /* R281 */ + 0x0000, /* R282 */ + 0x0000, /* R283 */ + 0x0000, /* R284 */ + 0x0000, /* R285 */ + 0x0000, /* R286 */ + 0x0000, /* R287 */ + 0x0000, /* R288 */ + 0x0000, /* R289 */ + 0x0000, /* R290 */ + 0x0000, /* R291 */ + 0x0000, /* R292 */ + 0x0000, /* R293 */ + 0x0000, /* R294 */ + 0x0000, /* R295 */ + 0x0000, /* R296 */ 
+ 0x0000, /* R297 */ + 0x0000, /* R298 */ + 0x0000, /* R299 */ + 0x0000, /* R300 */ + 0x0000, /* R301 */ + 0x0000, /* R302 */ + 0x0000, /* R303 */ + 0x0000, /* R304 */ + 0x0000, /* R305 */ + 0x0000, /* R306 */ + 0x0000, /* R307 */ + 0x0000, /* R308 */ + 0x0000, /* R309 */ + 0x0000, /* R310 */ + 0x0000, /* R311 */ + 0x0000, /* R312 */ + 0x0000, /* R313 */ + 0x0000, /* R314 */ + 0x0000, /* R315 */ + 0x0000, /* R316 */ + 0x0000, /* R317 */ + 0x0000, /* R318 */ + 0x0000, /* R319 */ + 0x0000, /* R320 */ + 0x0000, /* R321 */ + 0x0000, /* R322 */ + 0x0000, /* R323 */ + 0x0000, /* R324 */ + 0x0000, /* R325 */ + 0x0000, /* R326 */ + 0x0000, /* R327 */ + 0x0000, /* R328 */ + 0x0000, /* R329 */ + 0x0000, /* R330 */ + 0x0000, /* R331 */ + 0x0000, /* R332 */ + 0x0000, /* R333 */ + 0x0000, /* R334 */ + 0x0000, /* R335 */ + 0x0000, /* R336 */ + 0x0000, /* R337 */ + 0x0000, /* R338 */ + 0x0000, /* R339 */ + 0x0000, /* R340 */ + 0x0000, /* R341 */ + 0x0000, /* R342 */ + 0x0000, /* R343 */ + 0x0000, /* R344 */ + 0x0000, /* R345 */ + 0x0000, /* R346 */ + 0x0000, /* R347 */ + 0x0000, /* R348 */ + 0x0000, /* R349 */ + 0x0000, /* R350 */ + 0x0000, /* R351 */ + 0x0000, /* R352 */ + 0x0000, /* R353 */ + 0x0000, /* R354 */ + 0x0000, /* R355 */ + 0x0000, /* R356 */ + 0x0000, /* R357 */ + 0x0000, /* R358 */ + 0x0000, /* R359 */ + 0x0000, /* R360 */ + 0x0000, /* R361 */ + 0x0000, /* R362 */ + 0x0000, /* R363 */ + 0x0000, /* R364 */ + 0x0000, /* R365 */ + 0x0000, /* R366 */ + 0x0000, /* R367 */ + 0x0000, /* R368 */ + 0x0000, /* R369 */ + 0x0000, /* R370 */ + 0x0000, /* R371 */ + 0x0000, /* R372 */ + 0x0000, /* R373 */ + 0x0000, /* R374 */ + 0x0000, /* R375 */ + 0x0000, /* R376 */ + 0x0000, /* R377 */ + 0x0000, /* R378 */ + 0x0000, /* R379 */ + 0x0000, /* R380 */ + 0x0000, /* R381 */ + 0x0000, /* R382 */ + 0x0000, /* R383 */ + 0x0000, /* R384 */ + 0x0000, /* R385 */ + 0x0000, /* R386 */ + 0x0000, /* R387 */ + 0x0000, /* R388 */ + 0x0000, /* R389 */ + 0x0000, /* R390 */ + 0x0000, /* R391 */ + 0x0000, /* R392 */ + 0x0000, /* R393 */ + 0x0000, /* R394 */ + 0x0000, /* R395 */ + 0x0000, /* R396 */ + 0x0000, /* R397 */ + 0x0000, /* R398 */ + 0x0000, /* R399 */ + 0x0000, /* R400 */ + 0x0000, /* R401 */ + 0x0000, /* R402 */ + 0x0000, /* R403 */ + 0x0000, /* R404 */ + 0x0000, /* R405 */ + 0x0000, /* R406 */ + 0x0000, /* R407 */ + 0x0000, /* R408 */ + 0x0000, /* R409 */ + 0x0000, /* R410 */ + 0x0000, /* R411 */ + 0x0000, /* R412 */ + 0x0000, /* R413 */ + 0x0000, /* R414 */ + 0x0000, /* R415 */ + 0x0000, /* R416 */ + 0x0000, /* R417 */ + 0x0000, /* R418 */ + 0x0000, /* R419 */ + 0x0000, /* R420 */ + 0x0000, /* R421 */ + 0x0000, /* R422 */ + 0x0000, /* R423 */ + 0x0000, /* R424 */ + 0x0000, /* R425 */ + 0x0000, /* R426 */ + 0x0000, /* R427 */ + 0x0000, /* R428 */ + 0x0000, /* R429 */ + 0x0000, /* R430 */ + 0x0000, /* R431 */ + 0x0000, /* R432 */ + 0x0000, /* R433 */ + 0x0000, /* R434 */ + 0x0000, /* R435 */ + 0x0000, /* R436 */ + 0x0000, /* R437 */ + 0x0000, /* R438 */ + 0x0000, /* R439 */ + 0x0000, /* R440 */ + 0x0000, /* R441 */ + 0x0000, /* R442 */ + 0x0000, /* R443 */ + 0x0000, /* R444 */ + 0x0000, /* R445 */ + 0x0000, /* R446 */ + 0x0000, /* R447 */ + 0x0000, /* R448 */ + 0x0000, /* R449 */ + 0x0000, /* R450 */ + 0x0000, /* R451 */ + 0x0000, /* R452 */ + 0x0000, /* R453 */ + 0x0000, /* R454 */ + 0x0000, /* R455 */ + 0x0000, /* R456 */ + 0x0000, /* R457 */ + 0x0000, /* R458 */ + 0x0000, /* R459 */ + 0x0000, /* R460 */ + 0x0000, /* R461 */ + 0x0000, /* R462 */ + 0x0000, /* R463 */ + 0x0000, /* R464 */ + 0x0000, /* R465 */ + 
0x0000, /* R466 */ + 0x0000, /* R467 */ + 0x0000, /* R468 */ + 0x0000, /* R469 */ + 0x0000, /* R470 */ + 0x0000, /* R471 */ + 0x0000, /* R472 */ + 0x0000, /* R473 */ + 0x0000, /* R474 */ + 0x0000, /* R475 */ + 0x0000, /* R476 */ + 0x0000, /* R477 */ + 0x0000, /* R478 */ + 0x0000, /* R479 */ + 0x0000, /* R480 */ + 0x0000, /* R481 */ + 0x0000, /* R482 */ + 0x0000, /* R483 */ + 0x0000, /* R484 */ + 0x0000, /* R485 */ + 0x0000, /* R486 */ + 0x0000, /* R487 */ + 0x0000, /* R488 */ + 0x0000, /* R489 */ + 0x0000, /* R490 */ + 0x0000, /* R491 */ + 0x0000, /* R492 */ + 0x0000, /* R493 */ + 0x0000, /* R494 */ + 0x0000, /* R495 */ + 0x0000, /* R496 */ + 0x0000, /* R497 */ + 0x0000, /* R498 */ + 0x0000, /* R499 */ + 0x0000, /* R500 */ + 0x0000, /* R501 */ + 0x0000, /* R502 */ + 0x0000, /* R503 */ + 0x0000, /* R504 */ + 0x0000, /* R505 */ + 0x0000, /* R506 */ + 0x0000, /* R507 */ + 0x0000, /* R508 */ + 0x0000, /* R509 */ + 0x0000, /* R510 */ + 0x0000, /* R511 */ + 0x0000, /* R512 - AIF1 Clocking (1) */ + 0x0000, /* R513 - AIF1 Clocking (2) */ + 0x0000, /* R514 */ + 0x0000, /* R515 */ + 0x0000, /* R516 - AIF2 Clocking (1) */ + 0x0000, /* R517 - AIF2 Clocking (2) */ + 0x0000, /* R518 */ + 0x0000, /* R519 */ + 0x0000, /* R520 - Clocking (1) */ + 0x0000, /* R521 - Clocking (2) */ + 0x0000, /* R522 */ + 0x0000, /* R523 */ + 0x0000, /* R524 */ + 0x0000, /* R525 */ + 0x0000, /* R526 */ + 0x0000, /* R527 */ + 0x0083, /* R528 - AIF1 Rate */ + 0x0083, /* R529 - AIF2 Rate */ + 0x0000, /* R530 - Rate Status */ + 0x0000, /* R531 */ + 0x0000, /* R532 */ + 0x0000, /* R533 */ + 0x0000, /* R534 */ + 0x0000, /* R535 */ + 0x0000, /* R536 */ + 0x0000, /* R537 */ + 0x0000, /* R538 */ + 0x0000, /* R539 */ + 0x0000, /* R540 */ + 0x0000, /* R541 */ + 0x0000, /* R542 */ + 0x0000, /* R543 */ + 0x0000, /* R544 - FLL1 Control (1) */ + 0x0000, /* R545 - FLL1 Control (2) */ + 0x0000, /* R546 - FLL1 Control (3) */ + 0x0000, /* R547 - FLL1 Control (4) */ + 0x0C80, /* R548 - FLL1 Control (5) */ + 0x0000, /* R549 */ + 0x0000, /* R550 */ + 0x0000, /* R551 */ + 0x0000, /* R552 */ + 0x0000, /* R553 */ + 0x0000, /* R554 */ + 0x0000, /* R555 */ + 0x0000, /* R556 */ + 0x0000, /* R557 */ + 0x0000, /* R558 */ + 0x0000, /* R559 */ + 0x0000, /* R560 */ + 0x0000, /* R561 */ + 0x0000, /* R562 */ + 0x0000, /* R563 */ + 0x0000, /* R564 */ + 0x0000, /* R565 */ + 0x0000, /* R566 */ + 0x0000, /* R567 */ + 0x0000, /* R568 */ + 0x0000, /* R569 */ + 0x0000, /* R570 */ + 0x0000, /* R571 */ + 0x0000, /* R572 */ + 0x0000, /* R573 */ + 0x0000, /* R574 */ + 0x0000, /* R575 */ + 0x0000, /* R576 - FLL2 Control (1) */ + 0x0000, /* R577 - FLL2 Control (2) */ + 0x0000, /* R578 - FLL2 Control (3) */ + 0x0000, /* R579 - FLL2 Control (4) */ + 0x0C80, /* R580 - FLL2 Control (5) */ + 0x0000, /* R581 */ + 0x0000, /* R582 */ + 0x0000, /* R583 */ + 0x0000, /* R584 */ + 0x0000, /* R585 */ + 0x0000, /* R586 */ + 0x0000, /* R587 */ + 0x0000, /* R588 */ + 0x0000, /* R589 */ + 0x0000, /* R590 */ + 0x0000, /* R591 */ + 0x0000, /* R592 */ + 0x0000, /* R593 */ + 0x0000, /* R594 */ + 0x0000, /* R595 */ + 0x0000, /* R596 */ + 0x0000, /* R597 */ + 0x0000, /* R598 */ + 0x0000, /* R599 */ + 0x0000, /* R600 */ + 0x0000, /* R601 */ + 0x0000, /* R602 */ + 0x0000, /* R603 */ + 0x0000, /* R604 */ + 0x0000, /* R605 */ + 0x0000, /* R606 */ + 0x0000, /* R607 */ + 0x0000, /* R608 */ + 0x0000, /* R609 */ + 0x0000, /* R610 */ + 0x0000, /* R611 */ + 0x0000, /* R612 */ + 0x0000, /* R613 */ + 0x0000, /* R614 */ + 0x0000, /* R615 */ + 0x0000, /* R616 */ + 0x0000, /* R617 */ + 0x0000, /* R618 */ + 
0x0000, /* R619 */ + 0x0000, /* R620 */ + 0x0000, /* R621 */ + 0x0000, /* R622 */ + 0x0000, /* R623 */ + 0x0000, /* R624 */ + 0x0000, /* R625 */ + 0x0000, /* R626 */ + 0x0000, /* R627 */ + 0x0000, /* R628 */ + 0x0000, /* R629 */ + 0x0000, /* R630 */ + 0x0000, /* R631 */ + 0x0000, /* R632 */ + 0x0000, /* R633 */ + 0x0000, /* R634 */ + 0x0000, /* R635 */ + 0x0000, /* R636 */ + 0x0000, /* R637 */ + 0x0000, /* R638 */ + 0x0000, /* R639 */ + 0x0000, /* R640 */ + 0x0000, /* R641 */ + 0x0000, /* R642 */ + 0x0000, /* R643 */ + 0x0000, /* R644 */ + 0x0000, /* R645 */ + 0x0000, /* R646 */ + 0x0000, /* R647 */ + 0x0000, /* R648 */ + 0x0000, /* R649 */ + 0x0000, /* R650 */ + 0x0000, /* R651 */ + 0x0000, /* R652 */ + 0x0000, /* R653 */ + 0x0000, /* R654 */ + 0x0000, /* R655 */ + 0x0000, /* R656 */ + 0x0000, /* R657 */ + 0x0000, /* R658 */ + 0x0000, /* R659 */ + 0x0000, /* R660 */ + 0x0000, /* R661 */ + 0x0000, /* R662 */ + 0x0000, /* R663 */ + 0x0000, /* R664 */ + 0x0000, /* R665 */ + 0x0000, /* R666 */ + 0x0000, /* R667 */ + 0x0000, /* R668 */ + 0x0000, /* R669 */ + 0x0000, /* R670 */ + 0x0000, /* R671 */ + 0x0000, /* R672 */ + 0x0000, /* R673 */ + 0x0000, /* R674 */ + 0x0000, /* R675 */ + 0x0000, /* R676 */ + 0x0000, /* R677 */ + 0x0000, /* R678 */ + 0x0000, /* R679 */ + 0x0000, /* R680 */ + 0x0000, /* R681 */ + 0x0000, /* R682 */ + 0x0000, /* R683 */ + 0x0000, /* R684 */ + 0x0000, /* R685 */ + 0x0000, /* R686 */ + 0x0000, /* R687 */ + 0x0000, /* R688 */ + 0x0000, /* R689 */ + 0x0000, /* R690 */ + 0x0000, /* R691 */ + 0x0000, /* R692 */ + 0x0000, /* R693 */ + 0x0000, /* R694 */ + 0x0000, /* R695 */ + 0x0000, /* R696 */ + 0x0000, /* R697 */ + 0x0000, /* R698 */ + 0x0000, /* R699 */ + 0x0000, /* R700 */ + 0x0000, /* R701 */ + 0x0000, /* R702 */ + 0x0000, /* R703 */ + 0x0000, /* R704 */ + 0x0000, /* R705 */ + 0x0000, /* R706 */ + 0x0000, /* R707 */ + 0x0000, /* R708 */ + 0x0000, /* R709 */ + 0x0000, /* R710 */ + 0x0000, /* R711 */ + 0x0000, /* R712 */ + 0x0000, /* R713 */ + 0x0000, /* R714 */ + 0x0000, /* R715 */ + 0x0000, /* R716 */ + 0x0000, /* R717 */ + 0x0000, /* R718 */ + 0x0000, /* R719 */ + 0x0000, /* R720 */ + 0x0000, /* R721 */ + 0x0000, /* R722 */ + 0x0000, /* R723 */ + 0x0000, /* R724 */ + 0x0000, /* R725 */ + 0x0000, /* R726 */ + 0x0000, /* R727 */ + 0x0000, /* R728 */ + 0x0000, /* R729 */ + 0x0000, /* R730 */ + 0x0000, /* R731 */ + 0x0000, /* R732 */ + 0x0000, /* R733 */ + 0x0000, /* R734 */ + 0x0000, /* R735 */ + 0x0000, /* R736 */ + 0x0000, /* R737 */ + 0x0000, /* R738 */ + 0x0000, /* R739 */ + 0x0000, /* R740 */ + 0x0000, /* R741 */ + 0x0000, /* R742 */ + 0x0000, /* R743 */ + 0x0000, /* R744 */ + 0x0000, /* R745 */ + 0x0000, /* R746 */ + 0x0000, /* R747 */ + 0x0000, /* R748 */ + 0x0000, /* R749 */ + 0x0000, /* R750 */ + 0x0000, /* R751 */ + 0x0000, /* R752 */ + 0x0000, /* R753 */ + 0x0000, /* R754 */ + 0x0000, /* R755 */ + 0x0000, /* R756 */ + 0x0000, /* R757 */ + 0x0000, /* R758 */ + 0x0000, /* R759 */ + 0x0000, /* R760 */ + 0x0000, /* R761 */ + 0x0000, /* R762 */ + 0x0000, /* R763 */ + 0x0000, /* R764 */ + 0x0000, /* R765 */ + 0x0000, /* R766 */ + 0x0000, /* R767 */ + 0x4050, /* R768 - AIF1 Control (1) */ + 0x4000, /* R769 - AIF1 Control (2) */ + 0x0000, /* R770 - AIF1 Master/Slave */ + 0x0040, /* R771 - AIF1 BCLK */ + 0x0040, /* R772 - AIF1ADC LRCLK */ + 0x0040, /* R773 - AIF1DAC LRCLK */ + 0x0004, /* R774 - AIF1DAC Data */ + 0x0100, /* R775 - AIF1ADC Data */ + 0x0000, /* R776 */ + 0x0000, /* R777 */ + 0x0000, /* R778 */ + 0x0000, /* R779 */ + 0x0000, /* R780 */ + 0x0000, /* R781 */ + 
0x0000, /* R782 */ + 0x0000, /* R783 */ + 0x4050, /* R784 - AIF2 Control (1) */ + 0x4000, /* R785 - AIF2 Control (2) */ + 0x0000, /* R786 - AIF2 Master/Slave */ + 0x0040, /* R787 - AIF2 BCLK */ + 0x0040, /* R788 - AIF2ADC LRCLK */ + 0x0040, /* R789 - AIF2DAC LRCLK */ + 0x0000, /* R790 - AIF2DAC Data */ + 0x0000, /* R791 - AIF2ADC Data */ + 0x0000, /* R792 */ + 0x0000, /* R793 */ + 0x0000, /* R794 */ + 0x0000, /* R795 */ + 0x0000, /* R796 */ + 0x0000, /* R797 */ + 0x0000, /* R798 */ + 0x0000, /* R799 */ + 0x0000, /* R800 */ + 0x0000, /* R801 */ + 0x0000, /* R802 */ + 0x0000, /* R803 */ + 0x0000, /* R804 */ + 0x0000, /* R805 */ + 0x0000, /* R806 */ + 0x0000, /* R807 */ + 0x0000, /* R808 */ + 0x0000, /* R809 */ + 0x0000, /* R810 */ + 0x0000, /* R811 */ + 0x0000, /* R812 */ + 0x0000, /* R813 */ + 0x0000, /* R814 */ + 0x0000, /* R815 */ + 0x0000, /* R816 */ + 0x0000, /* R817 */ + 0x0000, /* R818 */ + 0x0000, /* R819 */ + 0x0000, /* R820 */ + 0x0000, /* R821 */ + 0x0000, /* R822 */ + 0x0000, /* R823 */ + 0x0000, /* R824 */ + 0x0000, /* R825 */ + 0x0000, /* R826 */ + 0x0000, /* R827 */ + 0x0000, /* R828 */ + 0x0000, /* R829 */ + 0x0000, /* R830 */ + 0x0000, /* R831 */ + 0x0000, /* R832 */ + 0x0000, /* R833 */ + 0x0000, /* R834 */ + 0x0000, /* R835 */ + 0x0000, /* R836 */ + 0x0000, /* R837 */ + 0x0000, /* R838 */ + 0x0000, /* R839 */ + 0x0000, /* R840 */ + 0x0000, /* R841 */ + 0x0000, /* R842 */ + 0x0000, /* R843 */ + 0x0000, /* R844 */ + 0x0000, /* R845 */ + 0x0000, /* R846 */ + 0x0000, /* R847 */ + 0x0000, /* R848 */ + 0x0000, /* R849 */ + 0x0000, /* R850 */ + 0x0000, /* R851 */ + 0x0000, /* R852 */ + 0x0000, /* R853 */ + 0x0000, /* R854 */ + 0x0000, /* R855 */ + 0x0000, /* R856 */ + 0x0000, /* R857 */ + 0x0000, /* R858 */ + 0x0000, /* R859 */ + 0x0000, /* R860 */ + 0x0000, /* R861 */ + 0x0000, /* R862 */ + 0x0000, /* R863 */ + 0x0000, /* R864 */ + 0x0000, /* R865 */ + 0x0000, /* R866 */ + 0x0000, /* R867 */ + 0x0000, /* R868 */ + 0x0000, /* R869 */ + 0x0000, /* R870 */ + 0x0000, /* R871 */ + 0x0000, /* R872 */ + 0x0000, /* R873 */ + 0x0000, /* R874 */ + 0x0000, /* R875 */ + 0x0000, /* R876 */ + 0x0000, /* R877 */ + 0x0000, /* R878 */ + 0x0000, /* R879 */ + 0x0000, /* R880 */ + 0x0000, /* R881 */ + 0x0000, /* R882 */ + 0x0000, /* R883 */ + 0x0000, /* R884 */ + 0x0000, /* R885 */ + 0x0000, /* R886 */ + 0x0000, /* R887 */ + 0x0000, /* R888 */ + 0x0000, /* R889 */ + 0x0000, /* R890 */ + 0x0000, /* R891 */ + 0x0000, /* R892 */ + 0x0000, /* R893 */ + 0x0000, /* R894 */ + 0x0000, /* R895 */ + 0x0000, /* R896 */ + 0x0000, /* R897 */ + 0x0000, /* R898 */ + 0x0000, /* R899 */ + 0x0000, /* R900 */ + 0x0000, /* R901 */ + 0x0000, /* R902 */ + 0x0000, /* R903 */ + 0x0000, /* R904 */ + 0x0000, /* R905 */ + 0x0000, /* R906 */ + 0x0000, /* R907 */ + 0x0000, /* R908 */ + 0x0000, /* R909 */ + 0x0000, /* R910 */ + 0x0000, /* R911 */ + 0x0000, /* R912 */ + 0x0000, /* R913 */ + 0x0000, /* R914 */ + 0x0000, /* R915 */ + 0x0000, /* R916 */ + 0x0000, /* R917 */ + 0x0000, /* R918 */ + 0x0000, /* R919 */ + 0x0000, /* R920 */ + 0x0000, /* R921 */ + 0x0000, /* R922 */ + 0x0000, /* R923 */ + 0x0000, /* R924 */ + 0x0000, /* R925 */ + 0x0000, /* R926 */ + 0x0000, /* R927 */ + 0x0000, /* R928 */ + 0x0000, /* R929 */ + 0x0000, /* R930 */ + 0x0000, /* R931 */ + 0x0000, /* R932 */ + 0x0000, /* R933 */ + 0x0000, /* R934 */ + 0x0000, /* R935 */ + 0x0000, /* R936 */ + 0x0000, /* R937 */ + 0x0000, /* R938 */ + 0x0000, /* R939 */ + 0x0000, /* R940 */ + 0x0000, /* R941 */ + 0x0000, /* R942 */ + 0x0000, /* R943 */ + 0x0000, /* R944 */ + 
0x0000, /* R945 */ + 0x0000, /* R946 */ + 0x0000, /* R947 */ + 0x0000, /* R948 */ + 0x0000, /* R949 */ + 0x0000, /* R950 */ + 0x0000, /* R951 */ + 0x0000, /* R952 */ + 0x0000, /* R953 */ + 0x0000, /* R954 */ + 0x0000, /* R955 */ + 0x0000, /* R956 */ + 0x0000, /* R957 */ + 0x0000, /* R958 */ + 0x0000, /* R959 */ + 0x0000, /* R960 */ + 0x0000, /* R961 */ + 0x0000, /* R962 */ + 0x0000, /* R963 */ + 0x0000, /* R964 */ + 0x0000, /* R965 */ + 0x0000, /* R966 */ + 0x0000, /* R967 */ + 0x0000, /* R968 */ + 0x0000, /* R969 */ + 0x0000, /* R970 */ + 0x0000, /* R971 */ + 0x0000, /* R972 */ + 0x0000, /* R973 */ + 0x0000, /* R974 */ + 0x0000, /* R975 */ + 0x0000, /* R976 */ + 0x0000, /* R977 */ + 0x0000, /* R978 */ + 0x0000, /* R979 */ + 0x0000, /* R980 */ + 0x0000, /* R981 */ + 0x0000, /* R982 */ + 0x0000, /* R983 */ + 0x0000, /* R984 */ + 0x0000, /* R985 */ + 0x0000, /* R986 */ + 0x0000, /* R987 */ + 0x0000, /* R988 */ + 0x0000, /* R989 */ + 0x0000, /* R990 */ + 0x0000, /* R991 */ + 0x0000, /* R992 */ + 0x0000, /* R993 */ + 0x0000, /* R994 */ + 0x0000, /* R995 */ + 0x0000, /* R996 */ + 0x0000, /* R997 */ + 0x0000, /* R998 */ + 0x0000, /* R999 */ + 0x0000, /* R1000 */ + 0x0000, /* R1001 */ + 0x0000, /* R1002 */ + 0x0000, /* R1003 */ + 0x0000, /* R1004 */ + 0x0000, /* R1005 */ + 0x0000, /* R1006 */ + 0x0000, /* R1007 */ + 0x0000, /* R1008 */ + 0x0000, /* R1009 */ + 0x0000, /* R1010 */ + 0x0000, /* R1011 */ + 0x0000, /* R1012 */ + 0x0000, /* R1013 */ + 0x0000, /* R1014 */ + 0x0000, /* R1015 */ + 0x0000, /* R1016 */ + 0x0000, /* R1017 */ + 0x0000, /* R1018 */ + 0x0000, /* R1019 */ + 0x0000, /* R1020 */ + 0x0000, /* R1021 */ + 0x0000, /* R1022 */ + 0x0000, /* R1023 */ + 0x00C0, /* R1024 - AIF1 ADC1 Left Volume */ + 0x00C0, /* R1025 - AIF1 ADC1 Right Volume */ + 0x00C0, /* R1026 - AIF1 DAC1 Left Volume */ + 0x00C0, /* R1027 - AIF1 DAC1 Right Volume */ + 0x00C0, /* R1028 - AIF1 ADC2 Left Volume */ + 0x00C0, /* R1029 - AIF1 ADC2 Right Volume */ + 0x00C0, /* R1030 - AIF1 DAC2 Left Volume */ + 0x00C0, /* R1031 - AIF1 DAC2 Right Volume */ + 0x0000, /* R1032 */ + 0x0000, /* R1033 */ + 0x0000, /* R1034 */ + 0x0000, /* R1035 */ + 0x0000, /* R1036 */ + 0x0000, /* R1037 */ + 0x0000, /* R1038 */ + 0x0000, /* R1039 */ + 0x0000, /* R1040 - AIF1 ADC1 Filters */ + 0x0000, /* R1041 - AIF1 ADC2 Filters */ + 0x0000, /* R1042 */ + 0x0000, /* R1043 */ + 0x0000, /* R1044 */ + 0x0000, /* R1045 */ + 0x0000, /* R1046 */ + 0x0000, /* R1047 */ + 0x0000, /* R1048 */ + 0x0000, /* R1049 */ + 0x0000, /* R1050 */ + 0x0000, /* R1051 */ + 0x0000, /* R1052 */ + 0x0000, /* R1053 */ + 0x0000, /* R1054 */ + 0x0000, /* R1055 */ + 0x0200, /* R1056 - AIF1 DAC1 Filters (1) */ + 0x0010, /* R1057 - AIF1 DAC1 Filters (2) */ + 0x0200, /* R1058 - AIF1 DAC2 Filters (1) */ + 0x0010, /* R1059 - AIF1 DAC2 Filters (2) */ + 0x0000, /* R1060 */ + 0x0000, /* R1061 */ + 0x0000, /* R1062 */ + 0x0000, /* R1063 */ + 0x0000, /* R1064 */ + 0x0000, /* R1065 */ + 0x0000, /* R1066 */ + 0x0000, /* R1067 */ + 0x0000, /* R1068 */ + 0x0000, /* R1069 */ + 0x0000, /* R1070 */ + 0x0000, /* R1071 */ + 0x0000, /* R1072 */ + 0x0000, /* R1073 */ + 0x0000, /* R1074 */ + 0x0000, /* R1075 */ + 0x0000, /* R1076 */ + 0x0000, /* R1077 */ + 0x0000, /* R1078 */ + 0x0000, /* R1079 */ + 0x0000, /* R1080 */ + 0x0000, /* R1081 */ + 0x0000, /* R1082 */ + 0x0000, /* R1083 */ + 0x0000, /* R1084 */ + 0x0000, /* R1085 */ + 0x0000, /* R1086 */ + 0x0000, /* R1087 */ + 0x0098, /* R1088 - AIF1 DRC1 (1) */ + 0x0845, /* R1089 - AIF1 DRC1 (2) */ + 0x0000, /* R1090 - AIF1 DRC1 (3) */ + 0x0000, /* R1091 
- AIF1 DRC1 (4) */ + 0x0000, /* R1092 - AIF1 DRC1 (5) */ + 0x0000, /* R1093 */ + 0x0000, /* R1094 */ + 0x0000, /* R1095 */ + 0x0000, /* R1096 */ + 0x0000, /* R1097 */ + 0x0000, /* R1098 */ + 0x0000, /* R1099 */ + 0x0000, /* R1100 */ + 0x0000, /* R1101 */ + 0x0000, /* R1102 */ + 0x0000, /* R1103 */ + 0x0098, /* R1104 - AIF1 DRC2 (1) */ + 0x0845, /* R1105 - AIF1 DRC2 (2) */ + 0x0000, /* R1106 - AIF1 DRC2 (3) */ + 0x0000, /* R1107 - AIF1 DRC2 (4) */ + 0x0000, /* R1108 - AIF1 DRC2 (5) */ + 0x0000, /* R1109 */ + 0x0000, /* R1110 */ + 0x0000, /* R1111 */ + 0x0000, /* R1112 */ + 0x0000, /* R1113 */ + 0x0000, /* R1114 */ + 0x0000, /* R1115 */ + 0x0000, /* R1116 */ + 0x0000, /* R1117 */ + 0x0000, /* R1118 */ + 0x0000, /* R1119 */ + 0x0000, /* R1120 */ + 0x0000, /* R1121 */ + 0x0000, /* R1122 */ + 0x0000, /* R1123 */ + 0x0000, /* R1124 */ + 0x0000, /* R1125 */ + 0x0000, /* R1126 */ + 0x0000, /* R1127 */ + 0x0000, /* R1128 */ + 0x0000, /* R1129 */ + 0x0000, /* R1130 */ + 0x0000, /* R1131 */ + 0x0000, /* R1132 */ + 0x0000, /* R1133 */ + 0x0000, /* R1134 */ + 0x0000, /* R1135 */ + 0x0000, /* R1136 */ + 0x0000, /* R1137 */ + 0x0000, /* R1138 */ + 0x0000, /* R1139 */ + 0x0000, /* R1140 */ + 0x0000, /* R1141 */ + 0x0000, /* R1142 */ + 0x0000, /* R1143 */ + 0x0000, /* R1144 */ + 0x0000, /* R1145 */ + 0x0000, /* R1146 */ + 0x0000, /* R1147 */ + 0x0000, /* R1148 */ + 0x0000, /* R1149 */ + 0x0000, /* R1150 */ + 0x0000, /* R1151 */ + 0x6318, /* R1152 - AIF1 DAC1 EQ Gains (1) */ + 0x6300, /* R1153 - AIF1 DAC1 EQ Gains (2) */ + 0x0FCA, /* R1154 - AIF1 DAC1 EQ Band 1 A */ + 0x0400, /* R1155 - AIF1 DAC1 EQ Band 1 B */ + 0x00D8, /* R1156 - AIF1 DAC1 EQ Band 1 PG */ + 0x1EB5, /* R1157 - AIF1 DAC1 EQ Band 2 A */ + 0xF145, /* R1158 - AIF1 DAC1 EQ Band 2 B */ + 0x0B75, /* R1159 - AIF1 DAC1 EQ Band 2 C */ + 0x01C5, /* R1160 - AIF1 DAC1 EQ Band 2 PG */ + 0x1C58, /* R1161 - AIF1 DAC1 EQ Band 3 A */ + 0xF373, /* R1162 - AIF1 DAC1 EQ Band 3 B */ + 0x0A54, /* R1163 - AIF1 DAC1 EQ Band 3 C */ + 0x0558, /* R1164 - AIF1 DAC1 EQ Band 3 PG */ + 0x168E, /* R1165 - AIF1 DAC1 EQ Band 4 A */ + 0xF829, /* R1166 - AIF1 DAC1 EQ Band 4 B */ + 0x07AD, /* R1167 - AIF1 DAC1 EQ Band 4 C */ + 0x1103, /* R1168 - AIF1 DAC1 EQ Band 4 PG */ + 0x0564, /* R1169 - AIF1 DAC1 EQ Band 5 A */ + 0x0559, /* R1170 - AIF1 DAC1 EQ Band 5 B */ + 0x4000, /* R1171 - AIF1 DAC1 EQ Band 5 PG */ + 0x0000, /* R1172 */ + 0x0000, /* R1173 */ + 0x0000, /* R1174 */ + 0x0000, /* R1175 */ + 0x0000, /* R1176 */ + 0x0000, /* R1177 */ + 0x0000, /* R1178 */ + 0x0000, /* R1179 */ + 0x0000, /* R1180 */ + 0x0000, /* R1181 */ + 0x0000, /* R1182 */ + 0x0000, /* R1183 */ + 0x6318, /* R1184 - AIF1 DAC2 EQ Gains (1) */ + 0x6300, /* R1185 - AIF1 DAC2 EQ Gains (2) */ + 0x0FCA, /* R1186 - AIF1 DAC2 EQ Band 1 A */ + 0x0400, /* R1187 - AIF1 DAC2 EQ Band 1 B */ + 0x00D8, /* R1188 - AIF1 DAC2 EQ Band 1 PG */ + 0x1EB5, /* R1189 - AIF1 DAC2 EQ Band 2 A */ + 0xF145, /* R1190 - AIF1 DAC2 EQ Band 2 B */ + 0x0B75, /* R1191 - AIF1 DAC2 EQ Band 2 C */ + 0x01C5, /* R1192 - AIF1 DAC2 EQ Band 2 PG */ + 0x1C58, /* R1193 - AIF1 DAC2 EQ Band 3 A */ + 0xF373, /* R1194 - AIF1 DAC2 EQ Band 3 B */ + 0x0A54, /* R1195 - AIF1 DAC2 EQ Band 3 C */ + 0x0558, /* R1196 - AIF1 DAC2 EQ Band 3 PG */ + 0x168E, /* R1197 - AIF1 DAC2 EQ Band 4 A */ + 0xF829, /* R1198 - AIF1 DAC2 EQ Band 4 B */ + 0x07AD, /* R1199 - AIF1 DAC2 EQ Band 4 C */ + 0x1103, /* R1200 - AIF1 DAC2 EQ Band 4 PG */ + 0x0564, /* R1201 - AIF1 DAC2 EQ Band 5 A */ + 0x0559, /* R1202 - AIF1 DAC2 EQ Band 5 B */ + 0x4000, /* R1203 - AIF1 DAC2 EQ Band 5 PG */ + 
0x0000, /* R1204 */ + 0x0000, /* R1205 */ + 0x0000, /* R1206 */ + 0x0000, /* R1207 */ + 0x0000, /* R1208 */ + 0x0000, /* R1209 */ + 0x0000, /* R1210 */ + 0x0000, /* R1211 */ + 0x0000, /* R1212 */ + 0x0000, /* R1213 */ + 0x0000, /* R1214 */ + 0x0000, /* R1215 */ + 0x0000, /* R1216 */ + 0x0000, /* R1217 */ + 0x0000, /* R1218 */ + 0x0000, /* R1219 */ + 0x0000, /* R1220 */ + 0x0000, /* R1221 */ + 0x0000, /* R1222 */ + 0x0000, /* R1223 */ + 0x0000, /* R1224 */ + 0x0000, /* R1225 */ + 0x0000, /* R1226 */ + 0x0000, /* R1227 */ + 0x0000, /* R1228 */ + 0x0000, /* R1229 */ + 0x0000, /* R1230 */ + 0x0000, /* R1231 */ + 0x0000, /* R1232 */ + 0x0000, /* R1233 */ + 0x0000, /* R1234 */ + 0x0000, /* R1235 */ + 0x0000, /* R1236 */ + 0x0000, /* R1237 */ + 0x0000, /* R1238 */ + 0x0000, /* R1239 */ + 0x0000, /* R1240 */ + 0x0000, /* R1241 */ + 0x0000, /* R1242 */ + 0x0000, /* R1243 */ + 0x0000, /* R1244 */ + 0x0000, /* R1245 */ + 0x0000, /* R1246 */ + 0x0000, /* R1247 */ + 0x0000, /* R1248 */ + 0x0000, /* R1249 */ + 0x0000, /* R1250 */ + 0x0000, /* R1251 */ + 0x0000, /* R1252 */ + 0x0000, /* R1253 */ + 0x0000, /* R1254 */ + 0x0000, /* R1255 */ + 0x0000, /* R1256 */ + 0x0000, /* R1257 */ + 0x0000, /* R1258 */ + 0x0000, /* R1259 */ + 0x0000, /* R1260 */ + 0x0000, /* R1261 */ + 0x0000, /* R1262 */ + 0x0000, /* R1263 */ + 0x0000, /* R1264 */ + 0x0000, /* R1265 */ + 0x0000, /* R1266 */ + 0x0000, /* R1267 */ + 0x0000, /* R1268 */ + 0x0000, /* R1269 */ + 0x0000, /* R1270 */ + 0x0000, /* R1271 */ + 0x0000, /* R1272 */ + 0x0000, /* R1273 */ + 0x0000, /* R1274 */ + 0x0000, /* R1275 */ + 0x0000, /* R1276 */ + 0x0000, /* R1277 */ + 0x0000, /* R1278 */ + 0x0000, /* R1279 */ + 0x00C0, /* R1280 - AIF2 ADC Left Volume */ + 0x00C0, /* R1281 - AIF2 ADC Right Volume */ + 0x00C0, /* R1282 - AIF2 DAC Left Volume */ + 0x00C0, /* R1283 - AIF2 DAC Right Volume */ + 0x0000, /* R1284 */ + 0x0000, /* R1285 */ + 0x0000, /* R1286 */ + 0x0000, /* R1287 */ + 0x0000, /* R1288 */ + 0x0000, /* R1289 */ + 0x0000, /* R1290 */ + 0x0000, /* R1291 */ + 0x0000, /* R1292 */ + 0x0000, /* R1293 */ + 0x0000, /* R1294 */ + 0x0000, /* R1295 */ + 0x0000, /* R1296 - AIF2 ADC Filters */ + 0x0000, /* R1297 */ + 0x0000, /* R1298 */ + 0x0000, /* R1299 */ + 0x0000, /* R1300 */ + 0x0000, /* R1301 */ + 0x0000, /* R1302 */ + 0x0000, /* R1303 */ + 0x0000, /* R1304 */ + 0x0000, /* R1305 */ + 0x0000, /* R1306 */ + 0x0000, /* R1307 */ + 0x0000, /* R1308 */ + 0x0000, /* R1309 */ + 0x0000, /* R1310 */ + 0x0000, /* R1311 */ + 0x0200, /* R1312 - AIF2 DAC Filters (1) */ + 0x0010, /* R1313 - AIF2 DAC Filters (2) */ + 0x0000, /* R1314 */ + 0x0000, /* R1315 */ + 0x0000, /* R1316 */ + 0x0000, /* R1317 */ + 0x0000, /* R1318 */ + 0x0000, /* R1319 */ + 0x0000, /* R1320 */ + 0x0000, /* R1321 */ + 0x0000, /* R1322 */ + 0x0000, /* R1323 */ + 0x0000, /* R1324 */ + 0x0000, /* R1325 */ + 0x0000, /* R1326 */ + 0x0000, /* R1327 */ + 0x0000, /* R1328 */ + 0x0000, /* R1329 */ + 0x0000, /* R1330 */ + 0x0000, /* R1331 */ + 0x0000, /* R1332 */ + 0x0000, /* R1333 */ + 0x0000, /* R1334 */ + 0x0000, /* R1335 */ + 0x0000, /* R1336 */ + 0x0000, /* R1337 */ + 0x0000, /* R1338 */ + 0x0000, /* R1339 */ + 0x0000, /* R1340 */ + 0x0000, /* R1341 */ + 0x0000, /* R1342 */ + 0x0000, /* R1343 */ + 0x0098, /* R1344 - AIF2 DRC (1) */ + 0x0845, /* R1345 - AIF2 DRC (2) */ + 0x0000, /* R1346 - AIF2 DRC (3) */ + 0x0000, /* R1347 - AIF2 DRC (4) */ + 0x0000, /* R1348 - AIF2 DRC (5) */ + 0x0000, /* R1349 */ + 0x0000, /* R1350 */ + 0x0000, /* R1351 */ + 0x0000, /* R1352 */ + 0x0000, /* R1353 */ + 0x0000, /* R1354 */ 
+ 0x0000, /* R1355 */ + 0x0000, /* R1356 */ + 0x0000, /* R1357 */ + 0x0000, /* R1358 */ + 0x0000, /* R1359 */ + 0x0000, /* R1360 */ + 0x0000, /* R1361 */ + 0x0000, /* R1362 */ + 0x0000, /* R1363 */ + 0x0000, /* R1364 */ + 0x0000, /* R1365 */ + 0x0000, /* R1366 */ + 0x0000, /* R1367 */ + 0x0000, /* R1368 */ + 0x0000, /* R1369 */ + 0x0000, /* R1370 */ + 0x0000, /* R1371 */ + 0x0000, /* R1372 */ + 0x0000, /* R1373 */ + 0x0000, /* R1374 */ + 0x0000, /* R1375 */ + 0x0000, /* R1376 */ + 0x0000, /* R1377 */ + 0x0000, /* R1378 */ + 0x0000, /* R1379 */ + 0x0000, /* R1380 */ + 0x0000, /* R1381 */ + 0x0000, /* R1382 */ + 0x0000, /* R1383 */ + 0x0000, /* R1384 */ + 0x0000, /* R1385 */ + 0x0000, /* R1386 */ + 0x0000, /* R1387 */ + 0x0000, /* R1388 */ + 0x0000, /* R1389 */ + 0x0000, /* R1390 */ + 0x0000, /* R1391 */ + 0x0000, /* R1392 */ + 0x0000, /* R1393 */ + 0x0000, /* R1394 */ + 0x0000, /* R1395 */ + 0x0000, /* R1396 */ + 0x0000, /* R1397 */ + 0x0000, /* R1398 */ + 0x0000, /* R1399 */ + 0x0000, /* R1400 */ + 0x0000, /* R1401 */ + 0x0000, /* R1402 */ + 0x0000, /* R1403 */ + 0x0000, /* R1404 */ + 0x0000, /* R1405 */ + 0x0000, /* R1406 */ + 0x0000, /* R1407 */ + 0x6318, /* R1408 - AIF2 EQ Gains (1) */ + 0x6300, /* R1409 - AIF2 EQ Gains (2) */ + 0x0FCA, /* R1410 - AIF2 EQ Band 1 A */ + 0x0400, /* R1411 - AIF2 EQ Band 1 B */ + 0x00D8, /* R1412 - AIF2 EQ Band 1 PG */ + 0x1EB5, /* R1413 - AIF2 EQ Band 2 A */ + 0xF145, /* R1414 - AIF2 EQ Band 2 B */ + 0x0B75, /* R1415 - AIF2 EQ Band 2 C */ + 0x01C5, /* R1416 - AIF2 EQ Band 2 PG */ + 0x1C58, /* R1417 - AIF2 EQ Band 3 A */ + 0xF373, /* R1418 - AIF2 EQ Band 3 B */ + 0x0A54, /* R1419 - AIF2 EQ Band 3 C */ + 0x0558, /* R1420 - AIF2 EQ Band 3 PG */ + 0x168E, /* R1421 - AIF2 EQ Band 4 A */ + 0xF829, /* R1422 - AIF2 EQ Band 4 B */ + 0x07AD, /* R1423 - AIF2 EQ Band 4 C */ + 0x1103, /* R1424 - AIF2 EQ Band 4 PG */ + 0x0564, /* R1425 - AIF2 EQ Band 5 A */ + 0x0559, /* R1426 - AIF2 EQ Band 5 B */ + 0x4000, /* R1427 - AIF2 EQ Band 5 PG */ + 0x0000, /* R1428 */ + 0x0000, /* R1429 */ + 0x0000, /* R1430 */ + 0x0000, /* R1431 */ + 0x0000, /* R1432 */ + 0x0000, /* R1433 */ + 0x0000, /* R1434 */ + 0x0000, /* R1435 */ + 0x0000, /* R1436 */ + 0x0000, /* R1437 */ + 0x0000, /* R1438 */ + 0x0000, /* R1439 */ + 0x0000, /* R1440 */ + 0x0000, /* R1441 */ + 0x0000, /* R1442 */ + 0x0000, /* R1443 */ + 0x0000, /* R1444 */ + 0x0000, /* R1445 */ + 0x0000, /* R1446 */ + 0x0000, /* R1447 */ + 0x0000, /* R1448 */ + 0x0000, /* R1449 */ + 0x0000, /* R1450 */ + 0x0000, /* R1451 */ + 0x0000, /* R1452 */ + 0x0000, /* R1453 */ + 0x0000, /* R1454 */ + 0x0000, /* R1455 */ + 0x0000, /* R1456 */ + 0x0000, /* R1457 */ + 0x0000, /* R1458 */ + 0x0000, /* R1459 */ + 0x0000, /* R1460 */ + 0x0000, /* R1461 */ + 0x0000, /* R1462 */ + 0x0000, /* R1463 */ + 0x0000, /* R1464 */ + 0x0000, /* R1465 */ + 0x0000, /* R1466 */ + 0x0000, /* R1467 */ + 0x0000, /* R1468 */ + 0x0000, /* R1469 */ + 0x0000, /* R1470 */ + 0x0000, /* R1471 */ + 0x0000, /* R1472 */ + 0x0000, /* R1473 */ + 0x0000, /* R1474 */ + 0x0000, /* R1475 */ + 0x0000, /* R1476 */ + 0x0000, /* R1477 */ + 0x0000, /* R1478 */ + 0x0000, /* R1479 */ + 0x0000, /* R1480 */ + 0x0000, /* R1481 */ + 0x0000, /* R1482 */ + 0x0000, /* R1483 */ + 0x0000, /* R1484 */ + 0x0000, /* R1485 */ + 0x0000, /* R1486 */ + 0x0000, /* R1487 */ + 0x0000, /* R1488 */ + 0x0000, /* R1489 */ + 0x0000, /* R1490 */ + 0x0000, /* R1491 */ + 0x0000, /* R1492 */ + 0x0000, /* R1493 */ + 0x0000, /* R1494 */ + 0x0000, /* R1495 */ + 0x0000, /* R1496 */ + 0x0000, /* R1497 */ + 0x0000, /* R1498 */ 
+ 0x0000, /* R1499 */ + 0x0000, /* R1500 */ + 0x0000, /* R1501 */ + 0x0000, /* R1502 */ + 0x0000, /* R1503 */ + 0x0000, /* R1504 */ + 0x0000, /* R1505 */ + 0x0000, /* R1506 */ + 0x0000, /* R1507 */ + 0x0000, /* R1508 */ + 0x0000, /* R1509 */ + 0x0000, /* R1510 */ + 0x0000, /* R1511 */ + 0x0000, /* R1512 */ + 0x0000, /* R1513 */ + 0x0000, /* R1514 */ + 0x0000, /* R1515 */ + 0x0000, /* R1516 */ + 0x0000, /* R1517 */ + 0x0000, /* R1518 */ + 0x0000, /* R1519 */ + 0x0000, /* R1520 */ + 0x0000, /* R1521 */ + 0x0000, /* R1522 */ + 0x0000, /* R1523 */ + 0x0000, /* R1524 */ + 0x0000, /* R1525 */ + 0x0000, /* R1526 */ + 0x0000, /* R1527 */ + 0x0000, /* R1528 */ + 0x0000, /* R1529 */ + 0x0000, /* R1530 */ + 0x0000, /* R1531 */ + 0x0000, /* R1532 */ + 0x0000, /* R1533 */ + 0x0000, /* R1534 */ + 0x0000, /* R1535 */ + 0x0000, /* R1536 - DAC1 Mixer Volumes */ + 0x0000, /* R1537 - DAC1 Left Mixer Routing */ + 0x0000, /* R1538 - DAC1 Right Mixer Routing */ + 0x0000, /* R1539 - DAC2 Mixer Volumes */ + 0x0000, /* R1540 - DAC2 Left Mixer Routing */ + 0x0000, /* R1541 - DAC2 Right Mixer Routing */ + 0x0000, /* R1542 - AIF1 ADC1 Left Mixer Routing */ + 0x0000, /* R1543 - AIF1 ADC1 Right Mixer Routing */ + 0x0000, /* R1544 - AIF1 ADC2 Left Mixer Routing */ + 0x0000, /* R1545 - AIF1 ADC2 Right mixer Routing */ + 0x0000, /* R1546 */ + 0x0000, /* R1547 */ + 0x0000, /* R1548 */ + 0x0000, /* R1549 */ + 0x0000, /* R1550 */ + 0x0000, /* R1551 */ + 0x02C0, /* R1552 - DAC1 Left Volume */ + 0x02C0, /* R1553 - DAC1 Right Volume */ + 0x02C0, /* R1554 - DAC2 Left Volume */ + 0x02C0, /* R1555 - DAC2 Right Volume */ + 0x0000, /* R1556 - DAC Softmute */ + 0x0000, /* R1557 */ + 0x0000, /* R1558 */ + 0x0000, /* R1559 */ + 0x0000, /* R1560 */ + 0x0000, /* R1561 */ + 0x0000, /* R1562 */ + 0x0000, /* R1563 */ + 0x0000, /* R1564 */ + 0x0000, /* R1565 */ + 0x0000, /* R1566 */ + 0x0000, /* R1567 */ + 0x0002, /* R1568 - Oversampling */ + 0x0000, /* R1569 - Sidetone */ +}; diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 4d3e6f1ac584..247a6a99feb8 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -18,15 +18,17 @@ #include #include #include +#include #include #include #include +#include #include #include #include -#include #include #include +#include #include #include @@ -57,8 +59,6 @@ static int wm8994_retune_mobile_base[] = { WM8994_AIF2_EQ_GAINS_1, }; -#define WM8994_REG_CACHE_SIZE 0x621 - struct wm8994_micdet { struct snd_soc_jack *jack; int det; @@ -71,7 +71,6 @@ struct wm8994_priv { enum snd_soc_control_type control_type; void *control_data; struct snd_soc_codec *codec; - u16 reg_cache[WM8994_REG_CACHE_SIZE + 1]; int sysclk[2]; int sysclk_rate[2]; int mclk[2]; @@ -81,6 +80,8 @@ struct wm8994_priv { int dac_rates[2]; int lrclk_shared[2]; + int mbc_ena[3]; + /* Platform dependant DRC configuration */ const char **drc_texts; int drc_cfg[WM8994_NUM_DRC]; @@ -92,1588 +93,22 @@ struct wm8994_priv { int retune_mobile_cfg[WM8994_NUM_EQ]; struct soc_enum retune_mobile_enum; + /* Platform dependant MBC configuration */ + int mbc_cfg; + const char **mbc_texts; + struct soc_enum mbc_enum; + struct wm8994_micdet micdet[2]; + wm8958_micdet_cb jack_cb; + void *jack_cb_data; + bool jack_is_mic; + bool jack_is_video; + int revision; struct wm8994_pdata *pdata; }; -static const struct { - unsigned short readable; /* Mask of readable bits */ - unsigned short writable; /* Mask of writable bits */ -} access_masks[] = { - { 0xFFFF, 0xFFFF }, /* R0 - Software Reset */ - { 0x3B37, 0x3B37 }, /* R1 - Power 
Management (1) */ - { 0x6BF0, 0x6BF0 }, /* R2 - Power Management (2) */ - { 0x3FF0, 0x3FF0 }, /* R3 - Power Management (3) */ - { 0x3F3F, 0x3F3F }, /* R4 - Power Management (4) */ - { 0x3F0F, 0x3F0F }, /* R5 - Power Management (5) */ - { 0x003F, 0x003F }, /* R6 - Power Management (6) */ - { 0x0000, 0x0000 }, /* R7 */ - { 0x0000, 0x0000 }, /* R8 */ - { 0x0000, 0x0000 }, /* R9 */ - { 0x0000, 0x0000 }, /* R10 */ - { 0x0000, 0x0000 }, /* R11 */ - { 0x0000, 0x0000 }, /* R12 */ - { 0x0000, 0x0000 }, /* R13 */ - { 0x0000, 0x0000 }, /* R14 */ - { 0x0000, 0x0000 }, /* R15 */ - { 0x0000, 0x0000 }, /* R16 */ - { 0x0000, 0x0000 }, /* R17 */ - { 0x0000, 0x0000 }, /* R18 */ - { 0x0000, 0x0000 }, /* R19 */ - { 0x0000, 0x0000 }, /* R20 */ - { 0x01C0, 0x01C0 }, /* R21 - Input Mixer (1) */ - { 0x0000, 0x0000 }, /* R22 */ - { 0x0000, 0x0000 }, /* R23 */ - { 0x00DF, 0x01DF }, /* R24 - Left Line Input 1&2 Volume */ - { 0x00DF, 0x01DF }, /* R25 - Left Line Input 3&4 Volume */ - { 0x00DF, 0x01DF }, /* R26 - Right Line Input 1&2 Volume */ - { 0x00DF, 0x01DF }, /* R27 - Right Line Input 3&4 Volume */ - { 0x00FF, 0x01FF }, /* R28 - Left Output Volume */ - { 0x00FF, 0x01FF }, /* R29 - Right Output Volume */ - { 0x0077, 0x0077 }, /* R30 - Line Outputs Volume */ - { 0x0030, 0x0030 }, /* R31 - HPOUT2 Volume */ - { 0x00FF, 0x01FF }, /* R32 - Left OPGA Volume */ - { 0x00FF, 0x01FF }, /* R33 - Right OPGA Volume */ - { 0x007F, 0x007F }, /* R34 - SPKMIXL Attenuation */ - { 0x017F, 0x017F }, /* R35 - SPKMIXR Attenuation */ - { 0x003F, 0x003F }, /* R36 - SPKOUT Mixers */ - { 0x003F, 0x003F }, /* R37 - ClassD */ - { 0x00FF, 0x01FF }, /* R38 - Speaker Volume Left */ - { 0x00FF, 0x01FF }, /* R39 - Speaker Volume Right */ - { 0x00FF, 0x00FF }, /* R40 - Input Mixer (2) */ - { 0x01B7, 0x01B7 }, /* R41 - Input Mixer (3) */ - { 0x01B7, 0x01B7 }, /* R42 - Input Mixer (4) */ - { 0x01C7, 0x01C7 }, /* R43 - Input Mixer (5) */ - { 0x01C7, 0x01C7 }, /* R44 - Input Mixer (6) */ - { 0x01FF, 0x01FF }, /* R45 - Output Mixer (1) */ - { 0x01FF, 0x01FF }, /* R46 - Output Mixer (2) */ - { 0x0FFF, 0x0FFF }, /* R47 - Output Mixer (3) */ - { 0x0FFF, 0x0FFF }, /* R48 - Output Mixer (4) */ - { 0x0FFF, 0x0FFF }, /* R49 - Output Mixer (5) */ - { 0x0FFF, 0x0FFF }, /* R50 - Output Mixer (6) */ - { 0x0038, 0x0038 }, /* R51 - HPOUT2 Mixer */ - { 0x0077, 0x0077 }, /* R52 - Line Mixer (1) */ - { 0x0077, 0x0077 }, /* R53 - Line Mixer (2) */ - { 0x03FF, 0x03FF }, /* R54 - Speaker Mixer */ - { 0x00C1, 0x00C1 }, /* R55 - Additional Control */ - { 0x00F0, 0x00F0 }, /* R56 - AntiPOP (1) */ - { 0x01EF, 0x01EF }, /* R57 - AntiPOP (2) */ - { 0x00FF, 0x00FF }, /* R58 - MICBIAS */ - { 0x000F, 0x000F }, /* R59 - LDO 1 */ - { 0x0007, 0x0007 }, /* R60 - LDO 2 */ - { 0x0000, 0x0000 }, /* R61 */ - { 0x0000, 0x0000 }, /* R62 */ - { 0x0000, 0x0000 }, /* R63 */ - { 0x0000, 0x0000 }, /* R64 */ - { 0x0000, 0x0000 }, /* R65 */ - { 0x0000, 0x0000 }, /* R66 */ - { 0x0000, 0x0000 }, /* R67 */ - { 0x0000, 0x0000 }, /* R68 */ - { 0x0000, 0x0000 }, /* R69 */ - { 0x0000, 0x0000 }, /* R70 */ - { 0x0000, 0x0000 }, /* R71 */ - { 0x0000, 0x0000 }, /* R72 */ - { 0x0000, 0x0000 }, /* R73 */ - { 0x0000, 0x0000 }, /* R74 */ - { 0x0000, 0x0000 }, /* R75 */ - { 0x8000, 0x8000 }, /* R76 - Charge Pump (1) */ - { 0x0000, 0x0000 }, /* R77 */ - { 0x0000, 0x0000 }, /* R78 */ - { 0x0000, 0x0000 }, /* R79 */ - { 0x0000, 0x0000 }, /* R80 */ - { 0x0301, 0x0301 }, /* R81 - Class W (1) */ - { 0x0000, 0x0000 }, /* R82 */ - { 0x0000, 0x0000 }, /* R83 */ - { 0x333F, 0x333F }, /* R84 - DC Servo (1) */ - { 0x0FEF, 
0x0FEF }, /* R85 - DC Servo (2) */ - { 0x0000, 0x0000 }, /* R86 */ - { 0xFFFF, 0xFFFF }, /* R87 - DC Servo (4) */ - { 0x0333, 0x0000 }, /* R88 - DC Servo Readback */ - { 0x0000, 0x0000 }, /* R89 */ - { 0x0000, 0x0000 }, /* R90 */ - { 0x0000, 0x0000 }, /* R91 */ - { 0x0000, 0x0000 }, /* R92 */ - { 0x0000, 0x0000 }, /* R93 */ - { 0x0000, 0x0000 }, /* R94 */ - { 0x0000, 0x0000 }, /* R95 */ - { 0x00EE, 0x00EE }, /* R96 - Analogue HP (1) */ - { 0x0000, 0x0000 }, /* R97 */ - { 0x0000, 0x0000 }, /* R98 */ - { 0x0000, 0x0000 }, /* R99 */ - { 0x0000, 0x0000 }, /* R100 */ - { 0x0000, 0x0000 }, /* R101 */ - { 0x0000, 0x0000 }, /* R102 */ - { 0x0000, 0x0000 }, /* R103 */ - { 0x0000, 0x0000 }, /* R104 */ - { 0x0000, 0x0000 }, /* R105 */ - { 0x0000, 0x0000 }, /* R106 */ - { 0x0000, 0x0000 }, /* R107 */ - { 0x0000, 0x0000 }, /* R108 */ - { 0x0000, 0x0000 }, /* R109 */ - { 0x0000, 0x0000 }, /* R110 */ - { 0x0000, 0x0000 }, /* R111 */ - { 0x0000, 0x0000 }, /* R112 */ - { 0x0000, 0x0000 }, /* R113 */ - { 0x0000, 0x0000 }, /* R114 */ - { 0x0000, 0x0000 }, /* R115 */ - { 0x0000, 0x0000 }, /* R116 */ - { 0x0000, 0x0000 }, /* R117 */ - { 0x0000, 0x0000 }, /* R118 */ - { 0x0000, 0x0000 }, /* R119 */ - { 0x0000, 0x0000 }, /* R120 */ - { 0x0000, 0x0000 }, /* R121 */ - { 0x0000, 0x0000 }, /* R122 */ - { 0x0000, 0x0000 }, /* R123 */ - { 0x0000, 0x0000 }, /* R124 */ - { 0x0000, 0x0000 }, /* R125 */ - { 0x0000, 0x0000 }, /* R126 */ - { 0x0000, 0x0000 }, /* R127 */ - { 0x0000, 0x0000 }, /* R128 */ - { 0x0000, 0x0000 }, /* R129 */ - { 0x0000, 0x0000 }, /* R130 */ - { 0x0000, 0x0000 }, /* R131 */ - { 0x0000, 0x0000 }, /* R132 */ - { 0x0000, 0x0000 }, /* R133 */ - { 0x0000, 0x0000 }, /* R134 */ - { 0x0000, 0x0000 }, /* R135 */ - { 0x0000, 0x0000 }, /* R136 */ - { 0x0000, 0x0000 }, /* R137 */ - { 0x0000, 0x0000 }, /* R138 */ - { 0x0000, 0x0000 }, /* R139 */ - { 0x0000, 0x0000 }, /* R140 */ - { 0x0000, 0x0000 }, /* R141 */ - { 0x0000, 0x0000 }, /* R142 */ - { 0x0000, 0x0000 }, /* R143 */ - { 0x0000, 0x0000 }, /* R144 */ - { 0x0000, 0x0000 }, /* R145 */ - { 0x0000, 0x0000 }, /* R146 */ - { 0x0000, 0x0000 }, /* R147 */ - { 0x0000, 0x0000 }, /* R148 */ - { 0x0000, 0x0000 }, /* R149 */ - { 0x0000, 0x0000 }, /* R150 */ - { 0x0000, 0x0000 }, /* R151 */ - { 0x0000, 0x0000 }, /* R152 */ - { 0x0000, 0x0000 }, /* R153 */ - { 0x0000, 0x0000 }, /* R154 */ - { 0x0000, 0x0000 }, /* R155 */ - { 0x0000, 0x0000 }, /* R156 */ - { 0x0000, 0x0000 }, /* R157 */ - { 0x0000, 0x0000 }, /* R158 */ - { 0x0000, 0x0000 }, /* R159 */ - { 0x0000, 0x0000 }, /* R160 */ - { 0x0000, 0x0000 }, /* R161 */ - { 0x0000, 0x0000 }, /* R162 */ - { 0x0000, 0x0000 }, /* R163 */ - { 0x0000, 0x0000 }, /* R164 */ - { 0x0000, 0x0000 }, /* R165 */ - { 0x0000, 0x0000 }, /* R166 */ - { 0x0000, 0x0000 }, /* R167 */ - { 0x0000, 0x0000 }, /* R168 */ - { 0x0000, 0x0000 }, /* R169 */ - { 0x0000, 0x0000 }, /* R170 */ - { 0x0000, 0x0000 }, /* R171 */ - { 0x0000, 0x0000 }, /* R172 */ - { 0x0000, 0x0000 }, /* R173 */ - { 0x0000, 0x0000 }, /* R174 */ - { 0x0000, 0x0000 }, /* R175 */ - { 0x0000, 0x0000 }, /* R176 */ - { 0x0000, 0x0000 }, /* R177 */ - { 0x0000, 0x0000 }, /* R178 */ - { 0x0000, 0x0000 }, /* R179 */ - { 0x0000, 0x0000 }, /* R180 */ - { 0x0000, 0x0000 }, /* R181 */ - { 0x0000, 0x0000 }, /* R182 */ - { 0x0000, 0x0000 }, /* R183 */ - { 0x0000, 0x0000 }, /* R184 */ - { 0x0000, 0x0000 }, /* R185 */ - { 0x0000, 0x0000 }, /* R186 */ - { 0x0000, 0x0000 }, /* R187 */ - { 0x0000, 0x0000 }, /* R188 */ - { 0x0000, 0x0000 }, /* R189 */ - { 0x0000, 0x0000 }, /* R190 */ - { 0x0000, 
0x0000 }, /* R191 */ - { 0x0000, 0x0000 }, /* R192 */ - { 0x0000, 0x0000 }, /* R193 */ - { 0x0000, 0x0000 }, /* R194 */ - { 0x0000, 0x0000 }, /* R195 */ - { 0x0000, 0x0000 }, /* R196 */ - { 0x0000, 0x0000 }, /* R197 */ - { 0x0000, 0x0000 }, /* R198 */ - { 0x0000, 0x0000 }, /* R199 */ - { 0x0000, 0x0000 }, /* R200 */ - { 0x0000, 0x0000 }, /* R201 */ - { 0x0000, 0x0000 }, /* R202 */ - { 0x0000, 0x0000 }, /* R203 */ - { 0x0000, 0x0000 }, /* R204 */ - { 0x0000, 0x0000 }, /* R205 */ - { 0x0000, 0x0000 }, /* R206 */ - { 0x0000, 0x0000 }, /* R207 */ - { 0x0000, 0x0000 }, /* R208 */ - { 0x0000, 0x0000 }, /* R209 */ - { 0x0000, 0x0000 }, /* R210 */ - { 0x0000, 0x0000 }, /* R211 */ - { 0x0000, 0x0000 }, /* R212 */ - { 0x0000, 0x0000 }, /* R213 */ - { 0x0000, 0x0000 }, /* R214 */ - { 0x0000, 0x0000 }, /* R215 */ - { 0x0000, 0x0000 }, /* R216 */ - { 0x0000, 0x0000 }, /* R217 */ - { 0x0000, 0x0000 }, /* R218 */ - { 0x0000, 0x0000 }, /* R219 */ - { 0x0000, 0x0000 }, /* R220 */ - { 0x0000, 0x0000 }, /* R221 */ - { 0x0000, 0x0000 }, /* R222 */ - { 0x0000, 0x0000 }, /* R223 */ - { 0x0000, 0x0000 }, /* R224 */ - { 0x0000, 0x0000 }, /* R225 */ - { 0x0000, 0x0000 }, /* R226 */ - { 0x0000, 0x0000 }, /* R227 */ - { 0x0000, 0x0000 }, /* R228 */ - { 0x0000, 0x0000 }, /* R229 */ - { 0x0000, 0x0000 }, /* R230 */ - { 0x0000, 0x0000 }, /* R231 */ - { 0x0000, 0x0000 }, /* R232 */ - { 0x0000, 0x0000 }, /* R233 */ - { 0x0000, 0x0000 }, /* R234 */ - { 0x0000, 0x0000 }, /* R235 */ - { 0x0000, 0x0000 }, /* R236 */ - { 0x0000, 0x0000 }, /* R237 */ - { 0x0000, 0x0000 }, /* R238 */ - { 0x0000, 0x0000 }, /* R239 */ - { 0x0000, 0x0000 }, /* R240 */ - { 0x0000, 0x0000 }, /* R241 */ - { 0x0000, 0x0000 }, /* R242 */ - { 0x0000, 0x0000 }, /* R243 */ - { 0x0000, 0x0000 }, /* R244 */ - { 0x0000, 0x0000 }, /* R245 */ - { 0x0000, 0x0000 }, /* R246 */ - { 0x0000, 0x0000 }, /* R247 */ - { 0x0000, 0x0000 }, /* R248 */ - { 0x0000, 0x0000 }, /* R249 */ - { 0x0000, 0x0000 }, /* R250 */ - { 0x0000, 0x0000 }, /* R251 */ - { 0x0000, 0x0000 }, /* R252 */ - { 0x0000, 0x0000 }, /* R253 */ - { 0x0000, 0x0000 }, /* R254 */ - { 0x0000, 0x0000 }, /* R255 */ - { 0x000F, 0x0000 }, /* R256 - Chip Revision */ - { 0x0074, 0x0074 }, /* R257 - Control Interface */ - { 0x0000, 0x0000 }, /* R258 */ - { 0x0000, 0x0000 }, /* R259 */ - { 0x0000, 0x0000 }, /* R260 */ - { 0x0000, 0x0000 }, /* R261 */ - { 0x0000, 0x0000 }, /* R262 */ - { 0x0000, 0x0000 }, /* R263 */ - { 0x0000, 0x0000 }, /* R264 */ - { 0x0000, 0x0000 }, /* R265 */ - { 0x0000, 0x0000 }, /* R266 */ - { 0x0000, 0x0000 }, /* R267 */ - { 0x0000, 0x0000 }, /* R268 */ - { 0x0000, 0x0000 }, /* R269 */ - { 0x0000, 0x0000 }, /* R270 */ - { 0x0000, 0x0000 }, /* R271 */ - { 0x807F, 0x837F }, /* R272 - Write Sequencer Ctrl (1) */ - { 0x017F, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */ - { 0x0000, 0x0000 }, /* R274 */ - { 0x0000, 0x0000 }, /* R275 */ - { 0x0000, 0x0000 }, /* R276 */ - { 0x0000, 0x0000 }, /* R277 */ - { 0x0000, 0x0000 }, /* R278 */ - { 0x0000, 0x0000 }, /* R279 */ - { 0x0000, 0x0000 }, /* R280 */ - { 0x0000, 0x0000 }, /* R281 */ - { 0x0000, 0x0000 }, /* R282 */ - { 0x0000, 0x0000 }, /* R283 */ - { 0x0000, 0x0000 }, /* R284 */ - { 0x0000, 0x0000 }, /* R285 */ - { 0x0000, 0x0000 }, /* R286 */ - { 0x0000, 0x0000 }, /* R287 */ - { 0x0000, 0x0000 }, /* R288 */ - { 0x0000, 0x0000 }, /* R289 */ - { 0x0000, 0x0000 }, /* R290 */ - { 0x0000, 0x0000 }, /* R291 */ - { 0x0000, 0x0000 }, /* R292 */ - { 0x0000, 0x0000 }, /* R293 */ - { 0x0000, 0x0000 }, /* R294 */ - { 0x0000, 0x0000 }, /* R295 */ - { 0x0000, 
0x0000 }, /* R296 */ - { 0x0000, 0x0000 }, /* R297 */ - { 0x0000, 0x0000 }, /* R298 */ - { 0x0000, 0x0000 }, /* R299 */ - { 0x0000, 0x0000 }, /* R300 */ - { 0x0000, 0x0000 }, /* R301 */ - { 0x0000, 0x0000 }, /* R302 */ - { 0x0000, 0x0000 }, /* R303 */ - { 0x0000, 0x0000 }, /* R304 */ - { 0x0000, 0x0000 }, /* R305 */ - { 0x0000, 0x0000 }, /* R306 */ - { 0x0000, 0x0000 }, /* R307 */ - { 0x0000, 0x0000 }, /* R308 */ - { 0x0000, 0x0000 }, /* R309 */ - { 0x0000, 0x0000 }, /* R310 */ - { 0x0000, 0x0000 }, /* R311 */ - { 0x0000, 0x0000 }, /* R312 */ - { 0x0000, 0x0000 }, /* R313 */ - { 0x0000, 0x0000 }, /* R314 */ - { 0x0000, 0x0000 }, /* R315 */ - { 0x0000, 0x0000 }, /* R316 */ - { 0x0000, 0x0000 }, /* R317 */ - { 0x0000, 0x0000 }, /* R318 */ - { 0x0000, 0x0000 }, /* R319 */ - { 0x0000, 0x0000 }, /* R320 */ - { 0x0000, 0x0000 }, /* R321 */ - { 0x0000, 0x0000 }, /* R322 */ - { 0x0000, 0x0000 }, /* R323 */ - { 0x0000, 0x0000 }, /* R324 */ - { 0x0000, 0x0000 }, /* R325 */ - { 0x0000, 0x0000 }, /* R326 */ - { 0x0000, 0x0000 }, /* R327 */ - { 0x0000, 0x0000 }, /* R328 */ - { 0x0000, 0x0000 }, /* R329 */ - { 0x0000, 0x0000 }, /* R330 */ - { 0x0000, 0x0000 }, /* R331 */ - { 0x0000, 0x0000 }, /* R332 */ - { 0x0000, 0x0000 }, /* R333 */ - { 0x0000, 0x0000 }, /* R334 */ - { 0x0000, 0x0000 }, /* R335 */ - { 0x0000, 0x0000 }, /* R336 */ - { 0x0000, 0x0000 }, /* R337 */ - { 0x0000, 0x0000 }, /* R338 */ - { 0x0000, 0x0000 }, /* R339 */ - { 0x0000, 0x0000 }, /* R340 */ - { 0x0000, 0x0000 }, /* R341 */ - { 0x0000, 0x0000 }, /* R342 */ - { 0x0000, 0x0000 }, /* R343 */ - { 0x0000, 0x0000 }, /* R344 */ - { 0x0000, 0x0000 }, /* R345 */ - { 0x0000, 0x0000 }, /* R346 */ - { 0x0000, 0x0000 }, /* R347 */ - { 0x0000, 0x0000 }, /* R348 */ - { 0x0000, 0x0000 }, /* R349 */ - { 0x0000, 0x0000 }, /* R350 */ - { 0x0000, 0x0000 }, /* R351 */ - { 0x0000, 0x0000 }, /* R352 */ - { 0x0000, 0x0000 }, /* R353 */ - { 0x0000, 0x0000 }, /* R354 */ - { 0x0000, 0x0000 }, /* R355 */ - { 0x0000, 0x0000 }, /* R356 */ - { 0x0000, 0x0000 }, /* R357 */ - { 0x0000, 0x0000 }, /* R358 */ - { 0x0000, 0x0000 }, /* R359 */ - { 0x0000, 0x0000 }, /* R360 */ - { 0x0000, 0x0000 }, /* R361 */ - { 0x0000, 0x0000 }, /* R362 */ - { 0x0000, 0x0000 }, /* R363 */ - { 0x0000, 0x0000 }, /* R364 */ - { 0x0000, 0x0000 }, /* R365 */ - { 0x0000, 0x0000 }, /* R366 */ - { 0x0000, 0x0000 }, /* R367 */ - { 0x0000, 0x0000 }, /* R368 */ - { 0x0000, 0x0000 }, /* R369 */ - { 0x0000, 0x0000 }, /* R370 */ - { 0x0000, 0x0000 }, /* R371 */ - { 0x0000, 0x0000 }, /* R372 */ - { 0x0000, 0x0000 }, /* R373 */ - { 0x0000, 0x0000 }, /* R374 */ - { 0x0000, 0x0000 }, /* R375 */ - { 0x0000, 0x0000 }, /* R376 */ - { 0x0000, 0x0000 }, /* R377 */ - { 0x0000, 0x0000 }, /* R378 */ - { 0x0000, 0x0000 }, /* R379 */ - { 0x0000, 0x0000 }, /* R380 */ - { 0x0000, 0x0000 }, /* R381 */ - { 0x0000, 0x0000 }, /* R382 */ - { 0x0000, 0x0000 }, /* R383 */ - { 0x0000, 0x0000 }, /* R384 */ - { 0x0000, 0x0000 }, /* R385 */ - { 0x0000, 0x0000 }, /* R386 */ - { 0x0000, 0x0000 }, /* R387 */ - { 0x0000, 0x0000 }, /* R388 */ - { 0x0000, 0x0000 }, /* R389 */ - { 0x0000, 0x0000 }, /* R390 */ - { 0x0000, 0x0000 }, /* R391 */ - { 0x0000, 0x0000 }, /* R392 */ - { 0x0000, 0x0000 }, /* R393 */ - { 0x0000, 0x0000 }, /* R394 */ - { 0x0000, 0x0000 }, /* R395 */ - { 0x0000, 0x0000 }, /* R396 */ - { 0x0000, 0x0000 }, /* R397 */ - { 0x0000, 0x0000 }, /* R398 */ - { 0x0000, 0x0000 }, /* R399 */ - { 0x0000, 0x0000 }, /* R400 */ - { 0x0000, 0x0000 }, /* R401 */ - { 0x0000, 0x0000 }, /* R402 */ - { 0x0000, 0x0000 }, /* R403 */ - 
{ 0x0000, 0x0000 }, /* R404 */ - { 0x0000, 0x0000 }, /* R405 */ - { 0x0000, 0x0000 }, /* R406 */ - { 0x0000, 0x0000 }, /* R407 */ - { 0x0000, 0x0000 }, /* R408 */ - { 0x0000, 0x0000 }, /* R409 */ - { 0x0000, 0x0000 }, /* R410 */ - { 0x0000, 0x0000 }, /* R411 */ - { 0x0000, 0x0000 }, /* R412 */ - { 0x0000, 0x0000 }, /* R413 */ - { 0x0000, 0x0000 }, /* R414 */ - { 0x0000, 0x0000 }, /* R415 */ - { 0x0000, 0x0000 }, /* R416 */ - { 0x0000, 0x0000 }, /* R417 */ - { 0x0000, 0x0000 }, /* R418 */ - { 0x0000, 0x0000 }, /* R419 */ - { 0x0000, 0x0000 }, /* R420 */ - { 0x0000, 0x0000 }, /* R421 */ - { 0x0000, 0x0000 }, /* R422 */ - { 0x0000, 0x0000 }, /* R423 */ - { 0x0000, 0x0000 }, /* R424 */ - { 0x0000, 0x0000 }, /* R425 */ - { 0x0000, 0x0000 }, /* R426 */ - { 0x0000, 0x0000 }, /* R427 */ - { 0x0000, 0x0000 }, /* R428 */ - { 0x0000, 0x0000 }, /* R429 */ - { 0x0000, 0x0000 }, /* R430 */ - { 0x0000, 0x0000 }, /* R431 */ - { 0x0000, 0x0000 }, /* R432 */ - { 0x0000, 0x0000 }, /* R433 */ - { 0x0000, 0x0000 }, /* R434 */ - { 0x0000, 0x0000 }, /* R435 */ - { 0x0000, 0x0000 }, /* R436 */ - { 0x0000, 0x0000 }, /* R437 */ - { 0x0000, 0x0000 }, /* R438 */ - { 0x0000, 0x0000 }, /* R439 */ - { 0x0000, 0x0000 }, /* R440 */ - { 0x0000, 0x0000 }, /* R441 */ - { 0x0000, 0x0000 }, /* R442 */ - { 0x0000, 0x0000 }, /* R443 */ - { 0x0000, 0x0000 }, /* R444 */ - { 0x0000, 0x0000 }, /* R445 */ - { 0x0000, 0x0000 }, /* R446 */ - { 0x0000, 0x0000 }, /* R447 */ - { 0x0000, 0x0000 }, /* R448 */ - { 0x0000, 0x0000 }, /* R449 */ - { 0x0000, 0x0000 }, /* R450 */ - { 0x0000, 0x0000 }, /* R451 */ - { 0x0000, 0x0000 }, /* R452 */ - { 0x0000, 0x0000 }, /* R453 */ - { 0x0000, 0x0000 }, /* R454 */ - { 0x0000, 0x0000 }, /* R455 */ - { 0x0000, 0x0000 }, /* R456 */ - { 0x0000, 0x0000 }, /* R457 */ - { 0x0000, 0x0000 }, /* R458 */ - { 0x0000, 0x0000 }, /* R459 */ - { 0x0000, 0x0000 }, /* R460 */ - { 0x0000, 0x0000 }, /* R461 */ - { 0x0000, 0x0000 }, /* R462 */ - { 0x0000, 0x0000 }, /* R463 */ - { 0x0000, 0x0000 }, /* R464 */ - { 0x0000, 0x0000 }, /* R465 */ - { 0x0000, 0x0000 }, /* R466 */ - { 0x0000, 0x0000 }, /* R467 */ - { 0x0000, 0x0000 }, /* R468 */ - { 0x0000, 0x0000 }, /* R469 */ - { 0x0000, 0x0000 }, /* R470 */ - { 0x0000, 0x0000 }, /* R471 */ - { 0x0000, 0x0000 }, /* R472 */ - { 0x0000, 0x0000 }, /* R473 */ - { 0x0000, 0x0000 }, /* R474 */ - { 0x0000, 0x0000 }, /* R475 */ - { 0x0000, 0x0000 }, /* R476 */ - { 0x0000, 0x0000 }, /* R477 */ - { 0x0000, 0x0000 }, /* R478 */ - { 0x0000, 0x0000 }, /* R479 */ - { 0x0000, 0x0000 }, /* R480 */ - { 0x0000, 0x0000 }, /* R481 */ - { 0x0000, 0x0000 }, /* R482 */ - { 0x0000, 0x0000 }, /* R483 */ - { 0x0000, 0x0000 }, /* R484 */ - { 0x0000, 0x0000 }, /* R485 */ - { 0x0000, 0x0000 }, /* R486 */ - { 0x0000, 0x0000 }, /* R487 */ - { 0x0000, 0x0000 }, /* R488 */ - { 0x0000, 0x0000 }, /* R489 */ - { 0x0000, 0x0000 }, /* R490 */ - { 0x0000, 0x0000 }, /* R491 */ - { 0x0000, 0x0000 }, /* R492 */ - { 0x0000, 0x0000 }, /* R493 */ - { 0x0000, 0x0000 }, /* R494 */ - { 0x0000, 0x0000 }, /* R495 */ - { 0x0000, 0x0000 }, /* R496 */ - { 0x0000, 0x0000 }, /* R497 */ - { 0x0000, 0x0000 }, /* R498 */ - { 0x0000, 0x0000 }, /* R499 */ - { 0x0000, 0x0000 }, /* R500 */ - { 0x0000, 0x0000 }, /* R501 */ - { 0x0000, 0x0000 }, /* R502 */ - { 0x0000, 0x0000 }, /* R503 */ - { 0x0000, 0x0000 }, /* R504 */ - { 0x0000, 0x0000 }, /* R505 */ - { 0x0000, 0x0000 }, /* R506 */ - { 0x0000, 0x0000 }, /* R507 */ - { 0x0000, 0x0000 }, /* R508 */ - { 0x0000, 0x0000 }, /* R509 */ - { 0x0000, 0x0000 }, /* R510 */ - { 0x0000, 0x0000 }, /* 
R511 */ - { 0x001F, 0x001F }, /* R512 - AIF1 Clocking (1) */ - { 0x003F, 0x003F }, /* R513 - AIF1 Clocking (2) */ - { 0x0000, 0x0000 }, /* R514 */ - { 0x0000, 0x0000 }, /* R515 */ - { 0x001F, 0x001F }, /* R516 - AIF2 Clocking (1) */ - { 0x003F, 0x003F }, /* R517 - AIF2 Clocking (2) */ - { 0x0000, 0x0000 }, /* R518 */ - { 0x0000, 0x0000 }, /* R519 */ - { 0x001F, 0x001F }, /* R520 - Clocking (1) */ - { 0x0777, 0x0777 }, /* R521 - Clocking (2) */ - { 0x0000, 0x0000 }, /* R522 */ - { 0x0000, 0x0000 }, /* R523 */ - { 0x0000, 0x0000 }, /* R524 */ - { 0x0000, 0x0000 }, /* R525 */ - { 0x0000, 0x0000 }, /* R526 */ - { 0x0000, 0x0000 }, /* R527 */ - { 0x00FF, 0x00FF }, /* R528 - AIF1 Rate */ - { 0x00FF, 0x00FF }, /* R529 - AIF2 Rate */ - { 0x000F, 0x0000 }, /* R530 - Rate Status */ - { 0x0000, 0x0000 }, /* R531 */ - { 0x0000, 0x0000 }, /* R532 */ - { 0x0000, 0x0000 }, /* R533 */ - { 0x0000, 0x0000 }, /* R534 */ - { 0x0000, 0x0000 }, /* R535 */ - { 0x0000, 0x0000 }, /* R536 */ - { 0x0000, 0x0000 }, /* R537 */ - { 0x0000, 0x0000 }, /* R538 */ - { 0x0000, 0x0000 }, /* R539 */ - { 0x0000, 0x0000 }, /* R540 */ - { 0x0000, 0x0000 }, /* R541 */ - { 0x0000, 0x0000 }, /* R542 */ - { 0x0000, 0x0000 }, /* R543 */ - { 0x0007, 0x0007 }, /* R544 - FLL1 Control (1) */ - { 0x3F77, 0x3F77 }, /* R545 - FLL1 Control (2) */ - { 0xFFFF, 0xFFFF }, /* R546 - FLL1 Control (3) */ - { 0x7FEF, 0x7FEF }, /* R547 - FLL1 Control (4) */ - { 0x1FDB, 0x1FDB }, /* R548 - FLL1 Control (5) */ - { 0x0000, 0x0000 }, /* R549 */ - { 0x0000, 0x0000 }, /* R550 */ - { 0x0000, 0x0000 }, /* R551 */ - { 0x0000, 0x0000 }, /* R552 */ - { 0x0000, 0x0000 }, /* R553 */ - { 0x0000, 0x0000 }, /* R554 */ - { 0x0000, 0x0000 }, /* R555 */ - { 0x0000, 0x0000 }, /* R556 */ - { 0x0000, 0x0000 }, /* R557 */ - { 0x0000, 0x0000 }, /* R558 */ - { 0x0000, 0x0000 }, /* R559 */ - { 0x0000, 0x0000 }, /* R560 */ - { 0x0000, 0x0000 }, /* R561 */ - { 0x0000, 0x0000 }, /* R562 */ - { 0x0000, 0x0000 }, /* R563 */ - { 0x0000, 0x0000 }, /* R564 */ - { 0x0000, 0x0000 }, /* R565 */ - { 0x0000, 0x0000 }, /* R566 */ - { 0x0000, 0x0000 }, /* R567 */ - { 0x0000, 0x0000 }, /* R568 */ - { 0x0000, 0x0000 }, /* R569 */ - { 0x0000, 0x0000 }, /* R570 */ - { 0x0000, 0x0000 }, /* R571 */ - { 0x0000, 0x0000 }, /* R572 */ - { 0x0000, 0x0000 }, /* R573 */ - { 0x0000, 0x0000 }, /* R574 */ - { 0x0000, 0x0000 }, /* R575 */ - { 0x0007, 0x0007 }, /* R576 - FLL2 Control (1) */ - { 0x3F77, 0x3F77 }, /* R577 - FLL2 Control (2) */ - { 0xFFFF, 0xFFFF }, /* R578 - FLL2 Control (3) */ - { 0x7FEF, 0x7FEF }, /* R579 - FLL2 Control (4) */ - { 0x1FDB, 0x1FDB }, /* R580 - FLL2 Control (5) */ - { 0x0000, 0x0000 }, /* R581 */ - { 0x0000, 0x0000 }, /* R582 */ - { 0x0000, 0x0000 }, /* R583 */ - { 0x0000, 0x0000 }, /* R584 */ - { 0x0000, 0x0000 }, /* R585 */ - { 0x0000, 0x0000 }, /* R586 */ - { 0x0000, 0x0000 }, /* R587 */ - { 0x0000, 0x0000 }, /* R588 */ - { 0x0000, 0x0000 }, /* R589 */ - { 0x0000, 0x0000 }, /* R590 */ - { 0x0000, 0x0000 }, /* R591 */ - { 0x0000, 0x0000 }, /* R592 */ - { 0x0000, 0x0000 }, /* R593 */ - { 0x0000, 0x0000 }, /* R594 */ - { 0x0000, 0x0000 }, /* R595 */ - { 0x0000, 0x0000 }, /* R596 */ - { 0x0000, 0x0000 }, /* R597 */ - { 0x0000, 0x0000 }, /* R598 */ - { 0x0000, 0x0000 }, /* R599 */ - { 0x0000, 0x0000 }, /* R600 */ - { 0x0000, 0x0000 }, /* R601 */ - { 0x0000, 0x0000 }, /* R602 */ - { 0x0000, 0x0000 }, /* R603 */ - { 0x0000, 0x0000 }, /* R604 */ - { 0x0000, 0x0000 }, /* R605 */ - { 0x0000, 0x0000 }, /* R606 */ - { 0x0000, 0x0000 }, /* R607 */ - { 0x0000, 0x0000 }, /* R608 */ - { 
0x0000, 0x0000 }, /* R609 */ - { 0x0000, 0x0000 }, /* R610 */ - { 0x0000, 0x0000 }, /* R611 */ - { 0x0000, 0x0000 }, /* R612 */ - { 0x0000, 0x0000 }, /* R613 */ - { 0x0000, 0x0000 }, /* R614 */ - { 0x0000, 0x0000 }, /* R615 */ - { 0x0000, 0x0000 }, /* R616 */ - { 0x0000, 0x0000 }, /* R617 */ - { 0x0000, 0x0000 }, /* R618 */ - { 0x0000, 0x0000 }, /* R619 */ - { 0x0000, 0x0000 }, /* R620 */ - { 0x0000, 0x0000 }, /* R621 */ - { 0x0000, 0x0000 }, /* R622 */ - { 0x0000, 0x0000 }, /* R623 */ - { 0x0000, 0x0000 }, /* R624 */ - { 0x0000, 0x0000 }, /* R625 */ - { 0x0000, 0x0000 }, /* R626 */ - { 0x0000, 0x0000 }, /* R627 */ - { 0x0000, 0x0000 }, /* R628 */ - { 0x0000, 0x0000 }, /* R629 */ - { 0x0000, 0x0000 }, /* R630 */ - { 0x0000, 0x0000 }, /* R631 */ - { 0x0000, 0x0000 }, /* R632 */ - { 0x0000, 0x0000 }, /* R633 */ - { 0x0000, 0x0000 }, /* R634 */ - { 0x0000, 0x0000 }, /* R635 */ - { 0x0000, 0x0000 }, /* R636 */ - { 0x0000, 0x0000 }, /* R637 */ - { 0x0000, 0x0000 }, /* R638 */ - { 0x0000, 0x0000 }, /* R639 */ - { 0x0000, 0x0000 }, /* R640 */ - { 0x0000, 0x0000 }, /* R641 */ - { 0x0000, 0x0000 }, /* R642 */ - { 0x0000, 0x0000 }, /* R643 */ - { 0x0000, 0x0000 }, /* R644 */ - { 0x0000, 0x0000 }, /* R645 */ - { 0x0000, 0x0000 }, /* R646 */ - { 0x0000, 0x0000 }, /* R647 */ - { 0x0000, 0x0000 }, /* R648 */ - { 0x0000, 0x0000 }, /* R649 */ - { 0x0000, 0x0000 }, /* R650 */ - { 0x0000, 0x0000 }, /* R651 */ - { 0x0000, 0x0000 }, /* R652 */ - { 0x0000, 0x0000 }, /* R653 */ - { 0x0000, 0x0000 }, /* R654 */ - { 0x0000, 0x0000 }, /* R655 */ - { 0x0000, 0x0000 }, /* R656 */ - { 0x0000, 0x0000 }, /* R657 */ - { 0x0000, 0x0000 }, /* R658 */ - { 0x0000, 0x0000 }, /* R659 */ - { 0x0000, 0x0000 }, /* R660 */ - { 0x0000, 0x0000 }, /* R661 */ - { 0x0000, 0x0000 }, /* R662 */ - { 0x0000, 0x0000 }, /* R663 */ - { 0x0000, 0x0000 }, /* R664 */ - { 0x0000, 0x0000 }, /* R665 */ - { 0x0000, 0x0000 }, /* R666 */ - { 0x0000, 0x0000 }, /* R667 */ - { 0x0000, 0x0000 }, /* R668 */ - { 0x0000, 0x0000 }, /* R669 */ - { 0x0000, 0x0000 }, /* R670 */ - { 0x0000, 0x0000 }, /* R671 */ - { 0x0000, 0x0000 }, /* R672 */ - { 0x0000, 0x0000 }, /* R673 */ - { 0x0000, 0x0000 }, /* R674 */ - { 0x0000, 0x0000 }, /* R675 */ - { 0x0000, 0x0000 }, /* R676 */ - { 0x0000, 0x0000 }, /* R677 */ - { 0x0000, 0x0000 }, /* R678 */ - { 0x0000, 0x0000 }, /* R679 */ - { 0x0000, 0x0000 }, /* R680 */ - { 0x0000, 0x0000 }, /* R681 */ - { 0x0000, 0x0000 }, /* R682 */ - { 0x0000, 0x0000 }, /* R683 */ - { 0x0000, 0x0000 }, /* R684 */ - { 0x0000, 0x0000 }, /* R685 */ - { 0x0000, 0x0000 }, /* R686 */ - { 0x0000, 0x0000 }, /* R687 */ - { 0x0000, 0x0000 }, /* R688 */ - { 0x0000, 0x0000 }, /* R689 */ - { 0x0000, 0x0000 }, /* R690 */ - { 0x0000, 0x0000 }, /* R691 */ - { 0x0000, 0x0000 }, /* R692 */ - { 0x0000, 0x0000 }, /* R693 */ - { 0x0000, 0x0000 }, /* R694 */ - { 0x0000, 0x0000 }, /* R695 */ - { 0x0000, 0x0000 }, /* R696 */ - { 0x0000, 0x0000 }, /* R697 */ - { 0x0000, 0x0000 }, /* R698 */ - { 0x0000, 0x0000 }, /* R699 */ - { 0x0000, 0x0000 }, /* R700 */ - { 0x0000, 0x0000 }, /* R701 */ - { 0x0000, 0x0000 }, /* R702 */ - { 0x0000, 0x0000 }, /* R703 */ - { 0x0000, 0x0000 }, /* R704 */ - { 0x0000, 0x0000 }, /* R705 */ - { 0x0000, 0x0000 }, /* R706 */ - { 0x0000, 0x0000 }, /* R707 */ - { 0x0000, 0x0000 }, /* R708 */ - { 0x0000, 0x0000 }, /* R709 */ - { 0x0000, 0x0000 }, /* R710 */ - { 0x0000, 0x0000 }, /* R711 */ - { 0x0000, 0x0000 }, /* R712 */ - { 0x0000, 0x0000 }, /* R713 */ - { 0x0000, 0x0000 }, /* R714 */ - { 0x0000, 0x0000 }, /* R715 */ - { 0x0000, 0x0000 }, /* 
R716 */ - { 0x0000, 0x0000 }, /* R717 */ - { 0x0000, 0x0000 }, /* R718 */ - { 0x0000, 0x0000 }, /* R719 */ - { 0x0000, 0x0000 }, /* R720 */ - { 0x0000, 0x0000 }, /* R721 */ - { 0x0000, 0x0000 }, /* R722 */ - { 0x0000, 0x0000 }, /* R723 */ - { 0x0000, 0x0000 }, /* R724 */ - { 0x0000, 0x0000 }, /* R725 */ - { 0x0000, 0x0000 }, /* R726 */ - { 0x0000, 0x0000 }, /* R727 */ - { 0x0000, 0x0000 }, /* R728 */ - { 0x0000, 0x0000 }, /* R729 */ - { 0x0000, 0x0000 }, /* R730 */ - { 0x0000, 0x0000 }, /* R731 */ - { 0x0000, 0x0000 }, /* R732 */ - { 0x0000, 0x0000 }, /* R733 */ - { 0x0000, 0x0000 }, /* R734 */ - { 0x0000, 0x0000 }, /* R735 */ - { 0x0000, 0x0000 }, /* R736 */ - { 0x0000, 0x0000 }, /* R737 */ - { 0x0000, 0x0000 }, /* R738 */ - { 0x0000, 0x0000 }, /* R739 */ - { 0x0000, 0x0000 }, /* R740 */ - { 0x0000, 0x0000 }, /* R741 */ - { 0x0000, 0x0000 }, /* R742 */ - { 0x0000, 0x0000 }, /* R743 */ - { 0x0000, 0x0000 }, /* R744 */ - { 0x0000, 0x0000 }, /* R745 */ - { 0x0000, 0x0000 }, /* R746 */ - { 0x0000, 0x0000 }, /* R747 */ - { 0x0000, 0x0000 }, /* R748 */ - { 0x0000, 0x0000 }, /* R749 */ - { 0x0000, 0x0000 }, /* R750 */ - { 0x0000, 0x0000 }, /* R751 */ - { 0x0000, 0x0000 }, /* R752 */ - { 0x0000, 0x0000 }, /* R753 */ - { 0x0000, 0x0000 }, /* R754 */ - { 0x0000, 0x0000 }, /* R755 */ - { 0x0000, 0x0000 }, /* R756 */ - { 0x0000, 0x0000 }, /* R757 */ - { 0x0000, 0x0000 }, /* R758 */ - { 0x0000, 0x0000 }, /* R759 */ - { 0x0000, 0x0000 }, /* R760 */ - { 0x0000, 0x0000 }, /* R761 */ - { 0x0000, 0x0000 }, /* R762 */ - { 0x0000, 0x0000 }, /* R763 */ - { 0x0000, 0x0000 }, /* R764 */ - { 0x0000, 0x0000 }, /* R765 */ - { 0x0000, 0x0000 }, /* R766 */ - { 0x0000, 0x0000 }, /* R767 */ - { 0xE1F8, 0xE1F8 }, /* R768 - AIF1 Control (1) */ - { 0xCD1F, 0xCD1F }, /* R769 - AIF1 Control (2) */ - { 0xF000, 0xF000 }, /* R770 - AIF1 Master/Slave */ - { 0x01F0, 0x01F0 }, /* R771 - AIF1 BCLK */ - { 0x0FFF, 0x0FFF }, /* R772 - AIF1ADC LRCLK */ - { 0x0FFF, 0x0FFF }, /* R773 - AIF1DAC LRCLK */ - { 0x0003, 0x0003 }, /* R774 - AIF1DAC Data */ - { 0x0003, 0x0003 }, /* R775 - AIF1ADC Data */ - { 0x0000, 0x0000 }, /* R776 */ - { 0x0000, 0x0000 }, /* R777 */ - { 0x0000, 0x0000 }, /* R778 */ - { 0x0000, 0x0000 }, /* R779 */ - { 0x0000, 0x0000 }, /* R780 */ - { 0x0000, 0x0000 }, /* R781 */ - { 0x0000, 0x0000 }, /* R782 */ - { 0x0000, 0x0000 }, /* R783 */ - { 0xF1F8, 0xF1F8 }, /* R784 - AIF2 Control (1) */ - { 0xFD1F, 0xFD1F }, /* R785 - AIF2 Control (2) */ - { 0xF000, 0xF000 }, /* R786 - AIF2 Master/Slave */ - { 0x01F0, 0x01F0 }, /* R787 - AIF2 BCLK */ - { 0x0FFF, 0x0FFF }, /* R788 - AIF2ADC LRCLK */ - { 0x0FFF, 0x0FFF }, /* R789 - AIF2DAC LRCLK */ - { 0x0003, 0x0003 }, /* R790 - AIF2DAC Data */ - { 0x0003, 0x0003 }, /* R791 - AIF2ADC Data */ - { 0x0000, 0x0000 }, /* R792 */ - { 0x0000, 0x0000 }, /* R793 */ - { 0x0000, 0x0000 }, /* R794 */ - { 0x0000, 0x0000 }, /* R795 */ - { 0x0000, 0x0000 }, /* R796 */ - { 0x0000, 0x0000 }, /* R797 */ - { 0x0000, 0x0000 }, /* R798 */ - { 0x0000, 0x0000 }, /* R799 */ - { 0x0000, 0x0000 }, /* R800 */ - { 0x0000, 0x0000 }, /* R801 */ - { 0x0000, 0x0000 }, /* R802 */ - { 0x0000, 0x0000 }, /* R803 */ - { 0x0000, 0x0000 }, /* R804 */ - { 0x0000, 0x0000 }, /* R805 */ - { 0x0000, 0x0000 }, /* R806 */ - { 0x0000, 0x0000 }, /* R807 */ - { 0x0000, 0x0000 }, /* R808 */ - { 0x0000, 0x0000 }, /* R809 */ - { 0x0000, 0x0000 }, /* R810 */ - { 0x0000, 0x0000 }, /* R811 */ - { 0x0000, 0x0000 }, /* R812 */ - { 0x0000, 0x0000 }, /* R813 */ - { 0x0000, 0x0000 }, /* R814 */ - { 0x0000, 0x0000 }, /* R815 */ - { 0x0000, 
0x0000 }, /* R816 */ - { 0x0000, 0x0000 }, /* R817 */ - { 0x0000, 0x0000 }, /* R818 */ - { 0x0000, 0x0000 }, /* R819 */ - { 0x0000, 0x0000 }, /* R820 */ - { 0x0000, 0x0000 }, /* R821 */ - { 0x0000, 0x0000 }, /* R822 */ - { 0x0000, 0x0000 }, /* R823 */ - { 0x0000, 0x0000 }, /* R824 */ - { 0x0000, 0x0000 }, /* R825 */ - { 0x0000, 0x0000 }, /* R826 */ - { 0x0000, 0x0000 }, /* R827 */ - { 0x0000, 0x0000 }, /* R828 */ - { 0x0000, 0x0000 }, /* R829 */ - { 0x0000, 0x0000 }, /* R830 */ - { 0x0000, 0x0000 }, /* R831 */ - { 0x0000, 0x0000 }, /* R832 */ - { 0x0000, 0x0000 }, /* R833 */ - { 0x0000, 0x0000 }, /* R834 */ - { 0x0000, 0x0000 }, /* R835 */ - { 0x0000, 0x0000 }, /* R836 */ - { 0x0000, 0x0000 }, /* R837 */ - { 0x0000, 0x0000 }, /* R838 */ - { 0x0000, 0x0000 }, /* R839 */ - { 0x0000, 0x0000 }, /* R840 */ - { 0x0000, 0x0000 }, /* R841 */ - { 0x0000, 0x0000 }, /* R842 */ - { 0x0000, 0x0000 }, /* R843 */ - { 0x0000, 0x0000 }, /* R844 */ - { 0x0000, 0x0000 }, /* R845 */ - { 0x0000, 0x0000 }, /* R846 */ - { 0x0000, 0x0000 }, /* R847 */ - { 0x0000, 0x0000 }, /* R848 */ - { 0x0000, 0x0000 }, /* R849 */ - { 0x0000, 0x0000 }, /* R850 */ - { 0x0000, 0x0000 }, /* R851 */ - { 0x0000, 0x0000 }, /* R852 */ - { 0x0000, 0x0000 }, /* R853 */ - { 0x0000, 0x0000 }, /* R854 */ - { 0x0000, 0x0000 }, /* R855 */ - { 0x0000, 0x0000 }, /* R856 */ - { 0x0000, 0x0000 }, /* R857 */ - { 0x0000, 0x0000 }, /* R858 */ - { 0x0000, 0x0000 }, /* R859 */ - { 0x0000, 0x0000 }, /* R860 */ - { 0x0000, 0x0000 }, /* R861 */ - { 0x0000, 0x0000 }, /* R862 */ - { 0x0000, 0x0000 }, /* R863 */ - { 0x0000, 0x0000 }, /* R864 */ - { 0x0000, 0x0000 }, /* R865 */ - { 0x0000, 0x0000 }, /* R866 */ - { 0x0000, 0x0000 }, /* R867 */ - { 0x0000, 0x0000 }, /* R868 */ - { 0x0000, 0x0000 }, /* R869 */ - { 0x0000, 0x0000 }, /* R870 */ - { 0x0000, 0x0000 }, /* R871 */ - { 0x0000, 0x0000 }, /* R872 */ - { 0x0000, 0x0000 }, /* R873 */ - { 0x0000, 0x0000 }, /* R874 */ - { 0x0000, 0x0000 }, /* R875 */ - { 0x0000, 0x0000 }, /* R876 */ - { 0x0000, 0x0000 }, /* R877 */ - { 0x0000, 0x0000 }, /* R878 */ - { 0x0000, 0x0000 }, /* R879 */ - { 0x0000, 0x0000 }, /* R880 */ - { 0x0000, 0x0000 }, /* R881 */ - { 0x0000, 0x0000 }, /* R882 */ - { 0x0000, 0x0000 }, /* R883 */ - { 0x0000, 0x0000 }, /* R884 */ - { 0x0000, 0x0000 }, /* R885 */ - { 0x0000, 0x0000 }, /* R886 */ - { 0x0000, 0x0000 }, /* R887 */ - { 0x0000, 0x0000 }, /* R888 */ - { 0x0000, 0x0000 }, /* R889 */ - { 0x0000, 0x0000 }, /* R890 */ - { 0x0000, 0x0000 }, /* R891 */ - { 0x0000, 0x0000 }, /* R892 */ - { 0x0000, 0x0000 }, /* R893 */ - { 0x0000, 0x0000 }, /* R894 */ - { 0x0000, 0x0000 }, /* R895 */ - { 0x0000, 0x0000 }, /* R896 */ - { 0x0000, 0x0000 }, /* R897 */ - { 0x0000, 0x0000 }, /* R898 */ - { 0x0000, 0x0000 }, /* R899 */ - { 0x0000, 0x0000 }, /* R900 */ - { 0x0000, 0x0000 }, /* R901 */ - { 0x0000, 0x0000 }, /* R902 */ - { 0x0000, 0x0000 }, /* R903 */ - { 0x0000, 0x0000 }, /* R904 */ - { 0x0000, 0x0000 }, /* R905 */ - { 0x0000, 0x0000 }, /* R906 */ - { 0x0000, 0x0000 }, /* R907 */ - { 0x0000, 0x0000 }, /* R908 */ - { 0x0000, 0x0000 }, /* R909 */ - { 0x0000, 0x0000 }, /* R910 */ - { 0x0000, 0x0000 }, /* R911 */ - { 0x0000, 0x0000 }, /* R912 */ - { 0x0000, 0x0000 }, /* R913 */ - { 0x0000, 0x0000 }, /* R914 */ - { 0x0000, 0x0000 }, /* R915 */ - { 0x0000, 0x0000 }, /* R916 */ - { 0x0000, 0x0000 }, /* R917 */ - { 0x0000, 0x0000 }, /* R918 */ - { 0x0000, 0x0000 }, /* R919 */ - { 0x0000, 0x0000 }, /* R920 */ - { 0x0000, 0x0000 }, /* R921 */ - { 0x0000, 0x0000 }, /* R922 */ - { 0x0000, 0x0000 }, /* R923 */ - 
{ 0x0000, 0x0000 }, /* R924 */ - { 0x0000, 0x0000 }, /* R925 */ - { 0x0000, 0x0000 }, /* R926 */ - { 0x0000, 0x0000 }, /* R927 */ - { 0x0000, 0x0000 }, /* R928 */ - { 0x0000, 0x0000 }, /* R929 */ - { 0x0000, 0x0000 }, /* R930 */ - { 0x0000, 0x0000 }, /* R931 */ - { 0x0000, 0x0000 }, /* R932 */ - { 0x0000, 0x0000 }, /* R933 */ - { 0x0000, 0x0000 }, /* R934 */ - { 0x0000, 0x0000 }, /* R935 */ - { 0x0000, 0x0000 }, /* R936 */ - { 0x0000, 0x0000 }, /* R937 */ - { 0x0000, 0x0000 }, /* R938 */ - { 0x0000, 0x0000 }, /* R939 */ - { 0x0000, 0x0000 }, /* R940 */ - { 0x0000, 0x0000 }, /* R941 */ - { 0x0000, 0x0000 }, /* R942 */ - { 0x0000, 0x0000 }, /* R943 */ - { 0x0000, 0x0000 }, /* R944 */ - { 0x0000, 0x0000 }, /* R945 */ - { 0x0000, 0x0000 }, /* R946 */ - { 0x0000, 0x0000 }, /* R947 */ - { 0x0000, 0x0000 }, /* R948 */ - { 0x0000, 0x0000 }, /* R949 */ - { 0x0000, 0x0000 }, /* R950 */ - { 0x0000, 0x0000 }, /* R951 */ - { 0x0000, 0x0000 }, /* R952 */ - { 0x0000, 0x0000 }, /* R953 */ - { 0x0000, 0x0000 }, /* R954 */ - { 0x0000, 0x0000 }, /* R955 */ - { 0x0000, 0x0000 }, /* R956 */ - { 0x0000, 0x0000 }, /* R957 */ - { 0x0000, 0x0000 }, /* R958 */ - { 0x0000, 0x0000 }, /* R959 */ - { 0x0000, 0x0000 }, /* R960 */ - { 0x0000, 0x0000 }, /* R961 */ - { 0x0000, 0x0000 }, /* R962 */ - { 0x0000, 0x0000 }, /* R963 */ - { 0x0000, 0x0000 }, /* R964 */ - { 0x0000, 0x0000 }, /* R965 */ - { 0x0000, 0x0000 }, /* R966 */ - { 0x0000, 0x0000 }, /* R967 */ - { 0x0000, 0x0000 }, /* R968 */ - { 0x0000, 0x0000 }, /* R969 */ - { 0x0000, 0x0000 }, /* R970 */ - { 0x0000, 0x0000 }, /* R971 */ - { 0x0000, 0x0000 }, /* R972 */ - { 0x0000, 0x0000 }, /* R973 */ - { 0x0000, 0x0000 }, /* R974 */ - { 0x0000, 0x0000 }, /* R975 */ - { 0x0000, 0x0000 }, /* R976 */ - { 0x0000, 0x0000 }, /* R977 */ - { 0x0000, 0x0000 }, /* R978 */ - { 0x0000, 0x0000 }, /* R979 */ - { 0x0000, 0x0000 }, /* R980 */ - { 0x0000, 0x0000 }, /* R981 */ - { 0x0000, 0x0000 }, /* R982 */ - { 0x0000, 0x0000 }, /* R983 */ - { 0x0000, 0x0000 }, /* R984 */ - { 0x0000, 0x0000 }, /* R985 */ - { 0x0000, 0x0000 }, /* R986 */ - { 0x0000, 0x0000 }, /* R987 */ - { 0x0000, 0x0000 }, /* R988 */ - { 0x0000, 0x0000 }, /* R989 */ - { 0x0000, 0x0000 }, /* R990 */ - { 0x0000, 0x0000 }, /* R991 */ - { 0x0000, 0x0000 }, /* R992 */ - { 0x0000, 0x0000 }, /* R993 */ - { 0x0000, 0x0000 }, /* R994 */ - { 0x0000, 0x0000 }, /* R995 */ - { 0x0000, 0x0000 }, /* R996 */ - { 0x0000, 0x0000 }, /* R997 */ - { 0x0000, 0x0000 }, /* R998 */ - { 0x0000, 0x0000 }, /* R999 */ - { 0x0000, 0x0000 }, /* R1000 */ - { 0x0000, 0x0000 }, /* R1001 */ - { 0x0000, 0x0000 }, /* R1002 */ - { 0x0000, 0x0000 }, /* R1003 */ - { 0x0000, 0x0000 }, /* R1004 */ - { 0x0000, 0x0000 }, /* R1005 */ - { 0x0000, 0x0000 }, /* R1006 */ - { 0x0000, 0x0000 }, /* R1007 */ - { 0x0000, 0x0000 }, /* R1008 */ - { 0x0000, 0x0000 }, /* R1009 */ - { 0x0000, 0x0000 }, /* R1010 */ - { 0x0000, 0x0000 }, /* R1011 */ - { 0x0000, 0x0000 }, /* R1012 */ - { 0x0000, 0x0000 }, /* R1013 */ - { 0x0000, 0x0000 }, /* R1014 */ - { 0x0000, 0x0000 }, /* R1015 */ - { 0x0000, 0x0000 }, /* R1016 */ - { 0x0000, 0x0000 }, /* R1017 */ - { 0x0000, 0x0000 }, /* R1018 */ - { 0x0000, 0x0000 }, /* R1019 */ - { 0x0000, 0x0000 }, /* R1020 */ - { 0x0000, 0x0000 }, /* R1021 */ - { 0x0000, 0x0000 }, /* R1022 */ - { 0x0000, 0x0000 }, /* R1023 */ - { 0x00FF, 0x01FF }, /* R1024 - AIF1 ADC1 Left Volume */ - { 0x00FF, 0x01FF }, /* R1025 - AIF1 ADC1 Right Volume */ - { 0x00FF, 0x01FF }, /* R1026 - AIF1 DAC1 Left Volume */ - { 0x00FF, 0x01FF }, /* R1027 - AIF1 DAC1 Right Volume 
*/ - { 0x00FF, 0x01FF }, /* R1028 - AIF1 ADC2 Left Volume */ - { 0x00FF, 0x01FF }, /* R1029 - AIF1 ADC2 Right Volume */ - { 0x00FF, 0x01FF }, /* R1030 - AIF1 DAC2 Left Volume */ - { 0x00FF, 0x01FF }, /* R1031 - AIF1 DAC2 Right Volume */ - { 0x0000, 0x0000 }, /* R1032 */ - { 0x0000, 0x0000 }, /* R1033 */ - { 0x0000, 0x0000 }, /* R1034 */ - { 0x0000, 0x0000 }, /* R1035 */ - { 0x0000, 0x0000 }, /* R1036 */ - { 0x0000, 0x0000 }, /* R1037 */ - { 0x0000, 0x0000 }, /* R1038 */ - { 0x0000, 0x0000 }, /* R1039 */ - { 0xF800, 0xF800 }, /* R1040 - AIF1 ADC1 Filters */ - { 0x7800, 0x7800 }, /* R1041 - AIF1 ADC2 Filters */ - { 0x0000, 0x0000 }, /* R1042 */ - { 0x0000, 0x0000 }, /* R1043 */ - { 0x0000, 0x0000 }, /* R1044 */ - { 0x0000, 0x0000 }, /* R1045 */ - { 0x0000, 0x0000 }, /* R1046 */ - { 0x0000, 0x0000 }, /* R1047 */ - { 0x0000, 0x0000 }, /* R1048 */ - { 0x0000, 0x0000 }, /* R1049 */ - { 0x0000, 0x0000 }, /* R1050 */ - { 0x0000, 0x0000 }, /* R1051 */ - { 0x0000, 0x0000 }, /* R1052 */ - { 0x0000, 0x0000 }, /* R1053 */ - { 0x0000, 0x0000 }, /* R1054 */ - { 0x0000, 0x0000 }, /* R1055 */ - { 0x02B6, 0x02B6 }, /* R1056 - AIF1 DAC1 Filters (1) */ - { 0x3F00, 0x3F00 }, /* R1057 - AIF1 DAC1 Filters (2) */ - { 0x02B6, 0x02B6 }, /* R1058 - AIF1 DAC2 Filters (1) */ - { 0x3F00, 0x3F00 }, /* R1059 - AIF1 DAC2 Filters (2) */ - { 0x0000, 0x0000 }, /* R1060 */ - { 0x0000, 0x0000 }, /* R1061 */ - { 0x0000, 0x0000 }, /* R1062 */ - { 0x0000, 0x0000 }, /* R1063 */ - { 0x0000, 0x0000 }, /* R1064 */ - { 0x0000, 0x0000 }, /* R1065 */ - { 0x0000, 0x0000 }, /* R1066 */ - { 0x0000, 0x0000 }, /* R1067 */ - { 0x0000, 0x0000 }, /* R1068 */ - { 0x0000, 0x0000 }, /* R1069 */ - { 0x0000, 0x0000 }, /* R1070 */ - { 0x0000, 0x0000 }, /* R1071 */ - { 0x0000, 0x0000 }, /* R1072 */ - { 0x0000, 0x0000 }, /* R1073 */ - { 0x0000, 0x0000 }, /* R1074 */ - { 0x0000, 0x0000 }, /* R1075 */ - { 0x0000, 0x0000 }, /* R1076 */ - { 0x0000, 0x0000 }, /* R1077 */ - { 0x0000, 0x0000 }, /* R1078 */ - { 0x0000, 0x0000 }, /* R1079 */ - { 0x0000, 0x0000 }, /* R1080 */ - { 0x0000, 0x0000 }, /* R1081 */ - { 0x0000, 0x0000 }, /* R1082 */ - { 0x0000, 0x0000 }, /* R1083 */ - { 0x0000, 0x0000 }, /* R1084 */ - { 0x0000, 0x0000 }, /* R1085 */ - { 0x0000, 0x0000 }, /* R1086 */ - { 0x0000, 0x0000 }, /* R1087 */ - { 0xFFFF, 0xFFFF }, /* R1088 - AIF1 DRC1 (1) */ - { 0x1FFF, 0x1FFF }, /* R1089 - AIF1 DRC1 (2) */ - { 0xFFFF, 0xFFFF }, /* R1090 - AIF1 DRC1 (3) */ - { 0x07FF, 0x07FF }, /* R1091 - AIF1 DRC1 (4) */ - { 0x03FF, 0x03FF }, /* R1092 - AIF1 DRC1 (5) */ - { 0x0000, 0x0000 }, /* R1093 */ - { 0x0000, 0x0000 }, /* R1094 */ - { 0x0000, 0x0000 }, /* R1095 */ - { 0x0000, 0x0000 }, /* R1096 */ - { 0x0000, 0x0000 }, /* R1097 */ - { 0x0000, 0x0000 }, /* R1098 */ - { 0x0000, 0x0000 }, /* R1099 */ - { 0x0000, 0x0000 }, /* R1100 */ - { 0x0000, 0x0000 }, /* R1101 */ - { 0x0000, 0x0000 }, /* R1102 */ - { 0x0000, 0x0000 }, /* R1103 */ - { 0xFFFF, 0xFFFF }, /* R1104 - AIF1 DRC2 (1) */ - { 0x1FFF, 0x1FFF }, /* R1105 - AIF1 DRC2 (2) */ - { 0xFFFF, 0xFFFF }, /* R1106 - AIF1 DRC2 (3) */ - { 0x07FF, 0x07FF }, /* R1107 - AIF1 DRC2 (4) */ - { 0x03FF, 0x03FF }, /* R1108 - AIF1 DRC2 (5) */ - { 0x0000, 0x0000 }, /* R1109 */ - { 0x0000, 0x0000 }, /* R1110 */ - { 0x0000, 0x0000 }, /* R1111 */ - { 0x0000, 0x0000 }, /* R1112 */ - { 0x0000, 0x0000 }, /* R1113 */ - { 0x0000, 0x0000 }, /* R1114 */ - { 0x0000, 0x0000 }, /* R1115 */ - { 0x0000, 0x0000 }, /* R1116 */ - { 0x0000, 0x0000 }, /* R1117 */ - { 0x0000, 0x0000 }, /* R1118 */ - { 0x0000, 0x0000 }, /* R1119 */ - { 0x0000, 0x0000 }, /* 
R1120 */ - { 0x0000, 0x0000 }, /* R1121 */ - { 0x0000, 0x0000 }, /* R1122 */ - { 0x0000, 0x0000 }, /* R1123 */ - { 0x0000, 0x0000 }, /* R1124 */ - { 0x0000, 0x0000 }, /* R1125 */ - { 0x0000, 0x0000 }, /* R1126 */ - { 0x0000, 0x0000 }, /* R1127 */ - { 0x0000, 0x0000 }, /* R1128 */ - { 0x0000, 0x0000 }, /* R1129 */ - { 0x0000, 0x0000 }, /* R1130 */ - { 0x0000, 0x0000 }, /* R1131 */ - { 0x0000, 0x0000 }, /* R1132 */ - { 0x0000, 0x0000 }, /* R1133 */ - { 0x0000, 0x0000 }, /* R1134 */ - { 0x0000, 0x0000 }, /* R1135 */ - { 0x0000, 0x0000 }, /* R1136 */ - { 0x0000, 0x0000 }, /* R1137 */ - { 0x0000, 0x0000 }, /* R1138 */ - { 0x0000, 0x0000 }, /* R1139 */ - { 0x0000, 0x0000 }, /* R1140 */ - { 0x0000, 0x0000 }, /* R1141 */ - { 0x0000, 0x0000 }, /* R1142 */ - { 0x0000, 0x0000 }, /* R1143 */ - { 0x0000, 0x0000 }, /* R1144 */ - { 0x0000, 0x0000 }, /* R1145 */ - { 0x0000, 0x0000 }, /* R1146 */ - { 0x0000, 0x0000 }, /* R1147 */ - { 0x0000, 0x0000 }, /* R1148 */ - { 0x0000, 0x0000 }, /* R1149 */ - { 0x0000, 0x0000 }, /* R1150 */ - { 0x0000, 0x0000 }, /* R1151 */ - { 0xFFFF, 0xFFFF }, /* R1152 - AIF1 DAC1 EQ Gains (1) */ - { 0xFFC0, 0xFFC0 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */ - { 0xFFFF, 0xFFFF }, /* R1154 - AIF1 DAC1 EQ Band 1 A */ - { 0xFFFF, 0xFFFF }, /* R1155 - AIF1 DAC1 EQ Band 1 B */ - { 0xFFFF, 0xFFFF }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */ - { 0xFFFF, 0xFFFF }, /* R1157 - AIF1 DAC1 EQ Band 2 A */ - { 0xFFFF, 0xFFFF }, /* R1158 - AIF1 DAC1 EQ Band 2 B */ - { 0xFFFF, 0xFFFF }, /* R1159 - AIF1 DAC1 EQ Band 2 C */ - { 0xFFFF, 0xFFFF }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */ - { 0xFFFF, 0xFFFF }, /* R1161 - AIF1 DAC1 EQ Band 3 A */ - { 0xFFFF, 0xFFFF }, /* R1162 - AIF1 DAC1 EQ Band 3 B */ - { 0xFFFF, 0xFFFF }, /* R1163 - AIF1 DAC1 EQ Band 3 C */ - { 0xFFFF, 0xFFFF }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */ - { 0xFFFF, 0xFFFF }, /* R1165 - AIF1 DAC1 EQ Band 4 A */ - { 0xFFFF, 0xFFFF }, /* R1166 - AIF1 DAC1 EQ Band 4 B */ - { 0xFFFF, 0xFFFF }, /* R1167 - AIF1 DAC1 EQ Band 4 C */ - { 0xFFFF, 0xFFFF }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */ - { 0xFFFF, 0xFFFF }, /* R1169 - AIF1 DAC1 EQ Band 5 A */ - { 0xFFFF, 0xFFFF }, /* R1170 - AIF1 DAC1 EQ Band 5 B */ - { 0xFFFF, 0xFFFF }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */ - { 0x0000, 0x0000 }, /* R1172 */ - { 0x0000, 0x0000 }, /* R1173 */ - { 0x0000, 0x0000 }, /* R1174 */ - { 0x0000, 0x0000 }, /* R1175 */ - { 0x0000, 0x0000 }, /* R1176 */ - { 0x0000, 0x0000 }, /* R1177 */ - { 0x0000, 0x0000 }, /* R1178 */ - { 0x0000, 0x0000 }, /* R1179 */ - { 0x0000, 0x0000 }, /* R1180 */ - { 0x0000, 0x0000 }, /* R1181 */ - { 0x0000, 0x0000 }, /* R1182 */ - { 0x0000, 0x0000 }, /* R1183 */ - { 0xFFFF, 0xFFFF }, /* R1184 - AIF1 DAC2 EQ Gains (1) */ - { 0xFFC0, 0xFFC0 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */ - { 0xFFFF, 0xFFFF }, /* R1186 - AIF1 DAC2 EQ Band 1 A */ - { 0xFFFF, 0xFFFF }, /* R1187 - AIF1 DAC2 EQ Band 1 B */ - { 0xFFFF, 0xFFFF }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */ - { 0xFFFF, 0xFFFF }, /* R1189 - AIF1 DAC2 EQ Band 2 A */ - { 0xFFFF, 0xFFFF }, /* R1190 - AIF1 DAC2 EQ Band 2 B */ - { 0xFFFF, 0xFFFF }, /* R1191 - AIF1 DAC2 EQ Band 2 C */ - { 0xFFFF, 0xFFFF }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */ - { 0xFFFF, 0xFFFF }, /* R1193 - AIF1 DAC2 EQ Band 3 A */ - { 0xFFFF, 0xFFFF }, /* R1194 - AIF1 DAC2 EQ Band 3 B */ - { 0xFFFF, 0xFFFF }, /* R1195 - AIF1 DAC2 EQ Band 3 C */ - { 0xFFFF, 0xFFFF }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */ - { 0xFFFF, 0xFFFF }, /* R1197 - AIF1 DAC2 EQ Band 4 A */ - { 0xFFFF, 0xFFFF }, /* R1198 - AIF1 DAC2 EQ Band 4 B */ - { 0xFFFF, 0xFFFF }, /* R1199 - AIF1 
DAC2 EQ Band 4 C */ - { 0xFFFF, 0xFFFF }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */ - { 0xFFFF, 0xFFFF }, /* R1201 - AIF1 DAC2 EQ Band 5 A */ - { 0xFFFF, 0xFFFF }, /* R1202 - AIF1 DAC2 EQ Band 5 B */ - { 0xFFFF, 0xFFFF }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */ - { 0x0000, 0x0000 }, /* R1204 */ - { 0x0000, 0x0000 }, /* R1205 */ - { 0x0000, 0x0000 }, /* R1206 */ - { 0x0000, 0x0000 }, /* R1207 */ - { 0x0000, 0x0000 }, /* R1208 */ - { 0x0000, 0x0000 }, /* R1209 */ - { 0x0000, 0x0000 }, /* R1210 */ - { 0x0000, 0x0000 }, /* R1211 */ - { 0x0000, 0x0000 }, /* R1212 */ - { 0x0000, 0x0000 }, /* R1213 */ - { 0x0000, 0x0000 }, /* R1214 */ - { 0x0000, 0x0000 }, /* R1215 */ - { 0x0000, 0x0000 }, /* R1216 */ - { 0x0000, 0x0000 }, /* R1217 */ - { 0x0000, 0x0000 }, /* R1218 */ - { 0x0000, 0x0000 }, /* R1219 */ - { 0x0000, 0x0000 }, /* R1220 */ - { 0x0000, 0x0000 }, /* R1221 */ - { 0x0000, 0x0000 }, /* R1222 */ - { 0x0000, 0x0000 }, /* R1223 */ - { 0x0000, 0x0000 }, /* R1224 */ - { 0x0000, 0x0000 }, /* R1225 */ - { 0x0000, 0x0000 }, /* R1226 */ - { 0x0000, 0x0000 }, /* R1227 */ - { 0x0000, 0x0000 }, /* R1228 */ - { 0x0000, 0x0000 }, /* R1229 */ - { 0x0000, 0x0000 }, /* R1230 */ - { 0x0000, 0x0000 }, /* R1231 */ - { 0x0000, 0x0000 }, /* R1232 */ - { 0x0000, 0x0000 }, /* R1233 */ - { 0x0000, 0x0000 }, /* R1234 */ - { 0x0000, 0x0000 }, /* R1235 */ - { 0x0000, 0x0000 }, /* R1236 */ - { 0x0000, 0x0000 }, /* R1237 */ - { 0x0000, 0x0000 }, /* R1238 */ - { 0x0000, 0x0000 }, /* R1239 */ - { 0x0000, 0x0000 }, /* R1240 */ - { 0x0000, 0x0000 }, /* R1241 */ - { 0x0000, 0x0000 }, /* R1242 */ - { 0x0000, 0x0000 }, /* R1243 */ - { 0x0000, 0x0000 }, /* R1244 */ - { 0x0000, 0x0000 }, /* R1245 */ - { 0x0000, 0x0000 }, /* R1246 */ - { 0x0000, 0x0000 }, /* R1247 */ - { 0x0000, 0x0000 }, /* R1248 */ - { 0x0000, 0x0000 }, /* R1249 */ - { 0x0000, 0x0000 }, /* R1250 */ - { 0x0000, 0x0000 }, /* R1251 */ - { 0x0000, 0x0000 }, /* R1252 */ - { 0x0000, 0x0000 }, /* R1253 */ - { 0x0000, 0x0000 }, /* R1254 */ - { 0x0000, 0x0000 }, /* R1255 */ - { 0x0000, 0x0000 }, /* R1256 */ - { 0x0000, 0x0000 }, /* R1257 */ - { 0x0000, 0x0000 }, /* R1258 */ - { 0x0000, 0x0000 }, /* R1259 */ - { 0x0000, 0x0000 }, /* R1260 */ - { 0x0000, 0x0000 }, /* R1261 */ - { 0x0000, 0x0000 }, /* R1262 */ - { 0x0000, 0x0000 }, /* R1263 */ - { 0x0000, 0x0000 }, /* R1264 */ - { 0x0000, 0x0000 }, /* R1265 */ - { 0x0000, 0x0000 }, /* R1266 */ - { 0x0000, 0x0000 }, /* R1267 */ - { 0x0000, 0x0000 }, /* R1268 */ - { 0x0000, 0x0000 }, /* R1269 */ - { 0x0000, 0x0000 }, /* R1270 */ - { 0x0000, 0x0000 }, /* R1271 */ - { 0x0000, 0x0000 }, /* R1272 */ - { 0x0000, 0x0000 }, /* R1273 */ - { 0x0000, 0x0000 }, /* R1274 */ - { 0x0000, 0x0000 }, /* R1275 */ - { 0x0000, 0x0000 }, /* R1276 */ - { 0x0000, 0x0000 }, /* R1277 */ - { 0x0000, 0x0000 }, /* R1278 */ - { 0x0000, 0x0000 }, /* R1279 */ - { 0x00FF, 0x01FF }, /* R1280 - AIF2 ADC Left Volume */ - { 0x00FF, 0x01FF }, /* R1281 - AIF2 ADC Right Volume */ - { 0x00FF, 0x01FF }, /* R1282 - AIF2 DAC Left Volume */ - { 0x00FF, 0x01FF }, /* R1283 - AIF2 DAC Right Volume */ - { 0x0000, 0x0000 }, /* R1284 */ - { 0x0000, 0x0000 }, /* R1285 */ - { 0x0000, 0x0000 }, /* R1286 */ - { 0x0000, 0x0000 }, /* R1287 */ - { 0x0000, 0x0000 }, /* R1288 */ - { 0x0000, 0x0000 }, /* R1289 */ - { 0x0000, 0x0000 }, /* R1290 */ - { 0x0000, 0x0000 }, /* R1291 */ - { 0x0000, 0x0000 }, /* R1292 */ - { 0x0000, 0x0000 }, /* R1293 */ - { 0x0000, 0x0000 }, /* R1294 */ - { 0x0000, 0x0000 }, /* R1295 */ - { 0xF800, 0xF800 }, /* R1296 - AIF2 ADC Filters */ - { 0x0000, 0x0000 }, /* 
R1297 */ - { 0x0000, 0x0000 }, /* R1298 */ - { 0x0000, 0x0000 }, /* R1299 */ - { 0x0000, 0x0000 }, /* R1300 */ - { 0x0000, 0x0000 }, /* R1301 */ - { 0x0000, 0x0000 }, /* R1302 */ - { 0x0000, 0x0000 }, /* R1303 */ - { 0x0000, 0x0000 }, /* R1304 */ - { 0x0000, 0x0000 }, /* R1305 */ - { 0x0000, 0x0000 }, /* R1306 */ - { 0x0000, 0x0000 }, /* R1307 */ - { 0x0000, 0x0000 }, /* R1308 */ - { 0x0000, 0x0000 }, /* R1309 */ - { 0x0000, 0x0000 }, /* R1310 */ - { 0x0000, 0x0000 }, /* R1311 */ - { 0x02B6, 0x02B6 }, /* R1312 - AIF2 DAC Filters (1) */ - { 0x3F00, 0x3F00 }, /* R1313 - AIF2 DAC Filters (2) */ - { 0x0000, 0x0000 }, /* R1314 */ - { 0x0000, 0x0000 }, /* R1315 */ - { 0x0000, 0x0000 }, /* R1316 */ - { 0x0000, 0x0000 }, /* R1317 */ - { 0x0000, 0x0000 }, /* R1318 */ - { 0x0000, 0x0000 }, /* R1319 */ - { 0x0000, 0x0000 }, /* R1320 */ - { 0x0000, 0x0000 }, /* R1321 */ - { 0x0000, 0x0000 }, /* R1322 */ - { 0x0000, 0x0000 }, /* R1323 */ - { 0x0000, 0x0000 }, /* R1324 */ - { 0x0000, 0x0000 }, /* R1325 */ - { 0x0000, 0x0000 }, /* R1326 */ - { 0x0000, 0x0000 }, /* R1327 */ - { 0x0000, 0x0000 }, /* R1328 */ - { 0x0000, 0x0000 }, /* R1329 */ - { 0x0000, 0x0000 }, /* R1330 */ - { 0x0000, 0x0000 }, /* R1331 */ - { 0x0000, 0x0000 }, /* R1332 */ - { 0x0000, 0x0000 }, /* R1333 */ - { 0x0000, 0x0000 }, /* R1334 */ - { 0x0000, 0x0000 }, /* R1335 */ - { 0x0000, 0x0000 }, /* R1336 */ - { 0x0000, 0x0000 }, /* R1337 */ - { 0x0000, 0x0000 }, /* R1338 */ - { 0x0000, 0x0000 }, /* R1339 */ - { 0x0000, 0x0000 }, /* R1340 */ - { 0x0000, 0x0000 }, /* R1341 */ - { 0x0000, 0x0000 }, /* R1342 */ - { 0x0000, 0x0000 }, /* R1343 */ - { 0xFFFF, 0xFFFF }, /* R1344 - AIF2 DRC (1) */ - { 0x1FFF, 0x1FFF }, /* R1345 - AIF2 DRC (2) */ - { 0xFFFF, 0xFFFF }, /* R1346 - AIF2 DRC (3) */ - { 0x07FF, 0x07FF }, /* R1347 - AIF2 DRC (4) */ - { 0x03FF, 0x03FF }, /* R1348 - AIF2 DRC (5) */ - { 0x0000, 0x0000 }, /* R1349 */ - { 0x0000, 0x0000 }, /* R1350 */ - { 0x0000, 0x0000 }, /* R1351 */ - { 0x0000, 0x0000 }, /* R1352 */ - { 0x0000, 0x0000 }, /* R1353 */ - { 0x0000, 0x0000 }, /* R1354 */ - { 0x0000, 0x0000 }, /* R1355 */ - { 0x0000, 0x0000 }, /* R1356 */ - { 0x0000, 0x0000 }, /* R1357 */ - { 0x0000, 0x0000 }, /* R1358 */ - { 0x0000, 0x0000 }, /* R1359 */ - { 0x0000, 0x0000 }, /* R1360 */ - { 0x0000, 0x0000 }, /* R1361 */ - { 0x0000, 0x0000 }, /* R1362 */ - { 0x0000, 0x0000 }, /* R1363 */ - { 0x0000, 0x0000 }, /* R1364 */ - { 0x0000, 0x0000 }, /* R1365 */ - { 0x0000, 0x0000 }, /* R1366 */ - { 0x0000, 0x0000 }, /* R1367 */ - { 0x0000, 0x0000 }, /* R1368 */ - { 0x0000, 0x0000 }, /* R1369 */ - { 0x0000, 0x0000 }, /* R1370 */ - { 0x0000, 0x0000 }, /* R1371 */ - { 0x0000, 0x0000 }, /* R1372 */ - { 0x0000, 0x0000 }, /* R1373 */ - { 0x0000, 0x0000 }, /* R1374 */ - { 0x0000, 0x0000 }, /* R1375 */ - { 0x0000, 0x0000 }, /* R1376 */ - { 0x0000, 0x0000 }, /* R1377 */ - { 0x0000, 0x0000 }, /* R1378 */ - { 0x0000, 0x0000 }, /* R1379 */ - { 0x0000, 0x0000 }, /* R1380 */ - { 0x0000, 0x0000 }, /* R1381 */ - { 0x0000, 0x0000 }, /* R1382 */ - { 0x0000, 0x0000 }, /* R1383 */ - { 0x0000, 0x0000 }, /* R1384 */ - { 0x0000, 0x0000 }, /* R1385 */ - { 0x0000, 0x0000 }, /* R1386 */ - { 0x0000, 0x0000 }, /* R1387 */ - { 0x0000, 0x0000 }, /* R1388 */ - { 0x0000, 0x0000 }, /* R1389 */ - { 0x0000, 0x0000 }, /* R1390 */ - { 0x0000, 0x0000 }, /* R1391 */ - { 0x0000, 0x0000 }, /* R1392 */ - { 0x0000, 0x0000 }, /* R1393 */ - { 0x0000, 0x0000 }, /* R1394 */ - { 0x0000, 0x0000 }, /* R1395 */ - { 0x0000, 0x0000 }, /* R1396 */ - { 0x0000, 0x0000 }, /* R1397 */ - { 0x0000, 0x0000 }, /* 
R1398 */ - { 0x0000, 0x0000 }, /* R1399 */ - { 0x0000, 0x0000 }, /* R1400 */ - { 0x0000, 0x0000 }, /* R1401 */ - { 0x0000, 0x0000 }, /* R1402 */ - { 0x0000, 0x0000 }, /* R1403 */ - { 0x0000, 0x0000 }, /* R1404 */ - { 0x0000, 0x0000 }, /* R1405 */ - { 0x0000, 0x0000 }, /* R1406 */ - { 0x0000, 0x0000 }, /* R1407 */ - { 0xFFFF, 0xFFFF }, /* R1408 - AIF2 EQ Gains (1) */ - { 0xFFC0, 0xFFC0 }, /* R1409 - AIF2 EQ Gains (2) */ - { 0xFFFF, 0xFFFF }, /* R1410 - AIF2 EQ Band 1 A */ - { 0xFFFF, 0xFFFF }, /* R1411 - AIF2 EQ Band 1 B */ - { 0xFFFF, 0xFFFF }, /* R1412 - AIF2 EQ Band 1 PG */ - { 0xFFFF, 0xFFFF }, /* R1413 - AIF2 EQ Band 2 A */ - { 0xFFFF, 0xFFFF }, /* R1414 - AIF2 EQ Band 2 B */ - { 0xFFFF, 0xFFFF }, /* R1415 - AIF2 EQ Band 2 C */ - { 0xFFFF, 0xFFFF }, /* R1416 - AIF2 EQ Band 2 PG */ - { 0xFFFF, 0xFFFF }, /* R1417 - AIF2 EQ Band 3 A */ - { 0xFFFF, 0xFFFF }, /* R1418 - AIF2 EQ Band 3 B */ - { 0xFFFF, 0xFFFF }, /* R1419 - AIF2 EQ Band 3 C */ - { 0xFFFF, 0xFFFF }, /* R1420 - AIF2 EQ Band 3 PG */ - { 0xFFFF, 0xFFFF }, /* R1421 - AIF2 EQ Band 4 A */ - { 0xFFFF, 0xFFFF }, /* R1422 - AIF2 EQ Band 4 B */ - { 0xFFFF, 0xFFFF }, /* R1423 - AIF2 EQ Band 4 C */ - { 0xFFFF, 0xFFFF }, /* R1424 - AIF2 EQ Band 4 PG */ - { 0xFFFF, 0xFFFF }, /* R1425 - AIF2 EQ Band 5 A */ - { 0xFFFF, 0xFFFF }, /* R1426 - AIF2 EQ Band 5 B */ - { 0xFFFF, 0xFFFF }, /* R1427 - AIF2 EQ Band 5 PG */ - { 0x0000, 0x0000 }, /* R1428 */ - { 0x0000, 0x0000 }, /* R1429 */ - { 0x0000, 0x0000 }, /* R1430 */ - { 0x0000, 0x0000 }, /* R1431 */ - { 0x0000, 0x0000 }, /* R1432 */ - { 0x0000, 0x0000 }, /* R1433 */ - { 0x0000, 0x0000 }, /* R1434 */ - { 0x0000, 0x0000 }, /* R1435 */ - { 0x0000, 0x0000 }, /* R1436 */ - { 0x0000, 0x0000 }, /* R1437 */ - { 0x0000, 0x0000 }, /* R1438 */ - { 0x0000, 0x0000 }, /* R1439 */ - { 0x0000, 0x0000 }, /* R1440 */ - { 0x0000, 0x0000 }, /* R1441 */ - { 0x0000, 0x0000 }, /* R1442 */ - { 0x0000, 0x0000 }, /* R1443 */ - { 0x0000, 0x0000 }, /* R1444 */ - { 0x0000, 0x0000 }, /* R1445 */ - { 0x0000, 0x0000 }, /* R1446 */ - { 0x0000, 0x0000 }, /* R1447 */ - { 0x0000, 0x0000 }, /* R1448 */ - { 0x0000, 0x0000 }, /* R1449 */ - { 0x0000, 0x0000 }, /* R1450 */ - { 0x0000, 0x0000 }, /* R1451 */ - { 0x0000, 0x0000 }, /* R1452 */ - { 0x0000, 0x0000 }, /* R1453 */ - { 0x0000, 0x0000 }, /* R1454 */ - { 0x0000, 0x0000 }, /* R1455 */ - { 0x0000, 0x0000 }, /* R1456 */ - { 0x0000, 0x0000 }, /* R1457 */ - { 0x0000, 0x0000 }, /* R1458 */ - { 0x0000, 0x0000 }, /* R1459 */ - { 0x0000, 0x0000 }, /* R1460 */ - { 0x0000, 0x0000 }, /* R1461 */ - { 0x0000, 0x0000 }, /* R1462 */ - { 0x0000, 0x0000 }, /* R1463 */ - { 0x0000, 0x0000 }, /* R1464 */ - { 0x0000, 0x0000 }, /* R1465 */ - { 0x0000, 0x0000 }, /* R1466 */ - { 0x0000, 0x0000 }, /* R1467 */ - { 0x0000, 0x0000 }, /* R1468 */ - { 0x0000, 0x0000 }, /* R1469 */ - { 0x0000, 0x0000 }, /* R1470 */ - { 0x0000, 0x0000 }, /* R1471 */ - { 0x0000, 0x0000 }, /* R1472 */ - { 0x0000, 0x0000 }, /* R1473 */ - { 0x0000, 0x0000 }, /* R1474 */ - { 0x0000, 0x0000 }, /* R1475 */ - { 0x0000, 0x0000 }, /* R1476 */ - { 0x0000, 0x0000 }, /* R1477 */ - { 0x0000, 0x0000 }, /* R1478 */ - { 0x0000, 0x0000 }, /* R1479 */ - { 0x0000, 0x0000 }, /* R1480 */ - { 0x0000, 0x0000 }, /* R1481 */ - { 0x0000, 0x0000 }, /* R1482 */ - { 0x0000, 0x0000 }, /* R1483 */ - { 0x0000, 0x0000 }, /* R1484 */ - { 0x0000, 0x0000 }, /* R1485 */ - { 0x0000, 0x0000 }, /* R1486 */ - { 0x0000, 0x0000 }, /* R1487 */ - { 0x0000, 0x0000 }, /* R1488 */ - { 0x0000, 0x0000 }, /* R1489 */ - { 0x0000, 0x0000 }, /* R1490 */ - { 0x0000, 0x0000 }, /* R1491 
*/ - { 0x0000, 0x0000 }, /* R1492 */ - { 0x0000, 0x0000 }, /* R1493 */ - { 0x0000, 0x0000 }, /* R1494 */ - { 0x0000, 0x0000 }, /* R1495 */ - { 0x0000, 0x0000 }, /* R1496 */ - { 0x0000, 0x0000 }, /* R1497 */ - { 0x0000, 0x0000 }, /* R1498 */ - { 0x0000, 0x0000 }, /* R1499 */ - { 0x0000, 0x0000 }, /* R1500 */ - { 0x0000, 0x0000 }, /* R1501 */ - { 0x0000, 0x0000 }, /* R1502 */ - { 0x0000, 0x0000 }, /* R1503 */ - { 0x0000, 0x0000 }, /* R1504 */ - { 0x0000, 0x0000 }, /* R1505 */ - { 0x0000, 0x0000 }, /* R1506 */ - { 0x0000, 0x0000 }, /* R1507 */ - { 0x0000, 0x0000 }, /* R1508 */ - { 0x0000, 0x0000 }, /* R1509 */ - { 0x0000, 0x0000 }, /* R1510 */ - { 0x0000, 0x0000 }, /* R1511 */ - { 0x0000, 0x0000 }, /* R1512 */ - { 0x0000, 0x0000 }, /* R1513 */ - { 0x0000, 0x0000 }, /* R1514 */ - { 0x0000, 0x0000 }, /* R1515 */ - { 0x0000, 0x0000 }, /* R1516 */ - { 0x0000, 0x0000 }, /* R1517 */ - { 0x0000, 0x0000 }, /* R1518 */ - { 0x0000, 0x0000 }, /* R1519 */ - { 0x0000, 0x0000 }, /* R1520 */ - { 0x0000, 0x0000 }, /* R1521 */ - { 0x0000, 0x0000 }, /* R1522 */ - { 0x0000, 0x0000 }, /* R1523 */ - { 0x0000, 0x0000 }, /* R1524 */ - { 0x0000, 0x0000 }, /* R1525 */ - { 0x0000, 0x0000 }, /* R1526 */ - { 0x0000, 0x0000 }, /* R1527 */ - { 0x0000, 0x0000 }, /* R1528 */ - { 0x0000, 0x0000 }, /* R1529 */ - { 0x0000, 0x0000 }, /* R1530 */ - { 0x0000, 0x0000 }, /* R1531 */ - { 0x0000, 0x0000 }, /* R1532 */ - { 0x0000, 0x0000 }, /* R1533 */ - { 0x0000, 0x0000 }, /* R1534 */ - { 0x0000, 0x0000 }, /* R1535 */ - { 0x01EF, 0x01EF }, /* R1536 - DAC1 Mixer Volumes */ - { 0x0037, 0x0037 }, /* R1537 - DAC1 Left Mixer Routing */ - { 0x0037, 0x0037 }, /* R1538 - DAC1 Right Mixer Routing */ - { 0x01EF, 0x01EF }, /* R1539 - DAC2 Mixer Volumes */ - { 0x0037, 0x0037 }, /* R1540 - DAC2 Left Mixer Routing */ - { 0x0037, 0x0037 }, /* R1541 - DAC2 Right Mixer Routing */ - { 0x0003, 0x0003 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */ - { 0x0003, 0x0003 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */ - { 0x0003, 0x0003 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */ - { 0x0003, 0x0003 }, /* R1545 - AIF1 ADC2 Right mixer Routing */ - { 0x0000, 0x0000 }, /* R1546 */ - { 0x0000, 0x0000 }, /* R1547 */ - { 0x0000, 0x0000 }, /* R1548 */ - { 0x0000, 0x0000 }, /* R1549 */ - { 0x0000, 0x0000 }, /* R1550 */ - { 0x0000, 0x0000 }, /* R1551 */ - { 0x02FF, 0x03FF }, /* R1552 - DAC1 Left Volume */ - { 0x02FF, 0x03FF }, /* R1553 - DAC1 Right Volume */ - { 0x02FF, 0x03FF }, /* R1554 - DAC2 Left Volume */ - { 0x02FF, 0x03FF }, /* R1555 - DAC2 Right Volume */ - { 0x0003, 0x0003 }, /* R1556 - DAC Softmute */ - { 0x0000, 0x0000 }, /* R1557 */ - { 0x0000, 0x0000 }, /* R1558 */ - { 0x0000, 0x0000 }, /* R1559 */ - { 0x0000, 0x0000 }, /* R1560 */ - { 0x0000, 0x0000 }, /* R1561 */ - { 0x0000, 0x0000 }, /* R1562 */ - { 0x0000, 0x0000 }, /* R1563 */ - { 0x0000, 0x0000 }, /* R1564 */ - { 0x0000, 0x0000 }, /* R1565 */ - { 0x0000, 0x0000 }, /* R1566 */ - { 0x0000, 0x0000 }, /* R1567 */ - { 0x0003, 0x0003 }, /* R1568 - Oversampling */ - { 0x03C3, 0x03C3 }, /* R1569 - Sidetone */ -}; - static int wm8994_readable(unsigned int reg) { switch (reg) { @@ -1696,14 +131,14 @@ static int wm8994_readable(unsigned int reg) break; } - if (reg >= ARRAY_SIZE(access_masks)) + if (reg >= WM8994_CACHE_SIZE) return 0; - return access_masks[reg].readable != 0; + return wm8994_access_masks[reg].readable != 0; } static int wm8994_volatile(unsigned int reg) { - if (reg >= WM8994_REG_CACHE_SIZE) + if (reg >= WM8994_CACHE_SIZE) return 1; switch (reg) { @@ -1714,6 +149,8 @@ static int 
wm8994_volatile(unsigned int reg) case WM8994_RATE_STATUS: case WM8994_LDO_1: case WM8994_LDO_2: + case WM8958_DSP2_EXECCONTROL: + case WM8958_MIC_DETECT_3: return 1; default: return 0; @@ -1723,14 +160,16 @@ static int wm8994_volatile(unsigned int reg) static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + int ret; BUG_ON(reg > WM8994_MAX_REGISTER); - if (!wm8994_volatile(reg)) - wm8994->reg_cache[reg] = value; - - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); + if (!wm8994_volatile(reg)) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret != 0) + dev_err(codec->dev, "Cache write to %x failed: %d\n", + reg, ret); + } return wm8994_reg_write(codec->control_data, reg, value); } @@ -1738,14 +177,22 @@ static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg, static unsigned int wm8994_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *reg_cache = codec->reg_cache; + unsigned int val; + int ret; BUG_ON(reg > WM8994_MAX_REGISTER); - if (wm8994_volatile(reg)) - return wm8994_reg_read(codec->control_data, reg); - else - return reg_cache[reg]; + if (!wm8994_volatile(reg) && wm8994_readable(reg) && + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_read(codec, reg, &val); + if (ret >= 0) + return val; + else + dev_err(codec->dev, "Cache read from %x failed: %d\n", + reg, ret); + } + + return wm8994_reg_read(codec->control_data, reg); } static int configure_aif_clock(struct snd_soc_codec *codec, int aif) @@ -1837,7 +284,7 @@ static int configure_clock(struct snd_soc_codec *codec) snd_soc_update_bits(codec, WM8994_CLOCKING_1, WM8994_SYSCLK_SRC, new); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(&codec->dapm); return 0; } @@ -1864,6 +311,19 @@ static const char *sidetone_hpf_text[] = { static const struct soc_enum sidetone_hpf = SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text); +static const char *adc_hpf_text[] = { + "HiFi", "Voice 1", "Voice 2", "Voice 3" +}; + +static const struct soc_enum aif1adc1_hpf = + SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text); + +static const struct soc_enum aif1adc2_hpf = + SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text); + +static const struct soc_enum aif2adc_hpf = + SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text); + static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0); @@ -2071,21 +531,252 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol, return 0; } -static const char *aifdac_src_text[] = { +static const char *aif_chan_src_text[] = { "Left", "Right" }; +static const struct soc_enum aif1adcl_src = + SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text); + +static const struct soc_enum aif1adcr_src = + SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text); + +static const struct soc_enum aif2adcl_src = + SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text); + +static const struct soc_enum aif2adcr_src = + SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text); + static const struct soc_enum aif1dacl_src = - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aifdac_src_text); + SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text); static const struct soc_enum aif1dacr_src = - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aifdac_src_text); + 
SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text); static const struct soc_enum aif2dacl_src = - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aifdac_src_text); + SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text); static const struct soc_enum aif2dacr_src = - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aifdac_src_text); + SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text); + +static const char *osr_text[] = { + "Low Power", "High Performance", +}; + +static const struct soc_enum dac_osr = + SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text); + +static const struct soc_enum adc_osr = + SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text); + +static void wm8958_mbc_apply(struct snd_soc_codec *codec, int mbc, int start) +{ + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994_pdata *pdata = wm8994->pdata; + int pwr_reg = snd_soc_read(codec, WM8994_POWER_MANAGEMENT_5); + int ena, reg, aif, i; + + switch (mbc) { + case 0: + pwr_reg &= (WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA); + aif = 0; + break; + case 1: + pwr_reg &= (WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA); + aif = 0; + break; + case 2: + pwr_reg &= (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA); + aif = 1; + break; + default: + BUG(); + return; + } + + /* We can only enable the MBC if the AIF is enabled and we + * want it to be enabled. */ + ena = pwr_reg && wm8994->mbc_ena[mbc]; + + reg = snd_soc_read(codec, WM8958_DSP2_PROGRAM); + + dev_dbg(codec->dev, "MBC %d startup: %d, power: %x, DSP: %x\n", + mbc, start, pwr_reg, reg); + + if (start && ena) { + /* If the DSP is already running then noop */ + if (reg & WM8958_DSP2_ENA) + return; + + /* Switch the clock over to the appropriate AIF */ + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8958_DSP2CLK_SRC | WM8958_DSP2CLK_ENA, + aif << WM8958_DSP2CLK_SRC_SHIFT | + WM8958_DSP2CLK_ENA); + + snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM, + WM8958_DSP2_ENA, WM8958_DSP2_ENA); + + /* If we've got user supplied MBC settings use them */ + if (pdata && pdata->num_mbc_cfgs) { + struct wm8958_mbc_cfg *cfg + = &pdata->mbc_cfgs[wm8994->mbc_cfg]; + + for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++) + snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1, + cfg->coeff_regs[i]); + + for (i = 0; i < ARRAY_SIZE(cfg->cutoff_regs); i++) + snd_soc_write(codec, + i + WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1, + cfg->cutoff_regs[i]); + } + + /* Run the DSP */ + snd_soc_write(codec, WM8958_DSP2_EXECCONTROL, + WM8958_DSP2_RUNR); + + /* And we're off! 
*/ + snd_soc_update_bits(codec, WM8958_DSP2_CONFIG, + WM8958_MBC_ENA | WM8958_MBC_SEL_MASK, + mbc << WM8958_MBC_SEL_SHIFT | + WM8958_MBC_ENA); + } else { + /* If the DSP is already stopped then noop */ + if (!(reg & WM8958_DSP2_ENA)) + return; + + snd_soc_update_bits(codec, WM8958_DSP2_CONFIG, + WM8958_MBC_ENA, 0); + snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM, + WM8958_DSP2_ENA, 0); + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8958_DSP2CLK_ENA, 0); + } +} + +static int wm8958_aif_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + int mbc; + + switch (w->shift) { + case 13: + case 12: + mbc = 2; + break; + case 11: + case 10: + mbc = 1; + break; + case 9: + case 8: + mbc = 0; + break; + default: + BUG(); + return -EINVAL; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + wm8958_mbc_apply(codec, mbc, 1); + break; + case SND_SOC_DAPM_POST_PMD: + wm8958_mbc_apply(codec, mbc, 0); + break; + } + + return 0; +} + +static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994_pdata *pdata = wm8994->pdata; + int value = ucontrol->value.integer.value[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ + reg = snd_soc_read(codec, WM8994_CLOCKING_1); + if (reg < 0 || reg & WM8958_DSP2CLK_ENA) + return -EBUSY; + + if (value >= pdata->num_mbc_cfgs) + return -EINVAL; + + wm8994->mbc_cfg = value; + + return 0; +} + +static int wm8958_get_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = wm8994->mbc_cfg; + + return 0; +} + +static int wm8958_mbc_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; + uinfo->count = 1; + uinfo->value.integer.min = 0; + uinfo->value.integer.max = 1; + return 0; +} + +static int wm8958_mbc_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int mbc = kcontrol->private_value; + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + ucontrol->value.integer.value[0] = wm8994->mbc_ena[mbc]; + + return 0; +} + +static int wm8958_mbc_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int mbc = kcontrol->private_value; + int i; + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + if (ucontrol->value.integer.value[0] > 1) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(wm8994->mbc_ena); i++) { + if (mbc != i && wm8994->mbc_ena[i]) { + dev_dbg(codec->dev, "MBC %d active already\n", mbc); + return -EBUSY; + } + } + + wm8994->mbc_ena[mbc] = ucontrol->value.integer.value[0]; + + wm8958_mbc_apply(codec, mbc, wm8994->mbc_ena[mbc]); + + return 0; +} + +#define WM8958_MBC_SWITCH(xname, xval) {\ + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\ + .info = wm8958_mbc_info, \ + .get = wm8958_mbc_get, .put = wm8958_mbc_put, \ + .private_value = xval } static const struct snd_kcontrol_new wm8994_snd_controls[] = { SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME, @@ -2098,10 +789,15 @@ 
SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2_ADC_RIGHT_VOLUME, 1, 119, 0, digital_tlv), +SOC_ENUM("AIF1ADCL Source", aif1adcl_src), +SOC_ENUM("AIF1ADCR Source", aif1adcr_src), +SOC_ENUM("AIF2ADCL Source", aif2adcl_src), +SOC_ENUM("AIF2ADCR Source", aif2adcr_src), + SOC_ENUM("AIF1DACL Source", aif1dacl_src), SOC_ENUM("AIF1DACR Source", aif1dacr_src), -SOC_ENUM("AIF2DACL Source", aif1dacl_src), -SOC_ENUM("AIF2DACR Source", aif1dacr_src), +SOC_ENUM("AIF2DACL Source", aif2dacl_src), +SOC_ENUM("AIF2DACR Source", aif2dacr_src), SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv), @@ -2140,6 +836,18 @@ SOC_SINGLE_TLV("DAC2 Left Sidetone Volume", WM8994_DAC2_MIXER_VOLUMES, SOC_ENUM("Sidetone HPF Mux", sidetone_hpf), SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0), +SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf), +SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0), + +SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf), +SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0), + +SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf), +SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0), + +SOC_ENUM("ADC OSR", adc_osr), +SOC_ENUM("DAC OSR", dac_osr), + SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv), SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME, @@ -2162,15 +870,15 @@ SOC_SINGLE_TLV("SPKR DAC1 Volume", WM8994_SPKMIXR_ATTENUATION, SOC_SINGLE_TLV("AIF1DAC1 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2, 10, 15, 0, wm8994_3d_tlv), -SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2, +SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC1_FILTERS_2, 8, 1, 0), SOC_SINGLE_TLV("AIF1DAC2 3D Stereo Volume", WM8994_AIF1_DAC2_FILTERS_2, 10, 15, 0, wm8994_3d_tlv), SOC_SINGLE("AIF1DAC2 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2, 8, 1, 0), -SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2, +SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF2_DAC_FILTERS_2, 10, 15, 0, wm8994_3d_tlv), -SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2, +SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2, 8, 1, 0), }; @@ -2209,6 +917,13 @@ SOC_SINGLE_TLV("AIF2 EQ5 Volume", WM8994_AIF2_EQ_GAINS_2, 6, 31, 0, eq_tlv), }; +static const struct snd_kcontrol_new wm8958_snd_controls[] = { +SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv), +WM8958_MBC_SWITCH("AIF1DAC1 MBC Switch", 0), +WM8958_MBC_SWITCH("AIF1DAC2 MBC Switch", 1), +WM8958_MBC_SWITCH("AIF2DAC MBC Switch", 2), +}; + static int clk_sys_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -2228,6 +943,7 @@ static int clk_sys_event(struct snd_soc_dapm_widget *w, static void wm8994_update_class_w(struct snd_soc_codec *codec) { + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int enable = 1; int source = 0; /* GCC flow analysis can't track enable */ int reg, reg_r; @@ -2278,11 +994,13 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) WM8994_CP_DYN_PWR | WM8994_CP_DYN_SRC_SEL_MASK, source | WM8994_CP_DYN_PWR); + wm8994->hubs.class_w = true; } else { dev_dbg(codec->dev, "Class W disabled\n"); snd_soc_update_bits(codec, WM8994_CLASS_W_1, WM8994_CP_DYN_PWR, 0); + wm8994->hubs.class_w = false; } } @@ -2512,14 +1230,47 @@ static const struct snd_kcontrol_new aif2adc_mux = SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum); 
static const char *aif3adc_text[] = { - "AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", + "AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM", }; -static const struct soc_enum aif3adc_enum = +static const struct soc_enum wm8994_aif3adc_enum = SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text); -static const struct snd_kcontrol_new aif3adc_mux = - SOC_DAPM_ENUM("AIF3ADC Mux", aif3adc_enum); +static const struct snd_kcontrol_new wm8994_aif3adc_mux = + SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum); + +static const struct soc_enum wm8958_aif3adc_enum = + SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text); + +static const struct snd_kcontrol_new wm8958_aif3adc_mux = + SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum); + +static const char *mono_pcm_out_text[] = { + "None", "AIF2ADCL", "AIF2ADCR", +}; + +static const struct soc_enum mono_pcm_out_enum = + SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text); + +static const struct snd_kcontrol_new mono_pcm_out_mux = + SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum); + +static const char *aif2dac_src_text[] = { + "AIF2", "AIF3", +}; + +/* Note that these two control shouldn't be simultaneously switched to AIF3 */ +static const struct soc_enum aif2dacl_src_enum = + SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text); + +static const struct snd_kcontrol_new aif2dacl_src_mux = + SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum); + +static const struct soc_enum aif2dacr_src_enum = + SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text); + +static const struct snd_kcontrol_new aif2dacr_src_mux = + SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { SND_SOC_DAPM_INPUT("DMIC1DAT"), @@ -2540,19 +1291,23 @@ SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0, WM8994_POWER_MANAGEMENT_4, 9, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0, WM8994_POWER_MANAGEMENT_4, 8, 0), -SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 9, 0), -SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 8, 0), +SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0, + WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0, + WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture", 0, WM8994_POWER_MANAGEMENT_4, 11, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture", 0, WM8994_POWER_MANAGEMENT_4, 10, 0), -SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 11, 0), -SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 10, 0), +SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0, + WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0, + WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0, aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)), @@ -2581,10 +1336,12 @@ SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0, WM8994_POWER_MANAGEMENT_4, 13, 0), SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0, WM8994_POWER_MANAGEMENT_4, 12, 0), -SND_SOC_DAPM_AIF_IN("AIF2DACL", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 13, 0), -SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 12, 0), +SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0, + 
WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), +SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0, + WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), @@ -2593,7 +1350,6 @@ SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux), SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux), SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux), -SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &aif3adc_mux), SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0), @@ -2631,8 +1387,18 @@ SND_SOC_DAPM_MIXER("SPKR", WM8994_POWER_MANAGEMENT_3, 9, 0, SND_SOC_DAPM_POST("Debug log", post_ev), }; -static const struct snd_soc_dapm_route intercon[] = { +static const struct snd_soc_dapm_widget wm8994_specific_dapm_widgets[] = { +SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8994_aif3adc_mux), +}; + +static const struct snd_soc_dapm_widget wm8958_dapm_widgets[] = { +SND_SOC_DAPM_MUX("Mono PCM Out Mux", SND_SOC_NOPM, 0, 0, &mono_pcm_out_mux), +SND_SOC_DAPM_MUX("AIF2DACL Mux", SND_SOC_NOPM, 0, 0, &aif2dacl_src_mux), +SND_SOC_DAPM_MUX("AIF2DACR Mux", SND_SOC_NOPM, 0, 0, &aif2dacr_src_mux), +SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8958_aif3adc_mux), +}; +static const struct snd_soc_dapm_route intercon[] = { { "CLK_SYS", NULL, "AIF1CLK", check_clk_sys }, { "CLK_SYS", NULL, "AIF2CLK", check_clk_sys }, @@ -2740,9 +1506,6 @@ static const struct snd_soc_dapm_route intercon[] = { { "AIF1DAC2L", NULL, "AIF1DAC Mux" }, { "AIF1DAC2R", NULL, "AIF1DAC Mux" }, - { "AIF2DACL", NULL, "AIF2DAC Mux" }, - { "AIF2DACR", NULL, "AIF2DAC Mux" }, - { "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" }, { "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" }, { "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" }, @@ -2815,6 +1578,26 @@ static const struct snd_soc_dapm_route intercon[] = { { "Right Headphone Mux", "DAC", "DAC1R" }, }; +static const struct snd_soc_dapm_route wm8994_intercon[] = { + { "AIF2DACL", NULL, "AIF2DAC Mux" }, + { "AIF2DACR", NULL, "AIF2DAC Mux" }, +}; + +static const struct snd_soc_dapm_route wm8958_intercon[] = { + { "AIF2DACL", NULL, "AIF2DACL Mux" }, + { "AIF2DACR", NULL, "AIF2DACR Mux" }, + + { "AIF2DACL Mux", "AIF2", "AIF2DAC Mux" }, + { "AIF2DACL Mux", "AIF3", "AIF3DACDAT" }, + { "AIF2DACR Mux", "AIF2", "AIF2DAC Mux" }, + { "AIF2DACR Mux", "AIF3", "AIF3DACDAT" }, + + { "Mono PCM Out Mux", "AIF2ADCL", "AIF2ADCL" }, + { "Mono PCM Out Mux", "AIF2ADCR", "AIF2ADCR" }, + + { "AIF3ADC Mux", "Mono PCM", "Mono PCM Out Mux" }, +}; + /* The size in bits of the FLL divide multiplied by 10 * to allow rounding later */ #define FIXED_FLL_SIZE ((1 << 16) * 10) @@ -2930,6 +1713,7 @@ static int _wm8994_set_fll(struct snd_soc_codec *codec, int id, int src, /* Allow no source specification when stopping */ if (freq_out) return -EINVAL; + src = wm8994->fll[id].src; break; case WM8994_FLL_SRC_MCLK1: case WM8994_FLL_SRC_MCLK2: @@ -3094,6 +1878,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai, static int wm8994_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { + struct wm8994 *control = codec->control_data; struct wm8994_priv *wm8994 = 
snd_soc_codec_get_drvdata(codec); switch (level) { @@ -3107,16 +1892,36 @@ static int wm8994_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { - /* Tweak DC servo and DSP configuration for - * improved performance. */ - if (wm8994->revision < 4) { - /* Tweak DC servo and DSP configuration for - * improved performance. */ - snd_soc_write(codec, 0x102, 0x3); - snd_soc_write(codec, 0x56, 0x3); - snd_soc_write(codec, 0x817, 0); - snd_soc_write(codec, 0x102, 0); + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { + pm_runtime_get_sync(codec->dev); + + switch (control->type) { + case WM8994: + if (wm8994->revision < 4) { + /* Tweak DC servo and DSP + * configuration for improved + * performance. */ + snd_soc_write(codec, 0x102, 0x3); + snd_soc_write(codec, 0x56, 0x3); + snd_soc_write(codec, 0x817, 0); + snd_soc_write(codec, 0x102, 0); + } + break; + + case WM8958: + if (wm8994->revision == 0) { + /* Optimise performance for rev A */ + snd_soc_write(codec, 0x102, 0x3); + snd_soc_write(codec, 0xcb, 0x81); + snd_soc_write(codec, 0x817, 0); + snd_soc_write(codec, 0x102, 0); + + snd_soc_update_bits(codec, + WM8958_CHARGE_PUMP_2, + WM8958_CP_DISCH, + WM8958_CP_DISCH); + } + break; } /* Discharge LINEOUT1 & 2 */ @@ -3151,7 +1956,7 @@ static int wm8994_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_OFF: - if (codec->bias_level == SND_SOC_BIAS_STANDBY) { + if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) { /* Switch over to startup biases */ snd_soc_update_bits(codec, WM8994_ANTIPOP_2, WM8994_BIAS_SRC | @@ -3183,16 +1988,19 @@ static int wm8994_set_bias_level(struct snd_soc_codec *codec, WM8994_STARTUP_BIAS_ENA | WM8994_VMID_BUF_ENA | WM8994_VMID_RAMP_MASK, 0); + + pm_runtime_put(codec->dev); } break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; + struct wm8994 *control = codec->control_data; int ms_reg; int aif1_reg; int ms = 0; @@ -3277,6 +2085,13 @@ static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } + /* The AIF2 format configuration needs to be mirrored to AIF3 + * on WM8958 if it's in use so just do it all the time. 
*/ + if (control->type == WM8958 && dai->id == 2) + snd_soc_update_bits(codec, WM8958_AIF3_CONTROL_1, + WM8994_AIF1_LRCLK_INV | + WM8958_AIF3_FMT_MASK, aif1); + snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV | WM8994_AIF1_FMT_MASK, @@ -3317,12 +2132,15 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; + struct wm8994 *control = codec->control_data; struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int aif1_reg; + int aif2_reg; int bclk_reg; int lrclk_reg; int rate_reg; int aif1 = 0; + int aif2 = 0; int bclk = 0; int lrclk = 0; int rate_val = 0; @@ -3333,6 +2151,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, switch (dai->id) { case 1: aif1_reg = WM8994_AIF1_CONTROL_1; + aif2_reg = WM8994_AIF1_CONTROL_2; bclk_reg = WM8994_AIF1_BCLK; rate_reg = WM8994_AIF1_RATE; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || @@ -3345,6 +2164,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, break; case 2: aif1_reg = WM8994_AIF2_CONTROL_1; + aif2_reg = WM8994_AIF2_CONTROL_2; bclk_reg = WM8994_AIF2_BCLK; rate_reg = WM8994_AIF2_RATE; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || @@ -3355,6 +2175,14 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, dev_dbg(codec->dev, "AIF2 using split LRCLK\n"); } break; + case 3: + switch (control->type) { + case WM8958: + aif1_reg = WM8958_AIF3_CONTROL_1; + break; + default: + return 0; + } default: return -EINVAL; } @@ -3392,6 +2220,10 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n", dai->id, wm8994->aifclk[id], bclk_rate); + if (params_channels(params) == 1 && + (snd_soc_read(codec, aif1_reg) & 0x18) == 0x18) + aif2 |= WM8994_AIF1_MONO; + if (wm8994->aifclk[id] == 0) { dev_err(dai->dev, "AIF%dCLK not configured\n", dai->id); return -EINVAL; @@ -3435,6 +2267,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, lrclk, bclk_rate / lrclk); snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1); + snd_soc_update_bits(codec, aif2_reg, WM8994_AIF1_MONO, aif2); snd_soc_update_bits(codec, bclk_reg, WM8994_AIF1_BCLK_DIV_MASK, bclk); snd_soc_update_bits(codec, lrclk_reg, WM8994_AIF1DAC_RATE_MASK, lrclk); @@ -3458,6 +2291,47 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, return 0; } +static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct wm8994 *control = codec->control_data; + int aif1_reg; + int aif1 = 0; + + switch (dai->id) { + case 3: + switch (control->type) { + case WM8958: + aif1_reg = WM8958_AIF3_CONTROL_1; + break; + default: + return 0; + } + default: + return 0; + } + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + break; + case SNDRV_PCM_FORMAT_S20_3LE: + aif1 |= 0x20; + break; + case SNDRV_PCM_FORMAT_S24_LE: + aif1 |= 0x40; + break; + case SNDRV_PCM_FORMAT_S32_LE: + aif1 |= 0x60; + break; + default: + return -EINVAL; + } + + return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1); +} + static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute) { struct snd_soc_codec *codec = codec_dai->codec; @@ -3539,6 +2413,7 @@ static struct snd_soc_dai_ops wm8994_aif2_dai_ops = { }; static struct snd_soc_dai_ops wm8994_aif3_dai_ops = { + .hw_params = wm8994_aif3_hw_params, 
.set_tristate = wm8994_set_tristate, }; @@ -3548,14 +2423,14 @@ static struct snd_soc_dai_driver wm8994_dai[] = { .id = 1, .playback = { .stream_name = "AIF1 Playback", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, }, .capture = { .stream_name = "AIF1 Capture", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, @@ -3567,14 +2442,14 @@ static struct snd_soc_dai_driver wm8994_dai[] = { .id = 2, .playback = { .stream_name = "AIF2 Playback", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, }, .capture = { .stream_name = "AIF2 Capture", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, @@ -3586,14 +2461,14 @@ static struct snd_soc_dai_driver wm8994_dai[] = { .id = 3, .playback = { .stream_name = "AIF3 Playback", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, }, .capture = { .stream_name = "AIF3 Capture", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = WM8994_RATES, .formats = WM8994_FORMATS, @@ -3625,26 +2500,12 @@ static int wm8994_suspend(struct snd_soc_codec *codec, pm_message_t state) static int wm8994_resume(struct snd_soc_codec *codec) { struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); - u16 *reg_cache = codec->reg_cache; int i, ret; /* Restore the registers */ - for (i = 1; i < ARRAY_SIZE(wm8994->reg_cache); i++) { - switch (i) { - case WM8994_LDO_1: - case WM8994_LDO_2: - case WM8994_SOFTWARE_RESET: - /* Handled by other MFD drivers */ - continue; - default: - break; - } - - if (!access_masks[i].writable) - continue; - - wm8994_reg_write(codec->control_data, i, reg_cache[i]); - } + ret = snd_soc_cache_sync(codec); + if (ret != 0) + dev_err(codec->dev, "Failed to sync cache: %d\n", ret); wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY); @@ -3794,6 +2655,34 @@ static void wm8994_handle_pdata(struct wm8994_priv *wm8994) dev_dbg(codec->dev, "%d ReTune Mobile configurations\n", pdata->num_retune_mobile_cfgs); + if (pdata->num_mbc_cfgs) { + struct snd_kcontrol_new control[] = { + SOC_ENUM_EXT("MBC Mode", wm8994->mbc_enum, + wm8958_get_mbc_enum, wm8958_put_mbc_enum), + }; + + /* We need an array of texts for the enum API */ + wm8994->mbc_texts = kmalloc(sizeof(char *) + * pdata->num_mbc_cfgs, GFP_KERNEL); + if (!wm8994->mbc_texts) { + dev_err(wm8994->codec->dev, + "Failed to allocate %d MBC config texts\n", + pdata->num_mbc_cfgs); + return; + } + + for (i = 0; i < pdata->num_mbc_cfgs; i++) + wm8994->mbc_texts[i] = pdata->mbc_cfgs[i].name; + + wm8994->mbc_enum.max = pdata->num_mbc_cfgs; + wm8994->mbc_enum.texts = wm8994->mbc_texts; + + ret = snd_soc_add_controls(wm8994->codec, control, 1); + if (ret != 0) + dev_err(wm8994->codec->dev, + "Failed to add MBC mode controls: %d\n", ret); + } + if (pdata->num_retune_mobile_cfgs) wm8994_handle_retune_mobile_pdata(wm8994); else @@ -3823,8 +2712,12 @@ int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, { struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); struct wm8994_micdet *micdet; + struct wm8994 *control = codec->control_data; int reg; + if (control->type != WM8994) + return -EINVAL; + switch (micbias) { case 1: micdet = &wm8994->micdet[0]; @@ -3863,6 +2756,10 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data) int reg; int report; +#ifndef 
CONFIG_SND_SOC_WM8994_MODULE + trace_snd_soc_jack_irq(dev_name(codec->dev)); +#endif + + reg = snd_soc_read(codec, WM8994_INTERRUPT_RAW_STATUS_2); if (reg < 0) { dev_err(codec->dev, "Failed to read microphone status: %d\n", @@ -3891,77 +2788,251 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data) return IRQ_HANDLED; } +/* Default microphone detection handler for WM8958 - the user can + * override this if they wish. + */ +static void wm8958_default_micdet(u16 status, void *data) +{ + struct snd_soc_codec *codec = data; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + int report = 0; + + /* If nothing present then clear our statuses */ + if (!(status & WM8958_MICD_STS)) { + wm8994->jack_is_video = false; + wm8994->jack_is_mic = false; + goto done; + } + + /* Assume anything over 475 ohms is a microphone and remember + * that we've seen one (since buttons override it) */ + if (status & 0x600) + wm8994->jack_is_mic = true; + if (wm8994->jack_is_mic) + report |= SND_JACK_MICROPHONE; + + /* Video has an impedance of approximately 75 ohms; assume + * this isn't used as a button and remember it since buttons + * override it. */ + if (status & 0x40) + wm8994->jack_is_video = true; + if (wm8994->jack_is_video) + report |= SND_JACK_VIDEOOUT; + + /* Everything else is buttons; just assign slots */ + if (status & 0x4) + report |= SND_JACK_BTN_0; + if (status & 0x8) + report |= SND_JACK_BTN_1; + if (status & 0x10) + report |= SND_JACK_BTN_2; + if (status & 0x20) + report |= SND_JACK_BTN_3; + if (status & 0x80) + report |= SND_JACK_BTN_4; + if (status & 0x100) + report |= SND_JACK_BTN_5; + +done: + snd_soc_jack_report(wm8994->micdet[0].jack, + SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | + SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | + SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, + report); +} + +/** + * wm8958_mic_detect - Enable microphone detection via the WM8958 IRQ + * + * @codec: WM8958 codec + * @jack: jack to report detection events on + * + * Enable microphone detection functionality for the WM8958. By + * default simple detection which supports the detection of up to 6 + * buttons plus video and microphone functionality is provided. + * + * The WM8958 has an advanced jack detection facility which is able to + * support complex accessory detection, especially when used in + * conjunction with external circuitry. In order to provide maximum + * flexibility a callback is provided which allows a completely custom + * detection algorithm. 
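 *
 * As a usage sketch (an illustration only, not taken from this patch;
 * the jack object is hypothetical), a machine driver content with the
 * default handler would simply do:
 *
 *	wm8958_mic_detect(codec, &board_jack, NULL, NULL);
 *
 * while a board with custom accessory circuitry would pass its own
 * wm8958_micdet_cb, which is invoked with the raw WM8958_MIC_DETECT_3
 * status word and the cb_data pointer registered here.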
+ */ +int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, + wm8958_micdet_cb cb, void *cb_data) +{ + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = codec->control_data; + + if (control->type != WM8958) + return -EINVAL; + + if (jack) { + if (!cb) { + dev_dbg(codec->dev, "Using default micdet callback\n"); + cb = wm8958_default_micdet; + cb_data = codec; + } + + wm8994->micdet[0].jack = jack; + wm8994->jack_cb = cb; + wm8994->jack_cb_data = cb_data; + + snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, + WM8958_MICD_ENA, WM8958_MICD_ENA); + } else { + snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, + WM8958_MICD_ENA, 0); + } + + return 0; +} +EXPORT_SYMBOL_GPL(wm8958_mic_detect); + +static irqreturn_t wm8958_mic_irq(int irq, void *data) +{ + struct wm8994_priv *wm8994 = data; + struct snd_soc_codec *codec = wm8994->codec; + int reg; + + reg = snd_soc_read(codec, WM8958_MIC_DETECT_3); + if (reg < 0) { + dev_err(codec->dev, "Failed to read mic detect status: %d\n", + reg); + return IRQ_NONE; + } + + if (!(reg & WM8958_MICD_VALID)) { + dev_dbg(codec->dev, "Mic detect data not valid\n"); + goto out; + } + +#ifndef CONFIG_SND_SOC_WM8994_MODULE + trace_snd_soc_jack_irq(dev_name(codec->dev)); +#endif + + if (wm8994->jack_cb) + wm8994->jack_cb(reg, wm8994->jack_cb_data); + else + dev_warn(codec->dev, "Accessory detection with no callback\n"); + +out: + return IRQ_HANDLED; +} + static int wm8994_codec_probe(struct snd_soc_codec *codec) { + struct wm8994 *control; struct wm8994_priv *wm8994; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret, i; codec->control_data = dev_get_drvdata(codec->dev->parent); + control = codec->control_data; wm8994 = kzalloc(sizeof(struct wm8994_priv), GFP_KERNEL); if (wm8994 == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, wm8994); - codec->reg_cache = &wm8994->reg_cache; - wm8994->pdata = dev_get_platdata(codec->dev->parent); wm8994->codec = codec; - /* Fill the cache with physical values we inherited; don't reset */ - ret = wm8994_bulk_read(codec->control_data, 0, - ARRAY_SIZE(wm8994->reg_cache) - 1, - codec->reg_cache); - if (ret < 0) { - dev_err(codec->dev, "Failed to fill register cache: %d\n", - ret); - goto err; - } + pm_runtime_enable(codec->dev); + pm_runtime_resume(codec->dev); - /* Clear the cached values for unreadable/volatile registers to - * avoid potential confusion. - */ - for (i = 0; i < ARRAY_SIZE(wm8994->reg_cache); i++) - if (wm8994_volatile(i) || !wm8994_readable(i)) - wm8994->reg_cache[i] = 0; + /* Read our current status back from the chip - we don't want to + * reset as this may interfere with the GPIO or LDO operation. 
*/ + for (i = 0; i < WM8994_CACHE_SIZE; i++) { + if (!wm8994_readable(i) || wm8994_volatile(i)) + continue; + + ret = wm8994_reg_read(codec->control_data, i); + if (ret <= 0) + continue; + + ret = snd_soc_cache_write(codec, i, ret); + if (ret != 0) { + dev_err(codec->dev, + "Failed to initialise cache for 0x%x: %d\n", + i, ret); + goto err; + } + } /* Set revision-specific configuration */ wm8994->revision = snd_soc_read(codec, WM8994_CHIP_REVISION); - switch (wm8994->revision) { - case 2: - case 3: - wm8994->hubs.dcs_codes = -5; - wm8994->hubs.hp_startup_mode = 1; + switch (control->type) { + case WM8994: + switch (wm8994->revision) { + case 2: + case 3: + wm8994->hubs.dcs_codes = -5; + wm8994->hubs.hp_startup_mode = 1; + wm8994->hubs.dcs_readback_mode = 1; + break; + default: + wm8994->hubs.dcs_readback_mode = 1; + break; + } + + case WM8958: wm8994->hubs.dcs_readback_mode = 1; break; + default: - wm8994->hubs.dcs_readback_mode = 1; break; } - ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_DET, - wm8994_mic_irq, "Mic 1 detect", wm8994); - if (ret != 0) - dev_warn(codec->dev, - "Failed to request Mic1 detect IRQ: %d\n", ret); - - ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, - wm8994_mic_irq, "Mic 1 short", wm8994); - if (ret != 0) - dev_warn(codec->dev, - "Failed to request Mic1 short IRQ: %d\n", ret); - - ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_DET, - wm8994_mic_irq, "Mic 2 detect", wm8994); - if (ret != 0) - dev_warn(codec->dev, - "Failed to request Mic2 detect IRQ: %d\n", ret); + switch (control->type) { + case WM8994: + ret = wm8994_request_irq(codec->control_data, + WM8994_IRQ_MIC1_DET, + wm8994_mic_irq, "Mic 1 detect", + wm8994); + if (ret != 0) + dev_warn(codec->dev, + "Failed to request Mic1 detect IRQ: %d\n", + ret); + + ret = wm8994_request_irq(codec->control_data, + WM8994_IRQ_MIC1_SHRT, + wm8994_mic_irq, "Mic 1 short", + wm8994); + if (ret != 0) + dev_warn(codec->dev, + "Failed to request Mic1 short IRQ: %d\n", + ret); + + ret = wm8994_request_irq(codec->control_data, + WM8994_IRQ_MIC2_DET, + wm8994_mic_irq, "Mic 2 detect", + wm8994); + if (ret != 0) + dev_warn(codec->dev, + "Failed to request Mic2 detect IRQ: %d\n", + ret); + + ret = wm8994_request_irq(codec->control_data, + WM8994_IRQ_MIC2_SHRT, + wm8994_mic_irq, "Mic 2 short", + wm8994); + if (ret != 0) + dev_warn(codec->dev, + "Failed to request Mic2 short IRQ: %d\n", + ret); + break; - ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, - wm8994_mic_irq, "Mic 2 short", wm8994); - if (ret != 0) - dev_warn(codec->dev, - "Failed to request Mic2 short IRQ: %d\n", ret); + case WM8958: + ret = wm8994_request_irq(codec->control_data, + WM8994_IRQ_MIC1_DET, + wm8958_mic_irq, "Mic detect", + wm8994); + if (ret != 0) + dev_warn(codec->dev, + "Failed to request Mic detect IRQ: %d\n", + ret); + break; + } /* Remember if AIFnLRCLK is configured as a GPIO. 
This should be * configured on init - if a system wants to do this dynamically @@ -4034,10 +3105,36 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) wm_hubs_add_analogue_controls(codec); snd_soc_add_controls(codec, wm8994_snd_controls, ARRAY_SIZE(wm8994_snd_controls)); - snd_soc_dapm_new_controls(codec, wm8994_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets, ARRAY_SIZE(wm8994_dapm_widgets)); + + switch (control->type) { + case WM8994: + snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, + ARRAY_SIZE(wm8994_specific_dapm_widgets)); + break; + case WM8958: + snd_soc_add_controls(codec, wm8958_snd_controls, + ARRAY_SIZE(wm8958_snd_controls)); + snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets, + ARRAY_SIZE(wm8958_dapm_widgets)); + break; + } + + wm_hubs_add_analogue_routes(codec, 0, 0); - snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon)); + snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); + + switch (control->type) { + case WM8994: + snd_soc_dapm_add_routes(dapm, wm8994_intercon, + ARRAY_SIZE(wm8994_intercon)); + break; + case WM8958: + snd_soc_dapm_add_routes(dapm, wm8958_intercon, + ARRAY_SIZE(wm8958_intercon)); + break; + } return 0; @@ -4054,13 +3151,29 @@ err: static int wm8994_codec_remove(struct snd_soc_codec *codec) { struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = codec->control_data; wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF); - wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994); - wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994); - wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994); - wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994); + pm_runtime_disable(codec->dev); + + switch (control->type) { + case WM8994: + wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, + wm8994); + wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, + wm8994); + wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, + wm8994); + wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, + wm8994); + break; + + case WM8958: + wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, + wm8994); + break; + } kfree(wm8994->retune_mobile_texts); kfree(wm8994->drc_texts); kfree(wm8994); @@ -4073,11 +3186,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8994 = { .remove = wm8994_codec_remove, .suspend = wm8994_suspend, .resume = wm8994_resume, - .read = wm8994_read, - .write = wm8994_write, + .read = wm8994_read, + .write = wm8994_write, .readable_register = wm8994_readable, .volatile_register = wm8994_volatile, .set_bias_level = wm8994_set_bias_level, + + .reg_cache_size = WM8994_CACHE_SIZE, + .reg_cache_default = wm8994_reg_defaults, + .reg_word_size = 2, + .compress_type = SND_SOC_RBTREE_COMPRESSION, }; static int __devinit wm8994_probe(struct platform_device *pdev) diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h index d8dce260c430..0c355bfc88f1 100644 --- a/sound/soc/codecs/wm8994.h +++ b/sound/soc/codecs/wm8994.h @@ -28,7 +28,21 @@ #define WM8994_FLL_SRC_LRCLK 3 #define WM8994_FLL_SRC_BCLK 4 +typedef void (*wm8958_micdet_cb)(u16 status, void *data); + int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, int micbias, int det, int shrt); +int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, + wm8958_micdet_cb cb, void *cb_data); + +#define WM8994_CACHE_SIZE 1570 + +struct wm8994_access_mask { + unsigned short readable; /* Mask of 
readable bits */ + unsigned short writable; /* Mask of writable bits */ +}; + +extern const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE]; +extern const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE]; #endif diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c new file mode 100644 index 000000000000..6045cbde492b --- /dev/null +++ b/sound/soc/codecs/wm8995.c @@ -0,0 +1,1818 @@ +/* + * wm8995.c -- WM8995 ALSA SoC Audio driver + * + * Copyright 2010 Wolfson Microelectronics plc + * + * Author: Dimitris Papastamos + * + * Based on wm8994.c and wm_hubs.c by Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wm8995.h" + +static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = { + [0] = 0x8995, [5] = 0x0100, [16] = 0x000b, [17] = 0x000b, + [24] = 0x02c0, [25] = 0x02c0, [26] = 0x02c0, [27] = 0x02c0, + [28] = 0x000f, [32] = 0x0005, [33] = 0x0005, [40] = 0x0003, + [41] = 0x0013, [48] = 0x0004, [56] = 0x09f8, [64] = 0x1f25, + [69] = 0x0004, [82] = 0xaaaa, [84] = 0x2a2a, [146] = 0x0060, + [256] = 0x0002, [257] = 0x8004, [520] = 0x0010, [528] = 0x0083, + [529] = 0x0083, [548] = 0x0c80, [580] = 0x0c80, [768] = 0x4050, + [769] = 0x4000, [771] = 0x0040, [772] = 0x0040, [773] = 0x0040, + [774] = 0x0004, [775] = 0x0100, [784] = 0x4050, [785] = 0x4000, + [787] = 0x0040, [788] = 0x0040, [789] = 0x0040, [1024] = 0x00c0, + [1025] = 0x00c0, [1026] = 0x00c0, [1027] = 0x00c0, [1028] = 0x00c0, + [1029] = 0x00c0, [1030] = 0x00c0, [1031] = 0x00c0, [1056] = 0x0200, + [1057] = 0x0010, [1058] = 0x0200, [1059] = 0x0010, [1088] = 0x0098, + [1089] = 0x0845, [1104] = 0x0098, [1105] = 0x0845, [1152] = 0x6318, + [1153] = 0x6300, [1154] = 0x0fca, [1155] = 0x0400, [1156] = 0x00d8, + [1157] = 0x1eb5, [1158] = 0xf145, [1159] = 0x0b75, [1160] = 0x01c5, + [1161] = 0x1c58, [1162] = 0xf373, [1163] = 0x0a54, [1164] = 0x0558, + [1165] = 0x168e, [1166] = 0xf829, [1167] = 0x07ad, [1168] = 0x1103, + [1169] = 0x0564, [1170] = 0x0559, [1171] = 0x4000, [1184] = 0x6318, + [1185] = 0x6300, [1186] = 0x0fca, [1187] = 0x0400, [1188] = 0x00d8, + [1189] = 0x1eb5, [1190] = 0xf145, [1191] = 0x0b75, [1192] = 0x01c5, + [1193] = 0x1c58, [1194] = 0xf373, [1195] = 0x0a54, [1196] = 0x0558, + [1197] = 0x168e, [1198] = 0xf829, [1199] = 0x07ad, [1200] = 0x1103, + [1201] = 0x0564, [1202] = 0x0559, [1203] = 0x4000, [1280] = 0x00c0, + [1281] = 0x00c0, [1282] = 0x00c0, [1283] = 0x00c0, [1312] = 0x0200, + [1313] = 0x0010, [1344] = 0x0098, [1345] = 0x0845, [1408] = 0x6318, + [1409] = 0x6300, [1410] = 0x0fca, [1411] = 0x0400, [1412] = 0x00d8, + [1413] = 0x1eb5, [1414] = 0xf145, [1415] = 0x0b75, [1416] = 0x01c5, + [1417] = 0x1c58, [1418] = 0xf373, [1419] = 0x0a54, [1420] = 0x0558, + [1421] = 0x168e, [1422] = 0xf829, [1423] = 0x07ad, [1424] = 0x1103, + [1425] = 0x0564, [1426] = 0x0559, [1427] = 0x4000, [1568] = 0x0002, + [1792] = 0xa100, [1793] = 0xa101, [1794] = 0xa101, [1795] = 0xa101, + [1796] = 0xa101, [1797] = 0xa101, [1798] = 0xa101, [1799] = 0xa101, + [1800] = 0xa101, [1801] = 0xa101, [1802] = 0xa101, [1803] = 0xa101, + [1804] = 0xa101, [1805] = 0xa101, [1825] = 0x0055, [1848] = 0x3fff, + [1849] = 0x1fff, [2049] = 0x0001, [2050] = 0x0069, [2056] = 0x0002, + [2057] = 0x0003, [2058] = 0x0069, 
[12288] = 0x0001, [12289] = 0x0001, + [12291] = 0x0006, [12292] = 0x0040, [12293] = 0x0001, [12294] = 0x000f, + [12295] = 0x0006, [12296] = 0x0001, [12297] = 0x0003, [12298] = 0x0104, + [12300] = 0x0060, [12301] = 0x0011, [12302] = 0x0401, [12304] = 0x0050, + [12305] = 0x0003, [12306] = 0x0100, [12308] = 0x0051, [12309] = 0x0003, + [12310] = 0x0104, [12311] = 0x000a, [12312] = 0x0060, [12313] = 0x003b, + [12314] = 0x0502, [12315] = 0x0100, [12316] = 0x2fff, [12320] = 0x2fff, + [12324] = 0x2fff, [12328] = 0x2fff, [12332] = 0x2fff, [12336] = 0x2fff, + [12340] = 0x2fff, [12344] = 0x2fff, [12348] = 0x2fff, [12352] = 0x0001, + [12353] = 0x0001, [12355] = 0x0006, [12356] = 0x0040, [12357] = 0x0001, + [12358] = 0x000f, [12359] = 0x0006, [12360] = 0x0001, [12361] = 0x0003, + [12362] = 0x0104, [12364] = 0x0060, [12365] = 0x0011, [12366] = 0x0401, + [12368] = 0x0050, [12369] = 0x0003, [12370] = 0x0100, [12372] = 0x0060, + [12373] = 0x003b, [12374] = 0x0502, [12375] = 0x0100, [12376] = 0x2fff, + [12380] = 0x2fff, [12384] = 0x2fff, [12388] = 0x2fff, [12392] = 0x2fff, + [12396] = 0x2fff, [12400] = 0x2fff, [12404] = 0x2fff, [12408] = 0x2fff, + [12412] = 0x2fff, [12416] = 0x0001, [12417] = 0x0001, [12419] = 0x0006, + [12420] = 0x0040, [12421] = 0x0001, [12422] = 0x000f, [12423] = 0x0006, + [12424] = 0x0001, [12425] = 0x0003, [12426] = 0x0106, [12428] = 0x0061, + [12429] = 0x0011, [12430] = 0x0401, [12432] = 0x0050, [12433] = 0x0003, + [12434] = 0x0102, [12436] = 0x0051, [12437] = 0x0003, [12438] = 0x0106, + [12439] = 0x000a, [12440] = 0x0061, [12441] = 0x003b, [12442] = 0x0502, + [12443] = 0x0100, [12444] = 0x2fff, [12448] = 0x2fff, [12452] = 0x2fff, + [12456] = 0x2fff, [12460] = 0x2fff, [12464] = 0x2fff, [12468] = 0x2fff, + [12472] = 0x2fff, [12476] = 0x2fff, [12480] = 0x0001, [12481] = 0x0001, + [12483] = 0x0006, [12484] = 0x0040, [12485] = 0x0001, [12486] = 0x000f, + [12487] = 0x0006, [12488] = 0x0001, [12489] = 0x0003, [12490] = 0x0106, + [12492] = 0x0061, [12493] = 0x0011, [12494] = 0x0401, [12496] = 0x0050, + [12497] = 0x0003, [12498] = 0x0102, [12500] = 0x0061, [12501] = 0x003b, + [12502] = 0x0502, [12503] = 0x0100, [12504] = 0x2fff, [12508] = 0x2fff, + [12512] = 0x2fff, [12516] = 0x2fff, [12520] = 0x2fff, [12524] = 0x2fff, + [12528] = 0x2fff, [12532] = 0x2fff, [12536] = 0x2fff, [12540] = 0x2fff, + [12544] = 0x0060, [12546] = 0x0601, [12548] = 0x0050, [12550] = 0x0100, + [12552] = 0x0001, [12554] = 0x0104, [12555] = 0x0100, [12556] = 0x2fff, + [12560] = 0x2fff, [12564] = 0x2fff, [12568] = 0x2fff, [12572] = 0x2fff, + [12576] = 0x2fff, [12580] = 0x2fff, [12584] = 0x2fff, [12588] = 0x2fff, + [12592] = 0x2fff, [12596] = 0x2fff, [12600] = 0x2fff, [12604] = 0x2fff, + [12608] = 0x0061, [12610] = 0x0601, [12612] = 0x0050, [12614] = 0x0102, + [12616] = 0x0001, [12618] = 0x0106, [12619] = 0x0100, [12620] = 0x2fff, + [12624] = 0x2fff, [12628] = 0x2fff, [12632] = 0x2fff, [12636] = 0x2fff, + [12640] = 0x2fff, [12644] = 0x2fff, [12648] = 0x2fff, [12652] = 0x2fff, + [12656] = 0x2fff, [12660] = 0x2fff, [12664] = 0x2fff, [12668] = 0x2fff, + [12672] = 0x0060, [12674] = 0x0601, [12676] = 0x0061, [12678] = 0x0601, + [12680] = 0x0050, [12682] = 0x0300, [12684] = 0x0001, [12686] = 0x0304, + [12688] = 0x0040, [12690] = 0x000f, [12692] = 0x0001, [12695] = 0x0100 +}; + +struct fll_config { + int src; + int in; + int out; +}; + +struct wm8995_priv { + enum snd_soc_control_type control_type; + int sysclk[2]; + int mclk[2]; + int aifclk[2]; + struct fll_config fll[2], fll_suspend[2]; +}; + +static const 
DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); +static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0); +static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0); +static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0); + +static const char *in1l_text[] = { + "Differential", "Single-ended IN1LN", "Single-ended IN1LP" +}; + +static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL, + 2, in1l_text); + +static const char *in1r_text[] = { + "Differential", "Single-ended IN1RN", "Single-ended IN1RP" +}; + +static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL, + 0, in1r_text); + +static const char *dmic_src_text[] = { + "DMICDAT1", "DMICDAT2", "DMICDAT3" +}; + +static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5, + 8, dmic_src_text); +static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5, + 6, dmic_src_text); + +static const struct snd_kcontrol_new wm8995_snd_controls[] = { + SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME, + WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME, + WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1), + + SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME, + WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME, + WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1), + + SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME, + WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME, + WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME, + WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + + SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME, + WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0, in1lr_pga_tlv), + + SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL, + 4, 3, 0, in1l_boost_tlv), + + SOC_ENUM("IN1L Mode", in1l_enum), + SOC_ENUM("IN1R Mode", in1r_enum), + + SOC_ENUM("DMIC1 SRC", dmic_src1_enum), + SOC_ENUM("DMIC2 SRC", dmic_src2_enum), + + SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5, + 24, 0, sidetone_tlv), + SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5, + 24, 0, sidetone_tlv), + + SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME, + WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME, + WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv), + SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME, + WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv) +}; + +static void wm8995_update_class_w(struct snd_soc_codec *codec) +{ + int enable = 1; + int source = 0; /* GCC flow analysis can't track enable */ + int reg, reg_r; + + /* We also need the same setting for L/R and only one path */ + reg = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING); + switch (reg) { + case WM8995_AIF2DACL_TO_DAC1L: + dev_dbg(codec->dev, "Class W source AIF2DAC\n"); + source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT; + break; + case WM8995_AIF1DAC2L_TO_DAC1L: + dev_dbg(codec->dev, "Class W source AIF1DAC2\n"); + source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT; + break; + case WM8995_AIF1DAC1L_TO_DAC1L: + dev_dbg(codec->dev, "Class W source AIF1DAC1\n"); + source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT; + break; + default: + dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg); + enable = 0; + break; + } 
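	/* The right DAC1 mixer routing read below must match the left-channel
	 * value above; any mismatch leaves Class W dynamic charge pump power
	 * disabled. */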
+ + reg_r = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING); + if (reg_r != reg) { + dev_dbg(codec->dev, "Left and right DAC mixers different\n"); + enable = 0; + } + + if (enable) { + dev_dbg(codec->dev, "Class W enabled\n"); + snd_soc_update_bits(codec, WM8995_CLASS_W_1, + WM8995_CP_DYN_PWR_MASK | + WM8995_CP_DYN_SRC_SEL_MASK, + source | WM8995_CP_DYN_PWR); + } else { + dev_dbg(codec->dev, "Class W disabled\n"); + snd_soc_update_bits(codec, WM8995_CLASS_W_1, + WM8995_CP_DYN_PWR_MASK, 0); + } +} + +static int check_clk_sys(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + unsigned int reg; + const char *clk; + + reg = snd_soc_read(source->codec, WM8995_CLOCKING_1); + /* Check what we're currently using for CLK_SYS */ + if (reg & WM8995_SYSCLK_SRC) + clk = "AIF2CLK"; + else + clk = "AIF1CLK"; + return !strcmp(source->name, clk); +} + +static int wm8995_put_class_w(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_dapm_widget *w; + struct snd_soc_codec *codec; + int ret; + + w = snd_kcontrol_chip(kcontrol); + codec = w->codec; + ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol); + wm8995_update_class_w(codec); + return ret; +} + +static int hp_supply_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec; + struct wm8995_priv *wm8995; + + codec = w->codec; + wm8995 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + /* Enable the headphone amp */ + snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, + WM8995_HPOUT1L_ENA_MASK | + WM8995_HPOUT1R_ENA_MASK, + WM8995_HPOUT1L_ENA | + WM8995_HPOUT1R_ENA); + + /* Enable the second stage */ + snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1, + WM8995_HPOUT1L_DLY_MASK | + WM8995_HPOUT1R_DLY_MASK, + WM8995_HPOUT1L_DLY | + WM8995_HPOUT1R_DLY); + break; + case SND_SOC_DAPM_PRE_PMD: + snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1, + WM8995_CP_ENA_MASK, 0); + break; + } + + return 0; +} + +static void dc_servo_cmd(struct snd_soc_codec *codec, + unsigned int reg, unsigned int val, unsigned int mask) +{ + int timeout = 10; + + dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n", + __func__, reg, val, mask); + + snd_soc_write(codec, reg, val); + while (timeout--) { + msleep(10); + val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0); + if ((val & mask) == mask) + return; + } + + dev_err(codec->dev, "Timed out waiting for DC Servo\n"); +} + +static int hp_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec; + unsigned int reg; + + codec = w->codec; + reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1); + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1, + WM8995_CP_ENA_MASK, WM8995_CP_ENA); + + msleep(5); + + snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, + WM8995_HPOUT1L_ENA_MASK | + WM8995_HPOUT1R_ENA_MASK, + WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA); + + udelay(20); + + reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY; + snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg); + + snd_soc_write(codec, WM8995_DC_SERVO_1, WM8995_DCS_ENA_CHAN_0 | + WM8995_DCS_ENA_CHAN_1); + + dc_servo_cmd(codec, WM8995_DC_SERVO_2, + WM8995_DCS_TRIG_STARTUP_0 | + WM8995_DCS_TRIG_STARTUP_1, + WM8995_DCS_TRIG_DAC_WR_0 | + WM8995_DCS_TRIG_DAC_WR_1); + + reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT | + WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT; + snd_soc_write(codec, WM8995_ANALOGUE_HP_1, 
reg); + + break; + case SND_SOC_DAPM_PRE_PMD: + snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1, + WM8995_HPOUT1L_OUTP_MASK | + WM8995_HPOUT1R_OUTP_MASK | + WM8995_HPOUT1L_RMV_SHORT_MASK | + WM8995_HPOUT1R_RMV_SHORT_MASK, 0); + + snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1, + WM8995_HPOUT1L_DLY_MASK | + WM8995_HPOUT1R_DLY_MASK, 0); + + snd_soc_write(codec, WM8995_DC_SERVO_1, 0); + + snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, + WM8995_HPOUT1L_ENA_MASK | + WM8995_HPOUT1R_ENA_MASK, + 0); + break; + } + + return 0; +} + +static int configure_aif_clock(struct snd_soc_codec *codec, int aif) +{ + struct wm8995_priv *wm8995; + int rate; + int reg1 = 0; + int offset; + + wm8995 = snd_soc_codec_get_drvdata(codec); + + if (aif) + offset = 4; + else + offset = 0; + + switch (wm8995->sysclk[aif]) { + case WM8995_SYSCLK_MCLK1: + rate = wm8995->mclk[0]; + break; + case WM8995_SYSCLK_MCLK2: + reg1 |= 0x8; + rate = wm8995->mclk[1]; + break; + case WM8995_SYSCLK_FLL1: + reg1 |= 0x10; + rate = wm8995->fll[0].out; + break; + case WM8995_SYSCLK_FLL2: + reg1 |= 0x18; + rate = wm8995->fll[1].out; + break; + default: + return -EINVAL; + } + + if (rate >= 13500000) { + rate /= 2; + reg1 |= WM8995_AIF1CLK_DIV; + + dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n", + aif + 1, rate); + } + + wm8995->aifclk[aif] = rate; + + snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset, + WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK, + reg1); + return 0; +} + +static int configure_clock(struct snd_soc_codec *codec) +{ + struct wm8995_priv *wm8995; + int old, new; + + wm8995 = snd_soc_codec_get_drvdata(codec); + + /* Bring up the AIF clocks first */ + configure_aif_clock(codec, 0); + configure_aif_clock(codec, 1); + + /* + * Then switch CLK_SYS over to the higher of them; a change + * can only happen as a result of a clocking change which can + * only be made outside of DAPM so we can safely redo the + * clocking. + */ + + /* If they're equal it doesn't matter which is used */ + if (wm8995->aifclk[0] == wm8995->aifclk[1]) + return 0; + + if (wm8995->aifclk[0] < wm8995->aifclk[1]) + new = WM8995_SYSCLK_SRC; + else + new = 0; + + old = snd_soc_read(codec, WM8995_CLOCKING_1) & WM8995_SYSCLK_SRC; + + /* If there's no change then we're done. 
*/ + if (old == new) + return 0; + + snd_soc_update_bits(codec, WM8995_CLOCKING_1, + WM8995_SYSCLK_SRC_MASK, new); + + snd_soc_dapm_sync(&codec->dapm); + + return 0; +} + +static int clk_sys_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec; + + codec = w->codec; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + return configure_clock(codec); + + case SND_SOC_DAPM_POST_PMD: + configure_clock(codec); + break; + } + + return 0; +} + +static const char *sidetone_text[] = { + "ADC/DMIC1", "DMIC2", +}; + +static const struct soc_enum sidetone1_enum = + SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text); + +static const struct snd_kcontrol_new sidetone1_mux = + SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum); + +static const struct soc_enum sidetone2_enum = + SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text); + +static const struct snd_kcontrol_new sidetone2_mux = + SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum); + +static const struct snd_kcontrol_new aif1adc1l_mix[] = { + SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new aif1adc1r_mix[] = { + SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new aif1adc2l_mix[] = { + SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new aif1adc2r_mix[] = { + SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new dac1l_mix[] = { + WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING, + 5, 1, 0), + WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING, + 4, 1, 0), + WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING, + 2, 1, 0), + WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING, + 1, 1, 0), + WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new dac1r_mix[] = { + WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING, + 5, 1, 0), + WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING, + 4, 1, 0), + WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING, + 2, 1, 0), + WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING, + 1, 1, 0), + WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new aif2dac2l_mix[] = { + SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING, + 5, 1, 0), + SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING, + 4, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING, + 2, 1, 0), + SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new aif2dac2r_mix[] = { + SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING, + 5, 1, 0), + 
SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING, + 4, 1, 0), + SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING, + 2, 1, 0), + SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING, + 1, 1, 0), + SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new in1l_pga = + SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0); + +static const struct snd_kcontrol_new in1r_pga = + SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0); + +static const char *adc_mux_text[] = { + "ADC", + "DMIC", +}; + +static const struct soc_enum adc_enum = + SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text); + +static const struct snd_kcontrol_new adcl_mux = + SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum); + +static const struct snd_kcontrol_new adcr_mux = + SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum); + +static const char *spk_src_text[] = { + "DAC1L", "DAC1R", "DAC2L", "DAC2R" +}; + +static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum, WM8995_LEFT_PDM_SPEAKER_1, + 0, spk_src_text); +static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum, WM8995_RIGHT_PDM_SPEAKER_1, + 0, spk_src_text); +static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum, WM8995_LEFT_PDM_SPEAKER_2, + 0, spk_src_text); +static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum, WM8995_RIGHT_PDM_SPEAKER_2, + 0, spk_src_text); + +static const struct snd_kcontrol_new spk1l_mux = + SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum); +static const struct snd_kcontrol_new spk1r_mux = + SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum); +static const struct snd_kcontrol_new spk2l_mux = + SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum); +static const struct snd_kcontrol_new spk2r_mux = + SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum); + +static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = { + SND_SOC_DAPM_INPUT("DMIC1DAT"), + SND_SOC_DAPM_INPUT("DMIC2DAT"), + + SND_SOC_DAPM_INPUT("IN1L"), + SND_SOC_DAPM_INPUT("IN1R"), + + SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0, + &in1l_pga, 1), + SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0, + &in1r_pga, 1), + + SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0), + SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0), + + SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + + SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0, + WM8995_POWER_MANAGEMENT_3, 9, 0), + SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0, + WM8995_POWER_MANAGEMENT_3, 8, 0), + SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, + SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture", + 0, WM8995_POWER_MANAGEMENT_3, 11, 0), + SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture", + 0, WM8995_POWER_MANAGEMENT_3, 10, 0), + + SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0, + &adcl_mux), + SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0, + &adcr_mux), + + SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0), + SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0), + SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0), + SND_SOC_DAPM_ADC("DMIC1R", 
NULL, WM8995_POWER_MANAGEMENT_3, 2, 0), + + SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0), + SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0), + + SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0, + aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)), + SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0, + aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)), + SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0, + aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)), + SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0, + aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)), + + SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, WM8995_POWER_MANAGEMENT_4, + 9, 0), + SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, WM8995_POWER_MANAGEMENT_4, + 8, 0), + SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, + 0, 0), + + SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, WM8995_POWER_MANAGEMENT_4, + 11, 0), + SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, WM8995_POWER_MANAGEMENT_4, + 10, 0), + + SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0, + aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)), + SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0, + aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)), + + SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0), + SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0), + SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0), + SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0), + + SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, dac1l_mix, + ARRAY_SIZE(dac1l_mix)), + SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix, + ARRAY_SIZE(dac1r_mix)), + + SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &sidetone1_mux), + SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &sidetone2_mux), + + SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + + SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0, + hp_supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + + SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1, + 4, 0, &spk1l_mux), + SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1, + 4, 0, &spk1r_mux), + SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2, + 4, 0, &spk2l_mux), + SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2, + 4, 0, &spk2r_mux), + + SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0, NULL, 0), + + SND_SOC_DAPM_OUTPUT("HP1L"), + SND_SOC_DAPM_OUTPUT("HP1R"), + SND_SOC_DAPM_OUTPUT("SPK1L"), + SND_SOC_DAPM_OUTPUT("SPK1R"), + SND_SOC_DAPM_OUTPUT("SPK2L"), + SND_SOC_DAPM_OUTPUT("SPK2R") +}; + +static const struct snd_soc_dapm_route wm8995_intercon[] = { + { "CLK_SYS", NULL, "AIF1CLK", check_clk_sys }, + { "CLK_SYS", NULL, "AIF2CLK", check_clk_sys }, + + { "DSP1CLK", NULL, "CLK_SYS" }, + { "DSP2CLK", NULL, "CLK_SYS" }, + { "SYSDSPCLK", NULL, "CLK_SYS" }, + + { "AIF1ADC1L", NULL, "AIF1CLK" }, + { "AIF1ADC1L", NULL, "DSP1CLK" }, + { "AIF1ADC1R", NULL, "AIF1CLK" }, + { "AIF1ADC1R", NULL, "DSP1CLK" }, + { "AIF1ADC1R", NULL, "SYSDSPCLK" }, + + { "AIF1ADC2L", NULL, "AIF1CLK" }, + { "AIF1ADC2L", NULL, "DSP1CLK" }, + { "AIF1ADC2R", NULL, "AIF1CLK" }, + { "AIF1ADC2R", NULL, "DSP1CLK" }, + { "AIF1ADC2R", NULL, "SYSDSPCLK" }, + + { "DMIC1L", NULL, "DMIC1DAT" }, + { "DMIC1L", NULL, "CLK_SYS" }, + { "DMIC1R", NULL, "DMIC1DAT" }, + { "DMIC1R", NULL, "CLK_SYS" }, + { "DMIC2L", NULL, "DMIC2DAT" }, + { "DMIC2L", NULL, "CLK_SYS" }, + { "DMIC2R", NULL, 
"DMIC2DAT" }, + { "DMIC2R", NULL, "CLK_SYS" }, + + { "ADCL", NULL, "AIF1CLK" }, + { "ADCL", NULL, "DSP1CLK" }, + { "ADCL", NULL, "SYSDSPCLK" }, + + { "ADCR", NULL, "AIF1CLK" }, + { "ADCR", NULL, "DSP1CLK" }, + { "ADCR", NULL, "SYSDSPCLK" }, + + { "IN1L PGA", "IN1L Switch", "IN1L" }, + { "IN1R PGA", "IN1R Switch", "IN1R" }, + { "IN1L PGA", NULL, "LDO2" }, + { "IN1R PGA", NULL, "LDO2" }, + + { "ADCL", NULL, "IN1L PGA" }, + { "ADCR", NULL, "IN1R PGA" }, + + { "ADCL Mux", "ADC", "ADCL" }, + { "ADCL Mux", "DMIC", "DMIC1L" }, + { "ADCR Mux", "ADC", "ADCR" }, + { "ADCR Mux", "DMIC", "DMIC1R" }, + + /* AIF1 outputs */ + { "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" }, + { "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" }, + + { "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" }, + { "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" }, + + { "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" }, + { "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" }, + + { "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" }, + { "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" }, + + /* Sidetone */ + { "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" }, + { "Left Sidetone", "DMIC2", "AIF1ADC2L" }, + { "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" }, + { "Right Sidetone", "DMIC2", "AIF1ADC2R" }, + + { "AIF1DAC1L", NULL, "AIF1CLK" }, + { "AIF1DAC1L", NULL, "DSP1CLK" }, + { "AIF1DAC1R", NULL, "AIF1CLK" }, + { "AIF1DAC1R", NULL, "DSP1CLK" }, + { "AIF1DAC1R", NULL, "SYSDSPCLK" }, + + { "AIF1DAC2L", NULL, "AIF1CLK" }, + { "AIF1DAC2L", NULL, "DSP1CLK" }, + { "AIF1DAC2R", NULL, "AIF1CLK" }, + { "AIF1DAC2R", NULL, "DSP1CLK" }, + { "AIF1DAC2R", NULL, "SYSDSPCLK" }, + + { "DAC1L", NULL, "AIF1CLK" }, + { "DAC1L", NULL, "DSP1CLK" }, + { "DAC1L", NULL, "SYSDSPCLK" }, + + { "DAC1R", NULL, "AIF1CLK" }, + { "DAC1R", NULL, "DSP1CLK" }, + { "DAC1R", NULL, "SYSDSPCLK" }, + + { "AIF1DAC1L", NULL, "AIF1DACDAT" }, + { "AIF1DAC1R", NULL, "AIF1DACDAT" }, + { "AIF1DAC2L", NULL, "AIF1DACDAT" }, + { "AIF1DAC2R", NULL, "AIF1DACDAT" }, + + /* DAC1 inputs */ + { "DAC1L", NULL, "DAC1L Mixer" }, + { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, + { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, + { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, + { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, + + { "DAC1R", NULL, "DAC1R Mixer" }, + { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, + { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, + { "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" }, + { "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" }, + + /* DAC2/AIF2 outputs */ + { "DAC2L", NULL, "AIF2DAC2L Mixer" }, + { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, + { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, + + { "DAC2R", NULL, "AIF2DAC2R Mixer" }, + { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, + { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, + + /* Output stages */ + { "Headphone PGA", NULL, "DAC1L" }, + { "Headphone PGA", NULL, "DAC1R" }, + + { "Headphone PGA", NULL, "DAC2L" }, + { "Headphone PGA", NULL, "DAC2R" }, + + { "Headphone PGA", NULL, "Headphone Supply" }, + { "Headphone PGA", NULL, "CLK_SYS" }, + { "Headphone PGA", NULL, "LDO2" }, + + { "HP1L", NULL, "Headphone PGA" }, + { "HP1R", NULL, "Headphone PGA" }, + + { "SPK1L Driver", "DAC1L", "DAC1L" }, + { "SPK1L Driver", "DAC1R", "DAC1R" }, + { "SPK1L Driver", "DAC2L", "DAC2L" }, + { "SPK1L Driver", "DAC2R", "DAC2R" }, + { "SPK1L Driver", NULL, "CLK_SYS" }, + + { "SPK1R Driver", "DAC1L", "DAC1L" }, + { "SPK1R Driver", "DAC1R", "DAC1R" }, + { "SPK1R Driver", "DAC2L", "DAC2L" }, + { "SPK1R Driver", 
"DAC2R", "DAC2R" }, + { "SPK1R Driver", NULL, "CLK_SYS" }, + + { "SPK2L Driver", "DAC1L", "DAC1L" }, + { "SPK2L Driver", "DAC1R", "DAC1R" }, + { "SPK2L Driver", "DAC2L", "DAC2L" }, + { "SPK2L Driver", "DAC2R", "DAC2R" }, + { "SPK2L Driver", NULL, "CLK_SYS" }, + + { "SPK2R Driver", "DAC1L", "DAC1L" }, + { "SPK2R Driver", "DAC1R", "DAC1R" }, + { "SPK2R Driver", "DAC2L", "DAC2L" }, + { "SPK2R Driver", "DAC2R", "DAC2R" }, + { "SPK2R Driver", NULL, "CLK_SYS" }, + + { "SPK1L", NULL, "SPK1L Driver" }, + { "SPK1R", NULL, "SPK1R Driver" }, + { "SPK2L", NULL, "SPK2L Driver" }, + { "SPK2R", NULL, "SPK2R Driver" } +}; + +static int wm8995_volatile(unsigned int reg) +{ + /* out of bounds registers are generally considered + * volatile to support register banks that are partially + * owned by something else for e.g. a DSP + */ + if (reg > WM8995_MAX_CACHED_REGISTER) + return 1; + + switch (reg) { + case WM8995_SOFTWARE_RESET: + case WM8995_DC_SERVO_READBACK_0: + case WM8995_INTERRUPT_STATUS_1: + case WM8995_INTERRUPT_STATUS_2: + case WM8995_INTERRUPT_STATUS_1_MASK: + case WM8995_INTERRUPT_STATUS_2_MASK: + case WM8995_INTERRUPT_CONTROL: + case WM8995_ACCESSORY_DETECT_MODE1: + case WM8995_ACCESSORY_DETECT_MODE2: + case WM8995_HEADPHONE_DETECT1: + case WM8995_HEADPHONE_DETECT2: + return 1; + } + + return 0; +} + +static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_codec *codec = dai->codec; + int mute_reg; + + switch (dai->id) { + case 0: + mute_reg = WM8995_AIF1_DAC1_FILTERS_1; + break; + case 1: + mute_reg = WM8995_AIF2_DAC_FILTERS_1; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, mute_reg, WM8995_AIF1DAC1_MUTE_MASK, + !!mute << WM8995_AIF1DAC1_MUTE_SHIFT); + return 0; +} + +static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct snd_soc_codec *codec; + int master; + int aif; + + codec = dai->codec; + + master = 0; + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + break; + case SND_SOC_DAIFMT_CBM_CFM: + master = WM8995_AIF1_MSTR; + break; + default: + dev_err(dai->dev, "Unknown master/slave configuration\n"); + return -EINVAL; + } + + aif = 0; + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_B: + aif |= WM8995_AIF1_LRCLK_INV; + case SND_SOC_DAIFMT_DSP_A: + aif |= (0x3 << WM8995_AIF1_FMT_SHIFT); + break; + case SND_SOC_DAIFMT_I2S: + aif |= (0x2 << WM8995_AIF1_FMT_SHIFT); + break; + case SND_SOC_DAIFMT_RIGHT_J: + break; + case SND_SOC_DAIFMT_LEFT_J: + aif |= (0x1 << WM8995_AIF1_FMT_SHIFT); + break; + default: + dev_err(dai->dev, "Unknown dai format\n"); + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: + case SND_SOC_DAIFMT_DSP_B: + /* frame inversion not valid for DSP modes */ + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_NF: + aif |= WM8995_AIF1_BCLK_INV; + break; + default: + return -EINVAL; + } + break; + + case SND_SOC_DAIFMT_I2S: + case SND_SOC_DAIFMT_RIGHT_J: + case SND_SOC_DAIFMT_LEFT_J: + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_IF: + aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV; + break; + case SND_SOC_DAIFMT_IB_NF: + aif |= WM8995_AIF1_BCLK_INV; + break; + case SND_SOC_DAIFMT_NB_IF: + aif |= WM8995_AIF1_LRCLK_INV; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1, + WM8995_AIF1_BCLK_INV_MASK | + 
WM8995_AIF1_LRCLK_INV_MASK | + WM8995_AIF1_FMT_MASK, aif); + snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE, + WM8995_AIF1_MSTR_MASK, master); + return 0; +} + +static const int srs[] = { + 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, + 48000, 88200, 96000 +}; + +static const int fs_ratios[] = { + -1 /* reserved */, + 128, 192, 256, 384, 512, 768, 1024, 1408, 1536 +}; + +static const int bclk_divs[] = { + 10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480 +}; + +static int wm8995_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec; + struct wm8995_priv *wm8995; + int aif1_reg; + int bclk_reg; + int lrclk_reg; + int rate_reg; + int bclk_rate; + int aif1; + int lrclk, bclk; + int i, rate_val, best, best_val, cur_val; + + codec = dai->codec; + wm8995 = snd_soc_codec_get_drvdata(codec); + + switch (dai->id) { + case 0: + aif1_reg = WM8995_AIF1_CONTROL_1; + bclk_reg = WM8995_AIF1_BCLK; + rate_reg = WM8995_AIF1_RATE; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* || + wm8995->lrclk_shared[0] */) { + lrclk_reg = WM8995_AIF1DAC_LRCLK; + } else { + lrclk_reg = WM8995_AIF1ADC_LRCLK; + dev_dbg(codec->dev, "AIF1 using split LRCLK\n"); + } + break; + case 1: + aif1_reg = WM8995_AIF2_CONTROL_1; + bclk_reg = WM8995_AIF2_BCLK; + rate_reg = WM8995_AIF2_RATE; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* || + wm8995->lrclk_shared[1] */) { + lrclk_reg = WM8995_AIF2DAC_LRCLK; + } else { + lrclk_reg = WM8995_AIF2ADC_LRCLK; + dev_dbg(codec->dev, "AIF2 using split LRCLK\n"); + } + break; + default: + return -EINVAL; + } + + bclk_rate = snd_soc_params_to_bclk(params); + if (bclk_rate < 0) + return bclk_rate; + + aif1 = 0; + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + break; + case SNDRV_PCM_FORMAT_S20_3LE: + aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT); + break; + case SNDRV_PCM_FORMAT_S24_LE: + aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT); + break; + case SNDRV_PCM_FORMAT_S32_LE: + aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT); + break; + default: + dev_err(dai->dev, "Unsupported word length %u\n", + params_format(params)); + return -EINVAL; + } + + /* try to find a suitable sample rate */ + for (i = 0; i < ARRAY_SIZE(srs); ++i) + if (srs[i] == params_rate(params)) + break; + if (i == ARRAY_SIZE(srs)) { + dev_err(dai->dev, "Sample rate %d is not supported\n", + params_rate(params)); + return -EINVAL; + } + rate_val = i << WM8995_AIF1_SR_SHIFT; + + dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]); + dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n", + dai->id + 1, wm8995->aifclk[dai->id], bclk_rate); + + /* AIFCLK/fs ratio; look for a close match in either direction */ + best = 1; + best_val = abs((fs_ratios[1] * params_rate(params)) + - wm8995->aifclk[dai->id]); + for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) { + cur_val = abs((fs_ratios[i] * params_rate(params)) + - wm8995->aifclk[dai->id]); + if (cur_val >= best_val) + continue; + best = i; + best_val = cur_val; + } + rate_val |= best; + + dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n", + dai->id + 1, fs_ratios[best]); + + /* + * We may not get quite the right frequency if using + * approximate clocks so look for the closest match that is + * higher than the target (we need to ensure that there enough + * BCLKs to clock out the samples). 
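 * For example, with AIF1CLK at 12.288MHz and a 48kHz, 16-bit stereo
 * stream (target BCLK 1.536MHz) the search below selects BCLK_DIV
 * index 7 (divide by 8), giving exactly 1.536MHz and an LRCLK ratio
 * of 32.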
+ */ + best = 0; + bclk = 0; + for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) { + cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i]) - bclk_rate; + if (cur_val < 0) /* BCLK table is sorted */ + break; + best = i; + } + bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT; + + bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best]; + dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n", + bclk_divs[best], bclk_rate); + + lrclk = bclk_rate / params_rate(params); + dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n", + lrclk, bclk_rate / lrclk); + + snd_soc_update_bits(codec, aif1_reg, + WM8995_AIF1_WL_MASK, aif1); + snd_soc_update_bits(codec, bclk_reg, + WM8995_AIF1_BCLK_DIV_MASK, bclk); + snd_soc_update_bits(codec, lrclk_reg, + WM8995_AIF1DAC_RATE_MASK, lrclk); + snd_soc_update_bits(codec, rate_reg, + WM8995_AIF1_SR_MASK | + WM8995_AIF1CLK_RATE_MASK, rate_val); + return 0; +} + +static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate) +{ + struct snd_soc_codec *codec = codec_dai->codec; + int reg, val, mask; + + switch (codec_dai->id) { + case 0: + reg = WM8995_AIF1_MASTER_SLAVE; + mask = WM8995_AIF1_TRI; + break; + case 1: + reg = WM8995_AIF2_MASTER_SLAVE; + mask = WM8995_AIF2_TRI; + break; + case 2: + reg = WM8995_POWER_MANAGEMENT_5; + mask = WM8995_AIF3_TRI; + break; + default: + return -EINVAL; + } + + if (tristate) + val = mask; + else + val = 0; + + return snd_soc_update_bits(codec, reg, mask, reg); +} + +/* The size in bits of the FLL divide multiplied by 10 + * to allow rounding later */ +#define FIXED_FLL_SIZE ((1 << 16) * 10) + +struct fll_div { + u16 outdiv; + u16 n; + u16 k; + u16 clk_ref_div; + u16 fll_fratio; +}; + +static int wm8995_get_fll_config(struct fll_div *fll, + int freq_in, int freq_out) +{ + u64 Kpart; + unsigned int K, Ndiv, Nmod; + + pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out); + + /* Scale the input frequency down to <= 13.5MHz */ + fll->clk_ref_div = 0; + while (freq_in > 13500000) { + fll->clk_ref_div++; + freq_in /= 2; + + if (fll->clk_ref_div > 3) + return -EINVAL; + } + pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in); + + /* Scale the output to give 90MHz<=Fvco<=100MHz */ + fll->outdiv = 3; + while (freq_out * (fll->outdiv + 1) < 90000000) { + fll->outdiv++; + if (fll->outdiv > 63) + return -EINVAL; + } + freq_out *= fll->outdiv + 1; + pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out); + + if (freq_in > 1000000) { + fll->fll_fratio = 0; + } else if (freq_in > 256000) { + fll->fll_fratio = 1; + freq_in *= 2; + } else if (freq_in > 128000) { + fll->fll_fratio = 2; + freq_in *= 4; + } else if (freq_in > 64000) { + fll->fll_fratio = 3; + freq_in *= 8; + } else { + fll->fll_fratio = 4; + freq_in *= 16; + } + pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in); + + /* Now, calculate N.K */ + Ndiv = freq_out / freq_in; + + fll->n = Ndiv; + Nmod = freq_out % freq_in; + pr_debug("Nmod=%d\n", Nmod); + + /* Calculate fractional part - scale up so we can round. 
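 * (FIXED_FLL_SIZE is (1 << 16) * 10: 2^16 because K is a 16-bit
 * fractional part, and the extra factor of 10 provides the decimal
 * digit used for the rounding below.)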
*/ + Kpart = FIXED_FLL_SIZE * (long long)Nmod; + + do_div(Kpart, freq_in); + + K = Kpart & 0xFFFFFFFF; + + if ((K % 10) >= 5) + K += 5; + + /* Move down to proper range now rounding is done */ + fll->k = K / 10; + + pr_debug("N=%x K=%x\n", fll->n, fll->k); + + return 0; +} + +static int wm8995_set_fll(struct snd_soc_dai *dai, int id, + int src, unsigned int freq_in, + unsigned int freq_out) +{ + struct snd_soc_codec *codec; + struct wm8995_priv *wm8995; + int reg_offset, ret; + struct fll_div fll; + u16 reg, aif1, aif2; + + codec = dai->codec; + wm8995 = snd_soc_codec_get_drvdata(codec); + + aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1) + & WM8995_AIF1CLK_ENA; + + aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1) + & WM8995_AIF2CLK_ENA; + + switch (id) { + case WM8995_FLL1: + reg_offset = 0; + id = 0; + break; + case WM8995_FLL2: + reg_offset = 0x20; + id = 1; + break; + default: + return -EINVAL; + } + + switch (src) { + case 0: + /* Allow no source specification when stopping */ + if (freq_out) + return -EINVAL; + break; + case WM8995_FLL_SRC_MCLK1: + case WM8995_FLL_SRC_MCLK2: + case WM8995_FLL_SRC_LRCLK: + case WM8995_FLL_SRC_BCLK: + break; + default: + return -EINVAL; + } + + /* Are we changing anything? */ + if (wm8995->fll[id].src == src && + wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out) + return 0; + + /* If we're stopping the FLL redo the old config - no + * registers will actually be written but we avoid GCC flow + * analysis bugs spewing warnings. + */ + if (freq_out) + ret = wm8995_get_fll_config(&fll, freq_in, freq_out); + else + ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in, + wm8995->fll[id].out); + if (ret < 0) + return ret; + + /* Gate the AIF clocks while we reclock */ + snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1, + WM8995_AIF1CLK_ENA_MASK, 0); + snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1, + WM8995_AIF2CLK_ENA_MASK, 0); + + /* We always need to disable the FLL while reconfiguring */ + snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset, + WM8995_FLL1_ENA_MASK, 0); + + reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) | + (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT); + snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset, + WM8995_FLL1_OUTDIV_MASK | + WM8995_FLL1_FRATIO_MASK, reg); + + snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k); + + snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset, + WM8995_FLL1_N_MASK, + fll.n << WM8995_FLL1_N_SHIFT); + + snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset, + WM8995_FLL1_REFCLK_DIV_MASK | + WM8995_FLL1_REFCLK_SRC_MASK, + (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) | + (src - 1)); + + if (freq_out) + snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset, + WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA); + + wm8995->fll[id].in = freq_in; + wm8995->fll[id].out = freq_out; + wm8995->fll[id].src = src; + + /* Enable any gated AIF clocks */ + snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1, + WM8995_AIF1CLK_ENA_MASK, aif1); + snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1, + WM8995_AIF2CLK_ENA_MASK, aif2); + + configure_clock(codec); + + return 0; +} + +static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_codec *codec; + struct wm8995_priv *wm8995; + + codec = dai->codec; + wm8995 = snd_soc_codec_get_drvdata(codec); + + switch (dai->id) { + case 0: + case 1: + break; + default: + /* AIF3 shares clocking with AIF1/2 */ + return -EINVAL; + } + + switch (clk_id) { + 
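	/* Record the selected source (and, for the MCLK cases, its rate) for
	 * this AIF; configure_clock() below then reprograms the AIF clock
	 * muxes and CLK_SYS accordingly. */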
case WM8995_SYSCLK_MCLK1: + wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1; + wm8995->mclk[0] = freq; + dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n", + dai->id + 1, freq); + break; + case WM8995_SYSCLK_MCLK2: + wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK2; + wm8995->mclk[1] = freq; + dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n", + dai->id + 1, freq); + break; + case WM8995_SYSCLK_FLL1: + wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1; + dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1); + break; + case WM8995_SYSCLK_FLL2: + wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2; + dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1); + break; + case WM8995_SYSCLK_OPCLK: + default: + dev_err(dai->dev, "Unknown clock source %d\n", clk_id); + return -EINVAL; + } + + configure_clock(codec); + + return 0; +} + +static int wm8995_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + struct wm8995_priv *wm8995; + int ret; + + wm8995 = snd_soc_codec_get_drvdata(codec); + switch (level) { + case SND_SOC_BIAS_ON: + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { + ret = snd_soc_cache_sync(codec); + if (ret) { + dev_err(codec->dev, + "Failed to sync cache: %d\n", ret); + return ret; + } + + snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, + WM8995_BG_ENA_MASK, WM8995_BG_ENA); + + } + break; + case SND_SOC_BIAS_OFF: + snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, + WM8995_BG_ENA_MASK, 0); + break; + } + + codec->dapm.bias_level = level; + return 0; +} + +#ifdef CONFIG_PM +static int wm8995_suspend(struct snd_soc_codec *codec, pm_message_t state) +{ + wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static int wm8995_resume(struct snd_soc_codec *codec) +{ + wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + return 0; +} +#else +#define wm8995_suspend NULL +#define wm8995_resume NULL +#endif + +static int wm8995_remove(struct snd_soc_codec *codec) +{ + struct wm8995_priv *wm8995; + struct i2c_client *i2c; + + i2c = container_of(codec->dev, struct i2c_client, dev); + wm8995 = snd_soc_codec_get_drvdata(codec); + wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} + +static int wm8995_probe(struct snd_soc_codec *codec) +{ + struct wm8995_priv *wm8995; + int ret; + + codec->dapm.idle_bias_off = 1; + wm8995 = snd_soc_codec_get_drvdata(codec); + + ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type); + if (ret < 0) { + dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret); + return ret; + } + + ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET); + if (ret < 0) { + dev_err(codec->dev, "Failed to read device ID: %d\n", ret); + return ret; + } + + if (ret != 0x8995) { + dev_err(codec->dev, "Invalid device ID: %#x\n", ret); + return -EINVAL; + } + + ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0); + if (ret < 0) { + dev_err(codec->dev, "Failed to issue reset: %d\n", ret); + return ret; + } + + wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY); + + /* Latch volume updates (right only; we always do left then right). 
*/ + snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME, + WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU); + snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME, + WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU); + snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME, + WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU); + snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME, + WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU); + snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME, + WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU); + snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME, + WM8995_AIF2ADC_VU_MASK, WM8995_AIF2ADC_VU); + snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME, + WM8995_DAC1_VU_MASK, WM8995_DAC1_VU); + snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME, + WM8995_DAC2_VU_MASK, WM8995_DAC2_VU); + snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME, + WM8995_IN1_VU_MASK, WM8995_IN1_VU); + + wm8995_update_class_w(codec); + + snd_soc_add_controls(codec, wm8995_snd_controls, + ARRAY_SIZE(wm8995_snd_controls)); + snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets, + ARRAY_SIZE(wm8995_dapm_widgets)); + snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon, + ARRAY_SIZE(wm8995_intercon)); + + return 0; +} + +#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_ops wm8995_aif1_dai_ops = { + .set_sysclk = wm8995_set_dai_sysclk, + .set_fmt = wm8995_set_dai_fmt, + .hw_params = wm8995_hw_params, + .digital_mute = wm8995_aif_mute, + .set_pll = wm8995_set_fll, + .set_tristate = wm8995_set_tristate, +}; + +static struct snd_soc_dai_ops wm8995_aif2_dai_ops = { + .set_sysclk = wm8995_set_dai_sysclk, + .set_fmt = wm8995_set_dai_fmt, + .hw_params = wm8995_hw_params, + .digital_mute = wm8995_aif_mute, + .set_pll = wm8995_set_fll, + .set_tristate = wm8995_set_tristate, +}; + +static struct snd_soc_dai_ops wm8995_aif3_dai_ops = { + .set_tristate = wm8995_set_tristate, +}; + +static struct snd_soc_dai_driver wm8995_dai[] = { + { + .name = "wm8995-aif1", + .playback = { + .stream_name = "AIF1 Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = WM8995_FORMATS + }, + .capture = { + .stream_name = "AIF1 Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_48000, + .formats = WM8995_FORMATS + }, + .ops = &wm8995_aif1_dai_ops + }, + { + .name = "wm8995-aif2", + .playback = { + .stream_name = "AIF2 Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = WM8995_FORMATS + }, + .capture = { + .stream_name = "AIF2 Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_48000, + .formats = WM8995_FORMATS + }, + .ops = &wm8995_aif2_dai_ops + }, + { + .name = "wm8995-aif3", + .playback = { + .stream_name = "AIF3 Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = WM8995_FORMATS + }, + .capture = { + .stream_name = "AIF3 Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_48000, + .formats = WM8995_FORMATS + }, + .ops = &wm8995_aif3_dai_ops + } +}; + +static struct snd_soc_codec_driver soc_codec_dev_wm8995 = { + .probe = wm8995_probe, + .remove = wm8995_remove, + .suspend = wm8995_suspend, + .resume = wm8995_resume, + .set_bias_level = wm8995_set_bias_level, + .reg_cache_size = ARRAY_SIZE(wm8995_reg_defs), + .reg_word_size = sizeof(u16), + 
.reg_cache_default = wm8995_reg_defs, + .volatile_register = wm8995_volatile, + .compress_type = SND_SOC_RBTREE_COMPRESSION +}; + +#if defined(CONFIG_SPI_MASTER) +static int __devinit wm8995_spi_probe(struct spi_device *spi) +{ + struct wm8995_priv *wm8995; + int ret; + + wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL); + if (!wm8995) + return -ENOMEM; + + wm8995->control_type = SND_SOC_SPI; + spi_set_drvdata(spi, wm8995); + + ret = snd_soc_register_codec(&spi->dev, + &soc_codec_dev_wm8995, wm8995_dai, + ARRAY_SIZE(wm8995_dai)); + if (ret < 0) + kfree(wm8995); + return ret; +} + +static int __devexit wm8995_spi_remove(struct spi_device *spi) +{ + snd_soc_unregister_codec(&spi->dev); + kfree(spi_get_drvdata(spi)); + return 0; +} + +static struct spi_driver wm8995_spi_driver = { + .driver = { + .name = "wm8995", + .owner = THIS_MODULE, + }, + .probe = wm8995_spi_probe, + .remove = __devexit_p(wm8995_spi_remove) +}; +#endif + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +static __devinit int wm8995_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct wm8995_priv *wm8995; + int ret; + + wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL); + if (!wm8995) + return -ENOMEM; + + wm8995->control_type = SND_SOC_I2C; + i2c_set_clientdata(i2c, wm8995); + + ret = snd_soc_register_codec(&i2c->dev, + &soc_codec_dev_wm8995, wm8995_dai, + ARRAY_SIZE(wm8995_dai)); + if (ret < 0) + kfree(wm8995); + return ret; +} + +static __devexit int wm8995_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static const struct i2c_device_id wm8995_i2c_id[] = { + {"wm8995", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id); + +static struct i2c_driver wm8995_i2c_driver = { + .driver = { + .name = "wm8995", + .owner = THIS_MODULE, + }, + .probe = wm8995_i2c_probe, + .remove = __devexit_p(wm8995_i2c_remove), + .id_table = wm8995_i2c_id +}; +#endif + +static int __init wm8995_modinit(void) +{ + int ret = 0; + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + ret = i2c_add_driver(&wm8995_i2c_driver); + if (ret) { + printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n", + ret); + } +#endif +#if defined(CONFIG_SPI_MASTER) + ret = spi_register_driver(&wm8995_spi_driver); + if (ret) { + printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n", + ret); + } +#endif + return ret; +} + +module_init(wm8995_modinit); + +static void __exit wm8995_exit(void) +{ +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + i2c_del_driver(&wm8995_i2c_driver); +#endif +#if defined(CONFIG_SPI_MASTER) + spi_unregister_driver(&wm8995_spi_driver); +#endif +} + +module_exit(wm8995_exit); + +MODULE_DESCRIPTION("ASoC WM8995 driver"); +MODULE_AUTHOR("Dimitris Papastamos "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/wm8995.h b/sound/soc/codecs/wm8995.h new file mode 100644 index 000000000000..5642121c4977 --- /dev/null +++ b/sound/soc/codecs/wm8995.h @@ -0,0 +1,4269 @@ +/* + * wm8995.h -- WM8995 ALSA SoC Audio driver + * + * Copyright 2010 Wolfson Microelectronics plc + * + * Author: Dimitris Papastamos + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _WM8995_H +#define _WM8995_H + +#include + +/* + * Register values. 
+ */ +#define WM8995_SOFTWARE_RESET 0x00 +#define WM8995_POWER_MANAGEMENT_1 0x01 +#define WM8995_POWER_MANAGEMENT_2 0x02 +#define WM8995_POWER_MANAGEMENT_3 0x03 +#define WM8995_POWER_MANAGEMENT_4 0x04 +#define WM8995_POWER_MANAGEMENT_5 0x05 +#define WM8995_LEFT_LINE_INPUT_1_VOLUME 0x10 +#define WM8995_RIGHT_LINE_INPUT_1_VOLUME 0x11 +#define WM8995_LEFT_LINE_INPUT_CONTROL 0x12 +#define WM8995_DAC1_LEFT_VOLUME 0x18 +#define WM8995_DAC1_RIGHT_VOLUME 0x19 +#define WM8995_DAC2_LEFT_VOLUME 0x1A +#define WM8995_DAC2_RIGHT_VOLUME 0x1B +#define WM8995_OUTPUT_VOLUME_ZC_1 0x1C +#define WM8995_MICBIAS_1 0x20 +#define WM8995_MICBIAS_2 0x21 +#define WM8995_LDO_1 0x28 +#define WM8995_LDO_2 0x29 +#define WM8995_ACCESSORY_DETECT_MODE1 0x30 +#define WM8995_ACCESSORY_DETECT_MODE2 0x31 +#define WM8995_HEADPHONE_DETECT1 0x34 +#define WM8995_HEADPHONE_DETECT2 0x35 +#define WM8995_MIC_DETECT_1 0x38 +#define WM8995_MIC_DETECT_2 0x39 +#define WM8995_CHARGE_PUMP_1 0x40 +#define WM8995_CLASS_W_1 0x45 +#define WM8995_DC_SERVO_1 0x50 +#define WM8995_DC_SERVO_2 0x51 +#define WM8995_DC_SERVO_3 0x52 +#define WM8995_DC_SERVO_5 0x54 +#define WM8995_DC_SERVO_6 0x55 +#define WM8995_DC_SERVO_7 0x56 +#define WM8995_DC_SERVO_READBACK_0 0x57 +#define WM8995_ANALOGUE_HP_1 0x60 +#define WM8995_ANALOGUE_HP_2 0x61 +#define WM8995_CHIP_REVISION 0x100 +#define WM8995_CONTROL_INTERFACE_1 0x101 +#define WM8995_CONTROL_INTERFACE_2 0x102 +#define WM8995_WRITE_SEQUENCER_CTRL_1 0x110 +#define WM8995_WRITE_SEQUENCER_CTRL_2 0x111 +#define WM8995_AIF1_CLOCKING_1 0x200 +#define WM8995_AIF1_CLOCKING_2 0x201 +#define WM8995_AIF2_CLOCKING_1 0x204 +#define WM8995_AIF2_CLOCKING_2 0x205 +#define WM8995_CLOCKING_1 0x208 +#define WM8995_CLOCKING_2 0x209 +#define WM8995_AIF1_RATE 0x210 +#define WM8995_AIF2_RATE 0x211 +#define WM8995_RATE_STATUS 0x212 +#define WM8995_FLL1_CONTROL_1 0x220 +#define WM8995_FLL1_CONTROL_2 0x221 +#define WM8995_FLL1_CONTROL_3 0x222 +#define WM8995_FLL1_CONTROL_4 0x223 +#define WM8995_FLL1_CONTROL_5 0x224 +#define WM8995_FLL2_CONTROL_1 0x240 +#define WM8995_FLL2_CONTROL_2 0x241 +#define WM8995_FLL2_CONTROL_3 0x242 +#define WM8995_FLL2_CONTROL_4 0x243 +#define WM8995_FLL2_CONTROL_5 0x244 +#define WM8995_AIF1_CONTROL_1 0x300 +#define WM8995_AIF1_CONTROL_2 0x301 +#define WM8995_AIF1_MASTER_SLAVE 0x302 +#define WM8995_AIF1_BCLK 0x303 +#define WM8995_AIF1ADC_LRCLK 0x304 +#define WM8995_AIF1DAC_LRCLK 0x305 +#define WM8995_AIF1DAC_DATA 0x306 +#define WM8995_AIF1ADC_DATA 0x307 +#define WM8995_AIF2_CONTROL_1 0x310 +#define WM8995_AIF2_CONTROL_2 0x311 +#define WM8995_AIF2_MASTER_SLAVE 0x312 +#define WM8995_AIF2_BCLK 0x313 +#define WM8995_AIF2ADC_LRCLK 0x314 +#define WM8995_AIF2DAC_LRCLK 0x315 +#define WM8995_AIF2DAC_DATA 0x316 +#define WM8995_AIF2ADC_DATA 0x317 +#define WM8995_AIF1_ADC1_LEFT_VOLUME 0x400 +#define WM8995_AIF1_ADC1_RIGHT_VOLUME 0x401 +#define WM8995_AIF1_DAC1_LEFT_VOLUME 0x402 +#define WM8995_AIF1_DAC1_RIGHT_VOLUME 0x403 +#define WM8995_AIF1_ADC2_LEFT_VOLUME 0x404 +#define WM8995_AIF1_ADC2_RIGHT_VOLUME 0x405 +#define WM8995_AIF1_DAC2_LEFT_VOLUME 0x406 +#define WM8995_AIF1_DAC2_RIGHT_VOLUME 0x407 +#define WM8995_AIF1_ADC1_FILTERS 0x410 +#define WM8995_AIF1_ADC2_FILTERS 0x411 +#define WM8995_AIF1_DAC1_FILTERS_1 0x420 +#define WM8995_AIF1_DAC1_FILTERS_2 0x421 +#define WM8995_AIF1_DAC2_FILTERS_1 0x422 +#define WM8995_AIF1_DAC2_FILTERS_2 0x423 +#define WM8995_AIF1_DRC1_1 0x440 +#define WM8995_AIF1_DRC1_2 0x441 +#define WM8995_AIF1_DRC1_3 0x442 +#define WM8995_AIF1_DRC1_4 0x443 +#define WM8995_AIF1_DRC1_5 0x444 +#define 
WM8995_AIF1_DRC2_1 0x450 +#define WM8995_AIF1_DRC2_2 0x451 +#define WM8995_AIF1_DRC2_3 0x452 +#define WM8995_AIF1_DRC2_4 0x453 +#define WM8995_AIF1_DRC2_5 0x454 +#define WM8995_AIF1_DAC1_EQ_GAINS_1 0x480 +#define WM8995_AIF1_DAC1_EQ_GAINS_2 0x481 +#define WM8995_AIF1_DAC1_EQ_BAND_1_A 0x482 +#define WM8995_AIF1_DAC1_EQ_BAND_1_B 0x483 +#define WM8995_AIF1_DAC1_EQ_BAND_1_PG 0x484 +#define WM8995_AIF1_DAC1_EQ_BAND_2_A 0x485 +#define WM8995_AIF1_DAC1_EQ_BAND_2_B 0x486 +#define WM8995_AIF1_DAC1_EQ_BAND_2_C 0x487 +#define WM8995_AIF1_DAC1_EQ_BAND_2_PG 0x488 +#define WM8995_AIF1_DAC1_EQ_BAND_3_A 0x489 +#define WM8995_AIF1_DAC1_EQ_BAND_3_B 0x48A +#define WM8995_AIF1_DAC1_EQ_BAND_3_C 0x48B +#define WM8995_AIF1_DAC1_EQ_BAND_3_PG 0x48C +#define WM8995_AIF1_DAC1_EQ_BAND_4_A 0x48D +#define WM8995_AIF1_DAC1_EQ_BAND_4_B 0x48E +#define WM8995_AIF1_DAC1_EQ_BAND_4_C 0x48F +#define WM8995_AIF1_DAC1_EQ_BAND_4_PG 0x490 +#define WM8995_AIF1_DAC1_EQ_BAND_5_A 0x491 +#define WM8995_AIF1_DAC1_EQ_BAND_5_B 0x492 +#define WM8995_AIF1_DAC1_EQ_BAND_5_PG 0x493 +#define WM8995_AIF1_DAC2_EQ_GAINS_1 0x4A0 +#define WM8995_AIF1_DAC2_EQ_GAINS_2 0x4A1 +#define WM8995_AIF1_DAC2_EQ_BAND_1_A 0x4A2 +#define WM8995_AIF1_DAC2_EQ_BAND_1_B 0x4A3 +#define WM8995_AIF1_DAC2_EQ_BAND_1_PG 0x4A4 +#define WM8995_AIF1_DAC2_EQ_BAND_2_A 0x4A5 +#define WM8995_AIF1_DAC2_EQ_BAND_2_B 0x4A6 +#define WM8995_AIF1_DAC2_EQ_BAND_2_C 0x4A7 +#define WM8995_AIF1_DAC2_EQ_BAND_2_PG 0x4A8 +#define WM8995_AIF1_DAC2_EQ_BAND_3_A 0x4A9 +#define WM8995_AIF1_DAC2_EQ_BAND_3_B 0x4AA +#define WM8995_AIF1_DAC2_EQ_BAND_3_C 0x4AB +#define WM8995_AIF1_DAC2_EQ_BAND_3_PG 0x4AC +#define WM8995_AIF1_DAC2_EQ_BAND_4_A 0x4AD +#define WM8995_AIF1_DAC2_EQ_BAND_4_B 0x4AE +#define WM8995_AIF1_DAC2_EQ_BAND_4_C 0x4AF +#define WM8995_AIF1_DAC2_EQ_BAND_4_PG 0x4B0 +#define WM8995_AIF1_DAC2_EQ_BAND_5_A 0x4B1 +#define WM8995_AIF1_DAC2_EQ_BAND_5_B 0x4B2 +#define WM8995_AIF1_DAC2_EQ_BAND_5_PG 0x4B3 +#define WM8995_AIF2_ADC_LEFT_VOLUME 0x500 +#define WM8995_AIF2_ADC_RIGHT_VOLUME 0x501 +#define WM8995_AIF2_DAC_LEFT_VOLUME 0x502 +#define WM8995_AIF2_DAC_RIGHT_VOLUME 0x503 +#define WM8995_AIF2_ADC_FILTERS 0x510 +#define WM8995_AIF2_DAC_FILTERS_1 0x520 +#define WM8995_AIF2_DAC_FILTERS_2 0x521 +#define WM8995_AIF2_DRC_1 0x540 +#define WM8995_AIF2_DRC_2 0x541 +#define WM8995_AIF2_DRC_3 0x542 +#define WM8995_AIF2_DRC_4 0x543 +#define WM8995_AIF2_DRC_5 0x544 +#define WM8995_AIF2_EQ_GAINS_1 0x580 +#define WM8995_AIF2_EQ_GAINS_2 0x581 +#define WM8995_AIF2_EQ_BAND_1_A 0x582 +#define WM8995_AIF2_EQ_BAND_1_B 0x583 +#define WM8995_AIF2_EQ_BAND_1_PG 0x584 +#define WM8995_AIF2_EQ_BAND_2_A 0x585 +#define WM8995_AIF2_EQ_BAND_2_B 0x586 +#define WM8995_AIF2_EQ_BAND_2_C 0x587 +#define WM8995_AIF2_EQ_BAND_2_PG 0x588 +#define WM8995_AIF2_EQ_BAND_3_A 0x589 +#define WM8995_AIF2_EQ_BAND_3_B 0x58A +#define WM8995_AIF2_EQ_BAND_3_C 0x58B +#define WM8995_AIF2_EQ_BAND_3_PG 0x58C +#define WM8995_AIF2_EQ_BAND_4_A 0x58D +#define WM8995_AIF2_EQ_BAND_4_B 0x58E +#define WM8995_AIF2_EQ_BAND_4_C 0x58F +#define WM8995_AIF2_EQ_BAND_4_PG 0x590 +#define WM8995_AIF2_EQ_BAND_5_A 0x591 +#define WM8995_AIF2_EQ_BAND_5_B 0x592 +#define WM8995_AIF2_EQ_BAND_5_PG 0x593 +#define WM8995_DAC1_MIXER_VOLUMES 0x600 +#define WM8995_DAC1_LEFT_MIXER_ROUTING 0x601 +#define WM8995_DAC1_RIGHT_MIXER_ROUTING 0x602 +#define WM8995_DAC2_MIXER_VOLUMES 0x603 +#define WM8995_DAC2_LEFT_MIXER_ROUTING 0x604 +#define WM8995_DAC2_RIGHT_MIXER_ROUTING 0x605 +#define WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING 0x606 +#define WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING 0x607 +#define 
WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING 0x608 +#define WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING 0x609 +#define WM8995_DAC_SOFTMUTE 0x610 +#define WM8995_OVERSAMPLING 0x620 +#define WM8995_SIDETONE 0x621 +#define WM8995_GPIO_1 0x700 +#define WM8995_GPIO_2 0x701 +#define WM8995_GPIO_3 0x702 +#define WM8995_GPIO_4 0x703 +#define WM8995_GPIO_5 0x704 +#define WM8995_GPIO_6 0x705 +#define WM8995_GPIO_7 0x706 +#define WM8995_GPIO_8 0x707 +#define WM8995_GPIO_9 0x708 +#define WM8995_GPIO_10 0x709 +#define WM8995_GPIO_11 0x70A +#define WM8995_GPIO_12 0x70B +#define WM8995_GPIO_13 0x70C +#define WM8995_GPIO_14 0x70D +#define WM8995_PULL_CONTROL_1 0x720 +#define WM8995_PULL_CONTROL_2 0x721 +#define WM8995_INTERRUPT_STATUS_1 0x730 +#define WM8995_INTERRUPT_STATUS_2 0x731 +#define WM8995_INTERRUPT_RAW_STATUS_2 0x732 +#define WM8995_INTERRUPT_STATUS_1_MASK 0x738 +#define WM8995_INTERRUPT_STATUS_2_MASK 0x739 +#define WM8995_INTERRUPT_CONTROL 0x740 +#define WM8995_LEFT_PDM_SPEAKER_1 0x800 +#define WM8995_RIGHT_PDM_SPEAKER_1 0x801 +#define WM8995_PDM_SPEAKER_1_MUTE_SEQUENCE 0x802 +#define WM8995_LEFT_PDM_SPEAKER_2 0x808 +#define WM8995_RIGHT_PDM_SPEAKER_2 0x809 +#define WM8995_PDM_SPEAKER_2_MUTE_SEQUENCE 0x80A +#define WM8995_WRITE_SEQUENCER_0 0x3000 +#define WM8995_WRITE_SEQUENCER_1 0x3001 +#define WM8995_WRITE_SEQUENCER_2 0x3002 +#define WM8995_WRITE_SEQUENCER_3 0x3003 +#define WM8995_WRITE_SEQUENCER_4 0x3004 +#define WM8995_WRITE_SEQUENCER_5 0x3005 +#define WM8995_WRITE_SEQUENCER_6 0x3006 +#define WM8995_WRITE_SEQUENCER_7 0x3007 +#define WM8995_WRITE_SEQUENCER_8 0x3008 +#define WM8995_WRITE_SEQUENCER_9 0x3009 +#define WM8995_WRITE_SEQUENCER_10 0x300A +#define WM8995_WRITE_SEQUENCER_11 0x300B +#define WM8995_WRITE_SEQUENCER_12 0x300C +#define WM8995_WRITE_SEQUENCER_13 0x300D +#define WM8995_WRITE_SEQUENCER_14 0x300E +#define WM8995_WRITE_SEQUENCER_15 0x300F +#define WM8995_WRITE_SEQUENCER_16 0x3010 +#define WM8995_WRITE_SEQUENCER_17 0x3011 +#define WM8995_WRITE_SEQUENCER_18 0x3012 +#define WM8995_WRITE_SEQUENCER_19 0x3013 +#define WM8995_WRITE_SEQUENCER_20 0x3014 +#define WM8995_WRITE_SEQUENCER_21 0x3015 +#define WM8995_WRITE_SEQUENCER_22 0x3016 +#define WM8995_WRITE_SEQUENCER_23 0x3017 +#define WM8995_WRITE_SEQUENCER_24 0x3018 +#define WM8995_WRITE_SEQUENCER_25 0x3019 +#define WM8995_WRITE_SEQUENCER_26 0x301A +#define WM8995_WRITE_SEQUENCER_27 0x301B +#define WM8995_WRITE_SEQUENCER_28 0x301C +#define WM8995_WRITE_SEQUENCER_29 0x301D +#define WM8995_WRITE_SEQUENCER_30 0x301E +#define WM8995_WRITE_SEQUENCER_31 0x301F +#define WM8995_WRITE_SEQUENCER_32 0x3020 +#define WM8995_WRITE_SEQUENCER_33 0x3021 +#define WM8995_WRITE_SEQUENCER_34 0x3022 +#define WM8995_WRITE_SEQUENCER_35 0x3023 +#define WM8995_WRITE_SEQUENCER_36 0x3024 +#define WM8995_WRITE_SEQUENCER_37 0x3025 +#define WM8995_WRITE_SEQUENCER_38 0x3026 +#define WM8995_WRITE_SEQUENCER_39 0x3027 +#define WM8995_WRITE_SEQUENCER_40 0x3028 +#define WM8995_WRITE_SEQUENCER_41 0x3029 +#define WM8995_WRITE_SEQUENCER_42 0x302A +#define WM8995_WRITE_SEQUENCER_43 0x302B +#define WM8995_WRITE_SEQUENCER_44 0x302C +#define WM8995_WRITE_SEQUENCER_45 0x302D +#define WM8995_WRITE_SEQUENCER_46 0x302E +#define WM8995_WRITE_SEQUENCER_47 0x302F +#define WM8995_WRITE_SEQUENCER_48 0x3030 +#define WM8995_WRITE_SEQUENCER_49 0x3031 +#define WM8995_WRITE_SEQUENCER_50 0x3032 +#define WM8995_WRITE_SEQUENCER_51 0x3033 +#define WM8995_WRITE_SEQUENCER_52 0x3034 +#define WM8995_WRITE_SEQUENCER_53 0x3035 +#define WM8995_WRITE_SEQUENCER_54 0x3036 +#define WM8995_WRITE_SEQUENCER_55 0x3037 
+#define WM8995_WRITE_SEQUENCER_56 0x3038 +#define WM8995_WRITE_SEQUENCER_57 0x3039 +#define WM8995_WRITE_SEQUENCER_58 0x303A +#define WM8995_WRITE_SEQUENCER_59 0x303B +#define WM8995_WRITE_SEQUENCER_60 0x303C +#define WM8995_WRITE_SEQUENCER_61 0x303D +#define WM8995_WRITE_SEQUENCER_62 0x303E +#define WM8995_WRITE_SEQUENCER_63 0x303F +#define WM8995_WRITE_SEQUENCER_64 0x3040 +#define WM8995_WRITE_SEQUENCER_65 0x3041 +#define WM8995_WRITE_SEQUENCER_66 0x3042 +#define WM8995_WRITE_SEQUENCER_67 0x3043 +#define WM8995_WRITE_SEQUENCER_68 0x3044 +#define WM8995_WRITE_SEQUENCER_69 0x3045 +#define WM8995_WRITE_SEQUENCER_70 0x3046 +#define WM8995_WRITE_SEQUENCER_71 0x3047 +#define WM8995_WRITE_SEQUENCER_72 0x3048 +#define WM8995_WRITE_SEQUENCER_73 0x3049 +#define WM8995_WRITE_SEQUENCER_74 0x304A +#define WM8995_WRITE_SEQUENCER_75 0x304B +#define WM8995_WRITE_SEQUENCER_76 0x304C +#define WM8995_WRITE_SEQUENCER_77 0x304D +#define WM8995_WRITE_SEQUENCER_78 0x304E +#define WM8995_WRITE_SEQUENCER_79 0x304F +#define WM8995_WRITE_SEQUENCER_80 0x3050 +#define WM8995_WRITE_SEQUENCER_81 0x3051 +#define WM8995_WRITE_SEQUENCER_82 0x3052 +#define WM8995_WRITE_SEQUENCER_83 0x3053 +#define WM8995_WRITE_SEQUENCER_84 0x3054 +#define WM8995_WRITE_SEQUENCER_85 0x3055 +#define WM8995_WRITE_SEQUENCER_86 0x3056 +#define WM8995_WRITE_SEQUENCER_87 0x3057 +#define WM8995_WRITE_SEQUENCER_88 0x3058 +#define WM8995_WRITE_SEQUENCER_89 0x3059 +#define WM8995_WRITE_SEQUENCER_90 0x305A +#define WM8995_WRITE_SEQUENCER_91 0x305B +#define WM8995_WRITE_SEQUENCER_92 0x305C +#define WM8995_WRITE_SEQUENCER_93 0x305D +#define WM8995_WRITE_SEQUENCER_94 0x305E +#define WM8995_WRITE_SEQUENCER_95 0x305F +#define WM8995_WRITE_SEQUENCER_96 0x3060 +#define WM8995_WRITE_SEQUENCER_97 0x3061 +#define WM8995_WRITE_SEQUENCER_98 0x3062 +#define WM8995_WRITE_SEQUENCER_99 0x3063 +#define WM8995_WRITE_SEQUENCER_100 0x3064 +#define WM8995_WRITE_SEQUENCER_101 0x3065 +#define WM8995_WRITE_SEQUENCER_102 0x3066 +#define WM8995_WRITE_SEQUENCER_103 0x3067 +#define WM8995_WRITE_SEQUENCER_104 0x3068 +#define WM8995_WRITE_SEQUENCER_105 0x3069 +#define WM8995_WRITE_SEQUENCER_106 0x306A +#define WM8995_WRITE_SEQUENCER_107 0x306B +#define WM8995_WRITE_SEQUENCER_108 0x306C +#define WM8995_WRITE_SEQUENCER_109 0x306D +#define WM8995_WRITE_SEQUENCER_110 0x306E +#define WM8995_WRITE_SEQUENCER_111 0x306F +#define WM8995_WRITE_SEQUENCER_112 0x3070 +#define WM8995_WRITE_SEQUENCER_113 0x3071 +#define WM8995_WRITE_SEQUENCER_114 0x3072 +#define WM8995_WRITE_SEQUENCER_115 0x3073 +#define WM8995_WRITE_SEQUENCER_116 0x3074 +#define WM8995_WRITE_SEQUENCER_117 0x3075 +#define WM8995_WRITE_SEQUENCER_118 0x3076 +#define WM8995_WRITE_SEQUENCER_119 0x3077 +#define WM8995_WRITE_SEQUENCER_120 0x3078 +#define WM8995_WRITE_SEQUENCER_121 0x3079 +#define WM8995_WRITE_SEQUENCER_122 0x307A +#define WM8995_WRITE_SEQUENCER_123 0x307B +#define WM8995_WRITE_SEQUENCER_124 0x307C +#define WM8995_WRITE_SEQUENCER_125 0x307D +#define WM8995_WRITE_SEQUENCER_126 0x307E +#define WM8995_WRITE_SEQUENCER_127 0x307F +#define WM8995_WRITE_SEQUENCER_128 0x3080 +#define WM8995_WRITE_SEQUENCER_129 0x3081 +#define WM8995_WRITE_SEQUENCER_130 0x3082 +#define WM8995_WRITE_SEQUENCER_131 0x3083 +#define WM8995_WRITE_SEQUENCER_132 0x3084 +#define WM8995_WRITE_SEQUENCER_133 0x3085 +#define WM8995_WRITE_SEQUENCER_134 0x3086 +#define WM8995_WRITE_SEQUENCER_135 0x3087 +#define WM8995_WRITE_SEQUENCER_136 0x3088 +#define WM8995_WRITE_SEQUENCER_137 0x3089 +#define WM8995_WRITE_SEQUENCER_138 0x308A +#define 
WM8995_WRITE_SEQUENCER_139 0x308B +#define WM8995_WRITE_SEQUENCER_140 0x308C +#define WM8995_WRITE_SEQUENCER_141 0x308D +#define WM8995_WRITE_SEQUENCER_142 0x308E +#define WM8995_WRITE_SEQUENCER_143 0x308F +#define WM8995_WRITE_SEQUENCER_144 0x3090 +#define WM8995_WRITE_SEQUENCER_145 0x3091 +#define WM8995_WRITE_SEQUENCER_146 0x3092 +#define WM8995_WRITE_SEQUENCER_147 0x3093 +#define WM8995_WRITE_SEQUENCER_148 0x3094 +#define WM8995_WRITE_SEQUENCER_149 0x3095 +#define WM8995_WRITE_SEQUENCER_150 0x3096 +#define WM8995_WRITE_SEQUENCER_151 0x3097 +#define WM8995_WRITE_SEQUENCER_152 0x3098 +#define WM8995_WRITE_SEQUENCER_153 0x3099 +#define WM8995_WRITE_SEQUENCER_154 0x309A +#define WM8995_WRITE_SEQUENCER_155 0x309B +#define WM8995_WRITE_SEQUENCER_156 0x309C +#define WM8995_WRITE_SEQUENCER_157 0x309D +#define WM8995_WRITE_SEQUENCER_158 0x309E +#define WM8995_WRITE_SEQUENCER_159 0x309F +#define WM8995_WRITE_SEQUENCER_160 0x30A0 +#define WM8995_WRITE_SEQUENCER_161 0x30A1 +#define WM8995_WRITE_SEQUENCER_162 0x30A2 +#define WM8995_WRITE_SEQUENCER_163 0x30A3 +#define WM8995_WRITE_SEQUENCER_164 0x30A4 +#define WM8995_WRITE_SEQUENCER_165 0x30A5 +#define WM8995_WRITE_SEQUENCER_166 0x30A6 +#define WM8995_WRITE_SEQUENCER_167 0x30A7 +#define WM8995_WRITE_SEQUENCER_168 0x30A8 +#define WM8995_WRITE_SEQUENCER_169 0x30A9 +#define WM8995_WRITE_SEQUENCER_170 0x30AA +#define WM8995_WRITE_SEQUENCER_171 0x30AB +#define WM8995_WRITE_SEQUENCER_172 0x30AC +#define WM8995_WRITE_SEQUENCER_173 0x30AD +#define WM8995_WRITE_SEQUENCER_174 0x30AE +#define WM8995_WRITE_SEQUENCER_175 0x30AF +#define WM8995_WRITE_SEQUENCER_176 0x30B0 +#define WM8995_WRITE_SEQUENCER_177 0x30B1 +#define WM8995_WRITE_SEQUENCER_178 0x30B2 +#define WM8995_WRITE_SEQUENCER_179 0x30B3 +#define WM8995_WRITE_SEQUENCER_180 0x30B4 +#define WM8995_WRITE_SEQUENCER_181 0x30B5 +#define WM8995_WRITE_SEQUENCER_182 0x30B6 +#define WM8995_WRITE_SEQUENCER_183 0x30B7 +#define WM8995_WRITE_SEQUENCER_184 0x30B8 +#define WM8995_WRITE_SEQUENCER_185 0x30B9 +#define WM8995_WRITE_SEQUENCER_186 0x30BA +#define WM8995_WRITE_SEQUENCER_187 0x30BB +#define WM8995_WRITE_SEQUENCER_188 0x30BC +#define WM8995_WRITE_SEQUENCER_189 0x30BD +#define WM8995_WRITE_SEQUENCER_190 0x30BE +#define WM8995_WRITE_SEQUENCER_191 0x30BF +#define WM8995_WRITE_SEQUENCER_192 0x30C0 +#define WM8995_WRITE_SEQUENCER_193 0x30C1 +#define WM8995_WRITE_SEQUENCER_194 0x30C2 +#define WM8995_WRITE_SEQUENCER_195 0x30C3 +#define WM8995_WRITE_SEQUENCER_196 0x30C4 +#define WM8995_WRITE_SEQUENCER_197 0x30C5 +#define WM8995_WRITE_SEQUENCER_198 0x30C6 +#define WM8995_WRITE_SEQUENCER_199 0x30C7 +#define WM8995_WRITE_SEQUENCER_200 0x30C8 +#define WM8995_WRITE_SEQUENCER_201 0x30C9 +#define WM8995_WRITE_SEQUENCER_202 0x30CA +#define WM8995_WRITE_SEQUENCER_203 0x30CB +#define WM8995_WRITE_SEQUENCER_204 0x30CC +#define WM8995_WRITE_SEQUENCER_205 0x30CD +#define WM8995_WRITE_SEQUENCER_206 0x30CE +#define WM8995_WRITE_SEQUENCER_207 0x30CF +#define WM8995_WRITE_SEQUENCER_208 0x30D0 +#define WM8995_WRITE_SEQUENCER_209 0x30D1 +#define WM8995_WRITE_SEQUENCER_210 0x30D2 +#define WM8995_WRITE_SEQUENCER_211 0x30D3 +#define WM8995_WRITE_SEQUENCER_212 0x30D4 +#define WM8995_WRITE_SEQUENCER_213 0x30D5 +#define WM8995_WRITE_SEQUENCER_214 0x30D6 +#define WM8995_WRITE_SEQUENCER_215 0x30D7 +#define WM8995_WRITE_SEQUENCER_216 0x30D8 +#define WM8995_WRITE_SEQUENCER_217 0x30D9 +#define WM8995_WRITE_SEQUENCER_218 0x30DA +#define WM8995_WRITE_SEQUENCER_219 0x30DB +#define WM8995_WRITE_SEQUENCER_220 0x30DC +#define WM8995_WRITE_SEQUENCER_221 
0x30DD +#define WM8995_WRITE_SEQUENCER_222 0x30DE +#define WM8995_WRITE_SEQUENCER_223 0x30DF +#define WM8995_WRITE_SEQUENCER_224 0x30E0 +#define WM8995_WRITE_SEQUENCER_225 0x30E1 +#define WM8995_WRITE_SEQUENCER_226 0x30E2 +#define WM8995_WRITE_SEQUENCER_227 0x30E3 +#define WM8995_WRITE_SEQUENCER_228 0x30E4 +#define WM8995_WRITE_SEQUENCER_229 0x30E5 +#define WM8995_WRITE_SEQUENCER_230 0x30E6 +#define WM8995_WRITE_SEQUENCER_231 0x30E7 +#define WM8995_WRITE_SEQUENCER_232 0x30E8 +#define WM8995_WRITE_SEQUENCER_233 0x30E9 +#define WM8995_WRITE_SEQUENCER_234 0x30EA +#define WM8995_WRITE_SEQUENCER_235 0x30EB +#define WM8995_WRITE_SEQUENCER_236 0x30EC +#define WM8995_WRITE_SEQUENCER_237 0x30ED +#define WM8995_WRITE_SEQUENCER_238 0x30EE +#define WM8995_WRITE_SEQUENCER_239 0x30EF +#define WM8995_WRITE_SEQUENCER_240 0x30F0 +#define WM8995_WRITE_SEQUENCER_241 0x30F1 +#define WM8995_WRITE_SEQUENCER_242 0x30F2 +#define WM8995_WRITE_SEQUENCER_243 0x30F3 +#define WM8995_WRITE_SEQUENCER_244 0x30F4 +#define WM8995_WRITE_SEQUENCER_245 0x30F5 +#define WM8995_WRITE_SEQUENCER_246 0x30F6 +#define WM8995_WRITE_SEQUENCER_247 0x30F7 +#define WM8995_WRITE_SEQUENCER_248 0x30F8 +#define WM8995_WRITE_SEQUENCER_249 0x30F9 +#define WM8995_WRITE_SEQUENCER_250 0x30FA +#define WM8995_WRITE_SEQUENCER_251 0x30FB +#define WM8995_WRITE_SEQUENCER_252 0x30FC +#define WM8995_WRITE_SEQUENCER_253 0x30FD +#define WM8995_WRITE_SEQUENCER_254 0x30FE +#define WM8995_WRITE_SEQUENCER_255 0x30FF +#define WM8995_WRITE_SEQUENCER_256 0x3100 +#define WM8995_WRITE_SEQUENCER_257 0x3101 +#define WM8995_WRITE_SEQUENCER_258 0x3102 +#define WM8995_WRITE_SEQUENCER_259 0x3103 +#define WM8995_WRITE_SEQUENCER_260 0x3104 +#define WM8995_WRITE_SEQUENCER_261 0x3105 +#define WM8995_WRITE_SEQUENCER_262 0x3106 +#define WM8995_WRITE_SEQUENCER_263 0x3107 +#define WM8995_WRITE_SEQUENCER_264 0x3108 +#define WM8995_WRITE_SEQUENCER_265 0x3109 +#define WM8995_WRITE_SEQUENCER_266 0x310A +#define WM8995_WRITE_SEQUENCER_267 0x310B +#define WM8995_WRITE_SEQUENCER_268 0x310C +#define WM8995_WRITE_SEQUENCER_269 0x310D +#define WM8995_WRITE_SEQUENCER_270 0x310E +#define WM8995_WRITE_SEQUENCER_271 0x310F +#define WM8995_WRITE_SEQUENCER_272 0x3110 +#define WM8995_WRITE_SEQUENCER_273 0x3111 +#define WM8995_WRITE_SEQUENCER_274 0x3112 +#define WM8995_WRITE_SEQUENCER_275 0x3113 +#define WM8995_WRITE_SEQUENCER_276 0x3114 +#define WM8995_WRITE_SEQUENCER_277 0x3115 +#define WM8995_WRITE_SEQUENCER_278 0x3116 +#define WM8995_WRITE_SEQUENCER_279 0x3117 +#define WM8995_WRITE_SEQUENCER_280 0x3118 +#define WM8995_WRITE_SEQUENCER_281 0x3119 +#define WM8995_WRITE_SEQUENCER_282 0x311A +#define WM8995_WRITE_SEQUENCER_283 0x311B +#define WM8995_WRITE_SEQUENCER_284 0x311C +#define WM8995_WRITE_SEQUENCER_285 0x311D +#define WM8995_WRITE_SEQUENCER_286 0x311E +#define WM8995_WRITE_SEQUENCER_287 0x311F +#define WM8995_WRITE_SEQUENCER_288 0x3120 +#define WM8995_WRITE_SEQUENCER_289 0x3121 +#define WM8995_WRITE_SEQUENCER_290 0x3122 +#define WM8995_WRITE_SEQUENCER_291 0x3123 +#define WM8995_WRITE_SEQUENCER_292 0x3124 +#define WM8995_WRITE_SEQUENCER_293 0x3125 +#define WM8995_WRITE_SEQUENCER_294 0x3126 +#define WM8995_WRITE_SEQUENCER_295 0x3127 +#define WM8995_WRITE_SEQUENCER_296 0x3128 +#define WM8995_WRITE_SEQUENCER_297 0x3129 +#define WM8995_WRITE_SEQUENCER_298 0x312A +#define WM8995_WRITE_SEQUENCER_299 0x312B +#define WM8995_WRITE_SEQUENCER_300 0x312C +#define WM8995_WRITE_SEQUENCER_301 0x312D +#define WM8995_WRITE_SEQUENCER_302 0x312E +#define WM8995_WRITE_SEQUENCER_303 0x312F +#define 
WM8995_WRITE_SEQUENCER_304 0x3130 +#define WM8995_WRITE_SEQUENCER_305 0x3131 +#define WM8995_WRITE_SEQUENCER_306 0x3132 +#define WM8995_WRITE_SEQUENCER_307 0x3133 +#define WM8995_WRITE_SEQUENCER_308 0x3134 +#define WM8995_WRITE_SEQUENCER_309 0x3135 +#define WM8995_WRITE_SEQUENCER_310 0x3136 +#define WM8995_WRITE_SEQUENCER_311 0x3137 +#define WM8995_WRITE_SEQUENCER_312 0x3138 +#define WM8995_WRITE_SEQUENCER_313 0x3139 +#define WM8995_WRITE_SEQUENCER_314 0x313A +#define WM8995_WRITE_SEQUENCER_315 0x313B +#define WM8995_WRITE_SEQUENCER_316 0x313C +#define WM8995_WRITE_SEQUENCER_317 0x313D +#define WM8995_WRITE_SEQUENCER_318 0x313E +#define WM8995_WRITE_SEQUENCER_319 0x313F +#define WM8995_WRITE_SEQUENCER_320 0x3140 +#define WM8995_WRITE_SEQUENCER_321 0x3141 +#define WM8995_WRITE_SEQUENCER_322 0x3142 +#define WM8995_WRITE_SEQUENCER_323 0x3143 +#define WM8995_WRITE_SEQUENCER_324 0x3144 +#define WM8995_WRITE_SEQUENCER_325 0x3145 +#define WM8995_WRITE_SEQUENCER_326 0x3146 +#define WM8995_WRITE_SEQUENCER_327 0x3147 +#define WM8995_WRITE_SEQUENCER_328 0x3148 +#define WM8995_WRITE_SEQUENCER_329 0x3149 +#define WM8995_WRITE_SEQUENCER_330 0x314A +#define WM8995_WRITE_SEQUENCER_331 0x314B +#define WM8995_WRITE_SEQUENCER_332 0x314C +#define WM8995_WRITE_SEQUENCER_333 0x314D +#define WM8995_WRITE_SEQUENCER_334 0x314E +#define WM8995_WRITE_SEQUENCER_335 0x314F +#define WM8995_WRITE_SEQUENCER_336 0x3150 +#define WM8995_WRITE_SEQUENCER_337 0x3151 +#define WM8995_WRITE_SEQUENCER_338 0x3152 +#define WM8995_WRITE_SEQUENCER_339 0x3153 +#define WM8995_WRITE_SEQUENCER_340 0x3154 +#define WM8995_WRITE_SEQUENCER_341 0x3155 +#define WM8995_WRITE_SEQUENCER_342 0x3156 +#define WM8995_WRITE_SEQUENCER_343 0x3157 +#define WM8995_WRITE_SEQUENCER_344 0x3158 +#define WM8995_WRITE_SEQUENCER_345 0x3159 +#define WM8995_WRITE_SEQUENCER_346 0x315A +#define WM8995_WRITE_SEQUENCER_347 0x315B +#define WM8995_WRITE_SEQUENCER_348 0x315C +#define WM8995_WRITE_SEQUENCER_349 0x315D +#define WM8995_WRITE_SEQUENCER_350 0x315E +#define WM8995_WRITE_SEQUENCER_351 0x315F +#define WM8995_WRITE_SEQUENCER_352 0x3160 +#define WM8995_WRITE_SEQUENCER_353 0x3161 +#define WM8995_WRITE_SEQUENCER_354 0x3162 +#define WM8995_WRITE_SEQUENCER_355 0x3163 +#define WM8995_WRITE_SEQUENCER_356 0x3164 +#define WM8995_WRITE_SEQUENCER_357 0x3165 +#define WM8995_WRITE_SEQUENCER_358 0x3166 +#define WM8995_WRITE_SEQUENCER_359 0x3167 +#define WM8995_WRITE_SEQUENCER_360 0x3168 +#define WM8995_WRITE_SEQUENCER_361 0x3169 +#define WM8995_WRITE_SEQUENCER_362 0x316A +#define WM8995_WRITE_SEQUENCER_363 0x316B +#define WM8995_WRITE_SEQUENCER_364 0x316C +#define WM8995_WRITE_SEQUENCER_365 0x316D +#define WM8995_WRITE_SEQUENCER_366 0x316E +#define WM8995_WRITE_SEQUENCER_367 0x316F +#define WM8995_WRITE_SEQUENCER_368 0x3170 +#define WM8995_WRITE_SEQUENCER_369 0x3171 +#define WM8995_WRITE_SEQUENCER_370 0x3172 +#define WM8995_WRITE_SEQUENCER_371 0x3173 +#define WM8995_WRITE_SEQUENCER_372 0x3174 +#define WM8995_WRITE_SEQUENCER_373 0x3175 +#define WM8995_WRITE_SEQUENCER_374 0x3176 +#define WM8995_WRITE_SEQUENCER_375 0x3177 +#define WM8995_WRITE_SEQUENCER_376 0x3178 +#define WM8995_WRITE_SEQUENCER_377 0x3179 +#define WM8995_WRITE_SEQUENCER_378 0x317A +#define WM8995_WRITE_SEQUENCER_379 0x317B +#define WM8995_WRITE_SEQUENCER_380 0x317C +#define WM8995_WRITE_SEQUENCER_381 0x317D +#define WM8995_WRITE_SEQUENCER_382 0x317E +#define WM8995_WRITE_SEQUENCER_383 0x317F +#define WM8995_WRITE_SEQUENCER_384 0x3180 +#define WM8995_WRITE_SEQUENCER_385 0x3181 +#define WM8995_WRITE_SEQUENCER_386 
0x3182 +#define WM8995_WRITE_SEQUENCER_387 0x3183 +#define WM8995_WRITE_SEQUENCER_388 0x3184 +#define WM8995_WRITE_SEQUENCER_389 0x3185 +#define WM8995_WRITE_SEQUENCER_390 0x3186 +#define WM8995_WRITE_SEQUENCER_391 0x3187 +#define WM8995_WRITE_SEQUENCER_392 0x3188 +#define WM8995_WRITE_SEQUENCER_393 0x3189 +#define WM8995_WRITE_SEQUENCER_394 0x318A +#define WM8995_WRITE_SEQUENCER_395 0x318B +#define WM8995_WRITE_SEQUENCER_396 0x318C +#define WM8995_WRITE_SEQUENCER_397 0x318D +#define WM8995_WRITE_SEQUENCER_398 0x318E +#define WM8995_WRITE_SEQUENCER_399 0x318F +#define WM8995_WRITE_SEQUENCER_400 0x3190 +#define WM8995_WRITE_SEQUENCER_401 0x3191 +#define WM8995_WRITE_SEQUENCER_402 0x3192 +#define WM8995_WRITE_SEQUENCER_403 0x3193 +#define WM8995_WRITE_SEQUENCER_404 0x3194 +#define WM8995_WRITE_SEQUENCER_405 0x3195 +#define WM8995_WRITE_SEQUENCER_406 0x3196 +#define WM8995_WRITE_SEQUENCER_407 0x3197 +#define WM8995_WRITE_SEQUENCER_408 0x3198 +#define WM8995_WRITE_SEQUENCER_409 0x3199 +#define WM8995_WRITE_SEQUENCER_410 0x319A +#define WM8995_WRITE_SEQUENCER_411 0x319B +#define WM8995_WRITE_SEQUENCER_412 0x319C +#define WM8995_WRITE_SEQUENCER_413 0x319D +#define WM8995_WRITE_SEQUENCER_414 0x319E +#define WM8995_WRITE_SEQUENCER_415 0x319F +#define WM8995_WRITE_SEQUENCER_416 0x31A0 +#define WM8995_WRITE_SEQUENCER_417 0x31A1 +#define WM8995_WRITE_SEQUENCER_418 0x31A2 +#define WM8995_WRITE_SEQUENCER_419 0x31A3 +#define WM8995_WRITE_SEQUENCER_420 0x31A4 +#define WM8995_WRITE_SEQUENCER_421 0x31A5 +#define WM8995_WRITE_SEQUENCER_422 0x31A6 +#define WM8995_WRITE_SEQUENCER_423 0x31A7 +#define WM8995_WRITE_SEQUENCER_424 0x31A8 +#define WM8995_WRITE_SEQUENCER_425 0x31A9 +#define WM8995_WRITE_SEQUENCER_426 0x31AA +#define WM8995_WRITE_SEQUENCER_427 0x31AB +#define WM8995_WRITE_SEQUENCER_428 0x31AC +#define WM8995_WRITE_SEQUENCER_429 0x31AD +#define WM8995_WRITE_SEQUENCER_430 0x31AE +#define WM8995_WRITE_SEQUENCER_431 0x31AF +#define WM8995_WRITE_SEQUENCER_432 0x31B0 +#define WM8995_WRITE_SEQUENCER_433 0x31B1 +#define WM8995_WRITE_SEQUENCER_434 0x31B2 +#define WM8995_WRITE_SEQUENCER_435 0x31B3 +#define WM8995_WRITE_SEQUENCER_436 0x31B4 +#define WM8995_WRITE_SEQUENCER_437 0x31B5 +#define WM8995_WRITE_SEQUENCER_438 0x31B6 +#define WM8995_WRITE_SEQUENCER_439 0x31B7 +#define WM8995_WRITE_SEQUENCER_440 0x31B8 +#define WM8995_WRITE_SEQUENCER_441 0x31B9 +#define WM8995_WRITE_SEQUENCER_442 0x31BA +#define WM8995_WRITE_SEQUENCER_443 0x31BB +#define WM8995_WRITE_SEQUENCER_444 0x31BC +#define WM8995_WRITE_SEQUENCER_445 0x31BD +#define WM8995_WRITE_SEQUENCER_446 0x31BE +#define WM8995_WRITE_SEQUENCER_447 0x31BF +#define WM8995_WRITE_SEQUENCER_448 0x31C0 +#define WM8995_WRITE_SEQUENCER_449 0x31C1 +#define WM8995_WRITE_SEQUENCER_450 0x31C2 +#define WM8995_WRITE_SEQUENCER_451 0x31C3 +#define WM8995_WRITE_SEQUENCER_452 0x31C4 +#define WM8995_WRITE_SEQUENCER_453 0x31C5 +#define WM8995_WRITE_SEQUENCER_454 0x31C6 +#define WM8995_WRITE_SEQUENCER_455 0x31C7 +#define WM8995_WRITE_SEQUENCER_456 0x31C8 +#define WM8995_WRITE_SEQUENCER_457 0x31C9 +#define WM8995_WRITE_SEQUENCER_458 0x31CA +#define WM8995_WRITE_SEQUENCER_459 0x31CB +#define WM8995_WRITE_SEQUENCER_460 0x31CC +#define WM8995_WRITE_SEQUENCER_461 0x31CD +#define WM8995_WRITE_SEQUENCER_462 0x31CE +#define WM8995_WRITE_SEQUENCER_463 0x31CF +#define WM8995_WRITE_SEQUENCER_464 0x31D0 +#define WM8995_WRITE_SEQUENCER_465 0x31D1 +#define WM8995_WRITE_SEQUENCER_466 0x31D2 +#define WM8995_WRITE_SEQUENCER_467 0x31D3 +#define WM8995_WRITE_SEQUENCER_468 0x31D4 +#define 
WM8995_WRITE_SEQUENCER_469 0x31D5 +#define WM8995_WRITE_SEQUENCER_470 0x31D6 +#define WM8995_WRITE_SEQUENCER_471 0x31D7 +#define WM8995_WRITE_SEQUENCER_472 0x31D8 +#define WM8995_WRITE_SEQUENCER_473 0x31D9 +#define WM8995_WRITE_SEQUENCER_474 0x31DA +#define WM8995_WRITE_SEQUENCER_475 0x31DB +#define WM8995_WRITE_SEQUENCER_476 0x31DC +#define WM8995_WRITE_SEQUENCER_477 0x31DD +#define WM8995_WRITE_SEQUENCER_478 0x31DE +#define WM8995_WRITE_SEQUENCER_479 0x31DF +#define WM8995_WRITE_SEQUENCER_480 0x31E0 +#define WM8995_WRITE_SEQUENCER_481 0x31E1 +#define WM8995_WRITE_SEQUENCER_482 0x31E2 +#define WM8995_WRITE_SEQUENCER_483 0x31E3 +#define WM8995_WRITE_SEQUENCER_484 0x31E4 +#define WM8995_WRITE_SEQUENCER_485 0x31E5 +#define WM8995_WRITE_SEQUENCER_486 0x31E6 +#define WM8995_WRITE_SEQUENCER_487 0x31E7 +#define WM8995_WRITE_SEQUENCER_488 0x31E8 +#define WM8995_WRITE_SEQUENCER_489 0x31E9 +#define WM8995_WRITE_SEQUENCER_490 0x31EA +#define WM8995_WRITE_SEQUENCER_491 0x31EB +#define WM8995_WRITE_SEQUENCER_492 0x31EC +#define WM8995_WRITE_SEQUENCER_493 0x31ED +#define WM8995_WRITE_SEQUENCER_494 0x31EE +#define WM8995_WRITE_SEQUENCER_495 0x31EF +#define WM8995_WRITE_SEQUENCER_496 0x31F0 +#define WM8995_WRITE_SEQUENCER_497 0x31F1 +#define WM8995_WRITE_SEQUENCER_498 0x31F2 +#define WM8995_WRITE_SEQUENCER_499 0x31F3 +#define WM8995_WRITE_SEQUENCER_500 0x31F4 +#define WM8995_WRITE_SEQUENCER_501 0x31F5 +#define WM8995_WRITE_SEQUENCER_502 0x31F6 +#define WM8995_WRITE_SEQUENCER_503 0x31F7 +#define WM8995_WRITE_SEQUENCER_504 0x31F8 +#define WM8995_WRITE_SEQUENCER_505 0x31F9 +#define WM8995_WRITE_SEQUENCER_506 0x31FA +#define WM8995_WRITE_SEQUENCER_507 0x31FB +#define WM8995_WRITE_SEQUENCER_508 0x31FC +#define WM8995_WRITE_SEQUENCER_509 0x31FD +#define WM8995_WRITE_SEQUENCER_510 0x31FE +#define WM8995_WRITE_SEQUENCER_511 0x31FF + +#define WM8995_REGISTER_COUNT 725 +#define WM8995_MAX_REGISTER 0x31FF + +#define WM8995_MAX_CACHED_REGISTER WM8995_MAX_REGISTER + +/* + * Field Definitions. 
+ */ + +/* + * R0 (0x00) - Software Reset + */ +#define WM8995_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */ +#define WM8995_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */ +#define WM8995_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */ + +/* + * R1 (0x01) - Power Management (1) + */ +#define WM8995_MICB2_ENA 0x0200 /* MICB2_ENA */ +#define WM8995_MICB2_ENA_MASK 0x0200 /* MICB2_ENA */ +#define WM8995_MICB2_ENA_SHIFT 9 /* MICB2_ENA */ +#define WM8995_MICB2_ENA_WIDTH 1 /* MICB2_ENA */ +#define WM8995_MICB1_ENA 0x0100 /* MICB1_ENA */ +#define WM8995_MICB1_ENA_MASK 0x0100 /* MICB1_ENA */ +#define WM8995_MICB1_ENA_SHIFT 8 /* MICB1_ENA */ +#define WM8995_MICB1_ENA_WIDTH 1 /* MICB1_ENA */ +#define WM8995_HPOUT2L_ENA 0x0080 /* HPOUT2L_ENA */ +#define WM8995_HPOUT2L_ENA_MASK 0x0080 /* HPOUT2L_ENA */ +#define WM8995_HPOUT2L_ENA_SHIFT 7 /* HPOUT2L_ENA */ +#define WM8995_HPOUT2L_ENA_WIDTH 1 /* HPOUT2L_ENA */ +#define WM8995_HPOUT2R_ENA 0x0040 /* HPOUT2R_ENA */ +#define WM8995_HPOUT2R_ENA_MASK 0x0040 /* HPOUT2R_ENA */ +#define WM8995_HPOUT2R_ENA_SHIFT 6 /* HPOUT2R_ENA */ +#define WM8995_HPOUT2R_ENA_WIDTH 1 /* HPOUT2R_ENA */ +#define WM8995_HPOUT1L_ENA 0x0020 /* HPOUT1L_ENA */ +#define WM8995_HPOUT1L_ENA_MASK 0x0020 /* HPOUT1L_ENA */ +#define WM8995_HPOUT1L_ENA_SHIFT 5 /* HPOUT1L_ENA */ +#define WM8995_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */ +#define WM8995_HPOUT1R_ENA 0x0010 /* HPOUT1R_ENA */ +#define WM8995_HPOUT1R_ENA_MASK 0x0010 /* HPOUT1R_ENA */ +#define WM8995_HPOUT1R_ENA_SHIFT 4 /* HPOUT1R_ENA */ +#define WM8995_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */ +#define WM8995_BG_ENA 0x0001 /* BG_ENA */ +#define WM8995_BG_ENA_MASK 0x0001 /* BG_ENA */ +#define WM8995_BG_ENA_SHIFT 0 /* BG_ENA */ +#define WM8995_BG_ENA_WIDTH 1 /* BG_ENA */ + +/* + * R2 (0x02) - Power Management (2) + */ +#define WM8995_OPCLK_ENA 0x0800 /* OPCLK_ENA */ +#define WM8995_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ +#define WM8995_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ +#define WM8995_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define WM8995_IN1L_ENA 0x0020 /* IN1L_ENA */ +#define WM8995_IN1L_ENA_MASK 0x0020 /* IN1L_ENA */ +#define WM8995_IN1L_ENA_SHIFT 5 /* IN1L_ENA */ +#define WM8995_IN1L_ENA_WIDTH 1 /* IN1L_ENA */ +#define WM8995_IN1R_ENA 0x0010 /* IN1R_ENA */ +#define WM8995_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */ +#define WM8995_IN1R_ENA_SHIFT 4 /* IN1R_ENA */ +#define WM8995_IN1R_ENA_WIDTH 1 /* IN1R_ENA */ +#define WM8995_LDO2_ENA 0x0002 /* LDO2_ENA */ +#define WM8995_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ +#define WM8995_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ +#define WM8995_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ + +/* + * R3 (0x03) - Power Management (3) + */ +#define WM8995_AIF2ADCL_ENA 0x2000 /* AIF2ADCL_ENA */ +#define WM8995_AIF2ADCL_ENA_MASK 0x2000 /* AIF2ADCL_ENA */ +#define WM8995_AIF2ADCL_ENA_SHIFT 13 /* AIF2ADCL_ENA */ +#define WM8995_AIF2ADCL_ENA_WIDTH 1 /* AIF2ADCL_ENA */ +#define WM8995_AIF2ADCR_ENA 0x1000 /* AIF2ADCR_ENA */ +#define WM8995_AIF2ADCR_ENA_MASK 0x1000 /* AIF2ADCR_ENA */ +#define WM8995_AIF2ADCR_ENA_SHIFT 12 /* AIF2ADCR_ENA */ +#define WM8995_AIF2ADCR_ENA_WIDTH 1 /* AIF2ADCR_ENA */ +#define WM8995_AIF1ADC2L_ENA 0x0800 /* AIF1ADC2L_ENA */ +#define WM8995_AIF1ADC2L_ENA_MASK 0x0800 /* AIF1ADC2L_ENA */ +#define WM8995_AIF1ADC2L_ENA_SHIFT 11 /* AIF1ADC2L_ENA */ +#define WM8995_AIF1ADC2L_ENA_WIDTH 1 /* AIF1ADC2L_ENA */ +#define WM8995_AIF1ADC2R_ENA 0x0400 /* AIF1ADC2R_ENA */ +#define WM8995_AIF1ADC2R_ENA_MASK 0x0400 /* AIF1ADC2R_ENA */ +#define WM8995_AIF1ADC2R_ENA_SHIFT 10 /* AIF1ADC2R_ENA */ +#define WM8995_AIF1ADC2R_ENA_WIDTH 1 /* AIF1ADC2R_ENA */ +#define 
WM8995_AIF1ADC1L_ENA 0x0200 /* AIF1ADC1L_ENA */ +#define WM8995_AIF1ADC1L_ENA_MASK 0x0200 /* AIF1ADC1L_ENA */ +#define WM8995_AIF1ADC1L_ENA_SHIFT 9 /* AIF1ADC1L_ENA */ +#define WM8995_AIF1ADC1L_ENA_WIDTH 1 /* AIF1ADC1L_ENA */ +#define WM8995_AIF1ADC1R_ENA 0x0100 /* AIF1ADC1R_ENA */ +#define WM8995_AIF1ADC1R_ENA_MASK 0x0100 /* AIF1ADC1R_ENA */ +#define WM8995_AIF1ADC1R_ENA_SHIFT 8 /* AIF1ADC1R_ENA */ +#define WM8995_AIF1ADC1R_ENA_WIDTH 1 /* AIF1ADC1R_ENA */ +#define WM8995_DMIC3L_ENA 0x0080 /* DMIC3L_ENA */ +#define WM8995_DMIC3L_ENA_MASK 0x0080 /* DMIC3L_ENA */ +#define WM8995_DMIC3L_ENA_SHIFT 7 /* DMIC3L_ENA */ +#define WM8995_DMIC3L_ENA_WIDTH 1 /* DMIC3L_ENA */ +#define WM8995_DMIC3R_ENA 0x0040 /* DMIC3R_ENA */ +#define WM8995_DMIC3R_ENA_MASK 0x0040 /* DMIC3R_ENA */ +#define WM8995_DMIC3R_ENA_SHIFT 6 /* DMIC3R_ENA */ +#define WM8995_DMIC3R_ENA_WIDTH 1 /* DMIC3R_ENA */ +#define WM8995_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */ +#define WM8995_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */ +#define WM8995_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */ +#define WM8995_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */ +#define WM8995_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */ +#define WM8995_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */ +#define WM8995_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */ +#define WM8995_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */ +#define WM8995_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */ +#define WM8995_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */ +#define WM8995_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */ +#define WM8995_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */ +#define WM8995_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */ +#define WM8995_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */ +#define WM8995_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */ +#define WM8995_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */ +#define WM8995_ADCL_ENA 0x0002 /* ADCL_ENA */ +#define WM8995_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ +#define WM8995_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ +#define WM8995_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ +#define WM8995_ADCR_ENA 0x0001 /* ADCR_ENA */ +#define WM8995_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ +#define WM8995_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ +#define WM8995_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ + +/* + * R4 (0x04) - Power Management (4) + */ +#define WM8995_AIF2DACL_ENA 0x2000 /* AIF2DACL_ENA */ +#define WM8995_AIF2DACL_ENA_MASK 0x2000 /* AIF2DACL_ENA */ +#define WM8995_AIF2DACL_ENA_SHIFT 13 /* AIF2DACL_ENA */ +#define WM8995_AIF2DACL_ENA_WIDTH 1 /* AIF2DACL_ENA */ +#define WM8995_AIF2DACR_ENA 0x1000 /* AIF2DACR_ENA */ +#define WM8995_AIF2DACR_ENA_MASK 0x1000 /* AIF2DACR_ENA */ +#define WM8995_AIF2DACR_ENA_SHIFT 12 /* AIF2DACR_ENA */ +#define WM8995_AIF2DACR_ENA_WIDTH 1 /* AIF2DACR_ENA */ +#define WM8995_AIF1DAC2L_ENA 0x0800 /* AIF1DAC2L_ENA */ +#define WM8995_AIF1DAC2L_ENA_MASK 0x0800 /* AIF1DAC2L_ENA */ +#define WM8995_AIF1DAC2L_ENA_SHIFT 11 /* AIF1DAC2L_ENA */ +#define WM8995_AIF1DAC2L_ENA_WIDTH 1 /* AIF1DAC2L_ENA */ +#define WM8995_AIF1DAC2R_ENA 0x0400 /* AIF1DAC2R_ENA */ +#define WM8995_AIF1DAC2R_ENA_MASK 0x0400 /* AIF1DAC2R_ENA */ +#define WM8995_AIF1DAC2R_ENA_SHIFT 10 /* AIF1DAC2R_ENA */ +#define WM8995_AIF1DAC2R_ENA_WIDTH 1 /* AIF1DAC2R_ENA */ +#define WM8995_AIF1DAC1L_ENA 0x0200 /* AIF1DAC1L_ENA */ +#define WM8995_AIF1DAC1L_ENA_MASK 0x0200 /* AIF1DAC1L_ENA */ +#define WM8995_AIF1DAC1L_ENA_SHIFT 9 /* AIF1DAC1L_ENA */ +#define WM8995_AIF1DAC1L_ENA_WIDTH 1 /* AIF1DAC1L_ENA */ +#define WM8995_AIF1DAC1R_ENA 0x0100 /* AIF1DAC1R_ENA */ +#define WM8995_AIF1DAC1R_ENA_MASK 0x0100 /* AIF1DAC1R_ENA */ +#define WM8995_AIF1DAC1R_ENA_SHIFT 8 /* AIF1DAC1R_ENA */ +#define WM8995_AIF1DAC1R_ENA_WIDTH 1 /* 
AIF1DAC1R_ENA */ +#define WM8995_DAC2L_ENA 0x0008 /* DAC2L_ENA */ +#define WM8995_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */ +#define WM8995_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA */ +#define WM8995_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */ +#define WM8995_DAC2R_ENA 0x0004 /* DAC2R_ENA */ +#define WM8995_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */ +#define WM8995_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */ +#define WM8995_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */ +#define WM8995_DAC1L_ENA 0x0002 /* DAC1L_ENA */ +#define WM8995_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */ +#define WM8995_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */ +#define WM8995_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */ +#define WM8995_DAC1R_ENA 0x0001 /* DAC1R_ENA */ +#define WM8995_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */ +#define WM8995_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */ +#define WM8995_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */ + +/* + * R5 (0x05) - Power Management (5) + */ +#define WM8995_DMIC_SRC2_MASK 0x0300 /* DMIC_SRC2 - [9:8] */ +#define WM8995_DMIC_SRC2_SHIFT 8 /* DMIC_SRC2 - [9:8] */ +#define WM8995_DMIC_SRC2_WIDTH 2 /* DMIC_SRC2 - [9:8] */ +#define WM8995_DMIC_SRC1_MASK 0x00C0 /* DMIC_SRC1 - [7:6] */ +#define WM8995_DMIC_SRC1_SHIFT 6 /* DMIC_SRC1 - [7:6] */ +#define WM8995_DMIC_SRC1_WIDTH 2 /* DMIC_SRC1 - [7:6] */ +#define WM8995_AIF3_TRI 0x0020 /* AIF3_TRI */ +#define WM8995_AIF3_TRI_MASK 0x0020 /* AIF3_TRI */ +#define WM8995_AIF3_TRI_SHIFT 5 /* AIF3_TRI */ +#define WM8995_AIF3_TRI_WIDTH 1 /* AIF3_TRI */ +#define WM8995_AIF3_ADCDAT_SRC_MASK 0x0018 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8995_AIF3_ADCDAT_SRC_SHIFT 3 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8995_AIF3_ADCDAT_SRC_WIDTH 2 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8995_AIF2_ADCDAT_SRC 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8995_AIF2_ADCDAT_SRC_MASK 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8995_AIF2_ADCDAT_SRC_SHIFT 2 /* AIF2_ADCDAT_SRC */ +#define WM8995_AIF2_ADCDAT_SRC_WIDTH 1 /* AIF2_ADCDAT_SRC */ +#define WM8995_AIF2_DACDAT_SRC 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8995_AIF2_DACDAT_SRC_MASK 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8995_AIF2_DACDAT_SRC_SHIFT 1 /* AIF2_DACDAT_SRC */ +#define WM8995_AIF2_DACDAT_SRC_WIDTH 1 /* AIF2_DACDAT_SRC */ +#define WM8995_AIF1_DACDAT_SRC 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8995_AIF1_DACDAT_SRC_MASK 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8995_AIF1_DACDAT_SRC_SHIFT 0 /* AIF1_DACDAT_SRC */ +#define WM8995_AIF1_DACDAT_SRC_WIDTH 1 /* AIF1_DACDAT_SRC */ + +/* + * R16 (0x10) - Left Line Input 1 Volume + */ +#define WM8995_IN1_VU 0x0080 /* IN1_VU */ +#define WM8995_IN1_VU_MASK 0x0080 /* IN1_VU */ +#define WM8995_IN1_VU_SHIFT 7 /* IN1_VU */ +#define WM8995_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8995_IN1L_ZC 0x0020 /* IN1L_ZC */ +#define WM8995_IN1L_ZC_MASK 0x0020 /* IN1L_ZC */ +#define WM8995_IN1L_ZC_SHIFT 5 /* IN1L_ZC */ +#define WM8995_IN1L_ZC_WIDTH 1 /* IN1L_ZC */ +#define WM8995_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */ +#define WM8995_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */ +#define WM8995_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */ + +/* + * R17 (0x11) - Right Line Input 1 Volume + */ +#define WM8995_IN1_VU 0x0080 /* IN1_VU */ +#define WM8995_IN1_VU_MASK 0x0080 /* IN1_VU */ +#define WM8995_IN1_VU_SHIFT 7 /* IN1_VU */ +#define WM8995_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8995_IN1R_ZC 0x0020 /* IN1R_ZC */ +#define WM8995_IN1R_ZC_MASK 0x0020 /* IN1R_ZC */ +#define WM8995_IN1R_ZC_SHIFT 5 /* IN1R_ZC */ +#define WM8995_IN1R_ZC_WIDTH 1 /* IN1R_ZC */ +#define WM8995_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */ +#define WM8995_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */ +#define WM8995_IN1R_VOL_WIDTH 5 
/* IN1R_VOL - [4:0] */ + +/* + * R18 (0x12) - Left Line Input Control + */ +#define WM8995_IN1L_BOOST_MASK 0x0030 /* IN1L_BOOST - [5:4] */ +#define WM8995_IN1L_BOOST_SHIFT 4 /* IN1L_BOOST - [5:4] */ +#define WM8995_IN1L_BOOST_WIDTH 2 /* IN1L_BOOST - [5:4] */ +#define WM8995_IN1L_MODE_MASK 0x000C /* IN1L_MODE - [3:2] */ +#define WM8995_IN1L_MODE_SHIFT 2 /* IN1L_MODE - [3:2] */ +#define WM8995_IN1L_MODE_WIDTH 2 /* IN1L_MODE - [3:2] */ +#define WM8995_IN1R_MODE_MASK 0x0003 /* IN1R_MODE - [1:0] */ +#define WM8995_IN1R_MODE_SHIFT 0 /* IN1R_MODE - [1:0] */ +#define WM8995_IN1R_MODE_WIDTH 2 /* IN1R_MODE - [1:0] */ + +/* + * R24 (0x18) - DAC1 Left Volume + */ +#define WM8995_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */ +#define WM8995_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */ +#define WM8995_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */ +#define WM8995_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */ +#define WM8995_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8995_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8995_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8995_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8995_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */ +#define WM8995_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */ +#define WM8995_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */ + +/* + * R25 (0x19) - DAC1 Right Volume + */ +#define WM8995_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */ +#define WM8995_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */ +#define WM8995_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */ +#define WM8995_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */ +#define WM8995_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8995_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8995_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8995_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8995_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */ +#define WM8995_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */ +#define WM8995_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */ + +/* + * R26 (0x1A) - DAC2 Left Volume + */ +#define WM8995_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */ +#define WM8995_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */ +#define WM8995_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */ +#define WM8995_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */ +#define WM8995_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8995_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8995_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8995_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8995_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */ +#define WM8995_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */ +#define WM8995_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */ + +/* + * R27 (0x1B) - DAC2 Right Volume + */ +#define WM8995_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */ +#define WM8995_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */ +#define WM8995_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */ +#define WM8995_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */ +#define WM8995_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8995_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8995_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8995_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8995_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */ +#define WM8995_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */ +#define WM8995_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */ + +/* + * R28 (0x1C) - Output Volume ZC (1) + */ +#define WM8995_HPOUT2L_ZC 0x0008 /* HPOUT2L_ZC */ +#define WM8995_HPOUT2L_ZC_MASK 0x0008 /* HPOUT2L_ZC */ +#define WM8995_HPOUT2L_ZC_SHIFT 3 /* HPOUT2L_ZC */ +#define WM8995_HPOUT2L_ZC_WIDTH 1 /* HPOUT2L_ZC */ +#define WM8995_HPOUT2R_ZC 0x0004 /* HPOUT2R_ZC */ +#define WM8995_HPOUT2R_ZC_MASK 0x0004 /* HPOUT2R_ZC */ +#define WM8995_HPOUT2R_ZC_SHIFT 2 /* HPOUT2R_ZC */ +#define WM8995_HPOUT2R_ZC_WIDTH 1 
/* HPOUT2R_ZC */ +#define WM8995_HPOUT1L_ZC 0x0002 /* HPOUT1L_ZC */ +#define WM8995_HPOUT1L_ZC_MASK 0x0002 /* HPOUT1L_ZC */ +#define WM8995_HPOUT1L_ZC_SHIFT 1 /* HPOUT1L_ZC */ +#define WM8995_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */ +#define WM8995_HPOUT1R_ZC 0x0001 /* HPOUT1R_ZC */ +#define WM8995_HPOUT1R_ZC_MASK 0x0001 /* HPOUT1R_ZC */ +#define WM8995_HPOUT1R_ZC_SHIFT 0 /* HPOUT1R_ZC */ +#define WM8995_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */ + +/* + * R32 (0x20) - MICBIAS (1) + */ +#define WM8995_MICB1_MODE 0x0008 /* MICB1_MODE */ +#define WM8995_MICB1_MODE_MASK 0x0008 /* MICB1_MODE */ +#define WM8995_MICB1_MODE_SHIFT 3 /* MICB1_MODE */ +#define WM8995_MICB1_MODE_WIDTH 1 /* MICB1_MODE */ +#define WM8995_MICB1_LVL_MASK 0x0006 /* MICB1_LVL - [2:1] */ +#define WM8995_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [2:1] */ +#define WM8995_MICB1_LVL_WIDTH 2 /* MICB1_LVL - [2:1] */ +#define WM8995_MICB1_DISCH 0x0001 /* MICB1_DISCH */ +#define WM8995_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */ +#define WM8995_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */ +#define WM8995_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ + +/* + * R33 (0x21) - MICBIAS (2) + */ +#define WM8995_MICB2_MODE 0x0008 /* MICB2_MODE */ +#define WM8995_MICB2_MODE_MASK 0x0008 /* MICB2_MODE */ +#define WM8995_MICB2_MODE_SHIFT 3 /* MICB2_MODE */ +#define WM8995_MICB2_MODE_WIDTH 1 /* MICB2_MODE */ +#define WM8995_MICB2_LVL_MASK 0x0006 /* MICB2_LVL - [2:1] */ +#define WM8995_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [2:1] */ +#define WM8995_MICB2_LVL_WIDTH 2 /* MICB2_LVL - [2:1] */ +#define WM8995_MICB2_DISCH 0x0001 /* MICB2_DISCH */ +#define WM8995_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */ +#define WM8995_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */ +#define WM8995_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ + +/* + * R40 (0x28) - LDO 1 + */ +#define WM8995_LDO1_MODE 0x0020 /* LDO1_MODE */ +#define WM8995_LDO1_MODE_MASK 0x0020 /* LDO1_MODE */ +#define WM8995_LDO1_MODE_SHIFT 5 /* LDO1_MODE */ +#define WM8995_LDO1_MODE_WIDTH 1 /* LDO1_MODE */ +#define WM8995_LDO1_VSEL_MASK 0x0006 /* LDO1_VSEL - [2:1] */ +#define WM8995_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [2:1] */ +#define WM8995_LDO1_VSEL_WIDTH 2 /* LDO1_VSEL - [2:1] */ +#define WM8995_LDO1_DISCH 0x0001 /* LDO1_DISCH */ +#define WM8995_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */ +#define WM8995_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */ +#define WM8995_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */ + +/* + * R41 (0x29) - LDO 2 + */ +#define WM8995_LDO2_MODE 0x0020 /* LDO2_MODE */ +#define WM8995_LDO2_MODE_MASK 0x0020 /* LDO2_MODE */ +#define WM8995_LDO2_MODE_SHIFT 5 /* LDO2_MODE */ +#define WM8995_LDO2_MODE_WIDTH 1 /* LDO2_MODE */ +#define WM8995_LDO2_VSEL_MASK 0x001E /* LDO2_VSEL - [4:1] */ +#define WM8995_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [4:1] */ +#define WM8995_LDO2_VSEL_WIDTH 4 /* LDO2_VSEL - [4:1] */ +#define WM8995_LDO2_DISCH 0x0001 /* LDO2_DISCH */ +#define WM8995_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */ +#define WM8995_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */ +#define WM8995_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */ + +/* + * R48 (0x30) - Accessory Detect Mode1 + */ +#define WM8995_JD_MODE_MASK 0x0003 /* JD_MODE - [1:0] */ +#define WM8995_JD_MODE_SHIFT 0 /* JD_MODE - [1:0] */ +#define WM8995_JD_MODE_WIDTH 2 /* JD_MODE - [1:0] */ + +/* + * R49 (0x31) - Accessory Detect Mode2 + */ +#define WM8995_VID_ENA 0x0001 /* VID_ENA */ +#define WM8995_VID_ENA_MASK 0x0001 /* VID_ENA */ +#define WM8995_VID_ENA_SHIFT 0 /* VID_ENA */ +#define WM8995_VID_ENA_WIDTH 1 /* VID_ENA */ + +/* + * R52 (0x34) - Headphone Detect1 + */ +#define WM8995_HP_RAMPRATE 0x0002 /* HP_RAMPRATE */ +#define 
WM8995_HP_RAMPRATE_MASK 0x0002 /* HP_RAMPRATE */ +#define WM8995_HP_RAMPRATE_SHIFT 1 /* HP_RAMPRATE */ +#define WM8995_HP_RAMPRATE_WIDTH 1 /* HP_RAMPRATE */ +#define WM8995_HP_POLL 0x0001 /* HP_POLL */ +#define WM8995_HP_POLL_MASK 0x0001 /* HP_POLL */ +#define WM8995_HP_POLL_SHIFT 0 /* HP_POLL */ +#define WM8995_HP_POLL_WIDTH 1 /* HP_POLL */ + +/* + * R53 (0x35) - Headphone Detect2 + */ +#define WM8995_HP_DONE 0x0080 /* HP_DONE */ +#define WM8995_HP_DONE_MASK 0x0080 /* HP_DONE */ +#define WM8995_HP_DONE_SHIFT 7 /* HP_DONE */ +#define WM8995_HP_DONE_WIDTH 1 /* HP_DONE */ +#define WM8995_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */ +#define WM8995_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */ +#define WM8995_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */ + +/* + * R56 (0x38) - Mic Detect (1) + */ +#define WM8995_MICD_RATE_MASK 0x7800 /* MICD_RATE - [14:11] */ +#define WM8995_MICD_RATE_SHIFT 11 /* MICD_RATE - [14:11] */ +#define WM8995_MICD_RATE_WIDTH 4 /* MICD_RATE - [14:11] */ +#define WM8995_MICD_LVL_SEL_MASK 0x01F8 /* MICD_LVL_SEL - [8:3] */ +#define WM8995_MICD_LVL_SEL_SHIFT 3 /* MICD_LVL_SEL - [8:3] */ +#define WM8995_MICD_LVL_SEL_WIDTH 6 /* MICD_LVL_SEL - [8:3] */ +#define WM8995_MICD_DBTIME 0x0002 /* MICD_DBTIME */ +#define WM8995_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */ +#define WM8995_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */ +#define WM8995_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */ +#define WM8995_MICD_ENA 0x0001 /* MICD_ENA */ +#define WM8995_MICD_ENA_MASK 0x0001 /* MICD_ENA */ +#define WM8995_MICD_ENA_SHIFT 0 /* MICD_ENA */ +#define WM8995_MICD_ENA_WIDTH 1 /* MICD_ENA */ + +/* + * R57 (0x39) - Mic Detect (2) + */ +#define WM8995_MICD_LVL_MASK 0x01FC /* MICD_LVL - [8:2] */ +#define WM8995_MICD_LVL_SHIFT 2 /* MICD_LVL - [8:2] */ +#define WM8995_MICD_LVL_WIDTH 7 /* MICD_LVL - [8:2] */ +#define WM8995_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8995_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8995_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8995_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8995_MICD_STS 0x0001 /* MICD_STS */ +#define WM8995_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8995_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8995_MICD_STS_WIDTH 1 /* MICD_STS */ + +/* + * R64 (0x40) - Charge Pump (1) + */ +#define WM8995_CP_ENA 0x8000 /* CP_ENA */ +#define WM8995_CP_ENA_MASK 0x8000 /* CP_ENA */ +#define WM8995_CP_ENA_SHIFT 15 /* CP_ENA */ +#define WM8995_CP_ENA_WIDTH 1 /* CP_ENA */ + +/* + * R69 (0x45) - Class W (1) + */ +#define WM8995_CP_DYN_SRC_SEL_MASK 0x0300 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8995_CP_DYN_SRC_SEL_SHIFT 8 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8995_CP_DYN_SRC_SEL_WIDTH 2 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8995_CP_DYN_PWR 0x0001 /* CP_DYN_PWR */ +#define WM8995_CP_DYN_PWR_MASK 0x0001 /* CP_DYN_PWR */ +#define WM8995_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR */ +#define WM8995_CP_DYN_PWR_WIDTH 1 /* CP_DYN_PWR */ + +/* + * R80 (0x50) - DC Servo (1) + */ +#define WM8995_DCS_ENA_CHAN_3 0x0008 /* DCS_ENA_CHAN_3 */ +#define WM8995_DCS_ENA_CHAN_3_MASK 0x0008 /* DCS_ENA_CHAN_3 */ +#define WM8995_DCS_ENA_CHAN_3_SHIFT 3 /* DCS_ENA_CHAN_3 */ +#define WM8995_DCS_ENA_CHAN_3_WIDTH 1 /* DCS_ENA_CHAN_3 */ +#define WM8995_DCS_ENA_CHAN_2 0x0004 /* DCS_ENA_CHAN_2 */ +#define WM8995_DCS_ENA_CHAN_2_MASK 0x0004 /* DCS_ENA_CHAN_2 */ +#define WM8995_DCS_ENA_CHAN_2_SHIFT 2 /* DCS_ENA_CHAN_2 */ +#define WM8995_DCS_ENA_CHAN_2_WIDTH 1 /* DCS_ENA_CHAN_2 */ +#define WM8995_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */ +#define WM8995_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */ +#define 
WM8995_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */ +#define WM8995_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */ +#define WM8995_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8995_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8995_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */ +#define WM8995_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */ + +/* + * R81 (0x51) - DC Servo (2) + */ +#define WM8995_DCS_TRIG_SINGLE_3 0x8000 /* DCS_TRIG_SINGLE_3 */ +#define WM8995_DCS_TRIG_SINGLE_3_MASK 0x8000 /* DCS_TRIG_SINGLE_3 */ +#define WM8995_DCS_TRIG_SINGLE_3_SHIFT 15 /* DCS_TRIG_SINGLE_3 */ +#define WM8995_DCS_TRIG_SINGLE_3_WIDTH 1 /* DCS_TRIG_SINGLE_3 */ +#define WM8995_DCS_TRIG_SINGLE_2 0x4000 /* DCS_TRIG_SINGLE_2 */ +#define WM8995_DCS_TRIG_SINGLE_2_MASK 0x4000 /* DCS_TRIG_SINGLE_2 */ +#define WM8995_DCS_TRIG_SINGLE_2_SHIFT 14 /* DCS_TRIG_SINGLE_2 */ +#define WM8995_DCS_TRIG_SINGLE_2_WIDTH 1 /* DCS_TRIG_SINGLE_2 */ +#define WM8995_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8995_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8995_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */ +#define WM8995_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */ +#define WM8995_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8995_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8995_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */ +#define WM8995_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */ +#define WM8995_DCS_TRIG_SERIES_3 0x0800 /* DCS_TRIG_SERIES_3 */ +#define WM8995_DCS_TRIG_SERIES_3_MASK 0x0800 /* DCS_TRIG_SERIES_3 */ +#define WM8995_DCS_TRIG_SERIES_3_SHIFT 11 /* DCS_TRIG_SERIES_3 */ +#define WM8995_DCS_TRIG_SERIES_3_WIDTH 1 /* DCS_TRIG_SERIES_3 */ +#define WM8995_DCS_TRIG_SERIES_2 0x0400 /* DCS_TRIG_SERIES_2 */ +#define WM8995_DCS_TRIG_SERIES_2_MASK 0x0400 /* DCS_TRIG_SERIES_2 */ +#define WM8995_DCS_TRIG_SERIES_2_SHIFT 10 /* DCS_TRIG_SERIES_2 */ +#define WM8995_DCS_TRIG_SERIES_2_WIDTH 1 /* DCS_TRIG_SERIES_2 */ +#define WM8995_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8995_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8995_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */ +#define WM8995_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */ +#define WM8995_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8995_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8995_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */ +#define WM8995_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */ +#define WM8995_DCS_TRIG_STARTUP_3 0x0080 /* DCS_TRIG_STARTUP_3 */ +#define WM8995_DCS_TRIG_STARTUP_3_MASK 0x0080 /* DCS_TRIG_STARTUP_3 */ +#define WM8995_DCS_TRIG_STARTUP_3_SHIFT 7 /* DCS_TRIG_STARTUP_3 */ +#define WM8995_DCS_TRIG_STARTUP_3_WIDTH 1 /* DCS_TRIG_STARTUP_3 */ +#define WM8995_DCS_TRIG_STARTUP_2 0x0040 /* DCS_TRIG_STARTUP_2 */ +#define WM8995_DCS_TRIG_STARTUP_2_MASK 0x0040 /* DCS_TRIG_STARTUP_2 */ +#define WM8995_DCS_TRIG_STARTUP_2_SHIFT 6 /* DCS_TRIG_STARTUP_2 */ +#define WM8995_DCS_TRIG_STARTUP_2_WIDTH 1 /* DCS_TRIG_STARTUP_2 */ +#define WM8995_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8995_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8995_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */ +#define WM8995_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */ +#define WM8995_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8995_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8995_DCS_TRIG_STARTUP_0_SHIFT 4 /* 
DCS_TRIG_STARTUP_0 */ +#define WM8995_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */ +#define WM8995_DCS_TRIG_DAC_WR_3 0x0008 /* DCS_TRIG_DAC_WR_3 */ +#define WM8995_DCS_TRIG_DAC_WR_3_MASK 0x0008 /* DCS_TRIG_DAC_WR_3 */ +#define WM8995_DCS_TRIG_DAC_WR_3_SHIFT 3 /* DCS_TRIG_DAC_WR_3 */ +#define WM8995_DCS_TRIG_DAC_WR_3_WIDTH 1 /* DCS_TRIG_DAC_WR_3 */ +#define WM8995_DCS_TRIG_DAC_WR_2 0x0004 /* DCS_TRIG_DAC_WR_2 */ +#define WM8995_DCS_TRIG_DAC_WR_2_MASK 0x0004 /* DCS_TRIG_DAC_WR_2 */ +#define WM8995_DCS_TRIG_DAC_WR_2_SHIFT 2 /* DCS_TRIG_DAC_WR_2 */ +#define WM8995_DCS_TRIG_DAC_WR_2_WIDTH 1 /* DCS_TRIG_DAC_WR_2 */ +#define WM8995_DCS_TRIG_DAC_WR_1 0x0002 /* DCS_TRIG_DAC_WR_1 */ +#define WM8995_DCS_TRIG_DAC_WR_1_MASK 0x0002 /* DCS_TRIG_DAC_WR_1 */ +#define WM8995_DCS_TRIG_DAC_WR_1_SHIFT 1 /* DCS_TRIG_DAC_WR_1 */ +#define WM8995_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */ +#define WM8995_DCS_TRIG_DAC_WR_0 0x0001 /* DCS_TRIG_DAC_WR_0 */ +#define WM8995_DCS_TRIG_DAC_WR_0_MASK 0x0001 /* DCS_TRIG_DAC_WR_0 */ +#define WM8995_DCS_TRIG_DAC_WR_0_SHIFT 0 /* DCS_TRIG_DAC_WR_0 */ +#define WM8995_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */ + +/* + * R82 (0x52) - DC Servo (3) + */ +#define WM8995_DCS_TIMER_PERIOD_23_MASK 0x0F00 /* DCS_TIMER_PERIOD_23 - [11:8] */ +#define WM8995_DCS_TIMER_PERIOD_23_SHIFT 8 /* DCS_TIMER_PERIOD_23 - [11:8] */ +#define WM8995_DCS_TIMER_PERIOD_23_WIDTH 4 /* DCS_TIMER_PERIOD_23 - [11:8] */ +#define WM8995_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8995_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8995_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */ + +/* + * R84 (0x54) - DC Servo (5) + */ +#define WM8995_DCS_SERIES_NO_23_MASK 0x7F00 /* DCS_SERIES_NO_23 - [14:8] */ +#define WM8995_DCS_SERIES_NO_23_SHIFT 8 /* DCS_SERIES_NO_23 - [14:8] */ +#define WM8995_DCS_SERIES_NO_23_WIDTH 7 /* DCS_SERIES_NO_23 - [14:8] */ +#define WM8995_DCS_SERIES_NO_01_MASK 0x007F /* DCS_SERIES_NO_01 - [6:0] */ +#define WM8995_DCS_SERIES_NO_01_SHIFT 0 /* DCS_SERIES_NO_01 - [6:0] */ +#define WM8995_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [6:0] */ + +/* + * R85 (0x55) - DC Servo (6) + */ +#define WM8995_DCS_DAC_WR_VAL_3_MASK 0xFF00 /* DCS_DAC_WR_VAL_3 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_3_SHIFT 8 /* DCS_DAC_WR_VAL_3 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_3_WIDTH 8 /* DCS_DAC_WR_VAL_3 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_2_MASK 0x00FF /* DCS_DAC_WR_VAL_2 - [7:0] */ +#define WM8995_DCS_DAC_WR_VAL_2_SHIFT 0 /* DCS_DAC_WR_VAL_2 - [7:0] */ +#define WM8995_DCS_DAC_WR_VAL_2_WIDTH 8 /* DCS_DAC_WR_VAL_2 - [7:0] */ + +/* + * R86 (0x56) - DC Servo (7) + */ +#define WM8995_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8995_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8995_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8995_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */ + +/* + * R87 (0x57) - DC Servo Readback 0 + */ +#define WM8995_DCS_CAL_COMPLETE_MASK 0x0F00 /* DCS_CAL_COMPLETE - [11:8] */ +#define WM8995_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [11:8] */ +#define WM8995_DCS_CAL_COMPLETE_WIDTH 4 /* DCS_CAL_COMPLETE - [11:8] */ +#define WM8995_DCS_DAC_WR_COMPLETE_MASK 0x00F0 /* DCS_DAC_WR_COMPLETE - [7:4] */ +#define WM8995_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - 
[7:4] */ +#define WM8995_DCS_DAC_WR_COMPLETE_WIDTH 4 /* DCS_DAC_WR_COMPLETE - [7:4] */ +#define WM8995_DCS_STARTUP_COMPLETE_MASK 0x000F /* DCS_STARTUP_COMPLETE - [3:0] */ +#define WM8995_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [3:0] */ +#define WM8995_DCS_STARTUP_COMPLETE_WIDTH 4 /* DCS_STARTUP_COMPLETE - [3:0] */ + +/* + * R96 (0x60) - Analogue HP (1) + */ +#define WM8995_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8995_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8995_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */ +#define WM8995_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */ +#define WM8995_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */ +#define WM8995_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */ +#define WM8995_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */ +#define WM8995_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */ +#define WM8995_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */ +#define WM8995_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */ +#define WM8995_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */ +#define WM8995_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */ +#define WM8995_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8995_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8995_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */ +#define WM8995_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */ +#define WM8995_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */ +#define WM8995_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */ +#define WM8995_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */ +#define WM8995_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */ +#define WM8995_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */ +#define WM8995_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */ +#define WM8995_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */ +#define WM8995_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */ + +/* + * R97 (0x61) - Analogue HP (2) + */ +#define WM8995_HPOUT2L_RMV_SHORT 0x0080 /* HPOUT2L_RMV_SHORT */ +#define WM8995_HPOUT2L_RMV_SHORT_MASK 0x0080 /* HPOUT2L_RMV_SHORT */ +#define WM8995_HPOUT2L_RMV_SHORT_SHIFT 7 /* HPOUT2L_RMV_SHORT */ +#define WM8995_HPOUT2L_RMV_SHORT_WIDTH 1 /* HPOUT2L_RMV_SHORT */ +#define WM8995_HPOUT2L_OUTP 0x0040 /* HPOUT2L_OUTP */ +#define WM8995_HPOUT2L_OUTP_MASK 0x0040 /* HPOUT2L_OUTP */ +#define WM8995_HPOUT2L_OUTP_SHIFT 6 /* HPOUT2L_OUTP */ +#define WM8995_HPOUT2L_OUTP_WIDTH 1 /* HPOUT2L_OUTP */ +#define WM8995_HPOUT2L_DLY 0x0020 /* HPOUT2L_DLY */ +#define WM8995_HPOUT2L_DLY_MASK 0x0020 /* HPOUT2L_DLY */ +#define WM8995_HPOUT2L_DLY_SHIFT 5 /* HPOUT2L_DLY */ +#define WM8995_HPOUT2L_DLY_WIDTH 1 /* HPOUT2L_DLY */ +#define WM8995_HPOUT2R_RMV_SHORT 0x0008 /* HPOUT2R_RMV_SHORT */ +#define WM8995_HPOUT2R_RMV_SHORT_MASK 0x0008 /* HPOUT2R_RMV_SHORT */ +#define WM8995_HPOUT2R_RMV_SHORT_SHIFT 3 /* HPOUT2R_RMV_SHORT */ +#define WM8995_HPOUT2R_RMV_SHORT_WIDTH 1 /* HPOUT2R_RMV_SHORT */ +#define WM8995_HPOUT2R_OUTP 0x0004 /* HPOUT2R_OUTP */ +#define WM8995_HPOUT2R_OUTP_MASK 0x0004 /* HPOUT2R_OUTP */ +#define WM8995_HPOUT2R_OUTP_SHIFT 2 /* HPOUT2R_OUTP */ +#define WM8995_HPOUT2R_OUTP_WIDTH 1 /* HPOUT2R_OUTP */ +#define WM8995_HPOUT2R_DLY 0x0002 /* HPOUT2R_DLY */ +#define WM8995_HPOUT2R_DLY_MASK 0x0002 /* HPOUT2R_DLY */ +#define WM8995_HPOUT2R_DLY_SHIFT 1 /* HPOUT2R_DLY */ +#define WM8995_HPOUT2R_DLY_WIDTH 1 /* HPOUT2R_DLY */ + +/* + * R256 (0x100) - Chip Revision + */ +#define WM8995_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */ +#define WM8995_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */ +#define WM8995_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */ + +/* + * R257 (0x101) - Control Interface (1) + */ +#define 
WM8995_REG_SYNC 0x8000 /* REG_SYNC */ +#define WM8995_REG_SYNC_MASK 0x8000 /* REG_SYNC */ +#define WM8995_REG_SYNC_SHIFT 15 /* REG_SYNC */ +#define WM8995_REG_SYNC_WIDTH 1 /* REG_SYNC */ +#define WM8995_SPI_CONTRD 0x0040 /* SPI_CONTRD */ +#define WM8995_SPI_CONTRD_MASK 0x0040 /* SPI_CONTRD */ +#define WM8995_SPI_CONTRD_SHIFT 6 /* SPI_CONTRD */ +#define WM8995_SPI_CONTRD_WIDTH 1 /* SPI_CONTRD */ +#define WM8995_SPI_4WIRE 0x0020 /* SPI_4WIRE */ +#define WM8995_SPI_4WIRE_MASK 0x0020 /* SPI_4WIRE */ +#define WM8995_SPI_4WIRE_SHIFT 5 /* SPI_4WIRE */ +#define WM8995_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */ +#define WM8995_SPI_CFG 0x0010 /* SPI_CFG */ +#define WM8995_SPI_CFG_MASK 0x0010 /* SPI_CFG */ +#define WM8995_SPI_CFG_SHIFT 4 /* SPI_CFG */ +#define WM8995_SPI_CFG_WIDTH 1 /* SPI_CFG */ +#define WM8995_AUTO_INC 0x0004 /* AUTO_INC */ +#define WM8995_AUTO_INC_MASK 0x0004 /* AUTO_INC */ +#define WM8995_AUTO_INC_SHIFT 2 /* AUTO_INC */ +#define WM8995_AUTO_INC_WIDTH 1 /* AUTO_INC */ + +/* + * R258 (0x102) - Control Interface (2) + */ +#define WM8995_CTRL_IF_SRC 0x0001 /* CTRL_IF_SRC */ +#define WM8995_CTRL_IF_SRC_MASK 0x0001 /* CTRL_IF_SRC */ +#define WM8995_CTRL_IF_SRC_SHIFT 0 /* CTRL_IF_SRC */ +#define WM8995_CTRL_IF_SRC_WIDTH 1 /* CTRL_IF_SRC */ + +/* + * R272 (0x110) - Write Sequencer Ctrl (1) + */ +#define WM8995_WSEQ_ENA 0x8000 /* WSEQ_ENA */ +#define WM8995_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */ +#define WM8995_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */ +#define WM8995_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */ +#define WM8995_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */ +#define WM8995_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */ +#define WM8995_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */ +#define WM8995_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */ +#define WM8995_WSEQ_START 0x0100 /* WSEQ_START */ +#define WM8995_WSEQ_START_MASK 0x0100 /* WSEQ_START */ +#define WM8995_WSEQ_START_SHIFT 8 /* WSEQ_START */ +#define WM8995_WSEQ_START_WIDTH 1 /* WSEQ_START */ +#define WM8995_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */ +#define WM8995_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */ +#define WM8995_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */ + +/* + * R273 (0x111) - Write Sequencer Ctrl (2) + */ +#define WM8995_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */ +#define WM8995_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */ +#define WM8995_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */ +#define WM8995_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */ +#define WM8995_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8995_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8995_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */ + +/* + * R512 (0x200) - AIF1 Clocking (1) + */ +#define WM8995_AIF1CLK_SRC_MASK 0x0018 /* AIF1CLK_SRC - [4:3] */ +#define WM8995_AIF1CLK_SRC_SHIFT 3 /* AIF1CLK_SRC - [4:3] */ +#define WM8995_AIF1CLK_SRC_WIDTH 2 /* AIF1CLK_SRC - [4:3] */ +#define WM8995_AIF1CLK_INV 0x0004 /* AIF1CLK_INV */ +#define WM8995_AIF1CLK_INV_MASK 0x0004 /* AIF1CLK_INV */ +#define WM8995_AIF1CLK_INV_SHIFT 2 /* AIF1CLK_INV */ +#define WM8995_AIF1CLK_INV_WIDTH 1 /* AIF1CLK_INV */ +#define WM8995_AIF1CLK_DIV 0x0002 /* AIF1CLK_DIV */ +#define WM8995_AIF1CLK_DIV_MASK 0x0002 /* AIF1CLK_DIV */ +#define WM8995_AIF1CLK_DIV_SHIFT 1 /* AIF1CLK_DIV */ +#define WM8995_AIF1CLK_DIV_WIDTH 1 /* AIF1CLK_DIV */ +#define WM8995_AIF1CLK_ENA 0x0001 /* AIF1CLK_ENA */ +#define WM8995_AIF1CLK_ENA_MASK 0x0001 /* AIF1CLK_ENA */ +#define WM8995_AIF1CLK_ENA_SHIFT 0 /* AIF1CLK_ENA */ +#define WM8995_AIF1CLK_ENA_WIDTH 1 /* AIF1CLK_ENA */ + +/* + * R513 (0x201) 
- AIF1 Clocking (2) + */ +#define WM8995_AIF1DAC_DIV_MASK 0x0038 /* AIF1DAC_DIV - [5:3] */ +#define WM8995_AIF1DAC_DIV_SHIFT 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8995_AIF1DAC_DIV_WIDTH 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8995_AIF1ADC_DIV_MASK 0x0007 /* AIF1ADC_DIV - [2:0] */ +#define WM8995_AIF1ADC_DIV_SHIFT 0 /* AIF1ADC_DIV - [2:0] */ +#define WM8995_AIF1ADC_DIV_WIDTH 3 /* AIF1ADC_DIV - [2:0] */ + +/* + * R516 (0x204) - AIF2 Clocking (1) + */ +#define WM8995_AIF2CLK_SRC_MASK 0x0018 /* AIF2CLK_SRC - [4:3] */ +#define WM8995_AIF2CLK_SRC_SHIFT 3 /* AIF2CLK_SRC - [4:3] */ +#define WM8995_AIF2CLK_SRC_WIDTH 2 /* AIF2CLK_SRC - [4:3] */ +#define WM8995_AIF2CLK_INV 0x0004 /* AIF2CLK_INV */ +#define WM8995_AIF2CLK_INV_MASK 0x0004 /* AIF2CLK_INV */ +#define WM8995_AIF2CLK_INV_SHIFT 2 /* AIF2CLK_INV */ +#define WM8995_AIF2CLK_INV_WIDTH 1 /* AIF2CLK_INV */ +#define WM8995_AIF2CLK_DIV 0x0002 /* AIF2CLK_DIV */ +#define WM8995_AIF2CLK_DIV_MASK 0x0002 /* AIF2CLK_DIV */ +#define WM8995_AIF2CLK_DIV_SHIFT 1 /* AIF2CLK_DIV */ +#define WM8995_AIF2CLK_DIV_WIDTH 1 /* AIF2CLK_DIV */ +#define WM8995_AIF2CLK_ENA 0x0001 /* AIF2CLK_ENA */ +#define WM8995_AIF2CLK_ENA_MASK 0x0001 /* AIF2CLK_ENA */ +#define WM8995_AIF2CLK_ENA_SHIFT 0 /* AIF2CLK_ENA */ +#define WM8995_AIF2CLK_ENA_WIDTH 1 /* AIF2CLK_ENA */ + +/* + * R517 (0x205) - AIF2 Clocking (2) + */ +#define WM8995_AIF2DAC_DIV_MASK 0x0038 /* AIF2DAC_DIV - [5:3] */ +#define WM8995_AIF2DAC_DIV_SHIFT 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8995_AIF2DAC_DIV_WIDTH 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8995_AIF2ADC_DIV_MASK 0x0007 /* AIF2ADC_DIV - [2:0] */ +#define WM8995_AIF2ADC_DIV_SHIFT 0 /* AIF2ADC_DIV - [2:0] */ +#define WM8995_AIF2ADC_DIV_WIDTH 3 /* AIF2ADC_DIV - [2:0] */ + +/* + * R520 (0x208) - Clocking (1) + */ +#define WM8995_LFCLK_ENA 0x0020 /* LFCLK_ENA */ +#define WM8995_LFCLK_ENA_MASK 0x0020 /* LFCLK_ENA */ +#define WM8995_LFCLK_ENA_SHIFT 5 /* LFCLK_ENA */ +#define WM8995_LFCLK_ENA_WIDTH 1 /* LFCLK_ENA */ +#define WM8995_TOCLK_ENA 0x0010 /* TOCLK_ENA */ +#define WM8995_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */ +#define WM8995_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */ +#define WM8995_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ +#define WM8995_AIF1DSPCLK_ENA 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8995_AIF1DSPCLK_ENA_MASK 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8995_AIF1DSPCLK_ENA_SHIFT 3 /* AIF1DSPCLK_ENA */ +#define WM8995_AIF1DSPCLK_ENA_WIDTH 1 /* AIF1DSPCLK_ENA */ +#define WM8995_AIF2DSPCLK_ENA 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8995_AIF2DSPCLK_ENA_MASK 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8995_AIF2DSPCLK_ENA_SHIFT 2 /* AIF2DSPCLK_ENA */ +#define WM8995_AIF2DSPCLK_ENA_WIDTH 1 /* AIF2DSPCLK_ENA */ +#define WM8995_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */ +#define WM8995_SYSDSPCLK_ENA_MASK 0x0002 /* SYSDSPCLK_ENA */ +#define WM8995_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */ +#define WM8995_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */ +#define WM8995_SYSCLK_SRC 0x0001 /* SYSCLK_SRC */ +#define WM8995_SYSCLK_SRC_MASK 0x0001 /* SYSCLK_SRC */ +#define WM8995_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC */ +#define WM8995_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ + +/* + * R521 (0x209) - Clocking (2) + */ +#define WM8995_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */ +#define WM8995_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */ +#define WM8995_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */ +#define WM8995_DBCLK_DIV_MASK 0x00F0 /* DBCLK_DIV - [7:4] */ +#define WM8995_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [7:4] */ +#define WM8995_DBCLK_DIV_WIDTH 4 /* DBCLK_DIV - [7:4] */ +#define WM8995_OPCLK_DIV_MASK 0x0007 /* 
OPCLK_DIV - [2:0] */ +#define WM8995_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */ +#define WM8995_OPCLK_DIV_WIDTH 3 /* OPCLK_DIV - [2:0] */ + +/* + * R528 (0x210) - AIF1 Rate + */ +#define WM8995_AIF1_SR_MASK 0x00F0 /* AIF1_SR - [7:4] */ +#define WM8995_AIF1_SR_SHIFT 4 /* AIF1_SR - [7:4] */ +#define WM8995_AIF1_SR_WIDTH 4 /* AIF1_SR - [7:4] */ +#define WM8995_AIF1CLK_RATE_MASK 0x000F /* AIF1CLK_RATE - [3:0] */ +#define WM8995_AIF1CLK_RATE_SHIFT 0 /* AIF1CLK_RATE - [3:0] */ +#define WM8995_AIF1CLK_RATE_WIDTH 4 /* AIF1CLK_RATE - [3:0] */ + +/* + * R529 (0x211) - AIF2 Rate + */ +#define WM8995_AIF2_SR_MASK 0x00F0 /* AIF2_SR - [7:4] */ +#define WM8995_AIF2_SR_SHIFT 4 /* AIF2_SR - [7:4] */ +#define WM8995_AIF2_SR_WIDTH 4 /* AIF2_SR - [7:4] */ +#define WM8995_AIF2CLK_RATE_MASK 0x000F /* AIF2CLK_RATE - [3:0] */ +#define WM8995_AIF2CLK_RATE_SHIFT 0 /* AIF2CLK_RATE - [3:0] */ +#define WM8995_AIF2CLK_RATE_WIDTH 4 /* AIF2CLK_RATE - [3:0] */ + +/* + * R530 (0x212) - Rate Status + */ +#define WM8995_SR_ERROR_MASK 0x000F /* SR_ERROR - [3:0] */ +#define WM8995_SR_ERROR_SHIFT 0 /* SR_ERROR - [3:0] */ +#define WM8995_SR_ERROR_WIDTH 4 /* SR_ERROR - [3:0] */ + +/* + * R544 (0x220) - FLL1 Control (1) + */ +#define WM8995_FLL1_OSC_ENA 0x0002 /* FLL1_OSC_ENA */ +#define WM8995_FLL1_OSC_ENA_MASK 0x0002 /* FLL1_OSC_ENA */ +#define WM8995_FLL1_OSC_ENA_SHIFT 1 /* FLL1_OSC_ENA */ +#define WM8995_FLL1_OSC_ENA_WIDTH 1 /* FLL1_OSC_ENA */ +#define WM8995_FLL1_ENA 0x0001 /* FLL1_ENA */ +#define WM8995_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */ +#define WM8995_FLL1_ENA_SHIFT 0 /* FLL1_ENA */ +#define WM8995_FLL1_ENA_WIDTH 1 /* FLL1_ENA */ + +/* + * R545 (0x221) - FLL1 Control (2) + */ +#define WM8995_FLL1_OUTDIV_MASK 0x3F00 /* FLL1_OUTDIV - [13:8] */ +#define WM8995_FLL1_OUTDIV_SHIFT 8 /* FLL1_OUTDIV - [13:8] */ +#define WM8995_FLL1_OUTDIV_WIDTH 6 /* FLL1_OUTDIV - [13:8] */ +#define WM8995_FLL1_CTRL_RATE_MASK 0x0070 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8995_FLL1_CTRL_RATE_SHIFT 4 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8995_FLL1_CTRL_RATE_WIDTH 3 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8995_FLL1_FRATIO_MASK 0x0007 /* FLL1_FRATIO - [2:0] */ +#define WM8995_FLL1_FRATIO_SHIFT 0 /* FLL1_FRATIO - [2:0] */ +#define WM8995_FLL1_FRATIO_WIDTH 3 /* FLL1_FRATIO - [2:0] */ + +/* + * R546 (0x222) - FLL1 Control (3) + */ +#define WM8995_FLL1_K_MASK 0xFFFF /* FLL1_K - [15:0] */ +#define WM8995_FLL1_K_SHIFT 0 /* FLL1_K - [15:0] */ +#define WM8995_FLL1_K_WIDTH 16 /* FLL1_K - [15:0] */ + +/* + * R547 (0x223) - FLL1 Control (4) + */ +#define WM8995_FLL1_N_MASK 0x7FE0 /* FLL1_N - [14:5] */ +#define WM8995_FLL1_N_SHIFT 5 /* FLL1_N - [14:5] */ +#define WM8995_FLL1_N_WIDTH 10 /* FLL1_N - [14:5] */ +#define WM8995_FLL1_LOOP_GAIN_MASK 0x000F /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8995_FLL1_LOOP_GAIN_SHIFT 0 /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8995_FLL1_LOOP_GAIN_WIDTH 4 /* FLL1_LOOP_GAIN - [3:0] */ + +/* + * R548 (0x224) - FLL1 Control (5) + */ +#define WM8995_FLL1_FRC_NCO_VAL_MASK 0x1F80 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL1_FRC_NCO_VAL_SHIFT 7 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL1_FRC_NCO_VAL_WIDTH 6 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL1_FRC_NCO 0x0040 /* FLL1_FRC_NCO */ +#define WM8995_FLL1_FRC_NCO_MASK 0x0040 /* FLL1_FRC_NCO */ +#define WM8995_FLL1_FRC_NCO_SHIFT 6 /* FLL1_FRC_NCO */ +#define WM8995_FLL1_FRC_NCO_WIDTH 1 /* FLL1_FRC_NCO */ +#define WM8995_FLL1_REFCLK_DIV_MASK 0x0018 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8995_FLL1_REFCLK_DIV_SHIFT 3 /* FLL1_REFCLK_DIV - [4:3] */ +#define 
WM8995_FLL1_REFCLK_DIV_WIDTH 2 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8995_FLL1_REFCLK_SRC_MASK 0x0003 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8995_FLL1_REFCLK_SRC_SHIFT 0 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8995_FLL1_REFCLK_SRC_WIDTH 2 /* FLL1_REFCLK_SRC - [1:0] */ + +/* + * R576 (0x240) - FLL2 Control (1) + */ +#define WM8995_FLL2_OSC_ENA 0x0002 /* FLL2_OSC_ENA */ +#define WM8995_FLL2_OSC_ENA_MASK 0x0002 /* FLL2_OSC_ENA */ +#define WM8995_FLL2_OSC_ENA_SHIFT 1 /* FLL2_OSC_ENA */ +#define WM8995_FLL2_OSC_ENA_WIDTH 1 /* FLL2_OSC_ENA */ +#define WM8995_FLL2_ENA 0x0001 /* FLL2_ENA */ +#define WM8995_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */ +#define WM8995_FLL2_ENA_SHIFT 0 /* FLL2_ENA */ +#define WM8995_FLL2_ENA_WIDTH 1 /* FLL2_ENA */ + +/* + * R577 (0x241) - FLL2 Control (2) + */ +#define WM8995_FLL2_OUTDIV_MASK 0x3F00 /* FLL2_OUTDIV - [13:8] */ +#define WM8995_FLL2_OUTDIV_SHIFT 8 /* FLL2_OUTDIV - [13:8] */ +#define WM8995_FLL2_OUTDIV_WIDTH 6 /* FLL2_OUTDIV - [13:8] */ +#define WM8995_FLL2_CTRL_RATE_MASK 0x0070 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8995_FLL2_CTRL_RATE_SHIFT 4 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8995_FLL2_CTRL_RATE_WIDTH 3 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8995_FLL2_FRATIO_MASK 0x0007 /* FLL2_FRATIO - [2:0] */ +#define WM8995_FLL2_FRATIO_SHIFT 0 /* FLL2_FRATIO - [2:0] */ +#define WM8995_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [2:0] */ + +/* + * R578 (0x242) - FLL2 Control (3) + */ +#define WM8995_FLL2_K_MASK 0xFFFF /* FLL2_K - [15:0] */ +#define WM8995_FLL2_K_SHIFT 0 /* FLL2_K - [15:0] */ +#define WM8995_FLL2_K_WIDTH 16 /* FLL2_K - [15:0] */ + +/* + * R579 (0x243) - FLL2 Control (4) + */ +#define WM8995_FLL2_N_MASK 0x7FE0 /* FLL2_N - [14:5] */ +#define WM8995_FLL2_N_SHIFT 5 /* FLL2_N - [14:5] */ +#define WM8995_FLL2_N_WIDTH 10 /* FLL2_N - [14:5] */ +#define WM8995_FLL2_LOOP_GAIN_MASK 0x000F /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8995_FLL2_LOOP_GAIN_SHIFT 0 /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8995_FLL2_LOOP_GAIN_WIDTH 4 /* FLL2_LOOP_GAIN - [3:0] */ + +/* + * R580 (0x244) - FLL2 Control (5) + */ +#define WM8995_FLL2_FRC_NCO_VAL_MASK 0x1F80 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL2_FRC_NCO_VAL_SHIFT 7 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL2_FRC_NCO_VAL_WIDTH 6 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8995_FLL2_FRC_NCO 0x0040 /* FLL2_FRC_NCO */ +#define WM8995_FLL2_FRC_NCO_MASK 0x0040 /* FLL2_FRC_NCO */ +#define WM8995_FLL2_FRC_NCO_SHIFT 6 /* FLL2_FRC_NCO */ +#define WM8995_FLL2_FRC_NCO_WIDTH 1 /* FLL2_FRC_NCO */ +#define WM8995_FLL2_REFCLK_DIV_MASK 0x0018 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8995_FLL2_REFCLK_DIV_SHIFT 3 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8995_FLL2_REFCLK_DIV_WIDTH 2 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8995_FLL2_REFCLK_SRC_MASK 0x0003 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8995_FLL2_REFCLK_SRC_SHIFT 0 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8995_FLL2_REFCLK_SRC_WIDTH 2 /* FLL2_REFCLK_SRC - [1:0] */ + +/* + * R768 (0x300) - AIF1 Control (1) + */ +#define WM8995_AIF1ADCL_SRC 0x8000 /* AIF1ADCL_SRC */ +#define WM8995_AIF1ADCL_SRC_MASK 0x8000 /* AIF1ADCL_SRC */ +#define WM8995_AIF1ADCL_SRC_SHIFT 15 /* AIF1ADCL_SRC */ +#define WM8995_AIF1ADCL_SRC_WIDTH 1 /* AIF1ADCL_SRC */ +#define WM8995_AIF1ADCR_SRC 0x4000 /* AIF1ADCR_SRC */ +#define WM8995_AIF1ADCR_SRC_MASK 0x4000 /* AIF1ADCR_SRC */ +#define WM8995_AIF1ADCR_SRC_SHIFT 14 /* AIF1ADCR_SRC */ +#define WM8995_AIF1ADCR_SRC_WIDTH 1 /* AIF1ADCR_SRC */ +#define WM8995_AIF1ADC_TDM 0x2000 /* AIF1ADC_TDM */ +#define WM8995_AIF1ADC_TDM_MASK 0x2000 /* AIF1ADC_TDM */ 
+#define WM8995_AIF1ADC_TDM_SHIFT 13 /* AIF1ADC_TDM */ +#define WM8995_AIF1ADC_TDM_WIDTH 1 /* AIF1ADC_TDM */ +#define WM8995_AIF1_BCLK_INV 0x0100 /* AIF1_BCLK_INV */ +#define WM8995_AIF1_BCLK_INV_MASK 0x0100 /* AIF1_BCLK_INV */ +#define WM8995_AIF1_BCLK_INV_SHIFT 8 /* AIF1_BCLK_INV */ +#define WM8995_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */ +#define WM8995_AIF1_LRCLK_INV 0x0080 /* AIF1_LRCLK_INV */ +#define WM8995_AIF1_LRCLK_INV_MASK 0x0080 /* AIF1_LRCLK_INV */ +#define WM8995_AIF1_LRCLK_INV_SHIFT 7 /* AIF1_LRCLK_INV */ +#define WM8995_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */ +#define WM8995_AIF1_WL_MASK 0x0060 /* AIF1_WL - [6:5] */ +#define WM8995_AIF1_WL_SHIFT 5 /* AIF1_WL - [6:5] */ +#define WM8995_AIF1_WL_WIDTH 2 /* AIF1_WL - [6:5] */ +#define WM8995_AIF1_FMT_MASK 0x0018 /* AIF1_FMT - [4:3] */ +#define WM8995_AIF1_FMT_SHIFT 3 /* AIF1_FMT - [4:3] */ +#define WM8995_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [4:3] */ + +/* + * R769 (0x301) - AIF1 Control (2) + */ +#define WM8995_AIF1DACL_SRC 0x8000 /* AIF1DACL_SRC */ +#define WM8995_AIF1DACL_SRC_MASK 0x8000 /* AIF1DACL_SRC */ +#define WM8995_AIF1DACL_SRC_SHIFT 15 /* AIF1DACL_SRC */ +#define WM8995_AIF1DACL_SRC_WIDTH 1 /* AIF1DACL_SRC */ +#define WM8995_AIF1DACR_SRC 0x4000 /* AIF1DACR_SRC */ +#define WM8995_AIF1DACR_SRC_MASK 0x4000 /* AIF1DACR_SRC */ +#define WM8995_AIF1DACR_SRC_SHIFT 14 /* AIF1DACR_SRC */ +#define WM8995_AIF1DACR_SRC_WIDTH 1 /* AIF1DACR_SRC */ +#define WM8995_AIF1DAC_BOOST_MASK 0x0C00 /* AIF1DAC_BOOST - [11:10] */ +#define WM8995_AIF1DAC_BOOST_SHIFT 10 /* AIF1DAC_BOOST - [11:10] */ +#define WM8995_AIF1DAC_BOOST_WIDTH 2 /* AIF1DAC_BOOST - [11:10] */ +#define WM8995_AIF1DAC_COMP 0x0010 /* AIF1DAC_COMP */ +#define WM8995_AIF1DAC_COMP_MASK 0x0010 /* AIF1DAC_COMP */ +#define WM8995_AIF1DAC_COMP_SHIFT 4 /* AIF1DAC_COMP */ +#define WM8995_AIF1DAC_COMP_WIDTH 1 /* AIF1DAC_COMP */ +#define WM8995_AIF1DAC_COMPMODE 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8995_AIF1DAC_COMPMODE_MASK 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8995_AIF1DAC_COMPMODE_SHIFT 3 /* AIF1DAC_COMPMODE */ +#define WM8995_AIF1DAC_COMPMODE_WIDTH 1 /* AIF1DAC_COMPMODE */ +#define WM8995_AIF1ADC_COMP 0x0004 /* AIF1ADC_COMP */ +#define WM8995_AIF1ADC_COMP_MASK 0x0004 /* AIF1ADC_COMP */ +#define WM8995_AIF1ADC_COMP_SHIFT 2 /* AIF1ADC_COMP */ +#define WM8995_AIF1ADC_COMP_WIDTH 1 /* AIF1ADC_COMP */ +#define WM8995_AIF1ADC_COMPMODE 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8995_AIF1ADC_COMPMODE_MASK 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8995_AIF1ADC_COMPMODE_SHIFT 1 /* AIF1ADC_COMPMODE */ +#define WM8995_AIF1ADC_COMPMODE_WIDTH 1 /* AIF1ADC_COMPMODE */ +#define WM8995_AIF1_LOOPBACK 0x0001 /* AIF1_LOOPBACK */ +#define WM8995_AIF1_LOOPBACK_MASK 0x0001 /* AIF1_LOOPBACK */ +#define WM8995_AIF1_LOOPBACK_SHIFT 0 /* AIF1_LOOPBACK */ +#define WM8995_AIF1_LOOPBACK_WIDTH 1 /* AIF1_LOOPBACK */ + +/* + * R770 (0x302) - AIF1 Master/Slave + */ +#define WM8995_AIF1_TRI 0x8000 /* AIF1_TRI */ +#define WM8995_AIF1_TRI_MASK 0x8000 /* AIF1_TRI */ +#define WM8995_AIF1_TRI_SHIFT 15 /* AIF1_TRI */ +#define WM8995_AIF1_TRI_WIDTH 1 /* AIF1_TRI */ +#define WM8995_AIF1_MSTR 0x4000 /* AIF1_MSTR */ +#define WM8995_AIF1_MSTR_MASK 0x4000 /* AIF1_MSTR */ +#define WM8995_AIF1_MSTR_SHIFT 14 /* AIF1_MSTR */ +#define WM8995_AIF1_MSTR_WIDTH 1 /* AIF1_MSTR */ +#define WM8995_AIF1_CLK_FRC 0x2000 /* AIF1_CLK_FRC */ +#define WM8995_AIF1_CLK_FRC_MASK 0x2000 /* AIF1_CLK_FRC */ +#define WM8995_AIF1_CLK_FRC_SHIFT 13 /* AIF1_CLK_FRC */ +#define WM8995_AIF1_CLK_FRC_WIDTH 1 /* AIF1_CLK_FRC */ +#define WM8995_AIF1_LRCLK_FRC 
0x1000 /* AIF1_LRCLK_FRC */ +#define WM8995_AIF1_LRCLK_FRC_MASK 0x1000 /* AIF1_LRCLK_FRC */ +#define WM8995_AIF1_LRCLK_FRC_SHIFT 12 /* AIF1_LRCLK_FRC */ +#define WM8995_AIF1_LRCLK_FRC_WIDTH 1 /* AIF1_LRCLK_FRC */ + +/* + * R771 (0x303) - AIF1 BCLK + */ +#define WM8995_AIF1_BCLK_DIV_MASK 0x00F0 /* AIF1_BCLK_DIV - [7:4] */ +#define WM8995_AIF1_BCLK_DIV_SHIFT 4 /* AIF1_BCLK_DIV - [7:4] */ +#define WM8995_AIF1_BCLK_DIV_WIDTH 4 /* AIF1_BCLK_DIV - [7:4] */ + +/* + * R772 (0x304) - AIF1ADC LRCLK + */ +#define WM8995_AIF1ADC_LRCLK_DIR 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8995_AIF1ADC_LRCLK_DIR_MASK 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8995_AIF1ADC_LRCLK_DIR_SHIFT 11 /* AIF1ADC_LRCLK_DIR */ +#define WM8995_AIF1ADC_LRCLK_DIR_WIDTH 1 /* AIF1ADC_LRCLK_DIR */ +#define WM8995_AIF1ADC_RATE_MASK 0x07FF /* AIF1ADC_RATE - [10:0] */ +#define WM8995_AIF1ADC_RATE_SHIFT 0 /* AIF1ADC_RATE - [10:0] */ +#define WM8995_AIF1ADC_RATE_WIDTH 11 /* AIF1ADC_RATE - [10:0] */ + +/* + * R773 (0x305) - AIF1DAC LRCLK + */ +#define WM8995_AIF1DAC_LRCLK_DIR 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8995_AIF1DAC_LRCLK_DIR_MASK 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8995_AIF1DAC_LRCLK_DIR_SHIFT 11 /* AIF1DAC_LRCLK_DIR */ +#define WM8995_AIF1DAC_LRCLK_DIR_WIDTH 1 /* AIF1DAC_LRCLK_DIR */ +#define WM8995_AIF1DAC_RATE_MASK 0x07FF /* AIF1DAC_RATE - [10:0] */ +#define WM8995_AIF1DAC_RATE_SHIFT 0 /* AIF1DAC_RATE - [10:0] */ +#define WM8995_AIF1DAC_RATE_WIDTH 11 /* AIF1DAC_RATE - [10:0] */ + +/* + * R774 (0x306) - AIF1DAC Data + */ +#define WM8995_AIF1DACL_DAT_INV 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8995_AIF1DACL_DAT_INV_MASK 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8995_AIF1DACL_DAT_INV_SHIFT 1 /* AIF1DACL_DAT_INV */ +#define WM8995_AIF1DACL_DAT_INV_WIDTH 1 /* AIF1DACL_DAT_INV */ +#define WM8995_AIF1DACR_DAT_INV 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8995_AIF1DACR_DAT_INV_MASK 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8995_AIF1DACR_DAT_INV_SHIFT 0 /* AIF1DACR_DAT_INV */ +#define WM8995_AIF1DACR_DAT_INV_WIDTH 1 /* AIF1DACR_DAT_INV */ + +/* + * R775 (0x307) - AIF1ADC Data + */ +#define WM8995_AIF1ADCL_DAT_INV 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8995_AIF1ADCL_DAT_INV_MASK 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8995_AIF1ADCL_DAT_INV_SHIFT 1 /* AIF1ADCL_DAT_INV */ +#define WM8995_AIF1ADCL_DAT_INV_WIDTH 1 /* AIF1ADCL_DAT_INV */ +#define WM8995_AIF1ADCR_DAT_INV 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8995_AIF1ADCR_DAT_INV_MASK 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8995_AIF1ADCR_DAT_INV_SHIFT 0 /* AIF1ADCR_DAT_INV */ +#define WM8995_AIF1ADCR_DAT_INV_WIDTH 1 /* AIF1ADCR_DAT_INV */ + +/* + * R784 (0x310) - AIF2 Control (1) + */ +#define WM8995_AIF2ADCL_SRC 0x8000 /* AIF2ADCL_SRC */ +#define WM8995_AIF2ADCL_SRC_MASK 0x8000 /* AIF2ADCL_SRC */ +#define WM8995_AIF2ADCL_SRC_SHIFT 15 /* AIF2ADCL_SRC */ +#define WM8995_AIF2ADCL_SRC_WIDTH 1 /* AIF2ADCL_SRC */ +#define WM8995_AIF2ADCR_SRC 0x4000 /* AIF2ADCR_SRC */ +#define WM8995_AIF2ADCR_SRC_MASK 0x4000 /* AIF2ADCR_SRC */ +#define WM8995_AIF2ADCR_SRC_SHIFT 14 /* AIF2ADCR_SRC */ +#define WM8995_AIF2ADCR_SRC_WIDTH 1 /* AIF2ADCR_SRC */ +#define WM8995_AIF2ADC_TDM 0x2000 /* AIF2ADC_TDM */ +#define WM8995_AIF2ADC_TDM_MASK 0x2000 /* AIF2ADC_TDM */ +#define WM8995_AIF2ADC_TDM_SHIFT 13 /* AIF2ADC_TDM */ +#define WM8995_AIF2ADC_TDM_WIDTH 1 /* AIF2ADC_TDM */ +#define WM8995_AIF2ADC_TDM_CHAN 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8995_AIF2ADC_TDM_CHAN_MASK 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8995_AIF2ADC_TDM_CHAN_SHIFT 12 /* AIF2ADC_TDM_CHAN */ +#define 
WM8995_AIF2ADC_TDM_CHAN_WIDTH 1 /* AIF2ADC_TDM_CHAN */ +#define WM8995_AIF2_BCLK_INV 0x0100 /* AIF2_BCLK_INV */ +#define WM8995_AIF2_BCLK_INV_MASK 0x0100 /* AIF2_BCLK_INV */ +#define WM8995_AIF2_BCLK_INV_SHIFT 8 /* AIF2_BCLK_INV */ +#define WM8995_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */ +#define WM8995_AIF2_LRCLK_INV 0x0080 /* AIF2_LRCLK_INV */ +#define WM8995_AIF2_LRCLK_INV_MASK 0x0080 /* AIF2_LRCLK_INV */ +#define WM8995_AIF2_LRCLK_INV_SHIFT 7 /* AIF2_LRCLK_INV */ +#define WM8995_AIF2_LRCLK_INV_WIDTH 1 /* AIF2_LRCLK_INV */ +#define WM8995_AIF2_WL_MASK 0x0060 /* AIF2_WL - [6:5] */ +#define WM8995_AIF2_WL_SHIFT 5 /* AIF2_WL - [6:5] */ +#define WM8995_AIF2_WL_WIDTH 2 /* AIF2_WL - [6:5] */ +#define WM8995_AIF2_FMT_MASK 0x0018 /* AIF2_FMT - [4:3] */ +#define WM8995_AIF2_FMT_SHIFT 3 /* AIF2_FMT - [4:3] */ +#define WM8995_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [4:3] */ + +/* + * R785 (0x311) - AIF2 Control (2) + */ +#define WM8995_AIF2DACL_SRC 0x8000 /* AIF2DACL_SRC */ +#define WM8995_AIF2DACL_SRC_MASK 0x8000 /* AIF2DACL_SRC */ +#define WM8995_AIF2DACL_SRC_SHIFT 15 /* AIF2DACL_SRC */ +#define WM8995_AIF2DACL_SRC_WIDTH 1 /* AIF2DACL_SRC */ +#define WM8995_AIF2DACR_SRC 0x4000 /* AIF2DACR_SRC */ +#define WM8995_AIF2DACR_SRC_MASK 0x4000 /* AIF2DACR_SRC */ +#define WM8995_AIF2DACR_SRC_SHIFT 14 /* AIF2DACR_SRC */ +#define WM8995_AIF2DACR_SRC_WIDTH 1 /* AIF2DACR_SRC */ +#define WM8995_AIF2DAC_TDM 0x2000 /* AIF2DAC_TDM */ +#define WM8995_AIF2DAC_TDM_MASK 0x2000 /* AIF2DAC_TDM */ +#define WM8995_AIF2DAC_TDM_SHIFT 13 /* AIF2DAC_TDM */ +#define WM8995_AIF2DAC_TDM_WIDTH 1 /* AIF2DAC_TDM */ +#define WM8995_AIF2DAC_TDM_CHAN 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8995_AIF2DAC_TDM_CHAN_MASK 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8995_AIF2DAC_TDM_CHAN_SHIFT 12 /* AIF2DAC_TDM_CHAN */ +#define WM8995_AIF2DAC_TDM_CHAN_WIDTH 1 /* AIF2DAC_TDM_CHAN */ +#define WM8995_AIF2DAC_BOOST_MASK 0x0C00 /* AIF2DAC_BOOST - [11:10] */ +#define WM8995_AIF2DAC_BOOST_SHIFT 10 /* AIF2DAC_BOOST - [11:10] */ +#define WM8995_AIF2DAC_BOOST_WIDTH 2 /* AIF2DAC_BOOST - [11:10] */ +#define WM8995_AIF2DAC_COMP 0x0010 /* AIF2DAC_COMP */ +#define WM8995_AIF2DAC_COMP_MASK 0x0010 /* AIF2DAC_COMP */ +#define WM8995_AIF2DAC_COMP_SHIFT 4 /* AIF2DAC_COMP */ +#define WM8995_AIF2DAC_COMP_WIDTH 1 /* AIF2DAC_COMP */ +#define WM8995_AIF2DAC_COMPMODE 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8995_AIF2DAC_COMPMODE_MASK 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8995_AIF2DAC_COMPMODE_SHIFT 3 /* AIF2DAC_COMPMODE */ +#define WM8995_AIF2DAC_COMPMODE_WIDTH 1 /* AIF2DAC_COMPMODE */ +#define WM8995_AIF2ADC_COMP 0x0004 /* AIF2ADC_COMP */ +#define WM8995_AIF2ADC_COMP_MASK 0x0004 /* AIF2ADC_COMP */ +#define WM8995_AIF2ADC_COMP_SHIFT 2 /* AIF2ADC_COMP */ +#define WM8995_AIF2ADC_COMP_WIDTH 1 /* AIF2ADC_COMP */ +#define WM8995_AIF2ADC_COMPMODE 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8995_AIF2ADC_COMPMODE_MASK 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8995_AIF2ADC_COMPMODE_SHIFT 1 /* AIF2ADC_COMPMODE */ +#define WM8995_AIF2ADC_COMPMODE_WIDTH 1 /* AIF2ADC_COMPMODE */ +#define WM8995_AIF2_LOOPBACK 0x0001 /* AIF2_LOOPBACK */ +#define WM8995_AIF2_LOOPBACK_MASK 0x0001 /* AIF2_LOOPBACK */ +#define WM8995_AIF2_LOOPBACK_SHIFT 0 /* AIF2_LOOPBACK */ +#define WM8995_AIF2_LOOPBACK_WIDTH 1 /* AIF2_LOOPBACK */ + +/* + * R786 (0x312) - AIF2 Master/Slave + */ +#define WM8995_AIF2_TRI 0x8000 /* AIF2_TRI */ +#define WM8995_AIF2_TRI_MASK 0x8000 /* AIF2_TRI */ +#define WM8995_AIF2_TRI_SHIFT 15 /* AIF2_TRI */ +#define WM8995_AIF2_TRI_WIDTH 1 /* AIF2_TRI */ +#define WM8995_AIF2_MSTR 0x4000 /* 
AIF2_MSTR */ +#define WM8995_AIF2_MSTR_MASK 0x4000 /* AIF2_MSTR */ +#define WM8995_AIF2_MSTR_SHIFT 14 /* AIF2_MSTR */ +#define WM8995_AIF2_MSTR_WIDTH 1 /* AIF2_MSTR */ +#define WM8995_AIF2_CLK_FRC 0x2000 /* AIF2_CLK_FRC */ +#define WM8995_AIF2_CLK_FRC_MASK 0x2000 /* AIF2_CLK_FRC */ +#define WM8995_AIF2_CLK_FRC_SHIFT 13 /* AIF2_CLK_FRC */ +#define WM8995_AIF2_CLK_FRC_WIDTH 1 /* AIF2_CLK_FRC */ +#define WM8995_AIF2_LRCLK_FRC 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8995_AIF2_LRCLK_FRC_MASK 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8995_AIF2_LRCLK_FRC_SHIFT 12 /* AIF2_LRCLK_FRC */ +#define WM8995_AIF2_LRCLK_FRC_WIDTH 1 /* AIF2_LRCLK_FRC */ + +/* + * R787 (0x313) - AIF2 BCLK + */ +#define WM8995_AIF2_BCLK_DIV_MASK 0x00F0 /* AIF2_BCLK_DIV - [7:4] */ +#define WM8995_AIF2_BCLK_DIV_SHIFT 4 /* AIF2_BCLK_DIV - [7:4] */ +#define WM8995_AIF2_BCLK_DIV_WIDTH 4 /* AIF2_BCLK_DIV - [7:4] */ + +/* + * R788 (0x314) - AIF2ADC LRCLK + */ +#define WM8995_AIF2ADC_LRCLK_DIR 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8995_AIF2ADC_LRCLK_DIR_MASK 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8995_AIF2ADC_LRCLK_DIR_SHIFT 11 /* AIF2ADC_LRCLK_DIR */ +#define WM8995_AIF2ADC_LRCLK_DIR_WIDTH 1 /* AIF2ADC_LRCLK_DIR */ +#define WM8995_AIF2ADC_RATE_MASK 0x07FF /* AIF2ADC_RATE - [10:0] */ +#define WM8995_AIF2ADC_RATE_SHIFT 0 /* AIF2ADC_RATE - [10:0] */ +#define WM8995_AIF2ADC_RATE_WIDTH 11 /* AIF2ADC_RATE - [10:0] */ + +/* + * R789 (0x315) - AIF2DAC LRCLK + */ +#define WM8995_AIF2DAC_LRCLK_DIR 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8995_AIF2DAC_LRCLK_DIR_MASK 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8995_AIF2DAC_LRCLK_DIR_SHIFT 11 /* AIF2DAC_LRCLK_DIR */ +#define WM8995_AIF2DAC_LRCLK_DIR_WIDTH 1 /* AIF2DAC_LRCLK_DIR */ +#define WM8995_AIF2DAC_RATE_MASK 0x07FF /* AIF2DAC_RATE - [10:0] */ +#define WM8995_AIF2DAC_RATE_SHIFT 0 /* AIF2DAC_RATE - [10:0] */ +#define WM8995_AIF2DAC_RATE_WIDTH 11 /* AIF2DAC_RATE - [10:0] */ + +/* + * R790 (0x316) - AIF2DAC Data + */ +#define WM8995_AIF2DACL_DAT_INV 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8995_AIF2DACL_DAT_INV_MASK 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8995_AIF2DACL_DAT_INV_SHIFT 1 /* AIF2DACL_DAT_INV */ +#define WM8995_AIF2DACL_DAT_INV_WIDTH 1 /* AIF2DACL_DAT_INV */ +#define WM8995_AIF2DACR_DAT_INV 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8995_AIF2DACR_DAT_INV_MASK 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8995_AIF2DACR_DAT_INV_SHIFT 0 /* AIF2DACR_DAT_INV */ +#define WM8995_AIF2DACR_DAT_INV_WIDTH 1 /* AIF2DACR_DAT_INV */ + +/* + * R791 (0x317) - AIF2ADC Data + */ +#define WM8995_AIF2ADCL_DAT_INV 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8995_AIF2ADCL_DAT_INV_MASK 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8995_AIF2ADCL_DAT_INV_SHIFT 1 /* AIF2ADCL_DAT_INV */ +#define WM8995_AIF2ADCL_DAT_INV_WIDTH 1 /* AIF2ADCL_DAT_INV */ +#define WM8995_AIF2ADCR_DAT_INV 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8995_AIF2ADCR_DAT_INV_MASK 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8995_AIF2ADCR_DAT_INV_SHIFT 0 /* AIF2ADCR_DAT_INV */ +#define WM8995_AIF2ADCR_DAT_INV_WIDTH 1 /* AIF2ADCR_DAT_INV */ + +/* + * R1024 (0x400) - AIF1 ADC1 Left Volume + */ +#define WM8995_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1L_VOL_MASK 0x00FF /* AIF1ADC1L_VOL - [7:0] */ +#define WM8995_AIF1ADC1L_VOL_SHIFT 0 /* AIF1ADC1L_VOL - [7:0] */ +#define WM8995_AIF1ADC1L_VOL_WIDTH 8 /* AIF1ADC1L_VOL - [7:0] */ + +/* + * R1025 (0x401) - AIF1 ADC1 Right Volume + 
*/ +#define WM8995_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8995_AIF1ADC1R_VOL_MASK 0x00FF /* AIF1ADC1R_VOL - [7:0] */ +#define WM8995_AIF1ADC1R_VOL_SHIFT 0 /* AIF1ADC1R_VOL - [7:0] */ +#define WM8995_AIF1ADC1R_VOL_WIDTH 8 /* AIF1ADC1R_VOL - [7:0] */ + +/* + * R1026 (0x402) - AIF1 DAC1 Left Volume + */ +#define WM8995_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1L_VOL_MASK 0x00FF /* AIF1DAC1L_VOL - [7:0] */ +#define WM8995_AIF1DAC1L_VOL_SHIFT 0 /* AIF1DAC1L_VOL - [7:0] */ +#define WM8995_AIF1DAC1L_VOL_WIDTH 8 /* AIF1DAC1L_VOL - [7:0] */ + +/* + * R1027 (0x403) - AIF1 DAC1 Right Volume + */ +#define WM8995_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8995_AIF1DAC1R_VOL_MASK 0x00FF /* AIF1DAC1R_VOL - [7:0] */ +#define WM8995_AIF1DAC1R_VOL_SHIFT 0 /* AIF1DAC1R_VOL - [7:0] */ +#define WM8995_AIF1DAC1R_VOL_WIDTH 8 /* AIF1DAC1R_VOL - [7:0] */ + +/* + * R1028 (0x404) - AIF1 ADC2 Left Volume + */ +#define WM8995_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2L_VOL_MASK 0x00FF /* AIF1ADC2L_VOL - [7:0] */ +#define WM8995_AIF1ADC2L_VOL_SHIFT 0 /* AIF1ADC2L_VOL - [7:0] */ +#define WM8995_AIF1ADC2L_VOL_WIDTH 8 /* AIF1ADC2L_VOL - [7:0] */ + +/* + * R1029 (0x405) - AIF1 ADC2 Right Volume + */ +#define WM8995_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8995_AIF1ADC2R_VOL_MASK 0x00FF /* AIF1ADC2R_VOL - [7:0] */ +#define WM8995_AIF1ADC2R_VOL_SHIFT 0 /* AIF1ADC2R_VOL - [7:0] */ +#define WM8995_AIF1ADC2R_VOL_WIDTH 8 /* AIF1ADC2R_VOL - [7:0] */ + +/* + * R1030 (0x406) - AIF1 DAC2 Left Volume + */ +#define WM8995_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2L_VOL_MASK 0x00FF /* AIF1DAC2L_VOL - [7:0] */ +#define WM8995_AIF1DAC2L_VOL_SHIFT 0 /* AIF1DAC2L_VOL - [7:0] */ +#define WM8995_AIF1DAC2L_VOL_WIDTH 8 /* AIF1DAC2L_VOL - [7:0] */ + +/* + * R1031 (0x407) - AIF1 DAC2 Right Volume + */ +#define WM8995_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8995_AIF1DAC2R_VOL_MASK 0x00FF /* AIF1DAC2R_VOL - [7:0] */ +#define WM8995_AIF1DAC2R_VOL_SHIFT 0 /* AIF1DAC2R_VOL - [7:0] */ +#define WM8995_AIF1DAC2R_VOL_WIDTH 8 /* AIF1DAC2R_VOL - [7:0] */ + +/* + * R1040 (0x410) - AIF1 ADC1 Filters + */ +#define WM8995_AIF1ADC_4FS 0x8000 /* AIF1ADC_4FS */ +#define WM8995_AIF1ADC_4FS_MASK 0x8000 /* AIF1ADC_4FS */ +#define WM8995_AIF1ADC_4FS_SHIFT 15 /* AIF1ADC_4FS */ +#define WM8995_AIF1ADC_4FS_WIDTH 1 /* AIF1ADC_4FS */ +#define 
WM8995_AIF1ADC1L_HPF 0x1000 /* AIF1ADC1L_HPF */ +#define WM8995_AIF1ADC1L_HPF_MASK 0x1000 /* AIF1ADC1L_HPF */ +#define WM8995_AIF1ADC1L_HPF_SHIFT 12 /* AIF1ADC1L_HPF */ +#define WM8995_AIF1ADC1L_HPF_WIDTH 1 /* AIF1ADC1L_HPF */ +#define WM8995_AIF1ADC1R_HPF 0x0800 /* AIF1ADC1R_HPF */ +#define WM8995_AIF1ADC1R_HPF_MASK 0x0800 /* AIF1ADC1R_HPF */ +#define WM8995_AIF1ADC1R_HPF_SHIFT 11 /* AIF1ADC1R_HPF */ +#define WM8995_AIF1ADC1R_HPF_WIDTH 1 /* AIF1ADC1R_HPF */ +#define WM8995_AIF1ADC1_HPF_MODE 0x0008 /* AIF1ADC1_HPF_MODE */ +#define WM8995_AIF1ADC1_HPF_MODE_MASK 0x0008 /* AIF1ADC1_HPF_MODE */ +#define WM8995_AIF1ADC1_HPF_MODE_SHIFT 3 /* AIF1ADC1_HPF_MODE */ +#define WM8995_AIF1ADC1_HPF_MODE_WIDTH 1 /* AIF1ADC1_HPF_MODE */ +#define WM8995_AIF1ADC1_HPF_CUT_MASK 0x0007 /* AIF1ADC1_HPF_CUT - [2:0] */ +#define WM8995_AIF1ADC1_HPF_CUT_SHIFT 0 /* AIF1ADC1_HPF_CUT - [2:0] */ +#define WM8995_AIF1ADC1_HPF_CUT_WIDTH 3 /* AIF1ADC1_HPF_CUT - [2:0] */ + +/* + * R1041 (0x411) - AIF1 ADC2 Filters + */ +#define WM8995_AIF1ADC2L_HPF 0x1000 /* AIF1ADC2L_HPF */ +#define WM8995_AIF1ADC2L_HPF_MASK 0x1000 /* AIF1ADC2L_HPF */ +#define WM8995_AIF1ADC2L_HPF_SHIFT 12 /* AIF1ADC2L_HPF */ +#define WM8995_AIF1ADC2L_HPF_WIDTH 1 /* AIF1ADC2L_HPF */ +#define WM8995_AIF1ADC2R_HPF 0x0800 /* AIF1ADC2R_HPF */ +#define WM8995_AIF1ADC2R_HPF_MASK 0x0800 /* AIF1ADC2R_HPF */ +#define WM8995_AIF1ADC2R_HPF_SHIFT 11 /* AIF1ADC2R_HPF */ +#define WM8995_AIF1ADC2R_HPF_WIDTH 1 /* AIF1ADC2R_HPF */ +#define WM8995_AIF1ADC2_HPF_MODE 0x0008 /* AIF1ADC2_HPF_MODE */ +#define WM8995_AIF1ADC2_HPF_MODE_MASK 0x0008 /* AIF1ADC2_HPF_MODE */ +#define WM8995_AIF1ADC2_HPF_MODE_SHIFT 3 /* AIF1ADC2_HPF_MODE */ +#define WM8995_AIF1ADC2_HPF_MODE_WIDTH 1 /* AIF1ADC2_HPF_MODE */ +#define WM8995_AIF1ADC2_HPF_CUT_MASK 0x0007 /* AIF1ADC2_HPF_CUT - [2:0] */ +#define WM8995_AIF1ADC2_HPF_CUT_SHIFT 0 /* AIF1ADC2_HPF_CUT - [2:0] */ +#define WM8995_AIF1ADC2_HPF_CUT_WIDTH 3 /* AIF1ADC2_HPF_CUT - [2:0] */ + +/* + * R1056 (0x420) - AIF1 DAC1 Filters (1) + */ +#define WM8995_AIF1DAC1_MUTE 0x0200 /* AIF1DAC1_MUTE */ +#define WM8995_AIF1DAC1_MUTE_MASK 0x0200 /* AIF1DAC1_MUTE */ +#define WM8995_AIF1DAC1_MUTE_SHIFT 9 /* AIF1DAC1_MUTE */ +#define WM8995_AIF1DAC1_MUTE_WIDTH 1 /* AIF1DAC1_MUTE */ +#define WM8995_AIF1DAC1_MONO 0x0080 /* AIF1DAC1_MONO */ +#define WM8995_AIF1DAC1_MONO_MASK 0x0080 /* AIF1DAC1_MONO */ +#define WM8995_AIF1DAC1_MONO_SHIFT 7 /* AIF1DAC1_MONO */ +#define WM8995_AIF1DAC1_MONO_WIDTH 1 /* AIF1DAC1_MONO */ +#define WM8995_AIF1DAC1_MUTERATE 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8995_AIF1DAC1_MUTERATE_MASK 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8995_AIF1DAC1_MUTERATE_SHIFT 5 /* AIF1DAC1_MUTERATE */ +#define WM8995_AIF1DAC1_MUTERATE_WIDTH 1 /* AIF1DAC1_MUTERATE */ +#define WM8995_AIF1DAC1_UNMUTE_RAMP 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8995_AIF1DAC1_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8995_AIF1DAC1_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8995_AIF1DAC1_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8995_AIF1DAC1_DEEMP_MASK 0x0006 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8995_AIF1DAC1_DEEMP_SHIFT 1 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8995_AIF1DAC1_DEEMP_WIDTH 2 /* AIF1DAC1_DEEMP - [2:1] */ + +/* + * R1057 (0x421) - AIF1 DAC1 Filters (2) + */ +#define WM8995_AIF1DAC1_3D_GAIN_MASK 0x3E00 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC1_3D_GAIN_SHIFT 9 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC1_3D_GAIN_WIDTH 5 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC1_3D_ENA 
0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8995_AIF1DAC1_3D_ENA_MASK 0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8995_AIF1DAC1_3D_ENA_SHIFT 8 /* AIF1DAC1_3D_ENA */ +#define WM8995_AIF1DAC1_3D_ENA_WIDTH 1 /* AIF1DAC1_3D_ENA */ + +/* + * R1058 (0x422) - AIF1 DAC2 Filters (1) + */ +#define WM8995_AIF1DAC2_MUTE 0x0200 /* AIF1DAC2_MUTE */ +#define WM8995_AIF1DAC2_MUTE_MASK 0x0200 /* AIF1DAC2_MUTE */ +#define WM8995_AIF1DAC2_MUTE_SHIFT 9 /* AIF1DAC2_MUTE */ +#define WM8995_AIF1DAC2_MUTE_WIDTH 1 /* AIF1DAC2_MUTE */ +#define WM8995_AIF1DAC2_MONO 0x0080 /* AIF1DAC2_MONO */ +#define WM8995_AIF1DAC2_MONO_MASK 0x0080 /* AIF1DAC2_MONO */ +#define WM8995_AIF1DAC2_MONO_SHIFT 7 /* AIF1DAC2_MONO */ +#define WM8995_AIF1DAC2_MONO_WIDTH 1 /* AIF1DAC2_MONO */ +#define WM8995_AIF1DAC2_MUTERATE 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8995_AIF1DAC2_MUTERATE_MASK 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8995_AIF1DAC2_MUTERATE_SHIFT 5 /* AIF1DAC2_MUTERATE */ +#define WM8995_AIF1DAC2_MUTERATE_WIDTH 1 /* AIF1DAC2_MUTERATE */ +#define WM8995_AIF1DAC2_UNMUTE_RAMP 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8995_AIF1DAC2_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8995_AIF1DAC2_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8995_AIF1DAC2_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8995_AIF1DAC2_DEEMP_MASK 0x0006 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8995_AIF1DAC2_DEEMP_SHIFT 1 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8995_AIF1DAC2_DEEMP_WIDTH 2 /* AIF1DAC2_DEEMP - [2:1] */ + +/* + * R1059 (0x423) - AIF1 DAC2 Filters (2) + */ +#define WM8995_AIF1DAC2_3D_GAIN_MASK 0x3E00 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC2_3D_GAIN_SHIFT 9 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC2_3D_GAIN_WIDTH 5 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8995_AIF1DAC2_3D_ENA 0x0100 /* AIF1DAC2_3D_ENA */ +#define WM8995_AIF1DAC2_3D_ENA_MASK 0x0100 /* AIF1DAC2_3D_ENA */ +#define WM8995_AIF1DAC2_3D_ENA_SHIFT 8 /* AIF1DAC2_3D_ENA */ +#define WM8995_AIF1DAC2_3D_ENA_WIDTH 1 /* AIF1DAC2_3D_ENA */ + +/* + * R1088 (0x440) - AIF1 DRC1 (1) + */ +#define WM8995_AIF1DRC1_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC1_SIG_DET_RMS_SHIFT 11 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC1_SIG_DET_RMS_WIDTH 5 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC1_SIG_DET_PK_MASK 0x0600 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC1_SIG_DET_PK_SHIFT 9 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC1_SIG_DET_PK_WIDTH 2 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC1_NG_ENA 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8995_AIF1DRC1_NG_ENA_MASK 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8995_AIF1DRC1_NG_ENA_SHIFT 8 /* AIF1DRC1_NG_ENA */ +#define WM8995_AIF1DRC1_NG_ENA_WIDTH 1 /* AIF1DRC1_NG_ENA */ +#define WM8995_AIF1DRC1_SIG_DET_MODE 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8995_AIF1DRC1_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8995_AIF1DRC1_SIG_DET_MODE_SHIFT 7 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8995_AIF1DRC1_SIG_DET_MODE_WIDTH 1 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8995_AIF1DRC1_SIG_DET 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8995_AIF1DRC1_SIG_DET_MASK 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8995_AIF1DRC1_SIG_DET_SHIFT 6 /* AIF1DRC1_SIG_DET */ +#define WM8995_AIF1DRC1_SIG_DET_WIDTH 1 /* AIF1DRC1_SIG_DET */ +#define WM8995_AIF1DRC1_KNEE2_OP_ENA 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC1_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define 
WM8995_AIF1DRC1_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC1_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC1_QR 0x0010 /* AIF1DRC1_QR */ +#define WM8995_AIF1DRC1_QR_MASK 0x0010 /* AIF1DRC1_QR */ +#define WM8995_AIF1DRC1_QR_SHIFT 4 /* AIF1DRC1_QR */ +#define WM8995_AIF1DRC1_QR_WIDTH 1 /* AIF1DRC1_QR */ +#define WM8995_AIF1DRC1_ANTICLIP 0x0008 /* AIF1DRC1_ANTICLIP */ +#define WM8995_AIF1DRC1_ANTICLIP_MASK 0x0008 /* AIF1DRC1_ANTICLIP */ +#define WM8995_AIF1DRC1_ANTICLIP_SHIFT 3 /* AIF1DRC1_ANTICLIP */ +#define WM8995_AIF1DRC1_ANTICLIP_WIDTH 1 /* AIF1DRC1_ANTICLIP */ +#define WM8995_AIF1DAC1_DRC_ENA 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8995_AIF1DAC1_DRC_ENA_MASK 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8995_AIF1DAC1_DRC_ENA_SHIFT 2 /* AIF1DAC1_DRC_ENA */ +#define WM8995_AIF1DAC1_DRC_ENA_WIDTH 1 /* AIF1DAC1_DRC_ENA */ +#define WM8995_AIF1ADC1L_DRC_ENA 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8995_AIF1ADC1L_DRC_ENA_MASK 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8995_AIF1ADC1L_DRC_ENA_SHIFT 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8995_AIF1ADC1L_DRC_ENA_WIDTH 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8995_AIF1ADC1R_DRC_ENA 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8995_AIF1ADC1R_DRC_ENA_MASK 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8995_AIF1ADC1R_DRC_ENA_SHIFT 0 /* AIF1ADC1R_DRC_ENA */ +#define WM8995_AIF1ADC1R_DRC_ENA_WIDTH 1 /* AIF1ADC1R_DRC_ENA */ + +/* + * R1089 (0x441) - AIF1 DRC1 (2) + */ +#define WM8995_AIF1DRC1_ATK_MASK 0x1E00 /* AIF1DRC1_ATK - [12:9] */ +#define WM8995_AIF1DRC1_ATK_SHIFT 9 /* AIF1DRC1_ATK - [12:9] */ +#define WM8995_AIF1DRC1_ATK_WIDTH 4 /* AIF1DRC1_ATK - [12:9] */ +#define WM8995_AIF1DRC1_DCY_MASK 0x01E0 /* AIF1DRC1_DCY - [8:5] */ +#define WM8995_AIF1DRC1_DCY_SHIFT 5 /* AIF1DRC1_DCY - [8:5] */ +#define WM8995_AIF1DRC1_DCY_WIDTH 4 /* AIF1DRC1_DCY - [8:5] */ +#define WM8995_AIF1DRC1_MINGAIN_MASK 0x001C /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC1_MINGAIN_SHIFT 2 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC1_MINGAIN_WIDTH 3 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC1_MAXGAIN_MASK 0x0003 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8995_AIF1DRC1_MAXGAIN_SHIFT 0 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8995_AIF1DRC1_MAXGAIN_WIDTH 2 /* AIF1DRC1_MAXGAIN - [1:0] */ + +/* + * R1090 (0x442) - AIF1 DRC1 (3) + */ +#define WM8995_AIF1DRC1_NG_MINGAIN_MASK 0xF000 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC1_NG_MINGAIN_SHIFT 12 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC1_NG_MINGAIN_WIDTH 4 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC1_NG_EXP_MASK 0x0C00 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC1_NG_EXP_SHIFT 10 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC1_NG_EXP_WIDTH 2 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC1_QR_THR_MASK 0x0300 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8995_AIF1DRC1_QR_THR_SHIFT 8 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8995_AIF1DRC1_QR_THR_WIDTH 2 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8995_AIF1DRC1_QR_DCY_MASK 0x00C0 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC1_QR_DCY_SHIFT 6 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC1_QR_DCY_WIDTH 2 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC1_HI_COMP_MASK 0x0038 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC1_HI_COMP_SHIFT 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC1_HI_COMP_WIDTH 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC1_LO_COMP_MASK 0x0007 /* AIF1DRC1_LO_COMP - [2:0] */ +#define 
WM8995_AIF1DRC1_LO_COMP_SHIFT 0 /* AIF1DRC1_LO_COMP - [2:0] */ +#define WM8995_AIF1DRC1_LO_COMP_WIDTH 3 /* AIF1DRC1_LO_COMP - [2:0] */ + +/* + * R1091 (0x443) - AIF1 DRC1 (4) + */ +#define WM8995_AIF1DRC1_KNEE_IP_MASK 0x07E0 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC1_KNEE_IP_SHIFT 5 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC1_KNEE_IP_WIDTH 6 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC1_KNEE_OP_MASK 0x001F /* AIF1DRC1_KNEE_OP - [4:0] */ +#define WM8995_AIF1DRC1_KNEE_OP_SHIFT 0 /* AIF1DRC1_KNEE_OP - [4:0] */ +#define WM8995_AIF1DRC1_KNEE_OP_WIDTH 5 /* AIF1DRC1_KNEE_OP - [4:0] */ + +/* + * R1092 (0x444) - AIF1 DRC1 (5) + */ +#define WM8995_AIF1DRC1_KNEE2_IP_MASK 0x03E0 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC1_KNEE2_IP_SHIFT 5 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC1_KNEE2_IP_WIDTH 5 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC1_KNEE2_OP_MASK 0x001F /* AIF1DRC1_KNEE2_OP - [4:0] */ +#define WM8995_AIF1DRC1_KNEE2_OP_SHIFT 0 /* AIF1DRC1_KNEE2_OP - [4:0] */ +#define WM8995_AIF1DRC1_KNEE2_OP_WIDTH 5 /* AIF1DRC1_KNEE2_OP - [4:0] */ + +/* + * R1104 (0x450) - AIF1 DRC2 (1) + */ +#define WM8995_AIF1DRC2_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC2_SIG_DET_RMS_SHIFT 11 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC2_SIG_DET_RMS_WIDTH 5 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF1DRC2_SIG_DET_PK_MASK 0x0600 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC2_SIG_DET_PK_SHIFT 9 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC2_SIG_DET_PK_WIDTH 2 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8995_AIF1DRC2_NG_ENA 0x0100 /* AIF1DRC2_NG_ENA */ +#define WM8995_AIF1DRC2_NG_ENA_MASK 0x0100 /* AIF1DRC2_NG_ENA */ +#define WM8995_AIF1DRC2_NG_ENA_SHIFT 8 /* AIF1DRC2_NG_ENA */ +#define WM8995_AIF1DRC2_NG_ENA_WIDTH 1 /* AIF1DRC2_NG_ENA */ +#define WM8995_AIF1DRC2_SIG_DET_MODE 0x0080 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8995_AIF1DRC2_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8995_AIF1DRC2_SIG_DET_MODE_SHIFT 7 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8995_AIF1DRC2_SIG_DET_MODE_WIDTH 1 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8995_AIF1DRC2_SIG_DET 0x0040 /* AIF1DRC2_SIG_DET */ +#define WM8995_AIF1DRC2_SIG_DET_MASK 0x0040 /* AIF1DRC2_SIG_DET */ +#define WM8995_AIF1DRC2_SIG_DET_SHIFT 6 /* AIF1DRC2_SIG_DET */ +#define WM8995_AIF1DRC2_SIG_DET_WIDTH 1 /* AIF1DRC2_SIG_DET */ +#define WM8995_AIF1DRC2_KNEE2_OP_ENA 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC2_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC2_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC2_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8995_AIF1DRC2_QR 0x0010 /* AIF1DRC2_QR */ +#define WM8995_AIF1DRC2_QR_MASK 0x0010 /* AIF1DRC2_QR */ +#define WM8995_AIF1DRC2_QR_SHIFT 4 /* AIF1DRC2_QR */ +#define WM8995_AIF1DRC2_QR_WIDTH 1 /* AIF1DRC2_QR */ +#define WM8995_AIF1DRC2_ANTICLIP 0x0008 /* AIF1DRC2_ANTICLIP */ +#define WM8995_AIF1DRC2_ANTICLIP_MASK 0x0008 /* AIF1DRC2_ANTICLIP */ +#define WM8995_AIF1DRC2_ANTICLIP_SHIFT 3 /* AIF1DRC2_ANTICLIP */ +#define WM8995_AIF1DRC2_ANTICLIP_WIDTH 1 /* AIF1DRC2_ANTICLIP */ +#define WM8995_AIF1DAC2_DRC_ENA 0x0004 /* AIF1DAC2_DRC_ENA */ +#define WM8995_AIF1DAC2_DRC_ENA_MASK 0x0004 /* AIF1DAC2_DRC_ENA */ +#define WM8995_AIF1DAC2_DRC_ENA_SHIFT 2 /* AIF1DAC2_DRC_ENA */ +#define WM8995_AIF1DAC2_DRC_ENA_WIDTH 1 /* AIF1DAC2_DRC_ENA */ +#define WM8995_AIF1ADC2L_DRC_ENA 0x0002 /* 
AIF1ADC2L_DRC_ENA */ +#define WM8995_AIF1ADC2L_DRC_ENA_MASK 0x0002 /* AIF1ADC2L_DRC_ENA */ +#define WM8995_AIF1ADC2L_DRC_ENA_SHIFT 1 /* AIF1ADC2L_DRC_ENA */ +#define WM8995_AIF1ADC2L_DRC_ENA_WIDTH 1 /* AIF1ADC2L_DRC_ENA */ +#define WM8995_AIF1ADC2R_DRC_ENA 0x0001 /* AIF1ADC2R_DRC_ENA */ +#define WM8995_AIF1ADC2R_DRC_ENA_MASK 0x0001 /* AIF1ADC2R_DRC_ENA */ +#define WM8995_AIF1ADC2R_DRC_ENA_SHIFT 0 /* AIF1ADC2R_DRC_ENA */ +#define WM8995_AIF1ADC2R_DRC_ENA_WIDTH 1 /* AIF1ADC2R_DRC_ENA */ + +/* + * R1105 (0x451) - AIF1 DRC2 (2) + */ +#define WM8995_AIF1DRC2_ATK_MASK 0x1E00 /* AIF1DRC2_ATK - [12:9] */ +#define WM8995_AIF1DRC2_ATK_SHIFT 9 /* AIF1DRC2_ATK - [12:9] */ +#define WM8995_AIF1DRC2_ATK_WIDTH 4 /* AIF1DRC2_ATK - [12:9] */ +#define WM8995_AIF1DRC2_DCY_MASK 0x01E0 /* AIF1DRC2_DCY - [8:5] */ +#define WM8995_AIF1DRC2_DCY_SHIFT 5 /* AIF1DRC2_DCY - [8:5] */ +#define WM8995_AIF1DRC2_DCY_WIDTH 4 /* AIF1DRC2_DCY - [8:5] */ +#define WM8995_AIF1DRC2_MINGAIN_MASK 0x001C /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC2_MINGAIN_SHIFT 2 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC2_MINGAIN_WIDTH 3 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8995_AIF1DRC2_MAXGAIN_MASK 0x0003 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8995_AIF1DRC2_MAXGAIN_SHIFT 0 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8995_AIF1DRC2_MAXGAIN_WIDTH 2 /* AIF1DRC2_MAXGAIN - [1:0] */ + +/* + * R1106 (0x452) - AIF1 DRC2 (3) + */ +#define WM8995_AIF1DRC2_NG_MINGAIN_MASK 0xF000 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC2_NG_MINGAIN_SHIFT 12 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC2_NG_MINGAIN_WIDTH 4 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8995_AIF1DRC2_NG_EXP_MASK 0x0C00 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC2_NG_EXP_SHIFT 10 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC2_NG_EXP_WIDTH 2 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8995_AIF1DRC2_QR_THR_MASK 0x0300 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8995_AIF1DRC2_QR_THR_SHIFT 8 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8995_AIF1DRC2_QR_THR_WIDTH 2 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8995_AIF1DRC2_QR_DCY_MASK 0x00C0 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC2_QR_DCY_SHIFT 6 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC2_QR_DCY_WIDTH 2 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8995_AIF1DRC2_HI_COMP_MASK 0x0038 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC2_HI_COMP_SHIFT 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC2_HI_COMP_WIDTH 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8995_AIF1DRC2_LO_COMP_MASK 0x0007 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8995_AIF1DRC2_LO_COMP_SHIFT 0 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8995_AIF1DRC2_LO_COMP_WIDTH 3 /* AIF1DRC2_LO_COMP - [2:0] */ + +/* + * R1107 (0x453) - AIF1 DRC2 (4) + */ +#define WM8995_AIF1DRC2_KNEE_IP_MASK 0x07E0 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC2_KNEE_IP_SHIFT 5 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC2_KNEE_IP_WIDTH 6 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8995_AIF1DRC2_KNEE_OP_MASK 0x001F /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8995_AIF1DRC2_KNEE_OP_SHIFT 0 /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8995_AIF1DRC2_KNEE_OP_WIDTH 5 /* AIF1DRC2_KNEE_OP - [4:0] */ + +/* + * R1108 (0x454) - AIF1 DRC2 (5) + */ +#define WM8995_AIF1DRC2_KNEE2_IP_MASK 0x03E0 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC2_KNEE2_IP_SHIFT 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC2_KNEE2_IP_WIDTH 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8995_AIF1DRC2_KNEE2_OP_MASK 0x001F 
/* AIF1DRC2_KNEE2_OP - [4:0] */ +#define WM8995_AIF1DRC2_KNEE2_OP_SHIFT 0 /* AIF1DRC2_KNEE2_OP - [4:0] */ +#define WM8995_AIF1DRC2_KNEE2_OP_WIDTH 5 /* AIF1DRC2_KNEE2_OP - [4:0] */ + +/* + * R1152 (0x480) - AIF1 DAC1 EQ Gains (1) + */ +#define WM8995_AIF1DAC1_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC1_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC1_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC1_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC1_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC1_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC1_EQ_ENA 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8995_AIF1DAC1_EQ_ENA_MASK 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8995_AIF1DAC1_EQ_ENA_SHIFT 0 /* AIF1DAC1_EQ_ENA */ +#define WM8995_AIF1DAC1_EQ_ENA_WIDTH 1 /* AIF1DAC1_EQ_ENA */ + +/* + * R1153 (0x481) - AIF1 DAC1 EQ Gains (2) + */ +#define WM8995_AIF1DAC1_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC1_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF1DAC1_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF1DAC1_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ + +/* + * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A + */ +#define WM8995_AIF1DAC1_EQ_B1_A_MASK 0xFFFF /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_A_SHIFT 0 /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_A_WIDTH 16 /* AIF1DAC1_EQ_B1_A - [15:0] */ + +/* + * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B + */ +#define WM8995_AIF1DAC1_EQ_B1_B_MASK 0xFFFF /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_B_SHIFT 0 /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_B_WIDTH 16 /* AIF1DAC1_EQ_B1_B - [15:0] */ + +/* + * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG + */ +#define WM8995_AIF1DAC1_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_PG_SHIFT 0 /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B1_PG_WIDTH 16 /* AIF1DAC1_EQ_B1_PG - [15:0] */ + +/* + * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A + */ +#define WM8995_AIF1DAC1_EQ_B2_A_MASK 0xFFFF /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_A_SHIFT 0 /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_A_WIDTH 16 /* AIF1DAC1_EQ_B2_A - [15:0] */ + +/* + * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B + */ +#define WM8995_AIF1DAC1_EQ_B2_B_MASK 0xFFFF /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_B_SHIFT 0 /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_B_WIDTH 16 /* AIF1DAC1_EQ_B2_B - [15:0] */ + +/* + * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C + */ +#define WM8995_AIF1DAC1_EQ_B2_C_MASK 0xFFFF /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_C_SHIFT 0 /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_C_WIDTH 16 /* AIF1DAC1_EQ_B2_C - [15:0] */ + +/* + * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG + */ +#define WM8995_AIF1DAC1_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B2_PG - [15:0] */ 
+#define WM8995_AIF1DAC1_EQ_B2_PG_SHIFT 0 /* AIF1DAC1_EQ_B2_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B2_PG_WIDTH 16 /* AIF1DAC1_EQ_B2_PG - [15:0] */ + +/* + * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A + */ +#define WM8995_AIF1DAC1_EQ_B3_A_MASK 0xFFFF /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_A_SHIFT 0 /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_A_WIDTH 16 /* AIF1DAC1_EQ_B3_A - [15:0] */ + +/* + * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B + */ +#define WM8995_AIF1DAC1_EQ_B3_B_MASK 0xFFFF /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_B_SHIFT 0 /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_B_WIDTH 16 /* AIF1DAC1_EQ_B3_B - [15:0] */ + +/* + * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C + */ +#define WM8995_AIF1DAC1_EQ_B3_C_MASK 0xFFFF /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_C_SHIFT 0 /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_C_WIDTH 16 /* AIF1DAC1_EQ_B3_C - [15:0] */ + +/* + * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG + */ +#define WM8995_AIF1DAC1_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_PG_SHIFT 0 /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B3_PG_WIDTH 16 /* AIF1DAC1_EQ_B3_PG - [15:0] */ + +/* + * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A + */ +#define WM8995_AIF1DAC1_EQ_B4_A_MASK 0xFFFF /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_A_SHIFT 0 /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_A_WIDTH 16 /* AIF1DAC1_EQ_B4_A - [15:0] */ + +/* + * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B + */ +#define WM8995_AIF1DAC1_EQ_B4_B_MASK 0xFFFF /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_B_SHIFT 0 /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_B_WIDTH 16 /* AIF1DAC1_EQ_B4_B - [15:0] */ + +/* + * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C + */ +#define WM8995_AIF1DAC1_EQ_B4_C_MASK 0xFFFF /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_C_SHIFT 0 /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_C_WIDTH 16 /* AIF1DAC1_EQ_B4_C - [15:0] */ + +/* + * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG + */ +#define WM8995_AIF1DAC1_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_PG_SHIFT 0 /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B4_PG_WIDTH 16 /* AIF1DAC1_EQ_B4_PG - [15:0] */ + +/* + * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A + */ +#define WM8995_AIF1DAC1_EQ_B5_A_MASK 0xFFFF /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_A_SHIFT 0 /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_A_WIDTH 16 /* AIF1DAC1_EQ_B5_A - [15:0] */ + +/* + * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B + */ +#define WM8995_AIF1DAC1_EQ_B5_B_MASK 0xFFFF /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_B_SHIFT 0 /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_B_WIDTH 16 /* AIF1DAC1_EQ_B5_B - [15:0] */ + +/* + * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG + */ +#define WM8995_AIF1DAC1_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_PG_SHIFT 0 /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8995_AIF1DAC1_EQ_B5_PG_WIDTH 16 /* AIF1DAC1_EQ_B5_PG - [15:0] */ + +/* + * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1) + */ +#define WM8995_AIF1DAC2_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF1DAC2_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF1DAC2_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define 
WM8995_AIF1DAC2_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC2_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC2_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF1DAC2_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC2_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC2_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF1DAC2_EQ_ENA 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8995_AIF1DAC2_EQ_ENA_MASK 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8995_AIF1DAC2_EQ_ENA_SHIFT 0 /* AIF1DAC2_EQ_ENA */ +#define WM8995_AIF1DAC2_EQ_ENA_WIDTH 1 /* AIF1DAC2_EQ_ENA */ + +/* + * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2) + */ +#define WM8995_AIF1DAC2_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC2_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC2_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF1DAC2_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF1DAC2_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF1DAC2_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ + +/* + * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A + */ +#define WM8995_AIF1DAC2_EQ_B1_A_MASK 0xFFFF /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_A_SHIFT 0 /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_A_WIDTH 16 /* AIF1DAC2_EQ_B1_A - [15:0] */ + +/* + * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B + */ +#define WM8995_AIF1DAC2_EQ_B1_B_MASK 0xFFFF /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_B_SHIFT 0 /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_B_WIDTH 16 /* AIF1DAC2_EQ_B1_B - [15:0] */ + +/* + * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG + */ +#define WM8995_AIF1DAC2_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_PG_SHIFT 0 /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B1_PG_WIDTH 16 /* AIF1DAC2_EQ_B1_PG - [15:0] */ + +/* + * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A + */ +#define WM8995_AIF1DAC2_EQ_B2_A_MASK 0xFFFF /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_A_SHIFT 0 /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_A_WIDTH 16 /* AIF1DAC2_EQ_B2_A - [15:0] */ + +/* + * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B + */ +#define WM8995_AIF1DAC2_EQ_B2_B_MASK 0xFFFF /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_B_SHIFT 0 /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_B_WIDTH 16 /* AIF1DAC2_EQ_B2_B - [15:0] */ + +/* + * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C + */ +#define WM8995_AIF1DAC2_EQ_B2_C_MASK 0xFFFF /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_C_SHIFT 0 /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_C_WIDTH 16 /* AIF1DAC2_EQ_B2_C - [15:0] */ + +/* + * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG + */ +#define WM8995_AIF1DAC2_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_PG_SHIFT 0 /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B2_PG_WIDTH 16 /* AIF1DAC2_EQ_B2_PG - [15:0] */ + +/* + * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A + */ +#define WM8995_AIF1DAC2_EQ_B3_A_MASK 0xFFFF /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_A_SHIFT 0 /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_A_WIDTH 16 /* AIF1DAC2_EQ_B3_A - [15:0] */ + +/* + * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B + */ +#define 
WM8995_AIF1DAC2_EQ_B3_B_MASK 0xFFFF /* AIF1DAC2_EQ_B3_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_B_SHIFT 0 /* AIF1DAC2_EQ_B3_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_B_WIDTH 16 /* AIF1DAC2_EQ_B3_B - [15:0] */ + +/* + * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C + */ +#define WM8995_AIF1DAC2_EQ_B3_C_MASK 0xFFFF /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_C_SHIFT 0 /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_C_WIDTH 16 /* AIF1DAC2_EQ_B3_C - [15:0] */ + +/* + * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG + */ +#define WM8995_AIF1DAC2_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_PG_SHIFT 0 /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B3_PG_WIDTH 16 /* AIF1DAC2_EQ_B3_PG - [15:0] */ + +/* + * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A + */ +#define WM8995_AIF1DAC2_EQ_B4_A_MASK 0xFFFF /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_A_SHIFT 0 /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_A_WIDTH 16 /* AIF1DAC2_EQ_B4_A - [15:0] */ + +/* + * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B + */ +#define WM8995_AIF1DAC2_EQ_B4_B_MASK 0xFFFF /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_B_SHIFT 0 /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_B_WIDTH 16 /* AIF1DAC2_EQ_B4_B - [15:0] */ + +/* + * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C + */ +#define WM8995_AIF1DAC2_EQ_B4_C_MASK 0xFFFF /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_C_SHIFT 0 /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_C_WIDTH 16 /* AIF1DAC2_EQ_B4_C - [15:0] */ + +/* + * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG + */ +#define WM8995_AIF1DAC2_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_PG_SHIFT 0 /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B4_PG_WIDTH 16 /* AIF1DAC2_EQ_B4_PG - [15:0] */ + +/* + * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A + */ +#define WM8995_AIF1DAC2_EQ_B5_A_MASK 0xFFFF /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_A_SHIFT 0 /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_A_WIDTH 16 /* AIF1DAC2_EQ_B5_A - [15:0] */ + +/* + * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B + */ +#define WM8995_AIF1DAC2_EQ_B5_B_MASK 0xFFFF /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_B_SHIFT 0 /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_B_WIDTH 16 /* AIF1DAC2_EQ_B5_B - [15:0] */ + +/* + * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG + */ +#define WM8995_AIF1DAC2_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_PG_SHIFT 0 /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8995_AIF1DAC2_EQ_B5_PG_WIDTH 16 /* AIF1DAC2_EQ_B5_PG - [15:0] */ + +/* + * R1280 (0x500) - AIF2 ADC Left Volume + */ +#define WM8995_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8995_AIF2ADCL_VOL_MASK 0x00FF /* AIF2ADCL_VOL - [7:0] */ +#define WM8995_AIF2ADCL_VOL_SHIFT 0 /* AIF2ADCL_VOL - [7:0] */ +#define WM8995_AIF2ADCL_VOL_WIDTH 8 /* AIF2ADCL_VOL - [7:0] */ + +/* + * R1281 (0x501) - AIF2 ADC Right Volume + */ +#define WM8995_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8995_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8995_AIF2ADCR_VOL_MASK 0x00FF /* AIF2ADCR_VOL - [7:0] */ +#define WM8995_AIF2ADCR_VOL_SHIFT 
0 /* AIF2ADCR_VOL - [7:0] */ +#define WM8995_AIF2ADCR_VOL_WIDTH 8 /* AIF2ADCR_VOL - [7:0] */ + +/* + * R1282 (0x502) - AIF2 DAC Left Volume + */ +#define WM8995_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8995_AIF2DACL_VOL_MASK 0x00FF /* AIF2DACL_VOL - [7:0] */ +#define WM8995_AIF2DACL_VOL_SHIFT 0 /* AIF2DACL_VOL - [7:0] */ +#define WM8995_AIF2DACL_VOL_WIDTH 8 /* AIF2DACL_VOL - [7:0] */ + +/* + * R1283 (0x503) - AIF2 DAC Right Volume + */ +#define WM8995_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8995_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8995_AIF2DACR_VOL_MASK 0x00FF /* AIF2DACR_VOL - [7:0] */ +#define WM8995_AIF2DACR_VOL_SHIFT 0 /* AIF2DACR_VOL - [7:0] */ +#define WM8995_AIF2DACR_VOL_WIDTH 8 /* AIF2DACR_VOL - [7:0] */ + +/* + * R1296 (0x510) - AIF2 ADC Filters + */ +#define WM8995_AIF2ADC_4FS 0x8000 /* AIF2ADC_4FS */ +#define WM8995_AIF2ADC_4FS_MASK 0x8000 /* AIF2ADC_4FS */ +#define WM8995_AIF2ADC_4FS_SHIFT 15 /* AIF2ADC_4FS */ +#define WM8995_AIF2ADC_4FS_WIDTH 1 /* AIF2ADC_4FS */ +#define WM8995_AIF2ADCL_HPF 0x1000 /* AIF2ADCL_HPF */ +#define WM8995_AIF2ADCL_HPF_MASK 0x1000 /* AIF2ADCL_HPF */ +#define WM8995_AIF2ADCL_HPF_SHIFT 12 /* AIF2ADCL_HPF */ +#define WM8995_AIF2ADCL_HPF_WIDTH 1 /* AIF2ADCL_HPF */ +#define WM8995_AIF2ADCR_HPF 0x0800 /* AIF2ADCR_HPF */ +#define WM8995_AIF2ADCR_HPF_MASK 0x0800 /* AIF2ADCR_HPF */ +#define WM8995_AIF2ADCR_HPF_SHIFT 11 /* AIF2ADCR_HPF */ +#define WM8995_AIF2ADCR_HPF_WIDTH 1 /* AIF2ADCR_HPF */ +#define WM8995_AIF2ADC_HPF_MODE 0x0008 /* AIF2ADC_HPF_MODE */ +#define WM8995_AIF2ADC_HPF_MODE_MASK 0x0008 /* AIF2ADC_HPF_MODE */ +#define WM8995_AIF2ADC_HPF_MODE_SHIFT 3 /* AIF2ADC_HPF_MODE */ +#define WM8995_AIF2ADC_HPF_MODE_WIDTH 1 /* AIF2ADC_HPF_MODE */ +#define WM8995_AIF2ADC_HPF_CUT_MASK 0x0007 /* AIF2ADC_HPF_CUT - [2:0] */ +#define WM8995_AIF2ADC_HPF_CUT_SHIFT 0 /* AIF2ADC_HPF_CUT - [2:0] */ +#define WM8995_AIF2ADC_HPF_CUT_WIDTH 3 /* AIF2ADC_HPF_CUT - [2:0] */ + +/* + * R1312 (0x520) - AIF2 DAC Filters (1) + */ +#define WM8995_AIF2DAC_MUTE 0x0200 /* AIF2DAC_MUTE */ +#define WM8995_AIF2DAC_MUTE_MASK 0x0200 /* AIF2DAC_MUTE */ +#define WM8995_AIF2DAC_MUTE_SHIFT 9 /* AIF2DAC_MUTE */ +#define WM8995_AIF2DAC_MUTE_WIDTH 1 /* AIF2DAC_MUTE */ +#define WM8995_AIF2DAC_MONO 0x0080 /* AIF2DAC_MONO */ +#define WM8995_AIF2DAC_MONO_MASK 0x0080 /* AIF2DAC_MONO */ +#define WM8995_AIF2DAC_MONO_SHIFT 7 /* AIF2DAC_MONO */ +#define WM8995_AIF2DAC_MONO_WIDTH 1 /* AIF2DAC_MONO */ +#define WM8995_AIF2DAC_MUTERATE 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8995_AIF2DAC_MUTERATE_MASK 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8995_AIF2DAC_MUTERATE_SHIFT 5 /* AIF2DAC_MUTERATE */ +#define WM8995_AIF2DAC_MUTERATE_WIDTH 1 /* AIF2DAC_MUTERATE */ +#define WM8995_AIF2DAC_UNMUTE_RAMP 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8995_AIF2DAC_UNMUTE_RAMP_MASK 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8995_AIF2DAC_UNMUTE_RAMP_SHIFT 4 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8995_AIF2DAC_UNMUTE_RAMP_WIDTH 1 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8995_AIF2DAC_DEEMP_MASK 0x0006 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8995_AIF2DAC_DEEMP_SHIFT 1 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8995_AIF2DAC_DEEMP_WIDTH 2 /* AIF2DAC_DEEMP - [2:1] */ + +/* + * R1313 (0x521) - AIF2 DAC Filters (2) + */ +#define WM8995_AIF2DAC_3D_GAIN_MASK 0x3E00 
/* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8995_AIF2DAC_3D_GAIN_SHIFT 9 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8995_AIF2DAC_3D_GAIN_WIDTH 5 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8995_AIF2DAC_3D_ENA 0x0100 /* AIF2DAC_3D_ENA */ +#define WM8995_AIF2DAC_3D_ENA_MASK 0x0100 /* AIF2DAC_3D_ENA */ +#define WM8995_AIF2DAC_3D_ENA_SHIFT 8 /* AIF2DAC_3D_ENA */ +#define WM8995_AIF2DAC_3D_ENA_WIDTH 1 /* AIF2DAC_3D_ENA */ + +/* + * R1344 (0x540) - AIF2 DRC (1) + */ +#define WM8995_AIF2DRC_SIG_DET_RMS_MASK 0xF800 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF2DRC_SIG_DET_RMS_SHIFT 11 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF2DRC_SIG_DET_RMS_WIDTH 5 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8995_AIF2DRC_SIG_DET_PK_MASK 0x0600 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8995_AIF2DRC_SIG_DET_PK_SHIFT 9 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8995_AIF2DRC_SIG_DET_PK_WIDTH 2 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8995_AIF2DRC_NG_ENA 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8995_AIF2DRC_NG_ENA_MASK 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8995_AIF2DRC_NG_ENA_SHIFT 8 /* AIF2DRC_NG_ENA */ +#define WM8995_AIF2DRC_NG_ENA_WIDTH 1 /* AIF2DRC_NG_ENA */ +#define WM8995_AIF2DRC_SIG_DET_MODE 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8995_AIF2DRC_SIG_DET_MODE_MASK 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8995_AIF2DRC_SIG_DET_MODE_SHIFT 7 /* AIF2DRC_SIG_DET_MODE */ +#define WM8995_AIF2DRC_SIG_DET_MODE_WIDTH 1 /* AIF2DRC_SIG_DET_MODE */ +#define WM8995_AIF2DRC_SIG_DET 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8995_AIF2DRC_SIG_DET_MASK 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8995_AIF2DRC_SIG_DET_SHIFT 6 /* AIF2DRC_SIG_DET */ +#define WM8995_AIF2DRC_SIG_DET_WIDTH 1 /* AIF2DRC_SIG_DET */ +#define WM8995_AIF2DRC_KNEE2_OP_ENA 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8995_AIF2DRC_KNEE2_OP_ENA_MASK 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8995_AIF2DRC_KNEE2_OP_ENA_SHIFT 5 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8995_AIF2DRC_KNEE2_OP_ENA_WIDTH 1 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8995_AIF2DRC_QR 0x0010 /* AIF2DRC_QR */ +#define WM8995_AIF2DRC_QR_MASK 0x0010 /* AIF2DRC_QR */ +#define WM8995_AIF2DRC_QR_SHIFT 4 /* AIF2DRC_QR */ +#define WM8995_AIF2DRC_QR_WIDTH 1 /* AIF2DRC_QR */ +#define WM8995_AIF2DRC_ANTICLIP 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8995_AIF2DRC_ANTICLIP_MASK 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8995_AIF2DRC_ANTICLIP_SHIFT 3 /* AIF2DRC_ANTICLIP */ +#define WM8995_AIF2DRC_ANTICLIP_WIDTH 1 /* AIF2DRC_ANTICLIP */ +#define WM8995_AIF2DAC_DRC_ENA 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8995_AIF2DAC_DRC_ENA_MASK 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8995_AIF2DAC_DRC_ENA_SHIFT 2 /* AIF2DAC_DRC_ENA */ +#define WM8995_AIF2DAC_DRC_ENA_WIDTH 1 /* AIF2DAC_DRC_ENA */ +#define WM8995_AIF2ADCL_DRC_ENA 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8995_AIF2ADCL_DRC_ENA_MASK 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8995_AIF2ADCL_DRC_ENA_SHIFT 1 /* AIF2ADCL_DRC_ENA */ +#define WM8995_AIF2ADCL_DRC_ENA_WIDTH 1 /* AIF2ADCL_DRC_ENA */ +#define WM8995_AIF2ADCR_DRC_ENA 0x0001 /* AIF2ADCR_DRC_ENA */ +#define WM8995_AIF2ADCR_DRC_ENA_MASK 0x0001 /* AIF2ADCR_DRC_ENA */ +#define WM8995_AIF2ADCR_DRC_ENA_SHIFT 0 /* AIF2ADCR_DRC_ENA */ +#define WM8995_AIF2ADCR_DRC_ENA_WIDTH 1 /* AIF2ADCR_DRC_ENA */ + +/* + * R1345 (0x541) - AIF2 DRC (2) + */ +#define WM8995_AIF2DRC_ATK_MASK 0x1E00 /* AIF2DRC_ATK - [12:9] */ +#define WM8995_AIF2DRC_ATK_SHIFT 9 /* AIF2DRC_ATK - [12:9] */ +#define WM8995_AIF2DRC_ATK_WIDTH 4 /* AIF2DRC_ATK - [12:9] */ +#define WM8995_AIF2DRC_DCY_MASK 0x01E0 /* AIF2DRC_DCY - [8:5] 
*/ +#define WM8995_AIF2DRC_DCY_SHIFT 5 /* AIF2DRC_DCY - [8:5] */ +#define WM8995_AIF2DRC_DCY_WIDTH 4 /* AIF2DRC_DCY - [8:5] */ +#define WM8995_AIF2DRC_MINGAIN_MASK 0x001C /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8995_AIF2DRC_MINGAIN_SHIFT 2 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8995_AIF2DRC_MINGAIN_WIDTH 3 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8995_AIF2DRC_MAXGAIN_MASK 0x0003 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8995_AIF2DRC_MAXGAIN_SHIFT 0 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8995_AIF2DRC_MAXGAIN_WIDTH 2 /* AIF2DRC_MAXGAIN - [1:0] */ + +/* + * R1346 (0x542) - AIF2 DRC (3) + */ +#define WM8995_AIF2DRC_NG_MINGAIN_MASK 0xF000 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8995_AIF2DRC_NG_MINGAIN_SHIFT 12 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8995_AIF2DRC_NG_MINGAIN_WIDTH 4 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8995_AIF2DRC_NG_EXP_MASK 0x0C00 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8995_AIF2DRC_NG_EXP_SHIFT 10 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8995_AIF2DRC_NG_EXP_WIDTH 2 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8995_AIF2DRC_QR_THR_MASK 0x0300 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8995_AIF2DRC_QR_THR_SHIFT 8 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8995_AIF2DRC_QR_THR_WIDTH 2 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8995_AIF2DRC_QR_DCY_MASK 0x00C0 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8995_AIF2DRC_QR_DCY_SHIFT 6 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8995_AIF2DRC_QR_DCY_WIDTH 2 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8995_AIF2DRC_HI_COMP_MASK 0x0038 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8995_AIF2DRC_HI_COMP_SHIFT 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8995_AIF2DRC_HI_COMP_WIDTH 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8995_AIF2DRC_LO_COMP_MASK 0x0007 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8995_AIF2DRC_LO_COMP_SHIFT 0 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8995_AIF2DRC_LO_COMP_WIDTH 3 /* AIF2DRC_LO_COMP - [2:0] */ + +/* + * R1347 (0x543) - AIF2 DRC (4) + */ +#define WM8995_AIF2DRC_KNEE_IP_MASK 0x07E0 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8995_AIF2DRC_KNEE_IP_SHIFT 5 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8995_AIF2DRC_KNEE_IP_WIDTH 6 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8995_AIF2DRC_KNEE_OP_MASK 0x001F /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8995_AIF2DRC_KNEE_OP_SHIFT 0 /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8995_AIF2DRC_KNEE_OP_WIDTH 5 /* AIF2DRC_KNEE_OP - [4:0] */ + +/* + * R1348 (0x544) - AIF2 DRC (5) + */ +#define WM8995_AIF2DRC_KNEE2_IP_MASK 0x03E0 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8995_AIF2DRC_KNEE2_IP_SHIFT 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8995_AIF2DRC_KNEE2_IP_WIDTH 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8995_AIF2DRC_KNEE2_OP_MASK 0x001F /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8995_AIF2DRC_KNEE2_OP_SHIFT 0 /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8995_AIF2DRC_KNEE2_OP_WIDTH 5 /* AIF2DRC_KNEE2_OP - [4:0] */ + +/* + * R1408 (0x580) - AIF2 EQ Gains (1) + */ +#define WM8995_AIF2DAC_EQ_B1_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B1_GAIN_SHIFT 11 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B1_GAIN_WIDTH 5 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B2_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF2DAC_EQ_B2_GAIN_SHIFT 6 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF2DAC_EQ_B2_GAIN_WIDTH 5 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8995_AIF2DAC_EQ_B3_GAIN_MASK 0x003E /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF2DAC_EQ_B3_GAIN_SHIFT 1 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define 
WM8995_AIF2DAC_EQ_B3_GAIN_WIDTH 5 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8995_AIF2DAC_EQ_ENA 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8995_AIF2DAC_EQ_ENA_MASK 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8995_AIF2DAC_EQ_ENA_SHIFT 0 /* AIF2DAC_EQ_ENA */ +#define WM8995_AIF2DAC_EQ_ENA_WIDTH 1 /* AIF2DAC_EQ_ENA */ + +/* + * R1409 (0x581) - AIF2 EQ Gains (2) + */ +#define WM8995_AIF2DAC_EQ_B4_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B4_GAIN_SHIFT 11 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B4_GAIN_WIDTH 5 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8995_AIF2DAC_EQ_B5_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF2DAC_EQ_B5_GAIN_SHIFT 6 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8995_AIF2DAC_EQ_B5_GAIN_WIDTH 5 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ + +/* + * R1410 (0x582) - AIF2 EQ Band 1 A + */ +#define WM8995_AIF2DAC_EQ_B1_A_MASK 0xFFFF /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_A_SHIFT 0 /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_A_WIDTH 16 /* AIF2DAC_EQ_B1_A - [15:0] */ + +/* + * R1411 (0x583) - AIF2 EQ Band 1 B + */ +#define WM8995_AIF2DAC_EQ_B1_B_MASK 0xFFFF /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_B_SHIFT 0 /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_B_WIDTH 16 /* AIF2DAC_EQ_B1_B - [15:0] */ + +/* + * R1412 (0x584) - AIF2 EQ Band 1 PG + */ +#define WM8995_AIF2DAC_EQ_B1_PG_MASK 0xFFFF /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_PG_SHIFT 0 /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B1_PG_WIDTH 16 /* AIF2DAC_EQ_B1_PG - [15:0] */ + +/* + * R1413 (0x585) - AIF2 EQ Band 2 A + */ +#define WM8995_AIF2DAC_EQ_B2_A_MASK 0xFFFF /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_A_SHIFT 0 /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_A_WIDTH 16 /* AIF2DAC_EQ_B2_A - [15:0] */ + +/* + * R1414 (0x586) - AIF2 EQ Band 2 B + */ +#define WM8995_AIF2DAC_EQ_B2_B_MASK 0xFFFF /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_B_SHIFT 0 /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_B_WIDTH 16 /* AIF2DAC_EQ_B2_B - [15:0] */ + +/* + * R1415 (0x587) - AIF2 EQ Band 2 C + */ +#define WM8995_AIF2DAC_EQ_B2_C_MASK 0xFFFF /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_C_SHIFT 0 /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_C_WIDTH 16 /* AIF2DAC_EQ_B2_C - [15:0] */ + +/* + * R1416 (0x588) - AIF2 EQ Band 2 PG + */ +#define WM8995_AIF2DAC_EQ_B2_PG_MASK 0xFFFF /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_PG_SHIFT 0 /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B2_PG_WIDTH 16 /* AIF2DAC_EQ_B2_PG - [15:0] */ + +/* + * R1417 (0x589) - AIF2 EQ Band 3 A + */ +#define WM8995_AIF2DAC_EQ_B3_A_MASK 0xFFFF /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_A_SHIFT 0 /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_A_WIDTH 16 /* AIF2DAC_EQ_B3_A - [15:0] */ + +/* + * R1418 (0x58A) - AIF2 EQ Band 3 B + */ +#define WM8995_AIF2DAC_EQ_B3_B_MASK 0xFFFF /* AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_B_SHIFT 0 /* AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_B_WIDTH 16 /* AIF2DAC_EQ_B3_B - [15:0] */ + +/* + * R1419 (0x58B) - AIF2 EQ Band 3 C + */ +#define WM8995_AIF2DAC_EQ_B3_C_MASK 0xFFFF /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_C_SHIFT 0 /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_C_WIDTH 16 /* AIF2DAC_EQ_B3_C - [15:0] */ + +/* + * R1420 (0x58C) - AIF2 EQ Band 3 PG + 
*/ +#define WM8995_AIF2DAC_EQ_B3_PG_MASK 0xFFFF /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_PG_SHIFT 0 /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B3_PG_WIDTH 16 /* AIF2DAC_EQ_B3_PG - [15:0] */ + +/* + * R1421 (0x58D) - AIF2 EQ Band 4 A + */ +#define WM8995_AIF2DAC_EQ_B4_A_MASK 0xFFFF /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_A_SHIFT 0 /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_A_WIDTH 16 /* AIF2DAC_EQ_B4_A - [15:0] */ + +/* + * R1422 (0x58E) - AIF2 EQ Band 4 B + */ +#define WM8995_AIF2DAC_EQ_B4_B_MASK 0xFFFF /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_B_SHIFT 0 /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_B_WIDTH 16 /* AIF2DAC_EQ_B4_B - [15:0] */ + +/* + * R1423 (0x58F) - AIF2 EQ Band 4 C + */ +#define WM8995_AIF2DAC_EQ_B4_C_MASK 0xFFFF /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_C_SHIFT 0 /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_C_WIDTH 16 /* AIF2DAC_EQ_B4_C - [15:0] */ + +/* + * R1424 (0x590) - AIF2 EQ Band 4 PG + */ +#define WM8995_AIF2DAC_EQ_B4_PG_MASK 0xFFFF /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_PG_SHIFT 0 /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B4_PG_WIDTH 16 /* AIF2DAC_EQ_B4_PG - [15:0] */ + +/* + * R1425 (0x591) - AIF2 EQ Band 5 A + */ +#define WM8995_AIF2DAC_EQ_B5_A_MASK 0xFFFF /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_A_SHIFT 0 /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_A_WIDTH 16 /* AIF2DAC_EQ_B5_A - [15:0] */ + +/* + * R1426 (0x592) - AIF2 EQ Band 5 B + */ +#define WM8995_AIF2DAC_EQ_B5_B_MASK 0xFFFF /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_B_SHIFT 0 /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_B_WIDTH 16 /* AIF2DAC_EQ_B5_B - [15:0] */ + +/* + * R1427 (0x593) - AIF2 EQ Band 5 PG + */ +#define WM8995_AIF2DAC_EQ_B5_PG_MASK 0xFFFF /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_PG_SHIFT 0 /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8995_AIF2DAC_EQ_B5_PG_WIDTH 16 /* AIF2DAC_EQ_B5_PG - [15:0] */ + +/* + * R1536 (0x600) - DAC1 Mixer Volumes + */ +#define WM8995_ADCR_DAC1_VOL_MASK 0x03E0 /* ADCR_DAC1_VOL - [9:5] */ +#define WM8995_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [9:5] */ +#define WM8995_ADCR_DAC1_VOL_WIDTH 5 /* ADCR_DAC1_VOL - [9:5] */ +#define WM8995_ADCL_DAC1_VOL_MASK 0x001F /* ADCL_DAC1_VOL - [4:0] */ +#define WM8995_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [4:0] */ +#define WM8995_ADCL_DAC1_VOL_WIDTH 5 /* ADCL_DAC1_VOL - [4:0] */ + +/* + * R1537 (0x601) - DAC1 Left Mixer Routing + */ +#define WM8995_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */ +#define WM8995_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */ +#define WM8995_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */ +#define WM8995_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */ +#define WM8995_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */ +#define WM8995_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */ +#define WM8995_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */ +#define WM8995_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */ +#define WM8995_AIF2DACL_TO_DAC1L 0x0004 /* AIF2DACL_TO_DAC1L */ +#define WM8995_AIF2DACL_TO_DAC1L_MASK 0x0004 /* AIF2DACL_TO_DAC1L */ +#define WM8995_AIF2DACL_TO_DAC1L_SHIFT 2 /* AIF2DACL_TO_DAC1L */ +#define WM8995_AIF2DACL_TO_DAC1L_WIDTH 1 /* AIF2DACL_TO_DAC1L */ +#define WM8995_AIF1DAC2L_TO_DAC1L 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8995_AIF1DAC2L_TO_DAC1L_MASK 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8995_AIF1DAC2L_TO_DAC1L_SHIFT 1 /* AIF1DAC2L_TO_DAC1L 
*/ +#define WM8995_AIF1DAC2L_TO_DAC1L_WIDTH 1 /* AIF1DAC2L_TO_DAC1L */ +#define WM8995_AIF1DAC1L_TO_DAC1L 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8995_AIF1DAC1L_TO_DAC1L_MASK 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8995_AIF1DAC1L_TO_DAC1L_SHIFT 0 /* AIF1DAC1L_TO_DAC1L */ +#define WM8995_AIF1DAC1L_TO_DAC1L_WIDTH 1 /* AIF1DAC1L_TO_DAC1L */ + +/* + * R1538 (0x602) - DAC1 Right Mixer Routing + */ +#define WM8995_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */ +#define WM8995_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */ +#define WM8995_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */ +#define WM8995_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */ +#define WM8995_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */ +#define WM8995_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */ +#define WM8995_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */ +#define WM8995_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */ +#define WM8995_AIF2DACR_TO_DAC1R 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8995_AIF2DACR_TO_DAC1R_MASK 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8995_AIF2DACR_TO_DAC1R_SHIFT 2 /* AIF2DACR_TO_DAC1R */ +#define WM8995_AIF2DACR_TO_DAC1R_WIDTH 1 /* AIF2DACR_TO_DAC1R */ +#define WM8995_AIF1DAC2R_TO_DAC1R 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8995_AIF1DAC2R_TO_DAC1R_MASK 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8995_AIF1DAC2R_TO_DAC1R_SHIFT 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8995_AIF1DAC2R_TO_DAC1R_WIDTH 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8995_AIF1DAC1R_TO_DAC1R 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8995_AIF1DAC1R_TO_DAC1R_MASK 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8995_AIF1DAC1R_TO_DAC1R_SHIFT 0 /* AIF1DAC1R_TO_DAC1R */ +#define WM8995_AIF1DAC1R_TO_DAC1R_WIDTH 1 /* AIF1DAC1R_TO_DAC1R */ + +/* + * R1539 (0x603) - DAC2 Mixer Volumes + */ +#define WM8995_ADCR_DAC2_VOL_MASK 0x03E0 /* ADCR_DAC2_VOL - [9:5] */ +#define WM8995_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [9:5] */ +#define WM8995_ADCR_DAC2_VOL_WIDTH 5 /* ADCR_DAC2_VOL - [9:5] */ +#define WM8995_ADCL_DAC2_VOL_MASK 0x001F /* ADCL_DAC2_VOL - [4:0] */ +#define WM8995_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [4:0] */ +#define WM8995_ADCL_DAC2_VOL_WIDTH 5 /* ADCL_DAC2_VOL - [4:0] */ + +/* + * R1540 (0x604) - DAC2 Left Mixer Routing + */ +#define WM8995_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */ +#define WM8995_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */ +#define WM8995_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */ +#define WM8995_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */ +#define WM8995_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */ +#define WM8995_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */ +#define WM8995_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */ +#define WM8995_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */ +#define WM8995_AIF2DACL_TO_DAC2L 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8995_AIF2DACL_TO_DAC2L_MASK 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8995_AIF2DACL_TO_DAC2L_SHIFT 2 /* AIF2DACL_TO_DAC2L */ +#define WM8995_AIF2DACL_TO_DAC2L_WIDTH 1 /* AIF2DACL_TO_DAC2L */ +#define WM8995_AIF1DAC2L_TO_DAC2L 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8995_AIF1DAC2L_TO_DAC2L_MASK 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8995_AIF1DAC2L_TO_DAC2L_SHIFT 1 /* AIF1DAC2L_TO_DAC2L */ +#define WM8995_AIF1DAC2L_TO_DAC2L_WIDTH 1 /* AIF1DAC2L_TO_DAC2L */ +#define WM8995_AIF1DAC1L_TO_DAC2L 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8995_AIF1DAC1L_TO_DAC2L_MASK 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8995_AIF1DAC1L_TO_DAC2L_SHIFT 0 /* AIF1DAC1L_TO_DAC2L */ +#define WM8995_AIF1DAC1L_TO_DAC2L_WIDTH 1 /* AIF1DAC1L_TO_DAC2L */ + +/* + * R1541 (0x605) - DAC2 Right Mixer Routing + */ +#define 
WM8995_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */ +#define WM8995_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */ +#define WM8995_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */ +#define WM8995_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */ +#define WM8995_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */ +#define WM8995_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */ +#define WM8995_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */ +#define WM8995_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */ +#define WM8995_AIF2DACR_TO_DAC2R 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8995_AIF2DACR_TO_DAC2R_MASK 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8995_AIF2DACR_TO_DAC2R_SHIFT 2 /* AIF2DACR_TO_DAC2R */ +#define WM8995_AIF2DACR_TO_DAC2R_WIDTH 1 /* AIF2DACR_TO_DAC2R */ +#define WM8995_AIF1DAC2R_TO_DAC2R 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8995_AIF1DAC2R_TO_DAC2R_MASK 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8995_AIF1DAC2R_TO_DAC2R_SHIFT 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8995_AIF1DAC2R_TO_DAC2R_WIDTH 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8995_AIF1DAC1R_TO_DAC2R 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8995_AIF1DAC1R_TO_DAC2R_MASK 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8995_AIF1DAC1R_TO_DAC2R_SHIFT 0 /* AIF1DAC1R_TO_DAC2R */ +#define WM8995_AIF1DAC1R_TO_DAC2R_WIDTH 1 /* AIF1DAC1R_TO_DAC2R */ + +/* + * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing + */ +#define WM8995_ADC1L_TO_AIF1ADC1L 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8995_ADC1L_TO_AIF1ADC1L_MASK 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8995_ADC1L_TO_AIF1ADC1L_SHIFT 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8995_ADC1L_TO_AIF1ADC1L_WIDTH 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8995_AIF2DACL_TO_AIF1ADC1L 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8995_AIF2DACL_TO_AIF1ADC1L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8995_AIF2DACL_TO_AIF1ADC1L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8995_AIF2DACL_TO_AIF1ADC1L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC1L */ + +/* + * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing + */ +#define WM8995_ADC1R_TO_AIF1ADC1R 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8995_ADC1R_TO_AIF1ADC1R_MASK 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8995_ADC1R_TO_AIF1ADC1R_SHIFT 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8995_ADC1R_TO_AIF1ADC1R_WIDTH 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8995_AIF2DACR_TO_AIF1ADC1R 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8995_AIF2DACR_TO_AIF1ADC1R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8995_AIF2DACR_TO_AIF1ADC1R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8995_AIF2DACR_TO_AIF1ADC1R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC1R */ + +/* + * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing + */ +#define WM8995_ADC2L_TO_AIF1ADC2L 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8995_ADC2L_TO_AIF1ADC2L_MASK 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8995_ADC2L_TO_AIF1ADC2L_SHIFT 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8995_ADC2L_TO_AIF1ADC2L_WIDTH 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8995_AIF2DACL_TO_AIF1ADC2L 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8995_AIF2DACL_TO_AIF1ADC2L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8995_AIF2DACL_TO_AIF1ADC2L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8995_AIF2DACL_TO_AIF1ADC2L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC2L */ + +/* + * R1545 (0x609) - AIF1 ADC2 Right mixer Routing + */ +#define WM8995_ADC2R_TO_AIF1ADC2R 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8995_ADC2R_TO_AIF1ADC2R_MASK 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8995_ADC2R_TO_AIF1ADC2R_SHIFT 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8995_ADC2R_TO_AIF1ADC2R_WIDTH 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8995_AIF2DACR_TO_AIF1ADC2R 
0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8995_AIF2DACR_TO_AIF1ADC2R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8995_AIF2DACR_TO_AIF1ADC2R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8995_AIF2DACR_TO_AIF1ADC2R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC2R */ + +/* + * R1552 (0x610) - DAC Softmute + */ +#define WM8995_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8995_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8995_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */ +#define WM8995_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */ +#define WM8995_DAC_MUTERATE 0x0001 /* DAC_MUTERATE */ +#define WM8995_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */ +#define WM8995_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */ +#define WM8995_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ + +/* + * R1568 (0x620) - Oversampling + */ +#define WM8995_ADC_OSR128 0x0002 /* ADC_OSR128 */ +#define WM8995_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */ +#define WM8995_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */ +#define WM8995_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */ +#define WM8995_DAC_OSR128 0x0001 /* DAC_OSR128 */ +#define WM8995_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */ +#define WM8995_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */ +#define WM8995_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */ + +/* + * R1569 (0x621) - Sidetone + */ +#define WM8995_ST_LPF 0x1000 /* ST_LPF */ +#define WM8995_ST_LPF_MASK 0x1000 /* ST_LPF */ +#define WM8995_ST_LPF_SHIFT 12 /* ST_LPF */ +#define WM8995_ST_LPF_WIDTH 1 /* ST_LPF */ +#define WM8995_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */ +#define WM8995_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */ +#define WM8995_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */ +#define WM8995_ST_HPF 0x0040 /* ST_HPF */ +#define WM8995_ST_HPF_MASK 0x0040 /* ST_HPF */ +#define WM8995_ST_HPF_SHIFT 6 /* ST_HPF */ +#define WM8995_ST_HPF_WIDTH 1 /* ST_HPF */ +#define WM8995_STR_SEL 0x0002 /* STR_SEL */ +#define WM8995_STR_SEL_MASK 0x0002 /* STR_SEL */ +#define WM8995_STR_SEL_SHIFT 1 /* STR_SEL */ +#define WM8995_STR_SEL_WIDTH 1 /* STR_SEL */ +#define WM8995_STL_SEL 0x0001 /* STL_SEL */ +#define WM8995_STL_SEL_MASK 0x0001 /* STL_SEL */ +#define WM8995_STL_SEL_SHIFT 0 /* STL_SEL */ +#define WM8995_STL_SEL_WIDTH 1 /* STL_SEL */ + +/* + * R1792 (0x700) - GPIO 1 + */ +#define WM8995_GP1_DIR 0x8000 /* GP1_DIR */ +#define WM8995_GP1_DIR_MASK 0x8000 /* GP1_DIR */ +#define WM8995_GP1_DIR_SHIFT 15 /* GP1_DIR */ +#define WM8995_GP1_DIR_WIDTH 1 /* GP1_DIR */ +#define WM8995_GP1_PU 0x4000 /* GP1_PU */ +#define WM8995_GP1_PU_MASK 0x4000 /* GP1_PU */ +#define WM8995_GP1_PU_SHIFT 14 /* GP1_PU */ +#define WM8995_GP1_PU_WIDTH 1 /* GP1_PU */ +#define WM8995_GP1_PD 0x2000 /* GP1_PD */ +#define WM8995_GP1_PD_MASK 0x2000 /* GP1_PD */ +#define WM8995_GP1_PD_SHIFT 13 /* GP1_PD */ +#define WM8995_GP1_PD_WIDTH 1 /* GP1_PD */ +#define WM8995_GP1_POL 0x0400 /* GP1_POL */ +#define WM8995_GP1_POL_MASK 0x0400 /* GP1_POL */ +#define WM8995_GP1_POL_SHIFT 10 /* GP1_POL */ +#define WM8995_GP1_POL_WIDTH 1 /* GP1_POL */ +#define WM8995_GP1_OP_CFG 0x0200 /* GP1_OP_CFG */ +#define WM8995_GP1_OP_CFG_MASK 0x0200 /* GP1_OP_CFG */ +#define WM8995_GP1_OP_CFG_SHIFT 9 /* GP1_OP_CFG */ +#define WM8995_GP1_OP_CFG_WIDTH 1 /* GP1_OP_CFG */ +#define WM8995_GP1_DB 0x0100 /* GP1_DB */ +#define WM8995_GP1_DB_MASK 0x0100 /* GP1_DB */ +#define WM8995_GP1_DB_SHIFT 8 /* GP1_DB */ +#define WM8995_GP1_DB_WIDTH 1 /* GP1_DB */ +#define WM8995_GP1_LVL 0x0040 /* GP1_LVL */ +#define WM8995_GP1_LVL_MASK 0x0040 /* GP1_LVL */ +#define WM8995_GP1_LVL_SHIFT 6 /* GP1_LVL */ +#define WM8995_GP1_LVL_WIDTH 1 /* 
GP1_LVL */ +#define WM8995_GP1_FN_MASK 0x001F /* GP1_FN - [4:0] */ +#define WM8995_GP1_FN_SHIFT 0 /* GP1_FN - [4:0] */ +#define WM8995_GP1_FN_WIDTH 5 /* GP1_FN - [4:0] */ + +/* + * R1793 (0x701) - GPIO 2 + */ +#define WM8995_GP2_DIR 0x8000 /* GP2_DIR */ +#define WM8995_GP2_DIR_MASK 0x8000 /* GP2_DIR */ +#define WM8995_GP2_DIR_SHIFT 15 /* GP2_DIR */ +#define WM8995_GP2_DIR_WIDTH 1 /* GP2_DIR */ +#define WM8995_GP2_PU 0x4000 /* GP2_PU */ +#define WM8995_GP2_PU_MASK 0x4000 /* GP2_PU */ +#define WM8995_GP2_PU_SHIFT 14 /* GP2_PU */ +#define WM8995_GP2_PU_WIDTH 1 /* GP2_PU */ +#define WM8995_GP2_PD 0x2000 /* GP2_PD */ +#define WM8995_GP2_PD_MASK 0x2000 /* GP2_PD */ +#define WM8995_GP2_PD_SHIFT 13 /* GP2_PD */ +#define WM8995_GP2_PD_WIDTH 1 /* GP2_PD */ +#define WM8995_GP2_POL 0x0400 /* GP2_POL */ +#define WM8995_GP2_POL_MASK 0x0400 /* GP2_POL */ +#define WM8995_GP2_POL_SHIFT 10 /* GP2_POL */ +#define WM8995_GP2_POL_WIDTH 1 /* GP2_POL */ +#define WM8995_GP2_OP_CFG 0x0200 /* GP2_OP_CFG */ +#define WM8995_GP2_OP_CFG_MASK 0x0200 /* GP2_OP_CFG */ +#define WM8995_GP2_OP_CFG_SHIFT 9 /* GP2_OP_CFG */ +#define WM8995_GP2_OP_CFG_WIDTH 1 /* GP2_OP_CFG */ +#define WM8995_GP2_DB 0x0100 /* GP2_DB */ +#define WM8995_GP2_DB_MASK 0x0100 /* GP2_DB */ +#define WM8995_GP2_DB_SHIFT 8 /* GP2_DB */ +#define WM8995_GP2_DB_WIDTH 1 /* GP2_DB */ +#define WM8995_GP2_LVL 0x0040 /* GP2_LVL */ +#define WM8995_GP2_LVL_MASK 0x0040 /* GP2_LVL */ +#define WM8995_GP2_LVL_SHIFT 6 /* GP2_LVL */ +#define WM8995_GP2_LVL_WIDTH 1 /* GP2_LVL */ +#define WM8995_GP2_FN_MASK 0x001F /* GP2_FN - [4:0] */ +#define WM8995_GP2_FN_SHIFT 0 /* GP2_FN - [4:0] */ +#define WM8995_GP2_FN_WIDTH 5 /* GP2_FN - [4:0] */ + +/* + * R1794 (0x702) - GPIO 3 + */ +#define WM8995_GP3_DIR 0x8000 /* GP3_DIR */ +#define WM8995_GP3_DIR_MASK 0x8000 /* GP3_DIR */ +#define WM8995_GP3_DIR_SHIFT 15 /* GP3_DIR */ +#define WM8995_GP3_DIR_WIDTH 1 /* GP3_DIR */ +#define WM8995_GP3_PU 0x4000 /* GP3_PU */ +#define WM8995_GP3_PU_MASK 0x4000 /* GP3_PU */ +#define WM8995_GP3_PU_SHIFT 14 /* GP3_PU */ +#define WM8995_GP3_PU_WIDTH 1 /* GP3_PU */ +#define WM8995_GP3_PD 0x2000 /* GP3_PD */ +#define WM8995_GP3_PD_MASK 0x2000 /* GP3_PD */ +#define WM8995_GP3_PD_SHIFT 13 /* GP3_PD */ +#define WM8995_GP3_PD_WIDTH 1 /* GP3_PD */ +#define WM8995_GP3_POL 0x0400 /* GP3_POL */ +#define WM8995_GP3_POL_MASK 0x0400 /* GP3_POL */ +#define WM8995_GP3_POL_SHIFT 10 /* GP3_POL */ +#define WM8995_GP3_POL_WIDTH 1 /* GP3_POL */ +#define WM8995_GP3_OP_CFG 0x0200 /* GP3_OP_CFG */ +#define WM8995_GP3_OP_CFG_MASK 0x0200 /* GP3_OP_CFG */ +#define WM8995_GP3_OP_CFG_SHIFT 9 /* GP3_OP_CFG */ +#define WM8995_GP3_OP_CFG_WIDTH 1 /* GP3_OP_CFG */ +#define WM8995_GP3_DB 0x0100 /* GP3_DB */ +#define WM8995_GP3_DB_MASK 0x0100 /* GP3_DB */ +#define WM8995_GP3_DB_SHIFT 8 /* GP3_DB */ +#define WM8995_GP3_DB_WIDTH 1 /* GP3_DB */ +#define WM8995_GP3_LVL 0x0040 /* GP3_LVL */ +#define WM8995_GP3_LVL_MASK 0x0040 /* GP3_LVL */ +#define WM8995_GP3_LVL_SHIFT 6 /* GP3_LVL */ +#define WM8995_GP3_LVL_WIDTH 1 /* GP3_LVL */ +#define WM8995_GP3_FN_MASK 0x001F /* GP3_FN - [4:0] */ +#define WM8995_GP3_FN_SHIFT 0 /* GP3_FN - [4:0] */ +#define WM8995_GP3_FN_WIDTH 5 /* GP3_FN - [4:0] */ + +/* + * R1795 (0x703) - GPIO 4 + */ +#define WM8995_GP4_DIR 0x8000 /* GP4_DIR */ +#define WM8995_GP4_DIR_MASK 0x8000 /* GP4_DIR */ +#define WM8995_GP4_DIR_SHIFT 15 /* GP4_DIR */ +#define WM8995_GP4_DIR_WIDTH 1 /* GP4_DIR */ +#define WM8995_GP4_PU 0x4000 /* GP4_PU */ +#define WM8995_GP4_PU_MASK 0x4000 /* GP4_PU */ +#define WM8995_GP4_PU_SHIFT 14 /* GP4_PU */ 
+#define WM8995_GP4_PU_WIDTH 1 /* GP4_PU */ +#define WM8995_GP4_PD 0x2000 /* GP4_PD */ +#define WM8995_GP4_PD_MASK 0x2000 /* GP4_PD */ +#define WM8995_GP4_PD_SHIFT 13 /* GP4_PD */ +#define WM8995_GP4_PD_WIDTH 1 /* GP4_PD */ +#define WM8995_GP4_POL 0x0400 /* GP4_POL */ +#define WM8995_GP4_POL_MASK 0x0400 /* GP4_POL */ +#define WM8995_GP4_POL_SHIFT 10 /* GP4_POL */ +#define WM8995_GP4_POL_WIDTH 1 /* GP4_POL */ +#define WM8995_GP4_OP_CFG 0x0200 /* GP4_OP_CFG */ +#define WM8995_GP4_OP_CFG_MASK 0x0200 /* GP4_OP_CFG */ +#define WM8995_GP4_OP_CFG_SHIFT 9 /* GP4_OP_CFG */ +#define WM8995_GP4_OP_CFG_WIDTH 1 /* GP4_OP_CFG */ +#define WM8995_GP4_DB 0x0100 /* GP4_DB */ +#define WM8995_GP4_DB_MASK 0x0100 /* GP4_DB */ +#define WM8995_GP4_DB_SHIFT 8 /* GP4_DB */ +#define WM8995_GP4_DB_WIDTH 1 /* GP4_DB */ +#define WM8995_GP4_LVL 0x0040 /* GP4_LVL */ +#define WM8995_GP4_LVL_MASK 0x0040 /* GP4_LVL */ +#define WM8995_GP4_LVL_SHIFT 6 /* GP4_LVL */ +#define WM8995_GP4_LVL_WIDTH 1 /* GP4_LVL */ +#define WM8995_GP4_FN_MASK 0x001F /* GP4_FN - [4:0] */ +#define WM8995_GP4_FN_SHIFT 0 /* GP4_FN - [4:0] */ +#define WM8995_GP4_FN_WIDTH 5 /* GP4_FN - [4:0] */ + +/* + * R1796 (0x704) - GPIO 5 + */ +#define WM8995_GP5_DIR 0x8000 /* GP5_DIR */ +#define WM8995_GP5_DIR_MASK 0x8000 /* GP5_DIR */ +#define WM8995_GP5_DIR_SHIFT 15 /* GP5_DIR */ +#define WM8995_GP5_DIR_WIDTH 1 /* GP5_DIR */ +#define WM8995_GP5_PU 0x4000 /* GP5_PU */ +#define WM8995_GP5_PU_MASK 0x4000 /* GP5_PU */ +#define WM8995_GP5_PU_SHIFT 14 /* GP5_PU */ +#define WM8995_GP5_PU_WIDTH 1 /* GP5_PU */ +#define WM8995_GP5_PD 0x2000 /* GP5_PD */ +#define WM8995_GP5_PD_MASK 0x2000 /* GP5_PD */ +#define WM8995_GP5_PD_SHIFT 13 /* GP5_PD */ +#define WM8995_GP5_PD_WIDTH 1 /* GP5_PD */ +#define WM8995_GP5_POL 0x0400 /* GP5_POL */ +#define WM8995_GP5_POL_MASK 0x0400 /* GP5_POL */ +#define WM8995_GP5_POL_SHIFT 10 /* GP5_POL */ +#define WM8995_GP5_POL_WIDTH 1 /* GP5_POL */ +#define WM8995_GP5_OP_CFG 0x0200 /* GP5_OP_CFG */ +#define WM8995_GP5_OP_CFG_MASK 0x0200 /* GP5_OP_CFG */ +#define WM8995_GP5_OP_CFG_SHIFT 9 /* GP5_OP_CFG */ +#define WM8995_GP5_OP_CFG_WIDTH 1 /* GP5_OP_CFG */ +#define WM8995_GP5_DB 0x0100 /* GP5_DB */ +#define WM8995_GP5_DB_MASK 0x0100 /* GP5_DB */ +#define WM8995_GP5_DB_SHIFT 8 /* GP5_DB */ +#define WM8995_GP5_DB_WIDTH 1 /* GP5_DB */ +#define WM8995_GP5_LVL 0x0040 /* GP5_LVL */ +#define WM8995_GP5_LVL_MASK 0x0040 /* GP5_LVL */ +#define WM8995_GP5_LVL_SHIFT 6 /* GP5_LVL */ +#define WM8995_GP5_LVL_WIDTH 1 /* GP5_LVL */ +#define WM8995_GP5_FN_MASK 0x001F /* GP5_FN - [4:0] */ +#define WM8995_GP5_FN_SHIFT 0 /* GP5_FN - [4:0] */ +#define WM8995_GP5_FN_WIDTH 5 /* GP5_FN - [4:0] */ + +/* + * R1797 (0x705) - GPIO 6 + */ +#define WM8995_GP6_DIR 0x8000 /* GP6_DIR */ +#define WM8995_GP6_DIR_MASK 0x8000 /* GP6_DIR */ +#define WM8995_GP6_DIR_SHIFT 15 /* GP6_DIR */ +#define WM8995_GP6_DIR_WIDTH 1 /* GP6_DIR */ +#define WM8995_GP6_PU 0x4000 /* GP6_PU */ +#define WM8995_GP6_PU_MASK 0x4000 /* GP6_PU */ +#define WM8995_GP6_PU_SHIFT 14 /* GP6_PU */ +#define WM8995_GP6_PU_WIDTH 1 /* GP6_PU */ +#define WM8995_GP6_PD 0x2000 /* GP6_PD */ +#define WM8995_GP6_PD_MASK 0x2000 /* GP6_PD */ +#define WM8995_GP6_PD_SHIFT 13 /* GP6_PD */ +#define WM8995_GP6_PD_WIDTH 1 /* GP6_PD */ +#define WM8995_GP6_POL 0x0400 /* GP6_POL */ +#define WM8995_GP6_POL_MASK 0x0400 /* GP6_POL */ +#define WM8995_GP6_POL_SHIFT 10 /* GP6_POL */ +#define WM8995_GP6_POL_WIDTH 1 /* GP6_POL */ +#define WM8995_GP6_OP_CFG 0x0200 /* GP6_OP_CFG */ +#define WM8995_GP6_OP_CFG_MASK 0x0200 /* GP6_OP_CFG */ +#define 
WM8995_GP6_OP_CFG_SHIFT 9 /* GP6_OP_CFG */ +#define WM8995_GP6_OP_CFG_WIDTH 1 /* GP6_OP_CFG */ +#define WM8995_GP6_DB 0x0100 /* GP6_DB */ +#define WM8995_GP6_DB_MASK 0x0100 /* GP6_DB */ +#define WM8995_GP6_DB_SHIFT 8 /* GP6_DB */ +#define WM8995_GP6_DB_WIDTH 1 /* GP6_DB */ +#define WM8995_GP6_LVL 0x0040 /* GP6_LVL */ +#define WM8995_GP6_LVL_MASK 0x0040 /* GP6_LVL */ +#define WM8995_GP6_LVL_SHIFT 6 /* GP6_LVL */ +#define WM8995_GP6_LVL_WIDTH 1 /* GP6_LVL */ +#define WM8995_GP6_FN_MASK 0x001F /* GP6_FN - [4:0] */ +#define WM8995_GP6_FN_SHIFT 0 /* GP6_FN - [4:0] */ +#define WM8995_GP6_FN_WIDTH 5 /* GP6_FN - [4:0] */ + +/* + * R1798 (0x706) - GPIO 7 + */ +#define WM8995_GP7_DIR 0x8000 /* GP7_DIR */ +#define WM8995_GP7_DIR_MASK 0x8000 /* GP7_DIR */ +#define WM8995_GP7_DIR_SHIFT 15 /* GP7_DIR */ +#define WM8995_GP7_DIR_WIDTH 1 /* GP7_DIR */ +#define WM8995_GP7_PU 0x4000 /* GP7_PU */ +#define WM8995_GP7_PU_MASK 0x4000 /* GP7_PU */ +#define WM8995_GP7_PU_SHIFT 14 /* GP7_PU */ +#define WM8995_GP7_PU_WIDTH 1 /* GP7_PU */ +#define WM8995_GP7_PD 0x2000 /* GP7_PD */ +#define WM8995_GP7_PD_MASK 0x2000 /* GP7_PD */ +#define WM8995_GP7_PD_SHIFT 13 /* GP7_PD */ +#define WM8995_GP7_PD_WIDTH 1 /* GP7_PD */ +#define WM8995_GP7_POL 0x0400 /* GP7_POL */ +#define WM8995_GP7_POL_MASK 0x0400 /* GP7_POL */ +#define WM8995_GP7_POL_SHIFT 10 /* GP7_POL */ +#define WM8995_GP7_POL_WIDTH 1 /* GP7_POL */ +#define WM8995_GP7_OP_CFG 0x0200 /* GP7_OP_CFG */ +#define WM8995_GP7_OP_CFG_MASK 0x0200 /* GP7_OP_CFG */ +#define WM8995_GP7_OP_CFG_SHIFT 9 /* GP7_OP_CFG */ +#define WM8995_GP7_OP_CFG_WIDTH 1 /* GP7_OP_CFG */ +#define WM8995_GP7_DB 0x0100 /* GP7_DB */ +#define WM8995_GP7_DB_MASK 0x0100 /* GP7_DB */ +#define WM8995_GP7_DB_SHIFT 8 /* GP7_DB */ +#define WM8995_GP7_DB_WIDTH 1 /* GP7_DB */ +#define WM8995_GP7_LVL 0x0040 /* GP7_LVL */ +#define WM8995_GP7_LVL_MASK 0x0040 /* GP7_LVL */ +#define WM8995_GP7_LVL_SHIFT 6 /* GP7_LVL */ +#define WM8995_GP7_LVL_WIDTH 1 /* GP7_LVL */ +#define WM8995_GP7_FN_MASK 0x001F /* GP7_FN - [4:0] */ +#define WM8995_GP7_FN_SHIFT 0 /* GP7_FN - [4:0] */ +#define WM8995_GP7_FN_WIDTH 5 /* GP7_FN - [4:0] */ + +/* + * R1799 (0x707) - GPIO 8 + */ +#define WM8995_GP8_DIR 0x8000 /* GP8_DIR */ +#define WM8995_GP8_DIR_MASK 0x8000 /* GP8_DIR */ +#define WM8995_GP8_DIR_SHIFT 15 /* GP8_DIR */ +#define WM8995_GP8_DIR_WIDTH 1 /* GP8_DIR */ +#define WM8995_GP8_PU 0x4000 /* GP8_PU */ +#define WM8995_GP8_PU_MASK 0x4000 /* GP8_PU */ +#define WM8995_GP8_PU_SHIFT 14 /* GP8_PU */ +#define WM8995_GP8_PU_WIDTH 1 /* GP8_PU */ +#define WM8995_GP8_PD 0x2000 /* GP8_PD */ +#define WM8995_GP8_PD_MASK 0x2000 /* GP8_PD */ +#define WM8995_GP8_PD_SHIFT 13 /* GP8_PD */ +#define WM8995_GP8_PD_WIDTH 1 /* GP8_PD */ +#define WM8995_GP8_POL 0x0400 /* GP8_POL */ +#define WM8995_GP8_POL_MASK 0x0400 /* GP8_POL */ +#define WM8995_GP8_POL_SHIFT 10 /* GP8_POL */ +#define WM8995_GP8_POL_WIDTH 1 /* GP8_POL */ +#define WM8995_GP8_OP_CFG 0x0200 /* GP8_OP_CFG */ +#define WM8995_GP8_OP_CFG_MASK 0x0200 /* GP8_OP_CFG */ +#define WM8995_GP8_OP_CFG_SHIFT 9 /* GP8_OP_CFG */ +#define WM8995_GP8_OP_CFG_WIDTH 1 /* GP8_OP_CFG */ +#define WM8995_GP8_DB 0x0100 /* GP8_DB */ +#define WM8995_GP8_DB_MASK 0x0100 /* GP8_DB */ +#define WM8995_GP8_DB_SHIFT 8 /* GP8_DB */ +#define WM8995_GP8_DB_WIDTH 1 /* GP8_DB */ +#define WM8995_GP8_LVL 0x0040 /* GP8_LVL */ +#define WM8995_GP8_LVL_MASK 0x0040 /* GP8_LVL */ +#define WM8995_GP8_LVL_SHIFT 6 /* GP8_LVL */ +#define WM8995_GP8_LVL_WIDTH 1 /* GP8_LVL */ +#define WM8995_GP8_FN_MASK 0x001F /* GP8_FN - [4:0] */ +#define 
WM8995_GP8_FN_SHIFT 0 /* GP8_FN - [4:0] */ +#define WM8995_GP8_FN_WIDTH 5 /* GP8_FN - [4:0] */ + +/* + * R1800 (0x708) - GPIO 9 + */ +#define WM8995_GP9_DIR 0x8000 /* GP9_DIR */ +#define WM8995_GP9_DIR_MASK 0x8000 /* GP9_DIR */ +#define WM8995_GP9_DIR_SHIFT 15 /* GP9_DIR */ +#define WM8995_GP9_DIR_WIDTH 1 /* GP9_DIR */ +#define WM8995_GP9_PU 0x4000 /* GP9_PU */ +#define WM8995_GP9_PU_MASK 0x4000 /* GP9_PU */ +#define WM8995_GP9_PU_SHIFT 14 /* GP9_PU */ +#define WM8995_GP9_PU_WIDTH 1 /* GP9_PU */ +#define WM8995_GP9_PD 0x2000 /* GP9_PD */ +#define WM8995_GP9_PD_MASK 0x2000 /* GP9_PD */ +#define WM8995_GP9_PD_SHIFT 13 /* GP9_PD */ +#define WM8995_GP9_PD_WIDTH 1 /* GP9_PD */ +#define WM8995_GP9_POL 0x0400 /* GP9_POL */ +#define WM8995_GP9_POL_MASK 0x0400 /* GP9_POL */ +#define WM8995_GP9_POL_SHIFT 10 /* GP9_POL */ +#define WM8995_GP9_POL_WIDTH 1 /* GP9_POL */ +#define WM8995_GP9_OP_CFG 0x0200 /* GP9_OP_CFG */ +#define WM8995_GP9_OP_CFG_MASK 0x0200 /* GP9_OP_CFG */ +#define WM8995_GP9_OP_CFG_SHIFT 9 /* GP9_OP_CFG */ +#define WM8995_GP9_OP_CFG_WIDTH 1 /* GP9_OP_CFG */ +#define WM8995_GP9_DB 0x0100 /* GP9_DB */ +#define WM8995_GP9_DB_MASK 0x0100 /* GP9_DB */ +#define WM8995_GP9_DB_SHIFT 8 /* GP9_DB */ +#define WM8995_GP9_DB_WIDTH 1 /* GP9_DB */ +#define WM8995_GP9_LVL 0x0040 /* GP9_LVL */ +#define WM8995_GP9_LVL_MASK 0x0040 /* GP9_LVL */ +#define WM8995_GP9_LVL_SHIFT 6 /* GP9_LVL */ +#define WM8995_GP9_LVL_WIDTH 1 /* GP9_LVL */ +#define WM8995_GP9_FN_MASK 0x001F /* GP9_FN - [4:0] */ +#define WM8995_GP9_FN_SHIFT 0 /* GP9_FN - [4:0] */ +#define WM8995_GP9_FN_WIDTH 5 /* GP9_FN - [4:0] */ + +/* + * R1801 (0x709) - GPIO 10 + */ +#define WM8995_GP10_DIR 0x8000 /* GP10_DIR */ +#define WM8995_GP10_DIR_MASK 0x8000 /* GP10_DIR */ +#define WM8995_GP10_DIR_SHIFT 15 /* GP10_DIR */ +#define WM8995_GP10_DIR_WIDTH 1 /* GP10_DIR */ +#define WM8995_GP10_PU 0x4000 /* GP10_PU */ +#define WM8995_GP10_PU_MASK 0x4000 /* GP10_PU */ +#define WM8995_GP10_PU_SHIFT 14 /* GP10_PU */ +#define WM8995_GP10_PU_WIDTH 1 /* GP10_PU */ +#define WM8995_GP10_PD 0x2000 /* GP10_PD */ +#define WM8995_GP10_PD_MASK 0x2000 /* GP10_PD */ +#define WM8995_GP10_PD_SHIFT 13 /* GP10_PD */ +#define WM8995_GP10_PD_WIDTH 1 /* GP10_PD */ +#define WM8995_GP10_POL 0x0400 /* GP10_POL */ +#define WM8995_GP10_POL_MASK 0x0400 /* GP10_POL */ +#define WM8995_GP10_POL_SHIFT 10 /* GP10_POL */ +#define WM8995_GP10_POL_WIDTH 1 /* GP10_POL */ +#define WM8995_GP10_OP_CFG 0x0200 /* GP10_OP_CFG */ +#define WM8995_GP10_OP_CFG_MASK 0x0200 /* GP10_OP_CFG */ +#define WM8995_GP10_OP_CFG_SHIFT 9 /* GP10_OP_CFG */ +#define WM8995_GP10_OP_CFG_WIDTH 1 /* GP10_OP_CFG */ +#define WM8995_GP10_DB 0x0100 /* GP10_DB */ +#define WM8995_GP10_DB_MASK 0x0100 /* GP10_DB */ +#define WM8995_GP10_DB_SHIFT 8 /* GP10_DB */ +#define WM8995_GP10_DB_WIDTH 1 /* GP10_DB */ +#define WM8995_GP10_LVL 0x0040 /* GP10_LVL */ +#define WM8995_GP10_LVL_MASK 0x0040 /* GP10_LVL */ +#define WM8995_GP10_LVL_SHIFT 6 /* GP10_LVL */ +#define WM8995_GP10_LVL_WIDTH 1 /* GP10_LVL */ +#define WM8995_GP10_FN_MASK 0x001F /* GP10_FN - [4:0] */ +#define WM8995_GP10_FN_SHIFT 0 /* GP10_FN - [4:0] */ +#define WM8995_GP10_FN_WIDTH 5 /* GP10_FN - [4:0] */ + +/* + * R1802 (0x70A) - GPIO 11 + */ +#define WM8995_GP11_DIR 0x8000 /* GP11_DIR */ +#define WM8995_GP11_DIR_MASK 0x8000 /* GP11_DIR */ +#define WM8995_GP11_DIR_SHIFT 15 /* GP11_DIR */ +#define WM8995_GP11_DIR_WIDTH 1 /* GP11_DIR */ +#define WM8995_GP11_PU 0x4000 /* GP11_PU */ +#define WM8995_GP11_PU_MASK 0x4000 /* GP11_PU */ +#define WM8995_GP11_PU_SHIFT 14 /* GP11_PU 
*/ +#define WM8995_GP11_PU_WIDTH 1 /* GP11_PU */ +#define WM8995_GP11_PD 0x2000 /* GP11_PD */ +#define WM8995_GP11_PD_MASK 0x2000 /* GP11_PD */ +#define WM8995_GP11_PD_SHIFT 13 /* GP11_PD */ +#define WM8995_GP11_PD_WIDTH 1 /* GP11_PD */ +#define WM8995_GP11_POL 0x0400 /* GP11_POL */ +#define WM8995_GP11_POL_MASK 0x0400 /* GP11_POL */ +#define WM8995_GP11_POL_SHIFT 10 /* GP11_POL */ +#define WM8995_GP11_POL_WIDTH 1 /* GP11_POL */ +#define WM8995_GP11_OP_CFG 0x0200 /* GP11_OP_CFG */ +#define WM8995_GP11_OP_CFG_MASK 0x0200 /* GP11_OP_CFG */ +#define WM8995_GP11_OP_CFG_SHIFT 9 /* GP11_OP_CFG */ +#define WM8995_GP11_OP_CFG_WIDTH 1 /* GP11_OP_CFG */ +#define WM8995_GP11_DB 0x0100 /* GP11_DB */ +#define WM8995_GP11_DB_MASK 0x0100 /* GP11_DB */ +#define WM8995_GP11_DB_SHIFT 8 /* GP11_DB */ +#define WM8995_GP11_DB_WIDTH 1 /* GP11_DB */ +#define WM8995_GP11_LVL 0x0040 /* GP11_LVL */ +#define WM8995_GP11_LVL_MASK 0x0040 /* GP11_LVL */ +#define WM8995_GP11_LVL_SHIFT 6 /* GP11_LVL */ +#define WM8995_GP11_LVL_WIDTH 1 /* GP11_LVL */ +#define WM8995_GP11_FN_MASK 0x001F /* GP11_FN - [4:0] */ +#define WM8995_GP11_FN_SHIFT 0 /* GP11_FN - [4:0] */ +#define WM8995_GP11_FN_WIDTH 5 /* GP11_FN - [4:0] */ + +/* + * R1803 (0x70B) - GPIO 12 + */ +#define WM8995_GP12_DIR 0x8000 /* GP12_DIR */ +#define WM8995_GP12_DIR_MASK 0x8000 /* GP12_DIR */ +#define WM8995_GP12_DIR_SHIFT 15 /* GP12_DIR */ +#define WM8995_GP12_DIR_WIDTH 1 /* GP12_DIR */ +#define WM8995_GP12_PU 0x4000 /* GP12_PU */ +#define WM8995_GP12_PU_MASK 0x4000 /* GP12_PU */ +#define WM8995_GP12_PU_SHIFT 14 /* GP12_PU */ +#define WM8995_GP12_PU_WIDTH 1 /* GP12_PU */ +#define WM8995_GP12_PD 0x2000 /* GP12_PD */ +#define WM8995_GP12_PD_MASK 0x2000 /* GP12_PD */ +#define WM8995_GP12_PD_SHIFT 13 /* GP12_PD */ +#define WM8995_GP12_PD_WIDTH 1 /* GP12_PD */ +#define WM8995_GP12_POL 0x0400 /* GP12_POL */ +#define WM8995_GP12_POL_MASK 0x0400 /* GP12_POL */ +#define WM8995_GP12_POL_SHIFT 10 /* GP12_POL */ +#define WM8995_GP12_POL_WIDTH 1 /* GP12_POL */ +#define WM8995_GP12_OP_CFG 0x0200 /* GP12_OP_CFG */ +#define WM8995_GP12_OP_CFG_MASK 0x0200 /* GP12_OP_CFG */ +#define WM8995_GP12_OP_CFG_SHIFT 9 /* GP12_OP_CFG */ +#define WM8995_GP12_OP_CFG_WIDTH 1 /* GP12_OP_CFG */ +#define WM8995_GP12_DB 0x0100 /* GP12_DB */ +#define WM8995_GP12_DB_MASK 0x0100 /* GP12_DB */ +#define WM8995_GP12_DB_SHIFT 8 /* GP12_DB */ +#define WM8995_GP12_DB_WIDTH 1 /* GP12_DB */ +#define WM8995_GP12_LVL 0x0040 /* GP12_LVL */ +#define WM8995_GP12_LVL_MASK 0x0040 /* GP12_LVL */ +#define WM8995_GP12_LVL_SHIFT 6 /* GP12_LVL */ +#define WM8995_GP12_LVL_WIDTH 1 /* GP12_LVL */ +#define WM8995_GP12_FN_MASK 0x001F /* GP12_FN - [4:0] */ +#define WM8995_GP12_FN_SHIFT 0 /* GP12_FN - [4:0] */ +#define WM8995_GP12_FN_WIDTH 5 /* GP12_FN - [4:0] */ + +/* + * R1804 (0x70C) - GPIO 13 + */ +#define WM8995_GP13_DIR 0x8000 /* GP13_DIR */ +#define WM8995_GP13_DIR_MASK 0x8000 /* GP13_DIR */ +#define WM8995_GP13_DIR_SHIFT 15 /* GP13_DIR */ +#define WM8995_GP13_DIR_WIDTH 1 /* GP13_DIR */ +#define WM8995_GP13_PU 0x4000 /* GP13_PU */ +#define WM8995_GP13_PU_MASK 0x4000 /* GP13_PU */ +#define WM8995_GP13_PU_SHIFT 14 /* GP13_PU */ +#define WM8995_GP13_PU_WIDTH 1 /* GP13_PU */ +#define WM8995_GP13_PD 0x2000 /* GP13_PD */ +#define WM8995_GP13_PD_MASK 0x2000 /* GP13_PD */ +#define WM8995_GP13_PD_SHIFT 13 /* GP13_PD */ +#define WM8995_GP13_PD_WIDTH 1 /* GP13_PD */ +#define WM8995_GP13_POL 0x0400 /* GP13_POL */ +#define WM8995_GP13_POL_MASK 0x0400 /* GP13_POL */ +#define WM8995_GP13_POL_SHIFT 10 /* GP13_POL */ +#define 
WM8995_GP13_POL_WIDTH 1 /* GP13_POL */ +#define WM8995_GP13_OP_CFG 0x0200 /* GP13_OP_CFG */ +#define WM8995_GP13_OP_CFG_MASK 0x0200 /* GP13_OP_CFG */ +#define WM8995_GP13_OP_CFG_SHIFT 9 /* GP13_OP_CFG */ +#define WM8995_GP13_OP_CFG_WIDTH 1 /* GP13_OP_CFG */ +#define WM8995_GP13_DB 0x0100 /* GP13_DB */ +#define WM8995_GP13_DB_MASK 0x0100 /* GP13_DB */ +#define WM8995_GP13_DB_SHIFT 8 /* GP13_DB */ +#define WM8995_GP13_DB_WIDTH 1 /* GP13_DB */ +#define WM8995_GP13_LVL 0x0040 /* GP13_LVL */ +#define WM8995_GP13_LVL_MASK 0x0040 /* GP13_LVL */ +#define WM8995_GP13_LVL_SHIFT 6 /* GP13_LVL */ +#define WM8995_GP13_LVL_WIDTH 1 /* GP13_LVL */ +#define WM8995_GP13_FN_MASK 0x001F /* GP13_FN - [4:0] */ +#define WM8995_GP13_FN_SHIFT 0 /* GP13_FN - [4:0] */ +#define WM8995_GP13_FN_WIDTH 5 /* GP13_FN - [4:0] */ + +/* + * R1805 (0x70D) - GPIO 14 + */ +#define WM8995_GP14_DIR 0x8000 /* GP14_DIR */ +#define WM8995_GP14_DIR_MASK 0x8000 /* GP14_DIR */ +#define WM8995_GP14_DIR_SHIFT 15 /* GP14_DIR */ +#define WM8995_GP14_DIR_WIDTH 1 /* GP14_DIR */ +#define WM8995_GP14_PU 0x4000 /* GP14_PU */ +#define WM8995_GP14_PU_MASK 0x4000 /* GP14_PU */ +#define WM8995_GP14_PU_SHIFT 14 /* GP14_PU */ +#define WM8995_GP14_PU_WIDTH 1 /* GP14_PU */ +#define WM8995_GP14_PD 0x2000 /* GP14_PD */ +#define WM8995_GP14_PD_MASK 0x2000 /* GP14_PD */ +#define WM8995_GP14_PD_SHIFT 13 /* GP14_PD */ +#define WM8995_GP14_PD_WIDTH 1 /* GP14_PD */ +#define WM8995_GP14_POL 0x0400 /* GP14_POL */ +#define WM8995_GP14_POL_MASK 0x0400 /* GP14_POL */ +#define WM8995_GP14_POL_SHIFT 10 /* GP14_POL */ +#define WM8995_GP14_POL_WIDTH 1 /* GP14_POL */ +#define WM8995_GP14_OP_CFG 0x0200 /* GP14_OP_CFG */ +#define WM8995_GP14_OP_CFG_MASK 0x0200 /* GP14_OP_CFG */ +#define WM8995_GP14_OP_CFG_SHIFT 9 /* GP14_OP_CFG */ +#define WM8995_GP14_OP_CFG_WIDTH 1 /* GP14_OP_CFG */ +#define WM8995_GP14_DB 0x0100 /* GP14_DB */ +#define WM8995_GP14_DB_MASK 0x0100 /* GP14_DB */ +#define WM8995_GP14_DB_SHIFT 8 /* GP14_DB */ +#define WM8995_GP14_DB_WIDTH 1 /* GP14_DB */ +#define WM8995_GP14_LVL 0x0040 /* GP14_LVL */ +#define WM8995_GP14_LVL_MASK 0x0040 /* GP14_LVL */ +#define WM8995_GP14_LVL_SHIFT 6 /* GP14_LVL */ +#define WM8995_GP14_LVL_WIDTH 1 /* GP14_LVL */ +#define WM8995_GP14_FN_MASK 0x001F /* GP14_FN - [4:0] */ +#define WM8995_GP14_FN_SHIFT 0 /* GP14_FN - [4:0] */ +#define WM8995_GP14_FN_WIDTH 5 /* GP14_FN - [4:0] */ + +/* + * R1824 (0x720) - Pull Control (1) + */ +#define WM8995_DMICDAT3_PD 0x4000 /* DMICDAT3_PD */ +#define WM8995_DMICDAT3_PD_MASK 0x4000 /* DMICDAT3_PD */ +#define WM8995_DMICDAT3_PD_SHIFT 14 /* DMICDAT3_PD */ +#define WM8995_DMICDAT3_PD_WIDTH 1 /* DMICDAT3_PD */ +#define WM8995_DMICDAT2_PD 0x1000 /* DMICDAT2_PD */ +#define WM8995_DMICDAT2_PD_MASK 0x1000 /* DMICDAT2_PD */ +#define WM8995_DMICDAT2_PD_SHIFT 12 /* DMICDAT2_PD */ +#define WM8995_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */ +#define WM8995_DMICDAT1_PD 0x0400 /* DMICDAT1_PD */ +#define WM8995_DMICDAT1_PD_MASK 0x0400 /* DMICDAT1_PD */ +#define WM8995_DMICDAT1_PD_SHIFT 10 /* DMICDAT1_PD */ +#define WM8995_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */ +#define WM8995_MCLK2_PU 0x0200 /* MCLK2_PU */ +#define WM8995_MCLK2_PU_MASK 0x0200 /* MCLK2_PU */ +#define WM8995_MCLK2_PU_SHIFT 9 /* MCLK2_PU */ +#define WM8995_MCLK2_PU_WIDTH 1 /* MCLK2_PU */ +#define WM8995_MCLK2_PD 0x0100 /* MCLK2_PD */ +#define WM8995_MCLK2_PD_MASK 0x0100 /* MCLK2_PD */ +#define WM8995_MCLK2_PD_SHIFT 8 /* MCLK2_PD */ +#define WM8995_MCLK2_PD_WIDTH 1 /* MCLK2_PD */ +#define WM8995_MCLK1_PU 0x0080 /* MCLK1_PU */ +#define WM8995_MCLK1_PU_MASK 
0x0080 /* MCLK1_PU */ +#define WM8995_MCLK1_PU_SHIFT 7 /* MCLK1_PU */ +#define WM8995_MCLK1_PU_WIDTH 1 /* MCLK1_PU */ +#define WM8995_MCLK1_PD 0x0040 /* MCLK1_PD */ +#define WM8995_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */ +#define WM8995_MCLK1_PD_SHIFT 6 /* MCLK1_PD */ +#define WM8995_MCLK1_PD_WIDTH 1 /* MCLK1_PD */ +#define WM8995_DACDAT1_PU 0x0020 /* DACDAT1_PU */ +#define WM8995_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */ +#define WM8995_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */ +#define WM8995_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */ +#define WM8995_DACDAT1_PD 0x0010 /* DACDAT1_PD */ +#define WM8995_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */ +#define WM8995_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */ +#define WM8995_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */ +#define WM8995_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */ +#define WM8995_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */ +#define WM8995_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */ +#define WM8995_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */ +#define WM8995_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */ +#define WM8995_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */ +#define WM8995_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */ +#define WM8995_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */ +#define WM8995_BCLK1_PU 0x0002 /* BCLK1_PU */ +#define WM8995_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */ +#define WM8995_BCLK1_PU_SHIFT 1 /* BCLK1_PU */ +#define WM8995_BCLK1_PU_WIDTH 1 /* BCLK1_PU */ +#define WM8995_BCLK1_PD 0x0001 /* BCLK1_PD */ +#define WM8995_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */ +#define WM8995_BCLK1_PD_SHIFT 0 /* BCLK1_PD */ +#define WM8995_BCLK1_PD_WIDTH 1 /* BCLK1_PD */ + +/* + * R1825 (0x721) - Pull Control (2) + */ +#define WM8995_LDO1ENA_PD 0x0010 /* LDO1ENA_PD */ +#define WM8995_LDO1ENA_PD_MASK 0x0010 /* LDO1ENA_PD */ +#define WM8995_LDO1ENA_PD_SHIFT 4 /* LDO1ENA_PD */ +#define WM8995_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */ +#define WM8995_MODE_PD 0x0004 /* MODE_PD */ +#define WM8995_MODE_PD_MASK 0x0004 /* MODE_PD */ +#define WM8995_MODE_PD_SHIFT 2 /* MODE_PD */ +#define WM8995_MODE_PD_WIDTH 1 /* MODE_PD */ +#define WM8995_CSNADDR_PD 0x0001 /* CSNADDR_PD */ +#define WM8995_CSNADDR_PD_MASK 0x0001 /* CSNADDR_PD */ +#define WM8995_CSNADDR_PD_SHIFT 0 /* CSNADDR_PD */ +#define WM8995_CSNADDR_PD_WIDTH 1 /* CSNADDR_PD */ + +/* + * R1840 (0x730) - Interrupt Status 1 + */ +#define WM8995_GP14_EINT 0x2000 /* GP14_EINT */ +#define WM8995_GP14_EINT_MASK 0x2000 /* GP14_EINT */ +#define WM8995_GP14_EINT_SHIFT 13 /* GP14_EINT */ +#define WM8995_GP14_EINT_WIDTH 1 /* GP14_EINT */ +#define WM8995_GP13_EINT 0x1000 /* GP13_EINT */ +#define WM8995_GP13_EINT_MASK 0x1000 /* GP13_EINT */ +#define WM8995_GP13_EINT_SHIFT 12 /* GP13_EINT */ +#define WM8995_GP13_EINT_WIDTH 1 /* GP13_EINT */ +#define WM8995_GP12_EINT 0x0800 /* GP12_EINT */ +#define WM8995_GP12_EINT_MASK 0x0800 /* GP12_EINT */ +#define WM8995_GP12_EINT_SHIFT 11 /* GP12_EINT */ +#define WM8995_GP12_EINT_WIDTH 1 /* GP12_EINT */ +#define WM8995_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM8995_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM8995_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM8995_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM8995_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM8995_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM8995_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM8995_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM8995_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM8995_GP9_EINT_MASK 0x0100 /* GP9_EINT */ +#define WM8995_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM8995_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM8995_GP8_EINT 0x0080 /* GP8_EINT */ 
+#define WM8995_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM8995_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM8995_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM8995_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM8995_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM8995_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM8995_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM8995_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM8995_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM8995_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM8995_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM8995_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM8995_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM8995_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM8995_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM8995_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM8995_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM8995_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM8995_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM8995_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM8995_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM8995_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM8995_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM8995_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM8995_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM8995_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM8995_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM8995_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM8995_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM8995_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM8995_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R1841 (0x731) - Interrupt Status 2 + */ +#define WM8995_DCS_DONE_23_EINT 0x1000 /* DCS_DONE_23_EINT */ +#define WM8995_DCS_DONE_23_EINT_MASK 0x1000 /* DCS_DONE_23_EINT */ +#define WM8995_DCS_DONE_23_EINT_SHIFT 12 /* DCS_DONE_23_EINT */ +#define WM8995_DCS_DONE_23_EINT_WIDTH 1 /* DCS_DONE_23_EINT */ +#define WM8995_DCS_DONE_01_EINT 0x0800 /* DCS_DONE_01_EINT */ +#define WM8995_DCS_DONE_01_EINT_MASK 0x0800 /* DCS_DONE_01_EINT */ +#define WM8995_DCS_DONE_01_EINT_SHIFT 11 /* DCS_DONE_01_EINT */ +#define WM8995_DCS_DONE_01_EINT_WIDTH 1 /* DCS_DONE_01_EINT */ +#define WM8995_WSEQ_DONE_EINT 0x0400 /* WSEQ_DONE_EINT */ +#define WM8995_WSEQ_DONE_EINT_MASK 0x0400 /* WSEQ_DONE_EINT */ +#define WM8995_WSEQ_DONE_EINT_SHIFT 10 /* WSEQ_DONE_EINT */ +#define WM8995_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */ +#define WM8995_FIFOS_ERR_EINT 0x0200 /* FIFOS_ERR_EINT */ +#define WM8995_FIFOS_ERR_EINT_MASK 0x0200 /* FIFOS_ERR_EINT */ +#define WM8995_FIFOS_ERR_EINT_SHIFT 9 /* FIFOS_ERR_EINT */ +#define WM8995_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */ +#define WM8995_AIF2DRC_SIG_DET_EINT 0x0100 /* AIF2DRC_SIG_DET_EINT */ +#define WM8995_AIF2DRC_SIG_DET_EINT_MASK 0x0100 /* AIF2DRC_SIG_DET_EINT */ +#define WM8995_AIF2DRC_SIG_DET_EINT_SHIFT 8 /* AIF2DRC_SIG_DET_EINT */ +#define WM8995_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* AIF2DRC_SIG_DET_EINT */ +#define WM8995_AIF1DRC2_SIG_DET_EINT 0x0080 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8995_AIF1DRC2_SIG_DET_EINT_MASK 0x0080 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8995_AIF1DRC2_SIG_DET_EINT_SHIFT 7 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8995_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8995_AIF1DRC1_SIG_DET_EINT 0x0040 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8995_AIF1DRC1_SIG_DET_EINT_MASK 0x0040 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8995_AIF1DRC1_SIG_DET_EINT_SHIFT 6 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8995_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8995_SRC2_LOCK_EINT 0x0020 /* SRC2_LOCK_EINT */ +#define WM8995_SRC2_LOCK_EINT_MASK 
0x0020 /* SRC2_LOCK_EINT */ +#define WM8995_SRC2_LOCK_EINT_SHIFT 5 /* SRC2_LOCK_EINT */ +#define WM8995_SRC2_LOCK_EINT_WIDTH 1 /* SRC2_LOCK_EINT */ +#define WM8995_SRC1_LOCK_EINT 0x0010 /* SRC1_LOCK_EINT */ +#define WM8995_SRC1_LOCK_EINT_MASK 0x0010 /* SRC1_LOCK_EINT */ +#define WM8995_SRC1_LOCK_EINT_SHIFT 4 /* SRC1_LOCK_EINT */ +#define WM8995_SRC1_LOCK_EINT_WIDTH 1 /* SRC1_LOCK_EINT */ +#define WM8995_FLL2_LOCK_EINT 0x0008 /* FLL2_LOCK_EINT */ +#define WM8995_FLL2_LOCK_EINT_MASK 0x0008 /* FLL2_LOCK_EINT */ +#define WM8995_FLL2_LOCK_EINT_SHIFT 3 /* FLL2_LOCK_EINT */ +#define WM8995_FLL2_LOCK_EINT_WIDTH 1 /* FLL2_LOCK_EINT */ +#define WM8995_FLL1_LOCK_EINT 0x0004 /* FLL1_LOCK_EINT */ +#define WM8995_FLL1_LOCK_EINT_MASK 0x0004 /* FLL1_LOCK_EINT */ +#define WM8995_FLL1_LOCK_EINT_SHIFT 2 /* FLL1_LOCK_EINT */ +#define WM8995_FLL1_LOCK_EINT_WIDTH 1 /* FLL1_LOCK_EINT */ +#define WM8995_HP_DONE_EINT 0x0002 /* HP_DONE_EINT */ +#define WM8995_HP_DONE_EINT_MASK 0x0002 /* HP_DONE_EINT */ +#define WM8995_HP_DONE_EINT_SHIFT 1 /* HP_DONE_EINT */ +#define WM8995_HP_DONE_EINT_WIDTH 1 /* HP_DONE_EINT */ +#define WM8995_MICD_EINT 0x0001 /* MICD_EINT */ +#define WM8995_MICD_EINT_MASK 0x0001 /* MICD_EINT */ +#define WM8995_MICD_EINT_SHIFT 0 /* MICD_EINT */ +#define WM8995_MICD_EINT_WIDTH 1 /* MICD_EINT */ + +/* + * R1842 (0x732) - Interrupt Raw Status 2 + */ +#define WM8995_DCS_DONE_23_STS 0x1000 /* DCS_DONE_23_STS */ +#define WM8995_DCS_DONE_23_STS_MASK 0x1000 /* DCS_DONE_23_STS */ +#define WM8995_DCS_DONE_23_STS_SHIFT 12 /* DCS_DONE_23_STS */ +#define WM8995_DCS_DONE_23_STS_WIDTH 1 /* DCS_DONE_23_STS */ +#define WM8995_DCS_DONE_01_STS 0x0800 /* DCS_DONE_01_STS */ +#define WM8995_DCS_DONE_01_STS_MASK 0x0800 /* DCS_DONE_01_STS */ +#define WM8995_DCS_DONE_01_STS_SHIFT 11 /* DCS_DONE_01_STS */ +#define WM8995_DCS_DONE_01_STS_WIDTH 1 /* DCS_DONE_01_STS */ +#define WM8995_WSEQ_DONE_STS 0x0400 /* WSEQ_DONE_STS */ +#define WM8995_WSEQ_DONE_STS_MASK 0x0400 /* WSEQ_DONE_STS */ +#define WM8995_WSEQ_DONE_STS_SHIFT 10 /* WSEQ_DONE_STS */ +#define WM8995_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */ +#define WM8995_FIFOS_ERR_STS 0x0200 /* FIFOS_ERR_STS */ +#define WM8995_FIFOS_ERR_STS_MASK 0x0200 /* FIFOS_ERR_STS */ +#define WM8995_FIFOS_ERR_STS_SHIFT 9 /* FIFOS_ERR_STS */ +#define WM8995_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */ +#define WM8995_AIF2DRC_SIG_DET_STS 0x0100 /* AIF2DRC_SIG_DET_STS */ +#define WM8995_AIF2DRC_SIG_DET_STS_MASK 0x0100 /* AIF2DRC_SIG_DET_STS */ +#define WM8995_AIF2DRC_SIG_DET_STS_SHIFT 8 /* AIF2DRC_SIG_DET_STS */ +#define WM8995_AIF2DRC_SIG_DET_STS_WIDTH 1 /* AIF2DRC_SIG_DET_STS */ +#define WM8995_AIF1DRC2_SIG_DET_STS 0x0080 /* AIF1DRC2_SIG_DET_STS */ +#define WM8995_AIF1DRC2_SIG_DET_STS_MASK 0x0080 /* AIF1DRC2_SIG_DET_STS */ +#define WM8995_AIF1DRC2_SIG_DET_STS_SHIFT 7 /* AIF1DRC2_SIG_DET_STS */ +#define WM8995_AIF1DRC2_SIG_DET_STS_WIDTH 1 /* AIF1DRC2_SIG_DET_STS */ +#define WM8995_AIF1DRC1_SIG_DET_STS 0x0040 /* AIF1DRC1_SIG_DET_STS */ +#define WM8995_AIF1DRC1_SIG_DET_STS_MASK 0x0040 /* AIF1DRC1_SIG_DET_STS */ +#define WM8995_AIF1DRC1_SIG_DET_STS_SHIFT 6 /* AIF1DRC1_SIG_DET_STS */ +#define WM8995_AIF1DRC1_SIG_DET_STS_WIDTH 1 /* AIF1DRC1_SIG_DET_STS */ +#define WM8995_SRC2_LOCK_STS 0x0020 /* SRC2_LOCK_STS */ +#define WM8995_SRC2_LOCK_STS_MASK 0x0020 /* SRC2_LOCK_STS */ +#define WM8995_SRC2_LOCK_STS_SHIFT 5 /* SRC2_LOCK_STS */ +#define WM8995_SRC2_LOCK_STS_WIDTH 1 /* SRC2_LOCK_STS */ +#define WM8995_SRC1_LOCK_STS 0x0010 /* SRC1_LOCK_STS */ +#define WM8995_SRC1_LOCK_STS_MASK 0x0010 /* SRC1_LOCK_STS */ 
+#define WM8995_SRC1_LOCK_STS_SHIFT 4 /* SRC1_LOCK_STS */ +#define WM8995_SRC1_LOCK_STS_WIDTH 1 /* SRC1_LOCK_STS */ +#define WM8995_FLL2_LOCK_STS 0x0008 /* FLL2_LOCK_STS */ +#define WM8995_FLL2_LOCK_STS_MASK 0x0008 /* FLL2_LOCK_STS */ +#define WM8995_FLL2_LOCK_STS_SHIFT 3 /* FLL2_LOCK_STS */ +#define WM8995_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */ +#define WM8995_FLL1_LOCK_STS 0x0004 /* FLL1_LOCK_STS */ +#define WM8995_FLL1_LOCK_STS_MASK 0x0004 /* FLL1_LOCK_STS */ +#define WM8995_FLL1_LOCK_STS_SHIFT 2 /* FLL1_LOCK_STS */ +#define WM8995_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */ + +/* + * R1848 (0x738) - Interrupt Status 1 Mask + */ +#define WM8995_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */ +#define WM8995_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */ +#define WM8995_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */ +#define WM8995_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */ +#define WM8995_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */ +#define WM8995_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */ +#define WM8995_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */ +#define WM8995_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */ +#define WM8995_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */ +#define WM8995_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */ +#define WM8995_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */ +#define WM8995_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */ +#define WM8995_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM8995_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM8995_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM8995_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM8995_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM8995_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM8995_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM8995_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM8995_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM8995_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM8995_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define WM8995_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM8995_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM8995_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM8995_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM8995_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM8995_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM8995_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM8995_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM8995_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM8995_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM8995_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM8995_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM8995_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM8995_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM8995_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM8995_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM8995_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM8995_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM8995_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM8995_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM8995_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM8995_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM8995_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM8995_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM8995_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM8995_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM8995_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM8995_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM8995_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define 
WM8995_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM8995_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ +#define WM8995_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM8995_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + +/* + * R1849 (0x739) - Interrupt Status 2 Mask + */ +#define WM8995_IM_DCS_DONE_23_EINT 0x1000 /* IM_DCS_DONE_23_EINT */ +#define WM8995_IM_DCS_DONE_23_EINT_MASK 0x1000 /* IM_DCS_DONE_23_EINT */ +#define WM8995_IM_DCS_DONE_23_EINT_SHIFT 12 /* IM_DCS_DONE_23_EINT */ +#define WM8995_IM_DCS_DONE_23_EINT_WIDTH 1 /* IM_DCS_DONE_23_EINT */ +#define WM8995_IM_DCS_DONE_01_EINT 0x0800 /* IM_DCS_DONE_01_EINT */ +#define WM8995_IM_DCS_DONE_01_EINT_MASK 0x0800 /* IM_DCS_DONE_01_EINT */ +#define WM8995_IM_DCS_DONE_01_EINT_SHIFT 11 /* IM_DCS_DONE_01_EINT */ +#define WM8995_IM_DCS_DONE_01_EINT_WIDTH 1 /* IM_DCS_DONE_01_EINT */ +#define WM8995_IM_WSEQ_DONE_EINT 0x0400 /* IM_WSEQ_DONE_EINT */ +#define WM8995_IM_WSEQ_DONE_EINT_MASK 0x0400 /* IM_WSEQ_DONE_EINT */ +#define WM8995_IM_WSEQ_DONE_EINT_SHIFT 10 /* IM_WSEQ_DONE_EINT */ +#define WM8995_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */ +#define WM8995_IM_FIFOS_ERR_EINT 0x0200 /* IM_FIFOS_ERR_EINT */ +#define WM8995_IM_FIFOS_ERR_EINT_MASK 0x0200 /* IM_FIFOS_ERR_EINT */ +#define WM8995_IM_FIFOS_ERR_EINT_SHIFT 9 /* IM_FIFOS_ERR_EINT */ +#define WM8995_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */ +#define WM8995_IM_AIF2DRC_SIG_DET_EINT 0x0100 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8995_IM_AIF2DRC_SIG_DET_EINT_MASK 0x0100 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8995_IM_AIF2DRC_SIG_DET_EINT_SHIFT 8 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8995_IM_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC2_SIG_DET_EINT 0x0080 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_MASK 0x0080 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_SHIFT 7 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC1_SIG_DET_EINT 0x0040 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_MASK 0x0040 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_SHIFT 6 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8995_IM_SRC2_LOCK_EINT 0x0020 /* IM_SRC2_LOCK_EINT */ +#define WM8995_IM_SRC2_LOCK_EINT_MASK 0x0020 /* IM_SRC2_LOCK_EINT */ +#define WM8995_IM_SRC2_LOCK_EINT_SHIFT 5 /* IM_SRC2_LOCK_EINT */ +#define WM8995_IM_SRC2_LOCK_EINT_WIDTH 1 /* IM_SRC2_LOCK_EINT */ +#define WM8995_IM_SRC1_LOCK_EINT 0x0010 /* IM_SRC1_LOCK_EINT */ +#define WM8995_IM_SRC1_LOCK_EINT_MASK 0x0010 /* IM_SRC1_LOCK_EINT */ +#define WM8995_IM_SRC1_LOCK_EINT_SHIFT 4 /* IM_SRC1_LOCK_EINT */ +#define WM8995_IM_SRC1_LOCK_EINT_WIDTH 1 /* IM_SRC1_LOCK_EINT */ +#define WM8995_IM_FLL2_LOCK_EINT 0x0008 /* IM_FLL2_LOCK_EINT */ +#define WM8995_IM_FLL2_LOCK_EINT_MASK 0x0008 /* IM_FLL2_LOCK_EINT */ +#define WM8995_IM_FLL2_LOCK_EINT_SHIFT 3 /* IM_FLL2_LOCK_EINT */ +#define WM8995_IM_FLL2_LOCK_EINT_WIDTH 1 /* IM_FLL2_LOCK_EINT */ +#define WM8995_IM_FLL1_LOCK_EINT 0x0004 /* IM_FLL1_LOCK_EINT */ +#define WM8995_IM_FLL1_LOCK_EINT_MASK 0x0004 /* IM_FLL1_LOCK_EINT */ +#define WM8995_IM_FLL1_LOCK_EINT_SHIFT 2 /* IM_FLL1_LOCK_EINT */ +#define WM8995_IM_FLL1_LOCK_EINT_WIDTH 1 /* IM_FLL1_LOCK_EINT */ +#define WM8995_IM_HP_DONE_EINT 0x0002 /* IM_HP_DONE_EINT */ +#define WM8995_IM_HP_DONE_EINT_MASK 0x0002 /* IM_HP_DONE_EINT */ +#define 
WM8995_IM_HP_DONE_EINT_SHIFT 1 /* IM_HP_DONE_EINT */ +#define WM8995_IM_HP_DONE_EINT_WIDTH 1 /* IM_HP_DONE_EINT */ +#define WM8995_IM_MICD_EINT 0x0001 /* IM_MICD_EINT */ +#define WM8995_IM_MICD_EINT_MASK 0x0001 /* IM_MICD_EINT */ +#define WM8995_IM_MICD_EINT_SHIFT 0 /* IM_MICD_EINT */ +#define WM8995_IM_MICD_EINT_WIDTH 1 /* IM_MICD_EINT */ + +/* + * R1856 (0x740) - Interrupt Control + */ +#define WM8995_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM8995_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM8995_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM8995_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R2048 (0x800) - Left PDM Speaker 1 + */ +#define WM8995_SPK1L_ENA 0x0010 /* SPK1L_ENA */ +#define WM8995_SPK1L_ENA_MASK 0x0010 /* SPK1L_ENA */ +#define WM8995_SPK1L_ENA_SHIFT 4 /* SPK1L_ENA */ +#define WM8995_SPK1L_ENA_WIDTH 1 /* SPK1L_ENA */ +#define WM8995_SPK1L_MUTE 0x0008 /* SPK1L_MUTE */ +#define WM8995_SPK1L_MUTE_MASK 0x0008 /* SPK1L_MUTE */ +#define WM8995_SPK1L_MUTE_SHIFT 3 /* SPK1L_MUTE */ +#define WM8995_SPK1L_MUTE_WIDTH 1 /* SPK1L_MUTE */ +#define WM8995_SPK1L_MUTE_ZC 0x0004 /* SPK1L_MUTE_ZC */ +#define WM8995_SPK1L_MUTE_ZC_MASK 0x0004 /* SPK1L_MUTE_ZC */ +#define WM8995_SPK1L_MUTE_ZC_SHIFT 2 /* SPK1L_MUTE_ZC */ +#define WM8995_SPK1L_MUTE_ZC_WIDTH 1 /* SPK1L_MUTE_ZC */ +#define WM8995_SPK1L_SRC_MASK 0x0003 /* SPK1L_SRC - [1:0] */ +#define WM8995_SPK1L_SRC_SHIFT 0 /* SPK1L_SRC - [1:0] */ +#define WM8995_SPK1L_SRC_WIDTH 2 /* SPK1L_SRC - [1:0] */ + +/* + * R2049 (0x801) - Right PDM Speaker 1 + */ +#define WM8995_SPK1R_ENA 0x0010 /* SPK1R_ENA */ +#define WM8995_SPK1R_ENA_MASK 0x0010 /* SPK1R_ENA */ +#define WM8995_SPK1R_ENA_SHIFT 4 /* SPK1R_ENA */ +#define WM8995_SPK1R_ENA_WIDTH 1 /* SPK1R_ENA */ +#define WM8995_SPK1R_MUTE 0x0008 /* SPK1R_MUTE */ +#define WM8995_SPK1R_MUTE_MASK 0x0008 /* SPK1R_MUTE */ +#define WM8995_SPK1R_MUTE_SHIFT 3 /* SPK1R_MUTE */ +#define WM8995_SPK1R_MUTE_WIDTH 1 /* SPK1R_MUTE */ +#define WM8995_SPK1R_MUTE_ZC 0x0004 /* SPK1R_MUTE_ZC */ +#define WM8995_SPK1R_MUTE_ZC_MASK 0x0004 /* SPK1R_MUTE_ZC */ +#define WM8995_SPK1R_MUTE_ZC_SHIFT 2 /* SPK1R_MUTE_ZC */ +#define WM8995_SPK1R_MUTE_ZC_WIDTH 1 /* SPK1R_MUTE_ZC */ +#define WM8995_SPK1R_SRC_MASK 0x0003 /* SPK1R_SRC - [1:0] */ +#define WM8995_SPK1R_SRC_SHIFT 0 /* SPK1R_SRC - [1:0] */ +#define WM8995_SPK1R_SRC_WIDTH 2 /* SPK1R_SRC - [1:0] */ + +/* + * R2050 (0x802) - PDM Speaker 1 Mute Sequence + */ +#define WM8995_SPK1_MUTE_SEQ1_MASK 0x00FF /* SPK1_MUTE_SEQ1 - [7:0] */ +#define WM8995_SPK1_MUTE_SEQ1_SHIFT 0 /* SPK1_MUTE_SEQ1 - [7:0] */ +#define WM8995_SPK1_MUTE_SEQ1_WIDTH 8 /* SPK1_MUTE_SEQ1 - [7:0] */ + +/* + * R2056 (0x808) - Left PDM Speaker 2 + */ +#define WM8995_SPK2L_ENA 0x0010 /* SPK2L_ENA */ +#define WM8995_SPK2L_ENA_MASK 0x0010 /* SPK2L_ENA */ +#define WM8995_SPK2L_ENA_SHIFT 4 /* SPK2L_ENA */ +#define WM8995_SPK2L_ENA_WIDTH 1 /* SPK2L_ENA */ +#define WM8995_SPK2L_MUTE 0x0008 /* SPK2L_MUTE */ +#define WM8995_SPK2L_MUTE_MASK 0x0008 /* SPK2L_MUTE */ +#define WM8995_SPK2L_MUTE_SHIFT 3 /* SPK2L_MUTE */ +#define WM8995_SPK2L_MUTE_WIDTH 1 /* SPK2L_MUTE */ +#define WM8995_SPK2L_MUTE_ZC 0x0004 /* SPK2L_MUTE_ZC */ +#define WM8995_SPK2L_MUTE_ZC_MASK 0x0004 /* SPK2L_MUTE_ZC */ +#define WM8995_SPK2L_MUTE_ZC_SHIFT 2 /* SPK2L_MUTE_ZC */ +#define WM8995_SPK2L_MUTE_ZC_WIDTH 1 /* SPK2L_MUTE_ZC */ +#define WM8995_SPK2L_SRC_MASK 0x0003 /* SPK2L_SRC - [1:0] */ +#define WM8995_SPK2L_SRC_SHIFT 0 /* SPK2L_SRC - [1:0] */ +#define WM8995_SPK2L_SRC_WIDTH 2 /* SPK2L_SRC - [1:0] */ + +/* + * R2057 (0x809) - Right PDM Speaker 2 + */ +#define WM8995_SPK2R_ENA 0x0010 
/* SPK2R_ENA */ +#define WM8995_SPK2R_ENA_MASK 0x0010 /* SPK2R_ENA */ +#define WM8995_SPK2R_ENA_SHIFT 4 /* SPK2R_ENA */ +#define WM8995_SPK2R_ENA_WIDTH 1 /* SPK2R_ENA */ +#define WM8995_SPK2R_MUTE 0x0008 /* SPK2R_MUTE */ +#define WM8995_SPK2R_MUTE_MASK 0x0008 /* SPK2R_MUTE */ +#define WM8995_SPK2R_MUTE_SHIFT 3 /* SPK2R_MUTE */ +#define WM8995_SPK2R_MUTE_WIDTH 1 /* SPK2R_MUTE */ +#define WM8995_SPK2R_MUTE_ZC 0x0004 /* SPK2R_MUTE_ZC */ +#define WM8995_SPK2R_MUTE_ZC_MASK 0x0004 /* SPK2R_MUTE_ZC */ +#define WM8995_SPK2R_MUTE_ZC_SHIFT 2 /* SPK2R_MUTE_ZC */ +#define WM8995_SPK2R_MUTE_ZC_WIDTH 1 /* SPK2R_MUTE_ZC */ +#define WM8995_SPK2R_SRC_MASK 0x0003 /* SPK2R_SRC - [1:0] */ +#define WM8995_SPK2R_SRC_SHIFT 0 /* SPK2R_SRC - [1:0] */ +#define WM8995_SPK2R_SRC_WIDTH 2 /* SPK2R_SRC - [1:0] */ + +/* + * R2058 (0x80A) - PDM Speaker 2 Mute Sequence + */ +#define WM8995_SPK2_MUTE_SEQ1_MASK 0x00FF /* SPK2_MUTE_SEQ1 - [7:0] */ +#define WM8995_SPK2_MUTE_SEQ1_SHIFT 0 /* SPK2_MUTE_SEQ1 - [7:0] */ +#define WM8995_SPK2_MUTE_SEQ1_WIDTH 8 /* SPK2_MUTE_SEQ1 - [7:0] */ + +#define WM8995_CLASS_W_SWITCH(xname, reg, shift, max, invert) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ + .info = snd_soc_info_volsw, \ + .get = snd_soc_dapm_get_volsw, .put = wm8995_put_class_w, \ + .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) \ +} + +struct wm8995_reg_access { + u16 read; + u16 write; + u16 vol; +}; + +/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */ +enum clk_src { + WM8995_SYSCLK_MCLK1 = 1, + WM8995_SYSCLK_MCLK2, + WM8995_SYSCLK_FLL1, + WM8995_SYSCLK_FLL2, + WM8995_SYSCLK_OPCLK +}; + +#define WM8995_FLL1 1 +#define WM8995_FLL2 2 + +#define WM8995_FLL_SRC_MCLK1 1 +#define WM8995_FLL_SRC_MCLK2 2 +#define WM8995_FLL_SRC_LRCLK 3 +#define WM8995_FLL_SRC_BCLK 4 + +#endif /* _WM8995_H */ diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index a486670966bd..43825b2102a5 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -158,7 +157,6 @@ static struct { struct wm9081_priv { enum snd_soc_control_type control_type; void *control_data; - u16 reg_cache[WM9081_MAX_REGISTER + 1]; int sysclk_source; int mclk_rate; int sysclk_rate; @@ -591,6 +589,10 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id, reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT; snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5); + /* Set gain to the recommended value */ + snd_soc_update_bits(codec, WM9081_FLL_CONTROL_4, + WM9081_FLL_GAIN_MASK, 0); + /* Enable the FLL */ snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA); @@ -805,7 +807,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: /* Initial cold start */ - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Disable LINEOUT discharge */ reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL); reg &= ~WM9081_LINEOUT_DISCH; @@ -865,7 +867,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -1228,6 +1230,7 @@ static struct snd_soc_dai_driver wm9081_dai = { static int wm9081_probe(struct snd_soc_codec *codec) { struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; u16 reg; @@ -1269,9 +1272,9 @@ static int wm9081_probe(struct snd_soc_codec *codec) 
ARRAY_SIZE(wm9081_eq_controls)); } - snd_soc_dapm_new_controls(codec, wm9081_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm9081_dapm_widgets, ARRAY_SIZE(wm9081_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths)); + snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths)); return ret; } diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c index 6e5f64f627cb..a788c4297046 100644 --- a/sound/soc/codecs/wm9090.c +++ b/sound/soc/codecs/wm9090.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -442,31 +441,32 @@ static const struct snd_soc_dapm_route audio_map_in2_diff[] = { static int wm9090_add_controls(struct snd_soc_codec *codec) { struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_dapm_context *dapm = &codec->dapm; int i; - snd_soc_dapm_new_controls(codec, wm9090_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm9090_dapm_widgets, ARRAY_SIZE(wm9090_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); snd_soc_add_controls(codec, wm9090_controls, ARRAY_SIZE(wm9090_controls)); if (wm9090->pdata.lin1_diff) { - snd_soc_dapm_add_routes(codec, audio_map_in1_diff, + snd_soc_dapm_add_routes(dapm, audio_map_in1_diff, ARRAY_SIZE(audio_map_in1_diff)); } else { - snd_soc_dapm_add_routes(codec, audio_map_in1_se, + snd_soc_dapm_add_routes(dapm, audio_map_in1_se, ARRAY_SIZE(audio_map_in1_se)); snd_soc_add_controls(codec, wm9090_in1_se_controls, ARRAY_SIZE(wm9090_in1_se_controls)); } if (wm9090->pdata.lin2_diff) { - snd_soc_dapm_add_routes(codec, audio_map_in2_diff, + snd_soc_dapm_add_routes(dapm, audio_map_in2_diff, ARRAY_SIZE(audio_map_in2_diff)); } else { - snd_soc_dapm_add_routes(codec, audio_map_in2_se, + snd_soc_dapm_add_routes(dapm, audio_map_in2_se, ARRAY_SIZE(audio_map_in2_se)); snd_soc_add_controls(codec, wm9090_in2_se_controls, ARRAY_SIZE(wm9090_in2_se_controls)); @@ -513,7 +513,7 @@ static int wm9090_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) { + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Restore the register cache */ for (i = 1; i < codec->driver->reg_cache_size; i++) { if (reg_cache[i] == wm9090_reg_defaults[i]) @@ -543,7 +543,7 @@ static int wm9090_set_bias_level(struct snd_soc_codec *codec, break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c index a144acda751c..47b357adabdd 100644 --- a/sound/soc/codecs/wm9705.c +++ b/sound/soc/codecs/wm9705.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "wm9705.h" @@ -203,9 +202,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm9705_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm9705_dapm_widgets, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_new_controls(dapm, wm9705_dapm_widgets, ARRAY_SIZE(wm9705_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index d2f224d62744..bf5d4ef1a2a6 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "wm9712.h" #define WM9712_VERSION "0.4" @@ -432,10 +431,11 @@ static 
const struct snd_soc_dapm_route audio_map[] = { static int wm9712_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm9712_dapm_widgets, - ARRAY_SIZE(wm9712_dapm_widgets)); + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_new_controls(dapm, wm9712_dapm_widgets, + ARRAY_SIZE(wm9712_dapm_widgets)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -570,7 +570,7 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec, ac97_write(codec, AC97_POWERDOWN, 0xffff); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 7da13b07a53d..38ed98558718 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -26,7 +26,6 @@ #include #include #include -#include #include "wm9713.h" @@ -647,10 +646,12 @@ static const struct snd_soc_dapm_route audio_map[] = { static int wm9713_add_widgets(struct snd_soc_codec *codec) { - snd_soc_dapm_new_controls(codec, wm9713_dapm_widgets, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_new_controls(dapm, wm9713_dapm_widgets, ARRAY_SIZE(wm9713_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } @@ -1147,7 +1148,7 @@ static int wm9713_set_bias_level(struct snd_soc_codec *codec, ac97_write(codec, AC97_POWERDOWN, 0xffff); break; } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 0e24092722c3..c466982eed23 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -94,41 +93,61 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec) struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec); u16 reg, reg_l, reg_r, dcs_cfg; - /* Set for 32 series updates */ - snd_soc_update_bits(codec, WM8993_DC_SERVO_1, - WM8993_DCS_SERIES_NO_01_MASK, - 32 << WM8993_DCS_SERIES_NO_01_SHIFT); - wait_for_dc_servo(codec, - WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1); + /* If we're using a digital only path and have a previously + * callibrated DC servo offset stored then use that. */ + if (hubs->class_w && hubs->class_w_dcs) { + dev_dbg(codec->dev, "Using cached DC servo offset %x\n", + hubs->class_w_dcs); + snd_soc_write(codec, WM8993_DC_SERVO_3, hubs->class_w_dcs); + wait_for_dc_servo(codec, + WM8993_DCS_TRIG_DAC_WR_0 | + WM8993_DCS_TRIG_DAC_WR_1); + return; + } + + /* Devices not using a DCS code correction have startup mode */ + if (hubs->dcs_codes) { + /* Set for 32 series updates */ + snd_soc_update_bits(codec, WM8993_DC_SERVO_1, + WM8993_DCS_SERIES_NO_01_MASK, + 32 << WM8993_DCS_SERIES_NO_01_SHIFT); + wait_for_dc_servo(codec, + WM8993_DCS_TRIG_SERIES_0 | + WM8993_DCS_TRIG_SERIES_1); + } else { + wait_for_dc_servo(codec, + WM8993_DCS_TRIG_STARTUP_0 | + WM8993_DCS_TRIG_STARTUP_1); + } + + /* Different chips in the family support different readback + * methods. 
+ */ + switch (hubs->dcs_readback_mode) { + case 0: + reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) + & WM8993_DCS_INTEG_CHAN_0_MASK; + reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) + & WM8993_DCS_INTEG_CHAN_1_MASK; + break; + case 1: + reg = snd_soc_read(codec, WM8993_DC_SERVO_3); + reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK) + >> WM8993_DCS_DAC_WR_VAL_1_SHIFT; + reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK; + break; + default: + WARN(1, "Unknown DCS readback method\n"); + break; + } + + dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r); /* Apply correction to DC servo result */ if (hubs->dcs_codes) { dev_dbg(codec->dev, "Applying %d code DC servo correction\n", hubs->dcs_codes); - /* Different chips in the family support different - * readback methods. - */ - switch (hubs->dcs_readback_mode) { - case 0: - reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) - & WM8993_DCS_INTEG_CHAN_0_MASK;; - reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) - & WM8993_DCS_INTEG_CHAN_1_MASK; - break; - case 1: - reg = snd_soc_read(codec, WM8993_DC_SERVO_3); - reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK) - >> WM8993_DCS_DAC_WR_VAL_1_SHIFT; - reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK; - break; - default: - WARN(1, "Unknown DCS readback method\n"); - break; - } - - dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r); - /* HPOUT1L */ if (reg_l + hubs->dcs_codes > 0 && reg_l + hubs->dcs_codes < 0xff) @@ -148,7 +167,15 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec) wait_for_dc_servo(codec, WM8993_DCS_TRIG_DAC_WR_0 | WM8993_DCS_TRIG_DAC_WR_1); + } else { + dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT; + dcs_cfg |= reg_r; } + + /* Save the callibrated offset if we're in class W mode and + * therefore don't have any analogue signal mixed in. */ + if (hubs->class_w) + hubs->class_w_dcs = dcs_cfg; } /* @@ -163,6 +190,9 @@ static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol, ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); + /* Updating the analogue gains invalidates the DC servo cache */ + hubs->class_w_dcs = 0; + /* If we're applying an offset correction then updating the * callibration would be likely to introduce further offsets. 
*/ if (hubs->dcs_codes) @@ -791,6 +821,8 @@ static const struct snd_soc_dapm_route lineout2_se_routes[] = { int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + /* Latch volume update bits & default ZC on */ snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME, WM8993_IN1_VU, WM8993_IN1_VU); @@ -819,7 +851,7 @@ int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec) snd_soc_add_controls(codec, analogue_snd_controls, ARRAY_SIZE(analogue_snd_controls)); - snd_soc_dapm_new_controls(codec, analogue_dapm_widgets, + snd_soc_dapm_new_controls(dapm, analogue_dapm_widgets, ARRAY_SIZE(analogue_dapm_widgets)); return 0; } @@ -828,24 +860,26 @@ EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_controls); int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec, int lineout1_diff, int lineout2_diff) { - snd_soc_dapm_add_routes(codec, analogue_routes, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_add_routes(dapm, analogue_routes, ARRAY_SIZE(analogue_routes)); if (lineout1_diff) - snd_soc_dapm_add_routes(codec, + snd_soc_dapm_add_routes(dapm, lineout1_diff_routes, ARRAY_SIZE(lineout1_diff_routes)); else - snd_soc_dapm_add_routes(codec, + snd_soc_dapm_add_routes(dapm, lineout1_se_routes, ARRAY_SIZE(lineout1_se_routes)); if (lineout2_diff) - snd_soc_dapm_add_routes(codec, + snd_soc_dapm_add_routes(dapm, lineout2_diff_routes, ARRAY_SIZE(lineout2_diff_routes)); else - snd_soc_dapm_add_routes(codec, + snd_soc_dapm_add_routes(dapm, lineout2_se_routes, ARRAY_SIZE(lineout2_se_routes)); @@ -872,7 +906,7 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_codec *codec, * VMID as an output and can disable it. */ if (lineout1_diff && lineout2_diff) - codec->idle_bias_off = 1; + codec->dapm.idle_bias_off = 1; if (lineout1fb) snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL, diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index e51c16683589..f8a5e976b5e6 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h @@ -23,6 +23,9 @@ struct wm_hubs_data { int dcs_codes; int dcs_readback_mode; int hp_startup_mode; + + bool class_w; + u16 class_w_dcs; }; extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *); diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index bc9e6b0b3f6f..0c2d6bacc681 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -27,7 +26,6 @@ #include #include -#include "../codecs/tlv320aic3x.h" #include "davinci-pcm.h" #include "davinci-i2s.h" #include "davinci-mcasp.h" @@ -132,26 +130,27 @@ static const struct snd_soc_dapm_route audio_map[] = { static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; /* Add davinci-evm specific widgets */ - snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets, ARRAY_SIZE(aic3x_dapm_widgets)); /* Set up davinci-evm specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* not connected */ - snd_soc_dapm_disable_pin(codec, "MONO_LOUT"); - snd_soc_dapm_disable_pin(codec, "HPLCOM"); - snd_soc_dapm_disable_pin(codec, "HPRCOM"); + snd_soc_dapm_disable_pin(dapm, "MONO_LOUT"); + snd_soc_dapm_disable_pin(dapm, "HPLCOM"); + 
snd_soc_dapm_disable_pin(dapm, "HPRCOM"); /* always connected */ - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Line Out"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); - snd_soc_dapm_enable_pin(codec, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Line Out"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Line In"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c index 6c6666a1f942..0fe558c65145 100644 --- a/sound/soc/davinci/davinci-sffsdr.c +++ b/sound/soc/davinci/davinci-sffsdr.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c index 4f4873359613..9ac93f6b4f85 100644 --- a/sound/soc/ep93xx/ep93xx-i2s.c +++ b/sound/soc/ep93xx/ep93xx-i2s.c @@ -352,13 +352,13 @@ static struct snd_soc_dai_driver ep93xx_i2s_dai = { .playback = { .channels_min = 2, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_48000, + .rates = SNDRV_PCM_RATE_8000_96000, .formats = EP93XX_I2S_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_48000, + .rates = SNDRV_PCM_RATE_8000_96000, .formats = EP93XX_I2S_FORMATS, }, .ops = &ep93xx_i2s_dai_ops, diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c index 2f121ddbe4bb..06670776f649 100644 --- a/sound/soc/ep93xx/ep93xx-pcm.c +++ b/sound/soc/ep93xx/ep93xx-pcm.c @@ -35,9 +35,9 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = { SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), - .rates = SNDRV_PCM_RATE_8000_48000, + .rates = SNDRV_PCM_RATE_8000_96000, .rate_min = SNDRV_PCM_RATE_8000, - .rate_max = SNDRV_PCM_RATE_48000, + .rate_max = SNDRV_PCM_RATE_96000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | diff --git a/sound/soc/ep93xx/snappercl15.c b/sound/soc/ep93xx/snappercl15.c index 28ab5ff772ac..dfe1d7f74ea6 100644 --- a/sound/soc/ep93xx/snappercl15.c +++ b/sound/soc/ep93xx/snappercl15.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -79,11 +78,12 @@ static const struct snd_soc_dapm_route audio_map[] = { static int snappercl15_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets, + snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets, ARRAY_SIZE(tlv320aic23_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index dd4fffdbd177..e20c9e1457c0 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include "../codecs/tlv320aic23.h" diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index 390b6ffc2658..30894ea7f333 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c @@ -456,13 +456,13 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai) static struct snd_soc_dai_driver imx_ssi_dai = { .probe = imx_ssi_dai_probe, .playback = { - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { - 
.channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c index 9eabc28667e6..a7deb5cb2433 100644 --- a/sound/soc/imx/phycore-ac97.c +++ b/sound/soc/imx/phycore-ac97.c @@ -17,7 +17,6 @@ #include #include #include -#include #include static struct snd_soc_card imx_phycore; diff --git a/sound/soc/imx/wm1133-ev1.c b/sound/soc/imx/wm1133-ev1.c index 30fdb15065be..75b4c72787e2 100644 --- a/sound/soc/imx/wm1133-ev1.c +++ b/sound/soc/imx/wm1133-ev1.c @@ -19,7 +19,6 @@ #include #include #include -#include #include @@ -213,11 +212,12 @@ static struct snd_soc_jack_pin mic_jack_pins[] = { static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets, + snd_soc_dapm_new_controls(dapm, wm1133_ev1_widgets, ARRAY_SIZE(wm1133_ev1_widgets)); - snd_soc_dapm_add_routes(codec, wm1133_ev1_map, + snd_soc_dapm_add_routes(dapm, wm1133_ev1_map, ARRAY_SIZE(wm1133_ev1_map)); /* Headphone jack detection */ @@ -234,7 +234,7 @@ static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd) wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE, SND_JACK_BTN_0); - snd_soc_dapm_force_enable_pin(codec, "Mic Bias"); + snd_soc_dapm_force_enable_pin(dapm, "Mic Bias"); return 0; } diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index f3cffd183401..419bf4f5534a 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include "jz4740-i2s.h" diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c index ef1a99e6a3bd..49723e3e7e38 100644 --- a/sound/soc/jz4740/qi_lb60.c +++ b/sound/soc/jz4740/qi_lb60.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #define QI_LB60_SND_GPIO JZ_GPIO_PORTB(29) @@ -59,10 +58,11 @@ static int qi_lb60_codec_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; - snd_soc_dapm_nc_pin(codec, "LIN"); - snd_soc_dapm_nc_pin(codec, "RIN"); + snd_soc_dapm_nc_pin(dapm, "LIN"); + snd_soc_dapm_nc_pin(dapm, "RIN"); ret = snd_soc_dai_set_fmt(cpu_dai, QI_LB60_DAIFMT); if (ret < 0) { @@ -70,9 +70,11 @@ static int qi_lb60_codec_init(struct snd_soc_pcm_runtime *rtd) return ret; } - snd_soc_dapm_new_controls(codec, qi_lb60_widgets, ARRAY_SIZE(qi_lb60_widgets)); - snd_soc_dapm_add_routes(codec, qi_lb60_routes, ARRAY_SIZE(qi_lb60_routes)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_new_controls(dapm, qi_lb60_widgets, + ARRAY_SIZE(qi_lb60_widgets)); + snd_soc_dapm_add_routes(dapm, qi_lb60_routes, + ARRAY_SIZE(qi_lb60_routes)); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig index 16ec2a2dba4d..8f49e165f4d1 100644 --- a/sound/soc/kirkwood/Kconfig +++ b/sound/soc/kirkwood/Kconfig @@ -11,10 +11,19 @@ config SND_KIRKWOOD_SOC_I2S config SND_KIRKWOOD_SOC_OPENRD tristate "SoC Audio support for Kirkwood Openrd Client" - depends on SND_KIRKWOOD_SOC && MACH_OPENRD_CLIENT + depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE) select SND_KIRKWOOD_SOC_I2S select SND_SOC_CS42L51 help Say Y if you want to add support for SoC audio on Openrd Client. 
+config SND_KIRKWOOD_SOC_T5325 + tristate "SoC Audio support for HP t5325" + depends on SND_KIRKWOOD_SOC && MACH_T5325 + select SND_KIRKWOOD_SOC_I2S + select SND_SOC_ALC5623 + help + Say Y if you want to add support for SoC audio on + the HP t5325 thin client. + diff --git a/sound/soc/kirkwood/Makefile b/sound/soc/kirkwood/Makefile index 33a16dcab5b5..3e62ae9e7bbe 100644 --- a/sound/soc/kirkwood/Makefile +++ b/sound/soc/kirkwood/Makefile @@ -5,5 +5,7 @@ obj-$(CONFIG_SND_KIRKWOOD_SOC) += snd-soc-kirkwood.o obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o snd-soc-openrd-objs := kirkwood-openrd.o +snd-soc-t5325-objs := kirkwood-t5325.o obj-$(CONFIG_SND_KIRKWOOD_SOC_OPENRD) += snd-soc-openrd.o +obj-$(CONFIG_SND_KIRKWOOD_SOC_T5325) += snd-soc-t5325.o diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c index 9d7c81e921f1..d863afb3ee52 100644 --- a/sound/soc/kirkwood/kirkwood-openrd.c +++ b/sound/soc/kirkwood/kirkwood-openrd.c @@ -86,7 +86,7 @@ static int __init openrd_client_init(void) { int ret; - if (!machine_is_openrd_client()) + if (!machine_is_openrd_client() && !machine_is_openrd_ultimate()) return 0; openrd_client_snd_device = platform_device_alloc("soc-audio", -1); diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c new file mode 100644 index 000000000000..c8d21956ab52 --- /dev/null +++ b/sound/soc/kirkwood/kirkwood-t5325.c @@ -0,0 +1,141 @@ +/* + * kirkwood-t5325.c + * + * (c) 2010 Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../codecs/alc5623.h" + +static int t5325_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + int ret; + unsigned int freq, fmt; + + fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS; + ret = snd_soc_dai_set_fmt(cpu_dai, fmt); + if (ret < 0) + return ret; + + ret = snd_soc_dai_set_fmt(codec_dai, fmt); + if (ret < 0) + return ret; + + freq = params_rate(params) * 256; + + return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN); + +} + +static struct snd_soc_ops t5325_ops = { + .hw_params = t5325_hw_params, +}; + +static const struct snd_soc_dapm_widget t5325_dapm_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_SPK("Speaker", NULL), + SND_SOC_DAPM_MIC("Mic Jack", NULL), +}; + +static const struct snd_soc_dapm_route t5325_route[] = { + { "Headphone Jack", NULL, "HPL" }, + { "Headphone Jack", NULL, "HPR" }, + + {"Speaker", NULL, "SPKOUT"}, + {"Speaker", NULL, "SPKOUTN"}, + + { "MIC1", NULL, "Mic Jack" }, + { "MIC2", NULL, "Mic Jack" }, +}; + +static int t5325_dai_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_new_controls(dapm, t5325_dapm_widgets, + ARRAY_SIZE(t5325_dapm_widgets)); + + snd_soc_dapm_add_routes(dapm, t5325_route, ARRAY_SIZE(t5325_route)); + + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); + + snd_soc_dapm_sync(dapm); + + 
return 0; +} + +static struct snd_soc_dai_link t5325_dai[] = { +{ + .name = "ALC5621", + .stream_name = "ALC5621 HiFi", + .cpu_dai_name = "kirkwood-i2s", + .platform_name = "kirkwood-pcm-audio", + .codec_dai_name = "alc5621-hifi", + .codec_name = "alc562x-codec.0-001a", + .ops = &t5325_ops, + .init = t5325_dai_init, +}, +}; + + +static struct snd_soc_card t5325 = { + .name = "t5325", + .dai_link = t5325_dai, + .num_links = ARRAY_SIZE(t5325_dai), +}; + +static struct platform_device *t5325_snd_device; + +static int __init t5325_init(void) +{ + int ret; + + if (!machine_is_t5325()) + return 0; + + t5325_snd_device = platform_device_alloc("soc-audio", -1); + if (!t5325_snd_device) + return -ENOMEM; + + platform_set_drvdata(t5325_snd_device, + &t5325); + + ret = platform_device_add(t5325_snd_device); + if (ret) { + printk(KERN_ERR "%s: platform_device_add failed\n", __func__); + platform_device_put(t5325_snd_device); + } + + return ret; +} +module_init(t5325_init); + +static void __exit t5325_exit(void) +{ + platform_device_unregister(t5325_snd_device); +} +module_exit(t5325_exit); + +MODULE_AUTHOR("Arnaud Patard "); +MODULE_DESCRIPTION("ALSA SoC t5325 audio client"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/nuc900/nuc900-audio.c b/sound/soc/nuc900/nuc900-audio.c index 161f5b667d7b..38a2d0d883b5 100644 --- a/sound/soc/nuc900/nuc900-audio.c +++ b/sound/soc/nuc900/nuc900-audio.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "nuc900-audio.h" diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c index 979dd508305f..161750443ebc 100644 --- a/sound/soc/omap/am3517evm.c +++ b/sound/soc/omap/am3517evm.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -114,20 +113,21 @@ static const struct snd_soc_dapm_route audio_map[] = { static int am3517evm_aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; /* Add am3517-evm specific widgets */ - snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets, + snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets, ARRAY_SIZE(tlv320aic23_dapm_widgets)); /* Set up davinci-evm specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* always connected */ - snd_soc_dapm_enable_pin(codec, "Line Out"); - snd_soc_dapm_enable_pin(codec, "Line In"); - snd_soc_dapm_enable_pin(codec, "Mic In"); + snd_soc_dapm_enable_pin(dapm, "Line Out"); + snd_soc_dapm_enable_pin(dapm, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Mic In"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c index 438146addbb8..2101bdcee21f 100644 --- a/sound/soc/omap/ams-delta.c +++ b/sound/soc/omap/ams-delta.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include @@ -94,6 +94,7 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct snd_soc_dapm_context *dapm = &codec->dapm; struct soc_enum *control = (struct soc_enum *)kcontrol->private_value; unsigned short pins; int pin, changed = 0; @@ -112,48 +113,48 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol, /* Setup pins after corresponding bits if changed */ pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE)); - if (pin != snd_soc_dapm_get_pin_status(codec, 
"Mouthpiece")) { + if (pin != snd_soc_dapm_get_pin_status(dapm, "Mouthpiece")) { changed = 1; if (pin) - snd_soc_dapm_enable_pin(codec, "Mouthpiece"); + snd_soc_dapm_enable_pin(dapm, "Mouthpiece"); else - snd_soc_dapm_disable_pin(codec, "Mouthpiece"); + snd_soc_dapm_disable_pin(dapm, "Mouthpiece"); } pin = !!(pins & (1 << AMS_DELTA_EARPIECE)); - if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) { + if (pin != snd_soc_dapm_get_pin_status(dapm, "Earpiece")) { changed = 1; if (pin) - snd_soc_dapm_enable_pin(codec, "Earpiece"); + snd_soc_dapm_enable_pin(dapm, "Earpiece"); else - snd_soc_dapm_disable_pin(codec, "Earpiece"); + snd_soc_dapm_disable_pin(dapm, "Earpiece"); } pin = !!(pins & (1 << AMS_DELTA_MICROPHONE)); - if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) { + if (pin != snd_soc_dapm_get_pin_status(dapm, "Microphone")) { changed = 1; if (pin) - snd_soc_dapm_enable_pin(codec, "Microphone"); + snd_soc_dapm_enable_pin(dapm, "Microphone"); else - snd_soc_dapm_disable_pin(codec, "Microphone"); + snd_soc_dapm_disable_pin(dapm, "Microphone"); } pin = !!(pins & (1 << AMS_DELTA_SPEAKER)); - if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) { + if (pin != snd_soc_dapm_get_pin_status(dapm, "Speaker")) { changed = 1; if (pin) - snd_soc_dapm_enable_pin(codec, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); else - snd_soc_dapm_disable_pin(codec, "Speaker"); + snd_soc_dapm_disable_pin(dapm, "Speaker"); } pin = !!(pins & (1 << AMS_DELTA_AGC)); if (pin != ams_delta_audio_agc) { ams_delta_audio_agc = pin; changed = 1; if (pin) - snd_soc_dapm_enable_pin(codec, "AGCIN"); + snd_soc_dapm_enable_pin(dapm, "AGCIN"); else - snd_soc_dapm_disable_pin(codec, "AGCIN"); + snd_soc_dapm_disable_pin(dapm, "AGCIN"); } if (changed) - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); mutex_unlock(&codec->mutex); @@ -164,19 +165,20 @@ static int ams_delta_get_audio_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct snd_soc_dapm_context *dapm = &codec->dapm; unsigned short pins, mode; - pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") << + pins = ((snd_soc_dapm_get_pin_status(dapm, "Mouthpiece") << AMS_DELTA_MOUTHPIECE) | - (snd_soc_dapm_get_pin_status(codec, "Earpiece") << + (snd_soc_dapm_get_pin_status(dapm, "Earpiece") << AMS_DELTA_EARPIECE)); if (pins) - pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") << + pins |= (snd_soc_dapm_get_pin_status(dapm, "Microphone") << AMS_DELTA_MICROPHONE); else - pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") << + pins = ((snd_soc_dapm_get_pin_status(dapm, "Microphone") << AMS_DELTA_MICROPHONE) | - (snd_soc_dapm_get_pin_status(codec, "Speaker") << + (snd_soc_dapm_get_pin_status(dapm, "Speaker") << AMS_DELTA_SPEAKER) | (ams_delta_audio_agc << AMS_DELTA_AGC)); @@ -300,6 +302,7 @@ static int cx81801_open(struct tty_struct *tty) static void cx81801_close(struct tty_struct *tty) { struct snd_soc_codec *codec = tty->disc_data; + struct snd_soc_dapm_context *dapm = &codec->dapm; del_timer_sync(&cx81801_timer); @@ -312,12 +315,12 @@ static void cx81801_close(struct tty_struct *tty) v253_ops.close(tty); /* Revert back to default audio input/output constellation */ - snd_soc_dapm_disable_pin(codec, "Mouthpiece"); - snd_soc_dapm_enable_pin(codec, "Earpiece"); - snd_soc_dapm_enable_pin(codec, "Microphone"); - snd_soc_dapm_disable_pin(codec, "Speaker"); - snd_soc_dapm_disable_pin(codec, "AGCIN"); - snd_soc_dapm_sync(codec); + 
snd_soc_dapm_disable_pin(dapm, "Mouthpiece"); + snd_soc_dapm_enable_pin(dapm, "Earpiece"); + snd_soc_dapm_enable_pin(dapm, "Microphone"); + snd_soc_dapm_disable_pin(dapm, "Speaker"); + snd_soc_dapm_disable_pin(dapm, "AGCIN"); + snd_soc_dapm_sync(dapm); } /* Line discipline .hangup() */ @@ -432,16 +435,16 @@ static int ams_delta_set_bias_level(struct snd_soc_card *card, case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: case SND_SOC_BIAS_STANDBY: - if (codec->bias_level == SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET, AMS_DELTA_LATCH2_MODEM_NRESET); break; case SND_SOC_BIAS_OFF: - if (codec->bias_level != SND_SOC_BIAS_OFF) + if (codec->dapm.bias_level != SND_SOC_BIAS_OFF) ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET, 0); } - codec->bias_level = level; + codec->dapm.bias_level = level; return 0; } @@ -492,6 +495,7 @@ static void ams_delta_shutdown(struct snd_pcm_substream *substream) static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_card *card = rtd->card; int ret; @@ -541,7 +545,7 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd) } /* Add board specific DAPM widgets and routes */ - ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, ams_delta_dapm_widgets, ARRAY_SIZE(ams_delta_dapm_widgets)); if (ret) { dev_warn(card->dev, @@ -550,7 +554,7 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd) return 0; } - ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map, + ret = snd_soc_dapm_add_routes(dapm, ams_delta_audio_map, ARRAY_SIZE(ams_delta_audio_map)); if (ret) { dev_warn(card->dev, @@ -560,13 +564,13 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd) } /* Set up initial pin constellation */ - snd_soc_dapm_disable_pin(codec, "Mouthpiece"); - snd_soc_dapm_enable_pin(codec, "Earpiece"); - snd_soc_dapm_enable_pin(codec, "Microphone"); - snd_soc_dapm_disable_pin(codec, "Speaker"); - snd_soc_dapm_disable_pin(codec, "AGCIN"); - snd_soc_dapm_disable_pin(codec, "AGCOUT"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_disable_pin(dapm, "Mouthpiece"); + snd_soc_dapm_enable_pin(dapm, "Earpiece"); + snd_soc_dapm_enable_pin(dapm, "Microphone"); + snd_soc_dapm_disable_pin(dapm, "Speaker"); + snd_soc_dapm_disable_pin(dapm, "AGCIN"); + snd_soc_dapm_disable_pin(dapm, "AGCOUT"); + snd_soc_dapm_sync(dapm); /* Add virtual switch */ ret = snd_soc_add_controls(codec, ams_delta_audio_controls, diff --git a/sound/soc/omap/igep0020.c b/sound/soc/omap/igep0020.c index fd3a40f309c8..0ae34702995b 100644 --- a/sound/soc/omap/igep0020.c +++ b/sound/soc/omap/igep0020.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c index a3b6d897ad84..83d213bfd3d1 100644 --- a/sound/soc/omap/n810.c +++ b/sound/soc/omap/n810.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -36,7 +35,6 @@ #include "omap-mcbsp.h" #include "omap-pcm.h" -#include "../codecs/tlv320aic3x.h" #define N810_HEADSET_AMP_GPIO 10 #define N810_SPEAKER_AMP_GPIO 101 @@ -58,6 +56,7 @@ static int n810_dmic_func; static void n810_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; int hp = 0, line1l = 0; switch (n810_jack_func) { @@ -72,25 +71,25 @@ static void 
n810_ext_control(struct snd_soc_codec *codec) } if (n810_spk_func) - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else - snd_soc_dapm_disable_pin(codec, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Ext Spk"); if (hp) - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); else - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); if (line1l) - snd_soc_dapm_enable_pin(codec, "LINE1L"); + snd_soc_dapm_enable_pin(dapm, "LINE1L"); else - snd_soc_dapm_disable_pin(codec, "LINE1L"); + snd_soc_dapm_disable_pin(dapm, "LINE1L"); if (n810_dmic_func) - snd_soc_dapm_enable_pin(codec, "DMic"); + snd_soc_dapm_enable_pin(dapm, "DMic"); else - snd_soc_dapm_disable_pin(codec, "DMic"); + snd_soc_dapm_disable_pin(dapm, "DMic"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int n810_startup(struct snd_pcm_substream *substream) @@ -274,17 +273,18 @@ static const struct snd_kcontrol_new aic33_n810_controls[] = { static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* Not connected */ - snd_soc_dapm_nc_pin(codec, "MONO_LOUT"); - snd_soc_dapm_nc_pin(codec, "HPLCOM"); - snd_soc_dapm_nc_pin(codec, "HPRCOM"); - snd_soc_dapm_nc_pin(codec, "MIC3L"); - snd_soc_dapm_nc_pin(codec, "MIC3R"); - snd_soc_dapm_nc_pin(codec, "LINE1R"); - snd_soc_dapm_nc_pin(codec, "LINE2L"); - snd_soc_dapm_nc_pin(codec, "LINE2R"); + snd_soc_dapm_nc_pin(dapm, "MONO_LOUT"); + snd_soc_dapm_nc_pin(dapm, "HPLCOM"); + snd_soc_dapm_nc_pin(dapm, "HPRCOM"); + snd_soc_dapm_nc_pin(dapm, "MIC3L"); + snd_soc_dapm_nc_pin(dapm, "MIC3R"); + snd_soc_dapm_nc_pin(dapm, "LINE1R"); + snd_soc_dapm_nc_pin(dapm, "LINE2L"); + snd_soc_dapm_nc_pin(dapm, "LINE2R"); /* Add N810 specific controls */ err = snd_soc_add_controls(codec, aic33_n810_controls, @@ -293,13 +293,13 @@ static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add N810 specific widgets */ - snd_soc_dapm_new_controls(codec, aic33_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic33_dapm_widgets, ARRAY_SIZE(aic33_dapm_widgets)); /* Set up N810 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 7e84f24b9a88..d203f4da18a0 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -102,6 +102,17 @@ static const int omap24xx_dma_reqs[][2] = { static const int omap24xx_dma_reqs[][2] = {}; #endif +#if defined(CONFIG_ARCH_OMAP4) +static const int omap44xx_dma_reqs[][2] = { + { OMAP44XX_DMA_MCBSP1_TX, OMAP44XX_DMA_MCBSP1_RX }, + { OMAP44XX_DMA_MCBSP2_TX, OMAP44XX_DMA_MCBSP2_RX }, + { OMAP44XX_DMA_MCBSP3_TX, OMAP44XX_DMA_MCBSP3_RX }, + { OMAP44XX_DMA_MCBSP4_TX, OMAP44XX_DMA_MCBSP4_RX }, +}; +#else +static const int omap44xx_dma_reqs[][2] = {}; +#endif + #if defined(CONFIG_ARCH_OMAP2420) static const unsigned long omap2420_mcbsp_port[][2] = { { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1, @@ -147,6 +158,21 @@ static const unsigned long omap34xx_mcbsp_port[][2] = { static const unsigned long omap34xx_mcbsp_port[][2] = {}; #endif +#if defined(CONFIG_ARCH_OMAP4) +static const unsigned long omap44xx_mcbsp_port[][2] = { + { OMAP44XX_MCBSP1_BASE + 
OMAP_MCBSP_REG_DXR, + OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR }, + { OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR, + OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR }, + { OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR, + OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR }, + { OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR, + OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR }, +}; +#else +static const unsigned long omap44xx_mcbsp_port[][2] = {}; +#endif + static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; @@ -224,7 +250,7 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream, * 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words) * 4 channels: size is 128 / 4 = 32 frames (4 * 32 words) */ - if (cpu_is_omap343x()) { + if (cpu_is_omap343x() || cpu_is_omap44xx()) { /* * Rule for the buffer size. We should not allow * smaller buffer than the FIFO size to avoid underruns @@ -332,6 +358,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream, } else if (cpu_is_omap343x()) { dma = omap24xx_dma_reqs[bus_id][substream->stream]; port = omap34xx_mcbsp_port[bus_id][substream->stream]; + } else if (cpu_is_omap44xx()) { + dma = omap44xx_dma_reqs[bus_id][substream->stream]; + port = omap44xx_mcbsp_port[bus_id][substream->stream]; } else { return -ENODEV; } @@ -498,11 +527,11 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, regs->spcr2 |= XINTM(3) | FREE; regs->spcr1 |= RINTM(3); /* RFIG and XFIG are not defined in 34xx */ - if (!cpu_is_omap34xx()) { + if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) { regs->rcr2 |= RFIG; regs->xcr2 |= XFIG; } - if (cpu_is_omap2430() || cpu_is_omap34xx()) { + if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) { regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE; regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE; } diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h index ffdcc5abb7b9..110c106611d3 100644 --- a/sound/soc/omap/omap-mcbsp.h +++ b/sound/soc/omap/omap-mcbsp.h @@ -50,6 +50,10 @@ enum omap_mcbsp_div { #undef NUM_LINKS #define NUM_LINKS 3 #endif +#if defined(CONFIG_ARCH_OMAP4) +#undef NUM_LINKS +#define NUM_LINKS 4 +#endif #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) #undef NUM_LINKS #define NUM_LINKS 5 diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c index cf3fc8a675b5..29b60d6796e7 100644 --- a/sound/soc/omap/omap2evm.c +++ b/sound/soc/omap/omap2evm.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/omap/omap3beagle.c b/sound/soc/omap/omap3beagle.c index e56832b0c444..40db813c0795 100644 --- a/sound/soc/omap/omap3beagle.c +++ b/sound/soc/omap/omap3beagle.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c index 810f1e36da21..0daa04469836 100644 --- a/sound/soc/omap/omap3evm.c +++ b/sound/soc/omap/omap3evm.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c index 4ee33ce2cb98..8047c521e318 100644 --- a/sound/soc/omap/omap3pandora.c +++ b/sound/soc/omap/omap3pandora.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -170,51 +169,53 @@ static const struct snd_soc_dapm_route omap3pandora_in_map[] = { static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct 
snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* All TWL4030 output pins are floating */ - snd_soc_dapm_nc_pin(codec, "EARPIECE"); - snd_soc_dapm_nc_pin(codec, "PREDRIVEL"); - snd_soc_dapm_nc_pin(codec, "PREDRIVER"); - snd_soc_dapm_nc_pin(codec, "HSOL"); - snd_soc_dapm_nc_pin(codec, "HSOR"); - snd_soc_dapm_nc_pin(codec, "CARKITL"); - snd_soc_dapm_nc_pin(codec, "CARKITR"); - snd_soc_dapm_nc_pin(codec, "HFL"); - snd_soc_dapm_nc_pin(codec, "HFR"); - snd_soc_dapm_nc_pin(codec, "VIBRA"); - - ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets, + snd_soc_dapm_nc_pin(dapm, "EARPIECE"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVEL"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVER"); + snd_soc_dapm_nc_pin(dapm, "HSOL"); + snd_soc_dapm_nc_pin(dapm, "HSOR"); + snd_soc_dapm_nc_pin(dapm, "CARKITL"); + snd_soc_dapm_nc_pin(dapm, "CARKITR"); + snd_soc_dapm_nc_pin(dapm, "HFL"); + snd_soc_dapm_nc_pin(dapm, "HFR"); + snd_soc_dapm_nc_pin(dapm, "VIBRA"); + + ret = snd_soc_dapm_new_controls(dapm, omap3pandora_out_dapm_widgets, ARRAY_SIZE(omap3pandora_out_dapm_widgets)); if (ret < 0) return ret; - snd_soc_dapm_add_routes(codec, omap3pandora_out_map, + snd_soc_dapm_add_routes(dapm, omap3pandora_out_map, ARRAY_SIZE(omap3pandora_out_map)); - return snd_soc_dapm_sync(codec); + return snd_soc_dapm_sync(dapm); } static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* Not comnnected */ - snd_soc_dapm_nc_pin(codec, "HSMIC"); - snd_soc_dapm_nc_pin(codec, "CARKITMIC"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC0"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC1"); + snd_soc_dapm_nc_pin(dapm, "HSMIC"); + snd_soc_dapm_nc_pin(dapm, "CARKITMIC"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC0"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC1"); - ret = snd_soc_dapm_new_controls(codec, omap3pandora_in_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, omap3pandora_in_dapm_widgets, ARRAY_SIZE(omap3pandora_in_dapm_widgets)); if (ret < 0) return ret; - snd_soc_dapm_add_routes(codec, omap3pandora_in_map, + snd_soc_dapm_add_routes(dapm, omap3pandora_in_map, ARRAY_SIZE(omap3pandora_in_map)); - return snd_soc_dapm_sync(codec); + return snd_soc_dapm_sync(dapm); } static struct snd_soc_ops omap3pandora_ops = { diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c index 65ae00e976ef..7e75e775fb4a 100644 --- a/sound/soc/omap/osk5912.c +++ b/sound/soc/omap/osk5912.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -116,19 +115,20 @@ static const struct snd_soc_dapm_route audio_map[] = { static int osk_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; /* Add osk5912 specific widgets */ - snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets, + snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets, ARRAY_SIZE(tlv320aic23_dapm_widgets)); /* Set up osk5912 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Line In"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/omap/overo.c 
b/sound/soc/omap/overo.c index e95a607937de..bbcf380bfb56 100644 --- a/sound/soc/omap/overo.c +++ b/sound/soc/omap/overo.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c index 04b5723bf89b..09fb0df8d416 100644 --- a/sound/soc/omap/rx51.c +++ b/sound/soc/omap/rx51.c @@ -30,14 +30,12 @@ #include #include #include -#include #include #include #include "omap-mcbsp.h" #include "omap-pcm.h" -#include "../codecs/tlv320aic3x.h" #define RX51_TVOUT_SEL_GPIO 40 #define RX51_JACK_DETECT_GPIO 177 @@ -58,19 +56,21 @@ static int rx51_jack_func; static void rx51_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + if (rx51_spk_func) - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else - snd_soc_dapm_disable_pin(codec, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Ext Spk"); if (rx51_dmic_func) - snd_soc_dapm_enable_pin(codec, "DMic"); + snd_soc_dapm_enable_pin(dapm, "DMic"); else - snd_soc_dapm_disable_pin(codec, "DMic"); + snd_soc_dapm_disable_pin(dapm, "DMic"); gpio_set_value(RX51_TVOUT_SEL_GPIO, rx51_jack_func == RX51_JACK_TVOUT); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int rx51_startup(struct snd_pcm_substream *substream) @@ -244,12 +244,13 @@ static const struct snd_kcontrol_new aic34_rx51_controls[] = { static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* Set up NC codec pins */ - snd_soc_dapm_nc_pin(codec, "MIC3L"); - snd_soc_dapm_nc_pin(codec, "MIC3R"); - snd_soc_dapm_nc_pin(codec, "LINE1R"); + snd_soc_dapm_nc_pin(dapm, "MIC3L"); + snd_soc_dapm_nc_pin(dapm, "MIC3R"); + snd_soc_dapm_nc_pin(dapm, "LINE1R"); /* Add RX-51 specific controls */ err = snd_soc_add_controls(codec, aic34_rx51_controls, @@ -258,13 +259,13 @@ static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add RX-51 specific widgets */ - snd_soc_dapm_new_controls(codec, aic34_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic34_dapm_widgets, ARRAY_SIZE(aic34_dapm_widgets)); /* Set up RX-51 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); /* AV jack detection */ err = snd_soc_jack_new(codec, "AV Jack", diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c index 07fbcf7d2411..3f72d17d1ef0 100644 --- a/sound/soc/omap/sdp3430.c +++ b/sound/soc/omap/sdp3430.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -191,39 +190,40 @@ static const struct snd_soc_dapm_route audio_map[] = { static int sdp3430_twl4030_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* Add SDP3430 specific widgets */ - ret = snd_soc_dapm_new_controls(codec, sdp3430_twl4030_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, sdp3430_twl4030_dapm_widgets, ARRAY_SIZE(sdp3430_twl4030_dapm_widgets)); if (ret) return ret; /* Set up SDP3430 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* SDP3430 connected pins */ - snd_soc_dapm_enable_pin(codec, "Ext Mic"); - snd_soc_dapm_enable_pin(codec, "Ext Spk"); - 
snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Headset Stereophone"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Headset Stereophone"); /* TWL4030 not connected pins */ - snd_soc_dapm_nc_pin(codec, "AUXL"); - snd_soc_dapm_nc_pin(codec, "AUXR"); - snd_soc_dapm_nc_pin(codec, "CARKITMIC"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC0"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC1"); - - snd_soc_dapm_nc_pin(codec, "OUTL"); - snd_soc_dapm_nc_pin(codec, "OUTR"); - snd_soc_dapm_nc_pin(codec, "EARPIECE"); - snd_soc_dapm_nc_pin(codec, "PREDRIVEL"); - snd_soc_dapm_nc_pin(codec, "PREDRIVER"); - snd_soc_dapm_nc_pin(codec, "CARKITL"); - snd_soc_dapm_nc_pin(codec, "CARKITR"); - - ret = snd_soc_dapm_sync(codec); + snd_soc_dapm_nc_pin(dapm, "AUXL"); + snd_soc_dapm_nc_pin(dapm, "AUXR"); + snd_soc_dapm_nc_pin(dapm, "CARKITMIC"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC0"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC1"); + + snd_soc_dapm_nc_pin(dapm, "OUTL"); + snd_soc_dapm_nc_pin(dapm, "OUTR"); + snd_soc_dapm_nc_pin(dapm, "EARPIECE"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVEL"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVER"); + snd_soc_dapm_nc_pin(dapm, "CARKITL"); + snd_soc_dapm_nc_pin(dapm, "CARKITR"); + + ret = snd_soc_dapm_sync(dapm); if (ret) return ret; diff --git a/sound/soc/omap/sdp4430.c b/sound/soc/omap/sdp4430.c index 4b4463db6ba0..189e03900637 100644 --- a/sound/soc/omap/sdp4430.c +++ b/sound/soc/omap/sdp4430.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -66,6 +66,21 @@ static struct snd_soc_ops sdp4430_ops = { .hw_params = sdp4430_hw_params, }; +/* Headset jack */ +static struct snd_soc_jack hs_jack; + +/*Headset jack detection DAPM pins */ +static struct snd_soc_jack_pin hs_jack_pins[] = { + { + .pin = "Headset Mic", + .mask = SND_JACK_MICROPHONE, + }, + { + .pin = "Headset Stereophone", + .mask = SND_JACK_HEADPHONE, + }, +}; + static int sdp4430_get_power_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -102,6 +117,7 @@ static const struct snd_soc_dapm_widget sdp4430_twl6040_dapm_widgets[] = { SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_HP("Headset Stereophone", NULL), SND_SOC_DAPM_SPK("Earphone Spk", NULL), + SND_SOC_DAPM_INPUT("Aux/FM Stereo In"), }; static const struct snd_soc_dapm_route audio_map[] = { @@ -124,11 +140,16 @@ static const struct snd_soc_dapm_route audio_map[] = { /* Earphone speaker */ {"Earphone Spk", NULL, "EP"}, + + /* Aux/FM Stereo In: AFML, AFMR */ + {"AFML", NULL, "Aux/FM Stereo In"}, + {"AFMR", NULL, "Aux/FM Stereo In"}, }; static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* Add SDP4430 specific controls */ @@ -138,25 +159,39 @@ static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd) return ret; /* Add SDP4430 specific widgets */ - ret = snd_soc_dapm_new_controls(codec, sdp4430_twl6040_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, sdp4430_twl6040_dapm_widgets, ARRAY_SIZE(sdp4430_twl6040_dapm_widgets)); if (ret) return ret; /* Set up SDP4430 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* SDP4430 connected pins */ - snd_soc_dapm_enable_pin(codec, "Ext Mic"); - snd_soc_dapm_enable_pin(codec, "Ext Spk"); - 
snd_soc_dapm_enable_pin(codec, "Headset Mic"); - snd_soc_dapm_enable_pin(codec, "Headset Stereophone"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "AFML"); + snd_soc_dapm_enable_pin(dapm, "AFMR"); + snd_soc_dapm_enable_pin(dapm, "Headset Mic"); + snd_soc_dapm_enable_pin(dapm, "Headset Stereophone"); + + ret = snd_soc_dapm_sync(dapm); + if (ret) + return ret; + + /* Headset jack detection */ + ret = snd_soc_jack_new(codec, "Headset Jack", + SND_JACK_HEADSET, &hs_jack); + if (ret) + return ret; - /* TWL6040 not connected pins */ - snd_soc_dapm_nc_pin(codec, "AFML"); - snd_soc_dapm_nc_pin(codec, "AFMR"); + ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins), + hs_jack_pins); - ret = snd_soc_dapm_sync(codec); + if (machine_is_omap_4430sdp()) + twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET); + else + snd_soc_jack_report(&hs_jack, SND_JACK_HEADSET, SND_JACK_HEADSET); return ret; } diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c index 718031eeac34..01709940a43c 100644 --- a/sound/soc/omap/zoom2.c +++ b/sound/soc/omap/zoom2.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -162,35 +161,36 @@ static const struct snd_soc_dapm_route audio_map[] = { static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* Add Zoom2 specific widgets */ - ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets, + ret = snd_soc_dapm_new_controls(dapm, zoom2_twl4030_dapm_widgets, ARRAY_SIZE(zoom2_twl4030_dapm_widgets)); if (ret) return ret; /* Set up Zoom2 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* Zoom2 connected pins */ - snd_soc_dapm_enable_pin(codec, "Ext Mic"); - snd_soc_dapm_enable_pin(codec, "Ext Spk"); - snd_soc_dapm_enable_pin(codec, "Headset Mic"); - snd_soc_dapm_enable_pin(codec, "Headset Stereophone"); - snd_soc_dapm_enable_pin(codec, "Aux In"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Headset Mic"); + snd_soc_dapm_enable_pin(dapm, "Headset Stereophone"); + snd_soc_dapm_enable_pin(dapm, "Aux In"); /* TWL4030 not connected pins */ - snd_soc_dapm_nc_pin(codec, "CARKITMIC"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC0"); - snd_soc_dapm_nc_pin(codec, "DIGIMIC1"); - snd_soc_dapm_nc_pin(codec, "EARPIECE"); - snd_soc_dapm_nc_pin(codec, "PREDRIVEL"); - snd_soc_dapm_nc_pin(codec, "PREDRIVER"); - snd_soc_dapm_nc_pin(codec, "CARKITL"); - snd_soc_dapm_nc_pin(codec, "CARKITR"); - - ret = snd_soc_dapm_sync(codec); + snd_soc_dapm_nc_pin(dapm, "CARKITMIC"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC0"); + snd_soc_dapm_nc_pin(dapm, "DIGIMIC1"); + snd_soc_dapm_nc_pin(dapm, "EARPIECE"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVEL"); + snd_soc_dapm_nc_pin(dapm, "PREDRIVER"); + snd_soc_dapm_nc_pin(dapm, "CARKITL"); + snd_soc_dapm_nc_pin(dapm, "CARKITR"); + + ret = snd_soc_dapm_sync(dapm); return ret; } diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index f451acd4935b..fc592f0d5fc7 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -48,51 +47,53 @@ static int corgi_spk_func; static void corgi_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; 
+ /* set up jack connection */ switch (corgi_jack_func) { case CORGI_HP: /* set = unmute headphone */ gpio_set_value(CORGI_GPIO_MUTE_L, 1); gpio_set_value(CORGI_GPIO_MUTE_R, 1); - snd_soc_dapm_disable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_MIC: /* reset = mute headphone */ gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 0); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_LINE: gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 0); - snd_soc_dapm_disable_pin(codec, "Mic Jack"); - snd_soc_dapm_enable_pin(codec, "Line Jack"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Line Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_HEADSET: gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 1); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Headset Jack"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Headset Jack"); break; } if (corgi_spk_func == CORGI_SPK_ON) - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else - snd_soc_dapm_disable_pin(codec, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* signal a DAPM event */ - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int corgi_startup(struct snd_pcm_substream *substream) @@ -279,10 +280,11 @@ static const struct snd_kcontrol_new wm8731_corgi_controls[] = { static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; - snd_soc_dapm_nc_pin(codec, "LLINEIN"); - snd_soc_dapm_nc_pin(codec, "RLINEIN"); + snd_soc_dapm_nc_pin(dapm, "LLINEIN"); + snd_soc_dapm_nc_pin(dapm, "RLINEIN"); /* Add corgi specific controls */ err = snd_soc_add_controls(codec, wm8731_corgi_controls, @@ -291,13 +293,13 @@ static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add corgi specific widgets */ - snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets, ARRAY_SIZE(wm8731_dapm_widgets)); /* Set up corgi specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c 
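[Editorial sketch] The change repeated through the OMAP and PXA board drivers above and below this point is mechanical: with the ASoC multi-component code, the DAPM widget, route, pin and sync helpers take a struct snd_soc_dapm_context * (reached through the codec as &codec->dapm) rather than the struct snd_soc_codec * itself. The following is a minimal sketch of a board init callback written against that API as used in this merge; the board_* arrays and the pin names are placeholders for illustration, not taken from any driver in this diff.

#include <linux/kernel.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Hypothetical board widgets/routes; each real board defines its own set. */
static const struct snd_soc_dapm_widget board_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_MIC("Mic Jack", NULL),
};

static const struct snd_soc_dapm_route board_audio_map[] = {
	{ "Headphone Jack", NULL, "HPOUTL" },	/* codec pin names are placeholders */
	{ "Headphone Jack", NULL, "HPOUTR" },
	{ "MIC1", NULL, "Mic Jack" },
};

static int board_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	/* Pre-multi-component code passed "codec" to the calls below;
	 * they now operate on the DAPM context embedded in the codec. */
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int ret;

	ret = snd_soc_dapm_new_controls(dapm, board_dapm_widgets,
					ARRAY_SIZE(board_dapm_widgets));
	if (ret)
		return ret;

	ret = snd_soc_dapm_add_routes(dapm, board_audio_map,
				      ARRAY_SIZE(board_audio_map));
	if (ret)
		return ret;

	/* Declare connected and unused pins, then commit the new state. */
	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
	snd_soc_dapm_nc_pin(dapm, "LINE1R");	/* unused codec input */

	return snd_soc_dapm_sync(dapm);
}

The same substitution covers the pin-status and bias-level accesses converted in ams-delta.c above, which now use snd_soc_dapm_get_pin_status(dapm, ...) and codec->dapm.bias_level instead of the codec-level equivalents.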
index c82cedb602fd..28333e7d9c50 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -92,23 +91,24 @@ static const struct snd_soc_dapm_route audio_map[] = { static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; - - snd_soc_dapm_nc_pin(codec, "HPOUTL"); - snd_soc_dapm_nc_pin(codec, "HPOUTR"); - snd_soc_dapm_nc_pin(codec, "PHONE"); - snd_soc_dapm_nc_pin(codec, "LINEINL"); - snd_soc_dapm_nc_pin(codec, "LINEINR"); - snd_soc_dapm_nc_pin(codec, "CDINL"); - snd_soc_dapm_nc_pin(codec, "CDINR"); - snd_soc_dapm_nc_pin(codec, "PCBEEP"); - snd_soc_dapm_nc_pin(codec, "MIC2"); - - snd_soc_dapm_new_controls(codec, e740_dapm_widgets, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_nc_pin(dapm, "HPOUTL"); + snd_soc_dapm_nc_pin(dapm, "HPOUTR"); + snd_soc_dapm_nc_pin(dapm, "PHONE"); + snd_soc_dapm_nc_pin(dapm, "LINEINL"); + snd_soc_dapm_nc_pin(dapm, "LINEINR"); + snd_soc_dapm_nc_pin(dapm, "CDINL"); + snd_soc_dapm_nc_pin(dapm, "CDINR"); + snd_soc_dapm_nc_pin(dapm, "PCBEEP"); + snd_soc_dapm_nc_pin(dapm, "MIC2"); + + snd_soc_dapm_new_controls(dapm, e740_dapm_widgets, ARRAY_SIZE(e740_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c index 4c143803a75e..01bf31675c55 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -74,23 +73,24 @@ static const struct snd_soc_dapm_route audio_map[] = { static int e750_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; - - snd_soc_dapm_nc_pin(codec, "LOUT"); - snd_soc_dapm_nc_pin(codec, "ROUT"); - snd_soc_dapm_nc_pin(codec, "PHONE"); - snd_soc_dapm_nc_pin(codec, "LINEINL"); - snd_soc_dapm_nc_pin(codec, "LINEINR"); - snd_soc_dapm_nc_pin(codec, "CDINL"); - snd_soc_dapm_nc_pin(codec, "CDINR"); - snd_soc_dapm_nc_pin(codec, "PCBEEP"); - snd_soc_dapm_nc_pin(codec, "MIC2"); - - snd_soc_dapm_new_controls(codec, e750_dapm_widgets, + struct snd_soc_dapm_context *dapm = &codec->dapm; + + snd_soc_dapm_nc_pin(dapm, "LOUT"); + snd_soc_dapm_nc_pin(dapm, "ROUT"); + snd_soc_dapm_nc_pin(dapm, "PHONE"); + snd_soc_dapm_nc_pin(dapm, "LINEINL"); + snd_soc_dapm_nc_pin(dapm, "LINEINR"); + snd_soc_dapm_nc_pin(dapm, "CDINL"); + snd_soc_dapm_nc_pin(dapm, "CDINR"); + snd_soc_dapm_nc_pin(dapm, "PCBEEP"); + snd_soc_dapm_nc_pin(dapm, "MIC2"); + + snd_soc_dapm_new_controls(dapm, e750_dapm_widgets, ARRAY_SIZE(e750_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index d42e5fe832c5..c6a37c6ef23b 100644 --- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -75,12 +74,13 @@ static const struct snd_soc_dapm_route audio_map[] = { static int e800_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, e800_dapm_widgets, + 
snd_soc_dapm_new_controls(dapm, e800_dapm_widgets, ARRAY_SIZE(e800_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c index eadf9d351a04..fc22e6eefc98 100644 --- a/sound/soc/pxa/em-x270.c +++ b/sound/soc/pxa/em-x270.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c index 5ef0526924b9..67dcc36cd621 100644 --- a/sound/soc/pxa/magician.c +++ b/sound/soc/pxa/magician.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -44,27 +43,29 @@ static int magician_in_sel = MAGICIAN_MIC; static void magician_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + if (magician_spk_switch) - snd_soc_dapm_enable_pin(codec, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); else - snd_soc_dapm_disable_pin(codec, "Speaker"); + snd_soc_dapm_disable_pin(dapm, "Speaker"); if (magician_hp_switch) - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); else - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); switch (magician_in_sel) { case MAGICIAN_MIC: - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_enable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_enable_pin(dapm, "Call Mic"); break; case MAGICIAN_MIC_EXT: - snd_soc_dapm_disable_pin(codec, "Call Mic"); - snd_soc_dapm_enable_pin(codec, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); + snd_soc_dapm_enable_pin(dapm, "Headset Mic"); break; } - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int magician_startup(struct snd_pcm_substream *substream) @@ -399,15 +400,16 @@ static const struct snd_kcontrol_new uda1380_magician_controls[] = { static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* NC codec pins */ - snd_soc_dapm_nc_pin(codec, "VOUTLHP"); - snd_soc_dapm_nc_pin(codec, "VOUTRHP"); + snd_soc_dapm_nc_pin(dapm, "VOUTLHP"); + snd_soc_dapm_nc_pin(dapm, "VOUTRHP"); /* FIXME: is anything connected here? 
*/ - snd_soc_dapm_nc_pin(codec, "VINL"); - snd_soc_dapm_nc_pin(codec, "VINR"); + snd_soc_dapm_nc_pin(dapm, "VINL"); + snd_soc_dapm_nc_pin(dapm, "VINR"); /* Add magician specific controls */ err = snd_soc_add_controls(codec, uda1380_magician_controls, @@ -416,13 +418,13 @@ static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add magician specific widgets */ - snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets, + snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets, ARRAY_SIZE(uda1380_dapm_widgets)); /* Set up magician specific audio path interconnects */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index f284cc54bc80..0d70fc8c12bd 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include @@ -130,13 +129,14 @@ static const struct snd_soc_dapm_route audio_map[] = { static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; unsigned short reg; /* Add mioa701 specific widgets */ - snd_soc_dapm_new_controls(codec, ARRAY_AND_SIZE(mioa701_dapm_widgets)); + snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets)); /* Set up mioa701 specific audio path audio_mapnects */ - snd_soc_dapm_add_routes(codec, ARRAY_AND_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map)); /* Prepare GPIO8 for rear speaker amplifier */ reg = codec->driver->read(codec, AC97_GPIO_CFG); @@ -146,12 +146,12 @@ static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd) reg = codec->driver->read(codec, AC97_3D_CONTROL); codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000); - snd_soc_dapm_enable_pin(codec, "Front Speaker"); - snd_soc_dapm_enable_pin(codec, "Rear Speaker"); - snd_soc_dapm_enable_pin(codec, "Front Mic"); - snd_soc_dapm_enable_pin(codec, "GSM Line In"); - snd_soc_dapm_enable_pin(codec, "GSM Line Out"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_enable_pin(dapm, "Front Speaker"); + snd_soc_dapm_enable_pin(dapm, "Rear Speaker"); + snd_soc_dapm_enable_pin(dapm, "Front Mic"); + snd_soc_dapm_enable_pin(dapm, "GSM Line In"); + snd_soc_dapm_enable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index 13f6d485d571..857db96d4a4f 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -77,37 +76,38 @@ static struct snd_soc_card palm27x_asoc; static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* add palm27x specific widgets */ - err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets, + err = snd_soc_dapm_new_controls(dapm, palm27x_dapm_widgets, ARRAY_SIZE(palm27x_dapm_widgets)); if (err) return err; /* set up palm27x specific audio path audio_map */ - err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + err = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); if (err) return err; /* connected pins */ if (machine_is_palmld()) - snd_soc_dapm_enable_pin(codec, "MIC1"); - snd_soc_dapm_enable_pin(codec, "HPOUTL"); - 
snd_soc_dapm_enable_pin(codec, "HPOUTR"); - snd_soc_dapm_enable_pin(codec, "LOUT2"); - snd_soc_dapm_enable_pin(codec, "ROUT2"); + snd_soc_dapm_enable_pin(dapm, "MIC1"); + snd_soc_dapm_enable_pin(dapm, "HPOUTL"); + snd_soc_dapm_enable_pin(dapm, "HPOUTR"); + snd_soc_dapm_enable_pin(dapm, "LOUT2"); + snd_soc_dapm_enable_pin(dapm, "ROUT2"); /* not connected pins */ - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "MONOOUT"); - snd_soc_dapm_nc_pin(codec, "LINEINL"); - snd_soc_dapm_nc_pin(codec, "LINEINR"); - snd_soc_dapm_nc_pin(codec, "PCBEEP"); - snd_soc_dapm_nc_pin(codec, "PHONE"); - snd_soc_dapm_nc_pin(codec, "MIC2"); - - err = snd_soc_dapm_sync(codec); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "MONOOUT"); + snd_soc_dapm_nc_pin(dapm, "LINEINL"); + snd_soc_dapm_nc_pin(dapm, "LINEINR"); + snd_soc_dapm_nc_pin(dapm, "PCBEEP"); + snd_soc_dapm_nc_pin(dapm, "PHONE"); + snd_soc_dapm_nc_pin(dapm, "MIC2"); + + err = snd_soc_dapm_sync(dapm); if (err) return err; diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c index 84edd0385a21..6298ee115e27 100644 --- a/sound/soc/pxa/poodle.c +++ b/sound/soc/pxa/poodle.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -46,6 +45,8 @@ static int poodle_spk_func; static void poodle_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + /* set up jack connection */ if (poodle_jack_func == POODLE_HP) { /* set = unmute headphone */ @@ -53,23 +54,23 @@ static void poodle_ext_control(struct snd_soc_codec *codec) POODLE_LOCOMO_GPIO_MUTE_L, 1); locomo_gpio_write(&poodle_locomo_device.dev, POODLE_LOCOMO_GPIO_MUTE_R, 1); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); } else { locomo_gpio_write(&poodle_locomo_device.dev, POODLE_LOCOMO_GPIO_MUTE_L, 0); locomo_gpio_write(&poodle_locomo_device.dev, POODLE_LOCOMO_GPIO_MUTE_R, 0); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); } /* set the enpoints to their new connetion states */ if (poodle_spk_func == POODLE_SPK_ON) - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else - snd_soc_dapm_disable_pin(codec, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* signal a DAPM event */ - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int poodle_startup(struct snd_pcm_substream *substream) @@ -244,11 +245,12 @@ static const struct snd_kcontrol_new wm8731_poodle_controls[] = { static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; - snd_soc_dapm_nc_pin(codec, "LLINEIN"); - snd_soc_dapm_nc_pin(codec, "RLINEIN"); - snd_soc_dapm_enable_pin(codec, "MICIN"); + snd_soc_dapm_nc_pin(dapm, "LLINEIN"); + snd_soc_dapm_nc_pin(dapm, "RLINEIN"); + snd_soc_dapm_enable_pin(dapm, "MICIN"); /* Add poodle specific controls */ err = snd_soc_add_controls(codec, wm8731_poodle_controls, @@ -257,13 +259,13 @@ static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add poodle specific widgets */ - snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets, ARRAY_SIZE(wm8731_dapm_widgets)); /* Set up poodle specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - 
snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c index 2cda82bc5d2e..0fd60f423036 100644 --- a/sound/soc/pxa/raumfeld.c +++ b/sound/soc/pxa/raumfeld.c @@ -22,7 +22,6 @@ #include #include #include -#include #include diff --git a/sound/soc/pxa/saarb.c b/sound/soc/pxa/saarb.c index d63cb474b4e1..9595189fc681 100644 --- a/sound/soc/pxa/saarb.c +++ b/sound/soc/pxa/saarb.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -133,20 +132,21 @@ static struct snd_soc_card snd_soc_card_saarb = { static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; - snd_soc_dapm_new_controls(codec, saarb_dapm_widgets, + snd_soc_dapm_new_controls(dapm, saarb_dapm_widgets, ARRAY_SIZE(saarb_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* connected pins */ - snd_soc_dapm_enable_pin(codec, "Ext Speaker"); - snd_soc_dapm_enable_pin(codec, "Ext Mic 1"); - snd_soc_dapm_enable_pin(codec, "Ext Mic 3"); - snd_soc_dapm_disable_pin(codec, "Headset Mic 2"); - snd_soc_dapm_disable_pin(codec, "Headset Stereophone"); + snd_soc_dapm_enable_pin(dapm, "Ext Speaker"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic 1"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic 3"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic 2"); + snd_soc_dapm_disable_pin(dapm, "Headset Stereophone"); - ret = snd_soc_dapm_sync(codec); + ret = snd_soc_dapm_sync(dapm); if (ret) return ret; diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c index 0b30d7de24ec..c2acb69b957a 100644 --- a/sound/soc/pxa/spitz.c +++ b/sound/soc/pxa/spitz.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -46,61 +45,63 @@ static int spitz_spk_func; static void spitz_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + if (spitz_spk_func == SPITZ_SPK_ON) - snd_soc_dapm_enable_pin(codec, "Ext Spk"); + snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else - snd_soc_dapm_disable_pin(codec, "Ext Spk"); + snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* set up jack connection */ switch (spitz_jack_func) { case SPITZ_HP: /* enable and unmute hp jack, disable mic bias */ - snd_soc_dapm_disable_pin(codec, "Headset Jack"); - snd_soc_dapm_disable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 1); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_MIC: /* enable mic jack and bias, mute hp */ - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_LINE: /* enable line jack, disable mic bias and mute hp */ - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset 
Jack"); - snd_soc_dapm_disable_pin(codec, "Mic Jack"); - snd_soc_dapm_enable_pin(codec, "Line Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_HEADSET: /* enable and unmute headset jack enable mic bias, mute L hp */ - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); - snd_soc_dapm_enable_pin(codec, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); + snd_soc_dapm_enable_pin(dapm, "Headset Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_HP_OFF: /* jack removed, everything off */ - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); - snd_soc_dapm_disable_pin(codec, "Mic Jack"); - snd_soc_dapm_disable_pin(codec, "Line Jack"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic Jack"); + snd_soc_dapm_disable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; } - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int spitz_startup(struct snd_pcm_substream *substream) @@ -281,16 +282,17 @@ static const struct snd_kcontrol_new wm8750_spitz_controls[] = { static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* NC codec pins */ - snd_soc_dapm_nc_pin(codec, "RINPUT1"); - snd_soc_dapm_nc_pin(codec, "LINPUT2"); - snd_soc_dapm_nc_pin(codec, "RINPUT2"); - snd_soc_dapm_nc_pin(codec, "LINPUT3"); - snd_soc_dapm_nc_pin(codec, "RINPUT3"); - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "MONO1"); + snd_soc_dapm_nc_pin(dapm, "RINPUT1"); + snd_soc_dapm_nc_pin(dapm, "LINPUT2"); + snd_soc_dapm_nc_pin(dapm, "RINPUT2"); + snd_soc_dapm_nc_pin(dapm, "LINPUT3"); + snd_soc_dapm_nc_pin(dapm, "RINPUT3"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "MONO1"); /* Add spitz specific controls */ err = snd_soc_add_controls(codec, wm8750_spitz_controls, @@ -299,13 +301,13 @@ static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd) return err; /* Add spitz specific widgets */ - snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, ARRAY_SIZE(wm8750_dapm_widgets)); /* Set up spitz specific audio paths */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/tavorevb3.c b/sound/soc/pxa/tavorevb3.c index 248c283fc4df..f881f65ec172 100644 --- a/sound/soc/pxa/tavorevb3.c +++ b/sound/soc/pxa/tavorevb3.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -133,20 +132,21 @@ static struct snd_soc_card snd_soc_card_evb3 = { static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; - snd_soc_dapm_new_controls(codec, 
evb3_dapm_widgets, + snd_soc_dapm_new_controls(dapm, evb3_dapm_widgets, ARRAY_SIZE(evb3_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* connected pins */ - snd_soc_dapm_enable_pin(codec, "Ext Speaker"); - snd_soc_dapm_enable_pin(codec, "Ext Mic 1"); - snd_soc_dapm_enable_pin(codec, "Ext Mic 3"); - snd_soc_dapm_disable_pin(codec, "Headset Mic 2"); - snd_soc_dapm_disable_pin(codec, "Headset Stereophone"); + snd_soc_dapm_enable_pin(dapm, "Ext Speaker"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic 1"); + snd_soc_dapm_enable_pin(dapm, "Ext Mic 3"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic 2"); + snd_soc_dapm_disable_pin(dapm, "Headset Stereophone"); - ret = snd_soc_dapm_sync(codec); + ret = snd_soc_dapm_sync(dapm); if (ret) return ret; diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index 7b983f935454..f75804ef0897 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -49,31 +48,33 @@ static int tosa_spk_func; static void tosa_ext_control(struct snd_soc_codec *codec) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + /* set up jack connection */ switch (tosa_jack_func) { case TOSA_HP: - snd_soc_dapm_disable_pin(codec, "Mic (Internal)"); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic (Internal)"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case TOSA_MIC_INT: - snd_soc_dapm_enable_pin(codec, "Mic (Internal)"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_disable_pin(codec, "Headset Jack"); + snd_soc_dapm_enable_pin(dapm, "Mic (Internal)"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case TOSA_HEADSET: - snd_soc_dapm_disable_pin(codec, "Mic (Internal)"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Headset Jack"); + snd_soc_dapm_disable_pin(dapm, "Mic (Internal)"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Headset Jack"); break; } if (tosa_spk_func == TOSA_SPK_ON) - snd_soc_dapm_enable_pin(codec, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); else - snd_soc_dapm_disable_pin(codec, "Speaker"); + snd_soc_dapm_disable_pin(dapm, "Speaker"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); } static int tosa_startup(struct snd_pcm_substream *substream) @@ -191,10 +192,11 @@ static const struct snd_kcontrol_new tosa_controls[] = { static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "MONOOUT"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "MONOOUT"); /* add tosa specific controls */ err = snd_soc_add_controls(codec, tosa_controls, @@ -203,13 +205,13 @@ static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd) return err; /* add tosa specific widgets */ - snd_soc_dapm_new_controls(codec, tosa_dapm_widgets, + snd_soc_dapm_new_controls(dapm, tosa_dapm_widgets, ARRAY_SIZE(tosa_dapm_widgets)); /* set up tosa specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); 
- snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c index 4cc841b44182..2d4f896d7fec 100644 --- a/sound/soc/pxa/z2.c +++ b/sound/soc/pxa/z2.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -140,22 +139,23 @@ static const struct snd_soc_dapm_route audio_map[] = { static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* NC codec pins */ - snd_soc_dapm_disable_pin(codec, "LINPUT3"); - snd_soc_dapm_disable_pin(codec, "RINPUT3"); - snd_soc_dapm_disable_pin(codec, "OUT3"); - snd_soc_dapm_disable_pin(codec, "MONO"); + snd_soc_dapm_disable_pin(dapm, "LINPUT3"); + snd_soc_dapm_disable_pin(dapm, "RINPUT3"); + snd_soc_dapm_disable_pin(dapm, "OUT3"); + snd_soc_dapm_disable_pin(dapm, "MONO"); /* Add z2 specific widgets */ - snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, ARRAY_SIZE(wm8750_dapm_widgets)); /* Set up z2 specific audio paths */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); - ret = snd_soc_dapm_sync(codec); + ret = snd_soc_dapm_sync(dapm); if (ret) goto err; diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index d27e05af7759..b222a7d72027 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "../codecs/wm9713.h" #include "pxa2xx-ac97.h" @@ -73,21 +72,22 @@ static const struct snd_soc_dapm_route audio_map[] = { static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; if (clk_pout) snd_soc_dai_set_pll(rtd->codec_dai, 0, 0, clk_get_rate(pout), 0); - snd_soc_dapm_new_controls(codec, zylonite_dapm_widgets, + snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets, ARRAY_SIZE(zylonite_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* Static setup for now */ - snd_soc_dapm_enable_pin(codec, "Headphone"); - snd_soc_dapm_enable_pin(codec, "Headset Earpiece"); + snd_soc_dapm_enable_pin(dapm, "Headphone"); + snd_soc_dapm_enable_pin(dapm, "Headset Earpiece"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig deleted file mode 100644 index d85bf8a0abb2..000000000000 --- a/sound/soc/s3c24xx/Kconfig +++ /dev/null @@ -1,171 +0,0 @@ -config SND_S3C24XX_SOC - tristate "SoC Audio for the Samsung S3CXXXX chips" - depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 - select S3C64XX_DMA if ARCH_S3C64XX - select S3C2410_DMA if ARCH_S3C2410 - help - Say Y or M if you want to add support for codecs attached to - the S3C24XX AC97 or I2S interfaces. You will also need to - select the audio interfaces to support below. 
- -config SND_S3C24XX_SOC_I2S - tristate - select S3C2410_DMA - -config SND_S3C_I2SV2_SOC - tristate - -config SND_S3C2412_SOC_I2S - tristate - select SND_S3C_I2SV2_SOC - select S3C2410_DMA - -config SND_S3C64XX_SOC_I2S - tristate - select SND_S3C_I2SV2_SOC - select S3C64XX_DMA - -config SND_S3C64XX_SOC_I2S_V4 - tristate - select SND_S3C_I2SV2_SOC - select S3C64XX_DMA - -config SND_S3C_SOC_PCM - tristate - -config SND_S3C_SOC_AC97 - tristate - select SND_SOC_AC97_BUS - -config SND_S5P_SOC_SPDIF - tristate - select SND_SOC_SPDIF - -config SND_S3C24XX_SOC_NEO1973_WM8753 - tristate "SoC I2S Audio support for NEO1973 - WM8753" - depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA01 - select SND_S3C24XX_SOC_I2S - select SND_SOC_WM8753 - help - Say Y if you want to add support for SoC audio on smdk2440 - with the WM8753. - -config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753 - tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)" - depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02 - select SND_S3C24XX_SOC_I2S - select SND_SOC_WM8753 - help - This driver provides audio support for the Openmoko Neo FreeRunner - smartphone. - -config SND_S3C24XX_SOC_JIVE_WM8750 - tristate "SoC I2S Audio support for Jive" - depends on SND_S3C24XX_SOC && MACH_JIVE - select SND_SOC_WM8750 - select SND_S3C2412_SOC_I2S - help - Sat Y if you want to add support for SoC audio on the Jive. - -config SND_S3C64XX_SOC_WM8580 - tristate "SoC I2S Audio support for WM8580 on SMDK64XX" - depends on SND_S3C24XX_SOC && MACH_SMDK6410 - select SND_SOC_WM8580 - select SND_S3C64XX_SOC_I2S_V4 - help - Say Y if you want to add support for SoC audio on the SMDK6410. - -config SND_S3C24XX_SOC_SMDK2443_WM9710 - tristate "SoC AC97 Audio support for SMDK2443 - WM9710" - depends on SND_S3C24XX_SOC && MACH_SMDK2443 - select S3C2410_DMA - select AC97_BUS - select SND_SOC_AC97_CODEC - select SND_S3C_SOC_AC97 - help - Say Y if you want to add support for SoC audio on smdk2443 - with the WM9710. - -config SND_S3C24XX_SOC_LN2440SBC_ALC650 - tristate "SoC AC97 Audio support for LN2440SBC - ALC650" - depends on SND_S3C24XX_SOC && ARCH_S3C2410 - select S3C2410_DMA - select AC97_BUS - select SND_SOC_AC97_CODEC - select SND_S3C_SOC_AC97 - help - Say Y if you want to add support for SoC audio on ln2440sbc - with the ALC650. - -config SND_S3C24XX_SOC_S3C24XX_UDA134X - tristate "SoC I2S Audio support UDA134X wired to a S3C24XX" - depends on SND_S3C24XX_SOC && ARCH_S3C2410 - select SND_S3C24XX_SOC_I2S - select SND_SOC_L3 - select SND_SOC_UDA134X - -config SND_S3C24XX_SOC_SIMTEC - tristate - help - Internal node for common S3C24XX/Simtec suppor - -config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23 - tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards" - depends on SND_S3C24XX_SOC && ARCH_S3C2410 - select SND_S3C24XX_SOC_I2S - select SND_SOC_TLV320AIC23 - select SND_S3C24XX_SOC_SIMTEC - -config SND_S3C24XX_SOC_SIMTEC_HERMES - tristate "SoC I2S Audio support for Simtec Hermes board" - depends on SND_S3C24XX_SOC && ARCH_S3C2410 - select SND_S3C24XX_SOC_I2S - select SND_SOC_TLV320AIC3X - select SND_S3C24XX_SOC_SIMTEC - -config SND_S3C24XX_SOC_RX1950_UDA1380 - tristate "Audio support for the HP iPAQ RX1950" - depends on SND_S3C24XX_SOC && MACH_RX1950 - select SND_S3C24XX_SOC_I2S - select SND_SOC_UDA1380 - help - This driver provides audio support for HP iPAQ RX1950 PDA. 
- -config SND_SOC_SMDK_WM9713 - tristate "SoC AC97 Audio support for SMDK with WM9713" - depends on SND_S3C24XX_SOC && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110) - select SND_SOC_WM9713 - select SND_S3C_SOC_AC97 - help - Sat Y if you want to add support for SoC audio on the SMDK. - -config SND_S3C64XX_SOC_SMARTQ - tristate "SoC I2S Audio support for SmartQ board" - depends on SND_S3C24XX_SOC && MACH_SMARTQ - select SND_S3C64XX_SOC_I2S - select SND_SOC_WM8750 - -config SND_S5PC110_SOC_AQUILA_WM8994 - tristate "SoC I2S Audio support for AQUILA - WM8994" - depends on SND_S3C24XX_SOC && MACH_AQUILA - select SND_S3C64XX_SOC_I2S_V4 - select SND_SOC_WM8994 - help - Say Y if you want to add support for SoC audio on aquila - with the WM8994. - -config SND_S5PV210_SOC_GONI_WM8994 - tristate "SoC I2S Audio support for GONI - WM8994" - depends on SND_S3C24XX_SOC && MACH_GONI - select SND_S3C64XX_SOC_I2S_V4 - select SND_SOC_WM8994 - help - Say Y if you want to add support for SoC audio on goni - with the WM8994. - -config SND_SOC_SMDK_SPDIF - tristate "SoC S/PDIF Audio support for SMDK" - depends on SND_S3C24XX_SOC && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210) - select SND_S5P_SOC_SPDIF - help - Say Y if you want to add support for SoC S/PDIF audio on the SMDK. diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile deleted file mode 100644 index ee8f41d6df99..000000000000 --- a/sound/soc/s3c24xx/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# S3c24XX Platform Support -snd-soc-s3c24xx-objs := s3c-dma.o -snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o -snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o -snd-soc-s3c64xx-i2s-objs := s3c64xx-i2s.o -snd-soc-s3c-ac97-objs := s3c-ac97.o -snd-soc-s3c64xx-i2s-v4-objs := s3c64xx-i2s-v4.o -snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o -snd-soc-s3c-pcm-objs := s3c-pcm.o -snd-soc-samsung-spdif-objs := spdif.o - -obj-$(CONFIG_SND_S3C24XX_SOC) += snd-soc-s3c24xx.o -obj-$(CONFIG_SND_S3C24XX_SOC_I2S) += snd-soc-s3c24xx-i2s.o -obj-$(CONFIG_SND_S3C_SOC_AC97) += snd-soc-s3c-ac97.o -obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o -obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += snd-soc-s3c64xx-i2s.o -obj-$(CONFIG_SND_S3C64XX_SOC_I2S_V4) += snd-soc-s3c64xx-i2s-v4.o -obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o -obj-$(CONFIG_SND_S3C_SOC_PCM) += snd-soc-s3c-pcm.o -obj-$(CONFIG_SND_S5P_SOC_SPDIF) += snd-soc-samsung-spdif.o - -# S3C24XX Machine Support -snd-soc-jive-wm8750-objs := jive_wm8750.o -snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o -snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o -snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o -snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o -snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o -snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o -snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o -snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o -snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o -snd-soc-smdk64xx-wm8580-objs := smdk64xx_wm8580.o -snd-soc-smdk-wm9713-objs := smdk_wm9713.o -snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o -snd-soc-aquila-wm8994-objs := aquila_wm8994.o -snd-soc-goni-wm8994-objs := goni_wm8994.o -snd-soc-smdk-spdif-objs := smdk_spdif.o - -obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o -obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o -obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o -obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += 
snd-soc-smdk2443-wm9710.o -obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o -obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o -obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o -obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o -obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o -obj-$(CONFIG_SND_S3C24XX_SOC_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o -obj-$(CONFIG_SND_S3C64XX_SOC_WM8580) += snd-soc-smdk64xx-wm8580.o -obj-$(CONFIG_SND_SOC_SMDK_WM9713) += snd-soc-smdk-wm9713.o -obj-$(CONFIG_SND_S3C64XX_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o -obj-$(CONFIG_SND_S5PC110_SOC_AQUILA_WM8994) += snd-soc-aquila-wm8994.o -obj-$(CONFIG_SND_S5PV210_SOC_GONI_WM8994) += snd-soc-goni-wm8994.o -obj-$(CONFIG_SND_SOC_SMDK_SPDIF) += snd-soc-smdk-spdif.o diff --git a/sound/soc/s3c24xx/aquila_wm8994.c b/sound/soc/s3c24xx/aquila_wm8994.c deleted file mode 100644 index 235d1973f7d0..000000000000 --- a/sound/soc/s3c24xx/aquila_wm8994.c +++ /dev/null @@ -1,295 +0,0 @@ -/* - * aquila_wm8994.c - * - * Copyright (C) 2010 Samsung Electronics Co.Ltd - * Author: Chanwoo Choi - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include "../codecs/wm8994.h" -#include "s3c-dma.h" -#include "s3c64xx-i2s.h" - -static struct snd_soc_card aquila; -static struct platform_device *aquila_snd_device; - -/* 3.5 pie jack */ -static struct snd_soc_jack jack; - -/* 3.5 pie jack detection DAPM pins */ -static struct snd_soc_jack_pin jack_pins[] = { - { - .pin = "Headset Mic", - .mask = SND_JACK_MICROPHONE, - }, { - .pin = "Headset Stereophone", - .mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL | - SND_JACK_AVOUT, - }, -}; - -/* 3.5 pie jack detection gpios */ -static struct snd_soc_jack_gpio jack_gpios[] = { - { - .gpio = S5PV210_GPH0(6), - .name = "DET_3.5", - .report = SND_JACK_HEADSET | SND_JACK_MECHANICAL | - SND_JACK_AVOUT, - .debounce_time = 200, - }, -}; - -static const struct snd_soc_dapm_widget aquila_dapm_widgets[] = { - SND_SOC_DAPM_SPK("Ext Spk", NULL), - SND_SOC_DAPM_SPK("Ext Rcv", NULL), - SND_SOC_DAPM_HP("Headset Stereophone", NULL), - SND_SOC_DAPM_MIC("Headset Mic", NULL), - SND_SOC_DAPM_MIC("Main Mic", NULL), - SND_SOC_DAPM_MIC("2nd Mic", NULL), - SND_SOC_DAPM_LINE("Radio In", NULL), -}; - -static const struct snd_soc_dapm_route aquila_dapm_routes[] = { - {"Ext Spk", NULL, "SPKOUTLP"}, - {"Ext Spk", NULL, "SPKOUTLN"}, - - {"Ext Rcv", NULL, "HPOUT2N"}, - {"Ext Rcv", NULL, "HPOUT2P"}, - - {"Headset Stereophone", NULL, "HPOUT1L"}, - {"Headset Stereophone", NULL, "HPOUT1R"}, - - {"IN1RN", NULL, "Headset Mic"}, - {"IN1RP", NULL, "Headset Mic"}, - - {"IN1RN", NULL, "2nd Mic"}, - {"IN1RP", NULL, "2nd Mic"}, - - {"IN1LN", NULL, "Main Mic"}, - {"IN1LP", NULL, "Main Mic"}, - - {"IN2LN", NULL, "Radio In"}, - {"IN2RN", NULL, "Radio In"}, -}; - -static int aquila_wm8994_init(struct snd_soc_pcm_runtime *rtd) -{ - struct snd_soc_codec *codec = rtd->codec; - int ret; - - /* add aquila specific widgets */ - snd_soc_dapm_new_controls(codec, aquila_dapm_widgets, - ARRAY_SIZE(aquila_dapm_widgets)); - - /* set up aquila specific audio routes */ - 
snd_soc_dapm_add_routes(codec, aquila_dapm_routes, - ARRAY_SIZE(aquila_dapm_routes)); - - /* set endpoints to not connected */ - snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN"); - snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP"); - snd_soc_dapm_nc_pin(codec, "LINEOUT1N"); - snd_soc_dapm_nc_pin(codec, "LINEOUT1P"); - snd_soc_dapm_nc_pin(codec, "LINEOUT2N"); - snd_soc_dapm_nc_pin(codec, "LINEOUT2P"); - snd_soc_dapm_nc_pin(codec, "SPKOUTRN"); - snd_soc_dapm_nc_pin(codec, "SPKOUTRP"); - - snd_soc_dapm_sync(codec); - - /* Headset jack detection */ - ret = snd_soc_jack_new(&aquila, "Headset Jack", - SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT, - &jack); - if (ret) - return ret; - - ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins); - if (ret) - return ret; - - ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios); - if (ret) - return ret; - - return 0; -} - -static int aquila_hifi_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_dai *cpu_dai = rtd->cpu_dai; - unsigned int pll_out = 24000000; - int ret = 0; - - /* set the cpu DAI configuration */ - ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | - SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); - if (ret < 0) - return ret; - - /* set the cpu system clock */ - ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK, - 0, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - - /* set codec DAI configuration */ - ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | - SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); - if (ret < 0) - return ret; - - /* set the codec FLL */ - ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out, - params_rate(params) * 256); - if (ret < 0) - return ret; - - /* set the codec system clock */ - ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1, - params_rate(params) * 256, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - - return 0; -} - -static struct snd_soc_ops aquila_hifi_ops = { - .hw_params = aquila_hifi_hw_params, -}; - -static int aquila_voice_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_dai *codec_dai = rtd->codec_dai; - unsigned int pll_out = 24000000; - int ret = 0; - - if (params_rate(params) != 8000) - return -EINVAL; - - /* set codec DAI configuration */ - ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J | - SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM); - if (ret < 0) - return ret; - - /* set the codec FLL */ - ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out, - params_rate(params) * 256); - if (ret < 0) - return ret; - - /* set the codec system clock */ - ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2, - params_rate(params) * 256, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - - return 0; -} - -static struct snd_soc_dai_driver voice_dai = { - .name = "aquila-voice-dai", - .playback = { - .channels_min = 1, - .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000, - .formats = SNDRV_PCM_FMTBIT_S16_LE,}, - .capture = { - .channels_min = 1, - .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000, - .formats = SNDRV_PCM_FMTBIT_S16_LE,}, -}; - -static struct snd_soc_ops aquila_voice_ops = { - .hw_params = aquila_voice_hw_params, -}; - -static struct snd_soc_dai_link aquila_dai[] = { -{ - .name = "WM8994", - .stream_name = "WM8994 HiFi", - 
.cpu_dai_name = "s3c64xx-i2s-v4", - .codec_dai_name = "wm8994-hifi", - .platform_name = "s3c24xx-pcm-audio", - .codec_name = "wm8994-codec.0-0x1a", - .init = aquila_wm8994_init, - .ops = &aquila_hifi_ops, -}, { - .name = "WM8994 Voice", - .stream_name = "Voice", - .cpu_dai_name = "aquila-voice-dai", - .codec_dai_name = "wm8994-voice", - .platform_name = "s3c24xx-pcm-audio", - .codec_name = "wm8994-codec.0-0x1a", - .ops = &aquila_voice_ops, -}, -}; - -static struct snd_soc_card aquila = { - .name = "aquila", - .dai_link = aquila_dai, - .num_links = ARRAY_SIZE(aquila_dai), -}; - -static int __init aquila_init(void) -{ - int ret; - - if (!machine_is_aquila()) - return -ENODEV; - - aquila_snd_device = platform_device_alloc("soc-audio", -1); - if (!aquila_snd_device) - return -ENOMEM; - - /* register voice DAI here */ - ret = snd_soc_register_dai(&aquila_snd_device->dev, &voice_dai); - if (ret) - return ret; - - platform_set_drvdata(aquila_snd_device, &aquila); - ret = platform_device_add(aquila_snd_device); - - if (ret) - platform_device_put(aquila_snd_device); - - return ret; -} - -static void __exit aquila_exit(void) -{ - platform_device_unregister(aquila_snd_device); -} - -module_init(aquila_init); -module_exit(aquila_exit); - -/* Module information */ -MODULE_DESCRIPTION("ALSA SoC WM8994 Aquila(S5PC110)"); -MODULE_AUTHOR("Chanwoo Choi "); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c deleted file mode 100644 index a9628472ebfe..000000000000 --- a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c +++ /dev/null @@ -1,230 +0,0 @@ -/* sound/soc/s3c24xx/s3c64xx-i2s-v4.c - * - * ALSA SoC Audio Layer - S3C64XX I2Sv4 driver - * Copyright (c) 2010 Samsung Electronics Co. Ltd - * Author: Jaswinder Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include - -#include -#include - -#include - -#include -#include - -#include "s3c-dma.h" -#include "regs-i2s-v2.h" -#include "s3c64xx-i2s.h" - -static struct s3c2410_dma_client s3c64xx_dma_client_out = { - .name = "I2Sv4 PCM Stereo out" -}; - -static struct s3c2410_dma_client s3c64xx_dma_client_in = { - .name = "I2Sv4 PCM Stereo in" -}; - -static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_out; -static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_in; -static struct s3c_i2sv2_info s3c64xx_i2sv4; - -static int s3c64xx_i2sv4_probe(struct snd_soc_dai *dai) -{ - struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4; - int ret = 0; - - snd_soc_dai_set_drvdata(dai, i2s); - - ret = s3c_i2sv2_probe(dai, i2s, i2s->base); - - return ret; -} - -static int s3c_i2sv4_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *cpu_dai) -{ - struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai); - struct s3c_dma_params *dma_data; - u32 iismod; - - dev_dbg(cpu_dai->dev, "Entered %s\n", __func__); - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - dma_data = i2s->dma_playback; - else - dma_data = i2s->dma_capture; - - snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); - - iismod = readl(i2s->regs + S3C2412_IISMOD); - dev_dbg(cpu_dai->dev, "%s: r: IISMOD: %x\n", __func__, iismod); - - iismod &= ~S3C64XX_IISMOD_BLC_MASK; - switch (params_format(params)) { - case SNDRV_PCM_FORMAT_S8: - iismod |= S3C64XX_IISMOD_BLC_8BIT; - break; - case SNDRV_PCM_FORMAT_S16_LE: - break; - case SNDRV_PCM_FORMAT_S24_LE: - iismod |= S3C64XX_IISMOD_BLC_24BIT; - break; - } - - writel(iismod, i2s->regs + S3C2412_IISMOD); - dev_dbg(cpu_dai->dev, "%s: w: IISMOD: %x\n", __func__, iismod); - - return 0; -} - -static struct snd_soc_dai_ops s3c64xx_i2sv4_dai_ops = { - .hw_params = s3c_i2sv4_hw_params, -}; - -static struct snd_soc_dai_driver s3c64xx_i2s_v4_dai = { - .symmetric_rates = 1, - .playback = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS, - }, - .capture = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS, - }, - .probe = s3c64xx_i2sv4_probe, - .ops = &s3c64xx_i2sv4_dai_ops, -}; - -static __devinit int s3c64xx_i2sv4_dev_probe(struct platform_device *pdev) -{ - struct s3c_audio_pdata *i2s_pdata; - struct s3c_i2sv2_info *i2s; - struct resource *res; - int ret; - - i2s = &s3c64xx_i2sv4; - - i2s->feature |= S3C_FEATURE_CDCLKCON; - - i2s->dma_capture = &s3c64xx_i2sv4_pcm_stereo_in; - i2s->dma_playback = &s3c64xx_i2sv4_pcm_stereo_out; - - res = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n"); - return -ENXIO; - } - i2s->dma_playback->channel = res->start; - - res = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n"); - return -ENXIO; - } - i2s->dma_capture->channel = res->start; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S SFR address\n"); - return -ENXIO; - } - - if (!request_mem_region(res->start, resource_size(res), - "s3c64xx-i2s-v4")) { - dev_err(&pdev->dev, "Unable to request SFR region\n"); - return -EBUSY; - } - i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD; - i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD; - - i2s->dma_capture->client = &s3c64xx_dma_client_in; - i2s->dma_capture->dma_size = 4; - 
i2s->dma_playback->client = &s3c64xx_dma_client_out; - i2s->dma_playback->dma_size = 4; - - i2s->base = res->start; - - i2s_pdata = pdev->dev.platform_data; - if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) { - dev_err(&pdev->dev, "Unable to configure gpio\n"); - return -EINVAL; - } - - i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus"); - if (IS_ERR(i2s->iis_cclk)) { - dev_err(&pdev->dev, "failed to get audio-bus\n"); - ret = PTR_ERR(i2s->iis_cclk); - goto err; - } - - clk_enable(i2s->iis_cclk); - - ret = s3c_i2sv2_register_dai(&pdev->dev, pdev->id, &s3c64xx_i2s_v4_dai); - if (ret != 0) - goto err_i2sv2; - - return 0; - -err_i2sv2: - clk_put(i2s->iis_cclk); -err: - return ret; -} - -static __devexit int s3c64xx_i2sv4_dev_remove(struct platform_device *pdev) -{ - struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4; - struct resource *res; - - snd_soc_unregister_dai(&pdev->dev); - clk_put(i2s->iis_cclk); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) - release_mem_region(res->start, resource_size(res)); - else - dev_warn(&pdev->dev, "Unable to get I2S SFR address\n"); - - return 0; -} - -static struct platform_driver s3c64xx_i2sv4_driver = { - .probe = s3c64xx_i2sv4_dev_probe, - .remove = s3c64xx_i2sv4_dev_remove, - .driver = { - .name = "s3c64xx-iis-v4", - .owner = THIS_MODULE, - }, -}; - -static int __init s3c64xx_i2sv4_init(void) -{ - return platform_driver_register(&s3c64xx_i2sv4_driver); -} -module_init(s3c64xx_i2sv4_init); - -static void __exit s3c64xx_i2sv4_exit(void) -{ - platform_driver_unregister(&s3c64xx_i2sv4_driver); -} -module_exit(s3c64xx_i2sv4_exit); - -/* Module information */ -MODULE_AUTHOR("Jaswinder Singh, "); -MODULE_DESCRIPTION("S3C64XX I2Sv4 SoC Interface"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:s3c64xx-iis-v4"); diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c deleted file mode 100644 index ae7acb6c4f1d..000000000000 --- a/sound/soc/s3c24xx/s3c64xx-i2s.c +++ /dev/null @@ -1,242 +0,0 @@ -/* sound/soc/s3c24xx/s3c64xx-i2s.c - * - * ALSA SoC Audio Layer - S3C64XX I2S driver - * - * Copyright 2008 Openmoko, Inc. - * Copyright 2008 Simtec Electronics - * Ben Dooks - * http://armlinux.simtec.co.uk/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include - -#include - -#include - -#include -#include - -#include "s3c-dma.h" -#include "regs-i2s-v2.h" -#include "s3c64xx-i2s.h" - -/* The value should be set to maximum of the total number - * of I2Sv3 controllers that any supported SoC has. 
- */ -#define MAX_I2SV3 2 - -static struct s3c2410_dma_client s3c64xx_dma_client_out = { - .name = "I2S PCM Stereo out" -}; - -static struct s3c2410_dma_client s3c64xx_dma_client_in = { - .name = "I2S PCM Stereo in" -}; - -static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[MAX_I2SV3]; -static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[MAX_I2SV3]; -static struct s3c_i2sv2_info s3c64xx_i2s[MAX_I2SV3]; - -struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai) -{ - struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai); - u32 iismod = readl(i2s->regs + S3C2412_IISMOD); - - if (iismod & S3C2412_IISMOD_IMS_SYSMUX) - return i2s->iis_cclk; - else - return i2s->iis_pclk; -} -EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock); - -static int s3c64xx_i2s_probe(struct snd_soc_dai *dai) -{ - struct s3c_i2sv2_info *i2s; - int ret; - - if (dai->id >= MAX_I2SV3) { - dev_err(dai->dev, "id %d out of range\n", dai->id); - return -EINVAL; - } - - i2s = &s3c64xx_i2s[dai->id]; - snd_soc_dai_set_drvdata(dai, i2s); - - i2s->iis_cclk = clk_get(dai->dev, "audio-bus"); - if (IS_ERR(i2s->iis_cclk)) { - dev_err(dai->dev, "failed to get audio-bus\n"); - ret = PTR_ERR(i2s->iis_cclk); - goto err; - } - - clk_enable(i2s->iis_cclk); - - ret = s3c_i2sv2_probe(dai, i2s, i2s->base); - if (ret) - goto err_clk; - - return 0; - -err_clk: - clk_disable(i2s->iis_cclk); - clk_put(i2s->iis_cclk); -err: - kfree(i2s); - return ret; -} - -static int s3c64xx_i2s_remove(struct snd_soc_dai *dai) -{ - struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai); - - clk_disable(i2s->iis_cclk); - clk_put(i2s->iis_cclk); - kfree(i2s); - return 0; -} - -static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops; - -static struct snd_soc_dai_driver s3c64xx_i2s_dai[MAX_I2SV3] = { -{ - .name = "s3c64xx-i2s-0", - .probe = s3c64xx_i2s_probe, - .remove = s3c64xx_i2s_remove, - .playback = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS,}, - .capture = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS,}, - .ops = &s3c64xx_i2s_dai_ops, - .symmetric_rates = 1, -}, { - .name = "s3c64xx-i2s-1", - .probe = s3c64xx_i2s_probe, - .remove = s3c64xx_i2s_remove, - .playback = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS,}, - .capture = { - .channels_min = 2, - .channels_max = 2, - .rates = S3C64XX_I2S_RATES, - .formats = S3C64XX_I2S_FMTS,}, - .ops = &s3c64xx_i2s_dai_ops, - .symmetric_rates = 1, -},}; - -static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev) -{ - struct s3c_audio_pdata *i2s_pdata; - struct s3c_i2sv2_info *i2s; - struct resource *res; - int i, ret; - - if (pdev->id >= MAX_I2SV3) { - dev_err(&pdev->dev, "id %d out of range\n", pdev->id); - return -EINVAL; - } - - i2s = &s3c64xx_i2s[pdev->id]; - - i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id]; - i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id]; - - res = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n"); - return -ENXIO; - } - i2s->dma_playback->channel = res->start; - - res = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n"); - return -ENXIO; - } - i2s->dma_capture->channel = res->start; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Unable to get I2S SFR address\n"); - return -ENXIO; - } - - if 
(!request_mem_region(res->start, resource_size(res), - "s3c64xx-i2s")) { - dev_err(&pdev->dev, "Unable to request SFR region\n"); - return -EBUSY; - } - i2s->base = res->start; - - i2s_pdata = pdev->dev.platform_data; - if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) { - dev_err(&pdev->dev, "Unable to configure gpio\n"); - return -EINVAL; - } - i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD; - i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD; - - i2s->dma_capture->client = &s3c64xx_dma_client_in; - i2s->dma_capture->dma_size = 4; - i2s->dma_playback->client = &s3c64xx_dma_client_out; - i2s->dma_playback->dma_size = 4; - - for (i = 0; i < ARRAY_SIZE(s3c64xx_i2s_dai); i++) { - ret = s3c_i2sv2_register_dai(&pdev->dev, i, - &s3c64xx_i2s_dai[i]); - if (ret != 0) - return ret; - } - - return 0; -} - -static __devexit int s3c64xx_iis_dev_remove(struct platform_device *pdev) -{ - snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c64xx_i2s_dai)); - return 0; -} - -static struct platform_driver s3c64xx_iis_driver = { - .probe = s3c64xx_iis_dev_probe, - .remove = s3c64xx_iis_dev_remove, - .driver = { - .name = "s3c64xx-iis", - .owner = THIS_MODULE, - }, -}; - -static int __init s3c64xx_i2s_init(void) -{ - return platform_driver_register(&s3c64xx_iis_driver); -} -module_init(s3c64xx_i2s_init); - -static void __exit s3c64xx_i2s_exit(void) -{ - platform_driver_unregister(&s3c64xx_iis_driver); -} -module_exit(s3c64xx_i2s_exit); - -/* Module information */ -MODULE_AUTHOR("Ben Dooks, "); -MODULE_DESCRIPTION("S3C64XX I2S SoC Interface"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:s3c64xx-iis"); diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h deleted file mode 100644 index de4075d26f0c..000000000000 --- a/sound/soc/s3c24xx/s3c64xx-i2s.h +++ /dev/null @@ -1,41 +0,0 @@ -/* sound/soc/s3c24xx/s3c64xx-i2s.h - * - * ALSA SoC Audio Layer - S3C64XX I2S driver - * - * Copyright 2008 Openmoko, Inc. - * Copyright 2008 Simtec Electronics - * Ben Dooks - * http://armlinux.simtec.co.uk/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H -#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__ - -struct clk; - -#include "s3c-i2s-v2.h" - -#define S3C64XX_DIV_BCLK S3C_I2SV2_DIV_BCLK -#define S3C64XX_DIV_RCLK S3C_I2SV2_DIV_RCLK -#define S3C64XX_DIV_PRESCALER S3C_I2SV2_DIV_PRESCALER - -#define S3C64XX_CLKSRC_PCLK S3C_I2SV2_CLKSRC_PCLK -#define S3C64XX_CLKSRC_MUX S3C_I2SV2_CLKSRC_AUDIOBUS -#define S3C64XX_CLKSRC_CDCLK S3C_I2SV2_CLKSRC_CDCLK - -#define S3C64XX_I2S_RATES \ - (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \ - SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ - SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) - -#define S3C64XX_I2S_FMTS \ - (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE) - -struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai); - -#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */ diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c index c1244c5bc730..5890e431852f 100644 --- a/sound/soc/s6000/s6105-ipcam.c +++ b/sound/soc/s6000/s6105-ipcam.c @@ -18,11 +18,9 @@ #include #include #include -#include #include -#include "../codecs/tlv320aic3x.h" #include "s6000-pcm.h" #include "s6000-i2s.h" @@ -107,6 +105,7 @@ static int output_type_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = kcontrol->private_data; + struct snd_soc_dapm_context *dapm = &codec->dapm; unsigned int val = (ucontrol->value.enumerated.item[0] != 0); char *differential = "Audio Out Differential"; char *stereo = "Audio Out Stereo"; @@ -114,10 +113,10 @@ static int output_type_put(struct snd_kcontrol *kcontrol, if (kcontrol->private_value == val) return 0; kcontrol->private_value = val; - snd_soc_dapm_disable_pin(codec, val ? differential : stereo); - snd_soc_dapm_sync(codec); - snd_soc_dapm_enable_pin(codec, val ? stereo : differential); - snd_soc_dapm_sync(codec); + snd_soc_dapm_disable_pin(dapm, val ? differential : stereo); + snd_soc_dapm_sync(dapm); + snd_soc_dapm_enable_pin(dapm, val ? 
stereo : differential); + snd_soc_dapm_sync(dapm); return 1; } @@ -137,35 +136,36 @@ static const struct snd_kcontrol_new audio_out_mux = { static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; /* Add s6105 specific widgets */ - snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets, + snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets, ARRAY_SIZE(aic3x_dapm_widgets)); /* Set up s6105 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* not present */ - snd_soc_dapm_nc_pin(codec, "MONO_LOUT"); - snd_soc_dapm_nc_pin(codec, "LINE2L"); - snd_soc_dapm_nc_pin(codec, "LINE2R"); + snd_soc_dapm_nc_pin(dapm, "MONO_LOUT"); + snd_soc_dapm_nc_pin(dapm, "LINE2L"); + snd_soc_dapm_nc_pin(dapm, "LINE2R"); /* not connected */ - snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */ - snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */ - snd_soc_dapm_nc_pin(codec, "LLOUT"); - snd_soc_dapm_nc_pin(codec, "RLOUT"); - snd_soc_dapm_nc_pin(codec, "HPRCOM"); + snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */ + snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */ + snd_soc_dapm_nc_pin(dapm, "LLOUT"); + snd_soc_dapm_nc_pin(dapm, "RLOUT"); + snd_soc_dapm_nc_pin(dapm, "HPRCOM"); /* always connected */ - snd_soc_dapm_enable_pin(codec, "Audio In"); + snd_soc_dapm_enable_pin(dapm, "Audio In"); /* must correspond to audio_out_mux.private_value initializer */ - snd_soc_dapm_disable_pin(codec, "Audio Out Differential"); - snd_soc_dapm_sync(codec); - snd_soc_dapm_enable_pin(codec, "Audio Out Stereo"); + snd_soc_dapm_disable_pin(dapm, "Audio Out Differential"); + snd_soc_dapm_sync(dapm); + snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec)); diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig new file mode 100644 index 000000000000..a6a6b5fa2f2f --- /dev/null +++ b/sound/soc/samsung/Kconfig @@ -0,0 +1,171 @@ +config SND_SOC_SAMSUNG + tristate "ASoC support for Samsung" + depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PV310 + select S3C64XX_DMA if ARCH_S3C64XX + select S3C2410_DMA if ARCH_S3C2410 + help + Say Y or M if you want to add support for codecs attached to + the Samsung SoCs' Audio interfaces. You will also need to + select the audio interfaces to support below. + +config SND_S3C24XX_I2S + tristate + select S3C2410_DMA + +config SND_S3C_I2SV2_SOC + tristate + +config SND_S3C2412_SOC_I2S + tristate + select SND_S3C_I2SV2_SOC + select S3C2410_DMA + +config SND_SAMSUNG_PCM + tristate + +config SND_SAMSUNG_AC97 + tristate + select SND_SOC_AC97_BUS + +config SND_SAMSUNG_SPDIF + tristate + select SND_SOC_SPDIF + +config SND_SAMSUNG_I2S + tristate + +config SND_SOC_SAMSUNG_NEO1973_WM8753 + tristate "SoC I2S Audio support for NEO1973 - WM8753" + depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA01 + select SND_S3C24XX_I2S + select SND_SOC_WM8753 + help + Say Y if you want to add support for SoC audio on smdk2440 + with the WM8753. 
+ +config SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753 + tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)" + depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA02 + select SND_S3C24XX_I2S + select SND_SOC_WM8753 + help + This driver provides audio support for the Openmoko Neo FreeRunner + smartphone. + +config SND_SOC_SAMSUNG_JIVE_WM8750 + tristate "SoC I2S Audio support for Jive" + depends on SND_SOC_SAMSUNG && MACH_JIVE + select SND_SOC_WM8750 + select SND_S3C2412_SOC_I2S + help + Sat Y if you want to add support for SoC audio on the Jive. + +config SND_SOC_SAMSUNG_SMDK_WM8580 + tristate "SoC I2S Audio support for WM8580 on SMDK" + depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110) + select SND_SOC_WM8580 + select SND_SAMSUNG_I2S + help + Say Y if you want to add support for SoC audio on the SMDKs. + +config SND_SOC_SAMSUNG_SMDK_WM8994 + tristate "SoC I2S Audio support for WM8994 on SMDK" + depends on SND_SOC_SAMSUNG && (MACH_SMDKV310 || MACH_SMDKC210) + select SND_SOC_WM8994 + select SND_SAMSUNG_I2S + help + Say Y if you want to add support for SoC audio on the SMDKs. + +config SND_SOC_SAMSUNG_SMDK2443_WM9710 + tristate "SoC AC97 Audio support for SMDK2443 - WM9710" + depends on SND_SOC_SAMSUNG && MACH_SMDK2443 + select S3C2410_DMA + select AC97_BUS + select SND_SOC_AC97_CODEC + select SND_SAMSUNG_AC97 + help + Say Y if you want to add support for SoC audio on smdk2443 + with the WM9710. + +config SND_SOC_SAMSUNG_LN2440SBC_ALC650 + tristate "SoC AC97 Audio support for LN2440SBC - ALC650" + depends on SND_SOC_SAMSUNG && ARCH_S3C2410 + select S3C2410_DMA + select AC97_BUS + select SND_SOC_AC97_CODEC + select SND_SAMSUNG_AC97 + help + Say Y if you want to add support for SoC audio on ln2440sbc + with the ALC650. + +config SND_SOC_SAMSUNG_S3C24XX_UDA134X + tristate "SoC I2S Audio support UDA134X wired to a S3C24XX" + depends on SND_SOC_SAMSUNG && ARCH_S3C2410 + select SND_S3C24XX_I2S + select SND_SOC_L3 + select SND_SOC_UDA134X + +config SND_SOC_SAMSUNG_SIMTEC + tristate + help + Internal node for common S3C24XX/Simtec suppor + +config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23 + tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards" + depends on SND_SOC_SAMSUNG && ARCH_S3C2410 + select SND_S3C24XX_I2S + select SND_SOC_TLV320AIC23 + select SND_SOC_SAMSUNG_SIMTEC + +config SND_SOC_SAMSUNG_SIMTEC_HERMES + tristate "SoC I2S Audio support for Simtec Hermes board" + depends on SND_SOC_SAMSUNG && ARCH_S3C2410 + select SND_S3C24XX_I2S + select SND_SOC_TLV320AIC3X + select SND_SOC_SAMSUNG_SIMTEC + +config SND_SOC_SAMSUNG_H1940_UDA1380 + tristate "Audio support for the HP iPAQ H1940" + depends on SND_SOC_SAMSUNG && ARCH_H1940 + select SND_S3C24XX_I2S + select SND_SOC_UDA1380 + help + This driver provides audio support for HP iPAQ h1940 PDA. + +config SND_SOC_SAMSUNG_RX1950_UDA1380 + tristate "Audio support for the HP iPAQ RX1950" + depends on SND_SOC_SAMSUNG && MACH_RX1950 + select SND_S3C24XX_I2S + select SND_SOC_UDA1380 + help + This driver provides audio support for HP iPAQ RX1950 PDA. + +config SND_SOC_SAMSUNG_SMDK_WM9713 + tristate "SoC AC97 Audio support for SMDK with WM9713" + depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210) + select SND_SOC_WM9713 + select SND_SAMSUNG_AC97 + help + Sat Y if you want to add support for SoC audio on the SMDK. 
+ +config SND_SOC_SMARTQ + tristate "SoC I2S Audio support for SmartQ board" + depends on SND_SOC_SAMSUNG && MACH_SMARTQ + select SND_SAMSUNG_I2S + select SND_SOC_WM8750 + +config SND_SOC_GONI_AQUILA_WM8994 + tristate "SoC I2S Audio support for AQUILA/GONI - WM8994" + depends on SND_SOC_SAMSUNG && (MACH_GONI || MACH_AQUILA) + select SND_SAMSUNG_I2S + select SND_SOC_WM8994 + help + Say Y if you want to add support for SoC audio on goni or aquila + with the WM8994. + +config SND_SOC_SAMSUNG_SMDK_SPDIF + tristate "SoC S/PDIF Audio support for SMDK" + depends on SND_SOC_SAMSUNG && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210) + select SND_SAMSUNG_SPDIF + help + Say Y if you want to add support for SoC S/PDIF audio on the SMDK. diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile new file mode 100644 index 000000000000..705d4e8a6724 --- /dev/null +++ b/sound/soc/samsung/Makefile @@ -0,0 +1,55 @@ +# S3c24XX Platform Support +snd-soc-s3c24xx-objs := dma.o +snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o +snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o +snd-soc-ac97-objs := ac97.o +snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o +snd-soc-samsung-spdif-objs := spdif.o +snd-soc-pcm-objs := pcm.o +snd-soc-i2s-objs := i2s.o + +obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c24xx.o +obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o +obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o +obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o +obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o +obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o +obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o +obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o + +# S3C24XX Machine Support +snd-soc-jive-wm8750-objs := jive_wm8750.o +snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o +snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o +snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o +snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o +snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o +snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o +snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o +snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o +snd-soc-h1940-uda1380-objs := h1940_uda1380.o +snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o +snd-soc-smdk-wm8580-objs := smdk_wm8580.o +snd-soc-smdk-wm8994-objs := smdk_wm8994.o +snd-soc-smdk-wm9713-objs := smdk_wm9713.o +snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o +snd-soc-goni-wm8994-objs := goni_wm8994.o +snd-soc-smdk-spdif-objs := smdk_spdif.o + +obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o +obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o +obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o +obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o +obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o +obj-$(CONFIG_SND_SOC_SAMSUNG_H1940_UDA1380) += snd-soc-h1940-uda1380.o +obj-$(CONFIG_SND_SOC_SAMSUNG_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o 
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o +obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o +obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o +obj-$(CONFIG_SND_SOC_GONI_AQUILA_WM8994) += snd-soc-goni-wm8994.o diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/samsung/ac97.c similarity index 96% rename from sound/soc/s3c24xx/s3c-ac97.c rename to sound/soc/samsung/ac97.c index f891eb79b575..4770a9550341 100644 --- a/sound/soc/s3c24xx/s3c-ac97.c +++ b/sound/soc/samsung/ac97.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c-ac97.c +/* sound/soc/samsung/ac97.c * * ALSA SoC Audio Layer - S3C AC97 Controller driver * Evolved from s3c2443-ac97.c @@ -24,8 +24,8 @@ #include #include -#include "s3c-dma.h" -#include "s3c-ac97.h" +#include "dma.h" +#include "ac97.h" #define AC_CMD_ADDR(x) (x << 16) #define AC_CMD_DATA(x) (x & 0xffff) @@ -122,7 +122,7 @@ static unsigned short s3c_ac97_read(struct snd_ac97 *ac97, data = (stat & 0xffff); if (addr != reg) - pr_err("s3c-ac97: req addr = %02x, rep addr = %02x\n", + pr_err("ac97: req addr = %02x, rep addr = %02x\n", reg, addr); mutex_unlock(&s3c_ac97.lock); @@ -334,7 +334,7 @@ static struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = { static struct snd_soc_dai_driver s3c_ac97_dai[] = { [S3C_AC97_DAI_PCM] = { - .name = "s3c-ac97", + .name = "samsung-ac97", .ac97_control = 1, .playback = { .stream_name = "AC97 Playback", @@ -351,7 +351,7 @@ static struct snd_soc_dai_driver s3c_ac97_dai[] = { .ops = &s3c_ac97_dai_ops, }, [S3C_AC97_DAI_MIC] = { - .name = "s3c-ac97-mic", + .name = "samsung-ac97-mic", .ac97_control = 1, .capture = { .stream_name = "AC97 Mic Capture", @@ -407,7 +407,7 @@ static __devinit int s3c_ac97_probe(struct platform_device *pdev) } if (!request_mem_region(mem_res->start, - resource_size(mem_res), "s3c-ac97")) { + resource_size(mem_res), "ac97")) { dev_err(&pdev->dev, "Unable to request register region\n"); return -EBUSY; } @@ -431,7 +431,7 @@ static __devinit int s3c_ac97_probe(struct platform_device *pdev) s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97"); if (IS_ERR(s3c_ac97.ac97_clk)) { - dev_err(&pdev->dev, "s3c-ac97 failed to get ac97_clock\n"); + dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n"); ret = -ENODEV; goto err2; } @@ -446,7 +446,7 @@ static __devinit int s3c_ac97_probe(struct platform_device *pdev) ret = request_irq(irq_res->start, s3c_ac97_irq, IRQF_DISABLED, "AC97", NULL); if (ret < 0) { - dev_err(&pdev->dev, "s3c-ac97: interrupt request failed.\n"); + dev_err(&pdev->dev, "ac97: interrupt request failed.\n"); goto err4; } @@ -497,7 +497,7 @@ static struct platform_driver s3c_ac97_driver = { .probe = s3c_ac97_probe, .remove = s3c_ac97_remove, .driver = { - .name = "s3c-ac97", + .name = "samsung-ac97", .owner = THIS_MODULE, }, }; @@ -517,4 +517,4 @@ module_exit(s3c_ac97_exit); MODULE_AUTHOR("Jaswinder Singh, "); MODULE_DESCRIPTION("AC97 driver for the Samsung SoC"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:s3c-ac97"); +MODULE_ALIAS("platform:samsung-ac97"); diff --git a/sound/soc/s3c24xx/s3c-ac97.h b/sound/soc/samsung/ac97.h similarity index 73% rename from sound/soc/s3c24xx/s3c-ac97.h rename to sound/soc/samsung/ac97.h index 5dcedd07fdbb..0d0e1b511457 100644 --- a/sound/soc/s3c24xx/s3c-ac97.h +++ b/sound/soc/samsung/ac97.h @@ -1,11 +1,11 @@ -/* sound/soc/s3c24xx/s3c-ac97.h +/* sound/soc/samsung/ac97.h * * ALSA SoC Audio Layer - S3C AC97 Controller driver - * Evolved from s3c2443-ac97.h + * Evolved from s3c2443-ac97.h * * Copyright (c) 2010 Samsung Electronics Co. 
Ltd - * Author: Jaswinder Singh - * Credits: Graeme Gregory, Sean Choi + * Author: Jaswinder Singh + * Credits: Graeme Gregory, Sean Choi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/samsung/dma.c similarity index 74% rename from sound/soc/s3c24xx/s3c-dma.c rename to sound/soc/samsung/dma.c index 243f79bf43bb..21240198c5d6 100644 --- a/sound/soc/s3c24xx/s3c-dma.c +++ b/sound/soc/samsung/dma.c @@ -1,5 +1,5 @@ /* - * s3c-dma.c -- ALSA Soc Audio Layer + * dma.c -- ALSA Soc Audio Layer * * (c) 2006 Wolfson Microelectronics PLC. * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com @@ -30,9 +30,9 @@ #include #include -#include "s3c-dma.h" +#include "dma.h" -static const struct snd_pcm_hardware s3c_dma_hardware = { +static const struct snd_pcm_hardware dma_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | @@ -53,7 +53,7 @@ static const struct snd_pcm_hardware s3c_dma_hardware = { .fifo_size = 32, }; -struct s3c24xx_runtime_data { +struct runtime_data { spinlock_t lock; int state; unsigned int dma_loaded; @@ -65,14 +65,14 @@ struct s3c24xx_runtime_data { struct s3c_dma_params *params; }; -/* s3c_dma_enqueue +/* dma_enqueue * * place a dma buffer onto the queue for the dma system * to handle. */ -static void s3c_dma_enqueue(struct snd_pcm_substream *substream) +static void dma_enqueue(struct snd_pcm_substream *substream) { - struct s3c24xx_runtime_data *prtd = substream->runtime->private_data; + struct runtime_data *prtd = substream->runtime->private_data; dma_addr_t pos = prtd->dma_pos; unsigned int limit; int ret; @@ -112,12 +112,12 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream) prtd->dma_pos = pos; } -static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel, +static void audio_buffdone(struct s3c2410_dma_chan *channel, void *dev_id, int size, enum s3c2410_dma_buffresult result) { struct snd_pcm_substream *substream = dev_id; - struct s3c24xx_runtime_data *prtd; + struct runtime_data *prtd; pr_debug("Entered %s\n", __func__); @@ -132,17 +132,17 @@ static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel, spin_lock(&prtd->lock); if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) { prtd->dma_loaded--; - s3c_dma_enqueue(substream); + dma_enqueue(substream); } spin_unlock(&prtd->lock); } -static int s3c_dma_hw_params(struct snd_pcm_substream *substream, +static int dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; - struct s3c24xx_runtime_data *prtd = runtime->private_data; + struct runtime_data *prtd = runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; unsigned long totbytes = params_buffer_bytes(params); struct s3c_dma_params *dma = @@ -181,7 +181,7 @@ static int s3c_dma_hw_params(struct snd_pcm_substream *substream, } s3c2410_dma_set_buffdone_fn(prtd->params->channel, - s3c24xx_audio_buffdone); + audio_buffdone); snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); @@ -199,9 +199,9 @@ static int s3c_dma_hw_params(struct snd_pcm_substream *substream, return 0; } -static int s3c_dma_hw_free(struct snd_pcm_substream *substream) +static int dma_hw_free(struct snd_pcm_substream *substream) { - struct s3c24xx_runtime_data *prtd = substream->runtime->private_data; + struct runtime_data *prtd = 
substream->runtime->private_data; pr_debug("Entered %s\n", __func__); @@ -216,9 +216,9 @@ static int s3c_dma_hw_free(struct snd_pcm_substream *substream) return 0; } -static int s3c_dma_prepare(struct snd_pcm_substream *substream) +static int dma_prepare(struct snd_pcm_substream *substream) { - struct s3c24xx_runtime_data *prtd = substream->runtime->private_data; + struct runtime_data *prtd = substream->runtime->private_data; int ret = 0; pr_debug("Entered %s\n", __func__); @@ -249,14 +249,14 @@ static int s3c_dma_prepare(struct snd_pcm_substream *substream) prtd->dma_pos = prtd->dma_start; /* enqueue dma buffers */ - s3c_dma_enqueue(substream); + dma_enqueue(substream); return ret; } -static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd) +static int dma_trigger(struct snd_pcm_substream *substream, int cmd) { - struct s3c24xx_runtime_data *prtd = substream->runtime->private_data; + struct runtime_data *prtd = substream->runtime->private_data; int ret = 0; pr_debug("Entered %s\n", __func__); @@ -289,10 +289,10 @@ static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd) } static snd_pcm_uframes_t -s3c_dma_pointer(struct snd_pcm_substream *substream) +dma_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; - struct s3c24xx_runtime_data *prtd = runtime->private_data; + struct runtime_data *prtd = runtime->private_data; unsigned long res; dma_addr_t src, dst; @@ -324,17 +324,17 @@ s3c_dma_pointer(struct snd_pcm_substream *substream) return bytes_to_frames(substream->runtime, res); } -static int s3c_dma_open(struct snd_pcm_substream *substream) +static int dma_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; - struct s3c24xx_runtime_data *prtd; + struct runtime_data *prtd; pr_debug("Entered %s\n", __func__); snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); - snd_soc_set_runtime_hwparams(substream, &s3c_dma_hardware); + snd_soc_set_runtime_hwparams(substream, &dma_hardware); - prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL); + prtd = kzalloc(sizeof(struct runtime_data), GFP_KERNEL); if (prtd == NULL) return -ENOMEM; @@ -344,22 +344,22 @@ static int s3c_dma_open(struct snd_pcm_substream *substream) return 0; } -static int s3c_dma_close(struct snd_pcm_substream *substream) +static int dma_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; - struct s3c24xx_runtime_data *prtd = runtime->private_data; + struct runtime_data *prtd = runtime->private_data; pr_debug("Entered %s\n", __func__); if (!prtd) - pr_debug("s3c_dma_close called with prtd == NULL\n"); + pr_debug("dma_close called with prtd == NULL\n"); kfree(prtd); return 0; } -static int s3c_dma_mmap(struct snd_pcm_substream *substream, +static int dma_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; @@ -372,23 +372,23 @@ static int s3c_dma_mmap(struct snd_pcm_substream *substream, runtime->dma_bytes); } -static struct snd_pcm_ops s3c_dma_ops = { - .open = s3c_dma_open, - .close = s3c_dma_close, +static struct snd_pcm_ops dma_ops = { + .open = dma_open, + .close = dma_close, .ioctl = snd_pcm_lib_ioctl, - .hw_params = s3c_dma_hw_params, - .hw_free = s3c_dma_hw_free, - .prepare = s3c_dma_prepare, - .trigger = s3c_dma_trigger, - .pointer = s3c_dma_pointer, - .mmap = s3c_dma_mmap, + .hw_params = dma_hw_params, + .hw_free = dma_hw_free, + .prepare = dma_prepare, + 
.trigger = dma_trigger, + .pointer = dma_pointer, + .mmap = dma_mmap, }; -static int s3c_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) +static int preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; - size_t size = s3c_dma_hardware.buffer_bytes_max; + size_t size = dma_hardware.buffer_bytes_max; pr_debug("Entered %s\n", __func__); @@ -403,7 +403,7 @@ static int s3c_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) return 0; } -static void s3c_dma_free_dma_buffers(struct snd_pcm *pcm) +static void dma_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; @@ -426,9 +426,9 @@ static void s3c_dma_free_dma_buffers(struct snd_pcm *pcm) } } -static u64 s3c_dma_mask = DMA_BIT_MASK(32); +static u64 dma_mask = DMA_BIT_MASK(32); -static int s3c_dma_new(struct snd_card *card, +static int dma_new(struct snd_card *card, struct snd_soc_dai *dai, struct snd_pcm *pcm) { int ret = 0; @@ -436,67 +436,67 @@ static int s3c_dma_new(struct snd_card *card, pr_debug("Entered %s\n", __func__); if (!card->dev->dma_mask) - card->dev->dma_mask = &s3c_dma_mask; + card->dev->dma_mask = &dma_mask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = 0xffffffff; if (dai->driver->playback.channels_min) { - ret = s3c_preallocate_dma_buffer(pcm, + ret = preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) goto out; } if (dai->driver->capture.channels_min) { - ret = s3c_preallocate_dma_buffer(pcm, + ret = preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) goto out; } - out: +out: return ret; } -static struct snd_soc_platform_driver s3c24xx_soc_platform = { - .ops = &s3c_dma_ops, - .pcm_new = s3c_dma_new, - .pcm_free = s3c_dma_free_dma_buffers, +static struct snd_soc_platform_driver samsung_asoc_platform = { + .ops = &dma_ops, + .pcm_new = dma_new, + .pcm_free = dma_free_dma_buffers, }; -static int __devinit s3c24xx_soc_platform_probe(struct platform_device *pdev) +static int __devinit samsung_asoc_platform_probe(struct platform_device *pdev) { - return snd_soc_register_platform(&pdev->dev, &s3c24xx_soc_platform); + return snd_soc_register_platform(&pdev->dev, &samsung_asoc_platform); } -static int __devexit s3c24xx_soc_platform_remove(struct platform_device *pdev) +static int __devexit samsung_asoc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } -static struct platform_driver s3c24xx_pcm_driver = { +static struct platform_driver asoc_dma_driver = { .driver = { - .name = "s3c24xx-pcm-audio", + .name = "samsung-audio", .owner = THIS_MODULE, }, - .probe = s3c24xx_soc_platform_probe, - .remove = __devexit_p(s3c24xx_soc_platform_remove), + .probe = samsung_asoc_platform_probe, + .remove = __devexit_p(samsung_asoc_platform_remove), }; -static int __init snd_s3c24xx_pcm_init(void) +static int __init samsung_asoc_init(void) { - return platform_driver_register(&s3c24xx_pcm_driver); + return platform_driver_register(&asoc_dma_driver); } -module_init(snd_s3c24xx_pcm_init); +module_init(samsung_asoc_init); -static void __exit snd_s3c24xx_pcm_exit(void) +static void __exit samsung_asoc_exit(void) { - platform_driver_unregister(&s3c24xx_pcm_driver); + platform_driver_unregister(&asoc_dma_driver); } -module_exit(snd_s3c24xx_pcm_exit); +module_exit(samsung_asoc_exit); MODULE_AUTHOR("Ben Dooks, "); -MODULE_DESCRIPTION("Samsung S3C Audio DMA module"); 
+MODULE_DESCRIPTION("Samsung ASoC DMA Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:s3c24xx-pcm-audio"); +MODULE_ALIAS("platform:samsung-audio"); diff --git a/sound/soc/s3c24xx/s3c-dma.h b/sound/soc/samsung/dma.h similarity index 97% rename from sound/soc/s3c24xx/s3c-dma.h rename to sound/soc/samsung/dma.h index 748c07d7c075..f8cd2b4223af 100644 --- a/sound/soc/s3c24xx/s3c-dma.h +++ b/sound/soc/samsung/dma.h @@ -1,5 +1,5 @@ /* - * s3c-dma.h -- + * dma.h -- * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/sound/soc/s3c24xx/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c similarity index 84% rename from sound/soc/s3c24xx/goni_wm8994.c rename to sound/soc/samsung/goni_wm8994.c index 694f702cc8e2..34dd9ef1b9c0 100644 --- a/sound/soc/s3c24xx/goni_wm8994.c +++ b/sound/soc/samsung/goni_wm8994.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -25,8 +24,16 @@ #include #include #include "../codecs/wm8994.h" -#include "s3c-dma.h" -#include "s3c64xx-i2s.h" +#include "dma.h" +#include "i2s.h" + +#define MACHINE_NAME 0 +#define CPU_VOICE_DAI 1 + +static const char *aquila_str[] = { + [MACHINE_NAME] = "aquila", + [CPU_VOICE_DAI] = "aquila-voice-dai", +}; static struct snd_soc_card goni; static struct platform_device *goni_snd_device; @@ -97,28 +104,34 @@ static const struct snd_soc_dapm_route goni_dapm_routes[] = { static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* add goni specific widgets */ - snd_soc_dapm_new_controls(codec, goni_dapm_widgets, + snd_soc_dapm_new_controls(dapm, goni_dapm_widgets, ARRAY_SIZE(goni_dapm_widgets)); /* set up goni specific audio routes */ - snd_soc_dapm_add_routes(codec, goni_dapm_routes, + snd_soc_dapm_add_routes(dapm, goni_dapm_routes, ARRAY_SIZE(goni_dapm_routes)); /* set endpoints to not connected */ - snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN"); - snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP"); - snd_soc_dapm_nc_pin(codec, "LINEOUT1N"); - snd_soc_dapm_nc_pin(codec, "LINEOUT1P"); - snd_soc_dapm_nc_pin(codec, "LINEOUT2N"); - snd_soc_dapm_nc_pin(codec, "LINEOUT2P"); + snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN"); + snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT1N"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT1P"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT2N"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT2P"); - snd_soc_dapm_sync(codec); + if (machine_is_aquila()) { + snd_soc_dapm_nc_pin(dapm, "SPKOUTRN"); + snd_soc_dapm_nc_pin(dapm, "SPKOUTRP"); + } + + snd_soc_dapm_sync(dapm); /* Headset jack detection */ - ret = snd_soc_jack_new(&goni, "Headset Jack", + ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT, &jack); if (ret) @@ -150,12 +163,6 @@ static int goni_hifi_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - /* set the cpu system clock */ - ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK, - 0, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); @@ -236,9 +243,9 @@ static struct snd_soc_dai_link goni_dai[] = { { .name = "WM8994", .stream_name = "WM8994 HiFi", - .cpu_dai_name = "s3c64xx-i2s-v4", + .cpu_dai_name = "samsung-i2s.0", .codec_dai_name = "wm8994-hifi", - .platform_name = 
"s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "wm8994-codec.0-0x1a", .init = goni_wm8994_init, .ops = &goni_hifi_ops, @@ -247,7 +254,7 @@ static struct snd_soc_dai_link goni_dai[] = { .stream_name = "Voice", .cpu_dai_name = "goni-voice-dai", .codec_dai_name = "wm8994-voice", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "wm8994-codec.0-0x1a", .ops = &goni_voice_ops, }, @@ -263,7 +270,11 @@ static int __init goni_init(void) { int ret; - if (!machine_is_goni()) + if (machine_is_aquila()) { + voice_dai.name = aquila_str[CPU_VOICE_DAI]; + goni_dai[1].cpu_dai_name = aquila_str[CPU_VOICE_DAI]; + goni.name = aquila_str[MACHINE_NAME]; + } else if (!machine_is_goni()) return -ENODEV; goni_snd_device = platform_device_alloc("soc-audio", -1); @@ -272,20 +283,25 @@ static int __init goni_init(void) /* register voice DAI here */ ret = snd_soc_register_dai(&goni_snd_device->dev, &voice_dai); - if (ret) + if (ret) { + platform_device_put(goni_snd_device); return ret; + } platform_set_drvdata(goni_snd_device, &goni); ret = platform_device_add(goni_snd_device); - if (ret) + if (ret) { + snd_soc_unregister_dai(&goni_snd_device->dev); platform_device_put(goni_snd_device); + } return ret; } static void __exit goni_exit(void) { + snd_soc_unregister_dai(&goni_snd_device->dev); platform_device_unregister(goni_snd_device); } diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c new file mode 100644 index 000000000000..c45f7ce14d61 --- /dev/null +++ b/sound/soc/samsung/h1940_uda1380.c @@ -0,0 +1,296 @@ +/* + * h1940-uda1380.c -- ALSA Soc Audio Layer + * + * Copyright (c) 2010 Arnaud Patard + * Copyright (c) 2010 Vasily Khoruzhick + * + * Based on version from Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#include + +#include "dma.h" +#include "s3c24xx-i2s.h" +#include "../codecs/uda1380.h" + +static unsigned int rates[] = { + 11025, + 22050, + 44100, +}; + +static struct snd_pcm_hw_constraint_list hw_rates = { + .count = ARRAY_SIZE(rates), + .list = rates, + .mask = 0, +}; + +static struct snd_soc_jack hp_jack; + +static struct snd_soc_jack_pin hp_jack_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Speaker", + .mask = SND_JACK_HEADPHONE, + .invert = 1, + }, +}; + +static struct snd_soc_jack_gpio hp_jack_gpios[] = { + { + .gpio = S3C2410_GPG(4), + .name = "hp-gpio", + .report = SND_JACK_HEADPHONE, + .invert = 1, + .debounce_time = 200, + }, +}; + +static int h1940_startup(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + + runtime->hw.rate_min = hw_rates.list[0]; + runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1]; + runtime->hw.rates = SNDRV_PCM_RATE_KNOT; + + return snd_pcm_hw_constraint_list(runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + &hw_rates); +} + +static int h1940_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + int div; + int ret; + unsigned int rate = params_rate(params); + + switch (rate) { + case 11025: + case 22050: + case 44100: + div = s3c24xx_i2s_get_clockrate() / (384 * rate); + if (s3c24xx_i2s_get_clockrate() % (384 * rate) > (192 * rate)) + div++; + break; + default: + dev_err(&rtd->dev, "%s: rate %d is not supported\n", + __func__, rate); + return -EINVAL; + } + + /* set codec DAI configuration */ + ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | + SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); + if (ret < 0) + return ret; + + /* set cpu DAI configuration */ + ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | + SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); + if (ret < 0) + return ret; + + /* select clock source */ + ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate, + SND_SOC_CLOCK_OUT); + if (ret < 0) + return ret; + + /* set MCLK division for sample rate */ + ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, + S3C2410_IISMOD_384FS); + if (ret < 0) + return ret; + + /* set BCLK division for sample rate */ + ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK, + S3C2410_IISMOD_32FS); + if (ret < 0) + return ret; + + /* set prescaler division for sample rate */ + ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER, + S3C24XX_PRESCALE(div, div)); + if (ret < 0) + return ret; + + return 0; +} + +static struct snd_soc_ops h1940_ops = { + .startup = h1940_startup, + .hw_params = h1940_hw_params, +}; + +static int h1940_spk_power(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + if (SND_SOC_DAPM_EVENT_ON(event)) + gpio_set_value(H1940_LATCH_AUDIO_POWER, 1); + else + gpio_set_value(H1940_LATCH_AUDIO_POWER, 0); + + return 0; +} + +/* h1940 machine dapm widgets */ +static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Mic Jack", NULL), + SND_SOC_DAPM_SPK("Speaker", h1940_spk_power), +}; + +/* h1940 machine audio_map */ +static const struct snd_soc_dapm_route audio_map[] = { + /* headphone connected to VOUTLHP, VOUTRHP */ + 
{"Headphone Jack", NULL, "VOUTLHP"}, + {"Headphone Jack", NULL, "VOUTRHP"}, + + /* ext speaker connected to VOUTL, VOUTR */ + {"Speaker", NULL, "VOUTL"}, + {"Speaker", NULL, "VOUTR"}, + + /* mic is connected to VINM */ + {"VINM", NULL, "Mic Jack"}, +}; + +static struct platform_device *s3c24xx_snd_device; + +static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; + int err; + + /* Add h1940 specific widgets */ + err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets, + ARRAY_SIZE(uda1380_dapm_widgets)); + if (err) + return err; + + /* Set up h1940 specific audio path audio_mapnects */ + err = snd_soc_dapm_add_routes(dapm, audio_map, + ARRAY_SIZE(audio_map)); + if (err) + return err; + + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); + + snd_soc_dapm_sync(dapm); + + snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, + &hp_jack); + + snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins), + hp_jack_pins); + + snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), + hp_jack_gpios); + + return 0; +} + +/* s3c24xx digital audio interface glue - connects codec <--> CPU */ +static struct snd_soc_dai_link h1940_uda1380_dai[] = { + { + .name = "uda1380", + .stream_name = "UDA1380 Duplex", + .cpu_dai_name = "s3c24xx-iis", + .codec_dai_name = "uda1380-hifi", + .init = h1940_uda1380_init, + .platform_name = "samsung-audio", + .codec_name = "uda1380-codec.0-001a", + .ops = &h1940_ops, + }, +}; + +static struct snd_soc_card h1940_asoc = { + .name = "h1940", + .dai_link = h1940_uda1380_dai, + .num_links = ARRAY_SIZE(h1940_uda1380_dai), +}; + +static int __init h1940_init(void) +{ + int ret; + + if (!machine_is_h1940()) + return -ENODEV; + + /* configure some gpios */ + ret = gpio_request(H1940_LATCH_AUDIO_POWER, "speaker-power"); + if (ret) + goto err_out; + + ret = gpio_direction_output(H1940_LATCH_AUDIO_POWER, 0); + if (ret) + goto err_gpio; + + s3c24xx_snd_device = platform_device_alloc("soc-audio", -1); + if (!s3c24xx_snd_device) { + ret = -ENOMEM; + goto err_gpio; + } + + platform_set_drvdata(s3c24xx_snd_device, &h1940_asoc); + ret = platform_device_add(s3c24xx_snd_device); + + if (ret) + goto err_plat; + + return 0; + +err_plat: + platform_device_put(s3c24xx_snd_device); +err_gpio: + gpio_free(H1940_LATCH_AUDIO_POWER); + +err_out: + return ret; +} + +static void __exit h1940_exit(void) +{ + platform_device_unregister(s3c24xx_snd_device); + snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), + hp_jack_gpios); + gpio_free(H1940_LATCH_AUDIO_POWER); +} + +module_init(h1940_init); +module_exit(h1940_exit); + +/* Module information */ +MODULE_AUTHOR("Arnaud Patard, Vasily Khoruzhick"); +MODULE_DESCRIPTION("ALSA SoC H1940"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c new file mode 100644 index 000000000000..d00ac3a7102c --- /dev/null +++ b/sound/soc/samsung/i2s.c @@ -0,0 +1,1258 @@ +/* sound/soc/samsung/i2s.c + * + * ALSA SoC Audio Layer - Samsung I2S Controller driver + * + * Copyright (c) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "dma.h" +#include "i2s.h" + +#define I2SCON 0x0 +#define I2SMOD 0x4 +#define I2SFIC 0x8 +#define I2SPSR 0xc +#define I2STXD 0x10 +#define I2SRXD 0x14 +#define I2SFICS 0x18 +#define I2STXDS 0x1c + +#define CON_RSTCLR (1 << 31) +#define CON_FRXOFSTATUS (1 << 26) +#define CON_FRXORINTEN (1 << 25) +#define CON_FTXSURSTAT (1 << 24) +#define CON_FTXSURINTEN (1 << 23) +#define CON_TXSDMA_PAUSE (1 << 20) +#define CON_TXSDMA_ACTIVE (1 << 18) + +#define CON_FTXURSTATUS (1 << 17) +#define CON_FTXURINTEN (1 << 16) +#define CON_TXFIFO2_EMPTY (1 << 15) +#define CON_TXFIFO1_EMPTY (1 << 14) +#define CON_TXFIFO2_FULL (1 << 13) +#define CON_TXFIFO1_FULL (1 << 12) + +#define CON_LRINDEX (1 << 11) +#define CON_TXFIFO_EMPTY (1 << 10) +#define CON_RXFIFO_EMPTY (1 << 9) +#define CON_TXFIFO_FULL (1 << 8) +#define CON_RXFIFO_FULL (1 << 7) +#define CON_TXDMA_PAUSE (1 << 6) +#define CON_RXDMA_PAUSE (1 << 5) +#define CON_TXCH_PAUSE (1 << 4) +#define CON_RXCH_PAUSE (1 << 3) +#define CON_TXDMA_ACTIVE (1 << 2) +#define CON_RXDMA_ACTIVE (1 << 1) +#define CON_ACTIVE (1 << 0) + +#define MOD_OPCLK_CDCLK_OUT (0 << 30) +#define MOD_OPCLK_CDCLK_IN (1 << 30) +#define MOD_OPCLK_BCLK_OUT (2 << 30) +#define MOD_OPCLK_PCLK (3 << 30) +#define MOD_OPCLK_MASK (3 << 30) +#define MOD_TXS_IDMA (1 << 28) /* Sec_TXFIFO use I-DMA */ + +#define MOD_BLCS_SHIFT 26 +#define MOD_BLCS_16BIT (0 << MOD_BLCS_SHIFT) +#define MOD_BLCS_8BIT (1 << MOD_BLCS_SHIFT) +#define MOD_BLCS_24BIT (2 << MOD_BLCS_SHIFT) +#define MOD_BLCS_MASK (3 << MOD_BLCS_SHIFT) +#define MOD_BLCP_SHIFT 24 +#define MOD_BLCP_16BIT (0 << MOD_BLCP_SHIFT) +#define MOD_BLCP_8BIT (1 << MOD_BLCP_SHIFT) +#define MOD_BLCP_24BIT (2 << MOD_BLCP_SHIFT) +#define MOD_BLCP_MASK (3 << MOD_BLCP_SHIFT) + +#define MOD_C2DD_HHALF (1 << 21) /* Discard Higher-half */ +#define MOD_C2DD_LHALF (1 << 20) /* Discard Lower-half */ +#define MOD_C1DD_HHALF (1 << 19) +#define MOD_C1DD_LHALF (1 << 18) +#define MOD_DC2_EN (1 << 17) +#define MOD_DC1_EN (1 << 16) +#define MOD_BLC_16BIT (0 << 13) +#define MOD_BLC_8BIT (1 << 13) +#define MOD_BLC_24BIT (2 << 13) +#define MOD_BLC_MASK (3 << 13) + +#define MOD_IMS_SYSMUX (1 << 10) +#define MOD_SLAVE (1 << 11) +#define MOD_TXONLY (0 << 8) +#define MOD_RXONLY (1 << 8) +#define MOD_TXRX (2 << 8) +#define MOD_MASK (3 << 8) +#define MOD_LR_LLOW (0 << 7) +#define MOD_LR_RLOW (1 << 7) +#define MOD_SDF_IIS (0 << 5) +#define MOD_SDF_MSB (1 << 5) +#define MOD_SDF_LSB (2 << 5) +#define MOD_SDF_MASK (3 << 5) +#define MOD_RCLK_256FS (0 << 3) +#define MOD_RCLK_512FS (1 << 3) +#define MOD_RCLK_384FS (2 << 3) +#define MOD_RCLK_768FS (3 << 3) +#define MOD_RCLK_MASK (3 << 3) +#define MOD_BCLK_32FS (0 << 1) +#define MOD_BCLK_48FS (1 << 1) +#define MOD_BCLK_16FS (2 << 1) +#define MOD_BCLK_24FS (3 << 1) +#define MOD_BCLK_MASK (3 << 1) +#define MOD_8BIT (1 << 0) + +#define MOD_CDCLKCON (1 << 12) + +#define PSR_PSREN (1 << 15) + +#define FIC_TX2COUNT(x) (((x) >> 24) & 0xf) +#define FIC_TX1COUNT(x) (((x) >> 16) & 0xf) + +#define FIC_TXFLUSH (1 << 15) +#define FIC_RXFLUSH (1 << 7) +#define FIC_TXCOUNT(x) (((x) >> 8) & 0xf) +#define FIC_RXCOUNT(x) (((x) >> 0) & 0xf) +#define FICS_TXCOUNT(x) (((x) >> 8) & 0x7f) + +#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + +struct i2s_dai { + /* Platform device for this DAI */ + struct platform_device *pdev; + /* IOREMAP'd SFRs */ + void __iomem *addr; + /* Physical base address of SFRs */ + u32 base; + /* Rate of RCLK source clock */ + 
unsigned long rclk_srcrate; + /* Frame Clock */ + unsigned frmclk; + /* + * Specifically requested RCLK,BCLK by MACHINE Driver. + * 0 indicates CPU driver is free to choose any value. + */ + unsigned rfs, bfs; + /* I2S Controller's core clock */ + struct clk *clk; + /* Clock for generating I2S signals */ + struct clk *op_clk; + /* Array of clock names for op_clk */ + const char **src_clk; + /* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */ + struct i2s_dai *pri_dai; + /* Pointer to the Secondary_Fifo if it has one, NULL otherwise */ + struct i2s_dai *sec_dai; +#define DAI_OPENED (1 << 0) /* Dai is opened */ +#define DAI_MANAGER (1 << 1) /* Dai is the manager */ + unsigned mode; + /* Driver for this DAI */ + struct snd_soc_dai_driver i2s_dai_drv; + /* DMA parameters */ + struct s3c_dma_params dma_playback; + struct s3c_dma_params dma_capture; + u32 quirks; + u32 suspend_i2smod; + u32 suspend_i2scon; + u32 suspend_i2spsr; +}; + +/* Lock for cross i/f checks */ +static DEFINE_SPINLOCK(lock); + +/* If this is the 'overlay' stereo DAI */ +static inline bool is_secondary(struct i2s_dai *i2s) +{ + return i2s->pri_dai ? true : false; +} + +/* If operating in SoC-Slave mode */ +static inline bool is_slave(struct i2s_dai *i2s) +{ + return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false; +} + +/* If this interface of the controller is transmitting data */ +static inline bool tx_active(struct i2s_dai *i2s) +{ + u32 active; + + if (!i2s) + return false; + + active = readl(i2s->addr + I2SMOD); + + if (is_secondary(i2s)) + active &= CON_TXSDMA_ACTIVE; + else + active &= CON_TXDMA_ACTIVE; + + return active ? true : false; +} + +/* If the other interface of the controller is transmitting data */ +static inline bool other_tx_active(struct i2s_dai *i2s) +{ + struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; + + return tx_active(other); +} + +/* If any interface of the controller is transmitting data */ +static inline bool any_tx_active(struct i2s_dai *i2s) +{ + return tx_active(i2s) || other_tx_active(i2s); +} + +/* If this interface of the controller is receiving data */ +static inline bool rx_active(struct i2s_dai *i2s) +{ + u32 active; + + if (!i2s) + return false; + + active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE; + + return active ? true : false; +} + +/* If the other interface of the controller is receiving data */ +static inline bool other_rx_active(struct i2s_dai *i2s) +{ + struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; + + return rx_active(other); +} + +/* If any interface of the controller is receiving data */ +static inline bool any_rx_active(struct i2s_dai *i2s) +{ + return rx_active(i2s) || other_rx_active(i2s); +} + +/* If the other DAI is transmitting or receiving data */ +static inline bool other_active(struct i2s_dai *i2s) +{ + return other_rx_active(i2s) || other_tx_active(i2s); +} + +/* If this DAI is transmitting or receiving data */ +static inline bool this_active(struct i2s_dai *i2s) +{ + return tx_active(i2s) || rx_active(i2s); +} + +/* If the controller is active anyway */ +static inline bool any_active(struct i2s_dai *i2s) +{ + return this_active(i2s) || other_active(i2s); +} + +static inline struct i2s_dai *to_info(struct snd_soc_dai *dai) +{ + return snd_soc_dai_get_drvdata(dai); +} + +static inline bool is_opened(struct i2s_dai *i2s) +{ + if (i2s && (i2s->mode & DAI_OPENED)) + return true; + else + return false; +} + +static inline bool is_manager(struct i2s_dai *i2s) +{ + if (is_opened(i2s) && (i2s->mode & DAI_MANAGER)) + return true; + else + return false; +} + +/* Read RCLK of I2S (in multiples of LRCLK) */ +static inline unsigned get_rfs(struct i2s_dai *i2s) +{ + u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3; + + switch (rfs) { + case 3: return 768; + case 2: return 384; + case 1: return 512; + default: return 256; + } +} + +/* Write RCLK of I2S (in multiples of LRCLK) */ +static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs) +{ + u32 mod = readl(i2s->addr + I2SMOD); + + mod &= ~MOD_RCLK_MASK; + + switch (rfs) { + case 768: + mod |= MOD_RCLK_768FS; + break; + case 512: + mod |= MOD_RCLK_512FS; + break; + case 384: + mod |= MOD_RCLK_384FS; + break; + default: + mod |= MOD_RCLK_256FS; + break; + } + + writel(mod, i2s->addr + I2SMOD); +} + +/* Read Bit-Clock of I2S (in multiples of LRCLK) */ +static inline unsigned get_bfs(struct i2s_dai *i2s) +{ + u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3; + + switch (bfs) { + case 3: return 24; + case 2: return 16; + case 1: return 48; + default: return 32; + } +} + +/* Write Bit-Clock of I2S (in multiples of LRCLK) */ +static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs) +{ + u32 mod = readl(i2s->addr + I2SMOD); + + mod &= ~MOD_BCLK_MASK; + + switch (bfs) { + case 48: + mod |= MOD_BCLK_48FS; + break; + case 32: + mod |= MOD_BCLK_32FS; + break; + case 24: + mod |= MOD_BCLK_24FS; + break; + case 16: + mod |= MOD_BCLK_16FS; + break; + default: + dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n"); + return; + } + + writel(mod, i2s->addr + I2SMOD); +} + +/* Sample-Size */ +static inline int get_blc(struct i2s_dai *i2s) +{ + int blc = readl(i2s->addr + I2SMOD); + + blc = (blc >> 13) & 0x3; + + switch (blc) { + case 2: return 24; + case 1: return 8; + default: return 16; + } +} + +/* TX Channel Control */ +static void i2s_txctrl(struct i2s_dai *i2s, int on) +{ + void __iomem *addr = i2s->addr; + u32 con = readl(addr + I2SCON); + u32 mod = readl(addr + I2SMOD) & ~MOD_MASK; + + if (on) { + con |= CON_ACTIVE; + con &= ~CON_TXCH_PAUSE; + + if (is_secondary(i2s)) { + con |= CON_TXSDMA_ACTIVE; + con &= ~CON_TXSDMA_PAUSE; + } else { + con |= CON_TXDMA_ACTIVE; + con &= ~CON_TXDMA_PAUSE; + } + + if (any_rx_active(i2s)) + mod |= MOD_TXRX; + else + mod |= MOD_TXONLY; + } else { + if (is_secondary(i2s)) { + con |= CON_TXSDMA_PAUSE; + con &= ~CON_TXSDMA_ACTIVE; + } else { + con |= CON_TXDMA_PAUSE; + con &= ~CON_TXDMA_ACTIVE; + } + + if (other_tx_active(i2s)) { + writel(con, addr + I2SCON); + return; + } + + con |= 
CON_TXCH_PAUSE; + + if (any_rx_active(i2s)) + mod |= MOD_RXONLY; + else + con &= ~CON_ACTIVE; + } + + writel(mod, addr + I2SMOD); + writel(con, addr + I2SCON); +} + +/* RX Channel Control */ +static void i2s_rxctrl(struct i2s_dai *i2s, int on) +{ + void __iomem *addr = i2s->addr; + u32 con = readl(addr + I2SCON); + u32 mod = readl(addr + I2SMOD) & ~MOD_MASK; + + if (on) { + con |= CON_RXDMA_ACTIVE | CON_ACTIVE; + con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE); + + if (any_tx_active(i2s)) + mod |= MOD_TXRX; + else + mod |= MOD_RXONLY; + } else { + con |= CON_RXDMA_PAUSE | CON_RXCH_PAUSE; + con &= ~CON_RXDMA_ACTIVE; + + if (any_tx_active(i2s)) + mod |= MOD_TXONLY; + else + con &= ~CON_ACTIVE; + } + + writel(mod, addr + I2SMOD); + writel(con, addr + I2SCON); +} + +/* Flush FIFO of an interface */ +static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush) +{ + void __iomem *fic; + u32 val; + + if (!i2s) + return; + + if (is_secondary(i2s)) + fic = i2s->addr + I2SFICS; + else + fic = i2s->addr + I2SFIC; + + /* Flush the FIFO */ + writel(readl(fic) | flush, fic); + + /* Be patient */ + val = msecs_to_loops(1) / 1000; /* 1 usec */ + while (--val) + cpu_relax(); + + writel(readl(fic) & ~flush, fic); +} + +static int i2s_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int rfs, int dir) +{ + struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; + u32 mod = readl(i2s->addr + I2SMOD); + + switch (clk_id) { + case SAMSUNG_I2S_CDCLK: + /* Shouldn't matter in GATING(CLOCK_IN) mode */ + if (dir == SND_SOC_CLOCK_IN) + rfs = 0; + + if ((rfs && other->rfs && (other->rfs != rfs)) || + (any_active(i2s) && + (((dir == SND_SOC_CLOCK_IN) + && !(mod & MOD_CDCLKCON)) || + ((dir == SND_SOC_CLOCK_OUT) + && (mod & MOD_CDCLKCON))))) { + dev_err(&i2s->pdev->dev, + "%s:%d Other DAI busy\n", __func__, __LINE__); + return -EAGAIN; + } + + if (dir == SND_SOC_CLOCK_IN) + mod |= MOD_CDCLKCON; + else + mod &= ~MOD_CDCLKCON; + + i2s->rfs = rfs; + break; + + case SAMSUNG_I2S_RCLKSRC_0: /* clock corresponding to IISMOD[10] := 0 */ + case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */ + if ((i2s->quirks & QUIRK_NO_MUXPSR) + || (clk_id == SAMSUNG_I2S_RCLKSRC_0)) + clk_id = 0; + else + clk_id = 1; + + if (!any_active(i2s)) { + if (i2s->op_clk) { + if ((clk_id && !(mod & MOD_IMS_SYSMUX)) || + (!clk_id && (mod & MOD_IMS_SYSMUX))) { + clk_disable(i2s->op_clk); + clk_put(i2s->op_clk); + } else { + i2s->rclk_srcrate = + clk_get_rate(i2s->op_clk); + return 0; + } + } + + i2s->op_clk = clk_get(&i2s->pdev->dev, + i2s->src_clk[clk_id]); + clk_enable(i2s->op_clk); + i2s->rclk_srcrate = clk_get_rate(i2s->op_clk); + + /* Over-ride the other's */ + if (other) { + other->op_clk = i2s->op_clk; + other->rclk_srcrate = i2s->rclk_srcrate; + } + } else if ((!clk_id && (mod & MOD_IMS_SYSMUX)) + || (clk_id && !(mod & MOD_IMS_SYSMUX))) { + dev_err(&i2s->pdev->dev, + "%s:%d Other DAI busy\n", __func__, __LINE__); + return -EAGAIN; + } else { + /* Call can't be on the active DAI */ + i2s->op_clk = other->op_clk; + i2s->rclk_srcrate = other->rclk_srcrate; + return 0; + } + + if (clk_id == 0) + mod &= ~MOD_IMS_SYSMUX; + else + mod |= MOD_IMS_SYSMUX; + break; + + default: + dev_err(&i2s->pdev->dev, "We don't serve that!\n"); + return -EINVAL; + } + + writel(mod, i2s->addr + I2SMOD); + + return 0; +} + +static int i2s_set_fmt(struct snd_soc_dai *dai, + unsigned int fmt) +{ + struct i2s_dai *i2s = to_info(dai); + u32 mod = readl(i2s->addr + I2SMOD); + u32 tmp = 0; + + /* Format is priority */ + 
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_RIGHT_J: + tmp |= MOD_LR_RLOW; + tmp |= MOD_SDF_MSB; + break; + case SND_SOC_DAIFMT_LEFT_J: + tmp |= MOD_LR_RLOW; + tmp |= MOD_SDF_LSB; + break; + case SND_SOC_DAIFMT_I2S: + tmp |= MOD_SDF_IIS; + break; + default: + dev_err(&i2s->pdev->dev, "Format not supported\n"); + return -EINVAL; + } + + /* + * INV flag is relative to the FORMAT flag - if set it simply + * flips the polarity specified by the Standard + */ + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_NB_IF: + if (tmp & MOD_LR_RLOW) + tmp &= ~MOD_LR_RLOW; + else + tmp |= MOD_LR_RLOW; + break; + default: + dev_err(&i2s->pdev->dev, "Polarity not supported\n"); + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + tmp |= MOD_SLAVE; + break; + case SND_SOC_DAIFMT_CBS_CFS: + /* Set default source clock in Master mode */ + if (i2s->rclk_srcrate == 0) + i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, + 0, SND_SOC_CLOCK_IN); + break; + default: + dev_err(&i2s->pdev->dev, "master/slave format not supported\n"); + return -EINVAL; + } + + if (any_active(i2s) && + ((mod & (MOD_SDF_MASK | MOD_LR_RLOW + | MOD_SLAVE)) != tmp)) { + dev_err(&i2s->pdev->dev, + "%s:%d Other DAI busy\n", __func__, __LINE__); + return -EAGAIN; + } + + mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE); + mod |= tmp; + writel(mod, i2s->addr + I2SMOD); + + return 0; +} + +static int i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + u32 mod = readl(i2s->addr + I2SMOD); + + if (!is_secondary(i2s)) + mod &= ~(MOD_DC2_EN | MOD_DC1_EN); + + switch (params_channels(params)) { + case 6: + mod |= MOD_DC2_EN; + case 4: + mod |= MOD_DC1_EN; + break; + case 2: + break; + default: + dev_err(&i2s->pdev->dev, "%d channels not supported\n", + params_channels(params)); + return -EINVAL; + } + + if (is_secondary(i2s)) + mod &= ~MOD_BLCS_MASK; + else + mod &= ~MOD_BLCP_MASK; + + if (is_manager(i2s)) + mod &= ~MOD_BLC_MASK; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S8: + if (is_secondary(i2s)) + mod |= MOD_BLCS_8BIT; + else + mod |= MOD_BLCP_8BIT; + if (is_manager(i2s)) + mod |= MOD_BLC_8BIT; + break; + case SNDRV_PCM_FORMAT_S16_LE: + if (is_secondary(i2s)) + mod |= MOD_BLCS_16BIT; + else + mod |= MOD_BLCP_16BIT; + if (is_manager(i2s)) + mod |= MOD_BLC_16BIT; + break; + case SNDRV_PCM_FORMAT_S24_LE: + if (is_secondary(i2s)) + mod |= MOD_BLCS_24BIT; + else + mod |= MOD_BLCP_24BIT; + if (is_manager(i2s)) + mod |= MOD_BLC_24BIT; + break; + default: + dev_err(&i2s->pdev->dev, "Format(%d) not supported\n", + params_format(params)); + return -EINVAL; + } + writel(mod, i2s->addr + I2SMOD); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_soc_dai_set_dma_data(dai, substream, + (void *)&i2s->dma_playback); + else + snd_soc_dai_set_dma_data(dai, substream, + (void *)&i2s->dma_capture); + + i2s->frmclk = params_rate(params); + + return 0; +} + +/* We set constraints on the substream acc to the version of I2S */ +static int i2s_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + i2s->mode |= DAI_OPENED; + + if (is_manager(other)) + i2s->mode &= ~DAI_MANAGER; + else + i2s->mode |= DAI_MANAGER; + + /* Enforce set_sysclk in Master mode */ + i2s->rclk_srcrate = 0; + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +static void i2s_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + i2s->mode &= ~DAI_OPENED; + i2s->mode &= ~DAI_MANAGER; + + if (is_opened(other)) + other->mode |= DAI_MANAGER; + + /* Reset any constraint on RFS and BFS */ + i2s->rfs = 0; + i2s->bfs = 0; + + spin_unlock_irqrestore(&lock, flags); + + /* Gate CDCLK by default */ + if (!is_opened(other)) + i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK, + 0, SND_SOC_CLOCK_IN); +} + +static int config_setup(struct i2s_dai *i2s) +{ + struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; + unsigned rfs, bfs, blc; + u32 psr; + + blc = get_blc(i2s); + + bfs = i2s->bfs; + + if (!bfs && other) + bfs = other->bfs; + + /* Select least possible multiple(2) if no constraint set */ + if (!bfs) + bfs = blc * 2; + + rfs = i2s->rfs; + + if (!rfs && other) + rfs = other->rfs; + + if ((rfs == 256 || rfs == 512) && (blc == 24)) { + dev_err(&i2s->pdev->dev, + "%d-RFS not supported for 24-blc\n", rfs); + return -EINVAL; + } + + if (!rfs) { + if (bfs == 16 || bfs == 32) + rfs = 256; + else + rfs = 384; + } + + /* If already setup and running */ + if (any_active(i2s) && (get_rfs(i2s) != rfs || get_bfs(i2s) != bfs)) { + dev_err(&i2s->pdev->dev, + "%s:%d Other DAI busy\n", __func__, __LINE__); + return -EAGAIN; + } + + /* Don't bother RFS, BFS & PSR in Slave mode */ + if (is_slave(i2s)) + return 0; + + set_bfs(i2s, bfs); + set_rfs(i2s, rfs); + + if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { + psr = i2s->rclk_srcrate / i2s->frmclk / rfs; + writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); + dev_dbg(&i2s->pdev->dev, + "RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n", + i2s->rclk_srcrate, psr, rfs, bfs); + } + + return 0; +} + +static int i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_dai *i2s = to_info(rtd->cpu_dai); + unsigned long flags; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + local_irq_save(flags); + + if (config_setup(i2s)) { + local_irq_restore(flags); + return -EINVAL; + } + + if (capture) + i2s_rxctrl(i2s, 1); + else + i2s_txctrl(i2s, 1); + + local_irq_restore(flags); + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + local_irq_save(flags); + + if (capture) + i2s_rxctrl(i2s, 0); + else + i2s_txctrl(i2s, 0); + + if (capture) + i2s_fifo(i2s, FIC_RXFLUSH); + else + i2s_fifo(i2s, FIC_TXFLUSH); + + local_irq_restore(flags); + break; + } + + return 0; +} + +static int i2s_set_clkdiv(struct snd_soc_dai *dai, + int div_id, int div) +{ + struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; + + switch (div_id) { + case SAMSUNG_I2S_DIV_BCLK: + if ((any_active(i2s) && div && (get_bfs(i2s) != div)) + || (other && other->bfs && (other->bfs != div))) { + dev_err(&i2s->pdev->dev, + "%s:%d Other DAI busy\n", __func__, __LINE__); + return -EAGAIN; + } + i2s->bfs = div; + break; + default: + dev_err(&i2s->pdev->dev, + "Invalid clock divider(%d)\n", div_id); + return -EINVAL; + } + + return 0; +} + +static snd_pcm_sframes_t +i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + u32 reg = readl(i2s->addr + I2SFIC); + snd_pcm_sframes_t delay; + + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) + delay = FIC_RXCOUNT(reg); + else if (is_secondary(i2s)) + delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS)); + else + delay = FIC_TXCOUNT(reg); + + return delay; +} + +#ifdef CONFIG_PM +static int i2s_suspend(struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + + if (dai->active) { + i2s->suspend_i2smod = readl(i2s->addr + I2SMOD); + i2s->suspend_i2scon = readl(i2s->addr + I2SCON); + i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR); + } + + return 0; +} + +static int i2s_resume(struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + + if (dai->active) { + writel(i2s->suspend_i2scon, i2s->addr + I2SCON); + writel(i2s->suspend_i2smod, i2s->addr + I2SMOD); + writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR); + } + + return 0; +} +#else +#define i2s_suspend NULL +#define i2s_resume NULL +#endif + +static int samsung_i2s_dai_probe(struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; + + if (other && other->clk) /* If this is probe on secondary */ + goto probe_exit; + + i2s->addr = ioremap(i2s->base, 0x100); + if (i2s->addr == NULL) { + dev_err(&i2s->pdev->dev, "cannot ioremap registers\n"); + return -ENXIO; + } + + i2s->clk = clk_get(&i2s->pdev->dev, "iis"); + if (IS_ERR(i2s->clk)) { + dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n"); + iounmap(i2s->addr); + return -ENOENT; + } + clk_enable(i2s->clk); + + if (other) { + other->addr = i2s->addr; + other->clk = i2s->clk; + } + + if (i2s->quirks & QUIRK_NEED_RSTCLR) + writel(CON_RSTCLR, i2s->addr + I2SCON); + +probe_exit: + /* Reset any constraint on RFS and BFS */ + i2s->rfs = 0; + i2s->bfs = 0; + i2s_txctrl(i2s, 0); + i2s_rxctrl(i2s, 0); + i2s_fifo(i2s, FIC_TXFLUSH); + i2s_fifo(other, FIC_TXFLUSH); + i2s_fifo(i2s, FIC_RXFLUSH); + + /* Gate CDCLK by default */ + if (!is_opened(other)) + i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK, + 0, SND_SOC_CLOCK_IN); + + return 0; +} + +static int samsung_i2s_dai_remove(struct snd_soc_dai *dai) +{ + struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai); + struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; + + if (!other || !other->clk) { + + if (i2s->quirks & QUIRK_NEED_RSTCLR) + writel(0, i2s->addr + I2SCON); + + clk_disable(i2s->clk); + clk_put(i2s->clk); + + iounmap(i2s->addr); + } + + i2s->clk = NULL; + + return 0; +} + +static struct snd_soc_dai_ops samsung_i2s_dai_ops = { + .trigger = i2s_trigger, + .hw_params = i2s_hw_params, + .set_fmt = i2s_set_fmt, + .set_clkdiv = i2s_set_clkdiv, + .set_sysclk = i2s_set_sysclk, + .startup = i2s_startup, + .shutdown = i2s_shutdown, + .delay = i2s_delay, +}; + +#define SAMSUNG_I2S_RATES SNDRV_PCM_RATE_8000_96000 + +#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ + SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE) + +static __devinit +struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) +{ + struct i2s_dai *i2s; + + i2s = kzalloc(sizeof(struct i2s_dai), GFP_KERNEL); + if (i2s == NULL) + return NULL; + + i2s->pdev = pdev; + i2s->pri_dai = NULL; + i2s->sec_dai = NULL; + i2s->i2s_dai_drv.symmetric_rates = 1; + i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe; + i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove; + i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops; + i2s->i2s_dai_drv.suspend = i2s_suspend; + i2s->i2s_dai_drv.resume = i2s_resume; + i2s->i2s_dai_drv.playback.channels_min = 2; + i2s->i2s_dai_drv.playback.channels_max = 2; + i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES; + i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS; + + if (!sec) { + i2s->i2s_dai_drv.capture.channels_min = 2; + i2s->i2s_dai_drv.capture.channels_max = 2; + i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; + i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; + } else { /* Create a new platform_device for Secondary */ + i2s->pdev = platform_device_register_resndata(NULL, + pdev->name, pdev->id + SAMSUNG_I2S_SECOFF, + NULL, 0, NULL, 0); + if (IS_ERR(i2s->pdev)) { + kfree(i2s); + return NULL; + } + } + + /* Pre-assign snd_soc_dai_set_drvdata */ + dev_set_drvdata(&i2s->pdev->dev, i2s); + + return i2s; +} + +static __devinit int samsung_i2s_probe(struct platform_device *pdev) +{ + u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan; + struct i2s_dai *pri_dai, *sec_dai = NULL; + struct s3c_audio_pdata *i2s_pdata; + struct samsung_i2s *i2s_cfg; + struct resource *res; + u32 regs_base, quirks; + int ret = 0; + + /* Call during Secondary interface registration */ + if (pdev->id >= SAMSUNG_I2S_SECOFF) { + sec_dai = dev_get_drvdata(&pdev->dev); + snd_soc_register_dai(&sec_dai->pdev->dev, + &sec_dai->i2s_dai_drv); + return 0; + } + + i2s_pdata = pdev->dev.platform_data; + if (i2s_pdata == NULL) { + dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!res) { + dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n"); + return -ENXIO; + } + dma_pl_chan = res->start; + + res = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!res) { + dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n"); + return -ENXIO; + } + dma_cp_chan = res->start; + + res = platform_get_resource(pdev, IORESOURCE_DMA, 2); + if (res) + dma_pl_sec_chan = res->start; + else + dma_pl_sec_chan = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Unable to get I2S SFR address\n"); + return -ENXIO; + } + + if (!request_mem_region(res->start, resource_size(res), + "samsung-i2s")) { + dev_err(&pdev->dev, "Unable to request SFR region\n"); + return -EBUSY; + } + regs_base = res->start; + + i2s_cfg = &i2s_pdata->type.i2s; + quirks = 
i2s_cfg->quirks; + + pri_dai = i2s_alloc_dai(pdev, false); + if (!pri_dai) { + dev_err(&pdev->dev, "Unable to alloc I2S_pri\n"); + ret = -ENOMEM; + goto err1; + } + + pri_dai->dma_playback.dma_addr = regs_base + I2STXD; + pri_dai->dma_capture.dma_addr = regs_base + I2SRXD; + pri_dai->dma_playback.client = + (struct s3c2410_dma_client *)&pri_dai->dma_playback; + pri_dai->dma_capture.client = + (struct s3c2410_dma_client *)&pri_dai->dma_capture; + pri_dai->dma_playback.channel = dma_pl_chan; + pri_dai->dma_capture.channel = dma_cp_chan; + pri_dai->src_clk = i2s_cfg->src_clk; + pri_dai->dma_playback.dma_size = 4; + pri_dai->dma_capture.dma_size = 4; + pri_dai->base = regs_base; + pri_dai->quirks = quirks; + + if (quirks & QUIRK_PRI_6CHAN) + pri_dai->i2s_dai_drv.playback.channels_max = 6; + + if (quirks & QUIRK_SEC_DAI) { + sec_dai = i2s_alloc_dai(pdev, true); + if (!sec_dai) { + dev_err(&pdev->dev, "Unable to alloc I2S_sec\n"); + ret = -ENOMEM; + goto err2; + } + sec_dai->dma_playback.dma_addr = regs_base + I2STXDS; + sec_dai->dma_playback.client = + (struct s3c2410_dma_client *)&sec_dai->dma_playback; + /* Use iDMA always if SysDMA not provided */ + sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1; + sec_dai->src_clk = i2s_cfg->src_clk; + sec_dai->dma_playback.dma_size = 4; + sec_dai->base = regs_base; + sec_dai->quirks = quirks; + sec_dai->pri_dai = pri_dai; + pri_dai->sec_dai = sec_dai; + } + + if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) { + dev_err(&pdev->dev, "Unable to configure gpio\n"); + ret = -EINVAL; + goto err3; + } + + snd_soc_register_dai(&pri_dai->pdev->dev, &pri_dai->i2s_dai_drv); + + return 0; +err3: + kfree(sec_dai); +err2: + kfree(pri_dai); +err1: + release_mem_region(regs_base, resource_size(res)); + + return ret; +} + +static __devexit int samsung_i2s_remove(struct platform_device *pdev) +{ + struct i2s_dai *i2s, *other; + + i2s = dev_get_drvdata(&pdev->dev); + other = i2s->pri_dai ? : i2s->sec_dai; + + if (other) { + other->pri_dai = NULL; + other->sec_dai = NULL; + } else { + struct resource *res; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + } + + i2s->pri_dai = NULL; + i2s->sec_dai = NULL; + + kfree(i2s); + + snd_soc_unregister_dai(&pdev->dev); + + return 0; +} + +static struct platform_driver samsung_i2s_driver = { + .probe = samsung_i2s_probe, + .remove = samsung_i2s_remove, + .driver = { + .name = "samsung-i2s", + .owner = THIS_MODULE, + }, +}; + +static int __init samsung_i2s_init(void) +{ + return platform_driver_register(&samsung_i2s_driver); +} +module_init(samsung_i2s_init); + +static void __exit samsung_i2s_exit(void) +{ + platform_driver_unregister(&samsung_i2s_driver); +} +module_exit(samsung_i2s_exit); + +/* Module information */ +MODULE_AUTHOR("Jaswinder Singh, "); +MODULE_DESCRIPTION("Samsung I2S Interface"); +MODULE_ALIAS("platform:samsung-i2s"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/samsung/i2s.h b/sound/soc/samsung/i2s.h new file mode 100644 index 000000000000..8e15f6a616d1 --- /dev/null +++ b/sound/soc/samsung/i2s.h @@ -0,0 +1,29 @@ +/* sound/soc/samsung/i2s.h + * + * ALSA SoC Audio Layer - Samsung I2S Controller driver + * + * Copyright (c) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __SND_SOC_SAMSUNG_I2S_H +#define __SND_SOC_SAMSUNG_I2S_H + +/* + * Maximum number of I2S blocks that any SoC can have. + * The secondary interface of a CPU dai(if there exists any), + * is indexed at [cpu-dai's ID + SAMSUNG_I2S_SECOFF] + */ +#define SAMSUNG_I2S_SECOFF 4 + +#define SAMSUNG_I2S_DIV_BCLK 1 + +#define SAMSUNG_I2S_RCLKSRC_0 0 +#define SAMSUNG_I2S_RCLKSRC_1 1 +#define SAMSUNG_I2S_CDCLK 2 + +#endif /* __SND_SOC_SAMSUNG_I2S_H */ diff --git a/sound/soc/s3c24xx/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c similarity index 88% rename from sound/soc/s3c24xx/jive_wm8750.c rename to sound/soc/samsung/jive_wm8750.c index 49605cd83947..08802520e014 100644 --- a/sound/soc/s3c24xx/jive_wm8750.c +++ b/sound/soc/samsung/jive_wm8750.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/jive_wm8750.c +/* sound/soc/samsung/jive_wm8750.c * * Copyright 2007,2008 Simtec Electronics * @@ -21,11 +21,10 @@ #include #include #include -#include #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c2412-i2s.h" #include "../codecs/wm8750.h" @@ -111,18 +110,19 @@ static struct snd_soc_ops jive_ops = { static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* These endpoints are not being used. */ - snd_soc_dapm_nc_pin(codec, "LINPUT2"); - snd_soc_dapm_nc_pin(codec, "RINPUT2"); - snd_soc_dapm_nc_pin(codec, "LINPUT3"); - snd_soc_dapm_nc_pin(codec, "RINPUT3"); - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "MONO"); + snd_soc_dapm_nc_pin(dapm, "LINPUT2"); + snd_soc_dapm_nc_pin(dapm, "RINPUT2"); + snd_soc_dapm_nc_pin(dapm, "LINPUT3"); + snd_soc_dapm_nc_pin(dapm, "RINPUT3"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "MONO"); /* Add jive specific widgets */ - err = snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets, + err = snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, ARRAY_SIZE(wm8750_dapm_widgets)); if (err) { printk(KERN_ERR "%s: failed to add widgets (%d)\n", @@ -130,8 +130,8 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd) return err; } - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_sync(dapm); return 0; } @@ -141,7 +141,7 @@ static struct snd_soc_dai_link jive_dai = { .stream_name = "WM8750", .cpu_dai_name = "s3c2412-i2s", .codec_dai_name = "wm8750-hifi", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "wm8750-codec.0-0x1a", .init = jive_wm8750_init, .ops = &jive_ops, diff --git a/sound/soc/s3c24xx/lm4857.h b/sound/soc/samsung/lm4857.h similarity index 100% rename from sound/soc/s3c24xx/lm4857.h rename to sound/soc/samsung/lm4857.h diff --git a/sound/soc/s3c24xx/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c similarity index 92% rename from sound/soc/s3c24xx/ln2440sbc_alc650.c rename to sound/soc/samsung/ln2440sbc_alc650.c index abe64abe8c84..a2bb34def740 100644 --- a/sound/soc/s3c24xx/ln2440sbc_alc650.c +++ b/sound/soc/samsung/ln2440sbc_alc650.c @@ -21,10 +21,9 @@ #include #include #include -#include -#include "s3c-dma.h" -#include "s3c-ac97.h" +#include "dma.h" +#include "ac97.h" static struct snd_soc_card ln2440sbc; @@ -32,10 +31,10 @@ static struct snd_soc_dai_link ln2440sbc_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "s3c-ac97", + .cpu_dai_name = "samsung-ac97", .codec_dai_name = "ac97-hifi", .codec_name 
= "ac97-codec", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", }, }; diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c similarity index 88% rename from sound/soc/s3c24xx/neo1973_gta02_wm8753.c rename to sound/soc/samsung/neo1973_gta02_wm8753.c index e97bdf150a03..3eec610c10f9 100644 --- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c +++ b/sound/soc/samsung/neo1973_gta02_wm8753.c @@ -22,7 +22,6 @@ #include #include #include -#include #include @@ -32,7 +31,7 @@ #include #include #include "../codecs/wm8753.h" -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" static struct snd_soc_card neo1973_gta02; @@ -333,16 +332,17 @@ static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = { static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* set up NC codec pins */ - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "OUT4"); - snd_soc_dapm_nc_pin(codec, "LINE1"); - snd_soc_dapm_nc_pin(codec, "LINE2"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "OUT4"); + snd_soc_dapm_nc_pin(dapm, "LINE1"); + snd_soc_dapm_nc_pin(dapm, "LINE2"); /* Add neo1973 gta02 specific widgets */ - snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets, ARRAY_SIZE(wm8753_dapm_widgets)); /* add neo1973 gta02 specific controls */ @@ -353,25 +353,25 @@ static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd) return err; /* set up neo1973 gta02 specific audio path audio_map */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* set endpoints to default off mode */ - snd_soc_dapm_disable_pin(codec, "Stereo Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Handset Mic"); - snd_soc_dapm_disable_pin(codec, "Handset Spk"); + snd_soc_dapm_disable_pin(dapm, "Stereo Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Handset Mic"); + snd_soc_dapm_disable_pin(dapm, "Handset Spk"); /* allow audio paths from the GSM modem to run during suspend */ - snd_soc_dapm_ignore_suspend(codec, "Stereo Out"); - snd_soc_dapm_ignore_suspend(codec, "GSM Line Out"); - snd_soc_dapm_ignore_suspend(codec, "GSM Line In"); - snd_soc_dapm_ignore_suspend(codec, "Headset Mic"); - snd_soc_dapm_ignore_suspend(codec, "Handset Mic"); - snd_soc_dapm_ignore_suspend(codec, "Handset Spk"); + snd_soc_dapm_ignore_suspend(dapm, "Stereo Out"); + snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out"); + snd_soc_dapm_ignore_suspend(dapm, "GSM Line In"); + snd_soc_dapm_ignore_suspend(dapm, "Headset Mic"); + snd_soc_dapm_ignore_suspend(dapm, "Handset Mic"); + snd_soc_dapm_ignore_suspend(dapm, "Handset Spk"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } @@ -400,7 +400,7 @@ static struct snd_soc_dai_link neo1973_gta02_dai[] = { .cpu_dai_name = "s3c24xx-i2s", .codec_dai_name = "wm8753-hifi", .init = neo1973_gta02_wm8753_init, - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "wm8753-codec.0-0x1a", .ops = &neo1973_gta02_hifi_ops, }, @@ -411,7 +411,7 @@ 
static struct snd_soc_dai_link neo1973_gta02_dai[] = { .codec_dai_name = "wm8753-voice", .ops = &neo1973_gta02_voice_ops, .codec_name = "wm8753-codec.0-0x1a", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", }, }; @@ -438,25 +438,21 @@ static int __init neo1973_gta02_init(void) return -ENOMEM; /* register bluetooth DAI here */ - ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, -1, &bt_dai); - if (ret) { - platform_device_put(neo1973_gta02_snd_device); - return ret; - } + ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, &bt_dai); + if (ret) + goto err_put_device; platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02); ret = platform_device_add(neo1973_gta02_snd_device); - if (ret) { - platform_device_put(neo1973_gta02_snd_device); - return ret; - } + if (ret) + goto err_unregister_dai; /* Initialise GPIOs used by amp */ ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN"); if (ret) { pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN); - goto err_unregister_device; + goto err_del_device; } ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1); @@ -483,15 +479,19 @@ err_free_gpio_amp_shut: gpio_free(GTA02_GPIO_AMP_SHUT); err_free_gpio_hp_in: gpio_free(GTA02_GPIO_HP_IN); -err_unregister_device: - platform_device_unregister(neo1973_gta02_snd_device); +err_del_device: + platform_device_del(neo1973_gta02_snd_device); +err_unregister_dai: + snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev); +err_put_device: + platform_device_put(neo1973_gta02_snd_device); return ret; } module_init(neo1973_gta02_init); static void __exit neo1973_gta02_exit(void) { - snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev, -1); + snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev); platform_device_unregister(neo1973_gta02_snd_device); gpio_free(GTA02_GPIO_HP_IN); gpio_free(GTA02_GPIO_AMP_SHUT); diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c similarity index 83% rename from sound/soc/s3c24xx/neo1973_wm8753.c rename to sound/soc/samsung/neo1973_wm8753.c index f4f2ee731f01..c7a24514beb5 100644 --- a/sound/soc/s3c24xx/neo1973_wm8753.c +++ b/sound/soc/samsung/neo1973_wm8753.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -36,7 +35,7 @@ #include "../codecs/wm8753.h" #include "lm4857.h" -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" /* define the scenarios */ @@ -237,81 +236,83 @@ static int neo1973_get_scenario(struct snd_kcontrol *kcontrol, static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario) { + struct snd_soc_dapm_context *dapm = &codec->dapm; + pr_debug("Entered %s\n", __func__); switch (neo1973_scenario) { case NEO_AUDIO_OFF: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_HANDSET: - snd_soc_dapm_enable_pin(codec, "Audio Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_enable_pin(codec, "Call Mic"); + snd_soc_dapm_enable_pin(dapm, "Audio Out"); + 
snd_soc_dapm_enable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_enable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_enable_pin(dapm, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_HEADSET: - snd_soc_dapm_enable_pin(codec, "Audio Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line In"); - snd_soc_dapm_enable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_enable_pin(dapm, "Audio Out"); + snd_soc_dapm_enable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_enable_pin(dapm, "GSM Line In"); + snd_soc_dapm_enable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_BLUETOOTH: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line Out"); - snd_soc_dapm_enable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_enable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_enable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_STEREO_TO_SPEAKERS: - snd_soc_dapm_enable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_enable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_STEREO_TO_HEADPHONES: - snd_soc_dapm_enable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_enable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_CAPTURE_HANDSET: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_enable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_enable_pin(dapm, "Call Mic"); break; case NEO_CAPTURE_HEADSET: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_enable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_enable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; case NEO_CAPTURE_BLUETOOTH: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - 
snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); break; default: - snd_soc_dapm_disable_pin(codec, "Audio Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line Out"); - snd_soc_dapm_disable_pin(codec, "GSM Line In"); - snd_soc_dapm_disable_pin(codec, "Headset Mic"); - snd_soc_dapm_disable_pin(codec, "Call Mic"); + snd_soc_dapm_disable_pin(dapm, "Audio Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); + snd_soc_dapm_disable_pin(dapm, "GSM Line In"); + snd_soc_dapm_disable_pin(dapm, "Headset Mic"); + snd_soc_dapm_disable_pin(dapm, "Call Mic"); } - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } @@ -502,20 +503,21 @@ static const struct snd_kcontrol_new wm8753_neo1973_controls[] = { static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; pr_debug("Entered %s\n", __func__); /* set up NC codec pins */ - snd_soc_dapm_nc_pin(codec, "LOUT2"); - snd_soc_dapm_nc_pin(codec, "ROUT2"); - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "OUT4"); - snd_soc_dapm_nc_pin(codec, "LINE1"); - snd_soc_dapm_nc_pin(codec, "LINE2"); + snd_soc_dapm_nc_pin(dapm, "LOUT2"); + snd_soc_dapm_nc_pin(dapm, "ROUT2"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "OUT4"); + snd_soc_dapm_nc_pin(dapm, "LINE1"); + snd_soc_dapm_nc_pin(dapm, "LINE2"); /* Add neo1973 specific widgets */ - snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets, ARRAY_SIZE(wm8753_dapm_widgets)); /* set endpoints to default mode */ @@ -528,10 +530,10 @@ static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd) return err; /* set up neo1973 specific audio routes */ - err = snd_soc_dapm_add_routes(codec, dapm_routes, + err = snd_soc_dapm_add_routes(dapm, dapm_routes, ARRAY_SIZE(dapm_routes)); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } @@ -556,7 +558,7 @@ static struct snd_soc_dai_link neo1973_dai[] = { { /* Hifi Playback - for simultaneous use with voice below */ .name = "WM8753", .stream_name = "WM8753 HiFi", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .cpu_dai_name = "s3c24xx-i2s", .codec_dai_name = "wm8753-hifi", .codec_name = "wm8753-codec.0-0x1a", @@ -566,7 +568,7 @@ static struct snd_soc_dai_link neo1973_dai[] = { { /* Voice via BT */ .name = "Bluetooth", .stream_name = "Voice", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .cpu_dai_name = "bluetooth-dai", .codec_dai_name = "wm8753-voice", .codec_name = "wm8753-codec.0-0x1a", diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/samsung/pcm.c similarity index 99% rename from sound/soc/s3c24xx/s3c-pcm.c rename to sound/soc/samsung/pcm.c index 2e020e1b4eab..48d0b750406b 100644 --- a/sound/soc/s3c24xx/s3c-pcm.c +++ b/sound/soc/samsung/pcm.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c-pcm.c +/* sound/soc/samsung/pcm.c * * ALSA SoC Audio Layer - S3C PCM-Controller driver * @@ -29,8 +29,8 @@ #include #include -#include "s3c-dma.h" -#include "s3c-pcm.h" +#include "dma.h" +#include "pcm.h" static struct s3c2410_dma_client s3c_pcm_dma_client_out = { .name = "PCM Stereo out" diff --git a/sound/soc/s3c24xx/s3c-pcm.h b/sound/soc/samsung/pcm.h similarity index 99% rename 
from sound/soc/s3c24xx/s3c-pcm.h rename to sound/soc/samsung/pcm.h index f60baa19387d..03393dcf852d 100644 --- a/sound/soc/s3c24xx/s3c-pcm.h +++ b/sound/soc/samsung/pcm.h @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c-pcm.h +/* sound/soc/samsung/pcm.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/sound/soc/s3c24xx/regs-i2s-v2.h b/sound/soc/samsung/regs-i2s-v2.h similarity index 100% rename from sound/soc/s3c24xx/regs-i2s-v2.h rename to sound/soc/samsung/regs-i2s-v2.h diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c similarity index 95% rename from sound/soc/s3c24xx/rx1950_uda1380.c rename to sound/soc/samsung/rx1950_uda1380.c index 468cc11fdf47..f40027445dda 100644 --- a/sound/soc/s3c24xx/rx1950_uda1380.c +++ b/sound/soc/samsung/rx1950_uda1380.c @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -35,7 +34,7 @@ #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" #include "../codecs/uda1380.h" @@ -95,7 +94,7 @@ static struct snd_soc_dai_link rx1950_uda1380_dai[] = { .cpu_dai_name = "s3c24xx-iis", .codec_dai_name = "uda1380-hifi", .init = rx1950_uda1380_init, - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "uda1380-codec.0-001a", .ops = &rx1950_ops, }, @@ -228,26 +227,28 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream, static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; int err; /* Add rx1950 specific widgets */ - err = snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets, + err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets, ARRAY_SIZE(uda1380_dapm_widgets)); if (err) return err; /* Set up rx1950 specific audio path audio_mapnects */ - err = snd_soc_dapm_add_routes(codec, audio_map, + err = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); if (err) return err; - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Speaker"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, &hp_jack); diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c similarity index 99% rename from sound/soc/s3c24xx/s3c-i2s-v2.c rename to sound/soc/samsung/s3c-i2s-v2.c index b3866d5b19e9..094f36e41e83 100644 --- a/sound/soc/s3c24xx/s3c-i2s-v2.c +++ b/sound/soc/samsung/s3c-i2s-v2.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c-i2c-v2.c +/* sound/soc/samsung/s3c-i2c-v2.c * * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs. 
* @@ -28,7 +28,7 @@ #include "regs-i2s-v2.h" #include "s3c-i2s-v2.h" -#include "s3c-dma.h" +#include "dma.h" #undef S3C_IIS_V2_SUPPORTED diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h similarity index 98% rename from sound/soc/s3c24xx/s3c-i2s-v2.h rename to sound/soc/samsung/s3c-i2s-v2.h index d45830151484..f8297d9bb8a3 100644 --- a/sound/soc/s3c24xx/s3c-i2s-v2.h +++ b/sound/soc/samsung/s3c-i2s-v2.h @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c-i2s-v2.h +/* sound/soc/samsung/s3c-i2s-v2.h * * ALSA Soc Audio Layer - S3C_I2SV2 I2S driver * diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c similarity index 98% rename from sound/soc/s3c24xx/s3c2412-i2s.c rename to sound/soc/samsung/s3c2412-i2s.c index 4a861cfa52c5..7ea837867124 100644 --- a/sound/soc/s3c24xx/s3c2412-i2s.c +++ b/sound/soc/samsung/s3c2412-i2s.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c2412-i2s.c +/* sound/soc/samsung/s3c2412-i2s.c * * ALSA Soc Audio Layer - S3C2412 I2S driver * @@ -35,7 +35,7 @@ #include #include -#include "s3c-dma.h" +#include "dma.h" #include "regs-i2s-v2.h" #include "s3c2412-i2s.h" diff --git a/sound/soc/s3c24xx/s3c2412-i2s.h b/sound/soc/samsung/s3c2412-i2s.h similarity index 95% rename from sound/soc/s3c24xx/s3c2412-i2s.h rename to sound/soc/samsung/s3c2412-i2s.h index 01a0471ac65c..02ad5794c0a9 100644 --- a/sound/soc/s3c24xx/s3c2412-i2s.h +++ b/sound/soc/samsung/s3c2412-i2s.h @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c2412-i2s.c +/* sound/soc/samsung/s3c2412-i2s.c * * ALSA Soc Audio Layer - S3C2412 I2S driver * diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c similarity index 99% rename from sound/soc/s3c24xx/s3c24xx-i2s.c rename to sound/soc/samsung/s3c24xx-i2s.c index e060daaa458f..13e41ed8e22b 100644 --- a/sound/soc/s3c24xx/s3c24xx-i2s.c +++ b/sound/soc/samsung/s3c24xx-i2s.c @@ -38,7 +38,7 @@ #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" static struct s3c2410_dma_client s3c24xx_dma_client_out = { diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.h b/sound/soc/samsung/s3c24xx-i2s.h similarity index 100% rename from sound/soc/s3c24xx/s3c24xx-i2s.h rename to sound/soc/samsung/s3c24xx-i2s.h diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c similarity index 99% rename from sound/soc/s3c24xx/s3c24xx_simtec.c rename to sound/soc/samsung/s3c24xx_simtec.c index c4c111442010..a434032d1832 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.c +++ b/sound/soc/samsung/s3c24xx_simtec.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c24xx_simtec.c +/* sound/soc/samsung/s3c24xx_simtec.c * * Copyright 2009 Simtec Electronics * @@ -17,11 +17,10 @@ #include #include #include -#include #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" #include "s3c24xx_simtec.h" diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/samsung/s3c24xx_simtec.h similarity index 94% rename from sound/soc/s3c24xx/s3c24xx_simtec.h rename to sound/soc/samsung/s3c24xx_simtec.h index e63d5ff9c41f..8270748a2c41 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.h +++ b/sound/soc/samsung/s3c24xx_simtec.h @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c24xx_simtec.h +/* sound/soc/samsung/s3c24xx_simtec.h * * Copyright 2009 Simtec Electronics * diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c similarity index 87% rename from sound/soc/s3c24xx/s3c24xx_simtec_hermes.c rename to sound/soc/samsung/s3c24xx_simtec_hermes.c index f88453735ae2..bb4292e3596c 100644 --- 
a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c +++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c +/* sound/soc/samsung/s3c24xx_simtec_hermes.c * * Copyright 2009 Simtec Electronics * @@ -14,16 +14,13 @@ #include #include #include -#include #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" #include "s3c24xx_simtec.h" -#include "../codecs/tlv320aic3x.h" - static const struct snd_soc_dapm_widget dapm_widgets[] = { SND_SOC_DAPM_LINE("GSM Out", NULL), SND_SOC_DAPM_LINE("GSM In", NULL), @@ -76,19 +73,20 @@ static const struct snd_soc_dapm_route base_map[] = { static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, dapm_widgets, + snd_soc_dapm_new_controls(dapm, dapm_widgets, ARRAY_SIZE(dapm_widgets)); - snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map)); + snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map)); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Line In"); - snd_soc_dapm_enable_pin(codec, "Line Out"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Line Out"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); simtec_audio_init(rtd); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } @@ -99,7 +97,7 @@ static struct snd_soc_dai_link simtec_dai_aic33 = { .codec_name = "tlv320aic3x-codec.0-0x1a", .cpu_dai_name = "s3c24xx-i2s", .codec_dai_name = "tlv320aic3x-hifi", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .init = simtec_hermes_init, }; diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c similarity index 86% rename from sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c rename to sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c index c0967593510d..fbba4e367729 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c +++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c +/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c * * Copyright 2009 Simtec Electronics * @@ -14,11 +14,10 @@ #include #include #include -#include #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" #include "s3c24xx_simtec.h" @@ -65,19 +64,20 @@ static const struct snd_soc_dapm_route base_map[] = { static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, dapm_widgets, + snd_soc_dapm_new_controls(dapm, dapm_widgets, ARRAY_SIZE(dapm_widgets)); - snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map)); + snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map)); - snd_soc_dapm_enable_pin(codec, "Headphone Jack"); - snd_soc_dapm_enable_pin(codec, "Line In"); - snd_soc_dapm_enable_pin(codec, "Line Out"); - snd_soc_dapm_enable_pin(codec, "Mic Jack"); + snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Line In"); + snd_soc_dapm_enable_pin(dapm, "Line Out"); + snd_soc_dapm_enable_pin(dapm, "Mic Jack"); simtec_audio_init(rtd); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } @@ -88,7 +88,7 @@ static struct snd_soc_dai_link simtec_dai_aic23 = { 
.codec_name = "tlv320aic3x-codec.0-0x1a", .cpu_dai_name = "s3c24xx-i2s", .codec_dai_name = "tlv320aic3x-hifi", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .init = simtec_tlv320aic23_init, }; diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c similarity index 99% rename from sound/soc/s3c24xx/s3c24xx_uda134x.c rename to sound/soc/samsung/s3c24xx_uda134x.c index bd48ffbde880..cdc8ecbcb8ef 100644 --- a/sound/soc/s3c24xx/s3c24xx_uda134x.c +++ b/sound/soc/samsung/s3c24xx_uda134x.c @@ -18,13 +18,12 @@ #include #include #include -#include #include #include #include -#include "s3c-dma.h" +#include "dma.h" #include "s3c24xx-i2s.h" #include "../codecs/uda134x.h" @@ -231,7 +230,7 @@ static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = { .codec_dai_name = "uda134x-hifi", .cpu_dai_name = "s3c24xx-i2s", .ops = &s3c24xx_uda134x_ops, - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", }; static struct snd_soc_card snd_soc_s3c24xx_uda134x = { diff --git a/sound/soc/s3c24xx/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c similarity index 82% rename from sound/soc/s3c24xx/smartq_wm8987.c rename to sound/soc/samsung/smartq_wm8987.c index dd20ca7f4681..61e2b5219d42 100644 --- a/sound/soc/s3c24xx/smartq_wm8987.c +++ b/sound/soc/samsung/smartq_wm8987.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/smartq_wm8987.c +/* sound/soc/samsung/smartq_wm8987.c * * Copyright 2010 Maurus Cuelenaere * @@ -19,13 +19,13 @@ #include #include -#include +#include #include #include -#include "s3c-dma.h" -#include "s3c64xx-i2s.h" +#include "dma.h" +#include "i2s.h" #include "../codecs/wm8750.h" @@ -39,15 +39,11 @@ static int smartq_hifi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; - struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; - struct s3c_i2sv2_rate_calc div; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int clk = 0; int ret; - s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params), - s3c_i2sv2_get_clock(cpu_dai)); - switch (params_rate(params)) { case 8000: case 16000: @@ -78,20 +74,21 @@ static int smartq_hifi_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - /* set the codec system clock for DAC and ADC */ - ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, - SND_SOC_CLOCK_IN); + /* Use PCLK for I2S signal generation */ + ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0, + 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; - /* set MCLK division for sample rate */ - ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, div.fs_div); + /* Gate the RCLK output on PAD */ + ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK, + 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; - /* set prescaler division for sample rate */ - ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_PRESCALER, - div.clk_div - 1); + /* set the codec system clock for DAC and ADC */ + ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, + SND_SOC_CLOCK_IN); if (ret < 0) return ret; @@ -156,12 +153,14 @@ static const struct snd_soc_dapm_route audio_map[] = { {"LINPUT2", NULL, "Mic Bias"}, }; -static int smartq_wm8987_init(struct snd_soc_codec *codec) +static int smartq_wm8987_init(struct snd_soc_pcm_runtime *rtd) { + struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = 
&codec->dapm; int err = 0; /* Add SmartQ specific widgets */ - snd_soc_dapm_new_controls(codec, wm8987_dapm_widgets, + snd_soc_dapm_new_controls(dapm, wm8987_dapm_widgets, ARRAY_SIZE(wm8987_dapm_widgets)); /* add SmartQ specific controls */ @@ -172,25 +171,25 @@ static int smartq_wm8987_init(struct snd_soc_codec *codec) return err; /* setup SmartQ specific audio path */ - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* set endpoints to not connected */ - snd_soc_dapm_nc_pin(codec, "LINPUT1"); - snd_soc_dapm_nc_pin(codec, "RINPUT1"); - snd_soc_dapm_nc_pin(codec, "OUT3"); - snd_soc_dapm_nc_pin(codec, "ROUT1"); + snd_soc_dapm_nc_pin(dapm, "LINPUT1"); + snd_soc_dapm_nc_pin(dapm, "RINPUT1"); + snd_soc_dapm_nc_pin(dapm, "OUT3"); + snd_soc_dapm_nc_pin(dapm, "ROUT1"); /* set endpoints to default off mode */ - snd_soc_dapm_enable_pin(codec, "Internal Speaker"); - snd_soc_dapm_enable_pin(codec, "Internal Mic"); - snd_soc_dapm_disable_pin(codec, "Headphone Jack"); + snd_soc_dapm_enable_pin(dapm, "Internal Speaker"); + snd_soc_dapm_enable_pin(dapm, "Internal Mic"); + snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); - err = snd_soc_dapm_sync(codec); + err = snd_soc_dapm_sync(dapm); if (err) return err; /* Headphone jack detection */ - err = snd_soc_jack_new(&snd_soc_smartq, "Headphone Jack", + err = snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, &smartq_jack); if (err) return err; @@ -211,9 +210,9 @@ static struct snd_soc_dai_link smartq_dai[] = { { .name = "wm8987", .stream_name = "SmartQ Hi-Fi", - .cpu_dai_name = "s3c64xx-i2s.0", + .cpu_dai_name = "samsung-i2s.0", .codec_dai_name = "wm8750-hifi", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .codec_name = "wm8750-codec.0-0x1a", .init = smartq_wm8987_init, .ops = &smartq_hifi_ops, @@ -275,6 +274,7 @@ err_unregister_device: static void __exit smartq_exit(void) { + gpio_free(S3C64XX_GPK(12)); snd_soc_jack_free_gpios(&smartq_jack, ARRAY_SIZE(smartq_jack_gpios), smartq_jack_gpios); diff --git a/sound/soc/s3c24xx/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c similarity index 92% rename from sound/soc/s3c24xx/smdk2443_wm9710.c rename to sound/soc/samsung/smdk2443_wm9710.c index 4613288c2772..3be7e7e92d6e 100644 --- a/sound/soc/s3c24xx/smdk2443_wm9710.c +++ b/sound/soc/samsung/smdk2443_wm9710.c @@ -17,10 +17,9 @@ #include #include #include -#include -#include "s3c-dma.h" -#include "s3c-ac97.h" +#include "dma.h" +#include "ac97.h" static struct snd_soc_card smdk2443; @@ -28,10 +27,10 @@ static struct snd_soc_dai_link smdk2443_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "s3c-ac97", + .cpu_dai_name = "samsung-ac97", .codec_dai_name = "ac97-hifi", .codec_name = "ac97-codec", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", }, }; diff --git a/sound/soc/s3c24xx/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c similarity index 92% rename from sound/soc/s3c24xx/smdk_spdif.c rename to sound/soc/samsung/smdk_spdif.c index c8bd90488a87..b5c3fad01bb8 100644 --- a/sound/soc/s3c24xx/smdk_spdif.c +++ b/sound/soc/samsung/smdk_spdif.c @@ -18,7 +18,7 @@ #include -#include "s3c-dma.h" +#include "dma.h" #include "spdif.h" /* Audio clock settings are belonged to board specific part. 
Every @@ -28,7 +28,7 @@ static int set_audio_clock_heirachy(struct platform_device *pdev) { struct clk *fout_epll, *mout_epll, *sclk_audio0, *sclk_spdif; - int ret; + int ret = 0; fout_epll = clk_get(NULL, "fout_epll"); if (IS_ERR(fout_epll)) { @@ -61,7 +61,7 @@ static int set_audio_clock_heirachy(struct platform_device *pdev) goto out3; } - /* Set audio clock heirachy for S/PDIF */ + /* Set audio clock hierarchy for S/PDIF */ clk_set_parent(mout_epll, fout_epll); clk_set_parent(sclk_audio0, mout_epll); clk_set_parent(sclk_spdif, sclk_audio0); @@ -79,7 +79,7 @@ out1: /* We should haved to set clock directly on this part because of clock * scheme of Samsudng SoCs did not support to set rates from abstrct - * clock of it's heirachy. + * clock of it's hierarchy. */ static int set_audio_clock_rate(unsigned long epll_rate, unsigned long audio_rate) @@ -152,12 +152,10 @@ static struct snd_soc_ops smdk_spdif_ops = { .hw_params = smdk_hw_params, }; -static struct snd_soc_card smdk; - static struct snd_soc_dai_link smdk_dai = { .name = "S/PDIF", .stream_name = "S/PDIF PCM Playback", - .platform_name = "s3c24xx-pcm-audio", + .platform_name = "samsung-audio", .cpu_dai_name = "samsung-spdif", .codec_dai_name = "dit-hifi", .codec_name = "spdif-dit", @@ -183,7 +181,7 @@ static int __init smdk_init(void) ret = platform_device_add(smdk_snd_spdif_dit_device); if (ret) - goto err2; + goto err1; smdk_snd_spdif_device = platform_device_alloc("soc-audio", -1); if (!smdk_snd_spdif_device) { @@ -195,17 +193,21 @@ static int __init smdk_init(void) ret = platform_device_add(smdk_snd_spdif_device); if (ret) - goto err1; + goto err3; - /* Set audio clock heirachy manually */ + /* Set audio clock hierarchy manually */ ret = set_audio_clock_heirachy(smdk_snd_spdif_device); if (ret) - goto err1; + goto err4; return 0; -err1: +err4: + platform_device_del(smdk_snd_spdif_device); +err3: platform_device_put(smdk_snd_spdif_device); err2: + platform_device_del(smdk_snd_spdif_dit_device); +err1: platform_device_put(smdk_snd_spdif_dit_device); return ret; } @@ -213,6 +215,7 @@ err2: static void __exit smdk_exit(void) { platform_device_unregister(smdk_snd_spdif_device); + platform_device_unregister(smdk_snd_spdif_dit_device); } module_init(smdk_init); diff --git a/sound/soc/s3c24xx/smdk64xx_wm8580.c b/sound/soc/samsung/smdk_wm8580.c similarity index 55% rename from sound/soc/s3c24xx/smdk64xx_wm8580.c rename to sound/soc/samsung/smdk_wm8580.c index 052e499b68d1..b2cff1a44aed 100644 --- a/sound/soc/s3c24xx/smdk64xx_wm8580.c +++ b/sound/soc/samsung/smdk_wm8580.c @@ -1,5 +1,5 @@ /* - * smdk64xx_wm8580.c + * smdk_wm8580.c * * Copyright (c) 2009 Samsung Electronics Co. 
Ltd * Author: Jaswinder Singh @@ -16,11 +16,12 @@ #include #include #include -#include + +#include #include "../codecs/wm8580.h" -#include "s3c-dma.h" -#include "s3c64xx-i2s.h" +#include "dma.h" +#include "i2s.h" /* * Default CFG switch settings to use this driver: @@ -28,10 +29,10 @@ * SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On */ -/* SMDK64XX has a 12MHZ crystal attached to WM8580 */ -#define SMDK64XX_WM8580_FREQ 12000000 +/* SMDK has a 12MHZ crystal attached to WM8580 */ +#define SMDK_WM8580_FREQ 12000000 -static int smdk64xx_hw_params(struct snd_pcm_substream *substream, +static int smdk_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; @@ -96,17 +97,6 @@ static int smdk64xx_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_CDCLK, - 0, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - - /* We use PCLK for basic ops in SoC-Slave mode */ - ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK, - 0, SND_SOC_CLOCK_IN); - if (ret < 0) - return ret; - /* Set WM8580 to drive MCLK from its PLLA */ ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK, WM8580_CLKSRC_PLLA); @@ -114,7 +104,7 @@ static int smdk64xx_hw_params(struct snd_pcm_substream *substream, return ret; ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0, - SMDK64XX_WM8580_FREQ, pll_out); + SMDK_WM8580_FREQ, pll_out); if (ret < 0) return ret; @@ -123,32 +113,24 @@ static int smdk64xx_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_BCLK, bfs); - if (ret < 0) - return ret; - - ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, rfs); - if (ret < 0) - return ret; - return 0; } /* - * SMDK64XX WM8580 DAI operations. + * SMDK WM8580 DAI operations. */ -static struct snd_soc_ops smdk64xx_ops = { - .hw_params = smdk64xx_hw_params, +static struct snd_soc_ops smdk_ops = { + .hw_params = smdk_hw_params, }; -/* SMDK64xx Playback widgets */ +/* SMDK Playback widgets */ static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = { SND_SOC_DAPM_HP("Front", NULL), SND_SOC_DAPM_HP("Center+Sub", NULL), SND_SOC_DAPM_HP("Rear", NULL), }; -/* SMDK64xx Capture widgets */ +/* SMDK Capture widgets */ static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = { SND_SOC_DAPM_MIC("MicIn", NULL), SND_SOC_DAPM_LINE("LineIn", NULL), @@ -179,94 +161,132 @@ static const struct snd_soc_dapm_route audio_map_rx[] = { {"Rear", NULL, "VOUT3R"}, }; -static int smdk64xx_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd) +static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - /* Add smdk64xx specific Capture widgets */ - snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_cpt, + /* Add smdk specific Capture widgets */ + snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_cpt, ARRAY_SIZE(wm8580_dapm_widgets_cpt)); /* Set up PAIFTX audio path */ - snd_soc_dapm_add_routes(codec, audio_map_tx, ARRAY_SIZE(audio_map_tx)); + snd_soc_dapm_add_routes(dapm, audio_map_tx, ARRAY_SIZE(audio_map_tx)); /* Enabling the microphone requires the fitting of a 0R * resistor to connect the line from the microphone jack. 
*/ - snd_soc_dapm_disable_pin(codec, "MicIn"); + snd_soc_dapm_disable_pin(dapm, "MicIn"); /* signal a DAPM event */ - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } -static int smdk64xx_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd) +static int smdk_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - /* Add smdk64xx specific Playback widgets */ - snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_pbk, + /* Add smdk specific Playback widgets */ + snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_pbk, ARRAY_SIZE(wm8580_dapm_widgets_pbk)); /* Set up PAIFRX audio path */ - snd_soc_dapm_add_routes(codec, audio_map_rx, ARRAY_SIZE(audio_map_rx)); + snd_soc_dapm_add_routes(dapm, audio_map_rx, ARRAY_SIZE(audio_map_rx)); /* signal a DAPM event */ - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); return 0; } -static struct snd_soc_dai_link smdk64xx_dai[] = { -{ /* Primary Playback i/f */ - .name = "WM8580 PAIF RX", - .stream_name = "Playback", - .cpu_dai_name = "s3c64xx-iis-v4", - .codec_dai_name = "wm8580-hifi-playback", - .platform_name = "s3c24xx-pcm-audio", - .codec_name = "wm8580-codec.0-001b", - .init = smdk64xx_wm8580_init_paifrx, - .ops = &smdk64xx_ops, -}, -{ /* Primary Capture i/f */ - .name = "WM8580 PAIF TX", - .stream_name = "Capture", - .cpu_dai_name = "s3c64xx-iis-v4", - .codec_dai_name = "wm8580-hifi-capture", - .platform_name = "s3c24xx-pcm-audio", - .codec_name = "wm8580-codec.0-001b", - .init = smdk64xx_wm8580_init_paiftx, - .ops = &smdk64xx_ops, -}, +enum { + PRI_PLAYBACK = 0, + PRI_CAPTURE, + SEC_PLAYBACK, +}; + +static struct snd_soc_dai_link smdk_dai[] = { + [PRI_PLAYBACK] = { /* Primary Playback i/f */ + .name = "WM8580 PAIF RX", + .stream_name = "Playback", + .cpu_dai_name = "samsung-i2s.0", + .codec_dai_name = "wm8580-hifi-playback", + .platform_name = "samsung-audio", + .codec_name = "wm8580-codec.0-001b", + .init = smdk_wm8580_init_paifrx, + .ops = &smdk_ops, + }, + [PRI_CAPTURE] = { /* Primary Capture i/f */ + .name = "WM8580 PAIF TX", + .stream_name = "Capture", + .cpu_dai_name = "samsung-i2s.0", + .codec_dai_name = "wm8580-hifi-capture", + .platform_name = "samsung-audio", + .codec_name = "wm8580-codec.0-001b", + .init = smdk_wm8580_init_paiftx, + .ops = &smdk_ops, + }, + [SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */ + .name = "Sec_FIFO TX", + .stream_name = "Playback", + .cpu_dai_name = "samsung-i2s.x", + .codec_dai_name = "wm8580-hifi-playback", + .platform_name = "samsung-audio", + .codec_name = "wm8580-codec.0-001b", + .init = smdk_wm8580_init_paifrx, + .ops = &smdk_ops, + }, }; -static struct snd_soc_card smdk64xx = { - .name = "SMDK64xx 5.1", - .dai_link = smdk64xx_dai, - .num_links = ARRAY_SIZE(smdk64xx_dai), +static struct snd_soc_card smdk = { + .name = "SMDK-I2S", + .dai_link = smdk_dai, + .num_links = 2, }; -static struct platform_device *smdk64xx_snd_device; +static struct platform_device *smdk_snd_device; -static int __init smdk64xx_audio_init(void) +static int __init smdk_audio_init(void) { int ret; + char *str; + + if (machine_is_smdkc100() || machine_is_smdk6442() + || machine_is_smdkv210() || machine_is_smdkc110()) { + smdk.num_links = 3; + /* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */ + str = (char *)smdk_dai[SEC_PLAYBACK].cpu_dai_name; + str[strlen(str) - 1] = '0' + SAMSUNG_I2S_SECOFF; + } else if (machine_is_smdk6410()) { + str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name; + str[strlen(str) - 1] = '2'; + 
str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name; + str[strlen(str) - 1] = '2'; + } - smdk64xx_snd_device = platform_device_alloc("soc-audio", -1); - if (!smdk64xx_snd_device) + smdk_snd_device = platform_device_alloc("soc-audio", -1); + if (!smdk_snd_device) return -ENOMEM; - platform_set_drvdata(smdk64xx_snd_device, &smdk64xx); - ret = platform_device_add(smdk64xx_snd_device); + platform_set_drvdata(smdk_snd_device, &smdk); + ret = platform_device_add(smdk_snd_device); if (ret) - platform_device_put(smdk64xx_snd_device); + platform_device_put(smdk_snd_device); return ret; } -module_init(smdk64xx_audio_init); +module_init(smdk_audio_init); + +static void __exit smdk_audio_exit(void) +{ + platform_device_unregister(smdk_snd_device); +} +module_exit(smdk_audio_exit); MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com"); -MODULE_DESCRIPTION("ALSA SoC SMDK64XX WM8580"); +MODULE_DESCRIPTION("ALSA SoC SMDK WM8580"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c new file mode 100644 index 000000000000..e7c1009a1e1d --- /dev/null +++ b/sound/soc/samsung/smdk_wm8994.c @@ -0,0 +1,176 @@ +/* + * smdk_wm8994.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include "../codecs/wm8994.h" + + /* + * Default CFG switch settings to use this driver: + * SMDKV310: CFG5-1000, CFG7-111111 + */ + + /* + * Configure audio route as :- + * $ amixer sset 'DAC1' on,on + * $ amixer sset 'Right Headphone Mux' 'DAC' + * $ amixer sset 'Left Headphone Mux' 'DAC' + * $ amixer sset 'DAC1R Mixer AIF1.1' on + * $ amixer sset 'DAC1L Mixer AIF1.1' on + * $ amixer sset 'IN2L' on + * $ amixer sset 'IN2L PGA IN2LN' on + * $ amixer sset 'MIXINL IN2L' on + * $ amixer sset 'AIF1ADC1L Mixer ADC/DMIC' on + * $ amixer sset 'IN2R' on + * $ amixer sset 'IN2R PGA IN2RN' on + * $ amixer sset 'MIXINR IN2R' on + * $ amixer sset 'AIF1ADC1R Mixer ADC/DMIC' on + */ + +/* SMDK has a 16.934MHZ crystal attached to WM8994 */ +#define SMDK_WM8994_FREQ 16934000 + +static int smdk_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + unsigned int pll_out; + int ret; + + /* AIF1CLK should be >=3MHz for optimal performance */ + if (params_rate(params) == 8000 || params_rate(params) == 11025) + pll_out = params_rate(params) * 512; + else + pll_out = params_rate(params) * 256; + + ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S + | SND_SOC_DAIFMT_NB_NF + | SND_SOC_DAIFMT_CBM_CFM); + if (ret < 0) + return ret; + + ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S + | SND_SOC_DAIFMT_NB_NF + | SND_SOC_DAIFMT_CBM_CFM); + if (ret < 0) + return ret; + + ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1, + SMDK_WM8994_FREQ, pll_out); + if (ret < 0) + return ret; + + ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1, + pll_out, SND_SOC_CLOCK_IN); + if (ret < 0) + return ret; + + return 0; +} + +/* + * SMDK WM8994 DAI operations. 
+ */ +static struct snd_soc_ops smdk_ops = { + .hw_params = smdk_hw_params, +}; + +static int smdk_wm8994_init_paiftx(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; + + /* HeadPhone */ + snd_soc_dapm_enable_pin(dapm, "HPOUT1R"); + snd_soc_dapm_enable_pin(dapm, "HPOUT1L"); + + /* MicIn */ + snd_soc_dapm_enable_pin(dapm, "IN1LN"); + snd_soc_dapm_enable_pin(dapm, "IN1RN"); + + /* LineIn */ + snd_soc_dapm_enable_pin(dapm, "IN2LN"); + snd_soc_dapm_enable_pin(dapm, "IN2RN"); + + /* Other pins NC */ + snd_soc_dapm_nc_pin(dapm, "HPOUT2P"); + snd_soc_dapm_nc_pin(dapm, "HPOUT2N"); + snd_soc_dapm_nc_pin(dapm, "SPKOUTLN"); + snd_soc_dapm_nc_pin(dapm, "SPKOUTLP"); + snd_soc_dapm_nc_pin(dapm, "SPKOUTRP"); + snd_soc_dapm_nc_pin(dapm, "SPKOUTRN"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT1N"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT1P"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT2N"); + snd_soc_dapm_nc_pin(dapm, "LINEOUT2P"); + snd_soc_dapm_nc_pin(dapm, "IN1LP"); + snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN"); + snd_soc_dapm_nc_pin(dapm, "IN1RP"); + snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP"); + + snd_soc_dapm_sync(dapm); + + return 0; +} + +static struct snd_soc_dai_link smdk_dai[] = { + { /* Primary DAI i/f */ + .name = "WM8994 AIF1", + .stream_name = "Pri_Dai", + .cpu_dai_name = "samsung-i2s.0", + .codec_dai_name = "wm8994-aif1", + .platform_name = "samsung-audio", + .codec_name = "wm8994-codec", + .init = smdk_wm8994_init_paiftx, + .ops = &smdk_ops, + }, { /* Sec_Fifo Playback i/f */ + .name = "Sec_FIFO TX", + .stream_name = "Sec_Dai", + .cpu_dai_name = "samsung-i2s.4", + .codec_dai_name = "wm8994-aif1", + .platform_name = "samsung-audio", + .codec_name = "wm8994-codec", + .ops = &smdk_ops, + }, +}; + +static struct snd_soc_card smdk = { + .name = "SMDK-I2S", + .dai_link = smdk_dai, + .num_links = ARRAY_SIZE(smdk_dai), +}; + +static struct platform_device *smdk_snd_device; + +static int __init smdk_audio_init(void) +{ + int ret; + + smdk_snd_device = platform_device_alloc("soc-audio", -1); + if (!smdk_snd_device) + return -ENOMEM; + + platform_set_drvdata(smdk_snd_device, &smdk); + + ret = platform_device_add(smdk_snd_device); + if (ret) + platform_device_put(smdk_snd_device); + + return ret; +} +module_init(smdk_audio_init); + +static void __exit smdk_audio_exit(void) +{ + platform_device_unregister(smdk_snd_device); +} +module_exit(smdk_audio_exit); + +MODULE_DESCRIPTION("ALSA SoC SMDK WM8994"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/s3c24xx/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c similarity index 87% rename from sound/soc/s3c24xx/smdk_wm9713.c rename to sound/soc/samsung/smdk_wm9713.c index 33ba8fdbcf07..ae5fed6f772f 100644 --- a/sound/soc/s3c24xx/smdk_wm9713.c +++ b/sound/soc/samsung/smdk_wm9713.c @@ -15,8 +15,8 @@ #include #include -#include "s3c-dma.h" -#include "s3c-ac97.h" +#include "dma.h" +#include "ac97.h" static struct snd_soc_card smdk; @@ -27,6 +27,7 @@ static struct snd_soc_card smdk; * SMDKC100: Set CFG6 1-3 On, CFG7 1 On * SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On * SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On + * SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On */ /* @@ -45,8 +46,8 @@ static struct snd_soc_card smdk; static struct snd_soc_dai_link smdk_dai = { .name = "AC97", .stream_name = "AC97 PCM", - .platform_name = "s3c24xx-pcm-audio", - .cpu_dai_name = "s3c-ac97", + .platform_name = "samsung-audio", + .cpu_dai_name = "samsung-ac97", .codec_dai_name = "wm9713-hifi", .codec_name = 
"wm9713-codec", }; @@ -70,24 +71,27 @@ static int __init smdk_init(void) ret = platform_device_add(smdk_snd_wm9713_device); if (ret) - goto err; + goto err1; smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1); if (!smdk_snd_ac97_device) { ret = -ENOMEM; - goto err; + goto err2; } platform_set_drvdata(smdk_snd_ac97_device, &smdk); ret = platform_device_add(smdk_snd_ac97_device); - if (ret) { - platform_device_put(smdk_snd_ac97_device); - goto err; - } + if (ret) + goto err3; return 0; -err: + +err3: + platform_device_put(smdk_snd_ac97_device); +err2: + platform_device_del(smdk_snd_wm9713_device); +err1: platform_device_put(smdk_snd_wm9713_device); return ret; } diff --git a/sound/soc/s3c24xx/spdif.c b/sound/soc/samsung/spdif.c similarity index 99% rename from sound/soc/s3c24xx/spdif.c rename to sound/soc/samsung/spdif.c index ce554e9cabcc..f0816404ea3e 100644 --- a/sound/soc/s3c24xx/spdif.c +++ b/sound/soc/samsung/spdif.c @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/spdif.c +/* sound/soc/samsung/spdif.c * * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver * @@ -20,7 +20,7 @@ #include #include -#include "s3c-dma.h" +#include "dma.h" #include "spdif.h" /* Registers */ diff --git a/sound/soc/s3c24xx/spdif.h b/sound/soc/samsung/spdif.h similarity index 94% rename from sound/soc/s3c24xx/spdif.h rename to sound/soc/samsung/spdif.h index 3ed55592710f..4f72cb446dbf 100644 --- a/sound/soc/s3c24xx/spdif.h +++ b/sound/soc/samsung/spdif.h @@ -1,4 +1,4 @@ -/* sound/soc/s3c24xx/spdif.h +/* sound/soc/samsung/spdif.h * * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver * diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig index 7f0a496e07ce..d8e06a607a22 100644 --- a/sound/soc/sh/Kconfig +++ b/sound/soc/sh/Kconfig @@ -48,7 +48,7 @@ config SND_SH7760_AC97 config SND_FSI_AK4642 tristate "FSI-AK4642 sound support" - depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE + depends on SND_SOC_SH4_FSI && I2C select SND_SOC_AK4642 help This option enables generic sound support for the @@ -56,7 +56,7 @@ config SND_FSI_AK4642 config SND_FSI_DA7210 tristate "FSI-DA7210 sound support" - depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE + depends on SND_SOC_SH4_FSI && I2C select SND_SOC_DA7210 help This option enables generic sound support for the diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c index d96602de71de..a14820ac9665 100644 --- a/sound/soc/sh/fsi-ak4642.c +++ b/sound/soc/sh/fsi-ak4642.c @@ -12,6 +12,14 @@ #include #include +struct fsi_ak4642_data { + const char *name; + const char *card; + const char *cpu_dai; + const char *codec; + const char *platform; +}; + static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *dai = rtd->codec_dai; @@ -27,37 +35,42 @@ static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd) } static struct snd_soc_dai_link fsi_dai_link = { - .name = "AK4642", - .stream_name = "AK4642", - .cpu_dai_name = "fsia-dai", /* fsi A */ .codec_dai_name = "ak4642-hifi", -#ifdef CONFIG_MACH_AP4EVB - .platform_name = "sh_fsi2", - .codec_name = "ak4642-codec.0-0013", -#else - .platform_name = "sh_fsi.0", - .codec_name = "ak4642-codec.0-0012", -#endif .init = fsi_ak4642_dai_init, - .ops = NULL, }; static struct snd_soc_card fsi_soc_card = { - .name = "FSI (AK4642)", .dai_link = &fsi_dai_link, .num_links = 1, }; static struct platform_device *fsi_snd_device; -static int __init fsi_ak4642_init(void) +static int fsi_ak4642_probe(struct platform_device *pdev) { int ret = -ENOMEM; + const struct platform_device_id *id_entry; + struct 
fsi_ak4642_data *pdata; + + id_entry = pdev->id_entry; + if (!id_entry) { + dev_err(&pdev->dev, "unknown fsi ak4642\n"); + return -ENODEV; + } + + pdata = (struct fsi_ak4642_data *)id_entry->driver_data; fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A); if (!fsi_snd_device) goto out; + fsi_dai_link.name = pdata->name; + fsi_dai_link.stream_name = pdata->name; + fsi_dai_link.cpu_dai_name = pdata->cpu_dai; + fsi_dai_link.platform_name = pdata->platform; + fsi_dai_link.codec_name = pdata->codec; + fsi_soc_card.name = pdata->card; + platform_set_drvdata(fsi_snd_device, &fsi_soc_card); ret = platform_device_add(fsi_snd_device); @@ -68,9 +81,108 @@ out: return ret; } -static void __exit fsi_ak4642_exit(void) +static int fsi_ak4642_remove(struct platform_device *pdev) { platform_device_unregister(fsi_snd_device); + return 0; +} + +static struct fsi_ak4642_data fsi_a_ak4642 = { + .name = "AK4642", + .card = "FSIA (AK4642)", + .cpu_dai = "fsia-dai", + .codec = "ak4642-codec.0-0012", + .platform = "sh_fsi.0", +}; + +static struct fsi_ak4642_data fsi_b_ak4642 = { + .name = "AK4642", + .card = "FSIB (AK4642)", + .cpu_dai = "fsib-dai", + .codec = "ak4642-codec.0-0012", + .platform = "sh_fsi.0", +}; + +static struct fsi_ak4642_data fsi_a_ak4643 = { + .name = "AK4643", + .card = "FSIA (AK4643)", + .cpu_dai = "fsia-dai", + .codec = "ak4642-codec.0-0013", + .platform = "sh_fsi.0", +}; + +static struct fsi_ak4642_data fsi_b_ak4643 = { + .name = "AK4643", + .card = "FSIB (AK4643)", + .cpu_dai = "fsib-dai", + .codec = "ak4642-codec.0-0013", + .platform = "sh_fsi.0", +}; + +static struct fsi_ak4642_data fsi2_a_ak4642 = { + .name = "AK4642", + .card = "FSI2A (AK4642)", + .cpu_dai = "fsia-dai", + .codec = "ak4642-codec.0-0012", + .platform = "sh_fsi2", +}; + +static struct fsi_ak4642_data fsi2_b_ak4642 = { + .name = "AK4642", + .card = "FSI2B (AK4642)", + .cpu_dai = "fsib-dai", + .codec = "ak4642-codec.0-0012", + .platform = "sh_fsi2", +}; + +static struct fsi_ak4642_data fsi2_a_ak4643 = { + .name = "AK4643", + .card = "FSI2A (AK4643)", + .cpu_dai = "fsia-dai", + .codec = "ak4642-codec.0-0013", + .platform = "sh_fsi2", +}; + +static struct fsi_ak4642_data fsi2_b_ak4643 = { + .name = "AK4643", + .card = "FSI2B (AK4643)", + .cpu_dai = "fsib-dai", + .codec = "ak4642-codec.0-0013", + .platform = "sh_fsi2", +}; + +static struct platform_device_id fsi_id_table[] = { + /* FSI */ + { "sh_fsi_a_ak4642", (kernel_ulong_t)&fsi_a_ak4642 }, + { "sh_fsi_b_ak4642", (kernel_ulong_t)&fsi_b_ak4642 }, + { "sh_fsi_a_ak4643", (kernel_ulong_t)&fsi_a_ak4643 }, + { "sh_fsi_b_ak4643", (kernel_ulong_t)&fsi_b_ak4643 }, + + /* FSI 2 */ + { "sh_fsi2_a_ak4642", (kernel_ulong_t)&fsi2_a_ak4642 }, + { "sh_fsi2_b_ak4642", (kernel_ulong_t)&fsi2_b_ak4642 }, + { "sh_fsi2_a_ak4643", (kernel_ulong_t)&fsi2_a_ak4643 }, + { "sh_fsi2_b_ak4643", (kernel_ulong_t)&fsi2_b_ak4643 }, + {}, +}; + +static struct platform_driver fsi_ak4642 = { + .driver = { + .name = "fsi-ak4642-audio", + }, + .probe = fsi_ak4642_probe, + .remove = fsi_ak4642_remove, + .id_table = fsi_id_table, +}; + +static int __init fsi_ak4642_init(void) +{ + return platform_driver_register(&fsi_ak4642); +} + +static void __exit fsi_ak4642_exit(void) +{ + platform_driver_unregister(&fsi_ak4642); } module_init(fsi_ak4642_init); diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c index a6adb6e43250..e8df9da92f71 100644 --- a/sound/soc/sh/fsi-da7210.c +++ b/sound/soc/sh/fsi-da7210.c @@ -18,7 +18,7 @@ static int fsi_da7210_init(struct snd_soc_pcm_runtime *rtd) struct 
snd_soc_dai *dai = rtd->codec_dai; return snd_soc_dai_set_fmt(dai, - SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM); } diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 4c2404b1b862..2b06402801ef 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -19,20 +19,26 @@ #include #include -#define DO_FMT 0x0000 -#define DOFF_CTL 0x0004 -#define DOFF_ST 0x0008 -#define DI_FMT 0x000C -#define DIFF_CTL 0x0010 -#define DIFF_ST 0x0014 -#define CKG1 0x0018 -#define CKG2 0x001C -#define DIDT 0x0020 -#define DODT 0x0024 -#define MUTE_ST 0x0028 -#define OUT_SEL 0x0030 -#define REG_END OUT_SEL - +/* PortA/PortB register */ +#define REG_DO_FMT 0x0000 +#define REG_DOFF_CTL 0x0004 +#define REG_DOFF_ST 0x0008 +#define REG_DI_FMT 0x000C +#define REG_DIFF_CTL 0x0010 +#define REG_DIFF_ST 0x0014 +#define REG_CKG1 0x0018 +#define REG_CKG2 0x001C +#define REG_DIDT 0x0020 +#define REG_DODT 0x0024 +#define REG_MUTE_ST 0x0028 +#define REG_OUT_SEL 0x0030 + +/* master register */ +#define MST_CLK_RST 0x0210 +#define MST_SOFT_RST 0x0214 +#define MST_FIFO_SZ 0x0218 + +/* core register (depend on FSI version) */ #define A_MST_CTLR 0x0180 #define B_MST_CTLR 0x01A0 #define CPU_INT_ST 0x01F4 @@ -41,22 +47,23 @@ #define INT_ST 0x0200 #define IEMSK 0x0204 #define IMSK 0x0208 -#define MUTE 0x020C -#define CLK_RST 0x0210 -#define SOFT_RST 0x0214 -#define FIFO_SZ 0x0218 -#define MREG_START A_MST_CTLR -#define MREG_END FIFO_SZ /* DO_FMT */ /* DI_FMT */ +#define CR_BWS_24 (0x0 << 20) /* FSI2 */ +#define CR_BWS_16 (0x1 << 20) /* FSI2 */ +#define CR_BWS_20 (0x2 << 20) /* FSI2 */ + +#define CR_DTMD_PCM (0x0 << 8) /* FSI2 */ +#define CR_DTMD_SPDIF_PCM (0x1 << 8) /* FSI2 */ +#define CR_DTMD_SPDIF_STREAM (0x2 << 8) /* FSI2 */ + #define CR_MONO (0x0 << 4) #define CR_MONO_D (0x1 << 4) #define CR_PCM (0x2 << 4) #define CR_I2S (0x3 << 4) #define CR_TDM (0x4 << 4) #define CR_TDM_D (0x5 << 4) -#define CR_SPDIF 0x00100120 /* DOFF_CTL */ /* DIFF_CTL */ @@ -93,6 +100,10 @@ #define IR (1 << 4) /* Interrupt Reset */ #define FSISR (1 << 0) /* Software Reset */ +/* OUT_SEL (FSI2) */ +#define DMMD (1 << 4) /* SPDIF output timing 0: Biphase only */ + /* 1: Biphase and serial */ + /* FIFO_SZ */ #define FIFO_SZ_MASK 0x7 @@ -123,6 +134,9 @@ struct fsi_stream { int buff_len; int period_len; int period_num; + + int uerr_num; + int oerr_num; }; struct fsi_priv { @@ -133,8 +147,6 @@ struct fsi_priv { struct fsi_stream capture; long rate; - - u32 mst_ctrl; }; struct fsi_core { @@ -143,6 +155,8 @@ struct fsi_core { u32 int_st; u32 iemsk; u32 imsk; + u32 a_mclk; + u32 b_mclk; }; struct fsi_master { @@ -182,62 +196,22 @@ static void __fsi_reg_mask_set(u32 reg, u32 mask, u32 data) __fsi_reg_write(reg, val); } -static void fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data) -{ - if (reg > REG_END) { - pr_err("fsi: register access err (%s)\n", __func__); - return; - } +#define fsi_reg_write(p, r, d)\ + __fsi_reg_write((u32)(p->base + REG_##r), d) - __fsi_reg_write((u32)(fsi->base + reg), data); -} +#define fsi_reg_read(p, r)\ + __fsi_reg_read((u32)(p->base + REG_##r)) -static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg) -{ - if (reg > REG_END) { - pr_err("fsi: register access err (%s)\n", __func__); - return 0; - } +#define fsi_reg_mask_set(p, r, m, d)\ + __fsi_reg_mask_set((u32)(p->base + REG_##r), m, d) - return __fsi_reg_read((u32)(fsi->base + reg)); -} - -static void fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data) -{ - if (reg > REG_END) { - pr_err("fsi: register access err 
(%s)\n", __func__); - return; - } - - __fsi_reg_mask_set((u32)(fsi->base + reg), mask, data); -} - -static void fsi_master_write(struct fsi_master *master, u32 reg, u32 data) -{ - unsigned long flags; - - if ((reg < MREG_START) || - (reg > MREG_END)) { - pr_err("fsi: register access err (%s)\n", __func__); - return; - } - - spin_lock_irqsave(&master->lock, flags); - __fsi_reg_write((u32)(master->base + reg), data); - spin_unlock_irqrestore(&master->lock, flags); -} - -static u32 fsi_master_read(struct fsi_master *master, u32 reg) +#define fsi_master_read(p, r) _fsi_master_read(p, MST_##r) +#define fsi_core_read(p, r) _fsi_master_read(p, p->core->r) +static u32 _fsi_master_read(struct fsi_master *master, u32 reg) { u32 ret; unsigned long flags; - if ((reg < MREG_START) || - (reg > MREG_END)) { - pr_err("fsi: register access err (%s)\n", __func__); - return 0; - } - spin_lock_irqsave(&master->lock, flags); ret = __fsi_reg_read((u32)(master->base + reg)); spin_unlock_irqrestore(&master->lock, flags); @@ -245,17 +219,13 @@ static u32 fsi_master_read(struct fsi_master *master, u32 reg) return ret; } -static void fsi_master_mask_set(struct fsi_master *master, +#define fsi_master_mask_set(p, r, m, d) _fsi_master_mask_set(p, MST_##r, m, d) +#define fsi_core_mask_set(p, r, m, d) _fsi_master_mask_set(p, p->core->r, m, d) +static void _fsi_master_mask_set(struct fsi_master *master, u32 reg, u32 mask, u32 data) { unsigned long flags; - if ((reg < MREG_START) || - (reg > MREG_END)) { - pr_err("fsi: register access err (%s)\n", __func__); - return; - } - spin_lock_irqsave(&master->lock, flags); __fsi_reg_mask_set((u32)(master->base + reg), mask, data); spin_unlock_irqrestore(&master->lock, flags); @@ -359,27 +329,41 @@ static void fsi_stream_push(struct fsi_priv *fsi, io->buff_offset = 0; io->period_len = period_len; io->period_num = 0; + io->oerr_num = -1; /* ignore 1st err */ + io->uerr_num = -1; /* ignore 1st err */ } static void fsi_stream_pop(struct fsi_priv *fsi, int is_play) { struct fsi_stream *io = fsi_get_stream(fsi, is_play); + struct snd_soc_dai *dai = fsi_get_dai(io->substream); + + + if (io->oerr_num > 0) + dev_err(dai->dev, "over_run = %d\n", io->oerr_num); + + if (io->uerr_num > 0) + dev_err(dai->dev, "under_run = %d\n", io->uerr_num); io->substream = NULL; io->buff_len = 0; io->buff_offset = 0; io->period_len = 0; io->period_num = 0; + io->oerr_num = 0; + io->uerr_num = 0; } static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play) { u32 status; - u32 reg = is_play ? DOFF_ST : DIFF_ST; struct fsi_stream *io = fsi_get_stream(fsi, is_play); int data_num; - status = fsi_reg_read(fsi, reg); + status = is_play ? 
+ fsi_reg_read(fsi, DOFF_ST) : + fsi_reg_read(fsi, DIFF_ST); + data_num = 0x1ff & (status >> 8); data_num *= io->chan_num; @@ -406,6 +390,27 @@ static int fsi_get_frame_width(struct fsi_priv *fsi, int is_play) return frames_to_bytes(runtime, 1) / io->chan_num; } +static void fsi_count_fifo_err(struct fsi_priv *fsi) +{ + u32 ostatus = fsi_reg_read(fsi, DOFF_ST); + u32 istatus = fsi_reg_read(fsi, DIFF_ST); + + if (ostatus & ERR_OVER) + fsi->playback.oerr_num++; + + if (ostatus & ERR_UNDER) + fsi->playback.uerr_num++; + + if (istatus & ERR_OVER) + fsi->capture.oerr_num++; + + if (istatus & ERR_UNDER) + fsi->capture.uerr_num++; + + fsi_reg_write(fsi, DOFF_ST, 0); + fsi_reg_write(fsi, DIFF_ST, 0); +} + /* * dma function */ @@ -473,8 +478,8 @@ static void fsi_irq_enable(struct fsi_priv *fsi, int is_play) u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play)); struct fsi_master *master = fsi_get_master(fsi); - fsi_master_mask_set(master, master->core->imsk, data, data); - fsi_master_mask_set(master, master->core->iemsk, data, data); + fsi_core_mask_set(master, imsk, data, data); + fsi_core_mask_set(master, iemsk, data, data); } static void fsi_irq_disable(struct fsi_priv *fsi, int is_play) @@ -482,18 +487,13 @@ static void fsi_irq_disable(struct fsi_priv *fsi, int is_play) u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play)); struct fsi_master *master = fsi_get_master(fsi); - fsi_master_mask_set(master, master->core->imsk, data, 0); - fsi_master_mask_set(master, master->core->iemsk, data, 0); + fsi_core_mask_set(master, imsk, data, 0); + fsi_core_mask_set(master, iemsk, data, 0); } static u32 fsi_irq_get_status(struct fsi_master *master) { - return fsi_master_read(master, master->core->int_st); -} - -static void fsi_irq_clear_all_status(struct fsi_master *master) -{ - fsi_master_write(master, master->core->int_st, 0); + return fsi_core_read(master, int_st); } static void fsi_irq_clear_status(struct fsi_priv *fsi) @@ -505,7 +505,7 @@ static void fsi_irq_clear_status(struct fsi_priv *fsi) data |= AB_IO(1, fsi_get_port_shift(fsi, 1)); /* clear interrupt factor */ - fsi_master_mask_set(master, master->core->int_st, data, 0); + fsi_core_mask_set(master, int_st, data, 0); } /* @@ -516,17 +516,19 @@ static void fsi_irq_clear_status(struct fsi_priv *fsi) static void fsi_spdif_clk_ctrl(struct fsi_priv *fsi, int enable) { struct fsi_master *master = fsi_get_master(fsi); - u32 val = BP | SE; + u32 mask, val; if (master->core->ver < 2) { pr_err("fsi: register access err (%s)\n", __func__); return; } - if (enable) - fsi_master_mask_set(master, fsi->mst_ctrl, val, val); - else - fsi_master_mask_set(master, fsi->mst_ctrl, val, 0); + mask = BP | SE; + val = enable ? mask : 0; + + fsi_is_port_a(fsi) ? + fsi_core_mask_set(master, a_mclk, mask, val) : + fsi_core_mask_set(master, b_mclk, mask, val); } /* @@ -550,7 +552,7 @@ static void fsi_fifo_init(struct fsi_priv *fsi, { struct fsi_master *master = fsi_get_master(fsi); struct fsi_stream *io = fsi_get_stream(fsi, is_play); - u32 ctrl, shift, i; + u32 shift, i; /* get on-chip RAM capacity */ shift = fsi_master_read(master, FIFO_SZ); @@ -583,13 +585,17 @@ static void fsi_fifo_init(struct fsi_priv *fsi, dev_dbg(dai->dev, "%d channel %d store\n", io->chan_num, io->fifo_max_num); - ctrl = is_play ? 
DOFF_CTL : DIFF_CTL; - - /* set interrupt generation factor */ - fsi_reg_write(fsi, ctrl, IRQ_HALF); - - /* clear FIFO */ - fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR); + /* + * set interrupt generation factor + * clear FIFO + */ + if (is_play) { + fsi_reg_write(fsi, DOFF_CTL, IRQ_HALF); + fsi_reg_mask_set(fsi, DOFF_CTL, FIFO_CLR, FIFO_CLR); + } else { + fsi_reg_write(fsi, DIFF_CTL, IRQ_HALF); + fsi_reg_mask_set(fsi, DIFF_CTL, FIFO_CLR, FIFO_CLR); + } } static void fsi_soft_all_reset(struct fsi_master *master) @@ -604,13 +610,12 @@ static void fsi_soft_all_reset(struct fsi_master *master) mdelay(10); } -static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int startup, int stream) +static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int stream) { struct snd_pcm_runtime *runtime; struct snd_pcm_substream *substream = NULL; int is_play = fsi_stream_is_play(stream); struct fsi_stream *io = fsi_get_stream(fsi, is_play); - u32 status_reg = is_play ? DOFF_ST : DIFF_ST; int data_residue_num; int data_num; int data_num_max; @@ -698,35 +703,20 @@ static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int startup, int stream) /* update buff_offset */ io->buff_offset += fsi_num2offset(data_num, ch_width); - /* check fifo status */ - if (!startup) { - struct snd_soc_dai *dai = fsi_get_dai(substream); - u32 status = fsi_reg_read(fsi, status_reg); - - if (status & ERR_OVER) - dev_err(dai->dev, "over run\n"); - if (status & ERR_UNDER) - dev_err(dai->dev, "under run\n"); - } - fsi_reg_write(fsi, status_reg, 0); - - /* re-enable irq */ - fsi_irq_enable(fsi, is_play); - if (over_period) snd_pcm_period_elapsed(substream); return 0; } -static int fsi_data_pop(struct fsi_priv *fsi, int startup) +static int fsi_data_pop(struct fsi_priv *fsi) { - return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_CAPTURE); + return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_CAPTURE); } -static int fsi_data_push(struct fsi_priv *fsi, int startup) +static int fsi_data_push(struct fsi_priv *fsi) { - return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_PLAYBACK); + return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_PLAYBACK); } static irqreturn_t fsi_interrupt(int irq, void *data) @@ -739,15 +729,19 @@ static irqreturn_t fsi_interrupt(int irq, void *data) fsi_master_mask_set(master, SOFT_RST, IR, IR); if (int_st & AB_IO(1, AO_SHIFT)) - fsi_data_push(&master->fsia, 0); + fsi_data_push(&master->fsia); if (int_st & AB_IO(1, BO_SHIFT)) - fsi_data_push(&master->fsib, 0); + fsi_data_push(&master->fsib); if (int_st & AB_IO(1, AI_SHIFT)) - fsi_data_pop(&master->fsia, 0); + fsi_data_pop(&master->fsia); if (int_st & AB_IO(1, BI_SHIFT)) - fsi_data_pop(&master->fsib, 0); + fsi_data_pop(&master->fsib); + + fsi_count_fifo_err(&master->fsia); + fsi_count_fifo_err(&master->fsib); - fsi_irq_clear_all_status(master); + fsi_irq_clear_status(&master->fsia); + fsi_irq_clear_status(&master->fsib); return IRQ_HANDLED; } @@ -764,7 +758,6 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream, struct fsi_stream *io; u32 flags = fsi_get_info_flags(fsi); u32 fmt; - u32 reg; u32 data; int is_play = fsi_is_play(substream); int is_master; @@ -796,7 +789,6 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream, /* do fmt, di fmt */ data = 0; - reg = is_play ? DO_FMT : DI_FMT; fmt = is_play ? 
SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags); switch (fmt) { case SH_FSI_FMT_MONO: @@ -830,16 +822,18 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream, dev_err(dai->dev, "This FSI can not use SPDIF\n"); return -EINVAL; } - data = CR_SPDIF; + data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM; io->chan_num = 2; fsi_spdif_clk_ctrl(fsi, 1); - fsi_reg_mask_set(fsi, OUT_SEL, 0x0010, 0x0010); + fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD); break; default: dev_err(dai->dev, "unknown format.\n"); return -EINVAL; } - fsi_reg_write(fsi, reg, data); + is_play ? + fsi_reg_write(fsi, DO_FMT, data) : + fsi_reg_write(fsi, DI_FMT, data); /* irq clear */ fsi_irq_disable(fsi, is_play); @@ -883,7 +877,8 @@ static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd, fsi_stream_push(fsi, is_play, substream, frames_to_bytes(runtime, runtime->buffer_size), frames_to_bytes(runtime, runtime->period_size)); - ret = is_play ? fsi_data_push(fsi, 1) : fsi_data_pop(fsi, 1); + ret = is_play ? fsi_data_push(fsi) : fsi_data_pop(fsi); + fsi_irq_enable(fsi, is_play); break; case SNDRV_PCM_TRIGGER_STOP: fsi_irq_disable(fsi, is_play); @@ -1174,12 +1169,10 @@ static int fsi_probe(struct platform_device *pdev) /* FSI A setting */ master->fsia.base = master->base; master->fsia.master = master; - master->fsia.mst_ctrl = A_MST_CTLR; /* FSI B setting */ master->fsib.base = master->base + 0x40; master->fsib.master = master; - master->fsib.mst_ctrl = B_MST_CTLR; pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); @@ -1266,6 +1259,8 @@ static struct fsi_core fsi2_core = { .int_st = CPU_INT_ST, .iemsk = CPU_IEMSK, .imsk = CPU_IMSK, + .a_mclk = A_MST_CTLR, + .b_mclk = B_MST_CTLR, }; static struct platform_device_id fsi_id_table[] = { diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c index ac6c49ce6fdf..6088a6a3238a 100644 --- a/sound/soc/sh/migor.c +++ b/sound/soc/sh/migor.c @@ -8,11 +8,11 @@ * published by the Free Software Foundation. 
*/ +#include #include #include #include -#include #include #include @@ -20,7 +20,6 @@ #include #include #include -#include #include "../codecs/wm8978.h" #include "siu.h" @@ -140,11 +139,12 @@ static const struct snd_soc_dapm_route audio_map[] = { static int migor_dai_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = &codec->dapm; - snd_soc_dapm_new_controls(codec, migor_dapm_widgets, + snd_soc_dapm_new_controls(dapm, migor_dapm_widgets, ARRAY_SIZE(migor_dapm_widgets)); - snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); + snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } diff --git a/sound/soc/sh/sh7760-ac97.c b/sound/soc/sh/sh7760-ac97.c index f8e0ab82ef59..917d3ceadc9d 100644 --- a/sound/soc/sh/sh7760-ac97.c +++ b/sound/soc/sh/sh7760-ac97.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #define IPSEL 0xFE400034 @@ -23,7 +22,7 @@ extern struct snd_soc_platform_driver sh7760_soc_platform; static int machine_init(struct snd_soc_pcm_runtime *rtd) { - snd_soc_dapm_sync(rtd->codec); + snd_soc_dapm_sync(&rtd->codec->dapm); return 0; } diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h index 9f4dcb921ff0..83c3430ad797 100644 --- a/sound/soc/sh/siu.h +++ b/sound/soc/sh/siu.h @@ -75,7 +75,7 @@ struct siu_firmware { #include #include -#include +#include #define SIU_PERIOD_BYTES_MAX 8192 /* DMA transfer/period size */ #define SIU_PERIOD_BYTES_MIN 256 /* DMA transfer/period size */ diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c index af53b64d8af2..4973c2939d79 100644 --- a/sound/soc/sh/siu_dai.c +++ b/sound/soc/sh/siu_dai.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include "siu.h" diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c index ed29c9e1ed4e..a423babcf145 100644 --- a/sound/soc/sh/siu_pcm.c +++ b/sound/soc/sh/siu_pcm.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index d214f02cbb65..8c2a21a978ac 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -14,27 +14,34 @@ #include #include #include +#include +#include +#include static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val; if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); } - return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; } static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[2]; int ret; @@ -42,16 +49,17 @@ static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg, data[1] = value & 0x00ff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - ret = codec->hw_write(codec->control_data, data, 2); if (ret == 2) return 0; @@ -77,7 +85,7 @@ static int snd_soc_4_12_spi_write(void *control_data, const char *data, msg[1] = data[0]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, 
sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -94,23 +102,27 @@ static int snd_soc_4_12_spi_write(void *control_data, const char *data, static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val; if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); } - return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; } static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[2]; int ret; @@ -118,16 +130,17 @@ static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg, data[1] = value & 0x00ff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - ret = codec->hw_write(codec->control_data, data, 2); if (ret == 2) return 0; @@ -153,7 +166,7 @@ static int snd_soc_7_9_spi_write(void *control_data, const char *data, msg[1] = data[1]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -170,24 +183,25 @@ static int snd_soc_7_9_spi_write(void *control_data, const char *data, static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u8 *cache = codec->reg_cache; u8 data[2]; + int ret; reg &= 0xff; data[0] = reg; data[1] = value & 0xff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - if (codec->hw_write(codec->control_data, data, 2) == 2) return 0; else @@ -197,7 +211,8 @@ static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg, static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec, unsigned int reg) { - u8 *cache = codec->reg_cache; + int ret; + unsigned int val; reg &= 0xff; if (reg >= codec->driver->reg_cache_size || @@ -205,10 +220,14 @@ static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec, if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); } - return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; } #if defined(CONFIG_SPI_MASTER) @@ -227,7 +246,7 @@ static int snd_soc_8_8_spi_write(void *control_data, const char *data, msg[1] = data[1]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -244,24 +263,25 @@ static int snd_soc_8_8_spi_write(void *control_data, const char *data, static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *reg_cache = codec->reg_cache; u8 data[3]; + int ret; data[0] = reg; data[1] = (value >> 8) & 0xff; data[2] = value & 0xff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - reg_cache[reg] = value; + reg < codec->driver->reg_cache_size) { 
+ ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - if (codec->hw_write(codec->control_data, data, 3) == 3) return 0; else @@ -271,17 +291,22 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val; if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); - } else { - return cache[reg]; } + + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; } #if defined(CONFIG_SPI_MASTER) @@ -301,7 +326,7 @@ static int snd_soc_8_16_spi_write(void *control_data, const char *data, msg[2] = data[2]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -420,7 +445,8 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec, static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, unsigned int reg) { - u8 *cache = codec->reg_cache; + int ret; + unsigned int val; reg &= 0xff; if (reg >= codec->driver->reg_cache_size || @@ -428,16 +454,19 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); } - return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; } static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u8 *cache = codec->reg_cache; u8 data[3]; int ret; @@ -447,16 +476,17 @@ static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, reg &= 0xff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - ret = codec->hw_write(codec->control_data, data, 3); if (ret == 3) return 0; @@ -483,7 +513,7 @@ static int snd_soc_16_8_spi_write(void *control_data, const char *data, msg[2] = data[2]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -534,23 +564,28 @@ static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec, static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val; if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { if (codec->cache_only) return -1; + BUG_ON(!codec->hw_read); return codec->hw_read(codec, reg); } - return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + + return val; } static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[4]; int ret; @@ -560,16 +595,17 @@ static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg, data[3] = value & 0xff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < 
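Every bus helper in this file follows the same shape: pack the register index and value into the byte layout the control interface expects, mirror the write into the cache with snd_soc_cache_write(), and only then hand the buffer to hw_write(). A small userspace sketch of the 8-bit-register / 16-bit-value packing used by the 8_16 helpers (the example register and value are arbitrary, not taken from any real codec):

#include <stdint.h>
#include <stdio.h>

/* Pack an 8-bit register index and a 16-bit value the way the 8_16
 * helpers do: one address byte followed by the value, MSB first. */
static void pack_8_16(uint8_t buf[3], unsigned int reg, unsigned int value)
{
	buf[0] = reg & 0xff;
	buf[1] = (value >> 8) & 0xff;
	buf[2] = value & 0xff;
}

int main(void)
{
	uint8_t buf[3];

	pack_8_16(buf, 0x1a, 0x01f4);
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);	/* 1a 01 f4 */
	return 0;
}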
codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + } if (codec->cache_only) { codec->cache_sync = 1; return 0; } - dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value); - ret = codec->hw_write(codec->control_data, data, 4); if (ret == 4) return 0; @@ -597,7 +633,7 @@ static int snd_soc_16_16_spi_write(void *control_data, const char *data, msg[3] = data[3]; spi_message_init(&m); - memset(&t, 0, (sizeof t)); + memset(&t, 0, sizeof t); t.tx_buf = &msg[0]; t.len = len; @@ -692,8 +728,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return -EINVAL; } - codec->driver->write = io_types[i].write; - codec->driver->read = io_types[i].read; + codec->write = io_types[i].write; + codec->read = io_types[i].read; switch (control) { case SND_SOC_CUSTOM: @@ -724,3 +760,930 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return 0; } EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); + +struct snd_soc_rbtree_node { + struct rb_node node; + unsigned int reg; + unsigned int value; + unsigned int defval; +} __attribute__ ((packed)); + +struct snd_soc_rbtree_ctx { + struct rb_root root; +}; + +static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup( + struct rb_root *root, unsigned int reg) +{ + struct rb_node *node; + struct snd_soc_rbtree_node *rbnode; + + node = root->rb_node; + while (node) { + rbnode = container_of(node, struct snd_soc_rbtree_node, node); + if (rbnode->reg < reg) + node = node->rb_left; + else if (rbnode->reg > reg) + node = node->rb_right; + else + return rbnode; + } + + return NULL; +} + +static int snd_soc_rbtree_insert(struct rb_root *root, + struct snd_soc_rbtree_node *rbnode) +{ + struct rb_node **new, *parent; + struct snd_soc_rbtree_node *rbnode_tmp; + + parent = NULL; + new = &root->rb_node; + while (*new) { + rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node, + node); + parent = *new; + if (rbnode_tmp->reg < rbnode->reg) + new = &((*new)->rb_left); + else if (rbnode_tmp->reg > rbnode->reg) + new = &((*new)->rb_right); + else + return 0; + } + + /* insert the node into the rbtree */ + rb_link_node(&rbnode->node, parent, new); + rb_insert_color(&rbnode->node, root); + + return 1; +} + +static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct rb_node *node; + struct snd_soc_rbtree_node *rbnode; + unsigned int val; + int ret; + + rbtree_ctx = codec->reg_cache; + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); + if (rbnode->value == rbnode->defval) + continue; + ret = snd_soc_cache_read(codec, rbnode->reg, &val); + if (ret) + return ret; + ret = snd_soc_write(codec, rbnode->reg, val); + if (ret) + return ret; + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + rbnode->reg, val); + } + + return 0; +} + +static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbnode; + + rbtree_ctx = codec->reg_cache; + rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg); + if (rbnode) { + if (rbnode->value == value) + return 0; + rbnode->value = value; + } else { + /* bail out early, no need to create the rbnode yet */ + if (!value) + return 0; + /* + * for uninitialized registers whose value is changed + * from the default zero, create an rbnode and insert + * it into the tree. 
+ */ + rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); + if (!rbnode) + return -ENOMEM; + rbnode->reg = reg; + rbnode->value = value; + snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode); + } + + return 0; +} + +static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbnode; + + rbtree_ctx = codec->reg_cache; + rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg); + if (rbnode) { + *value = rbnode->value; + } else { + /* uninitialized registers default to 0 */ + *value = 0; + } + + return 0; +} + +static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec) +{ + struct rb_node *next; + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbtree_node; + + /* if we've already been called then just return */ + rbtree_ctx = codec->reg_cache; + if (!rbtree_ctx) + return 0; + + /* free up the rbtree */ + next = rb_first(&rbtree_ctx->root); + while (next) { + rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node); + next = rb_next(&rbtree_node->node); + rb_erase(&rbtree_node->node, &rbtree_ctx->root); + kfree(rbtree_node); + } + + /* release the resources */ + kfree(codec->reg_cache); + codec->reg_cache = NULL; + + return 0; +} + +static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + + codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); + if (!codec->reg_cache) + return -ENOMEM; + + rbtree_ctx = codec->reg_cache; + rbtree_ctx->root = RB_ROOT; + + if (!codec->reg_def_copy) + return 0; + +/* + * populate the rbtree with the initialized registers. All other + * registers will be inserted into the tree when they are first written. + * + * The reasoning behind this, is that we need to step through and + * dereference the cache in u8/u16 increments without sacrificing + * portability. This could also be done using memcpy() but that would + * be slightly more cryptic. 
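The rbtree cache is deliberately sparse: a node is only allocated once a register holds something other than its default, reads of unseen registers fall back to zero, and sync skips every node whose value still equals its recorded default. The sketch below is a plain userspace illustration of that policy only, using a fixed array instead of an rbtree; none of it is the kernel implementation.

#include <stdio.h>

struct node { unsigned reg, value, defval; };

static struct node nodes[16];
static unsigned n_nodes;

static struct node *lookup(unsigned reg)
{
	unsigned i;

	for (i = 0; i < n_nodes; i++)
		if (nodes[i].reg == reg)
			return &nodes[i];
	return NULL;
}

static void cache_write(unsigned reg, unsigned value)
{
	struct node *n = lookup(reg);

	if (n) {
		n->value = value;
		return;
	}
	/* default-zero writes (and overflow in this toy version) are dropped */
	if (!value || n_nodes == 16)
		return;
	nodes[n_nodes++] = (struct node){ reg, value, 0 };
}

static unsigned cache_read(unsigned reg)
{
	struct node *n = lookup(reg);

	return n ? n->value : 0;	/* unseen registers read back as 0 */
}

static void cache_sync(void)
{
	unsigned i;

	for (i = 0; i < n_nodes; i++)
		if (nodes[i].value != nodes[i].defval)
			printf("sync reg %#x = %#x\n", nodes[i].reg, nodes[i].value);
}

int main(void)
{
	cache_write(0x02, 0x0000);		/* no entry is created */
	cache_write(0x1c, 0x00ff);		/* entry created, replayed by sync */
	printf("reg 0x02 reads back %#x\n", cache_read(0x02));
	cache_sync();
	return 0;
}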
+ */ +#define snd_soc_rbtree_populate(cache) \ +({ \ + int ret, i; \ + struct snd_soc_rbtree_node *rbtree_node; \ + \ + ret = 0; \ + cache = codec->reg_def_copy; \ + for (i = 0; i < codec->driver->reg_cache_size; ++i) { \ + if (!cache[i]) \ + continue; \ + rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL); \ + if (!rbtree_node) { \ + ret = -ENOMEM; \ + snd_soc_cache_exit(codec); \ + break; \ + } \ + rbtree_node->reg = i; \ + rbtree_node->value = cache[i]; \ + rbtree_node->defval = cache[i]; \ + snd_soc_rbtree_insert(&rbtree_ctx->root, \ + rbtree_node); \ + } \ + ret; \ +}) + + switch (codec->driver->reg_word_size) { + case 1: { + const u8 *cache; + + return snd_soc_rbtree_populate(cache); + } + case 2: { + const u16 *cache; + + return snd_soc_rbtree_populate(cache); + } + default: + BUG(); + } + + return 0; +} + +#ifdef CONFIG_SND_SOC_CACHE_LZO +struct snd_soc_lzo_ctx { + void *wmem; + void *dst; + const void *src; + size_t src_len; + size_t dst_len; + size_t decompressed_size; + unsigned long *sync_bmp; + int sync_bmp_nbits; +}; + +#define LZO_BLOCK_NUM 8 +static int snd_soc_lzo_block_count(void) +{ + return LZO_BLOCK_NUM; +} + +static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx) +{ + lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!lzo_ctx->wmem) + return -ENOMEM; + return 0; +} + +static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx) +{ + size_t compress_size; + int ret; + + ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &compress_size, lzo_ctx->wmem); + if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) + return -EINVAL; + lzo_ctx->dst_len = compress_size; + return 0; +} + +static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx) +{ + size_t dst_len; + int ret; + + dst_len = lzo_ctx->dst_len; + ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &dst_len); + if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) + return -EINVAL; + return 0; +} + +static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec, + struct snd_soc_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = snd_soc_lzo_compress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec, + struct snd_soc_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo_ctx->decompressed_size; + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = snd_soc_lzo_decompress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec, + unsigned int reg) +{ + const struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return (reg * codec_drv->reg_word_size) / + DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()); +} + +static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec, + unsigned int reg) +{ + const struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) / + codec_drv->reg_word_size); +} + +static inline int snd_soc_lzo_get_blksize(struct 
snd_soc_codec *codec) +{ + const struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()); +} + +static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + unsigned int val; + int i; + int ret; + + lzo_blocks = codec->reg_cache; + for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { + ret = snd_soc_cache_read(codec, i, &val); + if (ret) + return ret; + ret = snd_soc_write(codec, i, val); + if (ret) + return ret; + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + i, val); + } + + return 0; +} + +static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t blksize, tmp_dst_len; + void *tmp_dst; + + /* index of the compressed lzo block */ + blkindex = snd_soc_lzo_get_blkindex(codec, reg); + /* register index within the decompressed block */ + blkpos = snd_soc_lzo_get_blkpos(codec, reg); + /* size of the compressed block */ + blksize = snd_soc_lzo_get_blksize(codec); + lzo_blocks = codec->reg_cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* decompress the block */ + ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + goto out; + } + + /* write the new value to the cache */ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + cache = lzo_block->dst; + if (cache[blkpos] == value) { + kfree(lzo_block->dst); + goto out; + } + cache[blkpos] = value; + } + break; + case 2: { + u16 *cache; + cache = lzo_block->dst; + if (cache[blkpos] == value) { + kfree(lzo_block->dst); + goto out; + } + cache[blkpos] = value; + } + break; + default: + BUG(); + } + + /* prepare the source to be the decompressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* compress the block */ + ret = snd_soc_lzo_compress_cache_block(codec, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + kfree(lzo_block->src); + goto out; + } + + /* set the bit so we know we have to sync this register */ + set_bit(reg, lzo_block->sync_bmp); + kfree(tmp_dst); + kfree(lzo_block->src); + return 0; +out: + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + return ret; +} + +static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t blksize, tmp_dst_len; + void *tmp_dst; + + *value = 0; + /* index of the compressed lzo block */ + blkindex = snd_soc_lzo_get_blkindex(codec, reg); + /* register index within the decompressed block */ + blkpos = snd_soc_lzo_get_blkpos(codec, reg); + /* size of the compressed block */ + blksize = snd_soc_lzo_get_blksize(codec); + lzo_blocks = codec->reg_cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = 
lzo_block->dst_len; + + /* decompress the block */ + ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block); + if (ret >= 0) { + /* fetch the value from the cache */ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + cache = lzo_block->dst; + *value = cache[blkpos]; + } + break; + case 2: { + u16 *cache; + cache = lzo_block->dst; + *value = cache[blkpos]; + } + break; + default: + BUG(); + } + } + + kfree(lzo_block->dst); + /* restore the pointer and length of the compressed block */ + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + return 0; +} + +static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + int i, blkcount; + + lzo_blocks = codec->reg_cache; + if (!lzo_blocks) + return 0; + + blkcount = snd_soc_lzo_block_count(); + /* + * the pointer to the bitmap used for syncing the cache + * is shared amongst all lzo_blocks. Ensure it is freed + * only once. + */ + if (lzo_blocks[0]) + kfree(lzo_blocks[0]->sync_bmp); + for (i = 0; i < blkcount; ++i) { + if (lzo_blocks[i]) { + kfree(lzo_blocks[i]->wmem); + kfree(lzo_blocks[i]->dst); + } + /* each lzo_block is a pointer returned by kmalloc or NULL */ + kfree(lzo_blocks[i]); + } + kfree(lzo_blocks); + codec->reg_cache = NULL; + return 0; +} + +static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + size_t reg_size, bmp_size; + const struct snd_soc_codec_driver *codec_drv; + int ret, tofree, i, blksize, blkcount; + const char *p, *end; + unsigned long *sync_bmp; + + ret = 0; + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + + /* + * If we have not been given a default register cache + * then allocate a dummy zero-ed out region, compress it + * and remember to free it afterwards. + */ + tofree = 0; + if (!codec->reg_def_copy) + tofree = 1; + + if (!codec->reg_def_copy) { + codec->reg_def_copy = kzalloc(reg_size, + GFP_KERNEL); + if (!codec->reg_def_copy) + return -ENOMEM; + } + + blkcount = snd_soc_lzo_block_count(); + codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks, + GFP_KERNEL); + if (!codec->reg_cache) { + ret = -ENOMEM; + goto err_tofree; + } + lzo_blocks = codec->reg_cache; + + /* + * allocate a bitmap to be used when syncing the cache with + * the hardware. Each time a register is modified, the corresponding + * bit is set in the bitmap, so we know that we have to sync + * that register. 
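Each register lands in one of LZO_BLOCK_NUM compressed blocks, and the blkindex/blkpos/blksize helpers derive everything from reg_cache_size, reg_word_size and the block count. A standalone check of that arithmetic (the cache dimensions below are made-up sample values, not from any driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define LZO_BLOCK_NUM		8

int main(void)
{
	unsigned reg_cache_size = 100;	/* registers, example only */
	unsigned reg_word_size  = 2;	/* bytes per register */
	unsigned reg_size = reg_cache_size * reg_word_size;
	unsigned blksize  = DIV_ROUND_UP(reg_size, LZO_BLOCK_NUM);
	unsigned reg = 30;

	unsigned blkindex = (reg * reg_word_size) / blksize;
	unsigned blkpos   = reg % (blksize / reg_word_size);

	/* prints: block size 25 bytes, reg 30 -> block 2, offset 6 */
	printf("block size %u bytes, reg %u -> block %u, offset %u\n",
	       blksize, reg, blkindex, blkpos);
	return 0;
}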
+ */ + bmp_size = codec_drv->reg_cache_size; + sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), + GFP_KERNEL); + if (!sync_bmp) { + ret = -ENOMEM; + goto err; + } + bitmap_zero(sync_bmp, bmp_size); + + /* allocate the lzo blocks and initialize them */ + for (i = 0; i < blkcount; ++i) { + lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, + GFP_KERNEL); + if (!lzo_blocks[i]) { + kfree(sync_bmp); + ret = -ENOMEM; + goto err; + } + lzo_blocks[i]->sync_bmp = sync_bmp; + lzo_blocks[i]->sync_bmp_nbits = bmp_size; + /* alloc the working space for the compressed block */ + ret = snd_soc_lzo_prepare(lzo_blocks[i]); + if (ret < 0) + goto err; + } + + blksize = snd_soc_lzo_get_blksize(codec); + p = codec->reg_def_copy; + end = codec->reg_def_copy + reg_size; + /* compress the register map and fill the lzo blocks */ + for (i = 0; i < blkcount; ++i, p += blksize) { + lzo_blocks[i]->src = p; + if (p + blksize > end) + lzo_blocks[i]->src_len = end - p; + else + lzo_blocks[i]->src_len = blksize; + ret = snd_soc_lzo_compress_cache_block(codec, + lzo_blocks[i]); + if (ret < 0) + goto err; + lzo_blocks[i]->decompressed_size = + lzo_blocks[i]->src_len; + } + + if (tofree) { + kfree(codec->reg_def_copy); + codec->reg_def_copy = NULL; + } + return 0; +err: + snd_soc_cache_exit(codec); +err_tofree: + if (tofree) { + kfree(codec->reg_def_copy); + codec->reg_def_copy = NULL; + } + return ret; +} +#endif + +static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) +{ + int i; + int ret; + const struct snd_soc_codec_driver *codec_drv; + unsigned int val; + + codec_drv = codec->driver; + for (i = 0; i < codec_drv->reg_cache_size; ++i) { + ret = snd_soc_cache_read(codec, i, &val); + if (ret) + return ret; + if (codec_drv->reg_cache_default) { + switch (codec_drv->reg_word_size) { + case 1: { + const u8 *cache; + + cache = codec_drv->reg_cache_default; + if (cache[i] == val) + continue; + } + break; + case 2: { + const u16 *cache; + + cache = codec_drv->reg_cache_default; + if (cache[i] == val) + continue; + } + break; + default: + BUG(); + } + } + ret = snd_soc_write(codec, i, val); + if (ret) + return ret; + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + i, val); + } + return 0; +} + +static int snd_soc_flat_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + + cache = codec->reg_cache; + cache[reg] = value; + } + break; + case 2: { + u16 *cache; + + cache = codec->reg_cache; + cache[reg] = value; + } + break; + default: + BUG(); + } + + return 0; +} + +static int snd_soc_flat_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + + cache = codec->reg_cache; + *value = cache[reg]; + } + break; + case 2: { + u16 *cache; + + cache = codec->reg_cache; + *value = cache[reg]; + } + break; + default: + BUG(); + } + + return 0; +} + +static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec) +{ + if (!codec->reg_cache) + return 0; + kfree(codec->reg_cache); + codec->reg_cache = NULL; + return 0; +} + +static int snd_soc_flat_cache_init(struct snd_soc_codec *codec) +{ + const struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + + /* + * for flat compression, we don't need to keep a copy of the + * original defaults register cache as it will definitely not + * be marked as __devinitconst + */ + 
kfree(codec->reg_def_copy); + codec->reg_def_copy = NULL; + + if (codec_drv->reg_cache_default) + codec->reg_cache = kmemdup(codec_drv->reg_cache_default, + reg_size, GFP_KERNEL); + else + codec->reg_cache = kzalloc(reg_size, GFP_KERNEL); + if (!codec->reg_cache) + return -ENOMEM; + + return 0; +} + +/* an array of all supported compression types */ +static const struct snd_soc_cache_ops cache_types[] = { + /* Flat *must* be the first entry for fallback */ + { + .id = SND_SOC_FLAT_COMPRESSION, + .name = "flat", + .init = snd_soc_flat_cache_init, + .exit = snd_soc_flat_cache_exit, + .read = snd_soc_flat_cache_read, + .write = snd_soc_flat_cache_write, + .sync = snd_soc_flat_cache_sync + }, +#ifdef CONFIG_SND_SOC_CACHE_LZO + { + .id = SND_SOC_LZO_COMPRESSION, + .name = "LZO", + .init = snd_soc_lzo_cache_init, + .exit = snd_soc_lzo_cache_exit, + .read = snd_soc_lzo_cache_read, + .write = snd_soc_lzo_cache_write, + .sync = snd_soc_lzo_cache_sync + }, +#endif + { + .id = SND_SOC_RBTREE_COMPRESSION, + .name = "rbtree", + .init = snd_soc_rbtree_cache_init, + .exit = snd_soc_rbtree_cache_exit, + .read = snd_soc_rbtree_cache_read, + .write = snd_soc_rbtree_cache_write, + .sync = snd_soc_rbtree_cache_sync + } +}; + +int snd_soc_cache_init(struct snd_soc_codec *codec) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cache_types); ++i) + if (cache_types[i].id == codec->compress_type) + break; + + /* Fall back to flat compression */ + if (i == ARRAY_SIZE(cache_types)) { + dev_warn(codec->dev, "Could not match compress type: %d\n", + codec->compress_type); + i = 0; + } + + mutex_init(&codec->cache_rw_mutex); + codec->cache_ops = &cache_types[i]; + + if (codec->cache_ops->init) { + if (codec->cache_ops->name) + dev_dbg(codec->dev, "Initializing %s cache for %s codec\n", + codec->cache_ops->name, codec->name); + return codec->cache_ops->init(codec); + } + return -EINVAL; +} + +/* + * NOTE: keep in mind that this function might be called + * multiple times. + */ +int snd_soc_cache_exit(struct snd_soc_codec *codec) +{ + if (codec->cache_ops && codec->cache_ops->exit) { + if (codec->cache_ops->name) + dev_dbg(codec->dev, "Destroying %s cache for %s codec\n", + codec->cache_ops->name, codec->name); + return codec->cache_ops->exit(codec); + } + return -EINVAL; +} + +/** + * snd_soc_cache_read: Fetch the value of a given register from the cache. + * + * @codec: CODEC to configure. + * @reg: The register index. + * @value: The value to be returned. + */ +int snd_soc_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + int ret; + + mutex_lock(&codec->cache_rw_mutex); + + if (value && codec->cache_ops && codec->cache_ops->read) { + ret = codec->cache_ops->read(codec, reg, value); + mutex_unlock(&codec->cache_rw_mutex); + return ret; + } + + mutex_unlock(&codec->cache_rw_mutex); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_read); + +/** + * snd_soc_cache_write: Set the value of a given register in the cache. + * + * @codec: CODEC to configure. + * @reg: The register index. + * @value: The new register value. 
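With the cache_types ops table in place, a CODEC driver only has to describe its register map and name a compression type; snd_soc_cache_init() dispatches everything else. A hedged sketch of how a hypothetical driver might fill in the cache-related fields (the wm_foo names, sizes and reset values are invented for illustration):

#include <sound/soc.h>

#define WM_FOO_CACHE_SIZE 256	/* hypothetical register count */

static const u16 wm_foo_reg_defaults[WM_FOO_CACHE_SIZE] = {
	[0x00] = 0x0097,	/* invented silicon reset values */
	[0x01] = 0x0097,
};

static struct snd_soc_codec_driver wm_foo_codec_driver = {
	/* register map description consumed by snd_soc_cache_init() */
	.reg_cache_size	   = WM_FOO_CACHE_SIZE,
	.reg_word_size	   = sizeof(u16),
	.reg_cache_default = wm_foo_reg_defaults,
	/* ask for the sparse rbtree cache instead of the flat default */
	.compress_type	   = SND_SOC_RBTREE_COMPRESSION,
};

Registration still goes through snd_soc_register_codec(), which now duplicates reg_cache_default into reg_def_copy before the defaults table can disappear.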
+ */ +int snd_soc_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + int ret; + + mutex_lock(&codec->cache_rw_mutex); + + if (codec->cache_ops && codec->cache_ops->write) { + ret = codec->cache_ops->write(codec, reg, value); + mutex_unlock(&codec->cache_rw_mutex); + return ret; + } + + mutex_unlock(&codec->cache_rw_mutex); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_write); + +/** + * snd_soc_cache_sync: Sync the register cache with the hardware. + * + * @codec: CODEC to configure. + * + * Any registers that should not be synced should be marked as + * volatile. In general drivers can choose not to use the provided + * syncing functionality if they so require. + */ +int snd_soc_cache_sync(struct snd_soc_codec *codec) +{ + int ret; + + if (!codec->cache_sync) { + return 0; + } + + if (codec->cache_ops && codec->cache_ops->sync) { + if (codec->cache_ops->name) + dev_dbg(codec->dev, "Syncing %s cache for %s codec\n", + codec->cache_ops->name, codec->name); + ret = codec->cache_ops->sync(codec); + if (!ret) + codec->cache_sync = 0; + return ret; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_sync); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 85b7d548f167..bac7291b6ff6 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -33,12 +33,15 @@ #include #include #include +#include #include #include #include -#include #include +#define CREATE_TRACE_POINTS +#include + #define NAME_SIZE 32 static DEFINE_MUTEX(pcm_mutex); @@ -67,25 +70,6 @@ static int pmdown_time = 5000; module_param(pmdown_time, int, 0); MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)"); -/* - * This function forces any delayed work to be queued and run. - */ -static int run_delayed_work(struct delayed_work *dwork) -{ - int ret; - - /* cancel any work waiting to be queued. */ - ret = cancel_delayed_work(dwork); - - /* if there was any work waiting then we run it now and - * wait for it's completion */ - if (ret) { - schedule_delayed_work(dwork, 0); - flush_scheduled_work(); - } - return ret; -} - /* codec register dump */ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf) { @@ -114,7 +98,7 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf) * the register being volatile and the device being * powered off. 
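snd_soc_cache_sync() is what a driver calls once the device is powered and reachable again and cache_only writes have accumulated; volatile registers never enter the cache in the first place, so they are not replayed. A rough sketch of a resume path under those assumptions (wm_foo_resume is hypothetical, and the exact point at which cache_only is cleared varies between drivers):

#include <sound/soc.h>

static int wm_foo_resume(struct snd_soc_codec *codec)
{
	int ret;

	/* stop redirecting writes into the cache now that the
	 * part is powered and the control bus works again */
	codec->cache_only = 0;

	/* replay every cached register that differs from its default */
	ret = snd_soc_cache_sync(codec);
	if (ret < 0)
		dev_err(codec->dev, "failed to sync cache: %d\n", ret);

	return ret;
}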
*/ - ret = codec->driver->read(codec, i); + ret = snd_soc_read(codec, i); if (ret >= 0) count += snprintf(buf + count, PAGE_SIZE - count, @@ -225,7 +209,7 @@ static ssize_t codec_reg_write_file(struct file *file, start++; if (strict_strtoul(start, 16, &value)) return -EINVAL; - codec->driver->write(codec, reg, value); + snd_soc_write(codec, reg, value); return buf_size; } @@ -238,8 +222,10 @@ static const struct file_operations codec_reg_fops = { static void soc_init_codec_debugfs(struct snd_soc_codec *codec) { - codec->debugfs_codec_root = debugfs_create_dir(codec->name , - debugfs_root); + struct dentry *debugfs_card_root = codec->card->debugfs_card_root; + + codec->debugfs_codec_root = debugfs_create_dir(codec->name, + debugfs_card_root); if (!codec->debugfs_codec_root) { printk(KERN_WARNING "ASoC: Failed to create codec debugfs directory\n"); @@ -253,20 +239,13 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec) printk(KERN_WARNING "ASoC: Failed to create codec register debugfs file\n"); - codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644, - codec->debugfs_codec_root, - &codec->pop_time); - if (!codec->debugfs_pop_time) - printk(KERN_WARNING - "Failed to create pop time debugfs file\n"); - - codec->debugfs_dapm = debugfs_create_dir("dapm", + codec->dapm.debugfs_dapm = debugfs_create_dir("dapm", codec->debugfs_codec_root); - if (!codec->debugfs_dapm) + if (!codec->dapm.debugfs_dapm) printk(KERN_WARNING "Failed to create DAPM debugfs directory\n"); - snd_soc_dapm_debugfs_init(codec); + snd_soc_dapm_debugfs_init(&codec->dapm); } static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec) @@ -374,6 +353,29 @@ static const struct file_operations platform_list_fops = { .llseek = default_llseek,/* read accesses f_pos */ }; +static void soc_init_card_debugfs(struct snd_soc_card *card) +{ + card->debugfs_card_root = debugfs_create_dir(card->name, + debugfs_root); + if (!card->debugfs_card_root) { + dev_warn(card->dev, + "ASoC: Failed to create codec debugfs directory\n"); + return; + } + + card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644, + card->debugfs_card_root, + &card->pop_time); + if (!card->debugfs_pop_time) + dev_warn(card->dev, + "Failed to create pop time debugfs file\n"); +} + +static void soc_cleanup_card_debugfs(struct snd_soc_card *card) +{ + debugfs_remove_recursive(card->debugfs_card_root); +} + #else static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec) @@ -383,6 +385,14 @@ static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec) static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec) { } + +static inline void soc_init_card_debugfs(struct snd_soc_card *card) +{ +} + +static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card) +{ +} #endif #ifdef CONFIG_SND_SOC_AC97_BUS @@ -497,7 +507,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream) } } - /* Check that the codec and cpu DAI's are compatible */ + /* Check that the codec and cpu DAIs are compatible */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw.rate_min = max(codec_dai_drv->playback.rate_min, @@ -846,7 +856,7 @@ codec_err: } /* - * Free's resources allocated by hw_params, can be called multiple times + * Frees resources allocated by hw_params, can be called multiple times */ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) { @@ -870,7 +880,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) if (platform->driver->ops->hw_free) 
platform->driver->ops->hw_free(substream); - /* now free hw params for the DAI's */ + /* now free hw params for the DAIs */ if (codec_dai->driver->ops->hw_free) codec_dai->driver->ops->hw_free(substream, codec_dai); @@ -958,6 +968,7 @@ static int soc_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct snd_soc_card *card = platform_get_drvdata(pdev); + struct snd_soc_codec *codec; int i; /* If the initialization of this soc device failed, there is no codec @@ -976,7 +987,7 @@ static int soc_suspend(struct device *dev) /* we're going to block userspace touching us until resume completes */ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot); - /* mute any active DAC's */ + /* mute any active DACs */ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_dai *dai = card->rtd[i].codec_dai; struct snd_soc_dai_driver *drv = dai->driver; @@ -1016,8 +1027,8 @@ static int soc_suspend(struct device *dev) /* close any waiting streams and save state */ for (i = 0; i < card->num_rtd; i++) { - run_delayed_work(&card->rtd[i].delayed_work); - card->rtd[i].codec->suspend_bias_level = card->rtd[i].codec->bias_level; + flush_delayed_work_sync(&card->rtd[i].delayed_work); + card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level; } for (i = 0; i < card->num_rtd; i++) { @@ -1036,12 +1047,11 @@ static int soc_suspend(struct device *dev) } /* suspend all CODECs */ - for (i = 0; i < card->num_rtd; i++) { - struct snd_soc_codec *codec = card->rtd[i].codec; + list_for_each_entry(codec, &card->codec_dev_list, card_list) { /* If there are paths active then the CODEC will be held with * bias _ON and should not be suspended. */ if (!codec->suspended && codec->driver->suspend) { - switch (codec->bias_level) { + switch (codec->dapm.bias_level) { case SND_SOC_BIAS_STANDBY: case SND_SOC_BIAS_OFF: codec->driver->suspend(codec, PMSG_SUSPEND); @@ -1078,6 +1088,7 @@ static void soc_resume_deferred(struct work_struct *work) struct snd_soc_card *card = container_of(work, struct snd_soc_card, deferred_resume_work); struct platform_device *pdev = to_platform_device(card->dev); + struct snd_soc_codec *codec; int i; /* our power state is still SNDRV_CTL_POWER_D3hot from suspend time, @@ -1103,14 +1114,13 @@ static void soc_resume_deferred(struct work_struct *work) cpu_dai->driver->resume(cpu_dai); } - for (i = 0; i < card->num_rtd; i++) { - struct snd_soc_codec *codec = card->rtd[i].codec; + list_for_each_entry(codec, &card->codec_dev_list, card_list) { /* If the CODEC was idle over suspend then it will have been * left with bias OFF or STANDBY and suspended so we must now * resume. Otherwise the suspend was suppressed. 
*/ if (codec->driver->resume && codec->suspended) { - switch (codec->bias_level) { + switch (codec->dapm.bias_level) { case SND_SOC_BIAS_STANDBY: case SND_SOC_BIAS_OFF: codec->driver->resume(codec); @@ -1249,9 +1259,6 @@ find_codec: if (!strcmp(codec->name, dai_link->codec_name)) { rtd->codec = codec; - if (!try_module_get(codec->dev->driver->owner)) - return -ENODEV; - /* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/ list_for_each_entry(codec_dai, &dai_list, list) { if (codec->dev == codec_dai->dev && @@ -1277,10 +1284,6 @@ find_platform: /* no, then find CPU DAI from registered DAIs*/ list_for_each_entry(platform, &platform_list, list) { if (!strcmp(platform->name, dai_link->platform_name)) { - - if (!try_module_get(platform->dev->driver->owner)) - return -ENODEV; - rtd->platform = platform; goto out; } @@ -1299,6 +1302,27 @@ out: return 1; } +static void soc_remove_codec(struct snd_soc_codec *codec) +{ + int err; + + if (codec->driver->remove) { + err = codec->driver->remove(codec); + if (err < 0) + dev_err(codec->dev, + "asoc: failed to remove %s: %d\n", + codec->name, err); + } + + /* Make sure all DAPM widgets are freed */ + snd_soc_dapm_free(&codec->dapm); + + soc_cleanup_codec_debugfs(codec); + codec->probed = 0; + list_del(&codec->card_list); + module_put(codec->dev->driver->owner); +} + static void soc_remove_dai_link(struct snd_soc_card *card, int num) { struct snd_soc_pcm_runtime *rtd = &card->rtd[num]; @@ -1310,6 +1334,7 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num) /* unregister the rtd device */ if (rtd->dev_registered) { device_remove_file(&rtd->dev, &dev_attr_pmdown_time); + device_remove_file(&rtd->dev, &dev_attr_codec_reg); device_unregister(&rtd->dev); rtd->dev_registered = 0; } @@ -1338,22 +1363,8 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num) } /* remove the CODEC */ - if (codec && codec->probed) { - if (codec->driver->remove) { - err = codec->driver->remove(codec); - if (err < 0) - printk(KERN_ERR "asoc: failed to remove %s\n", codec->name); - } - - /* Make sure all DAPM widgets are freed */ - snd_soc_dapm_free(codec); - - soc_cleanup_codec_debugfs(codec); - device_remove_file(&rtd->dev, &dev_attr_codec_reg); - codec->probed = 0; - list_del(&codec->card_list); - module_put(codec->dev->driver->owner); - } + if (codec && codec->probed) + soc_remove_codec(codec); /* remove the cpu_dai */ if (cpu_dai && cpu_dai->probed) { @@ -1368,8 +1379,126 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num) } } +static void soc_set_name_prefix(struct snd_soc_card *card, + struct snd_soc_codec *codec) +{ + int i; + + if (card->codec_conf == NULL) + return; + + for (i = 0; i < card->num_configs; i++) { + struct snd_soc_codec_conf *map = &card->codec_conf[i]; + if (map->dev_name && !strcmp(codec->name, map->dev_name)) { + codec->name_prefix = map->name_prefix; + break; + } + } +} + +static int soc_probe_codec(struct snd_soc_card *card, + struct snd_soc_codec *codec) +{ + int ret = 0; + + codec->card = card; + codec->dapm.card = card; + soc_set_name_prefix(card, codec); + + if (codec->driver->probe) { + ret = codec->driver->probe(codec); + if (ret < 0) { + dev_err(codec->dev, + "asoc: failed to probe CODEC %s: %d\n", + codec->name, ret); + return ret; + } + } + + soc_init_codec_debugfs(codec); + + /* mark codec as probed and add to card codec list */ + if (!try_module_get(codec->dev->driver->owner)) + return -ENODEV; + + codec->probed = 1; + list_add(&codec->card_list, &card->codec_dev_list); + 
list_add(&codec->dapm.list, &card->dapm_list); + + return ret; +} + static void rtd_release(struct device *dev) {} +static int soc_post_component_init(struct snd_soc_card *card, + struct snd_soc_codec *codec, + int num, int dailess) +{ + struct snd_soc_dai_link *dai_link = NULL; + struct snd_soc_aux_dev *aux_dev = NULL; + struct snd_soc_pcm_runtime *rtd; + const char *temp, *name; + int ret = 0; + + if (!dailess) { + dai_link = &card->dai_link[num]; + rtd = &card->rtd[num]; + name = dai_link->name; + } else { + aux_dev = &card->aux_dev[num]; + rtd = &card->rtd_aux[num]; + name = aux_dev->name; + } + + /* machine controls, routes and widgets are not prefixed */ + temp = codec->name_prefix; + codec->name_prefix = NULL; + + /* do machine specific initialization */ + if (!dailess && dai_link->init) + ret = dai_link->init(rtd); + else if (dailess && aux_dev->init) + ret = aux_dev->init(&codec->dapm); + if (ret < 0) { + dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret); + return ret; + } + codec->name_prefix = temp; + + /* Make sure all DAPM widgets are instantiated */ + snd_soc_dapm_new_widgets(&codec->dapm); + snd_soc_dapm_sync(&codec->dapm); + + /* register the rtd device */ + rtd->codec = codec; + rtd->card = card; + rtd->dev.parent = card->dev; + rtd->dev.release = rtd_release; + rtd->dev.init_name = name; + ret = device_register(&rtd->dev); + if (ret < 0) { + dev_err(card->dev, + "asoc: failed to register runtime device: %d\n", ret); + return ret; + } + rtd->dev_registered = 1; + + /* add DAPM sysfs entries for this codec */ + ret = snd_soc_dapm_sys_add(&rtd->dev); + if (ret < 0) + dev_err(codec->dev, + "asoc: failed to add codec dapm sysfs entries: %d\n", + ret); + + /* add codec sysfs entries */ + ret = device_create_file(&rtd->dev, &dev_attr_codec_reg); + if (ret < 0) + dev_err(codec->dev, + "asoc: failed to add codec sysfs files: %d\n", ret); + + return 0; +} + static int soc_probe_dai_link(struct snd_soc_card *card, int num) { struct snd_soc_dai_link *dai_link = &card->dai_link[num]; @@ -1383,10 +1512,7 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num) /* config components */ codec_dai->codec = codec; - codec->card = card; cpu_dai->platform = platform; - rtd->card = card; - rtd->dev.parent = card->dev; codec_dai->card = card; cpu_dai->card = card; @@ -1410,20 +1536,9 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num) /* probe the CODEC */ if (!codec->probed) { - if (codec->driver->probe) { - ret = codec->driver->probe(codec); - if (ret < 0) { - printk(KERN_ERR "asoc: failed to probe CODEC %s\n", - codec->name); - return ret; - } - } - - soc_init_codec_debugfs(codec); - - /* mark codec as probed and add to card codec list */ - codec->probed = 1; - list_add(&codec->card_list, &card->codec_dev_list); + ret = soc_probe_codec(card, codec); + if (ret < 0) + return ret; } /* probe the platform */ @@ -1437,6 +1552,10 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num) } } /* mark platform as probed and add to card platform list */ + + if (!try_module_get(platform->dev->driver->owner)) + return -ENODEV; + platform->probed = 1; list_add(&platform->card_list, &card->platform_dev_list); } @@ -1460,43 +1579,14 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num) /* DAPM dai link stream work */ INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work); - /* now that all clients have probed, initialise the DAI link */ - if (dai_link->init) { - ret = dai_link->init(rtd); - if (ret < 0) { - printk(KERN_ERR "asoc: failed 
to init %s\n", dai_link->stream_name); - return ret; - } - } - - /* Make sure all DAPM widgets are instantiated */ - snd_soc_dapm_new_widgets(codec); - snd_soc_dapm_sync(codec); - - /* register the rtd device */ - rtd->dev.release = rtd_release; - rtd->dev.init_name = dai_link->name; - ret = device_register(&rtd->dev); - if (ret < 0) { - printk(KERN_ERR "asoc: failed to register DAI runtime device %d\n", ret); + ret = soc_post_component_init(card, codec, num, 0); + if (ret) return ret; - } - rtd->dev_registered = 1; ret = device_create_file(&rtd->dev, &dev_attr_pmdown_time); if (ret < 0) printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n"); - /* add DAPM sysfs entries for this codec */ - ret = snd_soc_dapm_sys_add(&rtd->dev); - if (ret < 0) - printk(KERN_WARNING "asoc: failed to add codec dapm sysfs entries\n"); - - /* add codec sysfs entries */ - ret = device_create_file(&rtd->dev, &dev_attr_codec_reg); - if (ret < 0) - printk(KERN_WARNING "asoc: failed to add codec sysfs files\n"); - /* create the pcm */ ret = soc_new_pcm(rtd, num); if (ret < 0) { @@ -1551,9 +1641,85 @@ static void soc_unregister_ac97_dai_link(struct snd_soc_codec *codec) } #endif +static int soc_probe_aux_dev(struct snd_soc_card *card, int num) +{ + struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num]; + struct snd_soc_codec *codec; + int ret = -ENODEV; + + /* find CODEC from registered CODECs*/ + list_for_each_entry(codec, &codec_list, list) { + if (!strcmp(codec->name, aux_dev->codec_name)) { + if (codec->probed) { + dev_err(codec->dev, + "asoc: codec already probed"); + ret = -EBUSY; + goto out; + } + goto found; + } + } + /* codec not found */ + dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name); + goto out; + +found: + if (!try_module_get(codec->dev->driver->owner)) + return -ENODEV; + + ret = soc_probe_codec(card, codec); + if (ret < 0) + return ret; + + ret = soc_post_component_init(card, codec, num, 1); + +out: + return ret; +} + +static void soc_remove_aux_dev(struct snd_soc_card *card, int num) +{ + struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num]; + struct snd_soc_codec *codec = rtd->codec; + + /* unregister the rtd device */ + if (rtd->dev_registered) { + device_remove_file(&rtd->dev, &dev_attr_codec_reg); + device_unregister(&rtd->dev); + rtd->dev_registered = 0; + } + + if (codec && codec->probed) + soc_remove_codec(codec); +} + +static int snd_soc_init_codec_cache(struct snd_soc_codec *codec, + enum snd_soc_compress_type compress_type) +{ + int ret; + + if (codec->cache_init) + return 0; + + /* override the compress_type if necessary */ + if (compress_type && codec->compress_type != compress_type) + codec->compress_type = compress_type; + ret = snd_soc_cache_init(codec); + if (ret < 0) { + dev_err(codec->dev, "Failed to set cache compression type: %d\n", + ret); + return ret; + } + codec->cache_init = 1; + return 0; +} + static void snd_soc_instantiate_card(struct snd_soc_card *card) { struct platform_device *pdev = to_platform_device(card->dev); + struct snd_soc_codec *codec; + struct snd_soc_codec_conf *codec_conf; + enum snd_soc_compress_type compress_type; int ret, i; mutex_lock(&card->mutex); @@ -1573,6 +1739,39 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) return; } + /* initialize the register cache for each available codec */ + list_for_each_entry(codec, &codec_list, list) { + if (codec->cache_init) + continue; + /* check to see if we need to override the compress_type */ + for (i = 0; i < card->num_configs; ++i) { + codec_conf = 
&card->codec_conf[i]; + if (!strcmp(codec->name, codec_conf->dev_name)) { + compress_type = codec_conf->compress_type; + if (compress_type && compress_type + != codec->compress_type) + break; + } + } + if (i == card->num_configs) { + /* no need to override the compress_type so + * go ahead and do the standard thing */ + ret = snd_soc_init_codec_cache(codec, 0); + if (ret < 0) { + mutex_unlock(&card->mutex); + return; + } + continue; + } + /* override the compress_type with the one supplied in + * the machine driver */ + ret = snd_soc_init_codec_cache(codec, compress_type); + if (ret < 0) { + mutex_unlock(&card->mutex); + return; + } + } + /* card bind complete so register a sound card */ ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, card->owner, 0, &card->snd_card); @@ -1605,6 +1804,15 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) } } + for (i = 0; i < card->num_aux_devs; i++) { + ret = soc_probe_aux_dev(card, i); + if (ret < 0) { + pr_err("asoc: failed to add auxiliary devices %s: %d\n", + card->name, ret); + goto probe_aux_dev_err; + } + } + snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname), "%s", card->name); snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), @@ -1613,7 +1821,7 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) ret = snd_card_register(card->snd_card); if (ret < 0) { printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name); - goto probe_dai_err; + goto probe_aux_dev_err; } #ifdef CONFIG_SND_SOC_AC97_BUS @@ -1623,8 +1831,8 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) if (ret < 0) { printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name); while (--i >= 0) - soc_unregister_ac97_dai_link(&card->rtd[i]); - goto probe_dai_err; + soc_unregister_ac97_dai_link(card->rtd[i].codec); + goto probe_aux_dev_err; } } #endif @@ -1633,6 +1841,10 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) mutex_unlock(&card->mutex); return; +probe_aux_dev_err: + for (i = 0; i < card->num_aux_devs; i++) + soc_remove_aux_dev(card, i); + probe_dai_err: for (i = 0; i < card->num_links; i++) soc_remove_dai_link(card, i); @@ -1668,6 +1880,11 @@ static int soc_probe(struct platform_device *pdev) INIT_LIST_HEAD(&card->dai_dev_list); INIT_LIST_HEAD(&card->codec_dev_list); INIT_LIST_HEAD(&card->platform_dev_list); + INIT_LIST_HEAD(&card->widgets); + INIT_LIST_HEAD(&card->paths); + INIT_LIST_HEAD(&card->dapm_list); + + soc_init_card_debugfs(card); ret = snd_soc_register_card(card); if (ret != 0) { @@ -1689,13 +1906,19 @@ static int soc_remove(struct platform_device *pdev) /* make sure any delayed work runs */ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; - run_delayed_work(&rtd->delayed_work); + flush_delayed_work_sync(&rtd->delayed_work); } + /* remove auxiliary devices */ + for (i = 0; i < card->num_aux_devs; i++) + soc_remove_aux_dev(card, i); + /* remove and free each DAI */ for (i = 0; i < card->num_rtd; i++) soc_remove_dai_link(card, i); + soc_cleanup_card_debugfs(card); + /* remove the card */ if (card->remove) card->remove(pdev); @@ -1720,7 +1943,7 @@ static int soc_poweroff(struct device *dev) * now, we're shutting down so no imminent restart. 
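The probe path above consults card->codec_conf[] for a per-CODEC control-name prefix and an optional cache compression override, and card->aux_dev[] for DAI-less devices such as external amplifiers. A sketch of how a machine driver might populate these (every device name here is a placeholder, amp_init is hypothetical, and dai_link/num_links are omitted for brevity):

#include <sound/soc.h>

static int amp_init(struct snd_soc_dapm_context *dapm)
{
	/* hypothetical: add amplifier widgets and routes here */
	return 0;
}

static struct snd_soc_codec_conf card_codec_conf[] = {
	{
		.dev_name      = "wm_foo-codec.0",	/* placeholder name */
		.name_prefix   = "Sub",			/* prefixes its kcontrols */
		/* needs CONFIG_SND_SOC_CACHE_LZO */
		.compress_type = SND_SOC_LZO_COMPRESSION,
	},
};

static struct snd_soc_aux_dev card_aux_devs[] = {
	{
		.name	    = "amp",			/* DAI-less device */
		.codec_name = "wm_amp-codec.0",		/* placeholder name */
		.init	    = amp_init,			/* gets a DAPM context */
	},
};

static struct snd_soc_card my_card = {
	.name	       = "example-card",
	.codec_conf    = card_codec_conf,
	.num_configs   = ARRAY_SIZE(card_codec_conf),
	.aux_dev       = card_aux_devs,
	.num_aux_devs  = ARRAY_SIZE(card_aux_devs),
};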
*/ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; - run_delayed_work(&rtd->delayed_work); + flush_delayed_work_sync(&rtd->delayed_work); } snd_soc_dapm_shutdown(card); @@ -1879,6 +2102,27 @@ void snd_soc_free_ac97_codec(struct snd_soc_codec *codec) } EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec); +unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg) +{ + unsigned int ret; + + ret = codec->read(codec, reg); + dev_dbg(codec->dev, "read %x => %x\n", reg, ret); + trace_snd_soc_reg_read(codec, reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(snd_soc_read); + +unsigned int snd_soc_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int val) +{ + dev_dbg(codec->dev, "write %x = %x\n", reg, val); + trace_snd_soc_reg_write(codec, reg, val); + return codec->write(codec, reg, val); +} +EXPORT_SYMBOL_GPL(snd_soc_write); + /** * snd_soc_update_bits - update codec register bits * @codec: audio codec @@ -2019,14 +2263,22 @@ int snd_soc_add_controls(struct snd_soc_codec *codec, const struct snd_kcontrol_new *controls, int num_controls) { struct snd_card *card = codec->card->snd_card; + char prefixed_name[44], *name; int err, i; for (i = 0; i < num_controls; i++) { const struct snd_kcontrol_new *control = &controls[i]; - err = snd_ctl_add(card, snd_soc_cnew(control, codec, NULL)); + if (codec->name_prefix) { + snprintf(prefixed_name, sizeof(prefixed_name), "%s %s", + codec->name_prefix, control->name); + name = prefixed_name; + } else { + name = control->name; + } + err = snd_ctl_add(card, snd_soc_cnew(control, codec, name)); if (err < 0) { dev_err(codec->dev, "%s: Failed to add %s: %d\n", - codec->name, control->name, err); + codec->name, name, err); return err; } } @@ -2863,10 +3115,12 @@ static int snd_soc_register_card(struct snd_soc_card *card) if (!card->name || !card->dev) return -EINVAL; - card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) * card->num_links, - GFP_KERNEL); + card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) * + (card->num_links + card->num_aux_devs), + GFP_KERNEL); if (card->rtd == NULL) return -ENOMEM; + card->rtd_aux = &card->rtd[card->num_links]; for (i = 0; i < card->num_links; i++) card->rtd[i].dai_link = &card->dai_link[i]; @@ -2908,7 +3162,7 @@ static int snd_soc_unregister_card(struct snd_soc_card *card) * Simplify DAI link configuration by removing ".-1" from device names * and sanitizing names. 
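fmt_single_name() now copies with strlcpy() instead of strncpy(): strncpy() leaves the buffer unterminated when the source fills it, while strlcpy() always terminates and returns the length it would have needed. A userspace demonstration (strlcpy() is a BSD/kernel routine, so a local equivalent is included purely for the demo):

#include <stdio.h>
#include <string.h>

/* minimal strlcpy for the demo; the kernel provides the real one */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];
	const char *name = "spi0.0-very-long";	/* longer than the buffers */
	size_t want = my_strlcpy(b, name, sizeof(b));

	strncpy(a, name, sizeof(a));		/* fills a without a NUL */
	printf("strncpy terminated the buffer: %s\n",
	       a[sizeof(a) - 1] == '\0' ? "yes" : "no");
	printf("strlcpy kept \"%s\", full name needed %zu bytes\n",
	       b, want + 1);
	return 0;
}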
*/ -static inline char *fmt_single_name(struct device *dev, int *id) +static char *fmt_single_name(struct device *dev, int *id) { char *found, name[NAME_SIZE]; int id1, id2; @@ -2916,7 +3170,7 @@ static inline char *fmt_single_name(struct device *dev, int *id) if (dev_name(dev) == NULL) return NULL; - strncpy(name, dev_name(dev), NAME_SIZE); + strlcpy(name, dev_name(dev), NAME_SIZE); /* are we a "%s.%d" name (platform and SPI components) */ found = strstr(name, dev->driver->name); @@ -2939,7 +3193,7 @@ static inline char *fmt_single_name(struct device *dev, int *id) /* sanitize component name for DAI link creation */ snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name); - strncpy(name, tmp, NAME_SIZE); + strlcpy(name, tmp, NAME_SIZE); } else *id = 0; } @@ -3204,9 +3458,11 @@ static void fixup_codec_formats(struct snd_soc_pcm_stream *stream) * @codec: codec to register */ int snd_soc_register_codec(struct device *dev, - struct snd_soc_codec_driver *codec_drv, - struct snd_soc_dai_driver *dai_drv, int num_dai) + const struct snd_soc_codec_driver *codec_drv, + struct snd_soc_dai_driver *dai_drv, + int num_dai) { + size_t reg_size; struct snd_soc_codec *codec; int ret, i; @@ -3223,30 +3479,37 @@ int snd_soc_register_codec(struct device *dev, return -ENOMEM; } - /* allocate CODEC register cache */ - if (codec_drv->reg_cache_size && codec_drv->reg_word_size) { - - if (codec_drv->reg_cache_default) - codec->reg_cache = kmemdup(codec_drv->reg_cache_default, - codec_drv->reg_cache_size * codec_drv->reg_word_size, GFP_KERNEL); - else - codec->reg_cache = kzalloc(codec_drv->reg_cache_size * - codec_drv->reg_word_size, GFP_KERNEL); - - if (codec->reg_cache == NULL) { - kfree(codec->name); - kfree(codec); - return -ENOMEM; - } - } + if (codec_drv->compress_type) + codec->compress_type = codec_drv->compress_type; + else + codec->compress_type = SND_SOC_FLAT_COMPRESSION; + codec->write = codec_drv->write; + codec->read = codec_drv->read; + codec->dapm.bias_level = SND_SOC_BIAS_OFF; + codec->dapm.dev = dev; + codec->dapm.codec = codec; codec->dev = dev; codec->driver = codec_drv; - codec->bias_level = SND_SOC_BIAS_OFF; codec->num_dai = num_dai; mutex_init(&codec->mutex); - INIT_LIST_HEAD(&codec->dapm_widgets); - INIT_LIST_HEAD(&codec->dapm_paths); + + /* allocate CODEC register cache */ + if (codec_drv->reg_cache_size && codec_drv->reg_word_size) { + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + /* it is necessary to make a copy of the default register cache + * because in the case of using a compression type that requires + * the default register cache to be marked as __devinitconst the + * kernel might have freed the array by the time we initialize + * the cache. 
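Because reg_cache_default may sit in __devinitconst storage that is discarded after boot, the core duplicates it into reg_def_copy at registration time and only ever works from that private copy. The same defensive pattern in plain C (memdup() below is just a userspace stand-in for kmemdup()):

#include <stdlib.h>
#include <string.h>

/* take a private copy of a table whose original storage may go away */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	static const unsigned short defaults[] = { 0x0000, 0x00ff, 0x1234 };
	unsigned short *copy = memdup(defaults, sizeof(defaults));

	if (!copy)
		return 1;
	/* from here on only 'copy' is used; 'defaults' could vanish */
	free(copy);
	return 0;
}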
+ */ + codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default, + reg_size, GFP_KERNEL); + if (!codec->reg_def_copy) { + ret = -ENOMEM; + goto fail; + } + } for (i = 0; i < num_dai; i++) { fixup_codec_formats(&dai_drv[i].playback); @@ -3257,7 +3520,7 @@ int snd_soc_register_codec(struct device *dev, if (num_dai) { ret = snd_soc_register_dais(dev, dai_drv, num_dai); if (ret < 0) - goto error; + goto fail; } mutex_lock(&client_mutex); @@ -3268,9 +3531,9 @@ int snd_soc_register_codec(struct device *dev, pr_debug("Registered codec '%s'\n", codec->name); return 0; -error: - if (codec->reg_cache) - kfree(codec->reg_cache); +fail: + kfree(codec->reg_def_copy); + codec->reg_def_copy = NULL; kfree(codec->name); kfree(codec); return ret; @@ -3304,8 +3567,8 @@ found: pr_debug("Unregistered codec '%s'\n", codec->name); - if (codec->reg_cache) - kfree(codec->reg_cache); + snd_soc_cache_exit(codec); + kfree(codec->reg_def_copy); kfree(codec->name); kfree(codec); } diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index c721502833bc..499730ab5638 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -42,9 +42,11 @@ #include #include #include -#include +#include #include +#include + /* dapm power sequences - make this per codec in the future */ static int dapm_up_seq[] = { [snd_soc_dapm_pre] = 0, @@ -54,12 +56,14 @@ static int dapm_up_seq[] = { [snd_soc_dapm_aif_out] = 3, [snd_soc_dapm_mic] = 4, [snd_soc_dapm_mux] = 5, + [snd_soc_dapm_virt_mux] = 5, [snd_soc_dapm_value_mux] = 5, [snd_soc_dapm_dac] = 6, [snd_soc_dapm_mixer] = 7, [snd_soc_dapm_mixer_named_ctl] = 7, [snd_soc_dapm_pga] = 8, [snd_soc_dapm_adc] = 9, + [snd_soc_dapm_out_drv] = 10, [snd_soc_dapm_hp] = 10, [snd_soc_dapm_spk] = 10, [snd_soc_dapm_post] = 11, @@ -70,6 +74,7 @@ static int dapm_down_seq[] = { [snd_soc_dapm_adc] = 1, [snd_soc_dapm_hp] = 2, [snd_soc_dapm_spk] = 2, + [snd_soc_dapm_out_drv] = 2, [snd_soc_dapm_pga] = 4, [snd_soc_dapm_mixer_named_ctl] = 5, [snd_soc_dapm_mixer] = 5, @@ -77,6 +82,7 @@ static int dapm_down_seq[] = { [snd_soc_dapm_mic] = 7, [snd_soc_dapm_micbias] = 8, [snd_soc_dapm_mux] = 9, + [snd_soc_dapm_virt_mux] = 9, [snd_soc_dapm_value_mux] = 9, [snd_soc_dapm_aif_in] = 10, [snd_soc_dapm_aif_out] = 10, @@ -90,17 +96,24 @@ static void pop_wait(u32 pop_time) schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time)); } -static void pop_dbg(u32 pop_time, const char *fmt, ...) +static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...) { va_list args; + char *buf; - va_start(args, fmt); + if (!pop_time) + return; - if (pop_time) { - vprintk(fmt, args); - } + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (buf == NULL) + return; + va_start(args, fmt); + vsnprintf(buf, PAGE_SIZE, fmt, args); + dev_info(dev, "%s", buf); va_end(args); + + kfree(buf); } /* create a new dapm widget */ @@ -120,36 +133,45 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget( * Returns 0 for success else error. 
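pop_dbg() now formats into a temporary heap buffer and emits the result through dev_info() rather than calling vprintk() directly, which lets it bail out early when pop_time is zero and keeps the output attributed to the right device. The same pattern as a standalone program (PAGE_SIZE and dev_info() are stood in by a constant and printf()):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096	/* stands in for PAGE_SIZE */

/* format into a temporary buffer, then emit it in one piece */
static void pop_dbg(unsigned int pop_time, const char *fmt, ...)
{
	va_list args;
	char *buf;

	if (!pop_time)		/* pop debugging disabled: do nothing */
		return;

	buf = malloc(BUF_SIZE);
	if (!buf)
		return;

	va_start(args, fmt);
	vsnprintf(buf, BUF_SIZE, fmt, args);
	va_end(args);

	printf("%s", buf);	/* dev_info() in the kernel version */
	free(buf);
}

int main(void)
{
	pop_dbg(5, "pop test %s : %s in %d ms\n", "HP PGA", "on", 5);
	pop_dbg(0, "never printed\n");
	return 0;
}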
*/ static int snd_soc_dapm_set_bias_level(struct snd_soc_card *card, - struct snd_soc_codec *codec, enum snd_soc_bias_level level) + struct snd_soc_dapm_context *dapm, + enum snd_soc_bias_level level) { int ret = 0; switch (level) { case SND_SOC_BIAS_ON: - dev_dbg(codec->dev, "Setting full bias\n"); + dev_dbg(dapm->dev, "Setting full bias\n"); break; case SND_SOC_BIAS_PREPARE: - dev_dbg(codec->dev, "Setting bias prepare\n"); + dev_dbg(dapm->dev, "Setting bias prepare\n"); break; case SND_SOC_BIAS_STANDBY: - dev_dbg(codec->dev, "Setting standby bias\n"); + dev_dbg(dapm->dev, "Setting standby bias\n"); break; case SND_SOC_BIAS_OFF: - dev_dbg(codec->dev, "Setting bias off\n"); + dev_dbg(dapm->dev, "Setting bias off\n"); break; default: - dev_err(codec->dev, "Setting invalid bias %d\n", level); + dev_err(dapm->dev, "Setting invalid bias %d\n", level); return -EINVAL; } + trace_snd_soc_bias_level_start(card, level); + if (card && card->set_bias_level) ret = card->set_bias_level(card, level); if (ret == 0) { - if (codec->driver->set_bias_level) - ret = codec->driver->set_bias_level(codec, level); + if (dapm->codec && dapm->codec->driver->set_bias_level) + ret = dapm->codec->driver->set_bias_level(dapm->codec, level); else - codec->bias_level = level; + dapm->bias_level = level; } + if (ret == 0) { + if (card && card->set_bias_level_post) + ret = card->set_bias_level_post(card, level); + } + + trace_snd_soc_bias_level_done(card, level); return ret; } @@ -196,6 +218,20 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w, } } break; + case snd_soc_dapm_virt_mux: { + struct soc_enum *e = (struct soc_enum *)w->kcontrols[i].private_value; + + p->connect = 0; + /* since a virtual mux has no backing registers to + * decide which path to connect, it will try to match + * with the first enumeration. This is to ensure + * that the default mux choice (the first) will be + * correctly powered up during initialization. 
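A virtual mux has no backing register, so the path-status code simply connects the path whose name matches the first enumeration text and leaves the others disconnected, which is also what userspace sees as the default selection. That decision in isolation (the mux texts and path names here are invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* enum texts a hypothetical virtual mux exposes */
	const char *texts[] = { "DAC", "Line In", "Mic" };
	/* one path per possible input to the mux */
	const char *paths[] = { "Mic", "DAC", "Line In" };
	size_t i;

	for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
		int connect = !strcmp(paths[i], texts[0]);

		printf("path \"%s\" -> connect = %d\n", paths[i], connect);
	}
	return 0;
}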
+ */ + if (!strcmp(p->name, e->texts[0])) + p->connect = 1; + } + break; case snd_soc_dapm_value_mux: { struct soc_enum *e = (struct soc_enum *) w->kcontrols[i].private_value; @@ -217,6 +253,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w, break; /* does not effect routing - always connected */ case snd_soc_dapm_pga: + case snd_soc_dapm_out_drv: case snd_soc_dapm_output: case snd_soc_dapm_adc: case snd_soc_dapm_input: @@ -241,7 +278,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w, } /* connect mux widget to its interconnecting audio paths */ -static int dapm_connect_mux(struct snd_soc_codec *codec, +static int dapm_connect_mux(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest, struct snd_soc_dapm_path *path, const char *control_name, const struct snd_kcontrol_new *kcontrol) @@ -251,7 +288,7 @@ static int dapm_connect_mux(struct snd_soc_codec *codec, for (i = 0; i < e->max; i++) { if (!(strcmp(control_name, e->texts[i]))) { - list_add(&path->list, &codec->dapm_paths); + list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &dest->sources); list_add(&path->list_source, &src->sinks); path->name = (char*)e->texts[i]; @@ -264,7 +301,7 @@ static int dapm_connect_mux(struct snd_soc_codec *codec, } /* connect mixer widget to its interconnecting audio paths */ -static int dapm_connect_mixer(struct snd_soc_codec *codec, +static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest, struct snd_soc_dapm_path *path, const char *control_name) { @@ -273,7 +310,7 @@ static int dapm_connect_mixer(struct snd_soc_codec *codec, /* search for mixer kcontrol */ for (i = 0; i < dest->num_kcontrols; i++) { if (!strcmp(control_name, dest->kcontrols[i].name)) { - list_add(&path->list, &codec->dapm_paths); + list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &dest->sources); list_add(&path->list_source, &src->sinks); path->name = dest->kcontrols[i].name; @@ -290,6 +327,8 @@ static int dapm_update_bits(struct snd_soc_dapm_widget *widget) int change, power; unsigned int old, new; struct snd_soc_codec *codec = widget->codec; + struct snd_soc_dapm_context *dapm = widget->dapm; + struct snd_soc_card *card = dapm->card; /* check for valid widgets */ if (widget->reg < 0 || widget->id == snd_soc_dapm_input || @@ -309,24 +348,26 @@ static int dapm_update_bits(struct snd_soc_dapm_widget *widget) change = old != new; if (change) { - pop_dbg(codec->pop_time, "pop test %s : %s in %d ms\n", + pop_dbg(dapm->dev, card->pop_time, + "pop test %s : %s in %d ms\n", widget->name, widget->power ? 
"on" : "off", - codec->pop_time); - pop_wait(codec->pop_time); + card->pop_time); + pop_wait(card->pop_time); snd_soc_write(codec, widget->reg, new); } - pr_debug("reg %x old %x new %x change %d\n", widget->reg, - old, new, change); + dev_dbg(dapm->dev, "reg %x old %x new %x change %d\n", widget->reg, + old, new, change); return change; } /* create new dapm mixer control */ -static int dapm_new_mixer(struct snd_soc_codec *codec, +static int dapm_new_mixer(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *w) { int i, ret = 0; size_t name_len; struct snd_soc_dapm_path *path; + struct snd_card *card = dapm->codec->card->snd_card; /* add kcontrol */ for (i = 0; i < w->num_kcontrols; i++) { @@ -368,11 +409,11 @@ static int dapm_new_mixer(struct snd_soc_codec *codec, path->kcontrol = snd_soc_cnew(&w->kcontrols[i], w, path->long_name); - ret = snd_ctl_add(codec->card->snd_card, path->kcontrol); + ret = snd_ctl_add(card, path->kcontrol); if (ret < 0) { - printk(KERN_ERR "asoc: failed to add dapm kcontrol %s: %d\n", - path->long_name, - ret); + dev_err(dapm->dev, + "asoc: failed to add dapm kcontrol %s: %d\n", + path->long_name, ret); kfree(path->long_name); path->long_name = NULL; return ret; @@ -383,20 +424,22 @@ static int dapm_new_mixer(struct snd_soc_codec *codec, } /* create new dapm mux control */ -static int dapm_new_mux(struct snd_soc_codec *codec, +static int dapm_new_mux(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *w) { struct snd_soc_dapm_path *path = NULL; struct snd_kcontrol *kcontrol; + struct snd_card *card = dapm->codec->card->snd_card; int ret = 0; if (!w->num_kcontrols) { - printk(KERN_ERR "asoc: mux %s has no controls\n", w->name); + dev_err(dapm->dev, "asoc: mux %s has no controls\n", w->name); return -EINVAL; } kcontrol = snd_soc_cnew(&w->kcontrols[0], w, w->name); - ret = snd_ctl_add(codec->card->snd_card, kcontrol); + ret = snd_ctl_add(card, kcontrol); + if (ret < 0) goto err; @@ -406,26 +449,27 @@ static int dapm_new_mux(struct snd_soc_codec *codec, return ret; err: - printk(KERN_ERR "asoc: failed to add kcontrol %s\n", w->name); + dev_err(dapm->dev, "asoc: failed to add kcontrol %s\n", w->name); return ret; } /* create new dapm volume control */ -static int dapm_new_pga(struct snd_soc_codec *codec, +static int dapm_new_pga(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *w) { if (w->num_kcontrols) - pr_err("asoc: PGA controls not supported: '%s'\n", w->name); + dev_err(w->dapm->dev, + "asoc: PGA controls not supported: '%s'\n", w->name); return 0; } /* reset 'walked' bit for each dapm path */ -static inline void dapm_clear_walk(struct snd_soc_codec *codec) +static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_path *p; - list_for_each_entry(p, &codec->dapm_paths, list) + list_for_each_entry(p, &dapm->card->paths, list) p->walked = 0; } @@ -435,13 +479,14 @@ static inline void dapm_clear_walk(struct snd_soc_codec *codec) */ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget) { - int level = snd_power_get_state(widget->codec->card->snd_card); + int level = snd_power_get_state(widget->dapm->codec->card->snd_card); switch (level) { case SNDRV_CTL_POWER_D3hot: case SNDRV_CTL_POWER_D3cold: if (widget->ignore_suspend) - pr_debug("%s ignoring suspend\n", widget->name); + dev_dbg(widget->dapm->dev, "%s ignoring suspend\n", + widget->name); return widget->ignore_suspend; default: return 1; @@ -572,7 +617,7 @@ static int dapm_generic_apply_power(struct snd_soc_dapm_widget *w) /* 
call any power change event handlers */ if (w->event) - pr_debug("power %s event for %s flags %x\n", + dev_dbg(w->dapm->dev, "power %s event for %s flags %x\n", w->power ? "on" : "off", w->name, w->event_flags); @@ -621,9 +666,9 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w) int in, out; in = is_connected_input_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); out = is_connected_output_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); return out != 0 && in != 0; } @@ -634,7 +679,7 @@ static int dapm_adc_check_power(struct snd_soc_dapm_widget *w) if (w->active) { in = is_connected_input_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); return in != 0; } else { return dapm_generic_check_power(w); @@ -648,7 +693,7 @@ static int dapm_dac_check_power(struct snd_soc_dapm_widget *w) if (w->active) { out = is_connected_output_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); return out != 0; } else { return dapm_generic_check_power(w); @@ -674,7 +719,7 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) } } - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); return power; } @@ -687,8 +732,8 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a, return sort[a->id] - sort[b->id]; if (a->reg != b->reg) return a->reg - b->reg; - if (a->codec != b->codec) - return (unsigned long)a->codec - (unsigned long)b->codec; + if (a->dapm != b->dapm) + return (unsigned long)a->dapm - (unsigned long)b->dapm; return 0; } @@ -709,12 +754,57 @@ static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget, list_add_tail(&new_widget->power_list, list); } +static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm, + struct snd_soc_dapm_widget *w, int event) +{ + struct snd_soc_card *card = dapm->card; + const char *ev_name; + int power, ret; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + ev_name = "PRE_PMU"; + power = 1; + break; + case SND_SOC_DAPM_POST_PMU: + ev_name = "POST_PMU"; + power = 1; + break; + case SND_SOC_DAPM_PRE_PMD: + ev_name = "PRE_PMD"; + power = 0; + break; + case SND_SOC_DAPM_POST_PMD: + ev_name = "POST_PMD"; + power = 0; + break; + default: + BUG(); + return; + } + + if (w->power != power) + return; + + if (w->event && (w->event_flags & event)) { + pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n", + w->name, ev_name); + trace_snd_soc_dapm_widget_event_start(w, event); + ret = w->event(w, NULL, event); + trace_snd_soc_dapm_widget_event_done(w, event); + if (ret < 0) + pr_err("%s: %s event failed: %d\n", + ev_name, w->name, ret); + } +} + /* Apply the coalesced changes from a DAPM sequence */ -static void dapm_seq_run_coalesced(struct snd_soc_codec *codec, +static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm, struct list_head *pending) { + struct snd_soc_card *card = dapm->card; struct snd_soc_dapm_widget *w; - int reg, power, ret; + int reg, power; unsigned int value = 0; unsigned int mask = 0; unsigned int cur_mask; @@ -735,64 +825,26 @@ static void dapm_seq_run_coalesced(struct snd_soc_codec *codec, if (power) value |= cur_mask; - pop_dbg(codec->pop_time, + pop_dbg(dapm->dev, card->pop_time, "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n", w->name, reg, value, mask); - /* power up pre event */ - if (w->power && w->event && - (w->event_flags & SND_SOC_DAPM_PRE_PMU)) { - pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n", - w->name); - ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU); - if (ret < 0) - pr_err("%s: pre event failed: %d\n", - w->name, ret); - 
} - - /* power down pre event */ - if (!w->power && w->event && - (w->event_flags & SND_SOC_DAPM_PRE_PMD)) { - pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n", - w->name); - ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD); - if (ret < 0) - pr_err("%s: pre event failed: %d\n", - w->name, ret); - } + /* Check for events */ + dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU); + dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD); } if (reg >= 0) { - pop_dbg(codec->pop_time, + pop_dbg(dapm->dev, card->pop_time, "pop test : Applying 0x%x/0x%x to %x in %dms\n", - value, mask, reg, codec->pop_time); - pop_wait(codec->pop_time); - snd_soc_update_bits(codec, reg, mask, value); + value, mask, reg, card->pop_time); + pop_wait(card->pop_time); + snd_soc_update_bits(dapm->codec, reg, mask, value); } list_for_each_entry(w, pending, power_list) { - /* power up post event */ - if (w->power && w->event && - (w->event_flags & SND_SOC_DAPM_POST_PMU)) { - pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n", - w->name); - ret = w->event(w, - NULL, SND_SOC_DAPM_POST_PMU); - if (ret < 0) - pr_err("%s: post event failed: %d\n", - w->name, ret); - } - - /* power down post event */ - if (!w->power && w->event && - (w->event_flags & SND_SOC_DAPM_POST_PMD)) { - pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n", - w->name); - ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD); - if (ret < 0) - pr_err("%s: post event failed: %d\n", - w->name, ret); - } + dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU); + dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD); } } @@ -804,26 +856,29 @@ static void dapm_seq_run_coalesced(struct snd_soc_codec *codec, * Currently anything that requires more than a single write is not * handled. */ -static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list, - int event, int sort[]) +static void dapm_seq_run(struct snd_soc_dapm_context *dapm, + struct list_head *list, int event, int sort[]) { struct snd_soc_dapm_widget *w, *n; LIST_HEAD(pending); int cur_sort = -1; int cur_reg = SND_SOC_NOPM; + struct snd_soc_dapm_context *cur_dapm = NULL; int ret; list_for_each_entry_safe(w, n, list, power_list) { ret = 0; /* Do we need to apply any queued changes? 
*/ - if (sort[w->id] != cur_sort || w->reg != cur_reg) { + if (sort[w->id] != cur_sort || w->reg != cur_reg || + w->dapm != cur_dapm) { if (!list_empty(&pending)) - dapm_seq_run_coalesced(codec, &pending); + dapm_seq_run_coalesced(cur_dapm, &pending); INIT_LIST_HEAD(&pending); cur_sort = -1; cur_reg = SND_SOC_NOPM; + cur_dapm = NULL; } switch (w->id) { @@ -867,19 +922,55 @@ static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list, /* Queue it up for application */ cur_sort = sort[w->id]; cur_reg = w->reg; + cur_dapm = w->dapm; list_move(&w->power_list, &pending); break; } if (ret < 0) - pr_err("Failed to apply widget power: %d\n", - ret); + dev_err(w->dapm->dev, + "Failed to apply widget power: %d\n", ret); } if (!list_empty(&pending)) - dapm_seq_run_coalesced(codec, &pending); + dapm_seq_run_coalesced(dapm, &pending); +} + +static void dapm_widget_update(struct snd_soc_dapm_context *dapm) +{ + struct snd_soc_dapm_update *update = dapm->update; + struct snd_soc_dapm_widget *w; + int ret; + + if (!update) + return; + + w = update->widget; + + if (w->event && + (w->event_flags & SND_SOC_DAPM_PRE_REG)) { + ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG); + if (ret != 0) + pr_err("%s DAPM pre-event failed: %d\n", + w->name, ret); + } + + ret = snd_soc_update_bits(w->codec, update->reg, update->mask, + update->val); + if (ret < 0) + pr_err("%s DAPM update failed: %d\n", w->name, ret); + + if (w->event && + (w->event_flags & SND_SOC_DAPM_POST_REG)) { + ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG); + if (ret != 0) + pr_err("%s DAPM post-event failed: %d\n", + w->name, ret); + } } + + /* * Scan each dapm widget for complete audio path. * A complete path is a route that has valid endpoints i.e.:- @@ -889,20 +980,26 @@ static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list, * o Input pin to Output pin (bypass, sidetone) * o DAC to ADC (loopback). */ -static int dapm_power_widgets(struct snd_soc_codec *codec, int event) +static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event) { - struct snd_soc_card *card = codec->card; + struct snd_soc_card *card = dapm->codec->card; struct snd_soc_dapm_widget *w; + struct snd_soc_dapm_context *d; LIST_HEAD(up_list); LIST_HEAD(down_list); int ret = 0; int power; - int sys_power = 0; + + trace_snd_soc_dapm_start(card); + + list_for_each_entry(d, &card->dapm_list, list) + if (d->n_widgets) + d->dev_power = 0; /* Check which widgets we need to power and store them in * lists indicating if they should be powered up or down. */ - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &card->widgets, list) { switch (w->id) { case snd_soc_dapm_pre: dapm_seq_insert(w, &down_list, dapm_down_seq); @@ -920,11 +1017,13 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event) else power = 1; if (power) - sys_power = 1; + w->dapm->dev_power = 1; if (w->power == power) continue; + trace_snd_soc_dapm_widget_power(w, power); + if (power) dapm_seq_insert(w, &up_list, dapm_up_seq); else @@ -938,26 +1037,26 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event) /* If there are no DAPM widgets then try to figure out power from the * event type. 
*/ - if (list_empty(&codec->dapm_widgets)) { + if (!dapm->n_widgets) { switch (event) { case SND_SOC_DAPM_STREAM_START: case SND_SOC_DAPM_STREAM_RESUME: - sys_power = 1; + dapm->dev_power = 1; break; case SND_SOC_DAPM_STREAM_STOP: - sys_power = !!codec->active; + dapm->dev_power = !!dapm->codec->active; break; case SND_SOC_DAPM_STREAM_SUSPEND: - sys_power = 0; + dapm->dev_power = 0; break; case SND_SOC_DAPM_STREAM_NOP: - switch (codec->bias_level) { + switch (dapm->bias_level) { case SND_SOC_BIAS_STANDBY: case SND_SOC_BIAS_OFF: - sys_power = 0; + dapm->dev_power = 0; break; default: - sys_power = 1; + dapm->dev_power = 1; break; } break; @@ -966,52 +1065,71 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event) } } - if (sys_power && codec->bias_level == SND_SOC_BIAS_OFF) { - ret = snd_soc_dapm_set_bias_level(card, codec, - SND_SOC_BIAS_STANDBY); - if (ret != 0) - pr_err("Failed to turn on bias: %d\n", ret); - } + list_for_each_entry(d, &dapm->card->dapm_list, list) { + if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) { + ret = snd_soc_dapm_set_bias_level(card, d, + SND_SOC_BIAS_STANDBY); + if (ret != 0) + dev_err(d->dev, + "Failed to turn on bias: %d\n", ret); + } - /* If we're changing to all on or all off then prepare */ - if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) || - (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) { - ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_PREPARE); - if (ret != 0) - pr_err("Failed to prepare bias: %d\n", ret); + /* If we're changing to all on or all off then prepare */ + if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) || + (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) { + ret = snd_soc_dapm_set_bias_level(card, d, + SND_SOC_BIAS_PREPARE); + if (ret != 0) + dev_err(d->dev, + "Failed to prepare bias: %d\n", ret); + } } /* Power down widgets first; try to avoid amplifying pops. */ - dapm_seq_run(codec, &down_list, event, dapm_down_seq); + dapm_seq_run(dapm, &down_list, event, dapm_down_seq); + + dapm_widget_update(dapm); /* Now power up. 
*/ - dapm_seq_run(codec, &up_list, event, dapm_up_seq); + dapm_seq_run(dapm, &up_list, event, dapm_up_seq); + + list_for_each_entry(d, &dapm->card->dapm_list, list) { + /* If we just powered the last thing off drop to standby bias */ + if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) { + ret = snd_soc_dapm_set_bias_level(card, d, + SND_SOC_BIAS_STANDBY); + if (ret != 0) + dev_err(d->dev, + "Failed to apply standby bias: %d\n", + ret); + } - /* If we just powered the last thing off drop to standby bias */ - if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) { - ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_STANDBY); - if (ret != 0) - pr_err("Failed to apply standby bias: %d\n", ret); - } + /* If we're in standby and can support bias off then do that */ + if (d->bias_level == SND_SOC_BIAS_STANDBY && + d->idle_bias_off) { + ret = snd_soc_dapm_set_bias_level(card, d, + SND_SOC_BIAS_OFF); + if (ret != 0) + dev_err(d->dev, + "Failed to turn off bias: %d\n", ret); + } - /* If we're in standby and can support bias off then do that */ - if (codec->bias_level == SND_SOC_BIAS_STANDBY && - codec->idle_bias_off) { - ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF); - if (ret != 0) - pr_err("Failed to turn off bias: %d\n", ret); + /* If we just powered up then move to active bias */ + if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) { + ret = snd_soc_dapm_set_bias_level(card, d, + SND_SOC_BIAS_ON); + if (ret != 0) + dev_err(d->dev, + "Failed to apply active bias: %d\n", + ret); + } } - /* If we just powered up then move to active bias */ - if (codec->bias_level == SND_SOC_BIAS_PREPARE && sys_power) { - ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_ON); - if (ret != 0) - pr_err("Failed to apply active bias: %d\n", ret); - } + pop_dbg(dapm->dev, card->pop_time, + "DAPM sequencing finished, waiting %dms\n", card->pop_time); + pop_wait(card->pop_time); - pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n", - codec->pop_time); - pop_wait(codec->pop_time); + trace_snd_soc_dapm_done(card); return 0; } @@ -1038,9 +1156,9 @@ static ssize_t dapm_widget_power_read_file(struct file *file, return -ENOMEM; in = is_connected_input_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); out = is_connected_output_ep(w); - dapm_clear_walk(w->codec); + dapm_clear_walk(w->dapm); ret = snprintf(buf, PAGE_SIZE, "%s: %s in %d out %d", w->name, w->power ? 
"On" : "Off", in, out); @@ -1090,29 +1208,29 @@ static const struct file_operations dapm_widget_power_fops = { .llseek = default_llseek, }; -void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec) +void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w; struct dentry *d; - if (!codec->debugfs_dapm) + if (!dapm->debugfs_dapm) return; - list_for_each_entry(w, &codec->dapm_widgets, list) { - if (!w->name) + list_for_each_entry(w, &dapm->card->widgets, list) { + if (!w->name || w->dapm != dapm) continue; d = debugfs_create_file(w->name, 0444, - codec->debugfs_dapm, w, + dapm->debugfs_dapm, w, &dapm_widget_power_fops); if (!d) - printk(KERN_WARNING - "ASoC: Failed to create %s debugfs file\n", - w->name); + dev_warn(w->dapm->dev, + "ASoC: Failed to create %s debugfs file\n", + w->name); } } #else -void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec) +void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm) { } #endif @@ -1126,6 +1244,7 @@ static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget, int found = 0; if (widget->id != snd_soc_dapm_mux && + widget->id != snd_soc_dapm_virt_mux && widget->id != snd_soc_dapm_value_mux) return -ENODEV; @@ -1133,7 +1252,7 @@ static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget, return 0; /* find dapm widget path assoc with kcontrol */ - list_for_each_entry(path, &widget->codec->dapm_paths, list) { + list_for_each_entry(path, &widget->dapm->card->paths, list) { if (path->kcontrol != kcontrol) continue; @@ -1149,7 +1268,7 @@ static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget, } if (found) - dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP); + dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP); return 0; } @@ -1167,7 +1286,7 @@ static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget, return -ENODEV; /* find dapm widget path assoc with kcontrol */ - list_for_each_entry(path, &widget->codec->dapm_paths, list) { + list_for_each_entry(path, &widget->dapm->card->paths, list) { if (path->kcontrol != kcontrol) continue; @@ -1178,7 +1297,7 @@ static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget, } if (found) - dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP); + dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP); return 0; } @@ -1194,7 +1313,9 @@ static ssize_t dapm_widget_show(struct device *dev, int count = 0; char *state = "not set"; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &codec->card->widgets, list) { + if (w->dapm != &codec->dapm) + continue; /* only display widgets that burnm power */ switch (w->id) { @@ -1206,6 +1327,7 @@ static ssize_t dapm_widget_show(struct device *dev, case snd_soc_dapm_dac: case snd_soc_dapm_adc: case snd_soc_dapm_pga: + case snd_soc_dapm_out_drv: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: case snd_soc_dapm_supply: @@ -1218,7 +1340,7 @@ static ssize_t dapm_widget_show(struct device *dev, } } - switch (codec->bias_level) { + switch (codec->dapm.bias_level) { case SND_SOC_BIAS_ON: state = "On"; break; @@ -1250,31 +1372,50 @@ static void snd_soc_dapm_sys_remove(struct device *dev) } /* free all dapm widgets and resources */ -static void dapm_free_widgets(struct snd_soc_codec *codec) +static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w, *next_w; struct snd_soc_dapm_path *p, *next_p; - list_for_each_entry_safe(w, next_w, &codec->dapm_widgets, list) { + 
list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; list_del(&w->list); + /* + * remove source and sink paths associated to this widget. + * While removing the path, remove reference to it from both + * source and sink widgets so that path is removed only once. + */ + list_for_each_entry_safe(p, next_p, &w->sources, list_sink) { + list_del(&p->list_sink); + list_del(&p->list_source); + list_del(&p->list); + kfree(p->long_name); + kfree(p); + } + list_for_each_entry_safe(p, next_p, &w->sinks, list_source) { + list_del(&p->list_sink); + list_del(&p->list_source); + list_del(&p->list); + kfree(p->long_name); + kfree(p); + } + kfree(w->name); kfree(w); } - - list_for_each_entry_safe(p, next_p, &codec->dapm_paths, list) { - list_del(&p->list); - kfree(p->long_name); - kfree(p); - } } -static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec, +static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, const char *pin, int status) { struct snd_soc_dapm_widget *w; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; if (!strcmp(w->name, pin)) { - pr_debug("dapm: %s: pin %s\n", codec->name, pin); + dev_dbg(w->dapm->dev, "dapm: pin %s = %d\n", + pin, status); w->connected = status; /* Allow disabling of forced pins */ if (status == 0) @@ -1283,46 +1424,72 @@ static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec, } } - pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin); + dev_err(dapm->dev, "dapm: unknown pin %s\n", pin); return -EINVAL; } /** * snd_soc_dapm_sync - scan and power dapm paths - * @codec: audio codec + * @dapm: DAPM context * * Walks all dapm audio paths and powers widgets according to their * stream or path usage. * * Returns 0 for success. 
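The conversions in the hunks that follow change the first argument of snd_soc_dapm_sync(), snd_soc_dapm_add_routes() and snd_soc_dapm_new_controls() from a struct snd_soc_codec to a struct snd_soc_dapm_context, with the widget and path lists now held on the card (dapm->card->widgets, dapm->card->paths). As a minimal sketch of what a caller looks like after this change: the board_dapm_init() function, the route table and the pin/widget names below are illustrative assumptions; only the function signatures, the snd_soc_dapm_route field names and the &codec->dapm context are taken from the patch itself.

#include <linux/kernel.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* illustrative route table; the widget names are hypothetical */
static const struct snd_soc_dapm_route board_routes[] = {
        { .sink = "Headphone Jack", .source = "HPOUT" },
};

static int board_dapm_init(struct snd_soc_codec *codec,
                           const struct snd_soc_dapm_widget *widgets, int num)
{
        /* callers now address DAPM through the codec's embedded context */
        struct snd_soc_dapm_context *dapm = &codec->dapm;
        int ret;

        ret = snd_soc_dapm_new_controls(dapm, widgets, num);
        if (ret < 0)
                return ret;

        ret = snd_soc_dapm_add_routes(dapm, board_routes,
                                      ARRAY_SIZE(board_routes));
        if (ret < 0)
                return ret;

        /* walk the widget graph and apply power for the new routing */
        return snd_soc_dapm_sync(dapm);
}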
*/ -int snd_soc_dapm_sync(struct snd_soc_codec *codec) +int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm) { - return dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP); + return dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP); } EXPORT_SYMBOL_GPL(snd_soc_dapm_sync); -static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, +static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route) { struct snd_soc_dapm_path *path; struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w; - const char *sink = route->sink; + struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL; + const char *sink; const char *control = route->control; - const char *source = route->source; + const char *source; + char prefixed_sink[80]; + char prefixed_source[80]; int ret = 0; - /* find src and dest widgets */ - list_for_each_entry(w, &codec->dapm_widgets, list) { + if (dapm->codec->name_prefix) { + snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s", + dapm->codec->name_prefix, route->sink); + sink = prefixed_sink; + snprintf(prefixed_source, sizeof(prefixed_source), "%s %s", + dapm->codec->name_prefix, route->source); + source = prefixed_source; + } else { + sink = route->sink; + source = route->source; + } + /* + * find src and dest widgets over all widgets but favor a widget from + * current DAPM context + */ + list_for_each_entry(w, &dapm->card->widgets, list) { if (!wsink && !(strcmp(w->name, sink))) { - wsink = w; + wtsink = w; + if (w->dapm == dapm) + wsink = w; continue; } if (!wsource && !(strcmp(w->name, source))) { - wsource = w; + wtsource = w; + if (w->dapm == dapm) + wsource = w; } } + /* use widget from another DAPM context if not found from this */ + if (!wsink) + wsink = wtsink; + if (!wsource) + wsource = wtsource; if (wsource == NULL || wsink == NULL) return -ENODEV; @@ -1356,7 +1523,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, /* connect static paths */ if (control == NULL) { - list_add(&path->list, &codec->dapm_paths); + list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &wsink->sources); list_add(&path->list_source, &wsource->sinks); path->connect = 1; @@ -1368,6 +1535,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, case snd_soc_dapm_adc: case snd_soc_dapm_dac: case snd_soc_dapm_pga: + case snd_soc_dapm_out_drv: case snd_soc_dapm_input: case snd_soc_dapm_output: case snd_soc_dapm_micbias: @@ -1377,14 +1545,15 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, case snd_soc_dapm_supply: case snd_soc_dapm_aif_in: case snd_soc_dapm_aif_out: - list_add(&path->list, &codec->dapm_paths); + list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &wsink->sources); list_add(&path->list_source, &wsource->sinks); path->connect = 1; return 0; case snd_soc_dapm_mux: + case snd_soc_dapm_virt_mux: case snd_soc_dapm_value_mux: - ret = dapm_connect_mux(codec, wsource, wsink, path, control, + ret = dapm_connect_mux(dapm, wsource, wsink, path, control, &wsink->kcontrols[0]); if (ret != 0) goto err; @@ -1392,7 +1561,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: - ret = dapm_connect_mixer(codec, wsource, wsink, path, control); + ret = dapm_connect_mixer(dapm, wsource, wsink, path, control); if (ret != 0) goto err; break; @@ -1400,7 +1569,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, case snd_soc_dapm_mic: case snd_soc_dapm_line: 
case snd_soc_dapm_spk: - list_add(&path->list, &codec->dapm_paths); + list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &wsink->sources); list_add(&path->list_source, &wsource->sinks); path->connect = 0; @@ -1409,15 +1578,15 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec, return 0; err: - printk(KERN_WARNING "asoc: no dapm match for %s --> %s --> %s\n", source, - control, sink); + dev_warn(dapm->dev, "asoc: no dapm match for %s --> %s --> %s\n", + source, control, sink); kfree(path); return ret; } /** * snd_soc_dapm_add_routes - Add routes between DAPM widgets - * @codec: codec + * @dapm: DAPM context * @route: audio routes * @num: number of routes * @@ -1428,17 +1597,16 @@ err: * Returns 0 for success else error. On error all resources can be freed * with a call to snd_soc_card_free(). */ -int snd_soc_dapm_add_routes(struct snd_soc_codec *codec, +int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num) { int i, ret; for (i = 0; i < num; i++) { - ret = snd_soc_dapm_add_route(codec, route); + ret = snd_soc_dapm_add_route(dapm, route); if (ret < 0) { - printk(KERN_ERR "Failed to add route %s->%s\n", - route->source, - route->sink); + dev_err(dapm->dev, "Failed to add route %s->%s\n", + route->source, route->sink); return ret; } route++; @@ -1450,17 +1618,17 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); /** * snd_soc_dapm_new_widgets - add new dapm widgets - * @codec: audio codec + * @dapm: DAPM context * * Checks the codec for any new dapm widgets and creates them if found. * * Returns 0 for success. */ -int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec) +int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w; - list_for_each_entry(w, &codec->dapm_widgets, list) + list_for_each_entry(w, &dapm->card->widgets, list) { if (w->new) continue; @@ -1470,12 +1638,13 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec) case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: w->power_check = dapm_generic_check_power; - dapm_new_mixer(codec, w); + dapm_new_mixer(dapm, w); break; case snd_soc_dapm_mux: + case snd_soc_dapm_virt_mux: case snd_soc_dapm_value_mux: w->power_check = dapm_generic_check_power; - dapm_new_mux(codec, w); + dapm_new_mux(dapm, w); break; case snd_soc_dapm_adc: case snd_soc_dapm_aif_out: @@ -1486,8 +1655,9 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec) w->power_check = dapm_dac_check_power; break; case snd_soc_dapm_pga: + case snd_soc_dapm_out_drv: w->power_check = dapm_generic_check_power; - dapm_new_pga(codec, w); + dapm_new_pga(dapm, w); break; case snd_soc_dapm_input: case snd_soc_dapm_output: @@ -1508,7 +1678,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec) w->new = 1; } - dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP); + dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP); return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets); @@ -1569,13 +1739,12 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int shift = mc->shift; - unsigned int rshift = mc->rshift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; - unsigned int val, val2, val_mask; - int connect; - int ret; + unsigned int val, val_mask; + int connect, change; + struct snd_soc_dapm_update update; val = (ucontrol->value.integer.value[0] & mask); @@ -1583,18 +1752,12 @@ int 
snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, val = max - val; val_mask = mask << shift; val = val << shift; - if (shift != rshift) { - val2 = (ucontrol->value.integer.value[1] & mask); - if (invert) - val2 = max - val2; - val_mask |= mask << rshift; - val |= val2 << rshift; - } mutex_lock(&widget->codec->mutex); widget->value = val; - if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) { + change = snd_soc_test_bits(widget->codec, reg, val_mask, val); + if (change) { if (val) /* new connection */ connect = invert ? 0:1; @@ -1602,28 +1765,20 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, /* old connection must be powered down */ connect = invert ? 1:0; + update.kcontrol = kcontrol; + update.widget = widget; + update.reg = reg; + update.mask = mask; + update.val = val; + widget->dapm->update = &update; + dapm_mixer_update_power(widget, kcontrol, connect); + + widget->dapm->update = NULL; } - if (widget->event) { - if (widget->event_flags & SND_SOC_DAPM_PRE_REG) { - ret = widget->event(widget, kcontrol, - SND_SOC_DAPM_PRE_REG); - if (ret < 0) { - ret = 1; - goto out; - } - } - ret = snd_soc_update_bits(widget->codec, reg, val_mask, val); - if (widget->event_flags & SND_SOC_DAPM_POST_REG) - ret = widget->event(widget, kcontrol, - SND_SOC_DAPM_POST_REG); - } else - ret = snd_soc_update_bits(widget->codec, reg, val_mask, val); - -out: mutex_unlock(&widget->codec->mutex); - return ret; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw); @@ -1671,7 +1826,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, mux, change; unsigned int mask, bitmask; - int ret = 0; + struct snd_soc_dapm_update update; for (bitmask = 1; bitmask < e->max; bitmask <<= 1) ; @@ -1690,24 +1845,20 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, mutex_lock(&widget->codec->mutex); widget->value = val; change = snd_soc_test_bits(widget->codec, e->reg, mask, val); - dapm_mux_update_power(widget, kcontrol, change, mux, e); - if (widget->event_flags & SND_SOC_DAPM_PRE_REG) { - ret = widget->event(widget, - kcontrol, SND_SOC_DAPM_PRE_REG); - if (ret < 0) - goto out; - } + update.kcontrol = kcontrol; + update.widget = widget; + update.reg = e->reg; + update.mask = mask; + update.val = val; + widget->dapm->update = &update; - ret = snd_soc_update_bits(widget->codec, e->reg, mask, val); + dapm_mux_update_power(widget, kcontrol, change, mux, e); - if (widget->event_flags & SND_SOC_DAPM_POST_REG) - ret = widget->event(widget, - kcontrol, SND_SOC_DAPM_POST_REG); + widget->dapm->update = NULL; -out: mutex_unlock(&widget->codec->mutex); - return ret; + return change; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double); @@ -1819,7 +1970,7 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, mux, change; unsigned int mask; - int ret = 0; + struct snd_soc_dapm_update update; if (ucontrol->value.enumerated.item[0] > e->max - 1) return -EINVAL; @@ -1836,24 +1987,20 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol, mutex_lock(&widget->codec->mutex); widget->value = val; change = snd_soc_test_bits(widget->codec, e->reg, mask, val); - dapm_mux_update_power(widget, kcontrol, change, mux, e); - if (widget->event_flags & SND_SOC_DAPM_PRE_REG) { - ret = widget->event(widget, - kcontrol, SND_SOC_DAPM_PRE_REG); - if (ret < 0) - goto out; - } + update.kcontrol = kcontrol; + 
update.widget = widget; + update.reg = e->reg; + update.mask = mask; + update.val = val; + widget->dapm->update = &update; - ret = snd_soc_update_bits(widget->codec, e->reg, mask, val); + dapm_mux_update_power(widget, kcontrol, change, mux, e); - if (widget->event_flags & SND_SOC_DAPM_POST_REG) - ret = widget->event(widget, - kcontrol, SND_SOC_DAPM_POST_REG); + widget->dapm->update = NULL; -out: mutex_unlock(&widget->codec->mutex); - return ret; + return change; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_value_enum_double); @@ -1892,7 +2039,7 @@ int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol, mutex_lock(&codec->mutex); ucontrol->value.integer.value[0] = - snd_soc_dapm_get_pin_status(codec, pin); + snd_soc_dapm_get_pin_status(&codec->dapm, pin); mutex_unlock(&codec->mutex); @@ -1915,11 +2062,11 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol, mutex_lock(&codec->mutex); if (ucontrol->value.integer.value[0]) - snd_soc_dapm_enable_pin(codec, pin); + snd_soc_dapm_enable_pin(&codec->dapm, pin); else - snd_soc_dapm_disable_pin(codec, pin); + snd_soc_dapm_disable_pin(&codec->dapm, pin); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(&codec->dapm); mutex_unlock(&codec->mutex); @@ -1929,26 +2076,43 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch); /** * snd_soc_dapm_new_control - create new dapm control - * @codec: audio codec + * @dapm: DAPM context * @widget: widget template * * Creates a new dapm control based upon the template. * * Returns 0 for success else error. */ -int snd_soc_dapm_new_control(struct snd_soc_codec *codec, +int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget) { struct snd_soc_dapm_widget *w; + size_t name_len; if ((w = dapm_cnew_widget(widget)) == NULL) return -ENOMEM; - w->codec = codec; + name_len = strlen(widget->name) + 1; + if (dapm->codec->name_prefix) + name_len += 1 + strlen(dapm->codec->name_prefix); + w->name = kmalloc(name_len, GFP_KERNEL); + if (w->name == NULL) { + kfree(w); + return -ENOMEM; + } + if (dapm->codec->name_prefix) + snprintf(w->name, name_len, "%s %s", + dapm->codec->name_prefix, widget->name); + else + snprintf(w->name, name_len, "%s", widget->name); + + dapm->n_widgets++; + w->dapm = dapm; + w->codec = dapm->codec; INIT_LIST_HEAD(&w->sources); INIT_LIST_HEAD(&w->sinks); INIT_LIST_HEAD(&w->list); - list_add(&w->list, &codec->dapm_widgets); + list_add(&w->list, &dapm->card->widgets); /* machine layer set ups unconnected pins and insertions */ w->connected = 1; @@ -1958,7 +2122,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control); /** * snd_soc_dapm_new_controls - create new dapm controls - * @codec: audio codec + * @dapm: DAPM context * @widget: widget array * @num: number of widgets * @@ -1966,18 +2130,18 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control); * * Returns 0 for success else error. 
*/ -int snd_soc_dapm_new_controls(struct snd_soc_codec *codec, +int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget, int num) { int i, ret; for (i = 0; i < num; i++) { - ret = snd_soc_dapm_new_control(codec, widget); + ret = snd_soc_dapm_new_control(dapm, widget); if (ret < 0) { - printk(KERN_ERR - "ASoC: Failed to create DAPM control %s: %d\n", - widget->name, ret); + dev_err(dapm->dev, + "ASoC: Failed to create DAPM control %s: %d\n", + widget->name, ret); return ret; } widget++; @@ -1986,34 +2150,17 @@ int snd_soc_dapm_new_controls(struct snd_soc_codec *codec, } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls); - -/** - * snd_soc_dapm_stream_event - send a stream event to the dapm core - * @codec: audio codec - * @stream: stream name - * @event: stream event - * - * Sends a stream event to the dapm core. The core then makes any - * necessary widget power changes. - * - * Returns 0 for success else error. - */ -int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, +static void soc_dapm_stream_event(struct snd_soc_dapm_context *dapm, const char *stream, int event) { - struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_widget *w; - if (stream == NULL) - return 0; - - mutex_lock(&codec->mutex); - list_for_each_entry(w, &codec->dapm_widgets, list) + list_for_each_entry(w, &dapm->card->widgets, list) { - if (!w->sname) + if (!w->sname || w->dapm != dapm) continue; - pr_debug("widget %s\n %s stream %s event %d\n", - w->name, w->sname, stream, event); + dev_dbg(w->dapm->dev, "widget %s\n %s stream %s event %d\n", + w->name, w->sname, stream, event); if (strstr(w->sname, stream)) { switch(event) { case SND_SOC_DAPM_STREAM_START: @@ -2031,7 +2178,30 @@ int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, } } - dapm_power_widgets(codec, event); + dapm_power_widgets(dapm, event); +} + +/** + * snd_soc_dapm_stream_event - send a stream event to the dapm core + * @rtd: PCM runtime data + * @stream: stream name + * @event: stream event + * + * Sends a stream event to the dapm core. The core then makes any + * necessary widget power changes. + * + * Returns 0 for success else error. + */ +int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, + const char *stream, int event) +{ + struct snd_soc_codec *codec = rtd->codec; + + if (stream == NULL) + return 0; + + mutex_lock(&codec->mutex); + soc_dapm_stream_event(&codec->dapm, stream, event); mutex_unlock(&codec->mutex); return 0; } @@ -2039,7 +2209,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_event); /** * snd_soc_dapm_enable_pin - enable pin. - * @codec: SoC codec + * @dapm: DAPM context * @pin: pin name * * Enables input/output pin and its parents or children widgets iff there is @@ -2047,15 +2217,15 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_event); * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ -int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin) { - return snd_soc_dapm_set_pin(codec, pin, 1); + return snd_soc_dapm_set_pin(dapm, pin, 1); } EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin); /** * snd_soc_dapm_force_enable_pin - force a pin to be enabled - * @codec: SoC codec + * @dapm: DAPM context * @pin: pin name * * Enables input/output pin regardless of any other state. 
This is @@ -2065,42 +2235,47 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin); * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ -int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, + const char *pin) { struct snd_soc_dapm_widget *w; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; if (!strcmp(w->name, pin)) { - pr_debug("dapm: %s: pin %s\n", codec->name, pin); + dev_dbg(w->dapm->dev, + "dapm: force enable pin %s\n", pin); w->connected = 1; w->force = 1; return 0; } } - pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin); + dev_err(dapm->dev, "dapm: unknown pin %s\n", pin); return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin); /** * snd_soc_dapm_disable_pin - disable pin. - * @codec: SoC codec + * @dapm: DAPM context * @pin: pin name * * Disables input/output pin and its parents or children widgets. * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ -int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, + const char *pin) { - return snd_soc_dapm_set_pin(codec, pin, 0); + return snd_soc_dapm_set_pin(dapm, pin, 0); } EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin); /** * snd_soc_dapm_nc_pin - permanently disable pin. - * @codec: SoC codec + * @dapm: DAPM context * @pin: pin name * * Marks the specified pin as being not connected, disabling it along @@ -2112,26 +2287,29 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin); * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ -int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin) { - return snd_soc_dapm_set_pin(codec, pin, 0); + return snd_soc_dapm_set_pin(dapm, pin, 0); } EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin); /** * snd_soc_dapm_get_pin_status - get audio pin status - * @codec: audio codec + * @dapm: DAPM context * @pin: audio signal pin endpoint (or start point) * * Get audio pin status - connected or disconnected. * * Returns 1 for connected otherwise 0. */ -int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, + const char *pin) { struct snd_soc_dapm_widget *w; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; if (!strcmp(w->name, pin)) return w->connected; } @@ -2142,7 +2320,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status); /** * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint - * @codec: audio codec + * @dapm: DAPM context * @pin: audio signal pin endpoint (or start point) * * Mark the given endpoint or pin as ignoring suspend. When the @@ -2151,18 +2329,21 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status); * normal means at suspend time, it will not be turned on if it was not * already enabled. 
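The pin helpers converted in the surrounding hunks (snd_soc_dapm_enable_pin(), snd_soc_dapm_disable_pin(), snd_soc_dapm_nc_pin(), snd_soc_dapm_get_pin_status(), snd_soc_dapm_ignore_suspend()) all gain the same DAPM context argument, and the soc-jack.c hunk later in this patch drives them the same way. A hedged sketch of pin toggling under the new API: the "Ext Spk" pin name and the helper function are assumptions; the calls and the &codec->dapm context come from the hunks around this point.

#include <linux/types.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* illustrative helper; the pin name is hypothetical */
static int board_report_speaker(struct snd_soc_codec *codec, bool present)
{
        struct snd_soc_dapm_context *dapm = &codec->dapm;

        if (present)
                snd_soc_dapm_enable_pin(dapm, "Ext Spk");
        else
                snd_soc_dapm_disable_pin(dapm, "Ext Spk");

        /* pin changes only take effect after a DAPM sync */
        return snd_soc_dapm_sync(dapm);
}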
*/ -int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin) +int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, + const char *pin) { struct snd_soc_dapm_widget *w; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; if (!strcmp(w->name, pin)) { w->ignore_suspend = 1; return 0; } } - pr_err("Unknown DAPM pin: %s\n", pin); + dev_err(dapm->dev, "dapm: unknown pin %s\n", pin); return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); @@ -2173,20 +2354,23 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); * * Free all dapm widgets and resources. */ -void snd_soc_dapm_free(struct snd_soc_codec *codec) +void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm) { - snd_soc_dapm_sys_remove(codec->dev); - dapm_free_widgets(codec); + snd_soc_dapm_sys_remove(dapm->dev); + dapm_free_widgets(dapm); + list_del(&dapm->list); } EXPORT_SYMBOL_GPL(snd_soc_dapm_free); -static void soc_dapm_shutdown_codec(struct snd_soc_codec *codec) +static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w; LIST_HEAD(down_list); int powerdown = 0; - list_for_each_entry(w, &codec->dapm_widgets, list) { + list_for_each_entry(w, &dapm->card->widgets, list) { + if (w->dapm != dapm) + continue; if (w->power) { dapm_seq_insert(w, &down_list, dapm_down_seq); w->power = 0; @@ -2198,9 +2382,9 @@ static void soc_dapm_shutdown_codec(struct snd_soc_codec *codec) * standby. */ if (powerdown) { - snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_PREPARE); - dapm_seq_run(codec, &down_list, 0, dapm_down_seq); - snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_STANDBY); + snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_PREPARE); + dapm_seq_run(dapm, &down_list, 0, dapm_down_seq); + snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_STANDBY); } } @@ -2211,10 +2395,10 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card) { struct snd_soc_codec *codec; - list_for_each_entry(codec, &card->codec_dev_list, list) - soc_dapm_shutdown_codec(codec); - - snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF); + list_for_each_entry(codec, &card->codec_dev_list, list) { + soc_dapm_shutdown_codec(&codec->dapm); + snd_soc_dapm_set_bias_level(card, &codec->dapm, SND_SOC_BIAS_OFF); + } } /* Module information */ diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index 8a0a9205b1e7..ac5a5bc7375a 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -13,11 +13,11 @@ #include #include -#include #include #include #include #include +#include /** * snd_soc_jack_new - Create a new jack @@ -60,14 +60,18 @@ EXPORT_SYMBOL_GPL(snd_soc_jack_new); void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) { struct snd_soc_codec *codec; + struct snd_soc_dapm_context *dapm; struct snd_soc_jack_pin *pin; int enable; int oldstatus; + trace_snd_soc_jack_report(jack, mask, status); + if (!jack) return; codec = jack->codec; + dapm = &codec->dapm; mutex_lock(&codec->mutex); @@ -81,6 +85,8 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) if (mask && (jack->status == oldstatus)) goto out; + trace_snd_soc_jack_notify(jack, status); + list_for_each_entry(pin, &jack->pins, list) { enable = pin->mask & jack->status; @@ -88,15 +94,15 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) enable = !enable; if (enable) - snd_soc_dapm_enable_pin(codec, pin->pin); + snd_soc_dapm_enable_pin(dapm, pin->pin); 
else - snd_soc_dapm_disable_pin(codec, pin->pin); + snd_soc_dapm_disable_pin(dapm, pin->pin); } /* Report before the DAPM sync to help users updating micbias status */ blocking_notifier_call_chain(&jack->notifier, status, NULL); - snd_soc_dapm_sync(codec); + snd_soc_dapm_sync(dapm); snd_jack_report(jack->jack, status); @@ -207,6 +213,12 @@ static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio) static irqreturn_t gpio_handler(int irq, void *data) { struct snd_soc_jack_gpio *gpio = data; + struct device *dev = gpio->jack->codec->card->dev; + + trace_snd_soc_jack_irq(gpio->name); + + if (device_may_wakeup(dev)) + pm_wakeup_event(dev, gpio->debounce_time + 50); schedule_delayed_work(&gpio->work, msecs_to_jiffies(gpio->debounce_time)); @@ -263,11 +275,12 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count, INIT_DELAYED_WORK(&gpios[i].work, gpio_work); gpios[i].jack = jack; - ret = request_irq(gpio_to_irq(gpios[i].gpio), - gpio_handler, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - jack->codec->dev->driver->name, - &gpios[i]); + ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio), + gpio_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + jack->codec->dev->driver->name, + &gpios[i]); if (ret) goto err; diff --git a/sound/usb/format.c b/sound/usb/format.c index 69148212aa70..5b792d2c8061 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -76,7 +76,10 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, format = 1 << UAC_FORMAT_TYPE_I_PCM; } if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) { - if (sample_width > sample_bytes * 8) { + if (chip->usb_id == USB_ID(0x0582, 0x0016) /* Edirol SD-90 */ && + sample_width == 24 && sample_bytes == 2) + sample_bytes = 3; + else if (sample_width > sample_bytes * 8) { snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n", chip->dev->devnum, fp->iface, fp->altsetting, sample_width, sample_bytes); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 25bce7e5b1af..db2dc5ffe6dd 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -850,8 +850,8 @@ static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep, return; } - memset(urb->transfer_buffer + count, 0xFD, 9 - count); - urb->transfer_buffer_length = count; + memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count); + urb->transfer_buffer_length = ep->max_transfer; } static struct usb_protocol_ops snd_usbmidi_122l_ops = { @@ -1295,6 +1295,13 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi, case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */ ep->max_transfer = 4; break; + /* + * Some devices only work with 9 bytes packet size: + */ + case USB_ID(0x0644, 0x800E): /* Tascam US-122L */ + case USB_ID(0x0644, 0x800F): /* Tascam US-144 */ + ep->max_transfer = 9; + break; } for (i = 0; i < OUTPUT_URBS; ++i) { buffer = usb_alloc_coherent(umidi->dev, @@ -1729,13 +1736,7 @@ static int roland_load_info(struct snd_kcontrol *kcontrol, { static const char *const names[] = { "High Load", "Light Load" }; - info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - info->count = 1; - info->value.enumerated.items = 2; - if (info->value.enumerated.item > 1) - info->value.enumerated.item = 1; - strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); - return 0; + return snd_ctl_enum_info(info, 1, 2, names); } static int roland_load_get(struct snd_kcontrol *kcontrol, diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index f2d74d654b3c..7df89b3d7ded 100644 --- 
a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1633,18 +1633,11 @@ static int parse_audio_extension_unit(struct mixer_build *state, int unitid, voi static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = kcontrol->private_data; - char **itemlist = (char **)kcontrol->private_value; + const char **itemlist = (const char **)kcontrol->private_value; if (snd_BUG_ON(!itemlist)) return -EINVAL; - uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - uinfo->count = 1; - uinfo->value.enumerated.items = cval->max; - if (uinfo->value.enumerated.item >= cval->max) - uinfo->value.enumerated.item = cval->max - 1; - strlcpy(uinfo->value.enumerated.name, itemlist[uinfo->value.enumerated.item], - sizeof(uinfo->value.enumerated.name)); - return 0; + return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist); } /* get callback for selector unit */ diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index ad7079d1676c..35999874d301 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -705,11 +705,11 @@ YAMAHA_DEVICE(0x7010, "UB99"), .data = (const struct snd_usb_audio_quirk[]) { { .ifnum = 0, - .type = QUIRK_IGNORE_INTERFACE + .type = QUIRK_AUDIO_STANDARD_INTERFACE }, { .ifnum = 1, - .type = QUIRK_IGNORE_INTERFACE + .type = QUIRK_AUDIO_STANDARD_INTERFACE }, { .ifnum = 2, diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c index 6ef68e42138e..084e6fc8d5bf 100644 --- a/sound/usb/usx2y/us122l.c +++ b/sound/usb/usx2y/us122l.c @@ -273,29 +273,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { struct us122l *us122l = hw->private_data; - struct usb_stream *s = us122l->sk.s; unsigned *polled; unsigned int mask; poll_wait(file, &us122l->sk.sleep, wait); - switch (s->state) { - case usb_stream_ready: - if (us122l->first == file) - polled = &s->periods_polled; - else - polled = &us122l->second_periods_polled; - if (*polled != s->periods_done) { - *polled = s->periods_done; - mask = POLLIN | POLLOUT | POLLWRNORM; - break; + mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; + if (mutex_trylock(&us122l->mutex)) { + struct usb_stream *s = us122l->sk.s; + if (s && s->state == usb_stream_ready) { + if (us122l->first == file) + polled = &s->periods_polled; + else + polled = &us122l->second_periods_polled; + if (*polled != s->periods_done) { + *polled = s->periods_done; + mask = POLLIN | POLLOUT | POLLWRNORM; + } else + mask = 0; } - /* Fall through */ - mask = 0; - break; - default: - mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; - break; + mutex_unlock(&us122l->mutex); } return mask; } @@ -381,6 +378,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, { struct usb_stream_config *cfg; struct us122l *us122l = hw->private_data; + struct usb_stream *s; unsigned min_period_frames; int err = 0; bool high_speed; @@ -426,18 +424,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, snd_power_wait(hw->card, SNDRV_CTL_POWER_D0); mutex_lock(&us122l->mutex); + s = us122l->sk.s; if (!us122l->master) us122l->master = file; else if (us122l->master != file) { - if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) { + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) { err = -EIO; goto unlock; } us122l->slave = file; } - if (!us122l->sk.s || - memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) || - us122l->sk.s->state == usb_stream_xrun) { + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) || + s->state == usb_stream_xrun) { 
us122l_stop(us122l); if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames)) err = -EIO; @@ -448,6 +446,7 @@ unlock: mutex_unlock(&us122l->mutex); free: kfree(cfg); + wake_up_all(&us122l->sk.sleep); return err; } diff --git a/usr/Kconfig b/usr/Kconfig index c2c7fe2f717d..4780deac5974 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -72,6 +72,15 @@ config RD_LZMA Support loading of a LZMA encoded initial ramdisk or cpio buffer If unsure, say N. +config RD_XZ + bool "Support initial ramdisks compressed using XZ" if EMBEDDED + default !EMBEDDED + depends on BLK_DEV_INITRD + select DECOMPRESS_XZ + help + Support loading of a XZ encoded initial ramdisk or cpio buffer. + If unsure, say N. + config RD_LZO bool "Support initial ramdisks compressed using LZO" if EMBEDDED default !EMBEDDED @@ -139,6 +148,15 @@ config INITRAMFS_COMPRESSION_LZMA three. Compression is slowest. The initramfs size is about 33% smaller with LZMA in comparison to gzip. +config INITRAMFS_COMPRESSION_XZ + bool "XZ" + depends on RD_XZ + help + XZ uses the LZMA2 algorithm. The initramfs size is about 30% + smaller with XZ in comparison to gzip. Decompression speed + is better than that of bzip2 but worse than gzip and LZO. + Compression is slow. + config INITRAMFS_COMPRESSION_LZO bool "LZO" depends on RD_LZO diff --git a/usr/Makefile b/usr/Makefile index 6faa444b7087..029ffe6cd0d8 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -15,6 +15,9 @@ suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) = .bz2 # Lzma suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA) = .lzma +# XZ +suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ) = .xz + # Lzo suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO) = .lzo @@ -50,7 +53,7 @@ endif quiet_cmd_initfs = GEN $@ cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) -targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio +targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio # do not try to update files included in initramfs $(deps_initramfs): ; diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 7f1178f6b839..f63ccb0a5982 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -15,3 +15,6 @@ config KVM_APIC_ARCHITECTURE config KVM_MMIO bool + +config KVM_ASYNC_PF + bool diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c index 7c98928b09d9..ae72ae604c89 100644 --- a/virt/kvm/assigned-dev.c +++ b/virt/kvm/assigned-dev.c @@ -55,58 +55,31 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel return index; } -static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) +static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id) { - struct kvm_assigned_dev_kernel *assigned_dev; - int i; + struct kvm_assigned_dev_kernel *assigned_dev = dev_id; + u32 vector; + int index; - assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, - interrupt_work); + if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) { + spin_lock(&assigned_dev->intx_lock); + disable_irq_nosync(irq); + assigned_dev->host_irq_disabled = true; + spin_unlock(&assigned_dev->intx_lock); + } - spin_lock_irq(&assigned_dev->assigned_dev_lock); if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { - struct kvm_guest_msix_entry *guest_entries = - assigned_dev->guest_msix_entries; - for (i = 0; i < assigned_dev->entries_nr; i++) { - if (!(guest_entries[i].flags & - KVM_ASSIGNED_MSIX_PENDING)) - continue; - 
guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING; + index = find_index_from_host_irq(assigned_dev, irq); + if (index >= 0) { + vector = assigned_dev-> + guest_msix_entries[index].vector; kvm_set_irq(assigned_dev->kvm, - assigned_dev->irq_source_id, - guest_entries[i].vector, 1); + assigned_dev->irq_source_id, vector, 1); } } else kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, assigned_dev->guest_irq, 1); - spin_unlock_irq(&assigned_dev->assigned_dev_lock); -} - -static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) -{ - unsigned long flags; - struct kvm_assigned_dev_kernel *assigned_dev = - (struct kvm_assigned_dev_kernel *) dev_id; - - spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags); - if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { - int index = find_index_from_host_irq(assigned_dev, irq); - if (index < 0) - goto out; - assigned_dev->guest_msix_entries[index].flags |= - KVM_ASSIGNED_MSIX_PENDING; - } - - schedule_work(&assigned_dev->interrupt_work); - - if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { - disable_irq_nosync(irq); - assigned_dev->host_irq_disabled = true; - } - -out: - spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags); return IRQ_HANDLED; } @@ -114,7 +87,6 @@ out: static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_assigned_dev_kernel *dev; - unsigned long flags; if (kian->gsi == -1) return; @@ -127,12 +99,12 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) /* The guest irq may be shared so this ack may be * from another device. */ - spin_lock_irqsave(&dev->assigned_dev_lock, flags); + spin_lock(&dev->intx_lock); if (dev->host_irq_disabled) { enable_irq(dev->host_irq); dev->host_irq_disabled = false; } - spin_unlock_irqrestore(&dev->assigned_dev_lock, flags); + spin_unlock(&dev->intx_lock); } static void deassign_guest_irq(struct kvm *kvm, @@ -141,6 +113,9 @@ static void deassign_guest_irq(struct kvm *kvm, kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); assigned_dev->ack_notifier.gsi = -1; + kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, + assigned_dev->guest_irq, 0); + if (assigned_dev->irq_source_id != -1) kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); assigned_dev->irq_source_id = -1; @@ -152,28 +127,19 @@ static void deassign_host_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { /* - * In kvm_free_device_irq, cancel_work_sync return true if: - * 1. work is scheduled, and then cancelled. - * 2. work callback is executed. - * - * The first one ensured that the irq is disabled and no more events - * would happen. But for the second one, the irq may be enabled (e.g. - * for MSI). So we disable irq here to prevent further events. + * We disable irq here to prevent further events. * * Notice this maybe result in nested disable if the interrupt type is * INTx, but it's OK for we are going to free it. * * If this function is a part of VM destroy, please ensure that till * now, the kvm state is still legal for probably we also have to wait - * interrupt_work done. + * on a currently running IRQ handler. 
*/ if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { int i; for (i = 0; i < assigned_dev->entries_nr; i++) - disable_irq_nosync(assigned_dev-> - host_msix_entries[i].vector); - - cancel_work_sync(&assigned_dev->interrupt_work); + disable_irq(assigned_dev->host_msix_entries[i].vector); for (i = 0; i < assigned_dev->entries_nr; i++) free_irq(assigned_dev->host_msix_entries[i].vector, @@ -185,8 +151,7 @@ static void deassign_host_irq(struct kvm *kvm, pci_disable_msix(assigned_dev->dev); } else { /* Deal with MSI and INTx */ - disable_irq_nosync(assigned_dev->host_irq); - cancel_work_sync(&assigned_dev->interrupt_work); + disable_irq(assigned_dev->host_irq); free_irq(assigned_dev->host_irq, (void *)assigned_dev); @@ -232,7 +197,8 @@ static void kvm_free_assigned_device(struct kvm *kvm, { kvm_free_assigned_irq(kvm, assigned_dev); - pci_reset_function(assigned_dev->dev); + __pci_reset_function(assigned_dev->dev); + pci_restore_state(assigned_dev->dev); pci_release_regions(assigned_dev->dev); pci_disable_device(assigned_dev->dev); @@ -265,8 +231,8 @@ static int assigned_device_enable_host_intx(struct kvm *kvm, * on the same interrupt line is not a happy situation: there * are going to be long delays in accepting, acking, etc. */ - if (request_irq(dev->host_irq, kvm_assigned_dev_intr, - 0, "kvm_assigned_intx_device", (void *)dev)) + if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread, + IRQF_ONESHOT, dev->irq_name, (void *)dev)) return -EIO; return 0; } @@ -284,8 +250,8 @@ static int assigned_device_enable_host_msi(struct kvm *kvm, } dev->host_irq = dev->dev->irq; - if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0, - "kvm_assigned_msi_device", (void *)dev)) { + if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread, + 0, dev->irq_name, (void *)dev)) { pci_disable_msi(dev->dev); return -EIO; } @@ -310,10 +276,9 @@ static int assigned_device_enable_host_msix(struct kvm *kvm, return r; for (i = 0; i < dev->entries_nr; i++) { - r = request_irq(dev->host_msix_entries[i].vector, - kvm_assigned_dev_intr, 0, - "kvm_assigned_msix_device", - (void *)dev); + r = request_threaded_irq(dev->host_msix_entries[i].vector, + NULL, kvm_assigned_dev_thread, + 0, dev->irq_name, (void *)dev); if (r) goto err; } @@ -370,6 +335,9 @@ static int assign_host_irq(struct kvm *kvm, if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) return r; + snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s", + pci_name(dev->dev)); + switch (host_irq_type) { case KVM_DEV_IRQ_HOST_INTX: r = assigned_device_enable_host_intx(kvm, dev); @@ -547,6 +515,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, } pci_reset_function(dev); + pci_save_state(dev); match->assigned_dev_id = assigned_dev->assigned_dev_id; match->host_segnr = assigned_dev->segnr; @@ -554,12 +523,10 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, match->host_devfn = assigned_dev->devfn; match->flags = assigned_dev->flags; match->dev = dev; - spin_lock_init(&match->assigned_dev_lock); + spin_lock_init(&match->intx_lock); match->irq_source_id = -1; match->kvm = kvm; match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; - INIT_WORK(&match->interrupt_work, - kvm_assigned_dev_interrupt_work_handler); list_add(&match->list, &kvm->arch.assigned_dev_head); @@ -579,6 +546,7 @@ out: mutex_unlock(&kvm->lock); return r; out_list_del: + pci_restore_state(dev); list_del(&match->list); pci_release_regions(dev); out_disable: @@ -651,9 +619,9 @@ static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, r = -ENOMEM; goto 
msix_nr_out; } - adev->guest_msix_entries = kzalloc( - sizeof(struct kvm_guest_msix_entry) * - entry_nr->entry_nr, GFP_KERNEL); + adev->guest_msix_entries = + kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr, + GFP_KERNEL); if (!adev->guest_msix_entries) { kfree(adev->host_msix_entries); r = -ENOMEM; @@ -706,7 +674,7 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; - int r = -ENOTTY; + int r; switch (ioctl) { case KVM_ASSIGN_PCI_DEVICE: { @@ -724,7 +692,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, r = -EOPNOTSUPP; break; } -#ifdef KVM_CAP_ASSIGN_DEV_IRQ case KVM_ASSIGN_DEV_IRQ: { struct kvm_assigned_irq assigned_irq; @@ -747,8 +714,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, goto out; break; } -#endif -#ifdef KVM_CAP_DEVICE_DEASSIGNMENT case KVM_DEASSIGN_PCI_DEVICE: { struct kvm_assigned_pci_dev assigned_dev; @@ -760,7 +725,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, goto out; break; } -#endif #ifdef KVM_CAP_IRQ_ROUTING case KVM_SET_GSI_ROUTING: { struct kvm_irq_routing routing; @@ -813,6 +777,9 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, break; } #endif + default: + r = -ENOTTY; + break; } out: return r; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c new file mode 100644 index 000000000000..74268b4c2ee1 --- /dev/null +++ b/virt/kvm/async_pf.c @@ -0,0 +1,216 @@ +/* + * kvm asynchronous fault support + * + * Copyright 2010 Red Hat, Inc. + * + * Author: + * Gleb Natapov + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include + +#include "async_pf.h" +#include + +static struct kmem_cache *async_pf_cache; + +int kvm_async_pf_init(void) +{ + async_pf_cache = KMEM_CACHE(kvm_async_pf, 0); + + if (!async_pf_cache) + return -ENOMEM; + + return 0; +} + +void kvm_async_pf_deinit(void) +{ + if (async_pf_cache) + kmem_cache_destroy(async_pf_cache); + async_pf_cache = NULL; +} + +void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu) +{ + INIT_LIST_HEAD(&vcpu->async_pf.done); + INIT_LIST_HEAD(&vcpu->async_pf.queue); + spin_lock_init(&vcpu->async_pf.lock); +} + +static void async_pf_execute(struct work_struct *work) +{ + struct page *page = NULL; + struct kvm_async_pf *apf = + container_of(work, struct kvm_async_pf, work); + struct mm_struct *mm = apf->mm; + struct kvm_vcpu *vcpu = apf->vcpu; + unsigned long addr = apf->addr; + gva_t gva = apf->gva; + + might_sleep(); + + use_mm(mm); + down_read(&mm->mmap_sem); + get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL); + up_read(&mm->mmap_sem); + unuse_mm(mm); + + spin_lock(&vcpu->async_pf.lock); + list_add_tail(&apf->link, &vcpu->async_pf.done); + apf->page = page; + apf->done = true; + spin_unlock(&vcpu->async_pf.lock); + + /* + * apf may be freed by kvm_check_async_pf_completion() after + * this point + */ + + trace_kvm_async_pf_completed(addr, page, gva); + + if (waitqueue_active(&vcpu->wq)) + wake_up_interruptible(&vcpu->wq); + + mmdrop(mm); + kvm_put_kvm(vcpu->kvm); +} + +void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) +{ + /* cancel outstanding work queue item */ + while (!list_empty(&vcpu->async_pf.queue)) { + struct kvm_async_pf *work = + list_entry(vcpu->async_pf.queue.next, + typeof(*work), queue); + cancel_work_sync(&work->work); + list_del(&work->queue); + if (!work->done) /* work was canceled */ + kmem_cache_free(async_pf_cache, work); + } + + spin_lock(&vcpu->async_pf.lock); + while (!list_empty(&vcpu->async_pf.done)) { + struct kvm_async_pf *work = + list_entry(vcpu->async_pf.done.next, + typeof(*work), link); + list_del(&work->link); + if (work->page) + put_page(work->page); + kmem_cache_free(async_pf_cache, work); + } + spin_unlock(&vcpu->async_pf.lock); + + vcpu->async_pf.queued = 0; +} + +void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) +{ + struct kvm_async_pf *work; + + while (!list_empty_careful(&vcpu->async_pf.done) && + kvm_arch_can_inject_async_page_present(vcpu)) { + spin_lock(&vcpu->async_pf.lock); + work = list_first_entry(&vcpu->async_pf.done, typeof(*work), + link); + list_del(&work->link); + spin_unlock(&vcpu->async_pf.lock); + + if (work->page) + kvm_arch_async_page_ready(vcpu, work); + kvm_arch_async_page_present(vcpu, work); + + list_del(&work->queue); + vcpu->async_pf.queued--; + if (work->page) + put_page(work->page); + kmem_cache_free(async_pf_cache, work); + } +} + +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + struct kvm_arch_async_pf *arch) +{ + struct kvm_async_pf *work; + + if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) + return 0; + + /* setup delayed work */ + + /* + * do alloc nowait since if we are going to sleep anyway we + * may as well sleep faulting in page + */ + work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); + if (!work) + return 0; + + work->page = NULL; + work->done = false; + work->vcpu = vcpu; + work->gva = gva; + work->addr = gfn_to_hva(vcpu->kvm, gfn); + work->arch = *arch; + work->mm = current->mm; + atomic_inc(&work->mm->mm_count); + kvm_get_kvm(work->vcpu->kvm); + + /* this can't really happen otherwise 
gfn_to_pfn_async + would succeed */ + if (unlikely(kvm_is_error_hva(work->addr))) + goto retry_sync; + + INIT_WORK(&work->work, async_pf_execute); + if (!schedule_work(&work->work)) + goto retry_sync; + + list_add_tail(&work->queue, &vcpu->async_pf.queue); + vcpu->async_pf.queued++; + kvm_arch_async_page_not_present(vcpu, work); + return 1; +retry_sync: + kvm_put_kvm(work->vcpu->kvm); + mmdrop(work->mm); + kmem_cache_free(async_pf_cache, work); + return 0; +} + +int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu) +{ + struct kvm_async_pf *work; + + if (!list_empty_careful(&vcpu->async_pf.done)) + return 0; + + work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC); + if (!work) + return -ENOMEM; + + work->page = bad_page; + get_page(bad_page); + INIT_LIST_HEAD(&work->queue); /* for list_del to work */ + + spin_lock(&vcpu->async_pf.lock); + list_add_tail(&work->link, &vcpu->async_pf.done); + spin_unlock(&vcpu->async_pf.lock); + + vcpu->async_pf.queued++; + return 0; +} diff --git a/virt/kvm/async_pf.h b/virt/kvm/async_pf.h new file mode 100644 index 000000000000..e7ef6447cb82 --- /dev/null +++ b/virt/kvm/async_pf.h @@ -0,0 +1,36 @@ +/* + * kvm asynchronous fault support + * + * Copyright 2010 Red Hat, Inc. + * + * Author: + * Gleb Natapov + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __KVM_ASYNC_PF_H__ +#define __KVM_ASYNC_PF_H__ + +#ifdef CONFIG_KVM_ASYNC_PF +int kvm_async_pf_init(void); +void kvm_async_pf_deinit(void); +void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu); +#else +#define kvm_async_pf_init() (0) +#define kvm_async_pf_deinit() do{}while(0) +#define kvm_async_pf_vcpu_init(C) do{}while(0) +#endif + +#endif diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index c1f1e3c62984..2ca4535f4fb7 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -44,14 +44,19 @@ */ struct _irqfd { - struct kvm *kvm; - struct eventfd_ctx *eventfd; - int gsi; - struct list_head list; - poll_table pt; - wait_queue_t wait; - struct work_struct inject; - struct work_struct shutdown; + /* Used for MSI fast-path */ + struct kvm *kvm; + wait_queue_t wait; + /* Update side is protected by irqfds.lock */ + struct kvm_kernel_irq_routing_entry __rcu *irq_entry; + /* Used for level IRQ fast-path */ + int gsi; + struct work_struct inject; + /* Used for setup/shutdown */ + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; }; static struct workqueue_struct *irqfd_cleanup_wq; @@ -125,14 +130,22 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait); unsigned long flags = (unsigned long)key; + struct kvm_kernel_irq_routing_entry *irq; + struct kvm *kvm = irqfd->kvm; - if (flags & POLLIN) + if (flags & POLLIN) { + rcu_read_lock(); + irq = rcu_dereference(irqfd->irq_entry); /* An event has been signaled, inject an interrupt */ - schedule_work(&irqfd->inject); + if (irq) + kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1); + else + schedule_work(&irqfd->inject); + rcu_read_unlock(); + } if (flags & POLLHUP) { /* The eventfd is closing, detach from KVM */ - struct kvm *kvm = irqfd->kvm; unsigned long flags; spin_lock_irqsave(&kvm->irqfds.lock, flags); @@ -163,9 +176,31 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, add_wait_queue(wqh, &irqfd->wait); } +/* Must be called under irqfds.lock */ +static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd, + struct kvm_irq_routing_table *irq_rt) +{ + struct kvm_kernel_irq_routing_entry *e; + struct hlist_node *n; + + if (irqfd->gsi >= irq_rt->nr_rt_entries) { + rcu_assign_pointer(irqfd->irq_entry, NULL); + return; + } + + hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) { + /* Only fast-path MSI. */ + if (e->type == KVM_IRQ_ROUTING_MSI) + rcu_assign_pointer(irqfd->irq_entry, e); + else + rcu_assign_pointer(irqfd->irq_entry, NULL); + } +} + static int kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) { + struct kvm_irq_routing_table *irq_rt; struct _irqfd *irqfd, *tmp; struct file *file = NULL; struct eventfd_ctx *eventfd = NULL; @@ -215,6 +250,10 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) goto fail; } + irq_rt = rcu_dereference_protected(kvm->irq_routing, + lockdep_is_held(&kvm->irqfds.lock)); + irqfd_update(kvm, irqfd, irq_rt); + events = file->f_op->poll(file, &irqfd->pt); list_add_tail(&irqfd->list, &kvm->irqfds.items); @@ -271,8 +310,17 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi) spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { - if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) + if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) { + /* + * This rcu_assign_pointer is needed for when + * another thread calls kvm_irqfd_update before + * we flush workqueue below. 
+ * It is paired with synchronize_rcu done by caller + * of that function. + */ + rcu_assign_pointer(irqfd->irq_entry, NULL); irqfd_deactivate(irqfd); + } } spin_unlock_irq(&kvm->irqfds.lock); @@ -321,6 +369,25 @@ kvm_irqfd_release(struct kvm *kvm) } +/* + * Change irq_routing and irqfd. + * Caller must invoke synchronize_rcu afterwards. + */ +void kvm_irq_routing_update(struct kvm *kvm, + struct kvm_irq_routing_table *irq_rt) +{ + struct _irqfd *irqfd; + + spin_lock_irq(&kvm->irqfds.lock); + + rcu_assign_pointer(kvm->irq_routing, irq_rt); + + list_for_each_entry(irqfd, &kvm->irqfds.items, list) + irqfd_update(kvm, irqfd, irq_rt); + + spin_unlock_irq(&kvm->irqfds.lock); +} + /* * create a host-wide workqueue for issuing deferred shutdown requests * aggregated from all vm* instances. We need our own isolated single-thread diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 8edca9141b78..9f614b4e365f 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -114,8 +114,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, return r; } -static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, - struct kvm *kvm, int irq_source_id, int level) +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level) { struct kvm_lapic_irq irq; @@ -409,8 +409,9 @@ int kvm_set_irq_routing(struct kvm *kvm, mutex_lock(&kvm->irq_lock); old = kvm->irq_routing; - rcu_assign_pointer(kvm->irq_routing, new); + kvm_irq_routing_update(kvm, new); mutex_unlock(&kvm->irq_lock); + synchronize_rcu(); new = old; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5225052aebc1..f29abeb6a912 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -55,6 +55,7 @@ #include #include "coalesced_mmio.h" +#include "async_pf.h" #define CREATE_TRACE_POINTS #include @@ -89,7 +90,8 @@ static void hardware_disable_all(void); static void kvm_io_bus_destroy(struct kvm_io_bus *bus); -static bool kvm_rebooting; +bool kvm_rebooting; +EXPORT_SYMBOL_GPL(kvm_rebooting); static bool largepages_enabled = true; @@ -102,8 +104,26 @@ static pfn_t fault_pfn; inline int kvm_is_mmio_pfn(pfn_t pfn) { if (pfn_valid(pfn)) { - struct page *page = compound_head(pfn_to_page(pfn)); - return PageReserved(page); + int reserved; + struct page *tail = pfn_to_page(pfn); + struct page *head = compound_trans_head(tail); + reserved = PageReserved(head); + if (head != tail) { + /* + * "head" is not a dangling pointer + * (compound_trans_head takes care of that) + * but the hugepage may have been splitted + * from under us (and we may not hold a + * reference count on the head page so it can + * be reused before we run PageReferenced), so + * we've to check PageTail before returning + * what we just read. 
+ */ + smp_rmb(); + if (PageTail(tail)) + return reserved; + } + return PageReserved(tail); } return true; @@ -167,8 +187,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) void kvm_flush_remote_tlbs(struct kvm *kvm) { + int dirty_count = kvm->tlbs_dirty; + + smp_mb(); if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) ++kvm->stat.remote_tlb_flush; + cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); } void kvm_reload_remote_mmus(struct kvm *kvm) @@ -186,6 +210,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->kvm = kvm; vcpu->vcpu_id = id; init_waitqueue_head(&vcpu->wq); + kvm_async_pf_vcpu_init(vcpu); page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { @@ -247,7 +272,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; - need_tlb_flush = kvm_unmap_hva(kvm, address); + need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); @@ -291,6 +316,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm->mmu_notifier_count++; for (; start < end; start += PAGE_SIZE) need_tlb_flush |= kvm_unmap_hva(kvm, start); + need_tlb_flush |= kvm->tlbs_dirty; spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); @@ -344,6 +370,22 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, return young; } +static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int young, idx; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + young = kvm_test_age_hva(kvm, address); + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + + return young; +} + static void kvm_mmu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { @@ -360,6 +402,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, .clear_flush_young = kvm_mmu_notifier_clear_flush_young, + .test_young = kvm_mmu_notifier_test_young, .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; @@ -381,11 +424,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm) static struct kvm *kvm_create_vm(void) { - int r = 0, i; - struct kvm *kvm = kvm_arch_create_vm(); + int r, i; + struct kvm *kvm = kvm_arch_alloc_vm(); - if (IS_ERR(kvm)) - goto out; + if (!kvm) + return ERR_PTR(-ENOMEM); + + r = kvm_arch_init_vm(kvm); + if (r) + goto out_err_nodisable; r = hardware_enable_all(); if (r) @@ -399,23 +446,19 @@ static struct kvm *kvm_create_vm(void) r = -ENOMEM; kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!kvm->memslots) - goto out_err; + goto out_err_nosrcu; if (init_srcu_struct(&kvm->srcu)) - goto out_err; + goto out_err_nosrcu; for (i = 0; i < KVM_NR_BUSES; i++) { kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); - if (!kvm->buses[i]) { - cleanup_srcu_struct(&kvm->srcu); + if (!kvm->buses[i]) goto out_err; - } } r = kvm_init_mmu_notifier(kvm); - if (r) { - cleanup_srcu_struct(&kvm->srcu); + if (r) goto out_err; - } kvm->mm = current->mm; atomic_inc(&kvm->mm->mm_count); @@ -429,19 +472,35 @@ static struct kvm *kvm_create_vm(void) spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); spin_unlock(&kvm_lock); -out: + return kvm; out_err: + 
cleanup_srcu_struct(&kvm->srcu); +out_err_nosrcu: hardware_disable_all(); out_err_nodisable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); kfree(kvm->memslots); - kfree(kvm); + kvm_arch_free_vm(kvm); return ERR_PTR(r); } +static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) +{ + if (!memslot->dirty_bitmap) + return; + + if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE) + vfree(memslot->dirty_bitmap_head); + else + kfree(memslot->dirty_bitmap_head); + + memslot->dirty_bitmap = NULL; + memslot->dirty_bitmap_head = NULL; +} + /* * Free any memory in @free but not in @dont. */ @@ -454,7 +513,7 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, vfree(free->rmap); if (!dont || free->dirty_bitmap != dont->dirty_bitmap) - vfree(free->dirty_bitmap); + kvm_destroy_dirty_bitmap(free); for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { @@ -465,7 +524,6 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, } free->npages = 0; - free->dirty_bitmap = NULL; free->rmap = NULL; } @@ -499,6 +557,9 @@ static void kvm_destroy_vm(struct kvm *kvm) kvm_arch_flush_shadow(kvm); #endif kvm_arch_destroy_vm(kvm); + kvm_free_physmem(kvm); + cleanup_srcu_struct(&kvm->srcu); + kvm_arch_free_vm(kvm); hardware_disable_all(); mmdrop(mm); } @@ -527,6 +588,27 @@ static int kvm_vm_release(struct inode *inode, struct file *filp) return 0; } +/* + * Allocation size is twice as large as the actual dirty bitmap size. + * This makes it possible to do double buffering: see x86's + * kvm_vm_ioctl_get_dirty_log(). + */ +static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) +{ + unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); + + if (dirty_bytes > PAGE_SIZE) + memslot->dirty_bitmap = vzalloc(dirty_bytes); + else + memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL); + + if (!memslot->dirty_bitmap) + return -ENOMEM; + + memslot->dirty_bitmap_head = memslot->dirty_bitmap; + return 0; +} + /* * Allocate some memory and give it an address in the guest physical address * space. 
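The kvm_create_dirty_bitmap() hunk above allocates twice kvm_dirty_bitmap_bytes() (via vzalloc() or kzalloc(), depending on whether the doubled size exceeds PAGE_SIZE) so that x86's kvm_vm_ioctl_get_dirty_log() can hand one half of the buffer to userspace while dirty logging continues in the other half. The snippet below is only a rough userspace sketch of that double-buffering idea; the demo_* names are invented for illustration and do not appear in the patch.

/* Hypothetical illustration, not taken from this patch: one allocation
 * holds two equally sized bitmap halves, mirroring the new
 * dirty_bitmap/dirty_bitmap_head pair.  The half in use is handed out
 * and the other half is zeroed before becoming current. */
#include <stdlib.h>
#include <string.h>

struct demo_slot {
	unsigned long *dirty_bitmap;      /* half currently collecting dirty bits */
	unsigned long *dirty_bitmap_head; /* start of the double-sized buffer */
	size_t half_bytes;                /* size of one half in bytes */
};

static int demo_create_dirty_bitmap(struct demo_slot *s, size_t half_bytes)
{
	s->dirty_bitmap_head = calloc(2, half_bytes);
	if (!s->dirty_bitmap_head)
		return -1;
	s->dirty_bitmap = s->dirty_bitmap_head;
	s->half_bytes = half_bytes;
	return 0;
}

/* Return the half that was just in use and switch writers to the other one. */
static unsigned long *demo_flip_dirty_bitmap(struct demo_slot *s)
{
	unsigned long *old = s->dirty_bitmap;
	unsigned long *other = (old == s->dirty_bitmap_head)
		? s->dirty_bitmap_head + s->half_bytes / sizeof(unsigned long)
		: s->dirty_bitmap_head;

	memset(other, 0, s->half_bytes);
	s->dirty_bitmap = other;
	return old;
}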
@@ -604,13 +686,11 @@ int __kvm_set_memory_region(struct kvm *kvm, /* Allocate if a slot is being created */ #ifndef CONFIG_S390 if (npages && !new.rmap) { - new.rmap = vmalloc(npages * sizeof(*new.rmap)); + new.rmap = vzalloc(npages * sizeof(*new.rmap)); if (!new.rmap) goto out_free; - memset(new.rmap, 0, npages * sizeof(*new.rmap)); - new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; } @@ -633,14 +713,11 @@ int __kvm_set_memory_region(struct kvm *kvm, >> KVM_HPAGE_GFN_SHIFT(level)); lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level); - new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i])); + new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i])); if (!new.lpage_info[i]) goto out_free; - memset(new.lpage_info[i], 0, - lpages * sizeof(*new.lpage_info[i])); - if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) new.lpage_info[i][0].write_count = 1; if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) @@ -661,12 +738,8 @@ skip_lpage: /* Allocate page dirty bitmap if needed */ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { - unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); - - new.dirty_bitmap = vmalloc(dirty_bytes); - if (!new.dirty_bitmap) + if (kvm_create_dirty_bitmap(&new) < 0) goto out_free; - memset(new.dirty_bitmap, 0, dirty_bytes); /* destroy any largepage mappings for dirty tracking */ if (old.npages) flush_shadow = 1; @@ -685,6 +758,7 @@ skip_lpage: memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); if (mem->slot >= slots->nmemslots) slots->nmemslots = mem->slot + 1; + slots->generation++; slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID; old_memslots = kvm->memslots; @@ -719,6 +793,7 @@ skip_lpage: memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); if (mem->slot >= slots->nmemslots) slots->nmemslots = mem->slot + 1; + slots->generation++; /* actual memory is freed via old in kvm_free_physmem_slot below */ if (!npages) { @@ -849,10 +924,10 @@ int kvm_is_error_hva(unsigned long addr) } EXPORT_SYMBOL_GPL(kvm_is_error_hva); -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) +static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots, + gfn_t gfn) { int i; - struct kvm_memslots *slots = kvm_memslots(kvm); for (i = 0; i < slots->nmemslots; ++i) { struct kvm_memory_slot *memslot = &slots->memslots[i]; @@ -863,6 +938,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) } return NULL; } + +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) +{ + return __gfn_to_memslot(kvm_memslots(kvm), gfn); +} EXPORT_SYMBOL_GPL(gfn_to_memslot); int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) @@ -925,12 +1005,9 @@ int memslot_id(struct kvm *kvm, gfn_t gfn) return memslot - slots->memslots; } -static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, +static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages) { - struct kvm_memory_slot *slot; - - slot = gfn_to_memslot(kvm, gfn); if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return bad_hva(); @@ -942,28 +1019,61 @@ static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { - return gfn_to_hva_many(kvm, gfn, NULL); + return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); } EXPORT_SYMBOL_GPL(gfn_to_hva); -static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic) +static pfn_t get_fault_pfn(void) +{ + get_page(fault_page); + return fault_pfn; +} + +static pfn_t 
hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic, + bool *async, bool write_fault, bool *writable) { struct page *page[1]; - int npages; + int npages = 0; pfn_t pfn; - if (atomic) + /* we can do it either atomically or asynchronously, not both */ + BUG_ON(atomic && async); + + BUG_ON(!write_fault && !writable); + + if (writable) + *writable = true; + + if (atomic || async) npages = __get_user_pages_fast(addr, 1, 1, page); - else { + + if (unlikely(npages != 1) && !atomic) { might_sleep(); - npages = get_user_pages_fast(addr, 1, 1, page); + + if (writable) + *writable = write_fault; + + npages = get_user_pages_fast(addr, 1, write_fault, page); + + /* map read fault as writable if possible */ + if (unlikely(!write_fault) && npages == 1) { + struct page *wpage[1]; + + npages = __get_user_pages_fast(addr, 1, 1, wpage); + if (npages == 1) { + *writable = true; + put_page(page[0]); + page[0] = wpage[0]; + } + npages = 1; + } } if (unlikely(npages != 1)) { struct vm_area_struct *vma; if (atomic) - goto return_fault_page; + return get_fault_pfn(); down_read(&current->mm->mmap_sem); if (is_hwpoison_address(addr)) { @@ -972,19 +1082,20 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic) return page_to_pfn(hwpoison_page); } - vma = find_vma(current->mm, addr); - - if (vma == NULL || addr < vma->vm_start || - !(vma->vm_flags & VM_PFNMAP)) { - up_read(&current->mm->mmap_sem); -return_fault_page: - get_page(fault_page); - return page_to_pfn(fault_page); + vma = find_vma_intersection(current->mm, addr, addr+1); + + if (vma == NULL) + pfn = get_fault_pfn(); + else if ((vma->vm_flags & VM_PFNMAP)) { + pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + + vma->vm_pgoff; + BUG_ON(!kvm_is_mmio_pfn(pfn)); + } else { + if (async && (vma->vm_flags & VM_WRITE)) + *async = true; + pfn = get_fault_pfn(); } - - pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; up_read(&current->mm->mmap_sem); - BUG_ON(!kvm_is_mmio_pfn(pfn)); } else pfn = page_to_pfn(page[0]); @@ -993,40 +1104,58 @@ return_fault_page: pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr) { - return hva_to_pfn(kvm, addr, true); + return hva_to_pfn(kvm, addr, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(hva_to_pfn_atomic); -static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic) +static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, + bool write_fault, bool *writable) { unsigned long addr; + if (async) + *async = false; + addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) { get_page(bad_page); return page_to_pfn(bad_page); } - return hva_to_pfn(kvm, addr, atomic); + return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable); } pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) { - return __gfn_to_pfn(kvm, gfn, true); + return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); +pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, + bool write_fault, bool *writable) +{ + return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); +} +EXPORT_SYMBOL_GPL(gfn_to_pfn_async); + pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) { - return __gfn_to_pfn(kvm, gfn, false); + return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn); +pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, + bool *writable) +{ + return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); +} +EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); + pfn_t gfn_to_pfn_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn) { unsigned long addr = gfn_to_hva_memslot(slot, gfn); - return hva_to_pfn(kvm, addr, false); + return hva_to_pfn(kvm, addr, false, NULL, true, NULL); } int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, @@ -1035,7 +1164,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, unsigned long addr; gfn_t entry; - addr = gfn_to_hva_many(kvm, gfn, &entry); + addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); if (kvm_is_error_hva(addr)) return -1; @@ -1219,9 +1348,51 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, return 0; } +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + int offset = offset_in_page(gpa); + gfn_t gfn = gpa >> PAGE_SHIFT; + + ghc->gpa = gpa; + ghc->generation = slots->generation; + ghc->memslot = __gfn_to_memslot(slots, gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); + if (!kvm_is_error_hva(ghc->hva)) + ghc->hva += offset; + else + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); + +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + int r; + + if (slots->generation != ghc->generation) + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + + r = copy_to_user((void __user *)ghc->hva, data, len); + if (r) + return -EFAULT; + mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_write_guest_cached); + int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) { - return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len); + return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page, + offset, len); } EXPORT_SYMBOL_GPL(kvm_clear_guest_page); @@ -1244,11 +1415,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) } EXPORT_SYMBOL_GPL(kvm_clear_guest); -void mark_page_dirty(struct kvm *kvm, gfn_t gfn) +void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, + gfn_t gfn) { - struct kvm_memory_slot *memslot; - - memslot = gfn_to_memslot(kvm, gfn); if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; @@ -1256,6 +1425,14 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) } } +void mark_page_dirty(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_memory_slot *memslot; + + memslot = gfn_to_memslot(kvm, gfn); + mark_page_dirty_in_slot(kvm, memslot, gfn); +} + /* * The vCPU has executed a HLT instruction with in-kernel mode enabled. 
*/ @@ -1457,6 +1634,7 @@ static long kvm_vcpu_ioctl(struct file *filp, if (arg) goto out; r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); + trace_kvm_userspace_exit(vcpu->run->exit_reason, r); break; case KVM_GET_REGS: { struct kvm_regs *kvm_regs; @@ -1824,7 +2002,7 @@ static struct file_operations kvm_vm_fops = { static int kvm_dev_ioctl_create_vm(void) { - int fd, r; + int r; struct kvm *kvm; kvm = kvm_create_vm(); @@ -1837,11 +2015,11 @@ static int kvm_dev_ioctl_create_vm(void) return r; } #endif - fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); - if (fd < 0) + r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); + if (r < 0) kvm_put_kvm(kvm); - return fd; + return r; } static long kvm_dev_ioctl_check_extension_generic(long arg) @@ -1922,7 +2100,7 @@ static struct miscdevice kvm_dev = { &kvm_chardev_ops, }; -static void hardware_enable(void *junk) +static void hardware_enable_nolock(void *junk) { int cpu = raw_smp_processor_id(); int r; @@ -1942,7 +2120,14 @@ static void hardware_enable(void *junk) } } -static void hardware_disable(void *junk) +static void hardware_enable(void *junk) +{ + spin_lock(&kvm_lock); + hardware_enable_nolock(junk); + spin_unlock(&kvm_lock); +} + +static void hardware_disable_nolock(void *junk) { int cpu = raw_smp_processor_id(); @@ -1952,13 +2137,20 @@ static void hardware_disable(void *junk) kvm_arch_hardware_disable(NULL); } +static void hardware_disable(void *junk) +{ + spin_lock(&kvm_lock); + hardware_disable_nolock(junk); + spin_unlock(&kvm_lock); +} + static void hardware_disable_all_nolock(void) { BUG_ON(!kvm_usage_count); kvm_usage_count--; if (!kvm_usage_count) - on_each_cpu(hardware_disable, NULL, 1); + on_each_cpu(hardware_disable_nolock, NULL, 1); } static void hardware_disable_all(void) @@ -1977,7 +2169,7 @@ static int hardware_enable_all(void) kvm_usage_count++; if (kvm_usage_count == 1) { atomic_set(&hardware_enable_failed, 0); - on_each_cpu(hardware_enable, NULL, 1); + on_each_cpu(hardware_enable_nolock, NULL, 1); if (atomic_read(&hardware_enable_failed)) { hardware_disable_all_nolock(); @@ -2008,27 +2200,19 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, case CPU_STARTING: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); - spin_lock(&kvm_lock); hardware_enable(NULL); - spin_unlock(&kvm_lock); break; } return NOTIFY_OK; } -asmlinkage void kvm_handle_fault_on_reboot(void) +asmlinkage void kvm_spurious_fault(void) { - if (kvm_rebooting) { - /* spin while reset goes on */ - local_irq_enable(); - while (true) - cpu_relax(); - } /* Fault while not rebooting. We want the trace. 
*/ BUG(); } -EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot); +EXPORT_SYMBOL_GPL(kvm_spurious_fault); static int kvm_reboot(struct notifier_block *notifier, unsigned long val, void *v) @@ -2041,7 +2225,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, */ printk(KERN_INFO "kvm: exiting hardware virtualization\n"); kvm_rebooting = true; - on_each_cpu(hardware_disable, NULL, 1); + on_each_cpu(hardware_disable_nolock, NULL, 1); return NOTIFY_OK; } @@ -2211,7 +2395,7 @@ static void kvm_exit_debug(void) static int kvm_suspend(struct sys_device *dev, pm_message_t state) { if (kvm_usage_count) - hardware_disable(NULL); + hardware_disable_nolock(NULL); return 0; } @@ -2219,7 +2403,7 @@ static int kvm_resume(struct sys_device *dev) { if (kvm_usage_count) { WARN_ON(spin_is_locked(&kvm_lock)); - hardware_enable(NULL); + hardware_enable_nolock(NULL); } return 0; } @@ -2336,6 +2520,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, goto out_free_5; } + r = kvm_async_pf_init(); + if (r) + goto out_free; + kvm_chardev_ops.owner = module; kvm_vm_fops.owner = module; kvm_vcpu_fops.owner = module; @@ -2343,7 +2531,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, r = misc_register(&kvm_dev); if (r) { printk(KERN_ERR "kvm: misc device register failed\n"); - goto out_free; + goto out_unreg; } kvm_preempt_ops.sched_in = kvm_sched_in; @@ -2353,6 +2541,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, return 0; +out_unreg: + kvm_async_pf_deinit(); out_free: kmem_cache_destroy(kvm_vcpu_cache); out_free_5: @@ -2385,11 +2575,12 @@ void kvm_exit(void) kvm_exit_debug(); misc_deregister(&kvm_dev); kmem_cache_destroy(kvm_vcpu_cache); + kvm_async_pf_deinit(); sysdev_unregister(&kvm_sysdev); sysdev_class_unregister(&kvm_sysdev_class); unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); - on_each_cpu(hardware_disable, NULL, 1); + on_each_cpu(hardware_disable_nolock, NULL, 1); kvm_arch_hardware_unsetup(); kvm_arch_exit(); free_cpumask_var(cpus_hardware_enabled);