* 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM: Add missing syscore_suspend() and syscore_resume() calls
PM: Fix error code paths executed after failing syscore_suspend()
--- /dev/null
+Kernel driver max16064
+======================
+
+Supported chips:
+ * Maxim MAX16064
+ Prefix: 'max16064'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
+Controller with Active-Voltage Output Control and PMBus Interface.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
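+
+For example, a minimal sketch of board-level instantiation (the bus number and
+the 0x40 address are assumptions; use the values for your board):
+
+	static struct i2c_board_info max16064_info[] __initdata = {
+		{
+			I2C_BOARD_INFO("max16064", 0x40),
+		},
+	};
+
+	/* in the board setup code */
+	i2c_register_board_info(1, max16064_info, ARRAY_SIZE(max16064_info));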
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-4]_label "vout[1-4]"
+in[1-4]_input Measured voltage. From READ_VOUT register.
+in[1-4]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-4]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
--- /dev/null
+Kernel driver max34440
+======================
+
+Supported chips:
+ * Maxim MAX34440
+   PMBus 6-Channel Power-Supply Manager
+   Prefixes: 'max34440'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ * Maxim MAX34441
+ PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+ Prefixes: 'max34441'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
+Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager
+and Intelligent Fan Controller.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-6]_label "vout[1-6]".
+in[1-6]_input Measured voltage. From READ_VOUT register.
+in[1-6]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-6]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr[1-6]_label "iout[1-6]".
+curr[1-6]_input Measured current. From READ_IOUT register.
+curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
+curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+ in6 and curr6 attributes only exist for MAX34440.
+
+temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
+ temp1 is the chip's internal temperature. temp2..temp5
+ are remote I2C temperature sensors. For MAX34441, temp6
+ is a remote thermal-diode sensor. For MAX34440, temp6..8
+ are remote I2C temperature sensors.
+temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register.
+temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp[1-8]_max_alarm Temperature high alarm.
+temp[1-8]_crit_alarm Temperature critical high alarm.
+
+ temp7 and temp8 attributes only exist for MAX34440.
--- /dev/null
+Kernel driver max8688
+=====================
+
+Supported chips:
+ * Maxim MAX8688
+ Prefix: 'max8688'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
+Controller/Monitor with PMBus Interface.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in1_label "vout1"
+in1_input Measured voltage. From READ_VOUT register.
+in1_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in1_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr1_label "iout1"
+curr1_input Measured current. From READ_IOUT register.
+curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_max_alarm Current high alarm. From IOUT_OC_WARNING status.
+curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
Prefix: 'ltc2978'
Addresses scanned: -
Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
- * Maxim MAX16064
- Quad Power-Supply Controller
- Prefix: 'max16064'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
- * Maxim MAX34440
- PMBus 6-Channel Power-Supply Manager
- Prefixes: 'max34440'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
- * Maxim MAX34441
- PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
- Prefixes: 'max34441'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
- * Maxim MAX8688
- Digital Power-Supply Controller/Monitor
- Prefix: 'max8688'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
* Generic PMBus devices
Prefix: 'pmbus'
Addresses scanned: -
From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
currX_alarm Current high alarm.
From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_max_alarm Current high alarm.
+ From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status.
currX_lcrit_alarm Output current critical low alarm.
From IOUT_UC_FAULT status.
currX_crit_alarm Current critical high alarm.
From IIN_OC_FAULT or IOUT_OC_FAULT status.
-currX_label "iin" or "vinY"
+currX_label "iin" or "ioutY"
powerX_input Measured power. From READ_PIN or READ_POUT register.
powerX_cap Output power cap. From POUT_MAX register.
From POUT_OP_FAULT status.
powerX_label "pin" or "poutY"
-tempX_input Measured tempererature.
+tempX_input Measured temperature.
From READ_TEMPERATURE_X register.
-tempX_min Mimimum tempererature. From UT_WARN_LIMIT register.
-tempX_max Maximum tempererature. From OT_WARN_LIMIT register.
-tempX_lcrit Critical low tempererature.
+tempX_min Minimum temperature. From UT_WARN_LIMIT register.
+tempX_max Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit Critical low temperature.
From UT_FAULT_LIMIT register.
-tempX_crit Critical high tempererature.
+tempX_crit Critical high temperature.
From OT_FAULT_LIMIT register.
tempX_min_alarm Chip temperature low alarm. Set by comparing
READ_TEMPERATURE_X with UT_WARN_LIMIT if
in9_crit_alarm AIN1 critical alarm
in10_crit_alarm AIN2 critical alarm
-temp1_input Chip tempererature
-temp1_min Mimimum chip tempererature
-temp1_max Maximum chip tempererature
-temp1_crit Critical chip tempererature
+temp1_input Chip temperature
+temp1_min Minimum chip temperature
+temp1_max Maximum chip temperature
+temp1_crit Critical chip temperature
temp1_crit_alarm Temperature critical alarm
--- /dev/null
+ How to Get Your Patch Accepted Into the Hwmon Subsystem
+ -------------------------------------------------------
+
+This text is a collection of suggestions for people writing patches or
+drivers for the hwmon subsystem. Following these suggestions will greatly
+increase the chances of your change being accepted.
+
+
+1. General
+----------
+
+* It should be unnecessary to mention, but please read and follow
+ Documentation/SubmitChecklist
+ Documentation/SubmittingDrivers
+ Documentation/SubmittingPatches
+ Documentation/CodingStyle
+
+* If your patch generates checkpatch warnings, please refrain from explanations
+ such as "I don't like that coding style". Keep in mind that each unnecessary
+ warning helps hide a real problem. If you don't like the kernel coding
+ style, don't write kernel drivers.
+
+* Please test your patch thoroughly. We are not your test group.
+ Sometimes a patch cannot be tested, or cannot be tested completely, because
+ the necessary hardware is missing. In such cases, you should at least
+ test-build the code on one architecture. If run-time testing was not
+ possible, say so explicitly below the patch header.
+
+* If your patch (or the driver) is affected by configuration options such as
+ CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration
+ variants.
+
+
+2. Adding functionality to existing drivers
+-------------------------------------------
+
+* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+ date.
+
+* Make sure the information in Kconfig is up to date.
+
+* If the added functionality requires some cleanup or structural changes, split
+ your patch into a cleanup part and the actual addition. This makes it easier
+ to review your changes, and to bisect any resulting problems.
+
+* Never mix bug fixes, cleanup, and functional enhancements in a single patch.
+
+
+3. New drivers
+--------------
+
+* Running your patch or driver file(s) through checkpatch does not mean its
+ formatting is clean. If unsure about formatting in your new driver, run it
+ through Lindent. Lindent is not perfect, and you may have to do some minor
+ cleanup, but it is a good start.
+
+* Consider adding yourself to MAINTAINERS.
+
+* Document the driver in Documentation/hwmon/<driver_name>.
+
+* Add the driver to Kconfig and Makefile in alphabetical order.
+
+* Make sure that all dependencies are listed in Kconfig. For new drivers, it
+ is most likely prudent to add a dependency on EXPERIMENTAL.
+
+* Avoid forward declarations if you can. Rearrange the code if necessary.
+
+* Avoid calculations in macros and macro-generated functions. While such macros
+ may save a line or so in the source, they obfuscate the code and make code
+ review more difficult. They may also result in code which is more complicated
+ than necessary. Use inline functions or just regular functions instead.
+
+* If the driver has a detect function, make sure it is silent. Debug messages
+ and messages printed after a successful detection are acceptable, but it
+ must not print messages such as "Chip XXX not found/supported".
+
+ Keep in mind that the detect function will run for all drivers supporting an
+ address if a chip is detected on that address. Unnecessary messages will just
+ pollute the kernel log and not provide any value.
+
+* Provide a detect function if and only if a chip can be detected reliably.
+
+* Avoid writing to chip registers in the detect function. If you have to write,
+ only do it after you have already gathered enough data to be certain that the
+ detection is going to be successful.
+
+ Keep in mind that the chip might not be what your driver believes it is, and
+ writing to it might cause a bad misconfiguration.
+
+* Make sure there are no race conditions in the probe function. Specifically,
+ completely initialize your chip first, then create sysfs entries and register
+ with the hwmon subsystem.
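+
+  A minimal sketch of that ordering (the foo_* names, driver data structure,
+  and attribute group are hypothetical, not taken from an existing driver):
+
+	static int foo_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+	{
+		struct foo_data *data;
+		int err;
+
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+		i2c_set_clientdata(client, data);
+
+		/* 1. Fully initialize the chip and driver data first */
+		err = foo_init_chip(client);
+		if (err)
+			goto err_free;
+
+		/* 2. Only then create the sysfs attributes ... */
+		err = sysfs_create_group(&client->dev.kobj, &foo_attr_group);
+		if (err)
+			goto err_free;
+
+		/* 3. ... and finally register with the hwmon subsystem */
+		data->hwmon_dev = hwmon_device_register(&client->dev);
+		if (IS_ERR(data->hwmon_dev)) {
+			err = PTR_ERR(data->hwmon_dev);
+			goto err_remove;
+		}
+		return 0;
+
+	err_remove:
+		sysfs_remove_group(&client->dev.kobj, &foo_attr_group);
+	err_free:
+		kfree(data);
+		return err;
+	}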
+
+* Do not provide support for deprecated sysfs attributes.
+
+* Do not create non-standard attributes unless really needed. If you have to use
+ non-standard attributes, or you believe you do, discuss it on the mailing list
+ first. In either case, provide a detailed explanation of why you need the
+ non-standard attribute(s).
+ Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+
+* When deciding which sysfs attributes to support, look at the chip's
+ capabilities. While we do not expect your driver to support everything the
+ chip may offer, it should at least support all limits and alarms.
+
+* Last but not least, please check if a driver for your chip already exists
+ before starting to write a new driver. Especially for temperature sensors,
+ new chips are often variants of previously released chips. In some cases,
+ a presumably new chip may simply have been relabeled.
--- /dev/null
+The input protocol uses a map of types and codes to express input device values
+to userspace. This document describes the types and codes and how and when they
+may be used.
+
+A single hardware event generates multiple input events. Each input event
+contains the new value of a single data item. A special event type, EV_SYN, is
+used to separate input events into packets of input data changes occurring at
+the same moment in time. In the following, the term "event" refers to a single
+input event encompassing a type, code, and value.
+
+The input protocol is a stateful protocol. Events are emitted only when values
+of event codes have changed. However, the state is maintained within the Linux
+input subsystem; drivers do not need to maintain the state and may attempt to
+emit unchanged values without harm. Userspace may obtain the current state of
+event code values using the EVIOCG* ioctls defined in linux/input.h. The event
+reports supported by a device are also provided by sysfs in
+class/input/event*/device/capabilities/, and the properties of a device are
+provided in class/input/event*/device/properties.
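+
+A minimal userspace sketch (the event node path is an assumption) that reads
+events from an evdev node; every record carries the type, code, and value
+described in the rest of this document:
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <linux/input.h>
+
+	int main(void)
+	{
+		struct input_event ev;
+		int fd = open("/dev/input/event0", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
+			printf("type %u code %u value %d\n",
+			       ev.type, ev.code, ev.value);
+		close(fd);
+		return 0;
+	}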
+
+Types:
+==========
+Types are groupings of codes under a logical input construct. Each type has a
+set of applicable codes to be used in generating events. See the Codes section
+for details on valid codes for each type.
+
+* EV_SYN:
+ - Used as markers to separate events. Events may be separated in time or in
+ space, such as with the multitouch protocol.
+
+* EV_KEY:
+ - Used to describe state changes of keyboards, buttons, or other key-like
+ devices.
+
+* EV_REL:
+ - Used to describe relative axis value changes, e.g. moving the mouse 5 units
+ to the left.
+
+* EV_ABS:
+ - Used to describe absolute axis value changes, e.g. describing the
+ coordinates of a touch on a touchscreen.
+
+* EV_MSC:
+ - Used to describe miscellaneous input data that do not fit into other types.
+
+* EV_SW:
+ - Used to describe binary state input switches.
+
+* EV_LED:
+ - Used to turn LEDs on devices on and off.
+
+* EV_SND:
+ - Used to output sound to devices.
+
+* EV_REP:
+ - Used for autorepeating devices.
+
+* EV_FF:
+ - Used to send force feedback commands to an input device.
+
+* EV_PWR:
+ - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+ - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+ - Used to synchronize and separate events into packets of input data changes
+ occurring at the same moment in time. For example, motion of a mouse may set
+ the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+ motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+ - TBD
+
+* SYN_MT_REPORT:
+ - Used to synchronize and separate touch events. See the
+ multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+ - Used to indicate buffer overrun in the evdev client's event queue.
+ Client should ignore all events up to and including next SYN_REPORT
+ event and query the device (using EVIOCG* ioctls) to obtain its
+ current state.
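+
+  A sketch of the recovery logic (process_event() and resync_from_ioctls()
+  are hypothetical helpers; the latter would re-read device state with the
+  EVIOCG* ioctls):
+
+	static int dropped;
+
+	static void handle_event(int fd, const struct input_event *ev)
+	{
+		if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
+			dropped = 1;	/* queue overran, state is stale */
+			return;
+		}
+		if (dropped) {
+			/* ignore everything up to and including SYN_REPORT */
+			if (ev->type == EV_SYN && ev->code == SYN_REPORT) {
+				dropped = 0;
+				resync_from_ioctls(fd); /* EVIOCGKEY, EVIOCGSW, ... */
+			}
+			return;
+		}
+		process_event(ev);
+	}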
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware sends events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+ - These codes are used in conjunction with input trackpads, tablets, and
+ touchscreens. These devices may be used with fingers, pens, or other tools.
+ When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+ code should be set to a value of 1. When the tool is no longer interacting
+ with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+ trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+ code when events are generated.
+
+* BTN_TOUCH:
+ BTN_TOUCH is used for touch contact. While an input tool is determined to be
+ within meaningful physical contact, the value of this property must be set
+ to 1. Meaningful physical contact may mean any contact, or it may mean
+ contact conditioned by an implementation defined property. For example, a
+ touchpad may set the value to 1 only when the touch pressure rises above a
+ certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+ example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+ pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+ - These codes denote one, two, three, and four finger interaction on a
+ trackpad or touchscreen. For example, if the user uses two fingers and moves
+ them on the touchpad in an effort to scroll content on screen,
+ BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+ Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+ purpose. A trackpad event generated by finger touches should generate events
+ for one code from each group. At most only one of these BTN_TOOL_<name>
+ codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+ - These codes are used for vertical and horizontal scroll wheels,
+ respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+ - Used to describe the distance of a tool from an interaction surface. This
+ event should only be emitted while the tool is hovering, meaning in close
+ proximity of the device and while the value of the BTN_TOUCH code is 0. If
+ the input device may be used freely in three dimensions, consider ABS_Z
+ instead.
+
+* ABS_MT_<name>:
+ - Used to describe multitouch input events. Please see
+ multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver does
+not need to keep the state of the switch at any time.
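+
+A minimal driver-side sketch (the GPIO used to sense the lid is an
+assumption): report the current state at bind time and on resume so that the
+kernel and userspace stay in sync:
+
+	static void report_lid_state(struct input_dev *input, int lid_gpio)
+	{
+		input_report_switch(input, SW_LID, gpio_get_value(lid_gpio));
+		input_sync(input);
+	}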
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to
+cause such a device to play a feedback effect.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Its usage is not well defined. To be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
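+
+A driver-side sketch of one such event frame (dev is the driver's struct
+input_dev): one unit of motion to the right plus a left button press:
+
+	input_report_rel(dev, REL_X, 1);
+	input_report_key(dev, BTN_LEFT, 1);
+	input_sync(dev);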
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
within the array where IO will be blocked. This is currently
only supported for raid4/5/6.
+ sync_min
+ sync_max
+ The two values, given as numbers of sectors, indicate a range
+ within the array where 'check'/'repair' will operate. Must be
+ a multiple of chunk_size. When it reaches "sync_max" it will
+ pause, rather than complete.
+ You can use 'select' or 'poll' on "sync_completed" to wait for
+ that number to reach sync_max. Then you can either increase
+ "sync_max", or can write 'idle' to "sync_action".
+
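+  A userspace sketch of a bounded 'check' (md0 and the 1 GiB range are
+  assumptions; values are in 512-byte sectors):
+
+	#include <stdio.h>
+	#include <poll.h>
+
+	static void write_attr(const char *path, const char *val)
+	{
+		FILE *f = fopen(path, "w");
+
+		if (f) {
+			fputs(val, f);
+			fclose(f);
+		}
+	}
+
+	int main(void)
+	{
+		struct pollfd pfd;
+		unsigned long long done;
+		char buf[64];
+		FILE *f = fopen("/sys/block/md0/md/sync_completed", "r");
+
+		if (!f)
+			return 1;
+		write_attr("/sys/block/md0/md/sync_min", "0");
+		write_attr("/sys/block/md0/md/sync_max", "2097152");
+		write_attr("/sys/block/md0/md/sync_action", "check");
+
+		pfd.fd = fileno(f);
+		pfd.events = POLLPRI | POLLERR;
+		do {	/* wait for sync_completed to reach sync_max */
+			rewind(f);
+			if (!fgets(buf, sizeof(buf), f) ||
+			    sscanf(buf, "%llu", &done) != 1)
+				break;
+		} while (done < 2097152ULL && poll(&pfd, 1, -1) >= 0);
+
+		/* the check has paused at sync_max; raise sync_max or go idle */
+		write_attr("/sys/block/md0/md/sync_action", "idle");
+		fclose(f);
+		return 0;
+	}
+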
Each active md device may also have attributes specific to the
personality module that manages it.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
-name='Music Playback Volume',index=0
+name='Synth Playback Volume',index=0
This control is used to attenuate samples for left and right MIDI FX-bus
accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
-name='Music Capture Volume',index=0
-name='Music Capture Switch',index=0
+name='Synth Capture Volume',index=0
+name='Synth Capture Switch',index=0
These controls are used to attenuate samples for left and right MIDI FX-bus
accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar <jassi.brar@samsung.com>
+M: Jassi Brar <jassisinghbrar@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
#define __ASM_ARM_CPUTYPE_H
#include <linux/stringify.h>
+#include <linux/kernel.h>
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
+#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
+#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
+#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
+#define __NR_syncfs (__NR_SYSCALL_BASE+373)
/*
* The following SWIs are ARM private.
CALL(sys_fanotify_init)
CALL(sys_fanotify_mark)
CALL(sys_prlimit64)
+/* 370 */ CALL(sys_name_to_handle_at)
+ CALL(sys_open_by_handle_at)
+ CALL(sys_clock_adjtime)
+ CALL(sys_syncfs)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
static void __init qsd8x50_init_mmc(void)
{
- if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
- vreg_mmc = vreg_get(NULL, "gp6");
- else
- vreg_mmc = vreg_get(NULL, "gp5");
+ vreg_mmc = vreg_get(NULL, "gp5");
if (IS_ERR(vreg_mmc)) {
pr_err("vreg get for vreg_mmc failed (%ld)\n",
/* Use existing clock_event for cpu 0 */
if (!smp_processor_id())
- return;
+ return 0;
writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
.num_resources = 0,
};
+static struct platform_device gta02_dfbmcs320_device = {
+ .name = "dfbmcs320",
+};
+
static struct i2c_board_info gta02_i2c_devs[] __initdata = {
{
I2C_BOARD_INFO("pcf50633", 0x73),
&s3c_device_iis,
&samsung_asoc_dma,
&s3c_device_i2c0,
+	&gta02_dfbmcs320_device,
	&gta02_buttons_device,
&s3c_device_adc,
&s3c_device_ts,
config ARCH_SUSPEND_POSSIBLE
def_bool y
depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
- PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+ (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
config PPC_DCR_NATIVE
bool
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500)
+#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
#else
enum {
CPU_FTRS_POSSIBLE =
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+ CPU_FTRS_E5500 |
#endif
0,
};
#endif /* __powerpc64__ */
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500)
+#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
#else
enum {
CPU_FTRS_ALWAYS =
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+ CPU_FTRS_E5500 &
#endif
CPU_FTRS_POSSIBLE,
};
* on platforms where such control is possible.
*/
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES)
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
.pvr_mask = 0xffff0000,
.pvr_value = 0x80240000,
.cpu_name = "e5500",
- .cpu_features = CPU_FTRS_E500MC,
+ .cpu_features = CPU_FTRS_E5500,
.cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
}
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
static void crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
}
mb();
}
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
/*
* This function will be called by secondary cpus or by kexec cpu
crash_ipi_callback(regs);
}
-#else
+#else /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
static void crash_kexec_prepare_cpus(int cpu)
{
/*
{
cpus_in_sr = CPU_MASK_NONE;
}
-#endif
+#endif /* CONFIG_SMP */
/*
* Register a function to be called on shutdown. Only use this if you
if (!parent)
continue;
if (of_match_node(legacy_serial_parents, parent) != NULL) {
- index = add_legacy_soc_port(np, np);
- if (index >= 0 && np == stdout)
- legacy_serial_console = index;
+ if (of_device_is_available(np)) {
+ index = add_legacy_soc_port(np, np);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ }
}
of_node_put(parent);
}
return 0;
}
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+ u64 delta = (val - prev) & 0xfffffffful;
+
+ /*
+ * POWER7 can roll back counter values; if the new value is smaller
+ * than the previous value it will cause the delta and the counter to
+ * have bogus values unless we rolled a counter over. If a counter is
+ * rolled back, it will be smaller, but within 256, which is the maximum
+ * number of events to roll back at once. If we detect a rollback
+ * return 0. This can lead to a small lack of precision in the
+ * counters.
+ */
+ if (prev > val && (prev - val) < 256)
+ delta = 0;
+
+ return delta;
+}
+
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
+ delta = check_and_compute_delta(prev, val);
+ if (!delta)
+ return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
- /* The counters are only 32 bits wide */
- delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
- delta = (val - prev) & 0xfffffffful;
- local64_add(delta, &event->count);
+ delta = check_and_compute_delta(prev, val);
+ if (delta)
+ local64_add(delta, &event->count);
}
}
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
- u64 val;
+ u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
- local64_set(&event->hw.prev_count, val);
+ prev = local64_read(&event->hw.prev_count);
+ if (check_and_compute_delta(prev, val))
+ local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
- delta = (val - prev) & 0xfffffffful;
+ delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
u64 stolen = 0;
u64 dtb;
+ if (!dtl)
+ return 0;
+
if (i == vpa->dtl_idx)
return 0;
while (i < vpa->dtl_idx) {
mpic_setup_this_cpu();
}
+#ifdef CONFIG_PPC64
#ifdef CONFIG_HOTPLUG_CPU
static int smp_core99_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static void __init smp_core99_bringup_done(void)
{
-#ifdef CONFIG_PPC64
extern void g5_phy_disable_cpu1(void);
/* Close i2c bus if it was used for tb sync */
set_cpu_present(1, false);
g5_phy_disable_cpu1();
}
-#endif /* CONFIG_PPC64 */
-
#ifdef CONFIG_HOTPLUG_CPU
register_cpu_notifier(&smp_core99_cpu_nb);
#endif
+
if (ppc_md.progress)
ppc_md.progress("smp_core99_bringup_done", 0x349);
}
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_HOTPLUG_CPU
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
.bringup_done = smp_core99_bringup_done,
+#endif
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
int cpu, ret;
struct paca_struct *pp;
struct dtl_entry *dtl;
+ struct kmem_cache *dtl_cache;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
+ dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, NULL);
+ if (!dtl_cache) {
+ pr_warn("Failed to create dispatch trace log buffer cache\n");
+ pr_warn("Stolen time statistics will be unreliable\n");
+ return 0;
+ }
+
for_each_possible_cpu(cpu) {
pp = &paca[cpu];
- dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
- cpu_to_node(cpu));
+ dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
if (!dtl) {
pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
cpu);
struct resource rsrc;
const int *bus_range;
+ if (!of_device_is_available(dev)) {
+ pr_warning("%s: disabled\n", dev->full_name);
+ return -ENODEV;
+ }
+
pr_debug("Adding PCI host bridge %s\n", dev->full_name);
/* Fetch host bridge registers address */
* Don't enable translation but enable GART IO and CPU accesses.
* Also, set DISTLBWALKPRB since GART tables memory is UC.
*/
- ctl = DISTLBWALKPRB | order << 1;
+ ctl = order << 1;
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
{
u32 tmp, ctl;
- /* address of the mappings table */
- addr >>= 12;
- tmp = (u32) addr<<4;
- tmp &= ~0xf;
- pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
-
- /* Enable GART translation for this hammer. */
- pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
- ctl |= GARTEN;
- ctl &= ~(DISGARTCPU | DISGARTIO);
- pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+ /* address of the mappings table */
+ addr >>= 12;
+ tmp = (u32) addr<<4;
+ tmp &= ~0xf;
+ pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+ /* Enable GART translation for this hammer. */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+ ctl |= GARTEN | DISTLBWALKPRB;
+ ctl &= ~(DISGARTCPU | DISGARTIO);
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
#endif
#endif /* _ASM_X86_NUMA_H */
* Don't enable translation yet but enable GART IO and CPU
* accesses and set DISTLBWALKPRB since GART table memory is UC.
*/
- u32 ctl = DISTLBWALKPRB | aper_order << 1;
+ u32 ctl = aper_order << 1;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
return -EOPNOTSUPP;
}
+ /*
+ * Do not allow config1 (extended registers) to propagate,
+ * there's no sane user-space generalization yet:
+ */
if (attr->type == PERF_TYPE_RAW)
- return x86_pmu_extra_regs(event->attr.config, event);
+ return 0;
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
+ [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
*
* Exceptions:
*
+ * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
* 0x003 FP PERF_CTL[3]
+ * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
* 0x00B FP PERF_CTL[3]
* 0x00D FP PERF_CTL[3]
* 0x023 DE PERF_CTL[2:0]
* 0x0DF LS PERF_CTL[5:0]
* 0x1D6 EX PERF_CTL[5:0]
* 0x1D8 EX PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
*/
static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
- unsigned int event_code = amd_get_event_code(&event->hw);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int event_code = amd_get_event_code(hwc);
switch (event_code & AMD_EVENT_TYPE_MASK) {
case AMD_EVENT_FP:
switch (event_code) {
+ case 0x000:
+ if (!(hwc->config & 0x0000F000ULL))
+ break;
+ if (!(hwc->config & 0x00000F00ULL))
+ break;
+ return &amd_f15_PMC3;
+ case 0x004:
+ if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+ break;
+ return &amd_f15_PMC3;
case 0x003:
case 0x00B:
case 0x00D:
return &amd_f15_PMC3;
- default:
- return &amd_f15_PMC53;
}
+ return &amd_f15_PMC53;
case AMD_EVENT_LS:
case AMD_EVENT_DC:
case AMD_EVENT_EX_LS:
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
+ case 47: /* 32 nm Xeon E7 */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
- p4_pmu_disable_event(event);
+ x86_pmu_stop(event, 0);
}
if (handled) {
#define AGPEXTERN
#endif
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR (1ULL << 40)
+
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
size_t size, int dir, unsigned long align_mask)
{
unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
- unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+ unsigned long iommu_page;
int i;
+ if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+ return bad_dma_addr;
+
+ iommu_page = alloc_iommu(dev, npages, align_mask);
if (iommu_page == -1) {
if (!nonforced_iommu(dev, phys_mem, size))
return phys_mem;
identify_secondary_cpu(c);
}
-static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
-{
- int node1 = early_cpu_to_node(cpu1);
- int node2 = early_cpu_to_node(cpu2);
-
- /*
- * Our CPU scheduler assumes all logical cpus in the same physical cpu
- * share the same node. But, buggy ACPI or NUMA emulation might assign
- * them to different node. Fix it.
- */
- if (node1 != node2) {
- pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
- cpu1, node1, cpu2, node2, node2);
-
- numa_remove_cpu(cpu1);
- numa_set_node(cpu1, node2);
- numa_add_cpu(cpu1);
- }
-}
-
static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
{
cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
- check_cpu_siblings_on_same_node(cpu1, cpu2);
}
per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
- check_cpu_siblings_on_same_node(cpu, i);
}
if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpumask_set_cpu(i, cpu_core_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(i));
- check_cpu_siblings_on_same_node(cpu, i);
/*
* Does this new cpu bringup a new core?
*/
return per_cpu(x86_cpu_to_node_map, cpu);
}
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
- int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
- return NULL;
+ return;
}
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
- return NULL;
+ return;
}
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
- return mask;
+ return;
}
# ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
-
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
- int nid, physnid, i;
+ int nid, physnid;
nid = early_cpu_to_node(cpu);
if (nid == NUMA_NO_NODE) {
physnid = emu_nid_to_phys[nid];
- for_each_online_node(i) {
+ for_each_online_node(nid) {
if (emu_nid_to_phys[nid] != physnid)
continue;
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, nid, enable);
}
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif
}
+#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
- unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
/* If there's an existing pte, then don't allow _PAGE_RW to be set */
if (pte_val_ma(*ptep) & _PAGE_PRESENT)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
-#endif
+
+ return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
/*
* If the new pfn is within the range of the newly allocated
return pte;
}
+#endif /* CONFIG_X86_64 */
/* Init-time set_pte while constructing initial pagetables, which
doesn't allow RO pagetable pages to be remapped RW */
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
+ xen_extra_mem_start = max((1ULL << 32), mem_end);
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
int arch_show_interrupts(struct seq_file *p, int prec)
{
- int j;
-
- seq_printf(p, "%*s: ", prec, "NMI");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", nmi_count(j));
- seq_putc(p, '\n');
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
-static void xtensa_irq_mask(struct irq_chip *d)
+static void xtensa_irq_mask(struct irq_data *d)
{
cached_irq_mask &= ~(1 << d->irq);
set_sr (cached_irq_mask, INTENABLE);
}
-static void xtensa_irq_unmask(struct irq_chip *d)
+static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->irq;
set_sr (cached_irq_mask, INTENABLE);
}
-static void xtensa_irq_enable(struct irq_chip *d)
+static void xtensa_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->irq);
xtensa_irq_unmask(d->irq);
}
-static void xtensa_irq_disable(struct irq_chip *d)
+static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d->irq);
variant_irq_disable(d->irq);
}
-static void xtensa_irq_ack(struct irq_chip *d)
+static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->irq, INTCLEAR);
}
-static int xtensa_irq_retrigger(struct irq_chip *d)
+static int xtensa_irq_retrigger(struct irq_data *d)
{
set_sr (1 << d->irq, INTSET);
return 1;
q = container_of(work, struct request_queue, delay_work.work);
spin_lock_irq(q->queue_lock);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
- *
*/
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
- /*
- * Only recurse once to avoid overrunning the stack, let the unplug
- * handling reinvoke the handler shortly if we already got there.
- */
- if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else
- queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ * of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+ if (likely(!blk_queue_stopped(q)))
+ queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
blk_queue_end_tag(q, rq);
add_acct_request(q, rq, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
out_unlock:
spin_unlock_irq(q->queue_lock);
}
plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
+ INIT_LIST_HEAD(&plug->cb_list);
plug->should_sort = 0;
/*
*/
static void queue_unplugged(struct request_queue *q, unsigned int depth,
bool from_schedule)
+ __releases(q->queue_lock)
{
trace_block_unplug(q, depth, !from_schedule);
- __blk_run_queue(q, from_schedule);
- if (q->unplugged_fn)
- q->unplugged_fn(q);
+ /*
+ * If we are punting this to kblockd, then we can safely drop
+ * the queue_lock before waking kblockd (which needs to take
+ * this lock).
+ */
+ if (from_schedule) {
+ spin_unlock(q->queue_lock);
+ blk_run_queue_async(q);
+ } else {
+ __blk_run_queue(q);
+ spin_unlock(q->queue_lock);
+ }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+ LIST_HEAD(callbacks);
+
+ if (list_empty(&plug->cb_list))
+ return;
+
+ list_splice_init(&plug->cb_list, &callbacks);
+
+ while (!list_empty(&callbacks)) {
+ struct blk_plug_cb *cb = list_first_entry(&callbacks,
+ struct blk_plug_cb,
+ list);
+ list_del(&cb->list);
+ cb->callback(cb);
+ }
}
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(plug->magic != PLUG_MAGIC);
+ flush_plug_callbacks(plug);
if (list_empty(&plug->list))
return;
BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
BUG_ON(!rq->q);
if (rq->q != q) {
- if (q) {
+ /*
+ * This drops the queue lock
+ */
+ if (q)
queue_unplugged(q, depth, from_schedule);
- spin_unlock(q->queue_lock);
- }
q = rq->q;
depth = 0;
spin_lock(q->queue_lock);
depth++;
}
- if (q) {
+ /*
+ * This drops the queue lock
+ */
+ if (q)
queue_unplugged(q, depth, from_schedule);
- spin_unlock(q->queue_lock);
- }
local_irq_restore(flags);
}
-EXPORT_SYMBOL(blk_flush_plug_list);
void blk_finish_plug(struct blk_plug *plug)
{
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
__elv_add_request(q, rq, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
/* the queue is stopped so it won't be plugged+unplugged */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
* request_fn may confuse the driver. Always use kblockd.
*/
if (queued)
- __blk_run_queue(q, true);
+ blk_run_queue_async(q);
}
/**
* the comment in flush_end_io().
*/
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
- __blk_run_queue(q, true);
+ blk_run_queue_async(q);
}
/**
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
-/**
- * blk_queue_unplugged - register a callback for an unplug event
- * @q: the request queue for the device
- * @fn: the function to call
- *
- * Some stacked drivers may need to know when IO is dispatched on an
- * unplug event. By registrering a callback here, they will be notified
- * when someone flushes their on-stack queue plug. The function will be
- * called with the queue lock held.
- */
-void blk_queue_unplugged(struct request_queue *q, unplugged_fn *fn)
-{
- q->unplugged_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_unplugged);
-
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_SYNC);
- } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_ASYNC);
- } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
return ret;
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
- if (ret < 0)
+ if (ret < 0) {
+ blk_trace_remove_sysfs(dev);
return ret;
+ }
kobject_uevent(&q->kobj, KOBJ_ADD);
}
/*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
*/
static void
-__call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+ void (*func)(struct io_context *, struct cfq_io_context *))
{
struct cfq_io_context *cic;
struct hlist_node *n;
+ rcu_read_lock();
+
hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
func(ioc, cic);
-}
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
-{
- rcu_read_lock();
- __call_for_each_cic(ioc, func);
rcu_read_unlock();
}
* should be ok to iterate over the known list, we will see all cic's
* since no new ones are added.
*/
- __call_for_each_cic(ioc, cic_free_func);
+ call_for_each_cic(ioc, cic_free_func);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
}
}
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
q->boundary_rq = rq;
}
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
- where == ELEVATOR_INSERT_SORT)
+ (where == ELEVATOR_INSERT_SORT ||
+ where == ELEVATOR_INSERT_SORT_MERGE))
where = ELEVATOR_INSERT_BACK;
switch (where) {
* with anything. There's no point in delaying queue
* processing.
*/
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT_MERGE:
spin_unlock_irq(&ev->lock);
- /* tell userland about new events */
+ /*
+ * Tell userland about new events. Only the events listed in
+ * @disk->events are reported. Unlisted events are processed the
+ * same internally but never get reported to userland.
+ */
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & (1 << i))
+ if (events & disk->events & (1 << i))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
struct agp_memory *new;
unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+ return NULL;
+
new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
if (new == NULL)
return NULL;
int scratch_pages;
struct agp_memory *new;
size_t i;
+ int cur_memory;
if (!bridge)
return NULL;
- if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+ cur_memory = atomic_read(&bridge->current_memory_agp);
+ if ((cur_memory + page_count > bridge->max_memory_agp) ||
+ (cur_memory + page_count < page_count))
return NULL;
if (type >= AGP_USER_TYPES) {
return -EINVAL;
}
- /* AK: could wrap */
- if ((pg_start + mem->page_count) > num_entries)
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
return -EINVAL;
j = pg_start;
{
size_t i;
struct agp_bridge_data *bridge;
- int mask_type;
+ int mask_type, num_entries;
bridge = mem->bridge;
if (!bridge)
if (type != mem->type)
return -EINVAL;
+ num_entries = agp_num_entries();
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
-#if 0
- /*
- * hvc_remove() not called as removing one hvc port
- * results in other hvc ports getting frozen.
- *
- * Once this is resolved in hvc, this functionality
- * will be enabled. Till that is done, the -EPIPE
- * return from get_chars() above will help
- * hvc_console.c to clean up on ports we remove here.
- */
hvc_remove(port->cons.hvc);
-#endif
}
/* Remove unused data this port might have received. */
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
+ err = 0;
}
return err;
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
return false;
+ }
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
return ret;
}
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
- */
- intel_crtc->dpms_mode = -1;
-}
-
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
- .gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = intel_crtc_page_flip,
-};
-
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
intel_disable_pipe(dev_priv, pipe);
}
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+
+ /* We need to fix up any BIOS configuration that conflicts with
+ * our expectations.
+ */
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
-
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
int pipeconf_reg = PIPECONF(pipe);
- int dspcntr_reg = DSPCNTR(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(pipe);
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
if (type < 0)
return connector_status_disconnected;
+ intel_tv->type = type;
intel_tv_find_better_format(connector);
+
return connector_status_connected;
}
*
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
- connector->polled =
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
if (ret)
return ret;
/* For PFIFO and PGRAPH. */
spinlock_t context_switch_lock;
+ /* VM/PRAMIN flush, legacy PRAMIN aperture */
+ spinlock_t vm_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramfc;
OUT_RING (chan, 0);
}
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
FIRE_RING(chan);
mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
ret = 0;
break;
}
dma_bits = 40;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 &&
+ dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
dma_bits = 39;
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
- uint32_t flags;
+ uint32_t flags, ttmpl;
int ret;
- if (nouveau_vram_notify)
+ if (nouveau_vram_notify) {
flags = NOUVEAU_GEM_DOMAIN_VRAM;
- else
+ ttmpl = TTM_PL_FLAG_VRAM;
+ } else {
flags = NOUVEAU_GEM_DOMAIN_GART;
+ ttmpl = TTM_PL_FLAG_TT;
+ }
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, flags);
+ ret = nouveau_bo_pin(ntfy, ttmpl);
if (ret)
goto out_err;
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
u32 val;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return val;
}
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return;
}
be->func->clear(be);
return -EFAULT;
}
+ nvbe->ttm_alloced[nvbe->nr_pages] = false;
}
nvbe->nr_pages++;
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+ if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
align = 512 * 1024;
spin_lock_init(&dev_priv->channels.lock);
spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
+ spin_lock_init(&dev_priv->vm_lock);
/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);
nv50_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
+ unsigned long flags;
u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases
nv_rd32(dev, 0x100c80), engine);
}
}
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- if ((rdev->family == CHIP_R600) ||
- (rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV630) ||
- (rdev->family == CHIP_RV670))
+ if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp = 0;
+ u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
* first display controller
* 0 - first half of lb (3840 * 2)
* 1 - first 3/4 of lb (5760 * 2)
- * 2 - whole lb (7680 * 2)
+ * 2 - whole lb (7680 * 2), other crtc must be disabled
* 3 - first 1/4 of lb (1920 * 2)
* second display controller
* 4 - second half of lb (3840 * 2)
* 5 - second 3/4 of lb (5760 * 2)
- * 6 - whole lb (7680 * 2)
+ * 6 - whole lb (7680 * 2), other crtc must be disabled
* 7 - last 1/4 of lb (1920 * 2)
*/
- if (mode && other_mode) {
- if (mode->hdisplay > other_mode->hdisplay) {
- if (mode->hdisplay > 2560)
- tmp = 1; /* 3/4 */
- else
- tmp = 0; /* 1/2 */
- } else if (other_mode->hdisplay > mode->hdisplay) {
- if (other_mode->hdisplay > 2560)
- tmp = 3; /* 1/4 */
- else
- tmp = 0; /* 1/2 */
- } else
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode)
tmp = 0; /* 1/2 */
- } else if (mode)
- tmp = 2; /* whole */
- else if (other_mode)
- tmp = 3; /* 1/4 */
+ else
+ tmp = 2; /* whole */
+ } else
+ tmp = 0;
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
- switch (tmp) {
- case 0:
- case 4:
- default:
- if (ASIC_IS_DCE5(rdev))
- return 4096 * 2;
- else
- return 3840 * 2;
- case 1:
- case 5:
- if (ASIC_IS_DCE5(rdev))
- return 6144 * 2;
- else
- return 5760 * 2;
- case 2:
- case 6:
- if (ASIC_IS_DCE5(rdev))
- return 8192 * 2;
- else
- return 7680 * 2;
- case 3:
- case 7:
- if (ASIC_IS_DCE5(rdev))
- return 2048 * 2;
- else
- return 1920 * 2;
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
+ case 1:
+ case 5:
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
+ case 2:
+ case 6:
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
+ case 3:
+ case 7:
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
+ }
}
+
+ /* controller not enabled, so no lb used */
+ return 0;
}
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
- goto failed;
+ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
radeon_legacy_backlight_init(radeon_encoder, connector);
}
}
- return;
-
-failed:
- drm_connector_cleanup(connector);
- kfree(connector);
}
if (!radeon_connector->router.ddc_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
if (!radeon_connector->router.cd_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
* A single status register covers multiple attributes,
* so we keep them all together.
*/
- u8 status_bits;
u8 status[PB_NUM_STATUS_REG];
u8 currpage;
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
return CDS_DRIVE_NOT_READY;
}
+/*
+ * ide-cd always generates a media changed event if media is missing, which
+ * makes it impossible to use for proper event reporting, so disk->events
+ * is cleared to 0 and the following function is used only to trigger
+ * revalidation and never propagated to userland.
+ */
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
unsigned int clearing, int slot_nr)
{
return 0;
}
+ /*
+ * The following is used to force revalidation on the first open on
+ * removable devices, and never gets reported to userland as
+ * genhd->events is 0. This is intended because removable IDE disks
+ * can't really detect MEDIA_CHANGE events.
+ */
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
};
struct evdev_client {
- int head;
- int tail;
+ unsigned int head;
+ unsigned int tail;
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int bufsize;
+ unsigned int bufsize;
struct input_event buffer[];
};
static void evdev_pass_event(struct evdev_client *client,
struct input_event *event)
{
- /*
- * Interrupts are disabled, just acquire the lock.
- * Make sure we don't leave with the client buffer
- * "empty" by having client->head == client->tail.
- */
+ /* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
- do {
- client->buffer[client->head++] = *event;
- client->head &= client->bufsize - 1;
- } while (client->head == client->tail);
+
+ client->buffer[client->head++] = *event;
+ client->head &= client->bufsize - 1;
+
+ if (unlikely(client->head == client->tail)) {
+ /*
+ * This effectively "drops" all unconsumed events, leaving
+ * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+ */
+ client->tail = (client->head - 2) & (client->bufsize - 1);
+
+ client->buffer[client->tail].time = event->time;
+ client->buffer[client->tail].type = EV_SYN;
+ client->buffer[client->tail].code = SYN_DROPPED;
+ client->buffer[client->tail].value = 0;
+ }
+
spin_unlock(&client->buffer_lock);
if (event->type == EV_SYN)
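For reference, the SYN_DROPPED handling added here relies on cooperation from user space: a reader that sees EV_SYN/SYN_DROPPED should discard the partial event packet it has buffered, skip input until the next EV_SYN/SYN_REPORT, and then re-query device state (EVIOCGKEY, EVIOCGABS and friends). A minimal sketch of such a read loop, with a hypothetical device path and no real error handling:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int resyncing = 0;
	int fd = open("/dev/input/event0", O_RDONLY);	/* example device */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			resyncing = 1;	/* queue overflowed; buffered state is stale */
			continue;
		}
		if (resyncing) {
			/* drop everything up to the packet boundary, then
			 * re-read key/abs state via the EVIOCG* ioctls */
			if (ev.type == EV_SYN && ev.code == SYN_REPORT)
				resyncing = 0;
			continue;
		}
		printf("type %hu code %hu value %d\n", ev.type, ev.code, ev.value);
	}
	close(fd);
	return 0;
}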
}
EXPORT_SYMBOL(input_set_capability);
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+ int mt_slots;
+ int i;
+ unsigned int events;
+
+ if (dev->mtsize) {
+ mt_slots = dev->mtsize;
+ } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+ mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+  dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+ mt_slots = clamp(mt_slots, 2, 32);
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ mt_slots = 2;
+ } else {
+ mt_slots = 0;
+ }
+
+ events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
+ }
+
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+
+ return events;
+}
+
#define INPUT_CLEANSE_BITMASK(dev, type, bits) \
do { \
if (!test_bit(EV_##type, dev->evbit)) \
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
+ if (!dev->hint_events_per_packet)
+ dev->hint_events_per_packet =
+ input_estimate_events_per_packet(dev);
+
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
static int __devinit twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+ const struct matrix_keymap_data *keymap_data;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
int error;
- if (!pdata || !pdata->rows || !pdata->cols ||
+ if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
dev_err(&pdev->dev, "Invalid platform_data\n");
return -EINVAL;
}
+ keymap_data = pdata->keymap_data;
+
kp = kzalloc(sizeof(*kp), GFP_KERNEL);
input = input_allocate_device();
if (!kp || !input) {
enum xenbus_state backend_state)
{
struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
- int val;
+ int ret, val;
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitWait:
InitWait:
+ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-abs-pointer", "%d", &val);
+ if (ret < 0)
+ val = 0;
+ if (val) {
+ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+ "request-abs-pointer", "1");
+ if (ret)
+ pr_warning("xenkbd: can't request abs-pointer");
+ }
+
xenbus_switch_state(dev, XenbusStateConnected);
break;
IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
err = -EBUSY;
- goto fail2;
+ goto fail1;
}
if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
err = -EBUSY;
- goto fail3;
+ goto fail2;
}
serio_set_drvdata(serio, ts);
err = serio_open(serio, drv);
if (err)
- return err;
+ goto fail3;
//h3600_flite_control(1, 25); /* default brightness */
- input_register_device(ts->dev);
+ err = input_register_device(ts->dev);
+ if (err)
+ goto fail4;
return 0;
-fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4: serio_close(serio);
+fail3: serio_set_drvdata(serio, NULL);
+ free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: serio_set_drvdata(serio, NULL);
- input_free_device(input_dev);
+fail1: input_free_device(input_dev);
kfree(ts);
return err;
}
return md_raid5_congested(&rs->md, bits);
}
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
- struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
- md_raid5_kick_device(rs->md.private);
-}
-
/*
* Construct a RAID4/5/6 mapping:
* Args:
}
rs->callbacks.congested_fn = raid_is_congested;
- rs->callbacks.unplug_fn = raid_unplug;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
return 0;
/* Support for plugging.
* This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on. This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
*/
-static void plugger_work(struct work_struct *work)
-{
- struct plug_handle *plug =
- container_of(work, struct plug_handle, unplug_work);
- plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
- struct plug_handle *plug = (void *)data;
- kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
- void (*unplug_fn)(struct plug_handle *))
-{
- plug->unplug_flag = 0;
- plug->unplug_fn = unplug_fn;
- init_timer(&plug->unplug_timer);
- plug->unplug_timer.function = plugger_timeout;
- plug->unplug_timer.data = (unsigned long)plug;
- INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+ struct blk_plug_cb cb;
+ mddev_t *mddev;
+};
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
{
- if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
- mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+ struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+ if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+ md_wakeup_thread(mdcb->mddev->thread);
+ kfree(mdcb);
}
-EXPORT_SYMBOL_GPL(plugger_set_plug);
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
{
- if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
- del_timer(&plug->unplug_timer);
- return 1;
- } else
+ struct blk_plug *plug = current->plug;
+ struct md_plug_cb *mdcb;
+
+ if (!plug)
return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
+ list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+ if (mdcb->cb.callback == plugger_unplug &&
+ mdcb->mddev == mddev) {
+ /* Already on the list, move to top */
+ if (mdcb != list_first_entry(&plug->cb_list,
+ struct md_plug_cb,
+ cb.list))
+ list_move(&mdcb->cb.list, &plug->cb_list);
+ return 1;
+ }
+ }
+ /* Not currently on the callback list */
+ mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+ if (!mdcb)
+ return 0;
+
+ mdcb->mddev = mddev;
+ mdcb->cb.callback = plugger_unplug;
+ atomic_inc(&mddev->plug_cnt);
+ list_add(&mdcb->cb.list, &plug->cb_list);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
static inline mddev_t *mddev_get(mddev_t *mddev)
{
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
+ atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
- mddev->plug = NULL;
}
static void __md_stop_writes(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_allow_write);
-void md_unplug(mddev_t *mddev)
-{
- if (mddev->plug)
- mddev->plug->unplug_fn(mddev->plug);
-}
-
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(mddev_t *mddev)
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
- void (*unplug_fn)(struct plug_handle *);
- struct timer_list unplug_timer;
- struct work_struct unplug_work;
- unsigned long unplug_flag;
-};
-#define PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
- void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
- del_timer_sync(&plug->unplug_timer);
- cancel_work_sync(&plug->unplug_work);
-}
-
/*
* MD's 'extended' device
*/
int delta_disks, new_level, new_layout;
int new_chunk_sectors;
+ atomic_t plug_cnt; /* If device is expecting
+ * more bios soon.
+ */
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
sector_t curr_resync; /* last block scheduled */
struct list_head all_mddevs;
struct attribute_group *to_remove;
- struct plug_handle *plug; /* if used by personality */
struct bio_set *bio_set;
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
#endif /* _MD_MD_H */
spin_unlock_irq(&conf->device_lock);
}
-static void md_kick_device(mddev_t *mddev)
-{
- blk_flush_plug(current);
- md_wakeup_thread(mddev->thread);
-}
-
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
/* block any new IO from starting */
conf->barrier++;
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
spin_unlock_irq(&conf->resync_lock);
}
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- md_kick_device(conf->mddev));
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
wait_event_lock_irq(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
- ({ flush_pending_writes(conf);
- md_kick_device(conf->mddev); }));
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
mdk_rdev_t *blocked_rdev;
+ int plugged;
/*
* Register the new request and wait if the reconstruction
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
*/
+ plugged = mddev_check_plugged(mddev);
+
disks = conf->raid_disks;
retry_write:
blocked_rdev = NULL;
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync || !bitmap)
+ if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
return 0;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
mdk_rdev_t *rdev;
+ struct blk_plug plug;
md_check_recovery(mddev);
-
+
+ blk_start_plug(&plug);
for (;;) {
char b[BDEVNAME_SIZE];
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
}
cond_resched();
}
+ blk_finish_plug(&plug);
}
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
spin_unlock_irq(&conf->device_lock);
}
-static void md_kick_device(mddev_t *mddev)
-{
- blk_flush_plug(current);
- md_wakeup_thread(mddev->thread);
-}
-
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
/* block any new IO from starting */
conf->barrier++;
- /* No wait for all pending IO to complete */
+ /* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
spin_unlock_irq(&conf->resync_lock);
}
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- md_kick_device(conf->mddev));
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
wait_event_lock_irq(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
- ({ flush_pending_writes(conf);
- md_kick_device(conf->mddev); }));
+ flush_pending_writes(conf));
+
spin_unlock_irq(&conf->resync_lock);
}
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
mdk_rdev_t *blocked_rdev;
+ int plugged;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
*/
+ plugged = mddev_check_plugged(mddev);
+
raid10_find_phys(conf, r10_bio);
retry_write:
blocked_rdev = NULL;
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync || !mddev->bitmap)
+ if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
-
return 0;
}
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
mdk_rdev_t *rdev;
+ struct blk_plug plug;
md_check_recovery(mddev);
+ blk_start_plug(&plug);
for (;;) {
char b[BDEVNAME_SIZE];
}
cond_resched();
}
+ blk_finish_plug(&plug);
}
*
* We group bitmap updates into batches. Each batch has a number.
* We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
* new additions.
* When we discover that we will need to write to any block in a stripe
* (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
* When we are ready to do a write, if that batch hasn't been written yet,
* we plug the array and queue the stripe for later.
* When an unplug happens, we increment bm_flush, thus closing the current
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state)) {
+ if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
- plugger_set_plug(&conf->plug);
- } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
- sh->bm_seq - conf->seq_write > 0) {
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
- plugger_set_plug(&conf->plug);
- } else {
+ else {
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- md_raid5_kick_device(conf));
+ );
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
conf->device_lock,
- blk_flush_plug(current));
+ );
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->hold_list);
}
- } else
- plugger_set_plug(&conf->plug);
+ }
}
static void activate_bit_delay(raid5_conf_t *conf)
}
}
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
- blk_flush_plug(current);
- raid5_activate_delayed(conf);
- md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
- raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
- md_raid5_kick_device(conf);
-}
-
int md_raid5_congested(mddev_t *mddev, int bits)
{
raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
+ int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+ plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int disks, data_disks;
* add failed due to overlap. Flush everything
* and wait a while
*/
- md_raid5_kick_device(conf);
+ md_wakeup_thread(mddev->thread);
release_stripe(sh);
schedule();
goto retry;
}
}
+ if (!plugged)
+ md_wakeup_thread(mddev->thread);
+
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
spin_unlock_irq(&conf->device_lock);
struct stripe_head *sh;
raid5_conf_t *conf = mddev->private;
int handled;
+ struct blk_plug plug;
pr_debug("+++ raid5d active\n");
md_check_recovery(mddev);
+ blk_start_plug(&plug);
handled = 0;
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- if (conf->seq_flush != conf->seq_write) {
- int seq = conf->seq_flush;
+ if (atomic_read(&mddev->plug_cnt) == 0 &&
+ !list_empty(&conf->bitmap_list)) {
+ /* Now is a good time to flush some bitmap updates */
+ conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
- conf->seq_write = seq;
+ conf->seq_write = conf->seq_flush;
activate_bit_delay(conf);
}
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ raid5_activate_delayed(conf);
while ((bio = remove_bio_from_retry(conf))) {
int ok;
spin_unlock_irq(&conf->device_lock);
async_tx_issue_pending_all();
+ blk_finish_plug(&plug);
pr_debug("--- raid5d inactive\n");
}
mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- plugger_init(&conf->plug, raid5_unplug);
- mddev->plug = &conf->plug;
if (mddev->queue) {
int chunk_size;
/* read-ahead size must cover two whole stripes, which
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
mddev->thread = NULL;
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
- plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = NULL;
mddev->to_remove = &raid5_attrs_group;
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
{
struct raid0_private_data *raid0_priv = mddev->private;
+ sector_t sectors;
/* for raid0 takeover only one zone is supported */
if (raid0_priv->nr_strip_zones > 1) {
return ERR_PTR(-EINVAL);
}
+ sectors = raid0_priv->strip_zone[0].zone_end;
+ sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+ mddev->dev_sectors = sectors;
mddev->new_level = level;
mddev->new_layout = ALGORITHM_PARITY_N;
mddev->new_chunk_sectors = mddev->chunk_sectors;
* Cleared when a sync completes.
*/
- struct plug_handle plug;
-
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
retval = remap_pfn_range(vma, vma->vm_start,
- PFN_DOWN(virt_to_phys(mem->vaddr)),
+ mem->dma_handle >> PAGE_SHIFT,
size, vma->vm_page_prot);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
* execution context (driver/bios) must match.
*/
static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (fwhdr.exec != drv_fwhdr->exec)
+ if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
+ u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ boot_env = BFI_BOOT_LOADER_OS;
+
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
- false : bfa_ioc_fwver_valid(ioc);
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
- u32 boot_param)
+ u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
/*
* Set boot type and boot param at the end.
*/
- writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
- writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_PARAM_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_LOADER_OFF)));
}
static void
* as the entry vector.
*/
static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
- if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_param);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU
bool msix);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_start) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
bfa_nw_ioc_hw_sem_release(ioc);
}
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
/**
* Synchronized IOC failure processing routines
*/
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_PARAM_OFF 12
+#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
+#define BFI_BOOT_LOADER_OS 0
+
#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, SPEED_1000);
+ LED_MODE_ON, SPEED_1000);
else
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OFF, 0);
+ LED_MODE_FRONT_PANEL_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
break;
}
- if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
- bp->link_vars.line_speed);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER, bp->link_vars.line_speed);
return 0;
}
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
- tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+ tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
*/
rlb_choose_channel(skb, bond);
- /* The ARP relpy packets must be delayed so that
+ /* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
* gave this entry index.
*/
u32 tx_bytes; /* Each Client accumulates the BytesTx that
- * were tranmitted to it, and after each
+ * were transmitted to it, and after each
* CallBack the LoadHistory is divided
* by the balance interval
*/
};
struct alb_bond_info {
- struct timer_list alb_timer;
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
struct slave *next_rx_slave;/* next slave to be assigned
* to a new rx client for
*/
- u32 rlb_interval_counter;
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
if (!ofdev->dev.of_match)
return -EINVAL;
- data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
base = of_iomap(np, 0);
if (!base) {
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL;
+ | NETIF_F_NETNS_LOCAL
+ | NETIF_F_VLAN_CHALLENGED;
dev->ethtool_ops = &loopback_ethtool_ops;
 dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;
prev_eedata = eedata;
}
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
dev->base_addr = (unsigned long __force) ioaddr;
dev->irq = irq;
#define MAX_NUM_CARDS 4
-#define MAX_BUFFERS_PER_CMD 32
+#define NETXEN_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
*/
struct netxen_cmd_buffer {
struct sk_buff *skb;
- struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += frag->size;
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
+ int delta = 0;
int i, k;
u32 producer;
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
+ if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
+ rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.addr)) + index;
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
}
/* See if an event is present
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ++channel->magic_count;
+ ; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
return spent;
}
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
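The read-pointer handling above switches to the usual free-running-counter ring-buffer idiom: the counter only ever increments and is reduced modulo the power-of-two queue size at the point where it indexes an entry or is written to the hardware RPTR register, so distance calculations on the raw counters need no wrap-around special cases. A generic sketch of the idiom (plain C, not sfc code):

#include <stdint.h>

#define RING_SIZE 256u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	uint32_t buf[RING_SIZE];
	unsigned int head;		/* free-running producer counter */
	unsigned int tail;		/* free-running consumer counter */
};

/* Occupied entries; unsigned subtraction stays correct across wrap-around. */
static unsigned int ring_fill(const struct ring *r)
{
	return r->head - r->tail;
}

static int ring_push(struct ring *r, uint32_t v)
{
	if (ring_fill(r) == RING_SIZE)
		return -1;			/* full */
	r->buf[r->head & RING_MASK] = v;	/* mask only when indexing */
	r->head++;
	return 0;
}

static int ring_pop(struct ring *r, uint32_t *v)
{
	if (ring_fill(r) == 0)
		return -1;			/* empty */
	*v = r->buf[r->tail & RING_MASK];
	r->tail++;
	return 0;
}

Because the fill level is head minus tail on unsigned counters, the full and empty cases stay distinguishable without sacrificing a slot.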
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_channel *channel;
-
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
efx->last_irq_cpu = -1;
smp_wmb();
- /* ACK each interrupting event queue. Receiving an interrupt due to
- * traffic before a test event is raised is considered a pass */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- if (efx->last_irq_cpu >= 0)
- goto success;
- }
-
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
- unsigned int magic_count, count;
+ unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- magic_count = channel->magic_count;
+ read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
do {
schedule_timeout_uninterruptible(HZ / 100);
- if (channel->work_pending)
- efx_process_channel_now(channel);
-
- if (channel->magic_count != magic_count)
+ if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
}
/* Check to see if event was received even if interrupt wasn't */
- efx_process_channel_now(channel);
- if (channel->magic_count != magic_count) {
+ if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
+ netif_tx_wake_all_queues(efx->net_dev);
+
return rc_test;
}
* queue state. */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled)) {
+ likely(efx->port_enabled) &&
+ likely(!efx->port_inhibited)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
return 1;
}
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
outb(0x09 + i, 0x70);
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr.
+ * @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
}
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
/* enable packet filtering */
outl(rfcrSave | RFEN, rfcr + ioaddr);
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
outl(EEDONE, ee_addr);
return 1;
} else {
#undef DWMAC_DMA_DEBUG
#ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
#else
-#define DBG(fmt, args...) do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
- DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
#ifdef DWMAC_DMA_DEBUG
/* It displays the DMA process states (CSR5 register) */
show_tx_process_state(intr_status);
#endif
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(INFO, "transmit underflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
ret = tx_hard_error_bump_tc;
x->tx_undeflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(INFO, "transmit jabber\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
x->tx_jabber_irq++;
}
if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(INFO, "recv overflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
x->rx_overflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(INFO, "receive buffer unavailable\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
x->rx_buf_unav_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(INFO, "receive process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
x->rx_process_stopped_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(INFO, "receive watchdog\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
x->rx_watchdog_irq++;
}
if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(INFO, "transmit early interrupt\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
x->tx_early_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(INFO, "transmit process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
x->tx_process_stopped_irq++;
ret = tx_hard_error;
}
if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(INFO, "fatal bus error\n");
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
x->fatal_bus_error_irq++;
ret = tx_hard_error;
}
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
- DBG(INFO, "\n\n");
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
return ret;
}
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
- stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
}
stmmac_verify_args();
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- return ret;
- }
-
- /* Request the IRQ lines */
- ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
- return ret;
- }
-
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
} else
priv->tm->enable = 1;
#endif
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ goto open_error;
+ }
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
- priv->dma_tx_phy,
- priv->dma_rx_phy) < 0)) {
-
+ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+ priv->dma_tx_phy, priv->dma_rx_phy);
+ if (ret < 0) {
pr_err("%s: DMA initialization failed\n", __func__);
- return -1;
+ goto open_error;
}
/* Copy the MAC addr into the HW */
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_enable_mac(priv->ioaddr);
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
+
return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+ kfree(priv->tm);
+#endif
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ return ret;
}
/**
/*
* The NIC has told us that a packet has been downloaded onto the card, we must
* find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
*/
static void xl_dn_comp(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
}
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
- &hif_dev->udev->dev, hif_dev->device_id,
+ &interface->dev, hif_dev->device_id,
hif_dev->udev->product, id->driver_info);
if (ret) {
ret = -EINVAL;
#endif
static struct usb_driver ath9k_hif_usb_driver = {
- .name = "ath9k_hif_usb",
+ .name = KBUILD_MODNAME,
.probe = ath9k_hif_usb_probe,
.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
- ath9k_hw_abortpcurecv(ah);
- if (!ath9k_hw_stopdmarecv(ah)) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to stop receive dma\n");
- bChannelChange = false;
- }
- }
-
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
#define AH_RX_TIME_QUANTUM 100 /* usec */
struct ath_common *common = ath9k_hw_common(ah);
+ u32 mac_status, last_mac_status = 0;
int i;
+ /* Enable access to the DMA observation bus */
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
REG_WRITE(ah, AR_CR, AR_CR_RXD);
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
break;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+ *reset = true;
+ break;
+ }
+
+ last_mac_status = mac_status;
+ }
+
udelay(AH_TIME_QUANTUM);
}
if (i == 0) {
ath_err(common,
- "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
- REG_READ(ah, AR_DIAG_SW));
+ REG_READ(ah, AR_DIAG_SW),
+ REG_READ(ah, AR_DMADBG_7));
return false;
} else {
return true;
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */
ath9k_calculate_iter_data(hw, vif, &iter_data);
- ath9k_ps_wakeup(sc);
/* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
}
ath9k_hw_set_interrupts(ah, ah->imask);
- ath9k_ps_restore(sc);
/* Set up ANI */
if ((iter_data.naps + iter_data.nadhocs) > 0) {
struct ath_vif *avp = (void *)vif->drv_priv;
int ret = 0;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
switch (vif->type) {
ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
/* See if new interface type is valid. */
if ((new_type == NL80211_IFTYPE_ADHOC) &&
ath9k_do_vif_add_setup(hw, vif);
out:
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
return ret;
}
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
sc->nvifs--;
ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static void ath9k_enable_ps(struct ath_softc *sc)
txq = sc->tx.txq_map[queue];
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
int slottime;
int error;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_BSSID) {
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- bool stopped;
+ bool stopped, reset = false;
spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
- stopped = ath9k_hw_stopdmarecv(ah);
+ stopped = ath9k_hw_stopdmarecv(ah, &reset);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
"confusing the DMA engine when we start RX up\n");
ATH_DBG_WARN_ON_ONCE(!stopped);
}
- return stopped;
+ return stopped || reset;
}
void ath_flushrecv(struct ath_softc *sc)
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
+ {APL7_FCCA, CTL_FCC, CTL_FCC},
{APL1_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_APLD, CTL_FCC, NO_CTL},
config IWLWIFI_LEGACY
- tristate "Intel Wireless Wifi legacy devices"
- depends on PCI && MAC80211
+ tristate
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
This option enables support for
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
Select to build the driver supporting the:
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
-#define IWL_DEFAULT_TX_POWER 0x0F
-
/*
* EEPROM related constants, enums, and structures.
*/
#define IWL4965_DEFAULT_TX_RETRY 15
-/* Limit range of txpower output target to be between these values */
-#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
-
/* EEPROM */
#define IWL4965_FIRST_AMPDU_QUEUE 10
struct ieee80211_channel *geo_ch;
struct ieee80211_rate *rates;
int i = 0;
+ s8 max_tx_power = 0;
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
geo_ch->flags |= ch->ht40_extension_channel;
- if (ch->max_power_avg > priv->tx_power_device_lmt)
- priv->tx_power_device_lmt = ch->max_power_avg;
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
} else {
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
}
geo_ch->flags);
}
+ priv->tx_power_device_lmt = max_tx_power;
+ priv->tx_power_user_lmt = max_tx_power;
+ priv->tx_power_next = max_tx_power;
+
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & IWL_SKU_A) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
if (!priv->cfg->ops->lib->send_tx_power)
return -EOPNOTSUPP;
- if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+ /* 0 dBm mean 1 milliwatt */
+ if (tx_power < 0) {
IWL_WARN(priv,
- "Requested user TXPOWER %d below lower limit %d.\n",
- tx_power,
- IWL4965_TX_POWER_TARGET_POWER_MIN);
+ "Requested user TXPOWER %d below 1 mW.\n",
+ tx_power);
return -EINVAL;
}
flags & EEPROM_CHANNEL_RADAR))
? "" : "not ");
- /* Set the tx_power_user_lmt to the highest power
- * supported by any channel */
- if (eeprom_ch_info[ch].max_power_avg >
- priv->tx_power_user_lmt)
- priv->tx_power_user_lmt =
- eeprom_ch_info[ch].max_power_avg;
-
ch_info++;
}
}
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
- priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
eeprom->version);
iwl_legacy_init_scan_params(priv);
- /* Set the tx_power_user_lmt to the lowest power level
- * this value will get overwritten by channel max power avg
- * from eeprom */
- priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
- priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
-
ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
+ /* at least EEPROM 0x11A has wrong info */
+ .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
+ .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};
struct mwl8k_priv {
struct ieee80211_hw *hw;
struct pci_dev *pdev;
+ int irq;
struct mwl8k_device_info *device_info;
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
+ priv->irq = -1;
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
+ priv->irq = priv->pdev->irq;
/* Enable TX reclaim and RX tasklets. */
tasklet_enable(&priv->poll_tx_task);
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
}
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- free_irq(priv->pdev->irq, hw);
+ if (priv->irq != -1) {
+ free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
+ }
/* Stop finalize join worker */
cancel_work_sync(&priv->finalize_join_worker);
struct p54_tx_info *p54info;
struct p54_hdr *hdr;
struct p54_tx_data *txhdr;
- unsigned int padding, len, extra_len;
+ unsigned int padding, len, extra_len = 0;
int i, j, ridx;
u16 hdr_flags = 0, aid = 0;
u8 rate, queue = 0, crypt_offset = 0;
const struct parport_pc_via_data *via)
{
short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
- struct resource *base_res;
u32 ite8872set;
u32 ite8872_lpt, ite8872_lpthi;
u8 ite8872_irq, type;
/* make sure which one chip */
for (i = 0; i < 5; i++) {
- base_res = request_region(inta_addr[i], 32, "it887x");
- if (base_res) {
+ if (request_region(inta_addr[i], 32, "it887x")) {
int test;
pci_write_config_dword(pdev, 0x60,
0xe5000000 | inta_addr[i]);
test = inb(inta_addr[i]);
if (test != 0xff)
break;
- release_region(inta_addr[i], 0x8);
+ release_region(inta_addr[i], 32);
}
}
if (i >= 5) {
/*
* Release the resource so that parport_pc_probe_port can get it.
*/
- release_resource(base_res);
+ release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
printk(KERN_INFO
depends on HOTPLUG
default y
-select NLS if (DMI || ACPI)
+config PCI_LABEL
+ def_bool y if (DMI || ACPI)
+ select NLS
# ACPI Related PCI FW Functions
# ACPI _DSM provided firmware instance and string name
#
-obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o
+obj-$(CONFIG_ACPI) += pci-acpi.o
# SMBIOS provided firmware instance and labels
-obj-$(CONFIG_DMI) += pci-label.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
{
struct pci_dev *pdev = NULL;
struct iova *iova;
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve IOAPIC range failed\n");
+ return -ENODEV;
+ }
/* Reserve all PCI MMIO to avoid peer-to-peer access */
for_each_pci_dev(pdev) {
iova = reserve_iova(&reserved_iova_list,
IOVA_PFN(r->start),
IOVA_PFN(r->end));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve iova failed\n");
+ return -ENODEV;
+ }
}
}
-
+ return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
ret = iommu_attach_domain(domain, iommu);
if (ret) {
- domain_exit(domain);
+ free_domain_mem(domain);
goto error;
}
return 0;
}
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
* enable translation
*/
for_each_drhd_unit(drhd) {
- if (drhd->ignored)
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(drhd->iommu);
continue;
+ }
iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
if (!domain)
return 0;
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
domain_remove_one_dev_info(domain, pdev);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ }
+
return 0;
}
if (no_iommu || dmar_disabled)
return -ENODEV;
- iommu_init_mempool();
- dmar_init_reserved_ranges();
+ if (iommu_init_mempool()) {
+ if (force_on)
+ panic("tboot: Failed to initialize iommu memory\n");
+ return -ENODEV;
+ }
+
+ if (dmar_init_reserved_ranges()) {
+ if (force_on)
+ panic("tboot: Failed to reserve iommu ranges\n");
+ return -ENODEV;
+ }
init_no_remapping_devices();
- ret = init_dmars();
+ ret = init_dmars(force_on);
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
domain->iommu_count--;
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
- free_pgtable_page(dmar_domain->pgd);
dmar_domain->pgd = (struct dma_pte *)
phys_to_virt(dma_pte_addr(pte));
+ free_pgtable_page(pte);
}
dmar_domain->agaw--;
}
return 0;
fail2:
- free_irq(omap_rtc_timer, NULL);
+ free_irq(omap_rtc_timer, rtc);
fail1:
rtc_device_unregister(rtc);
fail0:
list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
- int flagset;
-
/*
* As long as shost is accepting commands and we have
* starved queues, call blk_run_queue. scsi_request_fn
continue;
}
- spin_unlock(shost->host_lock);
-
- spin_lock(sdev->request_queue->queue_lock);
- flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER,
- &sdev->request_queue->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue, false);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
- spin_unlock(sdev->request_queue->queue_lock);
-
- spin_lock(shost->host_lock);
+ blk_run_queue_async(sdev->request_queue);
}
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- int flagset;
- unsigned long flags;
-
if (!rport->rqst_q)
return;
+ /*
+ * This get/put dance makes no sense
+ */
get_device(&rport->dev);
-
- spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
- flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q, false);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+ blk_run_queue_async(rport->rqst_q);
put_device(&rport->dev);
}
-
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
* @q: rport request queue
if ((gsm->control & ~PF) == UI)
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
- /* generate final CRC with received FCS */
- gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+ if (gsm->encoding == 0){
+ /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
+ In this case it contains the last piece of data
+ required to generate the final CRC */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+ }
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
if (debug & 4)
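
The n_gsm hunk above folds gsm->received_fcs into the running FCS only when gsm->encoding == 0, relying on the usual CRC property that running the checksum over a payload plus its own (complemented) FCS byte always lands on one fixed residue, which is what the GOOD_FCS comparison checks. A minimal standalone sketch of that property, assuming an illustrative CRC-8 polynomial and the complement-on-transmit convention rather than the driver's actual gsm_fcs_add() table or constants:

    /* Toy CRC-8, MSB-first; POLY and INIT are illustrative assumptions,
     * not the table from n_gsm.c. */
    #include <stdio.h>
    #include <stdint.h>

    #define POLY 0x07
    #define INIT 0xff

    static uint8_t crc8_step(uint8_t crc, uint8_t byte)
    {
            crc ^= byte;
            for (int i = 0; i < 8; i++)
                    crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ POLY)
                                       : (uint8_t)(crc << 1);
            return crc;
    }

    int main(void)
    {
            const uint8_t frames[2][4] = { { 1, 2, 3, 4 }, { 9, 8, 7, 6 } };

            for (int f = 0; f < 2; f++) {
                    uint8_t fcs = INIT;

                    for (int i = 0; i < 4; i++)
                            fcs = crc8_step(fcs, frames[f][i]);
                    /* the sender transmits ~fcs; the receiver, having computed
                     * the same fcs over the payload, folds that byte back in */
                    printf("frame %d residue 0x%02x\n", f,
                           crc8_step(fcs, (uint8_t)~fcs)); /* same value both times */
            }
            return 0;
    }

Both frames print the same residue because fcs ^ ~fcs is always 0xff, so the final division steps no longer depend on the payload; the driver's comparison against GOOD_FCS is the table-driven form of the same check.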
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
+ unsigned int val;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
writel(USR1_RTSD, sport->port.membase + USR1);
+ val = readl(sport->port.membase + USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, !!val);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
static void virtio_pci_release_dev(struct device *_d)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ struct virtio_device *dev = container_of(_d, struct virtio_device,
+ dev);
struct virtio_pci_device *vp_dev = to_vp_device(dev);
- struct pci_dev *pci_dev = vp_dev->pci_dev;
- vp_del_vqs(dev);
- pci_set_drvdata(pci_dev, NULL);
- pci_iounmap(pci_dev, vp_dev->ioaddr);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
kfree(vp_dev);
}
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
+
+ vp_del_vqs(&vp_dev->vdev);
+ pci_set_drvdata(pci_dev, NULL);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
+ vq->vring.avail->idx--;
END_USE(vq);
return buf;
}
if (value) {
acl = posix_acl_from_xattr(value, size);
- if (acl == NULL) {
- value = NULL;
- size = 0;
+ if (acl) {
+ ret = posix_acl_valid(acl);
+ if (ret)
+ goto out;
} else if (IS_ERR(acl)) {
return PTR_ERR(acl);
}
}
ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
posix_acl_release(acl);
return ret;
*/
unsigned long reservation_progress;
- int full; /* indicates that we cannot allocate any more
+ int full:1; /* indicates that we cannot allocate any more
chunks for this space */
+ int chunk_alloc:1; /* set if we are allocating a chunk */
+
int force_alloc; /* set if we need to force a chunk alloc for
this space */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
btrfs_destroy_pinned_extent(root,
root->fs_info->pinned_extents);
- t->use_count = 0;
+ atomic_set(&t->use_count, 0);
list_del_init(&t->list);
memset(t, 0, sizeof(*t));
kmem_cache_free(btrfs_transaction_cachep, t);
#include "locking.h"
#include "free-space-cache.h"
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one
+ * if we have very few chunks already allocated. This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+ CHUNK_ALLOC_NO_FORCE = 0,
+ CHUNK_ALLOC_FORCE = 1,
+ CHUNK_ALLOC_LIMITED = 2,
+};
+
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
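
The CHUNK_ALLOC_* levels defined above are consumed by should_alloc_chunk() further down in this diff. As a rough, self-contained sketch of how the three levels change the decision, where the 64MB, 256MB, 1% and 80% figures mirror the patch but the helper name and everything else is a stand-in rather than btrfs code:

    #include <stdint.h>
    #include <stdbool.h>

    enum { CHUNK_ALLOC_NO_FORCE, CHUNK_ALLOC_FORCE, CHUNK_ALLOC_LIMITED };

    /* total:     bytes already carved into chunks of this type
     * allocated: bytes of those chunks actually used or reserved
     * request:   size of the allocation that triggered the check */
    static bool want_new_chunk(uint64_t total, uint64_t allocated,
                               uint64_t request, int force)
    {
            uint64_t limited_thresh = total / 100;          /* ~1% of the space */

            if (limited_thresh < 64ULL << 20)
                    limited_thresh = 64ULL << 20;           /* but at least 64MB */

            if (force == CHUNK_ALLOC_FORCE)
                    return true;                            /* caller insists */
            if (force == CHUNK_ALLOC_LIMITED &&
                total - allocated < limited_thresh)
                    return true;                            /* keep a small free pool */

            /* no force: only allocate when the existing chunks are fairly full */
            if (allocated + request + (256ULL << 20) < total)
                    return false;                           /* >256MB of slack left */
            if (allocated + request < total * 8 / 10)
                    return false;                           /* under 80% utilized */
            return true;
    }

In the patch itself, find_free_extent() asks with CHUNK_ALLOC_LIMITED while the retry and read-only block-group paths keep CHUNK_ALLOC_FORCE, which is what stops every allocation from forcing a new chunk.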
found->bytes_readonly = 0;
found->bytes_may_use = 0;
found->full = 0;
- found->force_alloc = 0;
+ found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ found->chunk_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
atomic_set(&found->caching_threads, 0);
if (!data_sinfo->full && alloc_chunk) {
u64 alloc_target;
- data_sinfo->force_alloc = 1;
+ data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
spin_unlock(&data_sinfo->lock);
alloc:
alloc_target = btrfs_get_alloc_profile(root, 1);
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
bytes + 2 * 1024 * 1024,
- alloc_target, 0);
+ alloc_target,
+ CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
if (ret < 0) {
if (ret != -ENOSPC)
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
- found->force_alloc = 1;
+ found->force_alloc = CHUNK_ALLOC_FORCE;
}
rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_root *root,
- struct btrfs_space_info *sinfo, u64 alloc_bytes)
+ struct btrfs_space_info *sinfo, u64 alloc_bytes,
+ int force)
{
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+ u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
u64 thresh;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+ if (force == CHUNK_ALLOC_FORCE)
+ return 1;
+
+ /*
+ * in limited mode, we want to have some free space up to
+ * about 1% of the FS size.
+ */
+ if (force == CHUNK_ALLOC_LIMITED) {
+ thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+ thresh = max_t(u64, 64 * 1024 * 1024,
+ div_factor_fine(thresh, 1));
+
+ if (num_bytes - num_allocated < thresh)
+ return 1;
+ }
+
+ /*
+ * we have two similar checks here, one based on a percentage
+ * and one based on a hard number of 256MB. The idea
+ * is that if we have a good amount of free
+ * room, don't allocate a chunk. A good amount is
+ * less than 80% utilization of the chunks we have allocated,
+ * or more than 256MB free
+ */
+ if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
return 0;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes < div_factor(num_bytes, 8))
+ if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
return 0;
thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+ /* 256MB or 5% of the FS */
thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
return 0;
-
return 1;
}
{
struct btrfs_space_info *space_info;
struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ int wait_for_alloc = 0;
int ret = 0;
- mutex_lock(&fs_info->chunk_mutex);
-
flags = btrfs_reduce_alloc_profile(extent_root, flags);
space_info = __find_space_info(extent_root->fs_info, flags);
}
BUG_ON(!space_info);
+again:
spin_lock(&space_info->lock);
if (space_info->force_alloc)
- force = 1;
+ force = space_info->force_alloc;
if (space_info->full) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
}
- if (!force && !should_alloc_chunk(extent_root, space_info,
- alloc_bytes)) {
+ if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ wait_for_alloc = 1;
+ } else {
+ space_info->chunk_alloc = 1;
}
+
spin_unlock(&space_info->lock);
+ mutex_lock(&fs_info->chunk_mutex);
+
+ /*
+ * The chunk_mutex is held throughout the entirety of a chunk
+ * allocation, so once we've acquired the chunk_mutex we know that the
+ * other guy is done and we need to recheck and see if we should
+ * allocate.
+ */
+ if (wait_for_alloc) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ wait_for_alloc = 0;
+ goto again;
+ }
+
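
The wait_for_alloc / goto again sequence above is a "let the current owner finish, then recheck" pattern: chunk_mutex is held for the whole allocation, so merely acquiring it after seeing space_info->chunk_alloc set means the other allocator has finished and the counters can be re-evaluated. A hedged userspace sketch of the same shape, with pthreads standing in for the kernel locks and none of the names being btrfs symbols:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER; /* ~ fs_info->chunk_mutex */
    static pthread_mutex_t info_lock   = PTHREAD_MUTEX_INITIALIZER; /* ~ space_info->lock */
    static bool alloc_in_progress;                                  /* ~ space_info->chunk_alloc */
    static bool space_full;                                         /* ~ space_info->full */

    static int alloc_chunk_once(void)
    {
            bool wait_for_alloc;
    again:
            pthread_mutex_lock(&info_lock);
            if (space_full) {                       /* nothing left to do */
                    pthread_mutex_unlock(&info_lock);
                    return 0;
            }
            wait_for_alloc = alloc_in_progress;
            if (!wait_for_alloc)
                    alloc_in_progress = true;       /* we become the owner */
            pthread_mutex_unlock(&info_lock);

            pthread_mutex_lock(&chunk_mutex);
            if (wait_for_alloc) {
                    /* the owner holds chunk_mutex for the whole allocation,
                     * so once we have it they are done: drop it and recheck */
                    pthread_mutex_unlock(&chunk_mutex);
                    goto again;
            }

            /* ... do the allocation; for the sketch, pretend it fills the space ... */

            pthread_mutex_lock(&info_lock);
            space_full = true;
            alloc_in_progress = false;
            pthread_mutex_unlock(&info_lock);
            pthread_mutex_unlock(&chunk_mutex);
            return 1;
    }

A second thread that raced in while the first was allocating loops back through again:, now sees space_full, and returns without allocating another chunk, which mirrors the recheck described in the comment above.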
/*
* If we have mixed data/metadata chunks we want to make sure we keep
* allocating mixed chunks instead of individual chunks.
space_info->full = 1;
else
ret = 1;
- space_info->force_alloc = 0;
+
+ space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
-out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data, 1);
+ 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_LIMITED);
allowed_chunk_alloc = 0;
done_chunk_alloc = 1;
- } else if (!done_chunk_alloc) {
- space_info->force_alloc = 1;
+ } else if (!done_chunk_alloc &&
+ space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+ space_info->force_alloc = CHUNK_ALLOC_LIMITED;
}
if (loop < LOOP_NO_EMPTY_SIZE) {
*/
if (empty_size || root->ref_cows)
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes + 2 * 1024 * 1024, data, 0);
+ num_bytes + 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_NO_FORCE);
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes, data, 1);
+ num_bytes, data, CHUNK_ALLOC_FORCE);
goto again;
}
if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
alloc_flags = update_block_group_flags(root, cache->flags);
if (alloc_flags != cache->flags)
- do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
ret = set_block_group_ro(cache);
if (!ret)
goto out;
alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = set_block_group_ro(cache);
struct btrfs_root *root, u64 type)
{
u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
}
/*
}
}
+static void uncache_state(struct extent_state **cached_ptr)
+{
+ if (cached_ptr && (*cached_ptr)) {
+ struct extent_state *state = *cached_ptr;
+ *cached_ptr = NULL;
+ free_extent_state(state);
+ }
+}
+
/*
* set some bits on a range in the tree. This may require allocations or
* sleeping, so the gfp mask is used to indicate what is allowed.
}
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+ struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+ NULL, cached_state, mask);
}
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
mask);
}
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
mask);
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
+ struct extent_state *state;
+
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
if (++bvec <= bvec_end)
prefetchw(&bvec->bv_page->flags);
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+ if (state && state->start == start) {
+ /*
+ * take a reference on the state, unlock will drop
+ * the ref
+ */
+ cache_state(state, &cached);
+ }
+ spin_unlock(&tree->lock);
+
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
- NULL);
+ state);
if (ret)
uptodate = 0;
}
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
uptodate = 0;
+ uncache_state(&cached);
continue;
}
}
if (uptodate) {
- set_extent_uptodate(tree, start, end,
+ set_extent_uptodate(tree, start, end, &cached,
GFP_ATOMIC);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
if (whole_page) {
if (uptodate) {
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
prefetchw(&bvec->bv_page->flags);
if (uptodate) {
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+ set_extent_uptodate(tree, start, end, &cached,
+ GFP_ATOMIC);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
} while (bvec >= bio->bi_io_vec);
while (cur <= end) {
if (cur >= last_byte) {
char *userpage;
+ struct extent_state *cached = NULL;
+
iosize = PAGE_CACHE_SIZE - page_offset;
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
break;
}
em = get_extent(inode, page, page_offset, cur,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
char *userpage;
+ struct extent_state *cached = NULL;
+
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
cur = cur + iosize;
page_offset += iosize;
continue;
iocount++;
block_start = block_start + iosize;
} else {
- set_extent_uptodate(tree, block_start, cur_end,
+ struct extent_state *cached = NULL;
+
+ set_extent_uptodate(tree, block_start, cur_end, &cached,
GFP_NOFS);
- unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+ unlock_extent_cached(tree, block_start, cur_end,
+ &cached, GFP_NOFS);
block_start = cur_end + 1;
}
page_offset = block_start & (PAGE_CACHE_SIZE - 1);
num_pages = num_extent_pages(eb->start, eb->len);
set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- GFP_NOFS);
+ NULL, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
kunmap_atomic(dst_kaddr, KM_USER0);
}
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+ unsigned long distance = (src > dst) ? src - dst : dst - src;
+ return distance < len;
+}
+
static void copy_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
unsigned long len)
char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
char *src_kaddr;
- if (dst_page != src_page)
+ if (dst_page != src_page) {
src_kaddr = kmap_atomic(src_page, KM_USER1);
- else
+ } else {
src_kaddr = dst_kaddr;
+ BUG_ON(areas_overlap(src_off, dst_off, len));
+ }
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
kunmap_atomic(dst_kaddr, KM_USER0);
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
- if (dst_offset < src_offset) {
+ if (!areas_overlap(src_offset, dst_offset, len)) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
int bits, int exclusive_bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
+ struct extent_state **cached_state, gfp_t mask);
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
/*
* unlocks pages after btrfs_file_write is done with them
*/
-static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-static noinline int dirty_and_release_pages(struct btrfs_root *root,
- struct file *file,
- struct page **pages,
- size_t num_pages,
- loff_t pos,
- size_t write_bytes)
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
int err = 0;
int i;
- struct inode *inode = fdentry(file)->d_inode;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- NULL);
+ cached);
if (err)
return err;
}
if (copied > 0) {
- ret = dirty_and_release_pages(root, file, pages,
- dirty_pages, pos,
- copied);
+ ret = btrfs_dirty_pages(root, inode, pages,
+ dirty_pages, pos, copied,
+ NULL);
if (ret) {
btrfs_delalloc_release_space(inode,
dirty_pages << PAGE_CACHE_SHIFT);
struct inode *inode;
struct rb_node *node;
struct list_head *pos, *n;
+ struct page **pages;
struct page *page;
struct extent_state *cached_state = NULL;
struct btrfs_free_cluster *cluster = NULL;
u64 start, end, len;
u64 bytes = 0;
u32 *crc, *checksums;
- pgoff_t index = 0, last_index = 0;
unsigned long first_page_offset;
- int num_checksums;
+ int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
int ret = 0;
bool next_page = false;
+ bool out_of_space = false;
root = root->fs_info->tree_root;
return 0;
}
- last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
filemap_write_and_wait(inode->i_mapping);
btrfs_wait_ordered_range(inode, inode->i_size &
~(root->sectorsize - 1), (u64)-1);
/* We need a checksum per page. */
- num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
- crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
+ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
if (!crc) {
iput(inode);
return 0;
}
+ pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+ if (!pages) {
+ kfree(crc);
+ iput(inode);
+ return 0;
+ }
+
/* Since the first page has all of our checksums and our generation we
* need to calculate the offset into the page that we can start writing
* our entries.
*/
- first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+ first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
/* Get the cluster for this block_group if it exists */
if (!list_empty(&block_group->cluster_list))
* after find_get_page at this point. Just putting this here so people
* know and don't freak out.
*/
- while (index <= last_index) {
+ while (index < num_pages) {
page = grab_cache_page(inode->i_mapping, index);
if (!page) {
- pgoff_t i = 0;
+ int i;
- while (i < index) {
- page = find_get_page(inode->i_mapping, i);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
- i++;
+ for (i = 0; i < num_pages; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
}
goto out_free;
}
+ pages[index] = page;
index++;
}
offset = start_offset;
}
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+
+ page = pages[index];
addr = kmap(page);
entry = addr + start_offset;
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
-
- /*
- * We need to release our reference we got for grab_cache_page,
- * except for the first page which will hold our checksums, we
- * do that below.
- */
- if (index != 0) {
- unlock_page(page);
- page_cache_release(page);
- }
-
- page_cache_release(page);
-
index++;
} while (node || next_page);
struct btrfs_free_space *entry =
list_entry(pos, struct btrfs_free_space, list);
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+ page = pages[index];
addr = kmap(page);
memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
crc++;
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
list_del_init(&entry->list);
index++;
}
+ if (out_of_space) {
+ btrfs_drop_pages(pages, num_pages);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, &cached_state,
+ GFP_NOFS);
+ ret = 0;
+ goto out_free;
+ }
+
/* Zero out the rest of the pages just to make sure */
- while (index <= last_index) {
+ while (index < num_pages) {
void *addr;
- page = find_get_page(inode->i_mapping, index);
-
+ page = pages[index];
addr = kmap(page);
memset(addr, 0, PAGE_CACHE_SIZE);
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
bytes += PAGE_CACHE_SIZE;
index++;
}
- btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
-
/* Write the checksums and trans id to the first page */
{
void *addr;
u64 *gen;
- page = find_get_page(inode->i_mapping, 0);
+ page = pages[0];
addr = kmap(page);
- memcpy(addr, checksums, sizeof(u32) * num_checksums);
- gen = addr + (sizeof(u32) * num_checksums);
+ memcpy(addr, checksums, sizeof(u32) * num_pages);
+ gen = addr + (sizeof(u32) * num_pages);
*gen = trans->transid;
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
}
- BTRFS_I(inode)->generation = trans->transid;
+ ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
+ bytes, &cached_state);
+ btrfs_drop_pages(pages, num_pages);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ if (ret) {
+ ret = 0;
+ goto out_free;
+ }
+
+ BTRFS_I(inode)->generation = trans->transid;
+
filemap_write_and_wait(inode->i_mapping);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
BTRFS_I(inode)->generation = 0;
}
kfree(checksums);
+ kfree(pages);
btrfs_update_inode(trans, root, inode);
iput(inode);
return ret;
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (!ret) {
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ }
+ ret = 0;
out:
if (nolock) {
if (trans)
struct btrfs_inode_item *item,
struct inode *inode)
{
+ if (!leaf->map_token)
+ map_private_extent_buffer(leaf, (unsigned long)item,
+ sizeof(struct btrfs_inode_item),
+ &leaf->map_token, &leaf->kaddr,
+ &leaf->map_start, &leaf->map_len,
+ KM_USER1);
+
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+ if (leaf->map_token) {
+ unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+ leaf->map_token = NULL;
+ }
}
/*
struct btrfs_key found_key;
struct btrfs_path *path;
int ret;
- u32 nritems;
struct extent_buffer *leaf;
int slot;
- int advance;
unsigned char d_type;
int over = 0;
u32 di_cur;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
- if (advance || slot >= nritems) {
- if (slot >= nritems - 1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- slot++;
- path->slots[0]++;
- }
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (btrfs_key_type(&found_key) != key_type)
break;
if (found_key.offset < filp->f_pos)
- continue;
+ goto next;
filp->f_pos = found_key.offset;
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
+next:
+ path->slots[0]++;
}
/* Reached end of directory/root. Bump pos past the last item. */
BUG_ON(!path);
inode = new_inode(root->fs_info->sb);
- if (!inode)
+ if (!inode) {
+ btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
+ }
if (dir) {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(dir, index);
if (ret) {
+ btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
if (inode->i_nlink == ~0U)
return -EMLINK;
- btrfs_inc_nlink(inode);
- inode->i_ctime = CURRENT_TIME;
-
err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
goto fail;
}
+ btrfs_inc_nlink(inode);
+ inode->i_ctime = CURRENT_TIME;
+
btrfs_set_trans_block_group(trans, dir);
ihold(inode);
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
- extent_map_end(em) - 1, GFP_NOFS);
+ extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+ struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
- struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
+ bool insert = false;
- btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ /*
+ * Ok, if the extent map we looked up is a hole and is for the exact
+ * range we want, there is no reason to allocate a new one. However, if
+ * it is not right, we need to free this one and drop the cache for
+ * our range.
+ */
+ if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
+ em->len != len) {
+ free_extent_map(em);
+ em = NULL;
+ insert = true;
+ btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ }
trans = btrfs_join_transaction(root, 0);
if (IS_ERR(trans))
goto out;
}
- em = alloc_extent_map(GFP_NOFS);
if (!em) {
- em = ERR_PTR(-ENOMEM);
- goto out;
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em) {
+ em = ERR_PTR(-ENOMEM);
+ goto out;
+ }
}
em->start = start;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+
+ /*
+ * We need to do this because if we're using the original em we searched
+ * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
+ */
+ em->flags = 0;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- while (1) {
+ while (insert) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
* it above
*/
len = bh_result->b_size;
- free_extent_map(em);
- em = btrfs_new_extent_direct(inode, start, len);
+ em = btrfs_new_extent_direct(inode, em, start, len);
if (IS_ERR(em))
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
}
add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
- btrfs_ordered_update_i_size(inode, 0, ordered);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+ if (!ret)
+ btrfs_update_inode(trans, root, inode);
+ ret = 0;
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1,
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
- u32 *csums)
+ u32 *csums, int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (ret)
goto err;
- if (write && !skip_sum) {
+ if (skip_sum)
+ goto map;
+
+ if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
+ } else if (write) {
+ /*
+ * If we aren't doing async submit, calculate the csum of the
+ * bio now.
+ */
+ ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+ if (ret)
+ goto err;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
goto err;
}
- ret = btrfs_map_bio(root, rw, bio, 0, 1);
+map:
+ ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
+ int async_submit = 0;
int write = rw & REQ_WRITE;
- bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
- if (!bio)
- return -ENOMEM;
- bio->bi_private = dip;
- bio->bi_end_io = btrfs_end_dio_bio;
- atomic_inc(&dip->pending_bios);
-
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
return -EIO;
}
+ if (map_length >= orig_bio->bi_size) {
+ bio = orig_bio;
+ goto submit;
+ }
+
+ async_submit = 1;
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+ if (!bio)
+ return -ENOMEM;
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
+ atomic_inc(&dip->pending_bios);
+
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (unlikely(map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
}
}
+submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (!ret)
return 0;
unsigned long nr_segs)
{
int seg;
+ int i;
size_t size;
unsigned long addr;
unsigned blocksize_mask = root->sectorsize - 1;
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
- if ((addr & blocksize_mask) || (size & blocksize_mask))
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
+
+ /* If this is a write we don't need to check anymore */
+ if (rw & WRITE)
+ continue;
+
+ /*
+ * Check to make sure we don't have duplicate iov_base's in this
+ * iovec, if so return EINVAL, otherwise we'll get csum errors
+ * when reading back.
+ */
+ for (i = seg + 1; i < nr_segs; i++) {
+ if (iov[seg].iov_base == iov[i].iov_base)
+ goto out;
+ }
}
retval = 0;
out:
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
- struct btrfs_ioctl_space_info *user_dest;
+ struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
BTRFS_BLOCK_GROUP_SYSTEM,
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
- Opt_enospc_debug, Opt_err,
+ Opt_enospc_debug, Opt_subvolrootid, Opt_err,
};
static match_table_t tokens = {
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
{Opt_enospc_debug, "enospc_debug"},
+ {Opt_subvolrootid, "subvolrootid=%d"},
{Opt_err, NULL},
};
break;
case Opt_subvol:
case Opt_subvolid:
+ case Opt_subvolrootid:
case Opt_device:
/*
* These are parsed by btrfs_parse_early_options
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
void *holder, char **subvol_name, u64 *subvol_objectid,
- struct btrfs_fs_devices **fs_devices)
+ u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *orig, *p;
*subvol_objectid = intarg;
}
break;
+ case Opt_subvolrootid:
+ intarg = 0;
+ error = match_int(&args[0], &intarg);
+ if (!error) {
+ /* we want the original fs_tree */
+ if (!intarg)
+ *subvol_rootid =
+ BTRFS_FS_TREE_OBJECTID;
+ else
+ *subvol_rootid = intarg;
+ }
+ break;
case Opt_device:
error = btrfs_scan_one_device(match_strdup(&args[0]),
flags, holder, fs_devices);
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
+ u64 subvol_rootid = 0;
int error = 0;
if (!(flags & MS_RDONLY))
error = btrfs_parse_early_options(data, mode, fs_type,
&subvol_name, &subvol_objectid,
- &fs_devices);
+ &subvol_rootid, &fs_devices);
if (error)
return ERR_PTR(error);
s->s_flags |= MS_ACTIVE;
}
- root = get_default_root(s, subvol_objectid);
- if (IS_ERR(root)) {
- error = PTR_ERR(root);
- deactivate_locked_super(s);
- goto error_free_subvol_name;
- }
/* if they gave us a subvolume name bind mount into that */
if (strcmp(subvol_name, ".")) {
struct dentry *new_root;
+
+ root = get_default_root(s, subvol_rootid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
+
mutex_lock(&root->d_inode->i_mutex);
new_root = lookup_one_len(subvol_name, root,
strlen(subvol_name));
}
dput(root);
root = new_root;
+ } else {
+ root = get_default_root(s, subvol_objectid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
}
kfree(subvol_name);
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(transaction->use_count == 0);
- transaction->use_count--;
- if (transaction->use_count == 0) {
- list_del_init(&transaction->list);
+ WARN_ON(atomic_read(&transaction->use_count) == 0);
+ if (atomic_dec_and_test(&transaction->use_count)) {
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
if (!cur_trans)
return -ENOMEM;
root->fs_info->generation++;
- cur_trans->num_writers = 1;
+ atomic_set(&cur_trans->num_writers, 1);
cur_trans->num_joined = 0;
cur_trans->transid = root->fs_info->generation;
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
cur_trans->in_commit = 0;
cur_trans->blocked = 0;
- cur_trans->use_count = 1;
+ atomic_set(&cur_trans->use_count, 1);
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
} else {
- cur_trans->num_writers++;
+ atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
}
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
DEFINE_WAIT(wait);
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
+ int retries = 0;
int ret;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
}
cur_trans = root->fs_info->running_transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
if (type != TRANS_JOIN_NOLOCK)
mutex_unlock(&root->fs_info->trans_mutex);
if (num_items > 0) {
ret = btrfs_trans_reserve_metadata(h, root, num_items);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN && !retries) {
+ retries++;
btrfs_commit_transaction(h, root);
goto again;
+ } else if (ret == -EAGAIN) {
+ /*
+ * We have already retried and got EAGAIN, so really we
+ * don't have space, so set ret to -ENOSPC.
+ */
+ ret = -ENOSPC;
}
+
if (ret < 0) {
btrfs_end_transaction(h, root);
return ERR_PTR(ret);
goto out_unlock; /* nothing committing|committed */
}
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, cur_trans);
wake_up_process(info->transaction_kthread);
}
- if (lock)
- mutex_lock(&info->trans_mutex);
WARN_ON(cur_trans != info->running_transaction);
- WARN_ON(cur_trans->num_writers < 1);
- cur_trans->num_writers--;
+ WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
+ atomic_dec(&cur_trans->num_writers);
smp_mb();
if (waitqueue_active(&cur_trans->writer_wait))
wake_up(&cur_trans->writer_wait);
put_transaction(cur_trans);
- if (lock)
- mutex_unlock(&info->trans_mutex);
if (current->journal_info == trans)
current->journal_info = NULL;
/* take transaction reference */
mutex_lock(&root->fs_info->trans_mutex);
cur_trans = trans->transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
mutex_lock(&root->fs_info->trans_mutex);
if (cur_trans->in_commit) {
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
- prev_trans->use_count++;
+ atomic_inc(&prev_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, prev_trans);
TASK_UNINTERRUPTIBLE);
smp_mb();
- if (cur_trans->num_writers > 1)
+ if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);
mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&cur_trans->writer_wait, &wait);
- } while (cur_trans->num_writers > 1 ||
+ } while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
ret = create_pending_snapshots(trans, root->fs_info);
wake_up(&cur_trans->commit_wait);
+ list_del_init(&cur_trans->list);
put_transaction(cur_trans);
put_transaction(cur_trans);
* total writers in this transaction, it must be zero before the
* transaction can end
*/
- unsigned long num_writers;
+ atomic_t num_writers;
unsigned long num_joined;
int in_commit;
- int use_count;
+ atomic_t use_count;
int commit_done;
int blocked;
struct list_head list;
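
The hunks above convert num_writers and use_count from lock-protected integers to atomics. A minimal sketch of the resulting put() pattern, using C11 atomics in place of the kernel's atomic_t (names are illustrative, not the btrfs code):

#include <stdatomic.h>
#include <stdlib.h>

struct txn {
	atomic_int use_count;
	/* ... payload ... */
};

static struct txn *txn_alloc(void)
{
	struct txn *t = calloc(1, sizeof(*t));

	if (t)
		atomic_init(&t->use_count, 1);
	return t;
}

static void txn_get(struct txn *t)
{
	atomic_fetch_add(&t->use_count, 1);
}

static void txn_put(struct txn *t)
{
	/* atomic_dec_and_test() equivalent: the last dropper frees the object */
	if (atomic_fetch_sub(&t->use_count, 1) == 1)
		free(t);
}

int main(void)
{
	struct txn *t = txn_alloc();

	txn_get(t);	/* second reference */
	txn_put(t);
	txn_put(t);	/* last put frees */
	return 0;
}
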
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
- int ret = 0, slot, advance;
+ int ret = 0, slot;
size_t total_size = 0, size_left = size;
unsigned long name_ptr;
size_t name_len;
- u32 nritems;
/*
* ok we want all objects associated with this id.
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
+
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
/* this is where we start walking through the path */
- if (advance || slot >= nritems) {
+ if (slot >= btrfs_header_nritems(leaf)) {
/*
* if we've reached the last slot in this leaf we need
* to go to the next leaf and reset everything
*/
- if (slot >= nritems-1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- /*
- * just walking through the slots on this leaf
- */
- slot++;
- path->slots[0]++;
- }
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
btrfs_item_key_to_cpu(leaf, &found_key, slot);
/* we are just looking for how big our buffer needs to be */
if (!size)
- continue;
+ goto next;
if (!buffer || (name_len + 1) > size_left) {
ret = -ERANGE;
size_left -= name_len + 1;
buffer += name_len + 1;
+next:
+ path->slots[0]++;
}
ret = total_size;
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
-struct dcache_hash_bucket {
- struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
unsigned long hash)
{
hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
return dentry_hashtable + (hash & D_HASHMASK);
}
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_lock_bucket(struct hlist_bl_head *b)
{
- bit_spin_lock(0, (unsigned long *)&b->head.first);
+ bit_spin_lock(0, (unsigned long *)&b->first);
}
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_unlock_bucket(struct hlist_bl_head *b)
{
- __bit_spin_unlock(0, (unsigned long *)&b->head.first);
+ __bit_spin_unlock(0, (unsigned long *)&b->first);
}
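
spin_lock_bucket() above uses bit 0 of the bucket's first pointer as the lock, so each hash chain needs no separate lock word. A rough user-space illustration of that idea with C11 atomics (not the kernel's bit_spin_lock() implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void bucket_lock(_Atomic uintptr_t *head)
{
	/* spin until we are the thread that set bit 0 */
	while (atomic_fetch_or_explicit(head, (uintptr_t)1,
					memory_order_acquire) & 1)
		;	/* busy-wait; the real helper relaxes the CPU here */
}

static void bucket_unlock(_Atomic uintptr_t *head)
{
	atomic_fetch_and_explicit(head, ~(uintptr_t)1, memory_order_release);
}

int main(void)
{
	_Atomic uintptr_t bucket_head = 0;	/* low bit doubles as the lock */

	bucket_lock(&bucket_head);
	/* ... walk or modify the chain ... */
	bucket_unlock(&bucket_head);
	printf("%lu\n", (unsigned long)bucket_head);	/* 0: lock released */
	return 0;
}
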
/* Statistics gathering. */
void __d_drop(struct dentry *dentry)
{
if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+ struct hlist_bl_head *b;
if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
- bit_spin_lock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
+ b = &dentry->d_sb->s_anon;
+ spin_lock_bucket(b);
dentry->d_flags |= DCACHE_UNHASHED;
hlist_bl_del_init(&dentry->d_hash);
- __bit_spin_unlock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
+ spin_unlock_bucket(b);
} else {
- struct dcache_hash_bucket *b;
+ struct hlist_bl_head *b;
b = d_hash(dentry->d_parent, dentry->d_name.hash);
spin_lock_bucket(b);
/*
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *dentry;
*
* See Documentation/filesystems/path-lookup.txt for more details.
*/
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
struct inode *i;
const char *tname;
int tlen;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *found = NULL;
struct dentry *dentry;
*/
rcu_read_lock();
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
const char *tname;
int tlen;
}
EXPORT_SYMBOL(d_delete);
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
spin_lock_bucket(b);
entry->d_flags &= ~DCACHE_UNHASHED;
- hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+ hlist_bl_add_head_rcu(&entry->d_hash, b);
spin_unlock_bucket(b);
}
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
HASH_EARLY,
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
0,
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
}
brelse(dibh);
- gfs2_trans_end(sdp);
failed:
+ gfs2_trans_end(sdp);
if (al) {
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino));
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
-static void empty_write_end(struct page *page, unsigned from,
- unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+ unsigned to, int mode)
{
- struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+ struct inode *inode = page->mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *bh;
+ unsigned offset, blksize = 1 << inode->i_blkbits;
+ pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
zero_user(page, from, to-from);
mark_page_accessed(page);
- if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
+ if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+ if (!gfs2_is_writeback(ip))
+ gfs2_page_add_databufs(ip, page, from, to);
+
+ block_commit_write(page, from, to);
+ return 0;
+ }
+
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ clear_buffer_new(bh);
+ write_dirty_buffer(bh, WRITE);
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
- block_commit_write(page, from, to);
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ return -EIO;
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
+ return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
return !buffer_mapped(&bh_map);
}
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+ int mode)
{
struct inode *inode = page->mapping->host;
unsigned start, end, next, blksize;
gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
end = 0;
}
start = next;
ret = __block_write_begin(page, start, end - start, gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
}
return 0;
if (curr == end)
to = end_offset;
- error = write_empty_blocks(page, from, to);
+ error = write_empty_blocks(page, from, to, mode);
if (!error && offset + to > inode->i_size &&
!(mode & FALLOC_FL_KEEP_SIZE)) {
i_size_write(inode, offset + to);
static void iopen_go_callback(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+
+ if (sdp->sd_vfs->s_flags & MS_RDONLY)
+ return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
u64 ir_length;
};
+struct gfs2_skip_data {
+ u64 no_addr;
+ int skipped;
+ int non_block;
+};
+
static int iget_test(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == *no_addr)
+ if (ip->i_no_addr == data->no_addr) {
+ if (data->non_block &&
+ inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+ data->skipped = 1;
+ return 0;
+ }
return 1;
-
+ }
return 0;
}
static int iget_set(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- inode->i_ino = (unsigned long)*no_addr;
- ip->i_no_addr = *no_addr;
+ if (data->skipped)
+ return -ENOENT;
+ inode->i_ino = (unsigned long)(data->no_addr);
+ ip->i_no_addr = data->no_addr;
return 0;
}
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
unsigned long hash = (unsigned long)no_addr;
- return ilookup5(sb, hash, iget_test, &no_addr);
+ struct gfs2_skip_data data;
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = 0;
+ return ilookup5(sb, hash, iget_test, &data);
}
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
+ int non_block)
{
+ struct gfs2_skip_data data;
unsigned long hash = (unsigned long)no_addr;
- return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = non_block;
+ return iget5_locked(sb, hash, iget_test, iget_set, &data);
}
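
The gfs2 hunks above replace the bare u64 lookup key with a small struct so the iget test callback can both match on the address and report back whether a match was skipped. A standalone sketch of that callback pattern (illustrative names, not the gfs2 code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct skip_data {
	uint64_t no_addr;
	int skipped;
	int non_block;
};

struct obj {
	uint64_t no_addr;
	bool being_freed;
};

/* return 1 on a usable match, 0 otherwise; may record why a match was rejected */
static int obj_test(const struct obj *o, void *opaque)
{
	struct skip_data *data = opaque;

	if (o->no_addr != data->no_addr)
		return 0;
	if (data->non_block && o->being_freed) {
		data->skipped = 1;	/* caller may retry or give up */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct obj freeing = { .no_addr = 42, .being_freed = true };
	struct skip_data data = { .no_addr = 42, .non_block = 1 };
	int match = obj_test(&freeing, &data);

	printf("match=%d skipped=%d\n", match, data.skipped);	/* match=0 skipped=1 */
	return 0;
}
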
/**
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
+ * @non_block: Can we block on inodes that are being freed?
*
* Returns: A VFS inode, or an error
*/
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
- u64 no_addr, u64 no_formal_ino)
+ u64 no_addr, u64 no_formal_ino, int non_block)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl = NULL;
int error;
- inode = gfs2_iget(sb, no_addr);
+ inode = gfs2_iget(sb, no_addr, non_block);
ip = GFS2_I(inode);
if (!inode)
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_holder i_gh;
- struct inode *inode;
+ struct inode *inode = NULL;
int error;
+ /* Must not read in block until block type is verified */
error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
- LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
if (error)
return ERR_PTR(error);
if (error)
goto fail;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
if (IS_ERR(inode))
goto fail;
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino);
+ inum.no_formal_ino, 0);
if (IS_ERR(inode))
goto fail_gunlock2;
}
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ int non_block);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 *no_formal_ino,
unsigned int blktype);
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
- goal++;
+ goal = max(block + 1, goal + 1);
if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
if (no_addr == skip)
found++;
/* Limit reclaim to sensible number of tasks */
- if (found > 2*NR_CPUS)
+ if (found > NR_CPUS)
return;
}
static void gfs2_evict_inode(struct inode *inode)
{
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+ struct super_block *sb = inode->i_sb;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
- if (inode->i_nlink)
+ if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
goto out;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ /* Must not read inode block until block type has been verified */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
goto out;
if (error)
goto out_truncate;
+ if (test_bit(GIF_INVALID, &ip->i_flags)) {
+ error = gfs2_inode_refresh(ip);
+ if (error)
+ goto out_truncate;
+ }
+
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
if (atomic_dec_and_test(&fp->fi_delegees)) {
vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
fp->fi_lease = NULL;
+ fput(fp->fi_deleg_file);
fp->fi_deleg_file = NULL;
}
}
if (stp->st_access_bmap) {
oflag = nfs4_access_bmap_to_omode(stp);
nfs4_file_put_access(stp->st_file, oflag);
- put_nfs4_file(stp->st_file);
}
+ put_nfs4_file(stp->st_file);
kmem_cache_free(stateid_slab, stp);
}
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
goto out;
if (IS_ERR(dchild))
goto out_nfserr;
+ /* If the file doesn't exist, check for permission to create one */
+ if (!dchild->d_inode) {
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+ }
+
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+ unsigned int nr;
+ struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
+ if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+ goto out_no_task;
+ nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+ reaper = get_proc_task(filp->f_path.dentry->d_inode);
if (!reaper)
goto out_no_task;
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(dentry, name, value, size, 0, handler->flags);
+ return handler->set(dentry, name, value, size, flags, handler->flags);
}
/*
const struct xfs_mount *mp,
struct va_format *vaf)
{
- if (mp && mp->m_fsname)
+ if (mp && mp->m_fsname) {
printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ return;
+ }
printk("%sXFS: %pV\n", level, vaf);
}
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplugged_fn) (struct request_queue *);
struct bio_vec;
struct bvec_merge_data {
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
- unplugged_fn *unplugged_fn;
/*
* Dispatch queue sorting
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
-extern void blk_queue_unplugged(struct request_queue *, unplugged_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
struct blk_plug {
unsigned long magic;
struct list_head list;
+ struct list_head cb_list;
unsigned int should_sort;
};
+struct blk_plug_cb {
+ struct list_head list;
+ void (*callback)(struct blk_plug_cb *);
+};
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
{
struct blk_plug *plug = tsk->plug;
- return plug && !list_empty(&plug->list);
+ return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
/*
struct dm_target_callbacks {
struct list_head list;
int (*congested_fn) (struct dm_target_callbacks *, int);
- void (*unplug_fn)(struct dm_target_callbacks *);
};
int dm_register_target(struct target_type *t);
#define SYN_REPORT 0
#define SYN_CONFIG 1
#define SYN_MT_REPORT 2
+#define SYN_DROPPED 3
/*
* Keys and buttons
#define KEY_DVD 0x185 /* Media Select DVD */
#define KEY_AUX 0x186
#define KEY_MP3 0x187
-#define KEY_AUDIO 0x188
-#define KEY_VIDEO 0x189
+#define KEY_AUDIO 0x188 /* AL Audio Browser */
+#define KEY_VIDEO 0x189 /* AL Movie Browser */
#define KEY_DIRECTORY 0x18a
#define KEY_LIST 0x18b
#define KEY_MEMO 0x18c /* Media Select Messages */
#define KEY_FRAMEFORWARD 0x1b5
#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
-#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
-#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
+#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
+#define KEY_IMAGES 0x1ba /* AL Image Browser */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
}
+static inline bool input_is_mt_axis(int axis)
+{
+ return axis == ABS_MT_SLOT ||
+ (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
+}
+
void input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, int last);
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/posix-timers.h>
+#include <linux/rwsem.h>
struct posix_clock;
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
* @kref: Reference count.
- * @mutex: Protects the 'zombie' field from concurrent access.
+ * @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
* @release: A function to free the structure when the reference count reaches
* zero. May be NULL if structure is statically allocated.
struct posix_clock_operations ops;
struct cdev cdev;
struct kref kref;
- struct mutex mutex;
+ struct rw_semaphore rwsem;
bool zombie;
void (*release)(struct posix_clock *clk);
};
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
- int (*inode_permission) (struct inode *inode, int mask);
+ int (*inode_permission) (struct inode *inode, int mask, unsigned flags);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
int (*inode_setxattr) (struct dentry *dentry, const char *name,
* Indicates to usbnet, that USB driver accumulates multiple IP packets.
* Affects statistic (counters) and short packet handling.
*/
-#define FLAG_MULTI_PACKET 0x1000
-#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
+#define FLAG_MULTI_PACKET 0x2000
+#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
return -1;
}
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
int offset;
struct pidmap *map, *end;
+ if (last >= PID_MAX_LIMIT)
+ return -1;
+
offset = (last + 1) & BITS_PER_PAGE_MASK;
map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
end = &pid_ns->pidmap[PIDMAP_ENTRIES];
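
With last now unsigned, the added PID_MAX_LIMIT check above keeps a huge value from indexing past the pidmap array. A small standalone sketch of the same guard, with made-up constants:

#include <stdio.h>

#define PID_MAX_LIMIT	32768
#define BITS_PER_PAGE	(4096 * 8)
#define PIDMAP_ENTRIES	((PID_MAX_LIMIT + BITS_PER_PAGE - 1) / BITS_PER_PAGE)

static int next_pid_index(unsigned int last)
{
	if (last >= PID_MAX_LIMIT)
		return -1;			/* would index past pidmap[] */
	return (last + 1) / BITS_PER_PAGE;	/* always < PIDMAP_ENTRIES */
}

int main(void)
{
	printf("%d\n", next_pid_index(100));		/* 0 */
	printf("%d\n", next_pid_index(0x7fffffffu));	/* -1, caught by the guard */
	return 0;
}
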
*/
#include <linux/device.h>
#include <linux/file.h>
-#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
{
struct posix_clock *clk = fp->private_data;
- mutex_lock(&clk->mutex);
+ down_read(&clk->rwsem);
if (!clk->zombie)
return clk;
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
return NULL;
}
static void put_posix_clock(struct posix_clock *clk)
{
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
}
static ssize_t posix_clock_read(struct file *fp, char __user *buf,
struct posix_clock *clk =
container_of(inode->i_cdev, struct posix_clock, cdev);
- mutex_lock(&clk->mutex);
+ down_read(&clk->rwsem);
if (clk->zombie) {
err = -ENODEV;
fp->private_data = clk;
}
out:
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
return err;
}
int err;
kref_init(&clk->kref);
- mutex_init(&clk->mutex);
+ init_rwsem(&clk->rwsem);
cdev_init(&clk->cdev, &posix_clock_file_operations);
clk->cdev.owner = clk->ops.owner;
err = cdev_add(&clk->cdev, devid, 1);
- if (err)
- goto no_cdev;
return err;
-no_cdev:
- mutex_destroy(&clk->mutex);
- return err;
}
EXPORT_SYMBOL_GPL(posix_clock_register);
static void delete_clock(struct kref *kref)
{
struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
- mutex_destroy(&clk->mutex);
+
if (clk->release)
clk->release(clk);
}
{
cdev_del(&clk->cdev);
- mutex_lock(&clk->mutex);
+ down_write(&clk->rwsem);
clk->zombie = true;
- mutex_unlock(&clk->mutex);
+ up_write(&clk->rwsem);
kref_put(&clk->kref, delete_clock);
}
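
The posix-clock hunks above trade the mutex for an rw_semaphore: readers (open, read, ioctl) take it shared to test the zombie flag, and unregistration takes it exclusively to set it. A user-space analogue using a pthread rwlock (illustrative only, not the kernel code):

#include <pthread.h>
#include <stdbool.h>

struct clock_dev {
	pthread_rwlock_t rwsem;
	bool zombie;
};

/* open/read/ioctl path: shared lock, fail if the device is gone */
static struct clock_dev *clock_get(struct clock_dev *clk)
{
	pthread_rwlock_rdlock(&clk->rwsem);
	if (!clk->zombie)
		return clk;		/* caller drops the lock via clock_put() */
	pthread_rwlock_unlock(&clk->rwsem);
	return NULL;
}

static void clock_put(struct clock_dev *clk)
{
	pthread_rwlock_unlock(&clk->rwsem);
}

/* unregister path: exclusive lock while flipping the flag */
static void clock_unregister(struct clock_dev *clk)
{
	pthread_rwlock_wrlock(&clk->rwsem);
	clk->zombie = true;		/* later clock_get() calls now fail */
	pthread_rwlock_unlock(&clk->rwsem);
}

int main(void)
{
	struct clock_dev clk = { .rwsem = PTHREAD_RWLOCK_INITIALIZER };

	if (clock_get(&clk))
		clock_put(&clk);
	clock_unregister(&clk);
	return 0;
}
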
goto drop;
}
- /* Zero out the CB buffer if no options present */
- if (iph->ihl == 5) {
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (iph->ihl == 5)
return 0;
- }
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+
#define container_obj(layr) ((struct cfsrvl *) layr)
#define DGM_CMD_BIT 0x80
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
+ u8 packet_type;
u32 zero = 0;
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
if (cfpkt_getlen(pkt) > DGM_MTU)
return -EMSGSIZE;
- cfpkt_add_head(pkt, &zero, 4);
+ cfpkt_add_head(pkt, &zero, 3);
+ packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+ cfpkt_add_head(pkt, &packet_type, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
int phyid)
{
struct cfmuxl *muxl = container_obj(layr);
- struct list_head *node;
+ struct list_head *node, *next;
struct cflayer *layer;
- list_for_each(node, &muxl->srvl_list) {
+ list_for_each_safe(node, next, &muxl->srvl_list) {
layer = list_entry(node, struct cflayer, node);
if (cfsrvl_phyid_match(layer, phyid))
layer->ctrlcmd(layer, ctrl, phyid);
}
/* TSO requires that SG is present as well. */
- if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
- features &= ~NETIF_F_TSO;
+ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+ features &= ~NETIF_F_ALL_TSO;
}
+ /* TSO ECN requires that TSO is present as well. */
+ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+ features &= ~NETIF_F_TSO_ECN;
+
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
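
The netdev hunk above clears every TSO variant when scatter-gather is missing, and then drops TSO_ECN if it is the only TSO bit left. A toy version of that dependency masking, with made-up flag values:

#include <stdio.h>

#define F_SG		0x01
#define F_TSO		0x02
#define F_TSO6		0x04
#define F_TSO_ECN	0x08
#define F_ALL_TSO	(F_TSO | F_TSO6 | F_TSO_ECN)

static unsigned int fix_features(unsigned int features)
{
	/* every TSO variant requires scatter-gather */
	if ((features & F_ALL_TSO) && !(features & F_SG))
		features &= ~F_ALL_TSO;

	/* TSO ECN alone is useless without plain TSO */
	if ((features & F_ALL_TSO) == F_TSO_ECN)
		features &= ~F_TSO_ECN;

	return features;
}

int main(void)
{
	printf("%#x\n", fix_features(F_TSO | F_TSO_ECN));	/* 0: no SG present */
	printf("%#x\n", fix_features(F_SG | F_TSO_ECN));	/* 0x1: ECN dropped */
	return 0;
}
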
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+ sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
/* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
int do_free;
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
}
/* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer *p = NULL;
* happen because of entry limits in route cache. */
return -1;
- unlink_from_pool(p, peer_to_base(p));
+ unlink_from_pool(p, peer_to_base(p), stack);
return 0;
}
if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
- cleanup_once(0);
+ cleanup_once(0, stack);
return p;
}
{
unsigned long now = jiffies;
int ttl, total;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
total = compute_total();
if (total >= inet_peer_threshold)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
total / inet_peer_threshold * HZ;
- while (!cleanup_once(ttl)) {
+ while (!cleanup_once(ttl, stack)) {
if (jiffies != now)
break;
}
pp_ptr = optptr + 2;
goto error;
}
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
goto error;
}
opt->ts = optptr - iph;
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
unsigned long orefdst;
int err;
- if (!opt->srr)
+ if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
-#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
.data = &sysctl_igmp_max_memberships,
.mode = 0644,
.proc_handler = proc_dointvec
},
-
-#endif
{
.procname = "igmp_max_msf",
.data = &sysctl_igmp_max_msf,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
MSG_NOSIGNAL)) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
lock_sock(sk);
s32 data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
- ((skb_tail_pointer(skb) -
- (u8 *)pdu) - llc_len) < data_size)
+ !pskb_may_pull(skb, data_size))
return 0;
if (unlikely(pskb_trim_rcsum(skb, data_size)))
return 0;
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
+ /* MAC can be src only */
+ if (!(flags & IPSET_DIM_TWO_SRC))
+ return 0;
+
data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
if (cb->args[1] >= ip_set_max)
goto out;
- pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
for (; cb->args[1] < max; cb->args[1]++) {
index = (ip_set_id_t) cb->args[1];
set = ip_set_list[index];
* so that lists (unions of sets) are dumped last.
*/
if (cb->args[0] != DUMP_ONE &&
- !((cb->args[0] == DUMP_ALL) ^
- (set->type->features & IPSET_DUMP_LAST)))
+ ((cb->args[0] == DUMP_ALL) ==
+ !!(set->type->features & IPSET_DUMP_LAST)))
continue;
pr_debug("List set: %s\n", set->name);
if (!cb->args[2]) {
goto release_refcount;
}
}
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL) {
+ cb->args[0] = DUMP_LAST;
+ cb->args[1] = 0;
+ goto dump_last;
+ }
goto out;
nla_put_failure:
pr_debug("release set %s\n", ip_set_list[index]->name);
ip_set_put_byindex(index);
}
-
- /* If we dump all sets, continue with dumping last ones */
- if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
- cb->args[0] = DUMP_LAST;
-
out:
if (nlh) {
nlmsg_end(skb, nlh);
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index,
skb, par->family,
- info->add_set.dim,
+ info->del_set.dim,
info->del_set.flags);
return XT_CONTINUE;
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
- info->del_set.flags > IPSET_DIM_MAX) {
+ info->del_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
+ if (asoc->peer.retran_path == peer)
+ asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
if (t)
asoc->peer.retran_path = t;
+ else
+ t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
return 0;
}
-static int cap_inode_permission(struct inode *inode, int mask)
+static int cap_inode_permission(struct inode *inode, int mask, unsigned flags)
{
return 0;
}
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
- return security_ops->inode_permission(inode, mask);
+ return security_ops->inode_permission(inode, mask, 0);
}
int security_inode_exec_permission(struct inode *inode, unsigned int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
- if (flags)
- return -ECHILD;
- return security_ops->inode_permission(inode, MAY_EXEC);
+ return security_ops->inode_permission(inode, MAY_EXEC, flags);
}
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
return dentry_has_perm(cred, NULL, dentry, FILE__READ);
}
-static int selinux_inode_permission(struct inode *inode, int mask)
+static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
if (!mask)
return 0;
+ /* May be droppable after audit */
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.inode = inode;
*
* Returns 0 if access is permitted, -EACCES otherwise
*/
-static int smack_inode_permission(struct inode *inode, int mask)
+static int smack_inode_permission(struct inode *inode, int mask, unsigned flags)
{
struct smk_audit_info ad;
*/
if (mask == 0)
return 0;
+
+ /* May be droppable after audit */
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
smk_ad_setfield_u_fs_inode(&ad, inode);
return smk_curacc(smk_of_inode(inode), mask, &ad);
}
EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
+#ifdef SND_HDA_NEEDS_RESUME
/* Restore the pin controls cleared previously via snd_hda_shutup_pins() */
static void restore_shutup_pins(struct hda_codec *codec)
{
}
codec->pins_shutup = 0;
}
+#endif
static void init_hda_cache(struct hda_cache_rec *cache,
unsigned int record_size);
}
}
+#ifdef SND_HDA_NEEDS_RESUME
/* clean up all streams; called from suspend */
static void hda_cleanup_all_streams(struct hda_codec *codec)
{
really_cleanup_stream(codec, p);
}
}
+#endif
/*
* amp access functions
alc_write_coef_idx(codec, 0x1e, coef | 0x80);
}
+static void alc271_fixup_dmic(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+{
+ static struct hda_verb verbs[] = {
+ {0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
+ {0x20, AC_VERB_SET_PROC_COEF, 0x4000},
+ {}
+ };
+ unsigned int cfg;
+
+ if (strcmp(codec->chip_name, "ALC271X"))
+ return;
+ cfg = snd_hda_codec_get_pincfg(codec, 0x12);
+ if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
+ snd_hda_sequence_write(codec, verbs);
+}
+
enum {
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
ALC269_FIXUP_ASUS_G73JW,
ALC269_FIXUP_LENOVO_EAPD,
ALC275_FIXUP_SONY_HWEQ,
+ ALC271_FIXUP_DMIC,
};
static const struct alc_fixup alc269_fixups[] = {
.v.func = alc269_fixup_hweq,
.chained = true,
.chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
- }
+ },
+ [ALC271_FIXUP_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc271_fixup_dmic,
+ },
};
static struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
ARRAY_SIZE(jz4740_codec_dapm_routes));
- snd_soc_dapm_new_widgets(codec);
-
jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
.owner = THIS_MODULE,
},
.probe = sn95031_device_probe,
- .remove = sn95031_device_remove,
+ .remove = __devexit_p(sn95031_device_remove),
};
static int __init sn95031_init(void)
case WM8903_REVISION_NUMBER:
case WM8903_INTERRUPT_STATUS_1:
case WM8903_WRITE_SEQUENCER_4:
- case WM8903_POWER_MANAGEMENT_3:
- case WM8903_POWER_MANAGEMENT_2:
case WM8903_DC_SERVO_READBACK_1:
case WM8903_DC_SERVO_READBACK_2:
case WM8903_DC_SERVO_READBACK_3:
SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
-SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
- 4, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
+ 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
0, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
+SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 1, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
+SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 0, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA", 1, WM8903_ANALOGUE_HP_0, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA", 1, WM8903_ANALOGUE_HP_0, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0),
{ "Left Speaker PGA", NULL, "Left Speaker Mixer" },
{ "Right Speaker PGA", NULL, "Right Speaker Mixer" },
- { "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" },
- { "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" },
- { "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" },
- { "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" },
+ { "HPL_ENA", NULL, "Left Headphone Output PGA" },
+ { "HPR_ENA", NULL, "Right Headphone Output PGA" },
+ { "HPL_ENA_DLY", NULL, "HPL_ENA" },
+ { "HPR_ENA_DLY", NULL, "HPR_ENA" },
+ { "LINEOUTL_ENA", NULL, "Left Line Output PGA" },
+ { "LINEOUTR_ENA", NULL, "Right Line Output PGA" },
+ { "LINEOUTL_ENA_DLY", NULL, "LINEOUTL_ENA" },
+ { "LINEOUTR_ENA_DLY", NULL, "LINEOUTR_ENA" },
{ "HPL_DCS", NULL, "DCS Master" },
{ "HPR_DCS", NULL, "DCS Master" },
wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch volume updates (right only; we always do left then right). */
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
+ WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
+ WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+ snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
+ WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
+ WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
+ WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+ snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
+ WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
+ snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
+ WM8994_DAC1_VU, WM8994_DAC1_VU);
snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
WM8994_DAC1_VU, WM8994_DAC1_VU);
+ snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
+ WM8994_DAC2_VU, WM8994_DAC2_VU);
snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
WM8994_DAC2_VU, WM8994_DAC2_VU);
{ "SPKL", "Input Switch", "MIXINL" },
{ "SPKL", "IN1LP Switch", "IN1LP" },
- { "SPKL", "Output Switch", "Left Output Mixer" },
+ { "SPKL", "Output Switch", "Left Output PGA" },
{ "SPKL", NULL, "TOCLK" },
{ "SPKR", "Input Switch", "MIXINR" },
{ "SPKR", "IN1RP Switch", "IN1RP" },
- { "SPKR", "Output Switch", "Right Output Mixer" },
+ { "SPKR", "Output Switch", "Right Output PGA" },
{ "SPKR", NULL, "TOCLK" },
{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
{ "SPKOUTRP", NULL, "SPKR Driver" },
{ "SPKOUTRN", NULL, "SPKR Driver" },
- { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
- { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+ { "Left Headphone Mux", "Mixer", "Left Output PGA" },
+ { "Right Headphone Mux", "Mixer", "Right Output PGA" },
{ "Headphone PGA", NULL, "Left Headphone Mux" },
{ "Headphone PGA", NULL, "Right Headphone Mux" },
static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
int state)
{
- spin_lock(&stream->status_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&stream->status_lock, flags);
stream->stream_status = state;
- spin_unlock(&stream->status_lock);
+ spin_unlock_irqrestore(&stream->status_lock, flags);
}
static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
{
int state;
+ unsigned long flags;
- spin_lock(&stream->status_lock);
+ spin_lock_irqsave(&stream->status_lock, flags);
state = stream->stream_status;
- spin_unlock(&stream->status_lock);
+ spin_unlock_irqrestore(&stream->status_lock, flags);
return state;
}
ctl = readl(regs + S3C_PCM_CTL);
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- /* Nothing to do, NB_NF by default */
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Nothing to do, IB_NF by default */
break;
default:
dev_err(pcm->dev, "Unsupported clock inversion!\n");
master->fsib.master = master;
pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
dev_set_drvdata(&pdev->dev, master);
+ pm_runtime_get_sync(&pdev->dev);
fsi_soft_all_reset(master);
+ pm_runtime_put_sync(&pdev->dev);
ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED,
id_entry->name, master);
goto exit_free_irq;
}
- return snd_soc_register_dais(&pdev->dev, fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+ ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai,
+ ARRAY_SIZE(fsi_soc_dai));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register snd dai\n");
+ goto exit_snd_soc;
+ }
+
+ return ret;
+exit_snd_soc:
+ snd_soc_unregister_platform(&pdev->dev);
exit_free_irq:
free_irq(irq, master);
exit_iounmap:
master = dev_get_drvdata(&pdev->dev);
- snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
- snd_soc_unregister_platform(&pdev->dev);
-
+ free_irq(master->irq, master);
pm_runtime_disable(&pdev->dev);
- free_irq(master->irq, master);
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
+ snd_soc_unregister_platform(&pdev->dev);
iounmap(master->base);
kfree(master);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_ALIAS("platform:fsi-pcm-audio");
runtime->hw.rates |= codec_dai_drv->capture.rates;
}
+ ret = -EINVAL;
snd_pcm_limit_hw_rates(runtime);
if (!runtime->hw.rates) {
printk(KERN_ERR "asoc: %s <-> %s No matching rates\n",
codec_dai->name, cpu_dai->name);
goto config_err;
}
- if (!runtime->hw.channels_min || !runtime->hw.channels_max) {
+ if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
+ runtime->hw.channels_min > runtime->hw.channels_max) {
printk(KERN_ERR "asoc: %s <-> %s No matching channels\n",
codec_dai->name, cpu_dai->name);
goto config_err;
.resume = snd_soc_resume,
.poweroff = snd_soc_poweroff,
};
+EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
/* ASoC platform driver */
static struct platform_driver soc_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = tegra_snd_harmony_probe,
.remove = __devexit_p(tegra_snd_harmony_remove),
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
+ attr->inherit = !no_inherit;
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
PERF_FORMAT_ID;
{
struct perf_evsel *pos;
+ if (evlist->cpus->map[0] < 0)
+ no_inherit = true;
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
/*
retry_sample_id:
attr->sample_id_all = sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
- !no_inherit) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
+ attr->inherit = !no_inherit;
+
if (system_wide)
- return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+ return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
- attr->inherit = !no_inherit;
if (target_pid == -1 && target_tid == -1) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
- return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+ return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
}
/*
goto out_thread_map_delete;
}
- if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+ if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
}
if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
- pr_debug("perf_evsel__open_read_on_cpu\n");
+ pr_debug("perf_evsel__read_on_cpu\n");
goto out_close_fd;
}
goto out_thread_map_delete;
}
- if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
continue;
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
- pr_debug("perf_evsel__open_read_on_cpu\n");
+ pr_debug("perf_evsel__read_on_cpu\n");
err = -1;
break;
}
perf_evlist__add(evlist, evsels[i]);
- if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+ if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
}
attr->mmap = 1;
+ attr->inherit = inherit;
try_again:
if (perf_evsel__open(counter, top.evlist->cpus,
- top.evlist->threads, group, inherit) < 0) {
+ top.evlist->threads, group) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
#include "evlist.h"
#include "evsel.h"
#include "util.h"
+#include "debug.h"
#include <sys/mman.h>
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
- int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int prot, int mask, int fd)
{
evlist->mmap[cpu].prev = 0;
evlist->mmap[cpu].mask = mask;
evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
- if (evlist->mmap[cpu].base == MAP_FAILED)
+ if (evlist->mmap[cpu].base == MAP_FAILED) {
+ if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+ ui__warning("Inherit is not allowed on per-task "
+ "events using mmap.\n");
return -1;
+ }
perf_evlist__add_pollfd(evlist, fd);
return 0;
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
FD(first_evsel, cpu, 0)) != 0)
goto out_unmap;
- } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+ } else if (__perf_evlist__mmap(evlist, evsel, cpu,
+ prot, mask, fd) < 0)
goto out_unmap;
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
int cpu, thread;
unsigned long flags = 0;
for (cpu = 0; cpu < cpus->nr; cpu++) {
int group_fd = -1;
- /*
- * Don't allow mmap() of inherited per-task counters. This
- * would create a performance issue due to all children writing
- * to the same buffer.
- *
- * FIXME:
- * Proper fix is not to pass 'inherit' to perf_evsel__open*,
- * but a 'flags' parameter, with 'group' folded there as well,
- * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
- * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
- * set. Lets go for the minimal fix first tho.
- */
- evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
for (thread = 0; thread < threads->nr; thread++) {
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
if (cpus == NULL) {
/* Work around old compiler warnings about strict aliasing */
if (threads == NULL)
threads = &empty_thread_map.map;
- return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+ return __perf_evsel__open(evsel, cpus, threads, group);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group, bool inherit)
+ struct cpu_map *cpus, bool group)
{
- return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
- return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group, bool inherit);
+ struct cpu_map *cpus, bool group);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group, bool inherit);
+ struct thread_map *threads, bool group);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit);
+ struct thread_map *threads, bool group);
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
struct cpu_map *cpus = NULL;
struct thread_map *threads = NULL;
PyObject *pcpus = NULL, *pthreads = NULL;
- int group = 0, overwrite = 0;
- static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+ int group = 0, inherit = 0;
+ static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
- &pcpus, &pthreads, &group, &overwrite))
+ &pcpus, &pthreads, &group, &inherit))
return NULL;
if (pthreads != NULL)
if (pcpus != NULL)
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
- if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+ evsel->attr.inherit = inherit;
+ if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
int refresh)
{
struct objdump_line *pos, *n;
- struct annotation *notes = symbol__annotation(sym);
+ struct annotation *notes;
struct annotate_browser browser = {
.b = {
- .entries = &notes->src->source,
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
ui_helpline__push("Press <- or ESC to exit");
+ notes = symbol__annotation(sym);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
struct objdump_line_rb_node *rbpos;
size_t line_len = strlen(pos->line);
rbpos->idx = browser.b.nr_entries++;
}
+ browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
ret = annotate_browser__run(&browser, evidx, refresh);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
goto out_free_stack;
case 'a':
if (browser->selection == NULL ||
- browser->selection->map == NULL ||
+ browser->selection->sym == NULL ||
browser->selection->map->dso->annotate_warned)
continue;
goto do_annotate;