Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
author    Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Jul 2008 02:04:58 +0000 (19:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Jul 2008 02:04:58 +0000 (19:04 -0700)
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (249 commits)
  powerpc: Fix pte_update for CONFIG_PTE_64BIT and !PTE_ATOMIC_UPDATES
  powerpc: Fix a build problem on ppc32 with new DMA_ATTRs
  ibm_newemac: Add MII mode support to the EMAC RGMII bridge.
  powerpc: Don't spin on sync instruction at boot time
  powerpc: Add VSX load/store alignment exception handler
  powerpc: fix giveup_vsx to save registers correctly
  powerpc: support for latencytop
  powerpc: Remove unnecessary condition when sanity-checking WIMG bits
  powerpc: Add PPC_FEATURE_PSERIES_PERFMON_COMPAT
  powerpc: Add driver for Barrier Synchronization Register
  powerpc: mman.h export fixups
  powerpc/fsl: update crypto node definition and device tree instances
  powerpc/fsl: Refactor device bindings
  powerpc/85xx: Minor fixes for 85xxds and 8536ds board.
  powerpc: Add 82xx/83xx/86xx to 6xx Multiplatform
  powerpc/85xx: publish of device for cds platforms
  powerpc/booke: don't reinitialize time base
  powerpc/86xx: Refactor pic init
  powerpc/CPM: Add i2c pins to dts and board setup
  cpm_uart: Support uart_wait_until_sent()
  ...

111 files changed:
Documentation/ftrace.txt
Documentation/scsi/aacraid.txt
block/bsg.c
crypto/Kconfig
crypto/Makefile
crypto/prng.c [deleted file]
crypto/prng.h [deleted file]
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/md/Kconfig
drivers/md/Makefile
drivers/md/dm-emc.c [deleted file]
drivers/md/dm-hw-handler.c [deleted file]
drivers/md/dm-hw-handler.h [deleted file]
drivers/md/dm-mpath-hp-sw.c [deleted file]
drivers/md/dm-mpath-rdac.c [deleted file]
drivers/md/dm-mpath.c
drivers/md/dm-mpath.h
drivers/message/fusion/lsi/mpi.h
drivers/message/fusion/lsi/mpi_cnfg.h
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptctl.h
drivers/message/fusion/mptdebug.h
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptlan.c
drivers/message/fusion/mptlan.h
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptsas.h
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptscsih.h
drivers/message/fusion/mptspi.c
drivers/s390/scsi/Makefile
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_ccw.c
drivers/s390/scsi/zfcp_cfdc.c [new file with mode: 0644]
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c [new file with mode: 0644]
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_fsf.h
drivers/s390/scsi/zfcp_qdio.c
drivers/s390/scsi/zfcp_scsi.c
drivers/s390/scsi/zfcp_sysfs.c [new file with mode: 0644]
drivers/s390/scsi/zfcp_sysfs_adapter.c [deleted file]
drivers/s390/scsi/zfcp_sysfs_driver.c [deleted file]
drivers/s390/scsi/zfcp_sysfs_port.c [deleted file]
drivers/s390/scsi/zfcp_sysfs_unit.c [deleted file]
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/aacraid/commctrl.c
drivers/scsi/aacraid/linit.c
drivers/scsi/device_handler/Kconfig [new file with mode: 0644]
drivers/scsi/device_handler/Makefile [new file with mode: 0644]
drivers/scsi/device_handler/scsi_dh.c [new file with mode: 0644]
drivers/scsi/device_handler/scsi_dh_emc.c [new file with mode: 0644]
drivers/scsi/device_handler/scsi_dh_hp_sw.c [new file with mode: 0644]
drivers/scsi/device_handler/scsi_dh_rdac.c [new file with mode: 0644]
drivers/scsi/esp_scsi.c
drivers/scsi/hosts.c
drivers/scsi/ibmvscsi/Makefile
drivers/scsi/ibmvscsi/ibmvfc.c [new file with mode: 0644]
drivers/scsi/ibmvscsi/ibmvfc.h [new file with mode: 0644]
drivers/scsi/iscsi_tcp.c
drivers/scsi/iscsi_tcp.h
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mesh.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h [moved from include/scsi/sd.h with 91% similarity]
drivers/scsi/sg.c
drivers/scsi/sym53c8xx_2/sym_misc.h
include/linux/crc-t10dif.h [new file with mode: 0644]
include/scsi/iscsi_if.h
include/scsi/iscsi_proto.h
include/scsi/libiscsi.h
include/scsi/scsi.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/scsi/scsi_dh.h [new file with mode: 0644]
include/scsi/scsi_transport_iscsi.h
include/scsi/sg.h
lib/Kconfig
lib/Makefile
lib/crc-t10dif.c [new file with mode: 0644]

index 77d3faa..f218f61 100644 (file)
@@ -4,9 +4,10 @@
 Copyright 2008 Red Hat Inc.
    Author:   Steven Rostedt <srostedt@redhat.com>
   License:   The GNU Free Documentation License, Version 1.2
-Reviewers:   Elias Oltmanns and Randy Dunlap
+Reviewers:   Elias Oltmanns, Randy Dunlap, Andrew Morton,
+            John Kacur, and David Teigland.
 
-Writen for: 2.6.26-rc8 linux-2.6-tip.git tip/tracing/ftrace branch
+Written for: 2.6.27-rc1
 
 Introduction
 ------------
@@ -18,10 +19,11 @@ issues that take place outside of user-space.
 
 Although ftrace is the function tracer, it also includes an
 infrastructure that allows for other types of tracing. Some of the
-tracers that are currently in ftrace is a tracer to trace
+tracers that are currently in ftrace include a tracer to trace
 context switches, the time it takes for a high priority task to
 run after it was woken up, the time interrupts are disabled, and
-more.
+more (ftrace allows for tracer plugins, which means that the list of
+tracers can always grow).
 
 
 The File System
@@ -35,6 +37,8 @@ To mount the debugfs system:
   # mkdir /debug
   # mount -t debugfs nodev /debug
 
+(Note: it is more common to mount at /sys/kernel/debug, but for simplicity
+ this document will use /debug)
 
 That's it! (assuming that you have ftrace configured into your kernel)
 
@@ -50,20 +54,19 @@ of ftrace. Here is a list of some of the key files:
 
   available_tracers : This holds the different types of tracers that
                have been compiled into the kernel. The tracers
-               listed here can be configured by echoing in their
-               name into current_tracer.
+               listed here can be configured by echoing their name
+               into current_tracer.
 
   tracing_enabled : This sets or displays whether the current_tracer
                is activated and tracing or not. Echo 0 into this
-               file to disable the tracer or 1 (or non-zero) to
-               enable it.
+               file to disable the tracer or 1 to enable it.
 
   trace : This file holds the output of the trace in a human readable
-               format.
+               format (described below).
 
   latency_trace : This file shows the same trace but the information
                is organized more to display possible latencies
-               in the system.
+               in the system (described below).
 
   trace_pipe : The output is the same as the "trace" file but this
                file is meant to be streamed with live tracing.
@@ -75,7 +78,7 @@ of ftrace. Here is a list of some of the key files:
                file, it is consumed, and will not be read
                again with a sequential read. The "trace" and
                "latency_trace" files are static, and if the
-               tracer isn't adding more data, they will display
+               tracer is not adding more data, they will display
                the same information every time they are read.
 
   iter_ctrl : This file lets the user control the amount of data
@@ -92,10 +95,10 @@ of ftrace. Here is a list of some of the key files:
 
   trace_entries : This sets or displays the number of trace
                entries each CPU buffer can hold. The tracer buffers
-               are the same size for each CPU, so care must be
-               taken when modifying the trace_entries. The trace
-               buffers are allocated in pages (blocks of memory that
-               the kernel uses for allocation, usually 4 KB in size).
+               are the same size for each CPU. The displayed number
+               is the size of the CPU buffer and not total size. The
+               trace buffers are allocated in pages (blocks of memory
+               that the kernel uses for allocation, usually 4 KB in size).
                Since each entry is smaller than a page, if the last
                allocated page has room for more entries than were
                requested, the rest of the page is used to allocate
@@ -112,20 +115,19 @@ of ftrace. Here is a list of some of the key files:
                on specified CPUS. The format is a hex string
                representing the CPUS.
 
-  set_ftrace_filter : When dynamic ftrace is configured in, the
-               code is dynamically modified to disable calling
-               of the function profiler (mcount). This lets
-               tracing be configured in with practically no overhead
-               in performance.  This also has a side effect of
-               enabling or disabling specific functions to be
-               traced.  Echoing in names of functions into this
-               file will limit the trace to only these functions.
-
-  set_ftrace_notrace: This has the opposite effect that
-               set_ftrace_filter has. Any function that is added
-               here will not be traced. If a function exists
-               in both set_ftrace_filter and set_ftrace_notrace,
-               the function will _not_ be traced.
+  set_ftrace_filter : When dynamic ftrace is configured in (see the
+               section below "dynamic ftrace"), the code is dynamically
+               modified (code text rewrite) to disable calling of the
+               function profiler (mcount). This lets tracing be configured
+               in with practically no overhead in performance.  This also
+               has a side effect of enabling or disabling specific functions
+               to be traced. Echoing names of functions into this file
+               will limit the trace to only those functions.
+
+  set_ftrace_notrace: This has an effect opposite to that of
+               set_ftrace_filter. Any function that is added here will not
+               be traced. If a function exists in both set_ftrace_filter
+               and set_ftrace_notrace, the function will _not_ be traced.
 
   available_filter_functions : When a function is encountered the first
                time by the dynamic tracer, it is recorded and
@@ -133,32 +135,31 @@ of ftrace. Here is a list of some of the key files:
                lists the functions that have been recorded
                by the dynamic tracer and these functions can
                be used to set the ftrace filter by the above
-               "set_ftrace_filter" file.
+               "set_ftrace_filter" file. (See the section "dynamic ftrace"
+               below for more details).
 
 
 The Tracers
 -----------
 
-Here are the list of current tracers that can be configured.
+Here is the list of current tracers that may be configured.
 
   ftrace - function tracer that uses mcount to trace all functions.
-               It is possible to filter out which functions that are
-               to be traced when dynamic ftrace is configured in.
 
   sched_switch - traces the context switches between tasks.
 
-  irqsoff - traces the areas that disable interrupts and saves off
+  irqsoff - traces the areas that disable interrupts and saves
                the trace with the longest max latency.
                See tracing_max_latency.  When a new max is recorded,
                it replaces the old trace. It is best to view this
-               trace with the latency_trace file.
+               trace via the latency_trace file.
 
-  preemptoff - Similar to irqsoff but traces and records the time
-               preemption is disabled.
+  preemptoff - Similar to irqsoff but traces and records the amount of
+               time for which preemption is disabled.
 
   preemptirqsoff - Similar to irqsoff and preemptoff, but traces and
-                records the largest time irqs and/or preemption is
-                disabled.
+                records the largest time for which irqs and/or preemption
+                is disabled.
 
   wakeup - Traces and records the max latency that it takes for
                the highest priority task to get scheduled after
@@ -171,13 +172,13 @@ Here are the list of current tracers that can be configured.
 Examples of using the tracer
 ----------------------------
 
-Here are typical examples of using the tracers with only controlling
-them with the debugfs interface (without using any user-land utilities).
+Here are typical examples of using the tracers when controlling them only
+with the debugfs interface (without using any user-land utilities).
 
 Output format:
 --------------
 
-Here's an example of the output format of the file "trace"
+Here is an example of the output format of the file "trace"
 
                              --------
 # tracer: ftrace
@@ -189,14 +190,15 @@ Here's an example of the output format of the file "trace"
             bash-4251  [01] 10152.583855: _atomic_dec_and_lock <-dput
                              --------
 
-A header is printed with the trace that is represented. In this case
-the tracer is "ftrace". Then a header showing the format. Task name
-"bash", the task PID "4251", the CPU that it was running on
+A header is printed with the name of the tracer that produced the trace.
+In this case the tracer is "ftrace". Then a header showing the format. Task
+name "bash", the task PID "4251", the CPU that it was running on
 "01", the timestamp in <secs>.<usecs> format, the function name that was
 traced "path_put" and the parent function that called this function
-"path_walk".
+"path_walk". The timestamp is the time at which the function was
+entered.
 
-The sched_switch tracer also includes tracing of task wake ups and
+The sched_switch tracer also includes tracing of task wakeups and
 context switches.
 
      ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +  2916:115:S
@@ -206,7 +208,7 @@ context switches.
      kondemand/1-2916  [01]  1453.070013:   2916:115:S ==>     7:115:R
      ksoftirqd/1-7     [01]  1453.070013:      7:115:S ==>     0:140:R
 
-Wake ups are represented by a "+" and the context switches show
+Wake ups are represented by a "+" and the context switches are shown as
 "==>".  The format is:
 
  Context switches:
@@ -221,7 +223,7 @@ Wake ups are represented by a "+" and the context switches show
 
   <pid>:<prio>:<state>    +  <pid>:<prio>:<state>
 
-The prio is the internal kernel priority, which is inverse to the
+The prio is the internal kernel priority, which is the inverse of the
 priority that is usually displayed by user-space tools. Zero represents
 the highest priority (99). Prio 100 starts the "nice" priorities with
 100 being equal to nice -20 and 139 being nice 19. The prio "140" is
@@ -232,7 +234,7 @@ Latency trace format
 --------------------
 
 For traces that display latency times, the latency_trace file gives
-a bit more information to see why a latency happened. Here's a typical
+somewhat more information to see why a latency happened. Here is a typical
 trace.
 
 # tracer: irqsoff
@@ -260,21 +262,20 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
   <idle>-0     0d.s1   98us : trace_hardirqs_on (do_softirq)
 
 
-vim:ft=help
-
 
-This shows that the current tracer is "irqsoff" tracing the time
-interrupts are disabled. It gives the trace version and the kernel
-this was executed on (2.6.26-rc8). Then it displays the max latency
-in microsecs (97 us). The number of trace entries displayed
-by the total number recorded (both are three: #3/3). The type of
+This shows that the current tracer is "irqsoff" tracing the time for which
+interrupts were disabled. It gives the trace version and the version
+of the kernel on which this was executed (2.6.26-rc8). Then it displays
+the max latency in microsecs (97 us). The number of trace entries displayed
+and the total number recorded (both are three: #3/3). The type of
 preemption that was used (PREEMPT). VP, KP, SP, and HP are always zero
-and reserved for later use. #P is the number of online CPUS (#P:2).
+and are reserved for later use. #P is the number of online CPUS (#P:2).
 
-The task is the process that was running when the latency happened.
+The task is the process that was running when the latency occurred.
 (swapper pid: 0).
 
-The start and stop that caused the latencies:
+The start and stop (the functions in which the interrupts were disabled and
+enabled respectively) that caused the latencies:
 
   apic_timer_interrupt is where the interrupts were disabled.
   do_softirq is where they were enabled again.
@@ -286,14 +287,14 @@ explains which is which.
 
   pid: The PID of that process.
 
-  CPU#: The CPU that the process was running on.
+  CPU#: The CPU on which the process was running.
 
   irqs-off: 'd' interrupts are disabled. '.' otherwise.
 
   need-resched: 'N' task need_resched is set, '.' otherwise.
 
   hardirq/softirq:
-       'H' - hard irq happened inside a softirq.
+       'H' - hard irq occurred inside a softirq.
        'h' - hard irq is running
        's' - soft irq is running
        '.' - normal context.
@@ -303,7 +304,7 @@ explains which is which.
 The above is mostly meaningful for kernel developers.
 
   time: This differs from the trace file output. The trace file output
-       included an absolute timestamp. The timestamp used by the
+       includes an absolute timestamp. The timestamp used by the
        latency_trace file is relative to the start of the trace.
 
   delay: This is just to help catch your eye a bit better. And
@@ -385,7 +386,7 @@ Here are the available options:
 sched_switch
 ------------
 
-This tracer simply records schedule switches. Here's an example
+This tracer simply records schedule switches. Here is an example
 of how to use it.
 
  # echo sched_switch > /debug/tracing/current_tracer
@@ -421,8 +422,8 @@ the name of the trace and points to the options. The "FUNCTION"
 is a misnomer since here it represents the wake ups and context
 switches.
 
-The sched_switch only lists the wake ups (represented with '+')
-and context switches ('==>') with the previous task or current
+The sched_switch file only lists the wake ups (represented with '+')
+and context switches ('==>') with the previous task or current task
 first followed by the next task or task waking up. The format for both
 of these is PID:KERNEL-PRIO:TASK-STATE. Remember that the KERNEL-PRIO
 is the inverse of the actual priority with zero (0) being the highest
@@ -437,7 +438,8 @@ The task states are:
 
  R - running : wants to run, may not actually be running
  S - sleep   : process is waiting to be woken up (handles signals)
- D - deep sleep : process must be woken up (ignores signals)
+ D - disk sleep (uninterruptible sleep) : process must be woken up
+                                       (ignores signals)
  T - stopped : process suspended
  t - traced  : process is being traced (with something like gdb)
  Z - zombie  : process waiting to be cleaned up
@@ -447,8 +449,8 @@ The task states are:
 ftrace_enabled
 --------------
 
-The following tracers give different output depending on whether
-or not the sysctl ftrace_enabled is set. To set ftrace_enabled,
+The following tracers give different output depending on
+whether or not the sysctl ftrace_enabled is set. To set ftrace_enabled,
 one can either use the sysctl function or set it via the proc
 file system interface.
 
@@ -475,13 +477,12 @@ interrupt from triggering or the mouse interrupt from letting the
 kernel know of a new mouse event. The result is a latency with the
 reaction time.
 
-The irqsoff tracer tracks the time interrupts are disabled to the time
-they are re-enabled. When a new maximum latency is hit, it saves off
-the trace so that it may be retrieved at a later time. Every time a
-new maximum in reached, the old saved trace is discarded and the new
-trace is saved.
+The irqsoff tracer tracks the time for which interrupts are disabled.
+When a new maximum latency is hit, the tracer saves the trace leading up
+to that latency point so that it can be retrieved later. Each time a new
+maximum is reached, the old saved trace is discarded and the new trace is saved.
 
-To reset the maximum, echo 0 into tracing_max_latency. Here's an
+To reset the maximum, echo 0 into tracing_max_latency. Here is an
 example:
 
  # echo irqsoff > /debug/tracing/current_tracer
@@ -493,14 +494,14 @@ example:
  # cat /debug/tracing/latency_trace
 # tracer: irqsoff
 #
-irqsoff latency trace v1.1.5 on 2.6.26-rc8
+irqsoff latency trace v1.1.5 on 2.6.26
 --------------------------------------------------------------------
- latency: 6 us, #3/3, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
+ latency: 12 us, #3/3, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
     -----------------
-    | task: bash-4269 (uid:0 nice:0 policy:0 rt_prio:0)
+    | task: bash-3730 (uid:0 nice:0 policy:0 rt_prio:0)
     -----------------
- => started at: copy_page_range
- => ended at:   copy_page_range
+ => started at: sys_setpgid
+ => ended at:   sys_setpgid
 
 #                _------=> CPU#
 #               / _-----=> irqs-off
@@ -511,21 +512,19 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
 #              |||||     delay
 #  cmd     pid ||||| time  |   caller
 #     \   /    |||||   \   |   /
-    bash-4269  1...1    0us+: _spin_lock (copy_page_range)
-    bash-4269  1...1    7us : _spin_unlock (copy_page_range)
-    bash-4269  1...2    7us : trace_preempt_on (copy_page_range)
-
+    bash-3730  1d...    0us : _write_lock_irq (sys_setpgid)
+    bash-3730  1d..1    1us+: _write_unlock_irq (sys_setpgid)
+    bash-3730  1d..2   14us : trace_hardirqs_on (sys_setpgid)
 
-vim:ft=help
 
-Here we see that that we had a latency of 6 microsecs (which is
-very good). The spin_lock in copy_page_range disabled interrupts.
-The difference between the 6 and the displayed timestamp 7us is
-because the clock must have incremented between the time of recording
-the max latency and recording the function that had that latency.
+Here we see that we had a latency of 12 microsecs (which is
+very good). The _write_lock_irq in sys_setpgid disabled interrupts.
+The difference between the 12 and the displayed timestamp 14us occurred
+because the clock was incremented between the time of recording the max
+latency and the time of recording the function that had that latency.
 
-Note the above had ftrace_enabled not set. If we set the ftrace_enabled,
-we get a much larger output:
+Note that the above example had ftrace_enabled not set. If we set
+ftrace_enabled, we get a much larger output:
 
 # tracer: irqsoff
 #
@@ -571,12 +570,10 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
       ls-4339  0d..2   51us : trace_hardirqs_on (__alloc_pages_internal)
 
 
-vim:ft=help
-
 
 Here we traced a 50 microsecond latency. But we also see all the
 functions that were called during that time. Note that by enabling
-function tracing, we endure an added overhead. This overhead may
+function tracing, we incur an added overhead. This overhead may
 extend the latency times. But nevertheless, this trace has provided
 some very helpful debugging information.
 
@@ -590,8 +587,9 @@ for preemption to be enabled again before it can preempt a lower
 priority task.
 
 The preemptoff tracer traces the places that disable preemption.
-Like the irqsoff, it records the maximum latency that preemption
-was disabled. The control of preemptoff is much like the irqsoff.
+Like the irqsoff tracer, it records the maximum latency for which preemption
+was disabled. The control of the preemptoff tracer is much like that of
+the irqsoff tracer.
 
  # echo preemptoff > /debug/tracing/current_tracer
  # echo 0 > /debug/tracing/tracing_max_latency
@@ -625,8 +623,6 @@ preemptoff latency trace v1.1.5 on 2.6.26-rc8
     sshd-4261  0d.s1   30us : trace_preempt_on (__do_softirq)
 
 
-vim:ft=help
-
 This has some more changes. Preemption was disabled when an interrupt
 came in (notice the 'h'), and was enabled while doing a softirq.
 (notice the 's'). But we also see that interrupts have been disabled
@@ -694,16 +690,16 @@ The above is an example of the preemptoff trace with ftrace_enabled
 set. Here we see that interrupts were disabled the entire time.
 The irq_enter code lets us know that we entered an interrupt 'h'.
 Before that, the functions being traced still show that it is not
-in an interrupt, but we can see by the functions themselves that
+in an interrupt, but we can see from the functions themselves that
 this is not the case.
 
-Notice that the __do_softirq when called doesn't have a preempt_count.
-It may seem that we missed a preempt enabled. What really happened
-is that the preempt count is held on the threads stack and we
+Notice that __do_softirq when called does not have a preempt_count.
+It may seem that we missed a preempt enabling. What really happened
+is that the preempt count is held on the thread's stack and we
 switched to the softirq stack (4K stacks in effect). The code
 does not copy the preempt count, but because interrupts are disabled,
-we don't need to worry about it. Having a tracer like this is good
-to let people know what really happens inside the kernel.
+we do not need to worry about it. Having a tracer like this is good
+for letting people know what really happens inside the kernel.
 
 
 preemptirqsoff
@@ -713,7 +709,7 @@ Knowing the locations that have interrupts disabled or preemption
 disabled for the longest times is helpful. But sometimes we would
 like to know when either preemption and/or interrupts are disabled.
 
-The following code:
+Consider the following code:
 
     local_irq_disable();
     call_function_with_irqs_off();
@@ -769,12 +765,10 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
       ls-4860  0d.s1  294us : trace_preempt_on (__do_softirq)
 
 
-vim:ft=help
-
 
 The trace_hardirqs_off_thunk is called from assembly on x86 when
 interrupts are disabled in the assembly code. Without the function
-tracing, we don't know if interrupts were enabled within the preemption
+tracing, we do not know if interrupts were enabled within the preemption
 points. We do see that it started with preemption enabled.
 
 Here is a trace with ftrace_enabled set:
@@ -865,19 +859,19 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
 
 This is a very interesting trace. It started with the preemption of
 the ls task. We see that the task had the "need_resched" bit set
-with the 'N' in the trace.  Interrupts are disabled in the spin_lock
-and the trace started. We see that a schedule took place to run
+via the 'N' in the trace.  Interrupts were disabled before the spin_lock
+at the beginning of the trace. We see that a schedule took place to run
 sshd.  When the interrupts were enabled, we took an interrupt.
 On return from the interrupt handler, the softirq ran. We took another
-interrupt while running the softirq as we see with the capital 'H'.
+interrupt while running the softirq as we see from the capital 'H'.
 
 
 wakeup
 ------
 
-In Real-Time environment it is very important to know the wakeup
-time it takes for the highest priority task that wakes up to the
-time it executes. This is also known as "schedule latency".
+In a Real-Time environment it is very important to know the time it
+takes from when the highest priority task is woken up to when it
+actually executes. This is also known as "schedule latency".
 I stress the point that this is about RT tasks. It is also important
 to know the scheduling latency of non-RT tasks, but the average
 schedule latency is better for non-RT tasks. Tools like
@@ -926,8 +920,6 @@ wakeup latency trace v1.1.5 on 2.6.26-rc8
   <idle>-0     1d..4    4us : schedule (cpu_idle)
 
 
-vim:ft=help
-
 
 Running this on an idle system, we see that it only took 4 microseconds
 to perform the task switch.  Note, since the trace marker in the
@@ -996,15 +988,15 @@ ksoftirq-7     1d..6   49us : sub_preempt_count (_spin_unlock)
 ksoftirq-7     1d..4   50us : schedule (__cond_resched)
 
 The interrupt went off while running ksoftirqd. This task runs at
-SCHED_OTHER. Why didn't we see the 'N' set early? This may be
+SCHED_OTHER. Why did we not see the 'N' set early? This may be
 a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K stacks
-configured, the interrupt and softirq runs with their own stack.
+configured, the interrupt and softirq run with their own stack.
 Some information is held on the top of the task's stack (need_resched
 and preempt_count are both stored there). The setting of the NEED_RESCHED
 bit is done directly to the task's stack, but the reading of the
 NEED_RESCHED is done by looking at the current stack, which in this case
 is the stack for the hard interrupt. This hides the fact that NEED_RESCHED
-has been set. We don't see the 'N' until we switch back to the task's
+has been set. We do not see the 'N' until we switch back to the task's
 assigned stack.
 
 ftrace
@@ -1044,14 +1036,14 @@ this tracer is a nop.
 [...]
 
 
-Note: It is sometimes better to enable or disable tracing directly from
-a program, because the buffer may be overflowed by the echo commands
-before you get to the point you want to trace. It is also easier to
-stop the tracing at the point that you hit the part that you are
-interested in. Since the ftrace buffer is a ring buffer with the
-oldest data being overwritten, usually it is sufficient to start the
-tracer with an echo command but have you code stop it. Something
-like the following is usually appropriate for this.
+Note: ftrace uses ring buffers to store the above entries. The newest data
+may overwrite the oldest data. Sometimes using echo to stop the trace
+is not sufficient because the tracing could have overwritten the data
+that you wanted to record. For this reason, it is sometimes better to
+disable tracing directly from a program. This allows you to stop the
+tracing at the point that you hit the part that you are interested in.
+To disable the tracing directly from a C program, something like the
+following code snippet can be used:
 
 int trace_fd;
 [...]
@@ -1060,20 +1052,26 @@ int main(int argc, char *argv[]) {
        trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
        [...]
        if (condition_hit()) {
-       write(trace_fd, "0", 1);
+               write(trace_fd, "0", 1);
        }
        [...]
 }
 
+Note: Here we hard-coded the path name. The debugfs mount is not
+guaranteed to be at /debug (and is more commonly at /sys/kernel/debug).
+For simple one-time traces, the above is sufficient. For anything else,
+a search through /proc/mounts may be needed to find where the debugfs
+file-system is mounted.
 
 dynamic ftrace
 --------------
 
-If CONFIG_DYNAMIC_FTRACE is set, then the system will run with
+If CONFIG_DYNAMIC_FTRACE is set, the system will run with
 virtually no overhead when function tracing is disabled. The way
 this works is the mcount function call (placed at the start of
 every kernel function, produced by the -pg switch in gcc), starts
-of pointing to a simple return.
+off pointing to a simple return. (Enabling FTRACE will include the
+-pg switch when compiling the kernel.)
 
 When dynamic ftrace is initialized, it calls kstop_machine to make
 the machine act like a uniprocessor so that it can freely modify code
@@ -1086,15 +1084,15 @@ Later on the ftraced kernel thread is awoken and will again call
 kstop_machine if new functions have been recorded. The ftraced thread
 will change all calls to mcount to "nop".  Just calling mcount
 and having mcount return has shown a 10% overhead. By converting
-it to a nop, there is no recordable overhead to the system.
+it to a nop, there is no measurable overhead to the system.
 
 One special side-effect to the recording of the functions being
-traced, is that we can now selectively choose which functions we
-want to trace and which ones we want the mcount calls to remain as
+traced is that we can now selectively choose which functions we
+wish to trace and which ones we want the mcount calls to remain as
 nops.
 
 Two files are used, one for enabling and one for disabling the tracing
-of recorded functions. They are:
+of specified functions. They are:
 
   set_ftrace_filter
 
@@ -1116,7 +1114,7 @@ pick_next_task_fair
 mutex_lock
 [...]
 
-If I'm only interested in sys_nanosleep and hrtimer_interrupt:
+If I am only interested in sys_nanosleep and hrtimer_interrupt:
 
  # echo sys_nanosleep hrtimer_interrupt \
                > /debug/tracing/set_ftrace_filter
@@ -1133,21 +1131,21 @@ If I'm only interested in sys_nanosleep and hrtimer_interrupt:
           usleep-4134  [00]  1317.070111: sys_nanosleep <-syscall_call
           <idle>-0     [00]  1317.070115: hrtimer_interrupt <-smp_apic_timer_interrupt
 
-To see what functions are being traced, you can cat the file:
+To see which functions are being traced, you can cat the file:
 
  # cat /debug/tracing/set_ftrace_filter
 hrtimer_interrupt
 sys_nanosleep
 
 
-Perhaps this isn't enough. The filters also allow simple wild cards.
+Perhaps this is not enough. The filters also allow simple wild cards.
 Only the following are currently available
 
   <match>*  - will match functions that begin with <match>
   *<match>  - will match functions that end with <match>
   *<match>* - will match functions that have <match> in it
 
-Thats all the wild cards that are allowed.
+These are the only wild cards which are supported.
 
   <match>*<match> will not work.
 
@@ -1258,15 +1256,15 @@ calls that need to be converted into nops. If there are not any, then
 it simply goes back to sleep. But if there are some, it will call
 kstop_machine to convert the calls to nops.
 
-There may be a case that you do not want this added latency.
+There may be a case in which you do not want this added latency.
 Perhaps you are doing some audio recording and this activity might
 cause skips in the playback. There is an interface to disable
-and enable the ftraced kernel thread.
+and enable the "ftraced" kernel thread.
 
  # echo 0 > /debug/tracing/ftraced_enabled
 
-This will disable the calling of the kstop_machine to update the
-mcount calls to nops. Remember that there's a large overhead
+This will disable the calling of kstop_machine to update the
+mcount calls to nops. Remember that there is a large overhead
 to calling mcount. Without this kernel thread, that overhead will
 exist.
 
@@ -1282,8 +1280,8 @@ that uses ftrace function recording.
 trace_pipe
 ----------
 
-The trace_pipe outputs the same as trace, but the effect on the
-tracing is different. Every read from trace_pipe is consumed.
+The trace_pipe outputs the same content as the trace file, but the effect
+on the tracing is different. Every read from trace_pipe is consumed.
 This means that subsequent reads will be different. The trace
 is live.
 
@@ -1313,7 +1311,7 @@ is live.
             bash-4043  [00] 41.267111: select_task_rq_rt <-try_to_wake_up
 
 
-Note, reading the trace_pipe will block until more input is added.
+Note, reading the trace_pipe file will block until more input is added.
 By changing the tracer, trace_pipe will issue an EOF. We needed
 to set the ftrace tracer _before_ cating the trace_pipe file.
 
@@ -1322,7 +1320,7 @@ trace entries
 -------------
 
 Having too much or not enough data can be troublesome in diagnosing
-some issue in the kernel. The file trace_entries is used to modify
+an issue in the kernel. The file trace_entries is used to modify
 the size of the internal trace buffers. The number listed
 is the number of entries that can be recorded per CPU. To know
 the full size, multiply the number of possible CPUS with the
@@ -1332,7 +1330,8 @@ number of entries.
 65620
 
 Note, to modify this, you must have tracing completely disabled. To do that,
-echo "none" into the current_tracer.
+echo "none" into the current_tracer. If the current_tracer is not set
+to "none", an EINVAL error will be returned.
 
  # echo none > /debug/tracing/current_tracer
  # echo 100000 > /debug/tracing/trace_entries
@@ -1341,18 +1340,18 @@ echo "none" into the current_tracer.
 
 
 Notice that we echoed in 100,000 but the size is 100,045. The entries
-are held by individual pages. It allocates the number of pages it takes
+are held in individual pages. It allocates the number of pages it takes
 to fulfill the request. If more entries may fit on the last page
-it will add them.
+then they will be added.
 
  # echo 1 > /debug/tracing/trace_entries
  # cat /debug/tracing/trace_entries
 85
 
-This shows us that 85 entries can fit on a single page.
+This shows us that 85 entries can fit in a single page.
 
-The number of pages that will be allocated is a percentage of available
-memory. Allocating too much will produce an error.
+The number of pages which will be allocated is limited to a percentage
+of available memory. Allocating too much will produce an error.
 
  # echo 1000000000000 > /debug/tracing/trace_entries
 -bash: echo: write error: Cannot allocate memory
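
The note added to Documentation/ftrace.txt above points out that debugfs is not
guaranteed to be mounted at /debug and that /proc/mounts may have to be searched
to find it. Below is a minimal standalone sketch of that idea, assuming only that
debugfs is mounted somewhere and that the tracing files live under
<debugfs>/tracing; the /sys/kernel/debug fallback and the buffer sizes are
illustrative choices, not part of the patch.

/* Sketch: locate the debugfs mount point via /proc/mounts and use it to
 * stop tracing, as suggested in the ftrace.txt note above. The
 * "/sys/kernel/debug" fallback is an assumption, not taken from the patch. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

/* Scan /proc/mounts for a debugfs entry; returns 0 on success. */
static int find_debugfs(char *path, size_t len)
{
	char dev[64], dir[256], type[64];
	FILE *mounts = fopen("/proc/mounts", "r");

	if (!mounts)
		return -1;
	while (fscanf(mounts, "%63s %255s %63s %*[^\n]", dev, dir, type) == 3) {
		if (strcmp(type, "debugfs") == 0) {
			snprintf(path, len, "%s", dir);
			fclose(mounts);
			return 0;
		}
	}
	fclose(mounts);
	return -1;
}

int main(void)
{
	char debugfs[256], ctrl[512];
	int fd;

	if (find_debugfs(debugfs, sizeof(debugfs)) < 0)
		snprintf(debugfs, sizeof(debugfs), "/sys/kernel/debug");

	snprintf(ctrl, sizeof(ctrl), "%s/tracing/tracing_enabled", debugfs);
	fd = open(ctrl, O_WRONLY);
	if (fd < 0) {
		perror(ctrl);
		return 1;
	}
	/* Echoing '0' disables the current tracer, as described above. */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Compiled on its own, this mirrors the write(trace_fd, "0", 1) pattern from the
documentation snippet without hard-coding /debug.
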
index d16011a..709ca99 100644 (file)
@@ -56,19 +56,33 @@ Supported Cards/Chipsets
        9005:0285:9005:02d1     Adaptec 5405 (Voodoo40)
        9005:0285:15d9:02d2     SMC     AOC-USAS-S8i-LP
        9005:0285:15d9:02d3     SMC     AOC-USAS-S8iR-LP
-       9005:0285:9005:02d4     Adaptec 2045 (Voodoo04 Lite)
-       9005:0285:9005:02d5     Adaptec 2405 (Voodoo40 Lite)
-       9005:0285:9005:02d6     Adaptec 2445 (Voodoo44 Lite)
-       9005:0285:9005:02d7     Adaptec 2805 (Voodoo80 Lite)
+       9005:0285:9005:02d4     Adaptec ASR-2045 (Voodoo04 Lite)
+       9005:0285:9005:02d5     Adaptec ASR-2405 (Voodoo40 Lite)
+       9005:0285:9005:02d6     Adaptec ASR-2445 (Voodoo44 Lite)
+       9005:0285:9005:02d7     Adaptec ASR-2805 (Voodoo80 Lite)
+       9005:0285:9005:02d8     Adaptec 5405G (Voodoo40 PM)
+       9005:0285:9005:02d9     Adaptec 5445G (Voodoo44 PM)
+       9005:0285:9005:02da     Adaptec 5805G (Voodoo80 PM)
+       9005:0285:9005:02db     Adaptec 5085G (Voodoo08 PM)
+       9005:0285:9005:02dc     Adaptec 51245G (Voodoo124 PM)
+       9005:0285:9005:02dd     Adaptec 51645G (Voodoo164 PM)
+       9005:0285:9005:02de     Adaptec 52445G (Voodoo244 PM)
+       9005:0285:9005:02df     Adaptec ASR-2045G (Voodoo04 Lite PM)
+       9005:0285:9005:02e0     Adaptec ASR-2405G (Voodoo40 Lite PM)
+       9005:0285:9005:02e1     Adaptec ASR-2445G (Voodoo44 Lite PM)
+       9005:0285:9005:02e2     Adaptec ASR-2805G (Voodoo80 Lite PM)
        1011:0046:9005:0364     Adaptec 5400S (Mustang)
+       1011:0046:9005:0365     Adaptec 5400S (Mustang)
        9005:0287:9005:0800     Adaptec Themisto (Jupiter)
        9005:0200:9005:0200     Adaptec Themisto (Jupiter)
        9005:0286:9005:0800     Adaptec Callisto (Jupiter)
        1011:0046:9005:1364     Dell    PERC 2/QC (Quad Channel, Mustang)
+       1011:0046:9005:1365     Dell    PERC 2/QC (Quad Channel, Mustang)
        1028:0001:1028:0001     Dell    PERC 2/Si (Iguana)
        1028:0003:1028:0003     Dell    PERC 3/Si (SlimFast)
        1028:0002:1028:0002     Dell    PERC 3/Di (Opal)
-       1028:0004:1028:0004     Dell    PERC 3/DiF (Iguana)
+       1028:0004:1028:0004     Dell    PERC 3/SiF (Iguana)
+       1028:0004:1028:00d0     Dell    PERC 3/DiF (Iguana)
        1028:0002:1028:00d1     Dell    PERC 3/DiV (Viper)
        1028:0002:1028:00d9     Dell    PERC 3/DiL (Lexus)
        1028:000a:1028:0106     Dell    PERC 3/DiJ (Jaguar)
index 0b3b282..5fb9b0b 100644 (file)
@@ -740,8 +740,13 @@ static int bsg_put_device(struct bsg_device *bd)
        mutex_lock(&bsg_mutex);
 
        do_free = atomic_dec_and_test(&bd->ref_count);
-       if (!do_free)
+       if (!do_free) {
+               mutex_unlock(&bsg_mutex);
                goto out;
+       }
+
+       hlist_del(&bd->dev_list);
+       mutex_unlock(&bsg_mutex);
 
        dprintk("%s: tearing down\n", bd->name);
 
@@ -757,10 +762,8 @@ static int bsg_put_device(struct bsg_device *bd)
         */
        ret = bsg_complete_all_commands(bd);
 
-       hlist_del(&bd->dev_list);
        kfree(bd);
 out:
-       mutex_unlock(&bsg_mutex);
        kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
        if (do_free)
                blk_put_queue(q);
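
The block/bsg.c hunk above moves the hlist_del() and the release of bsg_mutex so
that, once the last reference is dropped, the device is unlinked from the shared
list while the mutex is still held, and the slow teardown then runs unlocked. A
rough userspace analogue of that ordering is sketched below using pthreads and a
toy singly linked list; every name in it is invented for illustration and none of
it is kernel API.

/* Illustrative analogue of the bsg_put_device() reordering: unlink under the
 * lock once the last reference is dropped, then do the slow teardown with the
 * lock released. All names and types here are made up. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct toy_device {
	struct toy_device *next;
	atomic_int refcount;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_device *device_list;

static void unlink_device(struct toy_device *dev)
{
	struct toy_device **p = &device_list;

	while (*p && *p != dev)
		p = &(*p)->next;
	if (*p)
		*p = dev->next;
}

static void put_device(struct toy_device *dev)
{
	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&dev->refcount, 1) != 1) {
		/* Not the last reference: nothing else to do under the lock. */
		pthread_mutex_unlock(&list_lock);
		return;
	}
	/* Last reference: remove from the shared list while still locked... */
	unlink_device(dev);
	pthread_mutex_unlock(&list_lock);

	/* ...then run the slow teardown without holding the lock. */
	printf("tearing down device %d\n", dev->id);
	free(dev);
}

int main(void)
{
	struct toy_device *dev = calloc(1, sizeof(*dev));

	dev->id = 1;
	atomic_init(&dev->refcount, 1);
	dev->next = device_list;
	device_list = dev;

	put_device(dev);
	return 0;
}

The point of the ordering is that no other thread can find the device on the list
once the last reference is gone, while the expensive teardown no longer serializes
against lookups.
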
index ea50357..d831859 100644 (file)
@@ -666,15 +666,6 @@ config CRYPTO_LZO
        help
          This is the LZO algorithm.
 
-comment "Random Number Generation"
-
-config CRYPTO_PRNG
-       tristate "Pseudo Random Number Generation for Cryptographic modules"
-       help
-         This option enables the generic pseudo random number generator
-         for cryptographic modules.  Uses the Algorithm specified in
-         ANSI X9.31 A.2.4
-
 source "drivers/crypto/Kconfig"
 
 endif  # if CRYPTO
index ef61b3b..d4f3ed8 100644 (file)
@@ -69,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
-obj-$(CONFIG_CRYPTO_PRNG) += prng.o
+
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 
 #
diff --git a/crypto/prng.c b/crypto/prng.c
deleted file mode 100644 (file)
index 24e4f32..0000000
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * PRNG: Pseudo Random Number Generator
- *       Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
- *       AES 128 cipher in RFC3686 ctr mode
- *
- *  (C) Neil Horman <nhorman@tuxdriver.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; either version 2 of the License, or (at your
- *  any later version.
- *
- *
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/highmem.h>
-#include <linux/moduleparam.h>
-#include <linux/jiffies.h>
-#include <linux/timex.h>
-#include <linux/interrupt.h>
-#include <linux/miscdevice.h>
-#include "prng.h"
-
-#define TEST_PRNG_ON_START 0
-
-#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
-#define DEFAULT_PRNG_KSZ 20
-#define DEFAULT_PRNG_IV "defaultv"
-#define DEFAULT_PRNG_IVSZ 8
-#define DEFAULT_BLK_SZ 16
-#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
-
-/*
- * Flags for the prng_context flags field
- */
-
-#define PRNG_FIXED_SIZE 0x1
-#define PRNG_NEED_RESET 0x2
-
-/*
- * Note: DT is our counter value
- *      I is our intermediate value
- *      V is our seed vector
- * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
- * for implementation details
- */
-
-
-struct prng_context {
-       char *prng_key;
-       char *prng_iv;
-       spinlock_t prng_lock;
-       unsigned char rand_data[DEFAULT_BLK_SZ];
-       unsigned char last_rand_data[DEFAULT_BLK_SZ];
-       unsigned char DT[DEFAULT_BLK_SZ];
-       unsigned char I[DEFAULT_BLK_SZ];
-       unsigned char V[DEFAULT_BLK_SZ];
-       u32 rand_data_valid;
-       struct crypto_blkcipher *tfm;
-       u32 flags;
-};
-
-static int dbg;
-
-static void hexdump(char *note, unsigned char *buf, unsigned int len)
-{
-       if (dbg) {
-               printk(KERN_CRIT "%s", note);
-               print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
-                               16, 1,
-                               buf, len, false);
-       }
-}
-
-#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
-
-static void xor_vectors(unsigned char *in1, unsigned char *in2,
-                       unsigned char *out, unsigned int size)
-{
-       int i;
-
-       for (i=0;i<size;i++)
-               out[i] = in1[i] ^ in2[i];
-
-}
-/*
- * Returns DEFAULT_BLK_SZ bytes of random data per call
- * returns 0 if generation succeded, <0 if something went wrong
- */
-static int _get_more_prng_bytes(struct prng_context *ctx)
-{
-       int i;
-       struct blkcipher_desc desc;
-       struct scatterlist sg_in, sg_out;
-       int ret;
-       unsigned char tmp[DEFAULT_BLK_SZ];
-
-       desc.tfm = ctx->tfm;
-       desc.flags = 0;
-
-
-       dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
-
-       hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
-       hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
-       hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
-
-       /*
-        * This algorithm is a 3 stage state machine
-        */
-       for (i=0;i<3;i++) {
-
-               desc.tfm = ctx->tfm;
-               desc.flags = 0;
-               switch (i) {
-                       case 0:
-                               /*
-                                * Start by encrypting the counter value
-                                * This gives us an intermediate value I
-                                */
-                               memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
-                               sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
-                               hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
-                               break;
-                       case 1:
-
-                               /*
-                                * Next xor I with our secret vector V
-                                * encrypt that result to obtain our
-                                * pseudo random data which we output
-                                */
-                               xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
-                               sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
-                               hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
-                               break;
-                       case 2:
-                               /*
-                                * First check that we didn't produce the same random data
-                                * that we did last time around through this
-                                */
-                               if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
-                                       printk(KERN_ERR "ctx %p Failed repetition check!\n",
-                                               ctx);
-                                       ctx->flags |= PRNG_NEED_RESET;
-                                       return -1;
-                               }
-                               memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
-
-                               /*
-                                * Lastly xor the random data with I
-                                * and encrypt that to obtain a new secret vector V
-                                */
-                               xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
-                               sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
-                               hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
-                               break;
-               }
-
-               /* Initialize our input buffer */
-               sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
-
-               /* do the encryption */
-               ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
-
-               /* And check the result */
-               if (ret) {
-                       dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
-                       ctx->rand_data_valid = DEFAULT_BLK_SZ;
-                       return -1;
-               }
-
-       }
-
-       /*
-        * Now update our DT value
-        */
-       for (i=DEFAULT_BLK_SZ-1;i>0;i--) {
-               ctx->DT[i] = ctx->DT[i-1];
-       }
-       ctx->DT[0] += 1;
-
-       dbgprint("Returning new block for context %p\n",ctx);
-       ctx->rand_data_valid = 0;
-
-       hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
-       hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
-       hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
-       hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
-
-       return 0;
-}
-
-/* Our exported functions */
-int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
-{
-       unsigned long flags;
-       unsigned char *ptr = buf;
-       unsigned int byte_count = (unsigned int)nbytes;
-       int err;
-
-
-       if (nbytes < 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&ctx->prng_lock, flags);
-
-       err = -EFAULT;
-       if (ctx->flags & PRNG_NEED_RESET)
-               goto done;
-
-       /*
-        * If the FIXED_SIZE flag is on, only return whole blocks of
-        * pseudo random data
-        */
-       err = -EINVAL;
-       if (ctx->flags & PRNG_FIXED_SIZE) {
-               if (nbytes < DEFAULT_BLK_SZ)
-                       goto done;
-               byte_count = DEFAULT_BLK_SZ;
-       }
-
-       err = byte_count;
-
-       dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
-
-
-remainder:
-       if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
-               if (_get_more_prng_bytes(ctx) < 0) {
-                       memset(buf, 0, nbytes);
-                       err = -EFAULT;
-                       goto done;
-               }
-       }
-
-       /*
-        * Copy up to the next whole block size
-        */
-       if (byte_count < DEFAULT_BLK_SZ) {
-               for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
-                       *ptr = ctx->rand_data[ctx->rand_data_valid];
-                       ptr++;
-                       byte_count--;
-                       if (byte_count == 0)
-                               goto done;
-               }
-       }
-
-       /*
-        * Now copy whole blocks
-        */
-       for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
-               if (_get_more_prng_bytes(ctx) < 0) {
-                       memset(buf, 0, nbytes);
-                       err = -1;
-                       goto done;
-               }
-               memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
-               ctx->rand_data_valid += DEFAULT_BLK_SZ;
-               ptr += DEFAULT_BLK_SZ;
-       }
-
-       /*
-        * Now copy any extra partial data
-        */
-       if (byte_count)
-               goto remainder;
-
-done:
-       spin_unlock_irqrestore(&ctx->prng_lock, flags);
-       dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
-       return err;
-}
-EXPORT_SYMBOL_GPL(get_prng_bytes);
-
-struct prng_context *alloc_prng_context(void)
-{
-       struct prng_context *ctx=kzalloc(sizeof(struct prng_context), GFP_KERNEL);
-
-       spin_lock_init(&ctx->prng_lock);
-
-       if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
-               kfree(ctx);
-               ctx = NULL;
-       }
-
-       dbgprint(KERN_CRIT "returning context %p\n",ctx);
-       return ctx;
-}
-
-EXPORT_SYMBOL_GPL(alloc_prng_context);
-
-void free_prng_context(struct prng_context *ctx)
-{
-       crypto_free_blkcipher(ctx->tfm);
-       kfree(ctx);
-}
-EXPORT_SYMBOL_GPL(free_prng_context);
-
-int reset_prng_context(struct prng_context *ctx,
-                      unsigned char *key, unsigned char *iv,
-                      unsigned char *V, unsigned char *DT)
-{
-       int ret;
-       int iv_len;
-       int rc = -EFAULT;
-
-       spin_lock(&ctx->prng_lock);
-       ctx->flags |= PRNG_NEED_RESET;
-
-       if (key)
-               memcpy(ctx->prng_key,key,strlen(ctx->prng_key));
-       else
-               ctx->prng_key = DEFAULT_PRNG_KEY;
-
-       if (iv)
-               memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv));
-       else
-               ctx->prng_iv = DEFAULT_PRNG_IV;
-
-       if (V)
-               memcpy(ctx->V,V,DEFAULT_BLK_SZ);
-       else
-               memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
-
-       if (DT)
-               memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
-       else
-               memset(ctx->DT, 0, DEFAULT_BLK_SZ);
-
-       memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
-       memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
-
-       if (ctx->tfm)
-               crypto_free_blkcipher(ctx->tfm);
-
-       ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0);
-       if (!ctx->tfm) {
-               dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm);
-               goto out;
-       }
-
-       ctx->rand_data_valid = DEFAULT_BLK_SZ;
-
-       ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
-       if (ret) {
-               dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
-                       crypto_blkcipher_get_flags(ctx->tfm));
-               crypto_free_blkcipher(ctx->tfm);
-               goto out;
-       }
-
-       iv_len = crypto_blkcipher_ivsize(ctx->tfm);
-       if (iv_len) {
-               crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
-       }
-       rc = 0;
-       ctx->flags &= ~PRNG_NEED_RESET;
-out:
-       spin_unlock(&ctx->prng_lock);
-
-       return rc;
-
-}
-EXPORT_SYMBOL_GPL(reset_prng_context);
-
-/* Module initalization */
-static int __init prng_mod_init(void)
-{
-
-#ifdef TEST_PRNG_ON_START
-       int i;
-       unsigned char tmpbuf[DEFAULT_BLK_SZ];
-
-       struct prng_context *ctx = alloc_prng_context();
-       if (ctx == NULL)
-               return -EFAULT;
-       for (i=0;i<16;i++) {
-               if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
-                       free_prng_context(ctx);
-                       return -EFAULT;
-               }
-       }
-       free_prng_context(ctx);
-#endif
-
-       return 0;
-}
-
-static void __exit prng_mod_fini(void)
-{
-       return;
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
-MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
-module_param(dbg, int, 0);
-MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
-module_exit(prng_mod_fini);
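
The comments in the crypto/prng.c file deleted above describe the ANSI X9.31
A.2.4 construction as a three-stage state machine: I = E(DT), output
R = E(I xor V), then V = E(R xor I), followed by a counter update. The toy sketch
below traces only that data flow; its block "cipher" is a keyed XOR stand-in
rather than the AES-128 in RFC3686 CTR mode the deleted code used, so the output
has no cryptographic value, and the repetition check performed by the deleted
code is omitted for brevity.

/* Toy illustration of the X9.31 A.2.4 update described in the deleted
 * crypto/prng.c comments: I = E(DT); R = E(I ^ V); V = E(R ^ I); DT++.
 * toy_encrypt() is a keyed-XOR stand-in, NOT AES. */
#include <stdio.h>

#define BLK_SZ 16

/* Stand-in block "cipher": XOR with the key (illustrative only). */
static void toy_encrypt(const unsigned char *key, const unsigned char *in,
			unsigned char *out)
{
	for (int i = 0; i < BLK_SZ; i++)
		out[i] = in[i] ^ key[i];
}

static void xor_blocks(const unsigned char *a, const unsigned char *b,
		       unsigned char *out)
{
	for (int i = 0; i < BLK_SZ; i++)
		out[i] = a[i] ^ b[i];
}

/* One X9.31 step: consumes DT/V, produces one block of output in R. */
static void x931_step(const unsigned char *key, unsigned char *DT,
		      unsigned char *V, unsigned char *R)
{
	unsigned char I[BLK_SZ], tmp[BLK_SZ];

	toy_encrypt(key, DT, I);	/* stage 0: I = E(DT)    */
	xor_blocks(I, V, tmp);
	toy_encrypt(key, tmp, R);	/* stage 1: R = E(I ^ V) */
	xor_blocks(R, I, tmp);
	toy_encrypt(key, tmp, V);	/* stage 2: V = E(R ^ I) */

	/* Counter update, same shape as the deleted code: shift the bytes
	 * up and bump the low byte. */
	for (int i = BLK_SZ - 1; i > 0; i--)
		DT[i] = DT[i - 1];
	DT[0] += 1;
}

int main(void)
{
	unsigned char key[BLK_SZ] = "0123456789abcde";
	unsigned char DT[BLK_SZ] = {0}, V[BLK_SZ] = "zaybxcwdveuftgs";
	unsigned char R[BLK_SZ];

	for (int round = 0; round < 2; round++) {
		x931_step(key, DT, V, R);
		for (int i = 0; i < BLK_SZ; i++)
			printf("%02x", R[i]);
		printf("\n");
	}
	return 0;
}

With a real block cipher substituted for toy_encrypt(), the same structure is the
generator that the removed module implemented.
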
diff --git a/crypto/prng.h b/crypto/prng.h
deleted file mode 100644 (file)
index 1ac9be5..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * PRNG: Pseudo Random Number Generator
- *
- *  (C) Neil Horman <nhorman@tuxdriver.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; either version 2 of the License, or (at your
- *  any later version.
- *
- *
- */
-
-#ifndef _PRNG_H_
-#define _PRNG_H_
-struct prng_context;
-
-int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
-struct prng_context *alloc_prng_context(void);
-int reset_prng_context(struct prng_context *ctx,
-                       unsigned char *key, unsigned char *iv,
-                       unsigned char *V,
-                       unsigned char *DT);
-void free_prng_context(struct prng_context *ctx);
-
-#endif
-
index 356fac6..5a1cf25 100644 (file)
 
 #include "iscsi_iser.h"
 
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
@@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
                struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
 {
        int rc = 0;
-       uint32_t ret_itt;
        int datalen;
        int ahslen;
 
@@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
        /* read AHS */
        ahslen = hdr->hlength * 4;
 
-       /* verify itt (itt encoding: age+cid+itt) */
-       rc = iscsi_verify_itt(conn, hdr, &ret_itt);
-
-       if (!rc)
-               rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
-
+       rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
        if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
                goto error;
 
@@ -123,25 +121,33 @@ error:
 
 
 /**
- * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_iser_task_init - Initialize task
+ * @task: iscsi task
  *
- **/
+ * Initialize the task for the scsi command or mgmt command.
+ */
 static int
-iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_init(struct iscsi_task *task)
 {
-       struct iscsi_iser_conn     *iser_conn  = ctask->conn->dd_data;
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_conn *iser_conn  = task->conn->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
+
+       /* mgmt task */
+       if (!task->sc) {
+               iser_task->desc.data = task->data;
+               return 0;
+       }
 
-       iser_ctask->command_sent = 0;
-       iser_ctask->iser_conn    = iser_conn;
-       iser_ctask_rdma_init(iser_ctask);
+       iser_task->command_sent = 0;
+       iser_task->iser_conn    = iser_conn;
+       iser_task_rdma_init(iser_task);
        return 0;
 }
 
 /**
- * iscsi_mtask_xmit - xmit management(immediate) task
+ * iscsi_iser_mtask_xmit - xmit management(immediate) task
  * @conn: iscsi connection
- * @mtask: task management task
+ * @task: task management task
  *
  * Notes:
  *     The function can return -EAGAIN in which case caller must
@@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
  *
  **/
 static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
-                     struct iscsi_mgmt_task *mtask)
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
 {
        int error = 0;
 
-       debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+       debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
 
-       error = iser_send_control(conn, mtask);
+       error = iser_send_control(conn, task);
 
-       /* since iser xmits control with zero copy, mtasks can not be recycled
+       /* since iser xmits control with zero copy, tasks can not be recycled
         * right after sending them.
         * The recycling scheme is based on whether a response is expected
-        * - if yes, the mtask is recycled at iscsi_complete_pdu
-        * - if no,  the mtask is recycled at iser_snd_completion
+        * - if yes, the task is recycled at iscsi_complete_pdu
+        * - if no,  the task is recycled at iser_snd_completion
         */
        if (error && error != -ENOBUFS)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -172,97 +177,86 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
 }
 
 static int
-iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
-                                struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+                                struct iscsi_task *task)
 {
        struct iscsi_data  hdr;
        int error = 0;
 
        /* Send data-out PDUs while there's still unsolicited data to send */
-       while (ctask->unsol_count > 0) {
-               iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+       while (task->unsol_count > 0) {
+               iscsi_prep_unsolicit_data_pdu(task, &hdr);
                debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-                          hdr.itt, ctask->data_count);
+                          hdr.itt, task->data_count);
 
                /* the buffer description has been passed with the command */
                /* Send the command */
-               error = iser_send_data_out(conn, ctask, &hdr);
+               error = iser_send_data_out(conn, task, &hdr);
                if (error) {
-                       ctask->unsol_datasn--;
-                       goto iscsi_iser_ctask_xmit_unsol_data_exit;
+                       task->unsol_datasn--;
+                       goto iscsi_iser_task_xmit_unsol_data_exit;
                }
-               ctask->unsol_count -= ctask->data_count;
+               task->unsol_count -= task->data_count;
                debug_scsi("Need to send %d more as data-out PDUs\n",
-                          ctask->unsol_count);
+                          task->unsol_count);
        }
 
-iscsi_iser_ctask_xmit_unsol_data_exit:
+iscsi_iser_task_xmit_unsol_data_exit:
        return error;
 }
 
 static int
-iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
-                     struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit(struct iscsi_task *task)
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_conn *conn = task->conn;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        int error = 0;
 
-       if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-               BUG_ON(scsi_bufflen(ctask->sc) == 0);
+       if (!task->sc)
+               return iscsi_iser_mtask_xmit(conn, task);
+
+       if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+               BUG_ON(scsi_bufflen(task->sc) == 0);
 
                debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-                          ctask->itt, scsi_bufflen(ctask->sc),
-                          ctask->imm_count, ctask->unsol_count);
+                          task->itt, scsi_bufflen(task->sc),
+                          task->imm_count, task->unsol_count);
        }
 
-       debug_scsi("ctask deq [cid %d itt 0x%x]\n",
-                  conn->id, ctask->itt);
+       debug_scsi("task deq [cid %d itt 0x%x]\n",
+                  conn->id, task->itt);
 
        /* Send the cmd PDU */
-       if (!iser_ctask->command_sent) {
-               error = iser_send_command(conn, ctask);
+       if (!iser_task->command_sent) {
+               error = iser_send_command(conn, task);
                if (error)
-                       goto iscsi_iser_ctask_xmit_exit;
-               iser_ctask->command_sent = 1;
+                       goto iscsi_iser_task_xmit_exit;
+               iser_task->command_sent = 1;
        }
 
        /* Send unsolicited data-out PDU(s) if necessary */
-       if (ctask->unsol_count)
-               error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+       if (task->unsol_count)
+               error = iscsi_iser_task_xmit_unsol_data(conn, task);
 
- iscsi_iser_ctask_xmit_exit:
+ iscsi_iser_task_xmit_exit:
        if (error && error != -ENOBUFS)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        return error;
 }
 
 static void
-iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
 
-       if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
-               iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-               iser_ctask_rdma_finalize(iser_ctask);
-       }
-}
-
-static struct iser_conn *
-iscsi_iser_ib_conn_lookup(__u64 ep_handle)
-{
-       struct iser_conn *ib_conn;
-       struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+       /* mgmt tasks do not need special cleanup */
+       if (!task->sc)
+               return;
 
-       mutex_lock(&ig.connlist_mutex);
-       list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
-               if (ib_conn == uib_conn) {
-                       mutex_unlock(&ig.connlist_mutex);
-                       return ib_conn;
-               }
+       if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+               iser_task->status = ISER_TASK_STATUS_COMPLETED;
+               iser_task_rdma_finalize(iser_task);
        }
-       mutex_unlock(&ig.connlist_mutex);
-       iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
-       return NULL;
 }
 
 static struct iscsi_cls_conn *
@@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_iser_conn *iser_conn;
 
-       cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+       cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
@@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
         */
        conn->max_recv_dlength = 128;
 
-       iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
-       if (!iser_conn)
-               goto conn_alloc_fail;
-
-       /* currently this is the only field which need to be initiated */
-       rwlock_init(&iser_conn->lock);
-
+       iser_conn = conn->dd_data;
        conn->dd_data = iser_conn;
        iser_conn->iscsi_conn = conn;
 
        return cls_conn;
-
-conn_alloc_fail:
-       iscsi_conn_teardown(cls_conn);
-       return NULL;
 }
 
 static void
@@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
+       struct iser_conn *ib_conn = iser_conn->ib_conn;
 
        iscsi_conn_teardown(cls_conn);
-       if (iser_conn->ib_conn)
-               iser_conn->ib_conn->iser_conn = NULL;
-       kfree(iser_conn);
+       /*
+        * Userspace will normally call the stop callback and
+        * already have freed the ib_conn, but if it goofed up then
+        * we free it here.
+        */
+       if (ib_conn) {
+               ib_conn->iser_conn = NULL;
+               iser_conn_put(ib_conn);
+       }
 }
 
 static int
@@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_iser_conn *iser_conn;
        struct iser_conn *ib_conn;
+       struct iscsi_endpoint *ep;
        int error;
 
        error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 
        /* the transport ep handle comes from user space so it must be
         * verified against the global ib connections list */
-       ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
-       if (!ib_conn) {
+       ep = iscsi_lookup_endpoint(transport_eph);
+       if (!ep) {
                iser_err("can't bind eph %llx\n",
                         (unsigned long long)transport_eph);
                return -EINVAL;
        }
+       ib_conn = ep->dd_data;
+
        /* binds the iSER connection retrieved from the previously
         * connected ep_handle to the iSCSI layer connection. exchanges
         * connection pointers */
@@ -341,10 +335,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        iser_conn = conn->dd_data;
        ib_conn->iser_conn = iser_conn;
        iser_conn->ib_conn  = ib_conn;
+       iser_conn_get(ib_conn);
+       return 0;
+}
 
-       conn->recv_lock = &iser_conn->lock;
+static void
+iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct iscsi_iser_conn *iser_conn = conn->dd_data;
+       struct iser_conn *ib_conn = iser_conn->ib_conn;
 
-       return 0;
+       /*
+        * Userspace may have goofed up and not bound the connection or
+        * might have only partially setup the connection.
+        */
+       if (ib_conn) {
+               iscsi_conn_stop(cls_conn, flag);
+               /*
+                * There is no unbind event so the stop callback
+                * must release the ref from the bind.
+                */
+               iser_conn_put(ib_conn);
+       }
+       iser_conn->ib_conn = NULL;
 }
 
 static int
@@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
        return iscsi_conn_start(cls_conn);
 }
 
-static struct iscsi_transport iscsi_iser_transport;
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+       iscsi_host_remove(shost);
+       iscsi_host_free(shost);
+}
 
 static struct iscsi_cls_session *
-iscsi_iser_session_create(struct iscsi_transport *iscsit,
-                        struct scsi_transport_template *scsit,
-                        uint16_t cmds_max, uint16_t qdepth,
-                        uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
+                         uint16_t cmds_max, uint16_t qdepth,
+                         uint32_t initial_cmdsn, uint32_t *hostno)
 {
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *session;
+       struct Scsi_Host *shost;
        int i;
-       uint32_t hn;
-       struct iscsi_cmd_task  *ctask;
-       struct iscsi_mgmt_task *mtask;
-       struct iscsi_iser_cmd_task *iser_ctask;
-       struct iser_desc *desc;
+       struct iscsi_task *task;
+       struct iscsi_iser_task *iser_task;
+       struct iser_conn *ib_conn;
+
+       shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+       if (!shost)
+               return NULL;
+       shost->transportt = iscsi_iser_scsi_transport;
+       shost->max_lun = iscsi_max_lun;
+       shost->max_id = 0;
+       shost->max_channel = 0;
+       shost->max_cmd_len = 16;
+
+       /*
+        * older userspace tools (before 2.0-870) did not pass us
+        * the leading conn's ep so this will be NULL;
+        */
+       if (ep)
+               ib_conn = ep->dd_data;
+
+       if (iscsi_host_add(shost,
+                          ep ? ib_conn->device->ib_device->dma_device : NULL))
+               goto free_host;
+       *hostno = shost->host_no;
 
        /*
         * we do not support setting can_queue cmd_per_lun from userspace yet
         * because we preallocate so many resources
         */
-       cls_session = iscsi_session_setup(iscsit, scsit,
+       cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
                                          ISCSI_DEF_XMIT_CMDS_MAX,
-                                         ISCSI_MAX_CMD_PER_LUN,
-                                         sizeof(struct iscsi_iser_cmd_task),
-                                         sizeof(struct iser_desc),
-                                         initial_cmdsn, &hn);
+                                         sizeof(struct iscsi_iser_task),
+                                         initial_cmdsn, 0);
        if (!cls_session)
-       return NULL;
-
-       *hostno = hn;
-       session = class_to_transport_session(cls_session);
+               goto remove_host;
+       session = cls_session->dd_data;
 
+       shost->can_queue = session->scsi_cmds_max;
        /* libiscsi setup itts, data and pool so just set desc fields */
        for (i = 0; i < session->cmds_max; i++) {
-               ctask      = session->cmds[i];
-               iser_ctask = ctask->dd_data;
-               ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
-               ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
-       }
-
-       for (i = 0; i < session->mgmtpool_max; i++) {
-               mtask      = session->mgmt_cmds[i];
-               desc       = mtask->dd_data;
-               mtask->hdr = &desc->iscsi_header;
-               desc->data = mtask->data;
+               task = session->cmds[i];
+               iser_task = task->dd_data;
+               task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+               task->hdr_max = sizeof(iser_task->desc.iscsi_header);
        }
-
        return cls_session;
+
+remove_host:
+       iscsi_host_remove(shost);
+free_host:
+       iscsi_host_free(shost);
+       return NULL;
 }
 
 static int
@@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
        stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
-static int
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
-                     __u64 *ep_handle)
+static struct iscsi_endpoint *
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
 {
        int err;
        struct iser_conn *ib_conn;
+       struct iscsi_endpoint *ep;
 
-       err = iser_conn_init(&ib_conn);
-       if (err)
-               goto out;
+       ep = iscsi_create_endpoint(sizeof(*ib_conn));
+       if (!ep)
+               return ERR_PTR(-ENOMEM);
 
-       err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
-       if (!err)
-               *ep_handle = (__u64)(unsigned long)ib_conn;
+       ib_conn = ep->dd_data;
+       ib_conn->ep = ep;
+       iser_conn_init(ib_conn);
 
-out:
-       return err;
+       err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+                          non_blocking);
+       if (err) {
+               iscsi_destroy_endpoint(ep);
+               return ERR_PTR(err);
+       }
+       return ep;
 }
 
 static int
-iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-       struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+       struct iser_conn *ib_conn;
        int rc;
 
-       if (!ib_conn)
-               return -EINVAL;
-
+       ib_conn = ep->dd_data;
        rc = wait_event_interruptible_timeout(ib_conn->wait,
                             ib_conn->state == ISER_CONN_UP,
                             msecs_to_jiffies(timeout_ms));
@@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
 }
 
 static void
-iscsi_iser_ep_disconnect(__u64 ep_handle)
+iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
        struct iser_conn *ib_conn;
 
-       ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
-       if (!ib_conn)
-               return;
+       ib_conn = ep->dd_data;
+       if (ib_conn->iser_conn)
+               /*
+                * Must suspend xmit path if the ep is bound to the
+                * iscsi_conn, so we know we are not accessing the ib_conn
+                * when we free it.
+                *
+                * This may not be bound if the ep poll failed.
+                */
+               iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+
 
        iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
        iser_conn_terminate(ib_conn);
@@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = {
        .name                   = "iSCSI Initiator over iSER, v." DRV_VER,
        .queuecommand           = iscsi_queuecommand,
        .change_queue_depth     = iscsi_change_queue_depth,
-       .can_queue              = ISCSI_DEF_XMIT_CMDS_MAX - 1,
        .sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
        .max_sectors            = 1024,
        .cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
@@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = {
                                  ISCSI_USERNAME | ISCSI_PASSWORD |
                                  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
                                  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-                                 ISCSI_PING_TMO | ISCSI_RECV_TMO,
+                                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
+                                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
        .host_param_mask        = ISCSI_HOST_HWADDRESS |
                                  ISCSI_HOST_NETDEV_NAME |
                                  ISCSI_HOST_INITIATOR_NAME,
-       .host_template          = &iscsi_iser_sht,
-       .conndata_size          = sizeof(struct iscsi_conn),
-       .max_lun                = ISCSI_ISER_MAX_LUN,
-       .max_cmd_len            = ISCSI_ISER_MAX_CMD_LEN,
        /* session management */
        .create_session         = iscsi_iser_session_create,
-       .destroy_session        = iscsi_session_teardown,
+       .destroy_session        = iscsi_iser_session_destroy,
        /* connection management */
        .create_conn            = iscsi_iser_conn_create,
        .bind_conn              = iscsi_iser_conn_bind,
@@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = {
        .get_conn_param         = iscsi_conn_get_param,
        .get_session_param      = iscsi_session_get_param,
        .start_conn             = iscsi_iser_conn_start,
-       .stop_conn              = iscsi_conn_stop,
+       .stop_conn              = iscsi_iser_conn_stop,
        /* iscsi host params */
        .get_host_param         = iscsi_host_get_param,
        .set_host_param         = iscsi_host_set_param,
        /* IO */
        .send_pdu               = iscsi_conn_send_pdu,
        .get_stats              = iscsi_iser_conn_get_stats,
-       .init_cmd_task          = iscsi_iser_cmd_init,
-       .xmit_cmd_task          = iscsi_iser_ctask_xmit,
-       .xmit_mgmt_task         = iscsi_iser_mtask_xmit,
-       .cleanup_cmd_task       = iscsi_iser_cleanup_ctask,
+       .init_task              = iscsi_iser_task_init,
+       .xmit_task              = iscsi_iser_task_xmit,
+       .cleanup_task           = iscsi_iser_cleanup_task,
        /* recovery */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
 
@@ -630,8 +670,6 @@ static int __init iser_init(void)
                return -EINVAL;
        }
 
-       iscsi_iser_transport.max_lun = iscsi_max_lun;
-
        memset(&ig, 0, sizeof(struct iser_global));
 
        ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -647,7 +685,9 @@ static int __init iser_init(void)
        mutex_init(&ig.connlist_mutex);
        INIT_LIST_HEAD(&ig.connlist);
 
-       if (!iscsi_register_transport(&iscsi_iser_transport)) {
+       iscsi_iser_scsi_transport = iscsi_register_transport(
+                                                       &iscsi_iser_transport);
+       if (!iscsi_iser_scsi_transport) {
                iser_err("iscsi_register_transport failed\n");
                err = -EINVAL;
                goto register_transport_failure;
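The hunks above replace the driver's private ep_handle lookup with libiscsi's struct iscsi_endpoint: ep_connect() allocates the endpoint and keeps the iser_conn in ep->dd_data, conn_bind() resolves the handle with iscsi_lookup_endpoint() and takes a reference, and the new conn_stop() drops that reference because there is no separate unbind event. A condensed sketch of that pairing; the example_* names and trimmed bodies are simplifications, not the in-tree functions:

/* Hedged sketch of the bind/stop reference pairing introduced above. */
static int example_bind(struct iscsi_iser_conn *iser_conn, u64 eph)
{
	struct iscsi_endpoint *ep = iscsi_lookup_endpoint(eph);
	struct iser_conn *ib_conn;

	if (!ep)
		return -EINVAL;
	ib_conn = ep->dd_data;		/* filled in by ep_connect() */
	ib_conn->iser_conn = iser_conn;
	iser_conn->ib_conn = ib_conn;
	iser_conn_get(ib_conn);		/* held while the conn is bound */
	return 0;
}

static void example_stop(struct iscsi_iser_conn *iser_conn)
{
	struct iser_conn *ib_conn = iser_conn->ib_conn;

	if (ib_conn)
		iser_conn_put(ib_conn);	/* no unbind event, so stop drops it */
	iser_conn->ib_conn = NULL;
}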
index 0e10703..81a8262 100644 (file)
@@ -94,7 +94,6 @@
                                        /* support upto 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN             256
-#define ISCSI_ISER_MAX_CMD_LEN         16
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
@@ -172,7 +171,8 @@ struct iser_data_buf {
 /* fwd declarations */
 struct iser_device;
 struct iscsi_iser_conn;
-struct iscsi_iser_cmd_task;
+struct iscsi_iser_task;
+struct iscsi_endpoint;
 
 struct iser_mem_reg {
        u32  lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
 #define MAX_REGD_BUF_VECTOR_LEN        2
 
 struct iser_dto {
-       struct iscsi_iser_cmd_task *ctask;
+       struct iscsi_iser_task *task;
        struct iser_conn *ib_conn;
        int                        notify_enable;
 
@@ -240,7 +240,9 @@ struct iser_device {
 
 struct iser_conn {
        struct iscsi_iser_conn       *iser_conn; /* iser conn for upcalls  */
+       struct iscsi_endpoint        *ep;
        enum iser_ib_conn_state      state;         /* rdma connection state   */
+       atomic_t                     refcount;
        spinlock_t                   lock;          /* used for state changes  */
        struct iser_device           *device;       /* device context          */
        struct rdma_cm_id            *cma_id;       /* CMA ID                  */
@@ -259,11 +261,9 @@ struct iser_conn {
 struct iscsi_iser_conn {
        struct iscsi_conn            *iscsi_conn;/* ptr to iscsi conn */
        struct iser_conn             *ib_conn;   /* iSER IB conn      */
-
-       rwlock_t                     lock;
 };
 
-struct iscsi_iser_cmd_task {
+struct iscsi_iser_task {
        struct iser_desc             desc;
        struct iscsi_iser_conn       *iser_conn;
        enum iser_task_status        status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
 /* allocate connection resources needed for rdma functionality */
 int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
 
-int iser_send_control(struct iscsi_conn      *conn,
-                     struct iscsi_mgmt_task *mtask);
+int iser_send_control(struct iscsi_conn *conn,
+                     struct iscsi_task *task);
 
-int iser_send_command(struct iscsi_conn      *conn,
-                     struct iscsi_cmd_task  *ctask);
+int iser_send_command(struct iscsi_conn *conn,
+                     struct iscsi_task *task);
 
-int iser_send_data_out(struct iscsi_conn     *conn,
-                      struct iscsi_cmd_task *ctask,
-                      struct iscsi_data          *hdr);
+int iser_send_data_out(struct iscsi_conn *conn,
+                      struct iscsi_task *task,
+                      struct iscsi_data *hdr);
 
 void iscsi_iser_recv(struct iscsi_conn *conn,
                     struct iscsi_hdr       *hdr,
                     char                   *rx_data,
                     int                    rx_data_len);
 
-int  iser_conn_init(struct iser_conn **ib_conn);
+void iser_conn_init(struct iser_conn *ib_conn);
+
+void iser_conn_get(struct iser_conn *ib_conn);
+
+void iser_conn_put(struct iser_conn *ib_conn);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
 
 void iser_snd_completion(struct iser_desc *desc);
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task     *ctask);
+void iser_task_rdma_init(struct iscsi_iser_task *task);
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
 
 void iser_dto_buffs_release(struct iser_dto *dto);
 
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device      *device,
                     struct iser_regd_buf    *regd_buf,
                     enum dma_data_direction direction);
 
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
                                     enum iser_data_dir         cmd_dir);
 
-int  iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
                       enum   iser_data_dir        cmd_dir);
 
 int  iser_connect(struct iser_conn   *ib_conn,
@@ -355,10 +359,10 @@ int  iser_post_send(struct iser_desc *tx_desc);
 int iser_conn_state_comp(struct iser_conn *ib_conn,
                         enum iser_ib_conn_state comp);
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                            struct iser_data_buf       *data,
                            enum   iser_data_dir       iser_dir,
                            enum   dma_data_direction  dma_dir);
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 #endif
index 31ad498..cdd2831 100644 (file)
@@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  iser_ctask->data[ISER_DIR_IN].data_len
+ *  iser_task->data[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
                                 unsigned int edtl)
 
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
-       struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-       struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+       struct iser_hdr *hdr = &iser_task->desc.iser_header;
+       struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
 
-       err = iser_dma_map_task_data(iser_ctask,
+       err = iser_dma_map_task_data(iser_task,
                                     buf_in,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;
 
-       if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+       if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
                iser_err("Total data length: %ld, less than EDTL: "
                         "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-                        iser_ctask->data[ISER_DIR_IN].data_len, edtl,
-                        ctask->itt, iser_ctask->iser_conn);
+                        iser_task->data[ISER_DIR_IN].data_len, edtl,
+                        task->itt, iser_task->iser_conn);
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+       err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
        }
-       regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+       regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
 
        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
        hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
 
        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
-                ctask->itt, regd_buf->reg.rkey,
+                task->itt, regd_buf->reg.rkey,
                 (unsigned long long)regd_buf->reg.va);
 
        return 0;
@@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  ctask->data[ISER_DIR_OUT].data_len
+ *  task->data[ISER_DIR_OUT].data_len
  */
 static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int imm_sz,
                       unsigned int unsol_sz,
                       unsigned int edtl)
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
-       struct iser_dto *send_dto = &iser_ctask->desc.dto;
-       struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-       struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+       struct iser_dto *send_dto = &iser_task->desc.dto;
+       struct iser_hdr *hdr = &iser_task->desc.iser_header;
+       struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
 
-       err = iser_dma_map_task_data(iser_ctask,
+       err = iser_dma_map_task_data(iser_task,
                                     buf_out,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;
 
-       if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+       if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Total data length: %ld, less than EDTL: %d, "
                         "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-                        iser_ctask->data[ISER_DIR_OUT].data_len,
-                        edtl, ctask->itt, ctask->conn);
+                        iser_task->data[ISER_DIR_OUT].data_len,
+                        edtl, task->itt, task->conn);
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+       err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        }
 
-       regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+       regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
 
        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
@@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
 
                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                         "VA:%#llX + unsol:%d\n",
-                        ctask->itt, regd_buf->reg.rkey,
+                        task->itt, regd_buf->reg.rkey,
                         (unsigned long long)regd_buf->reg.va, unsol_sz);
        }
 
        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
-                        ctask->itt, imm_sz);
+                        task->itt, imm_sz);
                iser_dto_add_regd_buff(send_dto,
                                       regd_buf,
                                       0,
@@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
 /**
  * iser_send_command - send command PDU
  */
-int iser_send_command(struct iscsi_conn     *conn,
-                     struct iscsi_cmd_task *ctask)
+int iser_send_command(struct iscsi_conn *conn,
+                     struct iscsi_task *task)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_dto *send_dto = NULL;
        unsigned long edtl;
        int err = 0;
        struct iser_data_buf *data_buf;
 
-       struct iscsi_cmd *hdr =  ctask->hdr;
-       struct scsi_cmnd *sc  =  ctask->sc;
+       struct iscsi_cmd *hdr =  task->hdr;
+       struct scsi_cmnd *sc  =  task->sc;
 
        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
                iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
                return -EPERM;
        }
-       if (iser_check_xmit(conn, ctask))
+       if (iser_check_xmit(conn, task))
                return -ENOBUFS;
 
        edtl = ntohl(hdr->data_length);
 
        /* build the tx desc regd header and add it to the tx desc dto */
-       iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
-       send_dto = &iser_ctask->desc.dto;
-       send_dto->ctask = iser_ctask;
-       iser_create_send_desc(iser_conn, &iser_ctask->desc);
+       iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+       send_dto = &iser_task->desc.dto;
+       send_dto->task = iser_task;
+       iser_create_send_desc(iser_conn, &iser_task->desc);
 
        if (hdr->flags & ISCSI_FLAG_CMD_READ)
-               data_buf = &iser_ctask->data[ISER_DIR_IN];
+               data_buf = &iser_task->data[ISER_DIR_IN];
        else
-               data_buf = &iser_ctask->data[ISER_DIR_OUT];
+               data_buf = &iser_task->data[ISER_DIR_OUT];
 
        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->buf  = scsi_sglist(sc);
@@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn     *conn,
        data_buf->data_len = scsi_bufflen(sc);
 
        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-               err = iser_prepare_read_cmd(ctask, edtl);
+               err = iser_prepare_read_cmd(task, edtl);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
-               err = iser_prepare_write_cmd(ctask,
-                                            ctask->imm_count,
-                                            ctask->imm_count +
-                                            ctask->unsol_count,
+               err = iser_prepare_write_cmd(task,
+                                            task->imm_count,
+                                            task->imm_count +
+                                            task->unsol_count,
                                             edtl);
                if (err)
                        goto send_command_error;
@@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn     *conn,
                goto send_command_error;
        }
 
-       iser_ctask->status = ISER_TASK_STATUS_STARTED;
+       iser_task->status = ISER_TASK_STATUS_STARTED;
 
-       err = iser_post_send(&iser_ctask->desc);
+       err = iser_post_send(&iser_task->desc);
        if (!err)
                return 0;
 
 send_command_error:
        iser_dto_buffs_release(send_dto);
-       iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+       iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
        return err;
 }
 
 /**
  * iser_send_data_out - send data out PDU
  */
-int iser_send_data_out(struct iscsi_conn     *conn,
-                      struct iscsi_cmd_task *ctask,
+int iser_send_data_out(struct iscsi_conn *conn,
+                      struct iscsi_task *task,
                       struct iscsi_data *hdr)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_desc *tx_desc = NULL;
        struct iser_dto *send_dto = NULL;
        unsigned long buf_offset;
@@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn     *conn,
                return -EPERM;
        }
 
-       if (iser_check_xmit(conn, ctask))
+       if (iser_check_xmit(conn, task))
                return -ENOBUFS;
 
        itt = (__force uint32_t)hdr->itt;
@@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn     *conn,
 
        /* build the tx desc regd header and add it to the tx desc dto */
        send_dto = &tx_desc->dto;
-       send_dto->ctask = iser_ctask;
+       send_dto->task = iser_task;
        iser_create_send_desc(iser_conn, tx_desc);
 
        iser_reg_single(iser_conn->ib_conn->device,
@@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn     *conn,
 
        /* all data was registered for RDMA, we can use the lkey */
        iser_dto_add_regd_buff(send_dto,
-                              &iser_ctask->rdma_regd[ISER_DIR_OUT],
+                              &iser_task->rdma_regd[ISER_DIR_OUT],
                               buf_offset,
                               data_seg_len);
 
-       if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+       if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out "
                         "inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
-                        iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+                        iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
@@ -468,10 +468,11 @@ send_data_out_error:
 }
 
 int iser_send_control(struct iscsi_conn *conn,
-                     struct iscsi_mgmt_task *mtask)
+                     struct iscsi_task *task)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iser_desc *mdesc = mtask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_desc *mdesc = &iser_task->desc;
        struct iser_dto *send_dto = NULL;
        unsigned long data_seg_len;
        int err = 0;
@@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn,
                return -EPERM;
        }
 
-       if (iser_check_xmit(conn,mtask))
+       if (iser_check_xmit(conn, task))
                return -ENOBUFS;
 
        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        send_dto = &mdesc->dto;
-       send_dto->ctask = NULL;
+       send_dto->task = NULL;
        iser_create_send_desc(iser_conn, mdesc);
 
        device = iser_conn->ib_conn->device;
 
        iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
 
-       data_seg_len = ntoh24(mtask->hdr->dlength);
+       data_seg_len = ntoh24(task->hdr->dlength);
 
        if (data_seg_len > 0) {
                regd_buf = &mdesc->data_regd_buf;
                memset(regd_buf, 0, sizeof(struct iser_regd_buf));
                regd_buf->device = device;
-               regd_buf->virt_addr = mtask->data;
-               regd_buf->data_size = mtask->data_count;
+               regd_buf->virt_addr = task->data;
+               regd_buf->data_size = task->data_count;
                iser_reg_single(device, regd_buf,
                                DMA_TO_DEVICE);
                iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -533,15 +534,13 @@ send_control_error:
 void iser_rcv_completion(struct iser_desc *rx_desc,
                         unsigned long dto_xfer_len)
 {
-       struct iser_dto        *dto = &rx_desc->dto;
+       struct iser_dto *dto = &rx_desc->dto;
        struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-       struct iscsi_session *session = conn->iscsi_conn->session;
-       struct iscsi_cmd_task *ctask;
-       struct iscsi_iser_cmd_task *iser_ctask;
+       struct iscsi_task *task;
+       struct iscsi_iser_task *iser_task;
        struct iscsi_hdr *hdr;
        char   *rx_data = NULL;
        int     rx_data_len = 0;
-       unsigned int itt;
        unsigned char opcode;
 
        hdr = &rx_desc->iscsi_header;
@@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
        opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
        if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-               itt = get_itt(hdr->itt); /* mask out cid and age bits */
-               if (!(itt < session->cmds_max))
+               spin_lock(&conn->iscsi_conn->session->lock);
+               task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+               if (task)
+                       __iscsi_get_task(task);
+               spin_unlock(&conn->iscsi_conn->session->lock);
+
+               if (!task)
                        iser_err("itt can't be matched to task!!! "
-                                "conn %p opcode %d cmds_max %d itt %d\n",
-                                conn->iscsi_conn,opcode,session->cmds_max,itt);
-               /* use the mapping given with the cmds array indexed by itt */
-               ctask = (struct iscsi_cmd_task *)session->cmds[itt];
-               iser_ctask = ctask->dd_data;
-               iser_dbg("itt %d ctask %p\n",itt,ctask);
-               iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-               iser_ctask_rdma_finalize(iser_ctask);
+                                "conn %p opcode %d itt %d\n",
+                                conn->iscsi_conn, opcode, hdr->itt);
+               else {
+                       iser_task = task->dd_data;
+                       iser_dbg("itt %d task %p\n",hdr->itt, task);
+                       iser_task->status = ISER_TASK_STATUS_COMPLETED;
+                       iser_task_rdma_finalize(iser_task);
+                       iscsi_put_task(task);
+               }
        }
-
        iser_dto_buffs_release(dto);
 
        iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
@@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
        struct iser_conn       *ib_conn = dto->ib_conn;
        struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
        struct iscsi_conn      *conn = iser_conn->iscsi_conn;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
        int resume_tx = 0;
 
        iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 
        if (tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
-               mtask = (void *) ((long)(void *)tx_desc -
-                                 sizeof(struct iscsi_mgmt_task));
-               if (mtask->hdr->itt == RESERVED_ITT) {
-                       struct iscsi_session *session = conn->session;
-
-                       spin_lock(&conn->session->lock);
-                       iscsi_free_mgmt_task(conn, mtask);
-                       spin_unlock(&session->lock);
-               }
+               task = (void *) ((long)(void *)tx_desc -
+                                 sizeof(struct iscsi_task));
+               if (task->hdr->itt == RESERVED_ITT)
+                       iscsi_put_task(task);
        }
 }
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 {
-       iser_ctask->status = ISER_TASK_STATUS_INIT;
+       iser_task->status = ISER_TASK_STATUS_INIT;
 
-       iser_ctask->dir[ISER_DIR_IN] = 0;
-       iser_ctask->dir[ISER_DIR_OUT] = 0;
+       iser_task->dir[ISER_DIR_IN] = 0;
+       iser_task->dir[ISER_DIR_OUT] = 0;
 
-       iser_ctask->data[ISER_DIR_IN].data_len  = 0;
-       iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+       iser_task->data[ISER_DIR_IN].data_len  = 0;
+       iser_task->data[ISER_DIR_OUT].data_len = 0;
 
-       memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+       memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
               sizeof(struct iser_regd_buf));
-       memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+       memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
               sizeof(struct iser_regd_buf));
 }
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
        int deferred;
        int is_rdma_aligned = 1;
@@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
        /* if we were reading, copy back to unaligned sglist,
         * anyway dma_unmap and free the copy
         */
-       if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+       if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
                is_rdma_aligned = 0;
-               iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+               iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
        }
-       if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+       if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
                is_rdma_aligned = 0;
-               iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+               iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }
 
-       if (iser_ctask->dir[ISER_DIR_IN]) {
-               regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+       if (iser_task->dir[ISER_DIR_IN]) {
+               regd = &iser_task->rdma_regd[ISER_DIR_IN];
                deferred = iser_regd_buff_release(regd);
                if (deferred) {
                        iser_err("%d references remain for BUF-IN rdma reg\n",
@@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
                }
        }
 
-       if (iser_ctask->dir[ISER_DIR_OUT]) {
-               regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+       if (iser_task->dir[ISER_DIR_OUT]) {
+               regd = &iser_task->rdma_regd[ISER_DIR_OUT];
                deferred = iser_regd_buff_release(regd);
                if (deferred) {
                        iser_err("%d references remain for BUF-OUT rdma reg\n",
@@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 
        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
-               iser_dma_unmap_task_data(iser_ctask);
+               iser_dma_unmap_task_data(iser_task);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)
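The control-path change above relies on a libiscsi layout detail: the "this arithmetic is legal by libiscsi dd_data allocation" comment means the per-task dd_data is placed immediately after struct iscsi_task, and struct iser_desc is the first member of the iSER per-task data, so the send-completion handler can walk back from the descriptor to the owning task before dropping its reference. A hedged illustration of that back-pointer, with an assumed helper name:

/* Hedged illustration only: recover the iscsi_task that owns a control
 * descriptor stored in its dd_data, as done in iser_snd_completion(). */
static struct iscsi_task *task_of_desc(struct iser_desc *desc)
{
	return (struct iscsi_task *)((char *)desc - sizeof(struct iscsi_task));
}

/* usage: iscsi_put_task(task_of_desc(tx_desc)); for RESERVED_ITT PDUs */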
index 81e49cb..b9453d0 100644 (file)
@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device,
 /**
  * iser_start_rdma_unaligned_sg
  */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                        enum iser_data_dir cmd_dir)
 {
        int dma_nents;
        struct ib_device *dev;
        char *mem = NULL;
-       struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+       struct iser_data_buf *data = &iser_task->data[cmd_dir];
        unsigned long  cmd_data_len = data->data_len;
 
        if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                }
        }
 
-       sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
-       iser_ctask->data_copy[cmd_dir].buf  =
-               &iser_ctask->data_copy[cmd_dir].sg_single;
-       iser_ctask->data_copy[cmd_dir].size = 1;
+       sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+       iser_task->data_copy[cmd_dir].buf  =
+               &iser_task->data_copy[cmd_dir].sg_single;
+       iser_task->data_copy[cmd_dir].size = 1;
 
-       iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
+       iser_task->data_copy[cmd_dir].copy_buf  = mem;
 
-       dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+       dev = iser_task->iser_conn->ib_conn->device->ib_device;
        dma_nents = ib_dma_map_sg(dev,
-                                 &iser_ctask->data_copy[cmd_dir].sg_single,
+                                 &iser_task->data_copy[cmd_dir].sg_single,
                                  1,
                                  (cmd_dir == ISER_DIR_OUT) ?
                                  DMA_TO_DEVICE : DMA_FROM_DEVICE);
        BUG_ON(dma_nents == 0);
 
-       iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+       iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
        return 0;
 }
 
 /**
  * iser_finalize_rdma_unaligned_sg
  */
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                     enum iser_data_dir         cmd_dir)
 {
        struct ib_device *dev;
        struct iser_data_buf *mem_copy;
        unsigned long  cmd_data_len;
 
-       dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
-       mem_copy = &iser_ctask->data_copy[cmd_dir];
+       dev = iser_task->iser_conn->ib_conn->device->ib_device;
+       mem_copy = &iser_task->data_copy[cmd_dir];
 
        ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
                        (cmd_dir == ISER_DIR_OUT) ?
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                /* copy back read RDMA to unaligned sg */
                mem     = mem_copy->copy_buf;
 
-               sgl     = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
-               sg_size = iser_ctask->data[ISER_DIR_IN].size;
+               sgl     = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+               sg_size = iser_task->data[ISER_DIR_IN].size;
 
                p = mem;
                for_each_sg(sgl, sg, sg_size, i) {
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                }
        }
 
-       cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+       cmd_data_len = iser_task->data[cmd_dir].data_len;
 
        if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
                free_pages((unsigned long)mem_copy->copy_buf,
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
        }
 }
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
-                           struct iser_data_buf       *data,
-                           enum   iser_data_dir       iser_dir,
-                           enum   dma_data_direction  dma_dir)
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+                           struct iser_data_buf *data,
+                           enum iser_data_dir iser_dir,
+                           enum dma_data_direction dma_dir)
 {
        struct ib_device *dev;
 
-       iser_ctask->dir[iser_dir] = 1;
-       dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+       iser_task->dir[iser_dir] = 1;
+       dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
        data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
        if (data->dma_nents == 0) {
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
        return 0;
 }
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
 {
        struct ib_device *dev;
        struct iser_data_buf *data;
 
-       dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+       dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
-       if (iser_ctask->dir[ISER_DIR_IN]) {
-               data = &iser_ctask->data[ISER_DIR_IN];
+       if (iser_task->dir[ISER_DIR_IN]) {
+               data = &iser_task->data[ISER_DIR_IN];
                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
        }
 
-       if (iser_ctask->dir[ISER_DIR_OUT]) {
-               data = &iser_ctask->data[ISER_DIR_OUT];
+       if (iser_task->dir[ISER_DIR_OUT]) {
+               data = &iser_task->data[ISER_DIR_OUT];
                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
        }
 }
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
  *
  * returns 0 on success, errno code on failure
  */
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
                      enum   iser_data_dir        cmd_dir)
 {
-       struct iscsi_conn    *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
-       struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
+       struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+       struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
        struct iser_device   *device = ib_conn->device;
        struct ib_device     *ibdev = device->ib_device;
-       struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+       struct iser_data_buf *mem = &iser_task->data[cmd_dir];
        struct iser_regd_buf *regd_buf;
        int aligned_len;
        int err;
        int i;
        struct scatterlist *sg;
 
-       regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+       regd_buf = &iser_task->rdma_regd[cmd_dir];
 
        aligned_len = iser_data_buf_aligned_len(mem, ibdev);
        if (aligned_len != mem->dma_nents) {
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
                iser_data_buf_dump(mem, ibdev);
 
                /* unmap the command data before accessing it */
-               iser_dma_unmap_task_data(iser_ctask);
+               iser_dma_unmap_task_data(iser_task);
 
                /* allocate copy buf, if we are writing, copy the */
                /* unaligned scatterlist, dma map the copy        */
-               if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+               if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
                                return -ENOMEM;
-               mem = &iser_ctask->data_copy[cmd_dir];
+               mem = &iser_task->data_copy[cmd_dir];
        }
 
        /* if there a single dma entry, FMR is not needed */
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
                err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
                if (err) {
                        iser_data_buf_dump(mem, ibdev);
-                       iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
-                                ntoh24(iser_ctask->desc.iscsi_header.dlength));
+                       iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+                                mem->dma_nents,
+                                ntoh24(iser_task->desc.iscsi_header.dlength));
                        iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
                                 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
                                 ib_conn->page_vec->offset);
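With the ctask/mtask split gone, the data-path helpers above all key off struct iscsi_iser_task. A reduced sketch of how the read side strings them together, mirroring iser_prepare_read_cmd() earlier in this patch; the example_* name is illustrative, the EDTL check is omitted, and failure cleanup is left to the normal task-cleanup path as in the driver:

/* Hedged sketch, not the in-tree function: DMA-map the IN buffer,
 * then register it for RDMA. */
static int example_prepare_read(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
	int err;

	err = iser_dma_map_task_data(iser_task, buf_in,
				     ISER_DIR_IN, DMA_FROM_DEVICE);
	if (err)
		return err;
	return iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
}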
index 77cabee..3a917c1 100644 (file)
@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn)
                iser_device_try_release(device);
        if (ib_conn->iser_conn)
                ib_conn->iser_conn->ib_conn = NULL;
-       kfree(ib_conn);
+       iscsi_destroy_endpoint(ib_conn->ep);
+}
+
+void iser_conn_get(struct iser_conn *ib_conn)
+{
+       atomic_inc(&ib_conn->refcount);
+}
+
+void iser_conn_put(struct iser_conn *ib_conn)
+{
+       if (atomic_dec_and_test(&ib_conn->refcount))
+               iser_conn_release(ib_conn);
 }
 
 /**
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
        wait_event_interruptible(ib_conn->wait,
                                 ib_conn->state == ISER_CONN_DOWN);
 
-       iser_conn_release(ib_conn);
+       iser_conn_put(ib_conn);
 }
 
 static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
        return ret;
 }
 
-int iser_conn_init(struct iser_conn **ibconn)
+void iser_conn_init(struct iser_conn *ib_conn)
 {
-       struct iser_conn *ib_conn;
-
-       ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
-       if (!ib_conn) {
-               iser_err("can't alloc memory for struct iser_conn\n");
-               return -ENOMEM;
-       }
        ib_conn->state = ISER_CONN_INIT;
        init_waitqueue_head(&ib_conn->wait);
        atomic_set(&ib_conn->post_recv_buf_count, 0);
        atomic_set(&ib_conn->post_send_buf_count, 0);
+       atomic_set(&ib_conn->refcount, 1);
        INIT_LIST_HEAD(&ib_conn->conn_list);
        spin_lock_init(&ib_conn->lock);
-
-       *ibconn = ib_conn;
-       return 0;
 }
 
  /**
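
The iser_verbs.c hunks above switch the connection object from explicit allocation and kfree() to reference counting: iser_conn_init() now only initialises a caller-provided structure, the count starts at 1, and the final iser_conn_put() runs iser_conn_release(), which tears the connection down through iscsi_destroy_endpoint(ib_conn->ep) rather than kfree(), so the storage evidently belongs to the iSCSI endpoint. A minimal sketch of the same get/put pattern on a hypothetical object (not the real iser_conn):

#include <asm/atomic.h>
#include <linux/slab.h>

struct refobj {
	atomic_t refcount;
	/* ... payload ... */
};

void refobj_init(struct refobj *o)
{
	atomic_set(&o->refcount, 1);	/* creator holds the first reference */
}

void refobj_get(struct refobj *o)
{
	atomic_inc(&o->refcount);
}

void refobj_put(struct refobj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);	/* last put frees; the driver above releases the endpoint instead */
}

With this in place, iser_conn_terminate() only drops its reference via iser_conn_put() instead of calling the release function directly.
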
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 610af91..07d92c1 100644
@@ -252,27 +252,14 @@ config DM_ZERO
 config DM_MULTIPATH
        tristate "Multipath target"
        depends on BLK_DEV_DM
+       # nasty syntax but means make DM_MULTIPATH independent
+       # of SCSI_DH if the latter isn't defined but if
+       # it is, DM_MULTIPATH must depend on it.  We get a build
+       # error if SCSI_DH=m and DM_MULTIPATH=y
+       depends on SCSI_DH || !SCSI_DH
        ---help---
          Allow volume managers to support multipath hardware.
 
-config DM_MULTIPATH_EMC
-       tristate "EMC CX/AX multipath support"
-       depends on DM_MULTIPATH && BLK_DEV_DM
-       ---help---
-         Multipath support for EMC CX/AX series hardware.
-
-config DM_MULTIPATH_RDAC
-       tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
-       depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
-       ---help---
-         Multipath support for LSI/Engenio RDAC.
-
-config DM_MULTIPATH_HP
-        tristate "HP MSA multipath support (EXPERIMENTAL)"
-        depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
-        ---help---
-          Multipath support for HP MSA (Active/Passive) series hardware.
-
 config DM_DELAY
        tristate "I/O delaying target (EXPERIMENTAL)"
        depends on BLK_DEV_DM && EXPERIMENTAL
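
The "depends on SCSI_DH || !SCSI_DH" line added above is the usual kconfig idiom for an optional tristate dependency: the expression is y when SCSI_DH is y or n, but only m when SCSI_DH is m, which caps DM_MULTIPATH at m in that case. That is exactly the broken combination the comment describes - a built-in dm-multipath calling into a modular scsi_dh. A tiny illustration of the failure mode (deliberately not linkable on its own; the caller name is made up):

/* With SCSI_DH=m this symbol is only available from the scsi_dh module,
 * so a built-in (DM_MULTIPATH=y) caller fails the vmlinux link with an
 * undefined reference to scsi_dh_handler_exist.
 */
extern int scsi_dh_handler_exist(const char *name);

int example_uses_scsi_dh(const char *hw_handler_name)
{
	return scsi_dh_handler_exist(hw_handler_name);
}
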
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7be09ee..f1ef33d 100644
@@ -4,11 +4,9 @@
 
 dm-mod-objs    := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
                   dm-ioctl.o dm-io.o dm-kcopyd.o
-dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
+dm-multipath-objs := dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
 dm-mirror-objs := dm-raid1.o
-dm-rdac-objs   := dm-mpath-rdac.o
-dm-hp-sw-objs  := dm-mpath-hp-sw.o
 md-mod-objs     := md.o bitmap.o
 raid456-objs   := raid5.o raid6algos.o raid6recov.o raid6tables.o \
                   raid6int1.o raid6int2.o raid6int4.o \
@@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM)      += dm-mod.o
 obj-$(CONFIG_DM_CRYPT)         += dm-crypt.o
 obj-$(CONFIG_DM_DELAY)         += dm-delay.o
 obj-$(CONFIG_DM_MULTIPATH)     += dm-multipath.o dm-round-robin.o
-obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
-obj-$(CONFIG_DM_MULTIPATH_HP)  += dm-hp-sw.o
-obj-$(CONFIG_DM_MULTIPATH_RDAC)        += dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)      += dm-snapshot.o
 obj-$(CONFIG_DM_MIRROR)                += dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)          += dm-zero.o
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
deleted file mode 100644
index 3ea5ad4..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
- *
- * This file is released under the GPL.
- *
- * Multipath support for EMC CLARiiON AX/CX-series hardware.
- */
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-
-#define DM_MSG_PREFIX "multipath emc"
-
-struct emc_handler {
-       spinlock_t lock;
-
-       /* Whether we should send the short trespass command (FC-series)
-        * or the long version (default for AX/CX CLARiiON arrays). */
-       unsigned short_trespass;
-       /* Whether or not to honor SCSI reservations when initiating a
-        * switch-over. Default: Don't. */
-       unsigned hr;
-
-       unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-#define TRESPASS_PAGE 0x22
-#define EMC_FAILOVER_TIMEOUT (60 * HZ)
-
-/* Code borrowed from dm-lsi-rdac by Mike Christie */
-
-static inline void free_bio(struct bio *bio)
-{
-       __free_page(bio->bi_io_vec[0].bv_page);
-       bio_put(bio);
-}
-
-static void emc_endio(struct bio *bio, int error)
-{
-       struct dm_path *path = bio->bi_private;
-
-       /* We also need to look at the sense keys here whether or not to
-        * switch to the next PG etc.
-        *
-        * For now simple logic: either it works or it doesn't.
-        */
-       if (error)
-               dm_pg_init_complete(path, MP_FAIL_PATH);
-       else
-               dm_pg_init_complete(path, 0);
-
-       /* request is freed in block layer */
-       free_bio(bio);
-}
-
-static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
-{
-       struct bio *bio;
-       struct page *page;
-
-       bio = bio_alloc(GFP_ATOMIC, 1);
-       if (!bio) {
-               DMERR("get_failover_bio: bio_alloc() failed.");
-               return NULL;
-       }
-
-       bio->bi_rw |= (1 << BIO_RW);
-       bio->bi_bdev = path->dev->bdev;
-       bio->bi_sector = 0;
-       bio->bi_private = path;
-       bio->bi_end_io = emc_endio;
-
-       page = alloc_page(GFP_ATOMIC);
-       if (!page) {
-               DMERR("get_failover_bio: alloc_page() failed.");
-               bio_put(bio);
-               return NULL;
-       }
-
-       if (bio_add_page(bio, page, data_size, 0) != data_size) {
-               DMERR("get_failover_bio: bio_add_page() failed.");
-               __free_page(page);
-               bio_put(bio);
-               return NULL;
-       }
-
-       return bio;
-}
-
-static struct request *get_failover_req(struct emc_handler *h,
-                                       struct bio *bio, struct dm_path *path)
-{
-       struct request *rq;
-       struct block_device *bdev = bio->bi_bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
-
-       /* FIXME: Figure out why it fails with GFP_ATOMIC. */
-       rq = blk_get_request(q, WRITE, __GFP_WAIT);
-       if (!rq) {
-               DMERR("get_failover_req: blk_get_request failed");
-               return NULL;
-       }
-
-       blk_rq_append_bio(q, rq, bio);
-
-       rq->sense = h->sense;
-       memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
-       rq->sense_len = 0;
-
-       rq->timeout = EMC_FAILOVER_TIMEOUT;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
-       rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-
-       return rq;
-}
-
-static struct request *emc_trespass_get(struct emc_handler *h,
-                                       struct dm_path *path)
-{
-       struct bio *bio;
-       struct request *rq;
-       unsigned char *page22;
-       unsigned char long_trespass_pg[] = {
-               0, 0, 0, 0,
-               TRESPASS_PAGE,        /* Page code */
-               0x09,                 /* Page length - 2 */
-               h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-               0xff, 0xff,           /* Trespass target */
-               0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
-               };
-       unsigned char short_trespass_pg[] = {
-               0, 0, 0, 0,
-               TRESPASS_PAGE,        /* Page code */
-               0x02,                 /* Page length - 2 */
-               h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-               0xff,                 /* Trespass target */
-               };
-       unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
-                               sizeof(long_trespass_pg);
-
-       /* get bio backing */
-       if (data_size > PAGE_SIZE)
-               /* this should never happen */
-               return NULL;
-
-       bio = get_failover_bio(path, data_size);
-       if (!bio) {
-               DMERR("emc_trespass_get: no bio");
-               return NULL;
-       }
-
-       page22 = (unsigned char *)bio_data(bio);
-       memset(page22, 0, data_size);
-
-       memcpy(page22, h->short_trespass ?
-               short_trespass_pg : long_trespass_pg, data_size);
-
-       /* get request for block layer packet command */
-       rq = get_failover_req(h, bio, path);
-       if (!rq) {
-               DMERR("emc_trespass_get: no rq");
-               free_bio(bio);
-               return NULL;
-       }
-
-       /* Prepare the command. */
-       rq->cmd[0] = MODE_SELECT;
-       rq->cmd[1] = 0x10;
-       rq->cmd[4] = data_size;
-       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-       return rq;
-}
-
-static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
-                       struct dm_path *path)
-{
-       struct request *rq;
-       struct request_queue *q = bdev_get_queue(path->dev->bdev);
-
-       /*
-        * We can either blindly init the pg (then look at the sense),
-        * or we can send some commands to get the state here (then
-        * possibly send the fo cmnd), or we can also have the
-        * initial state passed into us and then get an update here.
-        */
-       if (!q) {
-               DMINFO("emc_pg_init: no queue");
-               goto fail_path;
-       }
-
-       /* FIXME: The request should be pre-allocated. */
-       rq = emc_trespass_get(hwh->context, path);
-       if (!rq) {
-               DMERR("emc_pg_init: no rq");
-               goto fail_path;
-       }
-
-       DMINFO("emc_pg_init: sending switch-over command");
-       elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
-       return;
-
-fail_path:
-       dm_pg_init_complete(path, MP_FAIL_PATH);
-}
-
-static struct emc_handler *alloc_emc_handler(void)
-{
-       struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
-
-       if (h)
-               spin_lock_init(&h->lock);
-
-       return h;
-}
-
-static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-       struct emc_handler *h;
-       unsigned hr, short_trespass;
-
-       if (argc == 0) {
-               /* No arguments: use defaults */
-               hr = 0;
-               short_trespass = 0;
-       } else if (argc != 2) {
-               DMWARN("incorrect number of arguments");
-               return -EINVAL;
-       } else {
-               if ((sscanf(argv[0], "%u", &short_trespass) != 1)
-                       || (short_trespass > 1)) {
-                       DMWARN("invalid trespass mode selected");
-                       return -EINVAL;
-               }
-
-               if ((sscanf(argv[1], "%u", &hr) != 1)
-                       || (hr > 1)) {
-                       DMWARN("invalid honor reservation flag selected");
-                       return -EINVAL;
-               }
-       }
-
-       h = alloc_emc_handler();
-       if (!h)
-               return -ENOMEM;
-
-       hwh->context = h;
-
-       if ((h->short_trespass = short_trespass))
-               DMWARN("short trespass command will be send");
-       else
-               DMWARN("long trespass command will be send");
-
-       if ((h->hr = hr))
-               DMWARN("honor reservation bit will be set");
-       else
-               DMWARN("honor reservation bit will not be set (default)");
-
-       return 0;
-}
-
-static void emc_destroy(struct hw_handler *hwh)
-{
-       struct emc_handler *h = (struct emc_handler *) hwh->context;
-
-       kfree(h);
-       hwh->context = NULL;
-}
-
-static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
-{
-       /* FIXME: Patch from axboe still missing */
-#if 0
-       int sense;
-
-       if (bio->bi_error & BIO_SENSE) {
-               sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */
-
-               if (sense == 0x020403) {
-                       /* LUN Not Ready - Manual Intervention Required
-                        * indicates this is a passive path.
-                        *
-                        * FIXME: However, if this is seen and EVPD C0
-                        * indicates that this is due to a NDU in
-                        * progress, we should set FAIL_PATH too.
-                        * This indicates we might have to do a SCSI
-                        * inquiry in the end_io path. Ugh. */
-                       return MP_BYPASS_PG | MP_RETRY_IO;
-               } else if (sense == 0x052501) {
-                       /* An array based copy is in progress. Do not
-                        * fail the path, do not bypass to another PG,
-                        * do not retry. Fail the IO immediately.
-                        * (Actually this is the same conclusion as in
-                        * the default handler, but lets make sure.) */
-                       return 0;
-               } else if (sense == 0x062900) {
-                       /* Unit Attention Code. This is the first IO
-                        * to the new path, so just retry. */
-                       return MP_RETRY_IO;
-               }
-       }
-#endif
-
-       /* Try default handler */
-       return dm_scsi_err_handler(hwh, bio);
-}
-
-static struct hw_handler_type emc_hwh = {
-       .name = "emc",
-       .module = THIS_MODULE,
-       .create = emc_create,
-       .destroy = emc_destroy,
-       .pg_init = emc_pg_init,
-       .error = emc_error,
-};
-
-static int __init dm_emc_init(void)
-{
-       int r = dm_register_hw_handler(&emc_hwh);
-
-       if (r < 0)
-               DMERR("register failed %d", r);
-
-       DMINFO("version 0.0.3 loaded");
-
-       return r;
-}
-
-static void __exit dm_emc_exit(void)
-{
-       int r = dm_unregister_hw_handler(&emc_hwh);
-
-       if (r < 0)
-               DMERR("unregister failed %d", r);
-}
-
-module_init(dm_emc_init);
-module_exit(dm_emc_exit);
-
-MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
-MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
deleted file mode 100644
index 2ee84d8..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
- *
- * This file is released under the GPL.
- *
- * Multipath hardware handler registration.
- */
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-
-#include <linux/slab.h>
-
-struct hwh_internal {
-       struct hw_handler_type hwht;
-
-       struct list_head list;
-       long use;
-};
-
-#define hwht_to_hwhi(__hwht) container_of((__hwht), struct hwh_internal, hwht)
-
-static LIST_HEAD(_hw_handlers);
-static DECLARE_RWSEM(_hwh_lock);
-
-static struct hwh_internal *__find_hw_handler_type(const char *name)
-{
-       struct hwh_internal *hwhi;
-
-       list_for_each_entry(hwhi, &_hw_handlers, list) {
-               if (!strcmp(name, hwhi->hwht.name))
-                       return hwhi;
-       }
-
-       return NULL;
-}
-
-static struct hwh_internal *get_hw_handler(const char *name)
-{
-       struct hwh_internal *hwhi;
-
-       down_read(&_hwh_lock);
-       hwhi = __find_hw_handler_type(name);
-       if (hwhi) {
-               if ((hwhi->use == 0) && !try_module_get(hwhi->hwht.module))
-                       hwhi = NULL;
-               else
-                       hwhi->use++;
-       }
-       up_read(&_hwh_lock);
-
-       return hwhi;
-}
-
-struct hw_handler_type *dm_get_hw_handler(const char *name)
-{
-       struct hwh_internal *hwhi;
-
-       if (!name)
-               return NULL;
-
-       hwhi = get_hw_handler(name);
-       if (!hwhi) {
-               request_module("dm-%s", name);
-               hwhi = get_hw_handler(name);
-       }
-
-       return hwhi ? &hwhi->hwht : NULL;
-}
-
-void dm_put_hw_handler(struct hw_handler_type *hwht)
-{
-       struct hwh_internal *hwhi;
-
-       if (!hwht)
-               return;
-
-       down_read(&_hwh_lock);
-       hwhi = __find_hw_handler_type(hwht->name);
-       if (!hwhi)
-               goto out;
-
-       if (--hwhi->use == 0)
-               module_put(hwhi->hwht.module);
-
-       BUG_ON(hwhi->use < 0);
-
-      out:
-       up_read(&_hwh_lock);
-}
-
-static struct hwh_internal *_alloc_hw_handler(struct hw_handler_type *hwht)
-{
-       struct hwh_internal *hwhi = kzalloc(sizeof(*hwhi), GFP_KERNEL);
-
-       if (hwhi)
-               hwhi->hwht = *hwht;
-
-       return hwhi;
-}
-
-int dm_register_hw_handler(struct hw_handler_type *hwht)
-{
-       int r = 0;
-       struct hwh_internal *hwhi = _alloc_hw_handler(hwht);
-
-       if (!hwhi)
-               return -ENOMEM;
-
-       down_write(&_hwh_lock);
-
-       if (__find_hw_handler_type(hwht->name)) {
-               kfree(hwhi);
-               r = -EEXIST;
-       } else
-               list_add(&hwhi->list, &_hw_handlers);
-
-       up_write(&_hwh_lock);
-
-       return r;
-}
-
-int dm_unregister_hw_handler(struct hw_handler_type *hwht)
-{
-       struct hwh_internal *hwhi;
-
-       down_write(&_hwh_lock);
-
-       hwhi = __find_hw_handler_type(hwht->name);
-       if (!hwhi) {
-               up_write(&_hwh_lock);
-               return -EINVAL;
-       }
-
-       if (hwhi->use) {
-               up_write(&_hwh_lock);
-               return -ETXTBSY;
-       }
-
-       list_del(&hwhi->list);
-
-       up_write(&_hwh_lock);
-
-       kfree(hwhi);
-
-       return 0;
-}
-
-unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio)
-{
-#if 0
-       int sense_key, asc, ascq;
-
-       if (bio->bi_error & BIO_SENSE) {
-               /* FIXME: This is just an initial guess. */
-               /* key / asc / ascq */
-               sense_key = (bio->bi_error >> 16) & 0xff;
-               asc = (bio->bi_error >> 8) & 0xff;
-               ascq = bio->bi_error & 0xff;
-
-               switch (sense_key) {
-                       /* This block as a whole comes from the device.
-                        * So no point retrying on another path. */
-               case 0x03:      /* Medium error */
-               case 0x05:      /* Illegal request */
-               case 0x07:      /* Data protect */
-               case 0x08:      /* Blank check */
-               case 0x0a:      /* copy aborted */
-               case 0x0c:      /* obsolete - no clue ;-) */
-               case 0x0d:      /* volume overflow */
-               case 0x0e:      /* data miscompare */
-               case 0x0f:      /* reserved - no idea either. */
-                       return MP_ERROR_IO;
-
-                       /* For these errors it's unclear whether they
-                        * come from the device or the controller.
-                        * So just lets try a different path, and if
-                        * it eventually succeeds, user-space will clear
-                        * the paths again... */
-               case 0x02:      /* Not ready */
-               case 0x04:      /* Hardware error */
-               case 0x09:      /* vendor specific */
-               case 0x0b:      /* Aborted command */
-                       return MP_FAIL_PATH;
-
-               case 0x06:      /* Unit attention - might want to decode */
-                       if (asc == 0x04 && ascq == 0x01)
-                               /* "Unit in the process of
-                                * becoming ready" */
-                               return 0;
-                       return MP_FAIL_PATH;
-
-                       /* FIXME: For Unit Not Ready we may want
-                        * to have a generic pg activation
-                        * feature (START_UNIT). */
-
-                       /* Should these two ever end up in the
-                        * error path? I don't think so. */
-               case 0x00:      /* No sense */
-               case 0x01:      /* Recovered error */
-                       return 0;
-               }
-       }
-#endif
-
-       /* We got no idea how to decode the other kinds of errors ->
-        * assume generic error condition. */
-       return MP_FAIL_PATH;
-}
-
-EXPORT_SYMBOL_GPL(dm_register_hw_handler);
-EXPORT_SYMBOL_GPL(dm_unregister_hw_handler);
-EXPORT_SYMBOL_GPL(dm_scsi_err_handler);
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
deleted file mode 100644
index 46809dc..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
- *
- * This file is released under the GPL.
- *
- * Multipath hardware handler registration.
- */
-
-#ifndef        DM_HW_HANDLER_H
-#define        DM_HW_HANDLER_H
-
-#include <linux/device-mapper.h>
-
-#include "dm-mpath.h"
-
-struct hw_handler_type;
-struct hw_handler {
-       struct hw_handler_type *type;
-       struct mapped_device *md;
-       void *context;
-};
-
-/*
- * Constructs a hardware handler object, takes custom arguments
- */
-/* Information about a hardware handler type */
-struct hw_handler_type {
-       char *name;
-       struct module *module;
-
-       int (*create) (struct hw_handler *handler, unsigned int argc,
-                      char **argv);
-       void (*destroy) (struct hw_handler *hwh);
-
-       void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
-                        struct dm_path *path);
-       unsigned (*error) (struct hw_handler *hwh, struct bio *bio);
-       int (*status) (struct hw_handler *hwh, status_type_t type,
-                      char *result, unsigned int maxlen);
-};
-
-/* Register a hardware handler */
-int dm_register_hw_handler(struct hw_handler_type *type);
-
-/* Unregister a hardware handler */
-int dm_unregister_hw_handler(struct hw_handler_type *type);
-
-/* Returns a registered hardware handler type */
-struct hw_handler_type *dm_get_hw_handler(const char *name);
-
-/* Releases a hardware handler  */
-void dm_put_hw_handler(struct hw_handler_type *hwht);
-
-/* Default err function */
-unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio);
-
-/* Error flags for err and dm_pg_init_complete */
-#define MP_FAIL_PATH 1
-#define MP_BYPASS_PG 2
-#define MP_ERROR_IO  4 /* Don't retry this I/O */
-#define MP_RETRY 8
-
-#endif
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
deleted file mode 100644
index b63a0ab..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (C) 2005 Mike Christie, All rights reserved.
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
- * Authors: Mike Christie
- *          Dave Wysochanski
- *
- * This file is released under the GPL.
- *
- * This module implements the specific path activation code for
- * HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive)
- * storage arrays.
- * These storage arrays have controller-based failover, not
- * LUN-based failover.  However, LUN-based failover is the design
- * of dm-multipath. Thus, this module is written for LUN-based failover.
- */
-#include <linux/blkdev.h>
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-
-#define DM_MSG_PREFIX "multipath hp-sw"
-#define DM_HP_HWH_NAME "hp-sw"
-#define DM_HP_HWH_VER "1.0.0"
-
-struct hp_sw_context {
-       unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-/*
- * hp_sw_error_is_retryable - Is an HP-specific check condition retryable?
- * @req: path activation request
- *
- * Examine error codes of request and determine whether the error is retryable.
- * Some error codes are already retried by scsi-ml (see
- * scsi_decide_disposition), but some HP specific codes are not.
- * The intent of this routine is to supply the logic for the HP specific
- * check conditions.
- *
- * Returns:
- *  1 - command completed with retryable error
- *  0 - command completed with non-retryable error
- *
- * Possible optimizations
- * 1. More hardware-specific error codes
- */
-static int hp_sw_error_is_retryable(struct request *req)
-{
-       /*
-        * NOT_READY is known to be retryable
-        * For now we just dump out the sense data and call it retryable
-        */
-       if (status_byte(req->errors) == CHECK_CONDITION)
-               __scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len);
-
-       /*
-        * At this point we don't have complete information about all the error
-        * codes from this hardware, so we are just conservative and retry
-        * when in doubt.
-        */
-       return 1;
-}
-
-/*
- * hp_sw_end_io - Completion handler for HP path activation.
- * @req: path activation request
- * @error: scsi-ml error
- *
- *  Check sense data, free request structure, and notify dm that
- *  pg initialization has completed.
- *
- * Context: scsi-ml softirq
- *
- */
-static void hp_sw_end_io(struct request *req, int error)
-{
-       struct dm_path *path = req->end_io_data;
-       unsigned err_flags = 0;
-
-       if (!error) {
-               DMDEBUG("%s path activation command - success",
-                       path->dev->name);
-               goto out;
-       }
-
-       if (hp_sw_error_is_retryable(req)) {
-               DMDEBUG("%s path activation command - retry",
-                       path->dev->name);
-               err_flags = MP_RETRY;
-               goto out;
-       }
-
-       DMWARN("%s path activation fail - error=0x%x",
-              path->dev->name, error);
-       err_flags = MP_FAIL_PATH;
-
-out:
-       req->end_io_data = NULL;
-       __blk_put_request(req->q, req);
-       dm_pg_init_complete(path, err_flags);
-}
-
-/*
- * hp_sw_get_request - Allocate an HP specific path activation request
- * @path: path on which request will be sent (needed for request queue)
- *
- * The START command is used for path activation request.
- * These arrays are controller-based failover, not LUN based.
- * One START command issued to a single path will fail over all
- * LUNs for the same controller.
- *
- * Possible optimizations
- * 1. Make timeout configurable
- * 2. Preallocate request
- */
-static struct request *hp_sw_get_request(struct dm_path *path)
-{
-       struct request *req;
-       struct block_device *bdev = path->dev->bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
-       struct hp_sw_context *h = path->hwhcontext;
-
-       req = blk_get_request(q, WRITE, GFP_NOIO);
-       if (!req)
-               goto out;
-
-       req->timeout = 60 * HZ;
-
-       req->errors = 0;
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
-       req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-       req->end_io_data = path;
-       req->sense = h->sense;
-       memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
-
-       req->cmd[0] = START_STOP;
-       req->cmd[4] = 1;
-       req->cmd_len = COMMAND_SIZE(req->cmd[0]);
-
-out:
-       return req;
-}
-
-/*
- * hp_sw_pg_init - HP path activation implementation.
- * @hwh: hardware handler specific data
- * @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
- * @path: path to send initialization command
- *
- * Send an HP-specific path activation command on 'path'.
- * Do not try to optimize in any way, just send the activation command.
- * More than one path activation command may be sent to the same controller.
- * This seems to work fine for basic failover support.
- *
- * Possible optimizations
- * 1. Detect an in-progress activation request and avoid submitting another one
- * 2. Model the controller and only send a single activation request at a time
- * 3. Determine the state of a path before sending an activation request
- *
- * Context: kmpathd (see process_queued_ios() in dm-mpath.c)
- */
-static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
-                         struct dm_path *path)
-{
-       struct request *req;
-       struct hp_sw_context *h;
-
-       path->hwhcontext = hwh->context;
-       h = hwh->context;
-
-       req = hp_sw_get_request(path);
-       if (!req) {
-               DMERR("%s path activation command - allocation fail",
-                     path->dev->name);
-               goto retry;
-       }
-
-       DMDEBUG("%s path activation command - sent", path->dev->name);
-
-       blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
-       return;
-
-retry:
-       dm_pg_init_complete(path, MP_RETRY);
-}
-
-static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-       struct hp_sw_context *h;
-
-       h = kmalloc(sizeof(*h), GFP_KERNEL);
-       if (!h)
-               return -ENOMEM;
-
-       hwh->context = h;
-
-       return 0;
-}
-
-static void hp_sw_destroy(struct hw_handler *hwh)
-{
-       struct hp_sw_context *h = hwh->context;
-
-       kfree(h);
-}
-
-static struct hw_handler_type hp_sw_hwh = {
-       .name = DM_HP_HWH_NAME,
-       .module = THIS_MODULE,
-       .create = hp_sw_create,
-       .destroy = hp_sw_destroy,
-       .pg_init = hp_sw_pg_init,
-};
-
-static int __init hp_sw_init(void)
-{
-       int r;
-
-       r = dm_register_hw_handler(&hp_sw_hwh);
-       if (r < 0)
-               DMERR("register failed %d", r);
-       else
-               DMINFO("version " DM_HP_HWH_VER " loaded");
-
-       return r;
-}
-
-static void __exit hp_sw_exit(void)
-{
-       int r;
-
-       r = dm_unregister_hw_handler(&hp_sw_hwh);
-       if (r < 0)
-               DMERR("unregister failed %d", r);
-}
-
-module_init(hp_sw_init);
-module_exit(hp_sw_exit);
-
-MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support");
-MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DM_HP_HWH_VER);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
deleted file mode 100644
index 95e7773..0000000
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * Engenio/LSI RDAC DM HW handler
- *
- * Copyright (C) 2005 Mike Christie. All rights reserved.
- * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-
-#define DM_MSG_PREFIX "multipath rdac"
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-
-#define RDAC_DM_HWH_NAME "rdac"
-#define RDAC_DM_HWH_VER "0.4"
-
-/*
- * LSI mode page stuff
- *
- * These struct definitions and the forming of the
- * mode page were taken from the LSI RDAC 2.4 GPL'd
- * driver, and then converted to Linux conventions.
- */
-#define RDAC_QUIESCENCE_TIME 20;
-/*
- * Page Codes
- */
-#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
-
-/*
- * Controller modes definitions
- */
-#define RDAC_MODE_TRANSFER_ALL_LUNS            0x01
-#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS      0x02
-
-/*
- * RDAC Options field
- */
-#define RDAC_FORCED_QUIESENCE 0x02
-
-#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
-
-struct rdac_mode_6_hdr {
-       u8      data_len;
-       u8      medium_type;
-       u8      device_params;
-       u8      block_desc_len;
-};
-
-struct rdac_mode_10_hdr {
-       u16     data_len;
-       u8      medium_type;
-       u8      device_params;
-       u16     reserved;
-       u16     block_desc_len;
-};
-
-struct rdac_mode_common {
-       u8      controller_serial[16];
-       u8      alt_controller_serial[16];
-       u8      rdac_mode[2];
-       u8      alt_rdac_mode[2];
-       u8      quiescence_timeout;
-       u8      rdac_options;
-};
-
-struct rdac_pg_legacy {
-       struct rdac_mode_6_hdr hdr;
-       u8      page_code;
-       u8      page_len;
-       struct rdac_mode_common common;
-#define MODE6_MAX_LUN  32
-       u8      lun_table[MODE6_MAX_LUN];
-       u8      reserved2[32];
-       u8      reserved3;
-       u8      reserved4;
-};
-
-struct rdac_pg_expanded {
-       struct rdac_mode_10_hdr hdr;
-       u8      page_code;
-       u8      subpage_code;
-       u8      page_len[2];
-       struct rdac_mode_common common;
-       u8      lun_table[256];
-       u8      reserved3;
-       u8      reserved4;
-};
-
-struct c9_inquiry {
-       u8      peripheral_info;
-       u8      page_code;      /* 0xC9 */
-       u8      reserved1;
-       u8      page_len;
-       u8      page_id[4];     /* "vace" */
-       u8      avte_cvp;
-       u8      path_prio;
-       u8      reserved2[38];
-};
-
-#define SUBSYS_ID_LEN  16
-#define SLOT_ID_LEN    2
-
-struct c4_inquiry {
-       u8      peripheral_info;
-       u8      page_code;      /* 0xC4 */
-       u8      reserved1;
-       u8      page_len;
-       u8      page_id[4];     /* "subs" */
-       u8      subsys_id[SUBSYS_ID_LEN];
-       u8      revision[4];
-       u8      slot_id[SLOT_ID_LEN];
-       u8      reserved[2];
-};
-
-struct rdac_controller {
-       u8                      subsys_id[SUBSYS_ID_LEN];
-       u8                      slot_id[SLOT_ID_LEN];
-       int                     use_10_ms;
-       struct kref             kref;
-       struct list_head        node; /* list of all controllers */
-       spinlock_t              lock;
-       int                     submitted;
-       struct list_head        cmd_list; /* list of commands to be submitted */
-       union                   {
-               struct rdac_pg_legacy legacy;
-               struct rdac_pg_expanded expanded;
-       } mode_select;
-};
-struct c8_inquiry {
-       u8      peripheral_info;
-       u8      page_code; /* 0xC8 */
-       u8      reserved1;
-       u8      page_len;
-       u8      page_id[4]; /* "edid" */
-       u8      reserved2[3];
-       u8      vol_uniq_id_len;
-       u8      vol_uniq_id[16];
-       u8      vol_user_label_len;
-       u8      vol_user_label[60];
-       u8      array_uniq_id_len;
-       u8      array_unique_id[16];
-       u8      array_user_label_len;
-       u8      array_user_label[60];
-       u8      lun[8];
-};
-
-struct c2_inquiry {
-       u8      peripheral_info;
-       u8      page_code;      /* 0xC2 */
-       u8      reserved1;
-       u8      page_len;
-       u8      page_id[4];     /* "swr4" */
-       u8      sw_version[3];
-       u8      sw_date[3];
-       u8      features_enabled;
-       u8      max_lun_supported;
-       u8      partitions[239]; /* Total allocation length should be 0xFF */
-};
-
-struct rdac_handler {
-       struct list_head        entry; /* list waiting to submit MODE SELECT */
-       unsigned                timeout;
-       struct rdac_controller  *ctlr;
-#define UNINITIALIZED_LUN      (1 << 8)
-       unsigned                lun;
-       unsigned char           sense[SCSI_SENSE_BUFFERSIZE];
-       struct dm_path          *path;
-       struct work_struct      work;
-#define        SEND_C2_INQUIRY         1
-#define        SEND_C4_INQUIRY         2
-#define        SEND_C8_INQUIRY         3
-#define        SEND_C9_INQUIRY         4
-#define        SEND_MODE_SELECT        5
-       int                     cmd_to_send;
-       union                   {
-               struct c2_inquiry c2;
-               struct c4_inquiry c4;
-               struct c8_inquiry c8;
-               struct c9_inquiry c9;
-       } inq;
-};
-
-static LIST_HEAD(ctlr_list);
-static DEFINE_SPINLOCK(list_lock);
-static struct workqueue_struct *rdac_wkqd;
-
-static inline int had_failures(struct request *req, int error)
-{
-       return (error || host_byte(req->errors) != DID_OK ||
-                       msg_byte(req->errors) != COMMAND_COMPLETE);
-}
-
-static void rdac_resubmit_all(struct rdac_handler *h)
-{
-       struct rdac_controller *ctlr = h->ctlr;
-       struct rdac_handler *tmp, *h1;
-
-       spin_lock(&ctlr->lock);
-       list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
-               h1->cmd_to_send = SEND_C9_INQUIRY;
-               queue_work(rdac_wkqd, &h1->work);
-               list_del(&h1->entry);
-       }
-       ctlr->submitted = 0;
-       spin_unlock(&ctlr->lock);
-}
-
-static void mode_select_endio(struct request *req, int error)
-{
-       struct rdac_handler *h = req->end_io_data;
-       struct scsi_sense_hdr sense_hdr;
-       int sense = 0, fail = 0;
-
-       if (had_failures(req, error)) {
-               fail = 1;
-               goto failed;
-       }
-
-       if (status_byte(req->errors) == CHECK_CONDITION) {
-               scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
-                               &sense_hdr);
-               sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
-                               sense_hdr.ascq;
-               /* If it is retryable failure, submit the c9 inquiry again */
-               if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
-                   sense == 0x62900) {
-                       /* 0x59136    - Command lock contention
-                        * 0x[6b]8b02 - Quiesense in progress or achieved
-                        * 0x62900    - Power On, Reset, or Bus Device Reset
-                        */
-                       h->cmd_to_send = SEND_C9_INQUIRY;
-                       queue_work(rdac_wkqd, &h->work);
-                       goto done;
-               }
-               if (sense)
-                       DMINFO("MODE_SELECT failed on %s with sense 0x%x",
-                                               h->path->dev->name, sense);
-       }
-failed:
-       if (fail || sense)
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-       else
-               dm_pg_init_complete(h->path, 0);
-
-done:
-       rdac_resubmit_all(h);
-       __blk_put_request(req->q, req);
-}
-
-static struct request *get_rdac_req(struct rdac_handler *h,
-                       void *buffer, unsigned buflen, int rw)
-{
-       struct request *rq;
-       struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-       rq = blk_get_request(q, rw, GFP_KERNEL);
-
-       if (!rq) {
-               DMINFO("get_rdac_req: blk_get_request failed");
-               return NULL;
-       }
-
-       if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
-               blk_put_request(rq);
-               DMINFO("get_rdac_req: blk_rq_map_kern failed");
-               return NULL;
-       }
-
-       rq->sense = h->sense;
-       memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
-       rq->sense_len = 0;
-
-       rq->end_io_data = h;
-       rq->timeout = h->timeout;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
-       rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-       return rq;
-}
-
-static struct request *rdac_failover_get(struct rdac_handler *h)
-{
-       struct request *rq;
-       struct rdac_mode_common *common;
-       unsigned data_size;
-
-       if (h->ctlr->use_10_ms) {
-               struct rdac_pg_expanded *rdac_pg;
-
-               data_size = sizeof(struct rdac_pg_expanded);
-               rdac_pg = &h->ctlr->mode_select.expanded;
-               memset(rdac_pg, 0, data_size);
-               common = &rdac_pg->common;
-               rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
-               rdac_pg->subpage_code = 0x1;
-               rdac_pg->page_len[0] = 0x01;
-               rdac_pg->page_len[1] = 0x28;
-               rdac_pg->lun_table[h->lun] = 0x81;
-       } else {
-               struct rdac_pg_legacy *rdac_pg;
-
-               data_size = sizeof(struct rdac_pg_legacy);
-               rdac_pg = &h->ctlr->mode_select.legacy;
-               memset(rdac_pg, 0, data_size);
-               common = &rdac_pg->common;
-               rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
-               rdac_pg->page_len = 0x68;
-               rdac_pg->lun_table[h->lun] = 0x81;
-       }
-       common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
-       common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
-       common->rdac_options = RDAC_FORCED_QUIESENCE;
-
-       /* get request for block layer packet command */
-       rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
-       if (!rq) {
-               DMERR("rdac_failover_get: no rq");
-               return NULL;
-       }
-
-       /* Prepare the command. */
-       if (h->ctlr->use_10_ms) {
-               rq->cmd[0] = MODE_SELECT_10;
-               rq->cmd[7] = data_size >> 8;
-               rq->cmd[8] = data_size & 0xff;
-       } else {
-               rq->cmd[0] = MODE_SELECT;
-               rq->cmd[4] = data_size;
-       }
-       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-       return rq;
-}
-
-/* Acquires h->ctlr->lock */
-static void submit_mode_select(struct rdac_handler *h)
-{
-       struct request *rq;
-       struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-       spin_lock(&h->ctlr->lock);
-       if (h->ctlr->submitted) {
-               list_add(&h->entry, &h->ctlr->cmd_list);
-               goto drop_lock;
-       }
-
-       if (!q) {
-               DMINFO("submit_mode_select: no queue");
-               goto fail_path;
-       }
-
-       rq = rdac_failover_get(h);
-       if (!rq) {
-               DMERR("submit_mode_select: no rq");
-               goto fail_path;
-       }
-
-       DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
-
-       blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
-       h->ctlr->submitted = 1;
-       goto drop_lock;
-fail_path:
-       dm_pg_init_complete(h->path, MP_FAIL_PATH);
-drop_lock:
-       spin_unlock(&h->ctlr->lock);
-}
-
-static void release_ctlr(struct kref *kref)
-{
-       struct rdac_controller *ctlr;
-       ctlr = container_of(kref, struct rdac_controller, kref);
-
-       spin_lock(&list_lock);
-       list_del(&ctlr->node);
-       spin_unlock(&list_lock);
-       kfree(ctlr);
-}
-
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
-{
-       struct rdac_controller *ctlr, *tmp;
-
-       spin_lock(&list_lock);
-
-       list_for_each_entry(tmp, &ctlr_list, node) {
-               if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
-                         (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
-                       kref_get(&tmp->kref);
-                       spin_unlock(&list_lock);
-                       return tmp;
-               }
-       }
-       ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
-       if (!ctlr)
-               goto done;
-
-       /* initialize fields of controller */
-       memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
-       memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
-       kref_init(&ctlr->kref);
-       spin_lock_init(&ctlr->lock);
-       ctlr->submitted = 0;
-       ctlr->use_10_ms = -1;
-       INIT_LIST_HEAD(&ctlr->cmd_list);
-       list_add(&ctlr->node, &ctlr_list);
-done:
-       spin_unlock(&list_lock);
-       return ctlr;
-}
-
-static void c4_endio(struct request *req, int error)
-{
-       struct rdac_handler *h = req->end_io_data;
-       struct c4_inquiry *sp;
-
-       if (had_failures(req, error)) {
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-               goto done;
-       }
-
-       sp = &h->inq.c4;
-
-       h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
-
-       if (h->ctlr) {
-               h->cmd_to_send = SEND_C9_INQUIRY;
-               queue_work(rdac_wkqd, &h->work);
-       } else
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-done:
-       __blk_put_request(req->q, req);
-}
-
-static void c2_endio(struct request *req, int error)
-{
-       struct rdac_handler *h = req->end_io_data;
-       struct c2_inquiry *sp;
-
-       if (had_failures(req, error)) {
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-               goto done;
-       }
-
-       sp = &h->inq.c2;
-
-       /* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
-       if (sp->max_lun_supported >= MODE6_MAX_LUN)
-               h->ctlr->use_10_ms = 1;
-       else
-               h->ctlr->use_10_ms = 0;
-
-       h->cmd_to_send = SEND_MODE_SELECT;
-       queue_work(rdac_wkqd, &h->work);
-done:
-       __blk_put_request(req->q, req);
-}
-
-static void c9_endio(struct request *req, int error)
-{
-       struct rdac_handler *h = req->end_io_data;
-       struct c9_inquiry *sp;
-
-       if (had_failures(req, error)) {
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-               goto done;
-       }
-
-       /* We need to look at the sense keys here to take clear action.
-        * For now simple logic: If the host is in AVT mode or if controller
-        * owns the lun, return dm_pg_init_complete(), otherwise submit
-        * MODE SELECT.
-        */
-       sp = &h->inq.c9;
-
-       /* If in AVT mode, return success */
-       if ((sp->avte_cvp >> 7) == 0x1) {
-               dm_pg_init_complete(h->path, 0);
-               goto done;
-       }
-
-       /* If the controller on this path owns the LUN, return success */
-       if (sp->avte_cvp & 0x1) {
-               dm_pg_init_complete(h->path, 0);
-               goto done;
-       }
-
-       if (h->ctlr) {
-               if (h->ctlr->use_10_ms == -1)
-                       h->cmd_to_send = SEND_C2_INQUIRY;
-               else
-                       h->cmd_to_send = SEND_MODE_SELECT;
-       } else
-               h->cmd_to_send = SEND_C4_INQUIRY;
-       queue_work(rdac_wkqd, &h->work);
-done:
-       __blk_put_request(req->q, req);
-}
-
-static void c8_endio(struct request *req, int error)
-{
-       struct rdac_handler *h = req->end_io_data;
-       struct c8_inquiry *sp;
-
-       if (had_failures(req, error)) {
-               dm_pg_init_complete(h->path, MP_FAIL_PATH);
-               goto done;
-       }
-
-       /* We need to look at the sense keys here to take clear action.
-        * For now simple logic: Get the lun from the inquiry page.
-        */
-       sp = &h->inq.c8;
-       h->lun = sp->lun[7]; /* currently it uses only one byte */
-       h->cmd_to_send = SEND_C9_INQUIRY;
-       queue_work(rdac_wkqd, &h->work);
-done:
-       __blk_put_request(req->q, req);
-}
-
-static void submit_inquiry(struct rdac_handler *h, int page_code,
-               unsigned int len, rq_end_io_fn endio)
-{
-       struct request *rq;
-       struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-       if (!q)
-               goto fail_path;
-
-       rq = get_rdac_req(h, &h->inq, len, READ);
-       if (!rq)
-               goto fail_path;
-
-       /* Prepare the command. */
-       rq->cmd[0] = INQUIRY;
-       rq->cmd[1] = 1;
-       rq->cmd[2] = page_code;
-       rq->cmd[4] = len;
-       rq->cmd_len = COMMAND_SIZE(INQUIRY);
-       blk_execute_rq_nowait(q, NULL, rq, 1, endio);
-       return;
-
-fail_path:
-       dm_pg_init_complete(h->path, MP_FAIL_PATH);
-}
-
-static void service_wkq(struct work_struct *work)
-{
-       struct rdac_handler *h = container_of(work, struct rdac_handler, work);
-
-       switch (h->cmd_to_send) {
-       case SEND_C2_INQUIRY:
-               submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
-               break;
-       case SEND_C4_INQUIRY:
-               submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
-               break;
-       case SEND_C8_INQUIRY:
-               submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
-               break;
-       case SEND_C9_INQUIRY:
-               submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
-               break;
-       case SEND_MODE_SELECT:
-               submit_mode_select(h);
-               break;
-       default:
-               BUG();
-       }
-}
-/*
- * only support subpage2c until we confirm that this is just a matter of
- * of updating firmware or not, and RDAC (basic AVT works already) for now
- * but we can add these in in when we get time and testers
- */
-static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-       struct rdac_handler *h;
-       unsigned timeout;
-
-       if (argc == 0) {
-               /* No arguments: use defaults */
-               timeout = RDAC_FAILOVER_TIMEOUT;
-       } else if (argc != 1) {
-               DMWARN("incorrect number of arguments");
-               return -EINVAL;
-       } else {
-               if (sscanf(argv[1], "%u", &timeout) != 1) {
-                       DMWARN("invalid timeout value");
-                       return -EINVAL;
-               }
-       }
-
-       h = kzalloc(sizeof(*h), GFP_KERNEL);
-       if (!h)
-               return -ENOMEM;
-
-       hwh->context = h;
-       h->timeout = timeout;
-       h->lun = UNINITIALIZED_LUN;
-       INIT_WORK(&h->work, service_wkq);
-       DMWARN("using RDAC command with timeout %u", h->timeout);
-
-       return 0;
-}
-
-static void rdac_destroy(struct hw_handler *hwh)
-{
-       struct rdac_handler *h = hwh->context;
-
-       if (h->ctlr)
-               kref_put(&h->ctlr->kref, release_ctlr);
-       kfree(h);
-       hwh->context = NULL;
-}
-
-static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
-{
-       /* Try default handler */
-       return dm_scsi_err_handler(hwh, bio);
-}
-
-static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
-                       struct dm_path *path)
-{
-       struct rdac_handler *h = hwh->context;
-
-       h->path = path;
-       switch (h->lun) {
-       case UNINITIALIZED_LUN:
-               submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
-               break;
-       default:
-               submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
-       }
-}
-
-static struct hw_handler_type rdac_handler = {
-       .name = RDAC_DM_HWH_NAME,
-       .module = THIS_MODULE,
-       .create = rdac_create,
-       .destroy = rdac_destroy,
-       .pg_init = rdac_pg_init,
-       .error = rdac_error,
-};
-
-static int __init rdac_init(void)
-{
-       int r;
-
-       rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
-       if (!rdac_wkqd) {
-               DMERR("Failed to create workqueue rdac_wkqd.");
-               return -ENOMEM;
-       }
-
-       r = dm_register_hw_handler(&rdac_handler);
-       if (r < 0) {
-               DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
-               destroy_workqueue(rdac_wkqd);
-               return r;
-       }
-
-       DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
-       return 0;
-}
-
-static void __exit rdac_exit(void)
-{
-       int r = dm_unregister_hw_handler(&rdac_handler);
-
-       destroy_workqueue(rdac_wkqd);
-       if (r < 0)
-               DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
-}
-
-module_init(rdac_init);
-module_exit(rdac_exit);
-
-MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
-MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(RDAC_DM_HWH_VER);
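
All three deleted handlers above (dm-emc, dm-mpath-hp-sw, dm-mpath-rdac) talk to their arrays the same way: allocate a REQ_TYPE_BLOCK_PC request on the path's request queue, mark it no-merge/fail-fast, point rq->sense at handler-private storage, fill in the SCSI CDB, and submit it with an asynchronous completion callback. A condensed sketch of that shared setup, using the same 2.6.26-era block API as the code above (the function name is made up, and the GFP flag varies between the deleted handlers):

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

struct request *example_passthrough_rq(struct request_queue *q,
				       unsigned char *sense, void *priv,
				       unsigned timeout)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);

	if (!rq)
		return NULL;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;		/* SCSI passthrough command */
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	rq->sense = sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;
	rq->timeout = timeout;
	rq->end_io_data = priv;				/* handed back to the end_io callback */

	/* the caller fills rq->cmd[] / rq->cmd_len and then submits, e.g.
	 *	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	 */
	return rq;
}
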
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e7ee59e..9f7302d 100644
@@ -7,7 +7,6 @@
 
 #include "dm.h"
 #include "dm-path-selector.h"
-#include "dm-hw-handler.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
 #include "dm-uevent.h"
@@ -20,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <scsi/scsi_dh.h>
 #include <asm/atomic.h>
 
 #define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
 
        spinlock_t lock;
 
-       struct hw_handler hw_handler;
+       const char *hw_handler_name;
+       struct work_struct activate_path;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                m->queue_io = 1;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
+               INIT_WORK(&m->activate_path, activate_path);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 static void free_multipath(struct multipath *m)
 {
        struct priority_group *pg, *tmp;
-       struct hw_handler *hwh = &m->hw_handler;
 
        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }
 
-       if (hwh->type) {
-               hwh->type->destroy(hwh);
-               dm_put_hw_handler(hwh->type);
-       }
-
+       kfree(m->hw_handler_name);
        mempool_destroy(m->mpio_pool);
        kfree(m);
 }
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
-       struct hw_handler *hwh = &m->hw_handler;
-
        m->current_pg = pgpath->pg;
 
        /* Must we initialise the PG first, and queue I/O till it's ready? */
-       if (hwh->type && hwh->type->pg_init) {
+       if (m->hw_handler_name) {
                m->pg_init_required = 1;
                m->queue_io = 1;
        } else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
 {
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
-       struct hw_handler *hwh = &m->hw_handler;
        struct pgpath *pgpath = NULL;
        unsigned init_required = 0, must_queue = 1;
        unsigned long flags;
@@ -439,7 +434,7 @@ out:
        spin_unlock_irqrestore(&m->lock, flags);
 
        if (init_required)
-               hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
+               queue_work(kmpath_handlerd, &m->activate_path);
 
        if (!must_queue)
                dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
-       int r;
-       struct hw_handler_type *hwht;
        unsigned hw_argc;
        struct dm_target *ti = m->ti;
 
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
                {0, 1024, "invalid number of hardware handler args"},
        };
 
-       r = read_param(_params, shift(as), &hw_argc, &ti->error);
-       if (r)
+       if (read_param(_params, shift(as), &hw_argc, &ti->error))
                return -EINVAL;
 
        if (!hw_argc)
                return 0;
 
-       hwht = dm_get_hw_handler(shift(as));
-       if (!hwht) {
+       m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+       request_module("scsi_dh_%s", m->hw_handler_name);
+       if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
                ti->error = "unknown hardware handler type";
+               kfree(m->hw_handler_name);
+               m->hw_handler_name = NULL;
                return -EINVAL;
        }
-
-       m->hw_handler.md = dm_table_get_md(ti->table);
-       dm_put(m->hw_handler.md);
-
-       r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
-       if (r) {
-               dm_put_hw_handler(hwht);
-               ti->error = "hardware handler constructor failed";
-               return r;
-       }
-
-       m->hw_handler.type = hwht;
        consume(as, hw_argc - 1);
 
        return 0;
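
With the in-tree hardware-handler framework gone, parse_hw_handler() above no longer constructs a handler object; it just records the handler name, loads the matching scsi_dh_<name> module on demand, and verifies that a SCSI device handler of that name is registered. The same lookup in isolation (a sketch; the wrapper name is made up):

#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <scsi/scsi_dh.h>

char *example_resolve_hw_handler(const char *name)
{
	char *hw = kstrdup(name, GFP_KERNEL);

	if (!hw)
		return NULL;

	request_module("scsi_dh_%s", hw);	/* e.g. scsi_dh_rdac for "rdac" */
	if (!scsi_dh_handler_exist(hw)) {	/* nothing registered under that name */
		kfree(hw);
		return NULL;
	}

	return hw;				/* kept as m->hw_handler_name */
}
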
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
        struct multipath *m = (struct multipath *) ti->private;
 
+       flush_workqueue(kmpath_handlerd);
        flush_workqueue(kmultipathd);
        free_multipath(m);
 }
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
        return limit_reached;
 }
 
-/*
- * pg_init must call this when it has completed its initialisation
- */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
+static void pg_init_done(struct dm_path *path, int errors)
 {
        struct pgpath *pgpath = path_to_pgpath(path);
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;
 
-       /*
-        * If requested, retry pg_init until maximum number of retries exceeded.
-        * If retry not requested and PG already bypassed, always fail the path.
-        */
-       if (err_flags & MP_RETRY) {
-               if (pg_init_limit_reached(m, pgpath))
-                       err_flags |= MP_FAIL_PATH;
-       } else if (err_flags && pg->bypassed)
-               err_flags |= MP_FAIL_PATH;
-
-       if (err_flags & MP_FAIL_PATH)
+       /* device or driver problems */
+       switch (errors) {
+       case SCSI_DH_OK:
+               break;
+       case SCSI_DH_NOSYS:
+               if (!m->hw_handler_name) {
+                       errors = 0;
+                       break;
+               }
+               DMERR("Cannot failover device because scsi_dh_%s was not "
+                     "loaded.", m->hw_handler_name);
+               /*
+                * Fail path for now, so we do not ping-pong
+                */
                fail_path(pgpath);
-
-       if (err_flags & MP_BYPASS_PG)
+               break;
+       case SCSI_DH_DEV_TEMP_BUSY:
+               /*
+                * Probably doing something like FW upgrade on the
+                * controller so try the other pg.
+                */
                bypass_pg(m, pg, 1);
+               break;
+       /* TODO: For SCSI_DH_RETRY we should wait a couple of seconds */
+       case SCSI_DH_RETRY:
+       case SCSI_DH_IMM_RETRY:
+       case SCSI_DH_RES_TEMP_UNAVAIL:
+               if (pg_init_limit_reached(m, pgpath))
+                       fail_path(pgpath);
+               errors = 0;
+               break;
+       default:
+               /*
+                * We probably do not want to fail the path for a device
+                * error, but this is what the old dm did. In future
+                * patches we can do more advanced handling.
+                */
+               fail_path(pgpath);
+       }
 
        spin_lock_irqsave(&m->lock, flags);
-       if (err_flags & ~MP_RETRY) {
+       if (errors) {
+               DMERR("Could not failover device. Error %d.", errors);
                m->current_pgpath = NULL;
                m->current_pg = NULL;
-       } else if (!m->pg_init_required)
+       } else if (!m->pg_init_required) {
                m->queue_io = 0;
+               pg->bypassed = 0;
+       }
 
        m->pg_init_in_progress = 0;
        queue_work(kmultipathd, &m->process_queued_ios);
        spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+       int ret;
+       struct multipath *m =
+               container_of(work, struct multipath, activate_path);
+       struct dm_path *path = &m->current_pgpath->path;
+
+       ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+       pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
 static int do_end_io(struct multipath *m, struct bio *bio,
                     int error, struct dm_mpath_io *mpio)
 {
-       struct hw_handler *hwh = &m->hw_handler;
-       unsigned err_flags = MP_FAIL_PATH;      /* Default behavior */
        unsigned long flags;
 
        if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
        }
        spin_unlock_irqrestore(&m->lock, flags);
 
-       if (hwh->type && hwh->type->error)
-               err_flags = hwh->type->error(hwh, bio);
-
-       if (mpio->pgpath) {
-               if (err_flags & MP_FAIL_PATH)
-                       fail_path(mpio->pgpath);
-
-               if (err_flags & MP_BYPASS_PG)
-                       bypass_pg(m, mpio->pgpath->pg, 1);
-       }
-
-       if (err_flags & MP_ERROR_IO)
-               return -EIO;
+       if (mpio->pgpath)
+               fail_path(mpio->pgpath);
 
       requeue:
        dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
-       struct hw_handler *hwh = &m->hw_handler;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                        DMEMIT("pg_init_retries %u ", m->pg_init_retries);
        }
 
-       if (hwh->type && hwh->type->status)
-               sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
-       else if (!hwh->type || type == STATUSTYPE_INFO)
+       if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
-               DMEMIT("1 %s ", hwh->type->name);
+               DMEMIT("1 %s ", m->hw_handler_name);
 
        DMEMIT("%u ", m->nr_priority_groups);
 
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
                return -ENOMEM;
        }
 
+       /*
+        * A separate workqueue is used to handle the device handlers
+        * to avoid overloading the existing workqueue. Overloading the
+        * old workqueue would also create a bottleneck in the path of
+        * storage hardware device activation.
+        */
+       kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+       if (!kmpath_handlerd) {
+               DMERR("failed to create workqueue kmpath_handlerd");
+               destroy_workqueue(kmultipathd);
+               dm_unregister_target(&multipath_target);
+               kmem_cache_destroy(_mpio_cache);
+               return -ENOMEM;
+       }
+
        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
 {
        int r;
 
+       destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);
 
        r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
        kmem_cache_destroy(_mpio_cache);
 }
 
-EXPORT_SYMBOL_GPL(dm_pg_init_complete);
-
 module_init(dm_multipath_init);
 module_exit(dm_multipath_exit);
 
index b9cdcbb..c198b85 100644 (file)
@@ -16,7 +16,6 @@ struct dm_path {
        unsigned is_active;     /* Read-only */
 
        void *pscontext;        /* For path-selector use */
-       void *hwhcontext;       /* For hw-handler use */
 };
 
 /* Callback for hwh_pg_init_fn to use when complete */
index 1acbdd6..10b6ef7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi.h
index 2bd8ada..b2db333 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_cnfg.h
index d40d6d1..75e599b 100644 (file)
@@ -5,7 +5,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
@@ -103,7 +103,7 @@ static int mfcounter = 0;
  *  Public data...
  */
 
-struct proc_dir_entry *mpt_proc_root_dir;
+static struct proc_dir_entry *mpt_proc_root_dir;
 
 #define WHOINIT_UNKNOWN                0xAA
 
@@ -253,6 +253,55 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
        return 0;
 }
 
+/**
+ *     mpt_fault_reset_work - work performed on workq after ioc fault
+ *     @work: input argument, used to derive ioc
+ *
+ */
+static void
+mpt_fault_reset_work(struct work_struct *work)
+{
+       MPT_ADAPTER     *ioc =
+           container_of(work, MPT_ADAPTER, fault_reset_work.work);
+       u32              ioc_raw_state;
+       int              rc;
+       unsigned long    flags;
+
+       if (ioc->diagPending || !ioc->active)
+               goto out;
+
+       ioc_raw_state = mpt_GetIocState(ioc, 0);
+       if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+               printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
+                   ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+               printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+                   ioc->name, __FUNCTION__);
+               rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
+               printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
+                   __FUNCTION__, (rc == 0) ? "success" : "failed");
+               ioc_raw_state = mpt_GetIocState(ioc, 0);
+               if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
+                       printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
+                           "reset (%04xh)\n", ioc->name, ioc_raw_state &
+                           MPI_DOORBELL_DATA_MASK);
+       }
+
+ out:
+       /*
+        * Take turns polling alternate controller
+        */
+       if (ioc->alt_ioc)
+               ioc = ioc->alt_ioc;
+
+       /* rearm the timer */
+       spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+       if (ioc->reset_work_q)
+               queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+                       msecs_to_jiffies(MPT_POLLING_INTERVAL));
+       spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+}
+
+
 /*
  *  Process turbo (context) reply...
  */
@@ -1616,6 +1665,22 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Find lookup slot. */
        INIT_LIST_HEAD(&ioc->list);
 
+
+       /* Initialize workqueue */
+       INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
+       spin_lock_init(&ioc->fault_reset_work_lock);
+
+       snprintf(ioc->reset_work_q_name, KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id);
+       ioc->reset_work_q =
+               create_singlethread_workqueue(ioc->reset_work_q_name);
+       if (!ioc->reset_work_q) {
+               printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
+                   ioc->name);
+               pci_release_selected_regions(pdev, ioc->bars);
+               kfree(ioc);
+               return -ENOMEM;
+       }
+
        dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
            ioc->name, &ioc->facts, &ioc->pfacts[0]));
 
@@ -1727,6 +1792,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                iounmap(ioc->memmap);
                if (r != -5)
                        pci_release_selected_regions(pdev, ioc->bars);
+
+               destroy_workqueue(ioc->reset_work_q);
+               ioc->reset_work_q = NULL;
+
                kfree(ioc);
                pci_set_drvdata(pdev, NULL);
                return r;
@@ -1759,6 +1828,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 #endif
 
+       if (!ioc->alt_ioc)
+               queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+                       msecs_to_jiffies(MPT_POLLING_INTERVAL));
+
        return 0;
 }
 
@@ -1774,6 +1847,19 @@ mpt_detach(struct pci_dev *pdev)
        MPT_ADAPTER     *ioc = pci_get_drvdata(pdev);
        char pname[32];
        u8 cb_idx;
+       unsigned long flags;
+       struct workqueue_struct *wq;
+
+       /*
+        * Stop polling ioc for fault condition
+        */
+       spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+       wq = ioc->reset_work_q;
+       ioc->reset_work_q = NULL;
+       spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+       cancel_delayed_work(&ioc->fault_reset_work);
+       destroy_workqueue(wq);
+
 
        sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
        remove_proc_entry(pname, NULL);
@@ -7456,7 +7542,6 @@ EXPORT_SYMBOL(mpt_resume);
 EXPORT_SYMBOL(mpt_suspend);
 #endif
 EXPORT_SYMBOL(ioc_list);
-EXPORT_SYMBOL(mpt_proc_root_dir);
 EXPORT_SYMBOL(mpt_register);
 EXPORT_SYMBOL(mpt_deregister);
 EXPORT_SYMBOL(mpt_event_register);
index a8f6174..6adab64 100644 (file)
@@ -5,7 +5,7 @@
  *          LSIFC9xx/LSI409xx Fibre Channel
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
 #endif
 
 #ifndef COPYRIGHT
-#define COPYRIGHT      "Copyright (c) 1999-2007 " MODULEAUTHOR
+#define COPYRIGHT      "Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON       "3.04.06"
-#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.06"
+#define MPT_LINUX_VERSION_COMMON       "3.04.07"
+#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.07"
 #define WHAT_MAGIC_STRING              "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
 /* debug print string length used for events and iocstatus */
 # define EVENT_DESCR_STR_SZ             100
 
+#define MPT_POLLING_INTERVAL           1000    /* in milliseconds */
+
 #ifdef __KERNEL__      /* { */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 
@@ -709,6 +711,12 @@ typedef struct _MPT_ADAPTER
        struct workqueue_struct *fc_rescan_work_q;
        struct scsi_cmnd        **ScsiLookup;
        spinlock_t                scsi_lookup_lock;
+
+       char                     reset_work_q_name[KOBJ_NAME_LEN];
+       struct workqueue_struct *reset_work_q;
+       struct delayed_work      fault_reset_work;
+       spinlock_t               fault_reset_work_lock;
+
 } MPT_ADAPTER;
 
 /*
@@ -919,7 +927,6 @@ extern int   mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhys
  *  Public data decl's...
  */
 extern struct list_head          ioc_list;
-extern struct proc_dir_entry   *mpt_proc_root_dir;
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 #endif         /* } __KERNEL__ */
index c594656..a592042 100644 (file)
@@ -4,7 +4,7 @@
  *      For use with LSI PCI chip/adapters
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
@@ -66,7 +66,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 
-#define COPYRIGHT      "Copyright (c) 1999-2007 LSI Corporation"
+#define COPYRIGHT      "Copyright (c) 1999-2008 LSI Corporation"
 #define MODULEAUTHOR   "LSI Corporation"
 #include "mptbase.h"
 #include "mptctl.h"
index 2c18901..d564cc9 100644 (file)
@@ -5,7 +5,7 @@
  *          LSIFC9xx/LSI409xx Fibre Channel
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index ffdb0a6..510b9f4 100644 (file)
@@ -3,7 +3,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index 1e24ab4..fc31ca6 100644 (file)
@@ -3,7 +3,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index 7950fc6..d709d92 100644 (file)
@@ -4,7 +4,7 @@
  *      For use with LSI Fibre Channel PCI chip/adapters
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 2000-2007 LSI Corporation
+ *  Copyright (c) 2000-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index bafb67f..33927ee 100644 (file)
@@ -4,7 +4,7 @@
  *      For use with LSI Fibre Channel PCI chip/adapters
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 2000-2007 LSI Corporation
+ *  Copyright (c) 2000-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index 4d492ba..b1147aa 100644 (file)
@@ -3,7 +3,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
index 7c150f5..2b544e0 100644 (file)
@@ -5,7 +5,7 @@
  *          LSIFC9xx/LSI409xx Fibre Channel
  *      running LSI MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index c68ef00..d142b6b 100644 (file)
@@ -3,7 +3,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index 7ea7da0..319aa30 100644 (file)
@@ -5,7 +5,7 @@
  *          LSIFC9xx/LSI409xx Fibre Channel
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
index 1effca4..6162014 100644 (file)
@@ -3,7 +3,7 @@
  *      For use with LSI PCI chip/adapter(s)
  *      running LSI Fusion MPT (Message Passing Technology) firmware.
  *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  */
@@ -447,6 +447,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
        spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;
 
        spi_offset(starget) = 0;
+       spi_period(starget) = 0xFF;
        mptspi_write_width(starget, 0);
 
        return 0;
index d6a78f1..cb301cc 100644 (file)
@@ -3,7 +3,6 @@
 #
 
 zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
-            zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
-            zfcp_sysfs_unit.o zfcp_sysfs_driver.o
+            zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
 
 obj-$(CONFIG_ZFCP) += zfcp.o
index 8c7e2b7..90abfd0 100644 (file)
@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Module interface and handling of zfcp data structures.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 /*
  *            Maxim Shchetynin
  *            Volker Sameske
  *            Ralph Wuerthner
+ *            Michael Loehr
+ *            Swen Schillig
+ *            Christof Schmitt
+ *            Martin Petermann
+ *            Sven Schuetz
  */
 
+#include <linux/miscdevice.h>
 #include "zfcp_ext.h"
 
-/* accumulated log level (module parameter) */
-static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
 static char *device;
-/*********************** FUNCTION PROTOTYPES *********************************/
-
-/* written against the module interface */
-static int __init  zfcp_module_init(void);
-
-/* FCP related */
-static void zfcp_ns_gid_pn_handler(unsigned long);
-
-/* miscellaneous */
-static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
-static void zfcp_sg_list_free(struct zfcp_sg_list *);
-static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
-                                      void __user *, size_t);
-static int zfcp_sg_list_copy_to_user(void __user *,
-                                    struct zfcp_sg_list *, size_t);
-static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
-
-#define ZFCP_CFDC_IOC_MAGIC                     0xDD
-#define ZFCP_CFDC_IOC \
-       _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
-
-
-static const struct file_operations zfcp_cfdc_fops = {
-       .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = zfcp_cfdc_dev_ioctl
-#endif
-};
-
-static struct miscdevice zfcp_cfdc_misc = {
-       .minor = ZFCP_CFDC_DEV_MINOR,
-       .name = ZFCP_CFDC_DEV_NAME,
-       .fops = &zfcp_cfdc_fops
-};
-
-/*********************** KERNEL/MODULE PARAMETERS  ***************************/
-
-/* declare driver module init/cleanup functions */
-module_init(zfcp_module_init);
 
 MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
-MODULE_DESCRIPTION
-    ("FCP (SCSI over Fibre Channel) HBA driver for IBM System z9 and zSeries");
+MODULE_DESCRIPTION("FCP HBA driver");
 MODULE_LICENSE("GPL");
 
 module_param(device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
-module_param(loglevel, uint, 0400);
-MODULE_PARM_DESC(loglevel,
-                "log levels, 8 nibbles: "
-                "FC ERP QDIO CIO Config FSF SCSI Other, "
-                "levels: 0=none 1=normal 2=devel 3=trace");
-
-/****************************************************************/
-/************** Functions without logging ***********************/
-/****************************************************************/
-
-void
-_zfcp_hex_dump(char *addr, int count)
-{
-       int i;
-       for (i = 0; i < count; i++) {
-               printk("%02x", addr[i]);
-               if ((i % 4) == 3)
-                       printk(" ");
-               if ((i % 32) == 31)
-                       printk("\n");
-       }
-       if (((i-1) % 32) != 31)
-               printk("\n");
-}
-
-
-/****************************************************************/
-/****** Functions to handle the request ID hash table    ********/
-/****************************************************************/
-
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_FSF
-
 static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
 {
        int idx;
@@ -132,11 +51,12 @@ static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
        return 0;
 }
 
-static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
-{
-       kfree(adapter->req_list);
-}
-
+/**
+ * zfcp_reqlist_isempty - is the request list empty
+ * @adapter: pointer to struct zfcp_adapter
+ *
+ * Returns: true if list is empty, false otherwise
+ */
 int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
 {
        unsigned int idx;
@@ -147,62 +67,58 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
        return 1;
 }
 
-#undef ZFCP_LOG_AREA
-
-/****************************************************************/
-/************** Uncategorised Functions *************************/
-/****************************************************************/
-
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_OTHER
-
-/**
- * zfcp_device_setup - setup function
- * @str: pointer to parameter string
- *
- * Parse "device=..." parameter string.
- */
-static int __init
-zfcp_device_setup(char *devstr)
+static int __init zfcp_device_setup(char *devstr)
 {
-       char *tmp, *str;
-       size_t len;
+       char *token;
+       char *str;
 
        if (!devstr)
                return 0;
 
-       len = strlen(devstr) + 1;
-       str = kmalloc(len, GFP_KERNEL);
+       /* duplicate devstr and keep the original for sysfs presentation */
+       str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
        if (!str)
-               goto err_out;
-       memcpy(str, devstr, len);
+               return 0;
 
-       tmp = strchr(str, ',');
-       if (!tmp)
-               goto err_out;
-       *tmp++ = '\0';
-       strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE);
-       zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0';
+       strcpy(str, devstr);
 
-       zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0);
-       if (*tmp++ != ',')
+       token = strsep(&str, ",");
+       if (!token || strlen(token) >= BUS_ID_SIZE)
                goto err_out;
-       if (*tmp == '\0')
+       strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
+
+       token = strsep(&str, ",");
+       if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn))
                goto err_out;
 
-       zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0);
-       if (*tmp != '\0')
+       token = strsep(&str, ",");
+       if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun))
                goto err_out;
+
        kfree(str);
        return 1;
 
  err_out:
-       ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str);
        kfree(str);
+       pr_err("zfcp: Parse error for device parameter string %s, "
+              "device not attached.\n", devstr);
        return 0;
 }
 
-static void __init
-zfcp_init_device_configure(void)
+static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
+{
+       struct zfcp_adapter *adapter;
+
+       list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
+               if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
+                            BUS_ID_SIZE) == 0) &&
+                   !(atomic_read(&adapter->status) &
+                     ZFCP_STATUS_COMMON_REMOVE))
+                   return adapter;
+       return NULL;
+}
+
+static void __init zfcp_init_device_configure(void)
 {
        struct zfcp_adapter *adapter;
        struct zfcp_port *port;
@@ -215,101 +131,75 @@ zfcp_init_device_configure(void)
                zfcp_adapter_get(adapter);
        read_unlock_irq(&zfcp_data.config_lock);
 
-       if (adapter == NULL)
+       if (!adapter)
                goto out_adapter;
        port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
-       if (!port)
+       if (IS_ERR(port))
                goto out_port;
        unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
-       if (!unit)
+       if (IS_ERR(unit))
                goto out_unit;
        up(&zfcp_data.config_sema);
        ccw_device_set_online(adapter->ccw_device);
        zfcp_erp_wait(adapter);
        down(&zfcp_data.config_sema);
        zfcp_unit_put(unit);
- out_unit:
+out_unit:
        zfcp_port_put(port);
- out_port:
+out_port:
        zfcp_adapter_put(adapter);
- out_adapter:
+out_adapter:
        up(&zfcp_data.config_sema);
        return;
 }
 
-static int calc_alignment(int size)
+static struct kmem_cache *zfcp_cache_create(int size, char *name)
 {
        int align = 1;
-
-       if (!size)
-               return 0;
-
        while ((size - align) > 0)
                align <<= 1;
-
-       return align;
+       return kmem_cache_create(name, size, align, 0, NULL);
 }
 
-static int __init
-zfcp_module_init(void)
+static int __init zfcp_module_init(void)
 {
        int retval = -ENOMEM;
-       int size, align;
 
-       size = sizeof(struct zfcp_fsf_req_qtcb);
-       align = calc_alignment(size);
-       zfcp_data.fsf_req_qtcb_cache =
-               kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
+       zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
+                       sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
        if (!zfcp_data.fsf_req_qtcb_cache)
                goto out;
 
-       size = sizeof(struct fsf_status_read_buffer);
-       align = calc_alignment(size);
-       zfcp_data.sr_buffer_cache =
-               kmem_cache_create("zfcp_sr", size, align, 0, NULL);
+       zfcp_data.sr_buffer_cache = zfcp_cache_create(
+                       sizeof(struct fsf_status_read_buffer), "zfcp_sr");
        if (!zfcp_data.sr_buffer_cache)
                goto out_sr_cache;
 
-       size = sizeof(struct zfcp_gid_pn_data);
-       align = calc_alignment(size);
-       zfcp_data.gid_pn_cache =
-               kmem_cache_create("zfcp_gid", size, align, 0, NULL);
+       zfcp_data.gid_pn_cache = zfcp_cache_create(
+                       sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
        if (!zfcp_data.gid_pn_cache)
                goto out_gid_cache;
 
-       atomic_set(&zfcp_data.loglevel, loglevel);
-
-       /* initialize adapter list */
        INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
-
-       /* initialize adapters to be removed list head */
        INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
 
+       sema_init(&zfcp_data.config_sema, 1);
+       rwlock_init(&zfcp_data.config_lock);
+
        zfcp_data.scsi_transport_template =
                fc_attach_transport(&zfcp_transport_functions);
        if (!zfcp_data.scsi_transport_template)
                goto out_transport;
 
        retval = misc_register(&zfcp_cfdc_misc);
-       if (retval != 0) {
-               ZFCP_LOG_INFO("registration of misc device "
-                             "zfcp_cfdc failed\n");
+       if (retval) {
+               pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
                goto out_misc;
        }
 
-       ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
-                      ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
-
-       /* Initialise proc semaphores */
-       sema_init(&zfcp_data.config_sema, 1);
-
-       /* initialise configuration rw lock */
-       rwlock_init(&zfcp_data.config_lock);
-
-       /* setup dynamic I/O */
        retval = zfcp_ccw_register();
        if (retval) {
-               ZFCP_LOG_NORMAL("registration with common I/O layer failed\n");
+               pr_err("zfcp: Registration with common I/O layer failed.\n");
                goto out_ccw_register;
        }
 
@@ -318,527 +208,88 @@ zfcp_module_init(void)
 
        goto out;
 
- out_ccw_register:
+out_ccw_register:
        misc_deregister(&zfcp_cfdc_misc);
- out_misc:
+out_misc:
        fc_release_transport(zfcp_data.scsi_transport_template);
- out_transport:
+out_transport:
        kmem_cache_destroy(zfcp_data.gid_pn_cache);
- out_gid_cache:
+out_gid_cache:
        kmem_cache_destroy(zfcp_data.sr_buffer_cache);
- out_sr_cache:
+out_sr_cache:
        kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
- out:
+out:
        return retval;
 }
 
-/*
- * function:    zfcp_cfdc_dev_ioctl
- *
- * purpose:     Handle control file upload/download transaction via IOCTL
- *             interface
- *
- * returns:     0           - Operation completed successfuly
- *              -ENOTTY     - Unknown IOCTL command
- *              -EINVAL     - Invalid sense data record
- *              -ENXIO      - The FCP adapter is not available
- *              -EOPNOTSUPP - The FCP adapter does not have CFDC support
- *              -ENOMEM     - Insufficient memory
- *              -EFAULT     - User space memory I/O operation fault
- *              -EPERM      - Cannot create or queue FSF request or create SBALs
- *              -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
- */
-static long
-zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
-                   unsigned long buffer)
-{
-       struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
-       struct zfcp_adapter *adapter = NULL;
-       struct zfcp_fsf_req *fsf_req = NULL;
-       struct zfcp_sg_list *sg_list = NULL;
-       u32 fsf_command, option;
-       char *bus_id = NULL;
-       int retval = 0;
-
-       sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL);
-       if (sense_data == NULL) {
-               retval = -ENOMEM;
-               goto out;
-       }
-
-       sg_list = kzalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL);
-       if (sg_list == NULL) {
-               retval = -ENOMEM;
-               goto out;
-       }
-
-       if (command != ZFCP_CFDC_IOC) {
-               ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command);
-               retval = -ENOTTY;
-               goto out;
-       }
-
-       if ((sense_data_user = (void __user *) buffer) == NULL) {
-               ZFCP_LOG_INFO("sense data record is required\n");
-               retval = -EINVAL;
-               goto out;
-       }
-
-       retval = copy_from_user(sense_data, sense_data_user,
-                               sizeof(struct zfcp_cfdc_sense_data));
-       if (retval) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
-               ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n",
-                             ZFCP_CFDC_SIGNATURE);
-               retval = -EINVAL;
-               goto out;
-       }
-
-       switch (sense_data->command) {
-
-       case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
-               fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-               option = FSF_CFDC_OPTION_NORMAL_MODE;
-               break;
-
-       case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
-               fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-               option = FSF_CFDC_OPTION_FORCE;
-               break;
-
-       case ZFCP_CFDC_CMND_FULL_ACCESS:
-               fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-               option = FSF_CFDC_OPTION_FULL_ACCESS;
-               break;
-
-       case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
-               fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-               option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
-               break;
-
-       case ZFCP_CFDC_CMND_UPLOAD:
-               fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
-               option = 0;
-               break;
-
-       default:
-               ZFCP_LOG_INFO("invalid command code 0x%08x\n",
-                             sense_data->command);
-               retval = -EINVAL;
-               goto out;
-       }
-
-       bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
-       if (bus_id == NULL) {
-               retval = -ENOMEM;
-               goto out;
-       }
-       snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x",
-               (sense_data->devno >> 24),
-               (sense_data->devno >> 16) & 0xFF,
-               (sense_data->devno & 0xFFFF));
-
-       read_lock_irq(&zfcp_data.config_lock);
-       adapter = zfcp_get_adapter_by_busid(bus_id);
-       if (adapter)
-               zfcp_adapter_get(adapter);
-       read_unlock_irq(&zfcp_data.config_lock);
-
-       kfree(bus_id);
-
-       if (adapter == NULL) {
-               ZFCP_LOG_INFO("invalid adapter\n");
-               retval = -ENXIO;
-               goto out;
-       }
-
-       if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
-               retval = zfcp_sg_list_alloc(sg_list,
-                                           ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
-               if (retval) {
-                       retval = -ENOMEM;
-                       goto out;
-               }
-       }
-
-       if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
-           (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
-               retval = zfcp_sg_list_copy_from_user(
-                       sg_list, &sense_data_user->control_file,
-                       ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
-               if (retval) {
-                       retval = -EFAULT;
-                       goto out;
-               }
-       }
-
-       retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command,
-                                      option, sg_list);
-       if (retval)
-               goto out;
-
-       if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
-           (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-               retval = -ENXIO;
-               goto out;
-       }
-
-       sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
-       memcpy(&sense_data->fsf_status_qual,
-              &fsf_req->qtcb->header.fsf_status_qual,
-              sizeof(union fsf_status_qual));
-       memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
-
-       retval = copy_to_user(sense_data_user, sense_data,
-               sizeof(struct zfcp_cfdc_sense_data));
-       if (retval) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       if (sense_data->command & ZFCP_CFDC_UPLOAD) {
-               retval = zfcp_sg_list_copy_to_user(
-                       &sense_data_user->control_file, sg_list,
-                       ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
-               if (retval) {
-                       retval = -EFAULT;
-                       goto out;
-               }
-       }
-
- out:
-       if (fsf_req != NULL)
-               zfcp_fsf_req_free(fsf_req);
-
-       if ((adapter != NULL) && (retval != -ENXIO))
-               zfcp_adapter_put(adapter);
-
-       if (sg_list != NULL) {
-               zfcp_sg_list_free(sg_list);
-               kfree(sg_list);
-       }
-
-       kfree(sense_data);
-
-       return retval;
-}
-
-
-/**
- * zfcp_sg_list_alloc - create a scatter-gather list of the specified size
- * @sg_list: structure describing a scatter gather list
- * @size: size of scatter-gather list
- * Return: 0 on success, else -ENOMEM
- *
- * In sg_list->sg a pointer to the created scatter-gather list is returned,
- * or NULL if we run out of memory. sg_list->count specifies the number of
- * elements of the scatter-gather list. The maximum size of a single element
- * in the scatter-gather list is PAGE_SIZE.
- */
-static int
-zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
-{
-       struct scatterlist *sg;
-       unsigned int i;
-       int retval = 0;
-       void *address;
-
-       BUG_ON(sg_list == NULL);
-
-       sg_list->count = size >> PAGE_SHIFT;
-       if (size & ~PAGE_MASK)
-               sg_list->count++;
-       sg_list->sg = kcalloc(sg_list->count, sizeof(struct scatterlist),
-                             GFP_KERNEL);
-       if (sg_list->sg == NULL) {
-               sg_list->count = 0;
-               retval = -ENOMEM;
-               goto out;
-       }
-       sg_init_table(sg_list->sg, sg_list->count);
-
-       for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
-               address = (void *) get_zeroed_page(GFP_KERNEL);
-               if (address == NULL) {
-                       sg_list->count = i;
-                       zfcp_sg_list_free(sg_list);
-                       retval = -ENOMEM;
-                       goto out;
-               }
-               zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE));
-               size -= sg->length;
-       }
-
- out:
-       return retval;
-}
-
-
-/**
- * zfcp_sg_list_free - free memory of a scatter-gather list
- * @sg_list: structure describing a scatter-gather list
- *
- * Memory for each element in the scatter-gather list is freed.
- * Finally sg_list->sg is freed itself and sg_list->count is reset.
- */
-static void
-zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
-{
-       struct scatterlist *sg;
-       unsigned int i;
-
-       BUG_ON(sg_list == NULL);
-
-       for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
-               free_page((unsigned long) zfcp_sg_to_address(sg));
-
-       sg_list->count = 0;
-       kfree(sg_list->sg);
-}
-
-/**
- * zfcp_sg_size - determine size of a scatter-gather list
- * @sg: array of (struct scatterlist)
- * @sg_count: elements in array
- * Return: size of entire scatter-gather list
- */
-static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
-{
-       unsigned int i;
-       struct scatterlist *p;
-       size_t size;
-
-       size = 0;
-       for (i = 0, p = sg; i < sg_count; i++, p++) {
-               BUG_ON(p == NULL);
-               size += p->length;
-       }
-
-       return size;
-}
-
-
-/**
- * zfcp_sg_list_copy_from_user -copy data from user space to scatter-gather list
- * @sg_list: structure describing a scatter-gather list
- * @user_buffer: pointer to buffer in user space
- * @size: number of bytes to be copied
- * Return: 0 on success, -EFAULT if copy_from_user fails.
- */
-static int
-zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
-                           void __user *user_buffer,
-                            size_t size)
-{
-       struct scatterlist *sg;
-       unsigned int length;
-       void *zfcp_buffer;
-       int retval = 0;
-
-       BUG_ON(sg_list == NULL);
-
-       if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
-               return -EFAULT;
-
-       for (sg = sg_list->sg; size > 0; sg++) {
-               length = min((unsigned int)size, sg->length);
-               zfcp_buffer = zfcp_sg_to_address(sg);
-               if (copy_from_user(zfcp_buffer, user_buffer, length)) {
-                       retval = -EFAULT;
-                       goto out;
-               }
-               user_buffer += length;
-               size -= length;
-       }
-
- out:
-       return retval;
-}
-
-
-/**
- * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space
- * @user_buffer: pointer to buffer in user space
- * @sg_list: structure describing a scatter-gather list
- * @size: number of bytes to be copied
- * Return: 0 on success, -EFAULT if copy_to_user fails
- */
-static int
-zfcp_sg_list_copy_to_user(void __user  *user_buffer,
-                         struct zfcp_sg_list *sg_list,
-                          size_t size)
-{
-       struct scatterlist *sg;
-       unsigned int length;
-       void *zfcp_buffer;
-       int retval = 0;
-
-       BUG_ON(sg_list == NULL);
-
-       if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
-               return -EFAULT;
-
-       for (sg = sg_list->sg; size > 0; sg++) {
-               length = min((unsigned int) size, sg->length);
-               zfcp_buffer = zfcp_sg_to_address(sg);
-               if (copy_to_user(user_buffer, zfcp_buffer, length)) {
-                       retval = -EFAULT;
-                       goto out;
-               }
-               user_buffer += length;
-               size -= length;
-       }
-
- out:
-       return retval;
-}
-
-
-#undef ZFCP_LOG_AREA
-
-/****************************************************************/
-/****** Functions for configuration/set-up of structures ********/
-/****************************************************************/
-
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_CONFIG
+module_init(zfcp_module_init);
 
 /**
  * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
  * @port: pointer to port to search for unit
  * @fcp_lun: FCP LUN to search for
- * Traverse list of all units of a port and return pointer to a unit
- * with the given FCP LUN.
+ *
+ * Returns: pointer to zfcp_unit or NULL
  */
-struct zfcp_unit *
-zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun)
+struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
+                                      fcp_lun_t fcp_lun)
 {
        struct zfcp_unit *unit;
-       int found = 0;
 
-       list_for_each_entry(unit, &port->unit_list_head, list) {
+       list_for_each_entry(unit, &port->unit_list_head, list)
                if ((unit->fcp_lun == fcp_lun) &&
-                   !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status))
-               {
-                       found = 1;
-                       break;
-               }
-       }
-       return found ? unit : NULL;
+                   !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
+                   return unit;
+       return NULL;
 }
 
 /**
  * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
  * @adapter: pointer to adapter to search for port
  * @wwpn: wwpn to search for
- * Traverse list of all ports of an adapter and return pointer to a port
- * with the given wwpn.
- */
-struct zfcp_port *
-zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn)
-{
-       struct zfcp_port *port;
-       int found = 0;
-
-       list_for_each_entry(port, &adapter->port_list_head, list) {
-               if ((port->wwpn == wwpn) &&
-                   !(atomic_read(&port->status) &
-                     (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) {
-                       found = 1;
-                       break;
-               }
-       }
-       return found ? port : NULL;
-}
-
-/**
- * zfcp_get_port_by_did - find port in port list of adapter by d_id
- * @adapter: pointer to adapter to search for port
- * @d_id: d_id to search for
- * Traverse list of all ports of an adapter and return pointer to a port
- * with the given d_id.
+ *
+ * Returns: pointer to zfcp_port or NULL
  */
-struct zfcp_port *
-zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id)
+struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
+                                       wwn_t wwpn)
 {
        struct zfcp_port *port;
-       int found = 0;
 
-       list_for_each_entry(port, &adapter->port_list_head, list) {
-               if ((port->d_id == d_id) &&
-                   !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
-               {
-                       found = 1;
-                       break;
-               }
-       }
-       return found ? port : NULL;
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if ((port->wwpn == wwpn) && !(atomic_read(&port->status) &
+                     (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE)))
+                       return port;
+       return NULL;
 }
 
-/**
- * zfcp_get_adapter_by_busid - find adpater in adapter list by bus_id
- * @bus_id: bus_id to search for
- * Traverse list of all adapters and return pointer to an adapter
- * with the given bus_id.
- */
-struct zfcp_adapter *
-zfcp_get_adapter_by_busid(char *bus_id)
+static void zfcp_sysfs_unit_release(struct device *dev)
 {
-       struct zfcp_adapter *adapter;
-       int found = 0;
-
-       list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) {
-               if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter),
-                            BUS_ID_SIZE) == 0) &&
-                   !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE,
-                                     &adapter->status)){
-                       found = 1;
-                       break;
-               }
-       }
-       return found ? adapter : NULL;
+       kfree(container_of(dev, struct zfcp_unit, sysfs_device));
 }
 
 /**
  * zfcp_unit_enqueue - enqueue unit to unit list of a port.
  * @port: pointer to port where unit is added
  * @fcp_lun: FCP LUN of unit to be enqueued
- * Return: pointer to enqueued unit on success, NULL on error
+ * Returns: pointer to enqueued unit on success, ERR_PTR on error
  * Locks: config_sema must be held to serialize changes to the unit list
  *
  * Sets up some unit internal structures and creates sysfs entry.
  */
-struct zfcp_unit *
-zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
+struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
 {
        struct zfcp_unit *unit;
 
-       /*
-        * check that there is no unit with this FCP_LUN already in list
-        * and enqueue it.
-        * Note: Unlike for the adapter and the port, this is an error
-        */
-       read_lock_irq(&zfcp_data.config_lock);
-       unit = zfcp_get_unit_by_lun(port, fcp_lun);
-       read_unlock_irq(&zfcp_data.config_lock);
-       if (unit)
-               return NULL;
-
-       unit = kzalloc(sizeof (struct zfcp_unit), GFP_KERNEL);
+       unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
        if (!unit)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       /* initialise reference count stuff */
        atomic_set(&unit->refcount, 0);
        init_waitqueue_head(&unit->remove_wq);
 
        unit->port = port;
        unit->fcp_lun = fcp_lun;
 
-       /* setup for sysfs registration */
        snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
        unit->sysfs_device.parent = &port->sysfs_device;
        unit->sysfs_device.release = zfcp_sysfs_unit_release;
@@ -847,14 +298,28 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
        /* mark unit unusable as long as sysfs registration is not complete */
        atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
 
-       if (device_register(&unit->sysfs_device)) {
-               kfree(unit);
-               return NULL;
+       spin_lock_init(&unit->latencies.lock);
+       unit->latencies.write.channel.min = 0xFFFFFFFF;
+       unit->latencies.write.fabric.min = 0xFFFFFFFF;
+       unit->latencies.read.channel.min = 0xFFFFFFFF;
+       unit->latencies.read.fabric.min = 0xFFFFFFFF;
+       unit->latencies.cmd.channel.min = 0xFFFFFFFF;
+       unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
+
+       read_lock_irq(&zfcp_data.config_lock);
+       if (zfcp_get_unit_by_lun(port, fcp_lun)) {
+               read_unlock_irq(&zfcp_data.config_lock);
+               goto err_out_free;
        }
+       read_unlock_irq(&zfcp_data.config_lock);
 
-       if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) {
+       if (device_register(&unit->sysfs_device))
+               goto err_out_free;
+
+       if (sysfs_create_group(&unit->sysfs_device.kobj,
+                              &zfcp_sysfs_unit_attrs)) {
                device_unregister(&unit->sysfs_device);
-               return NULL;
+               return ERR_PTR(-EIO);
        }
 
        zfcp_unit_get(unit);
@@ -864,16 +329,27 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
        list_add_tail(&unit->list, &port->unit_list_head);
        atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
        atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
+
        write_unlock_irq(&zfcp_data.config_lock);
 
        port->units++;
        zfcp_port_get(port);
 
        return unit;
+
+err_out_free:
+       kfree(unit);
+       return ERR_PTR(-EINVAL);
 }
 
-void
-zfcp_unit_dequeue(struct zfcp_unit *unit)
+/**
+ * zfcp_unit_dequeue - dequeue unit
+ * @unit: pointer to zfcp_unit
+ *
+ * Waits until all work on the unit is done and then removes the unit from
+ * the unit list of the associated port.
+ */
+void zfcp_unit_dequeue(struct zfcp_unit *unit)
 {
        zfcp_unit_wait(unit);
        write_lock_irq(&zfcp_data.config_lock);
@@ -881,68 +357,51 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
        write_unlock_irq(&zfcp_data.config_lock);
        unit->port->units--;
        zfcp_port_put(unit->port);
-       zfcp_sysfs_unit_remove_files(&unit->sysfs_device);
+       sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
        device_unregister(&unit->sysfs_device);
 }
 
-/*
- * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
- * commands.
- * It also genrates fcp-nameserver request/response buffer and unsolicited
- * status read fsf_req buffers.
- *
- * locks:       must only be called with zfcp_data.config_sema taken
- */
-static int
-zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
+static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
+       /* must only be called with zfcp_data.config_sema taken */
        adapter->pool.fsf_req_erp =
-               mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
-                                        zfcp_data.fsf_req_qtcb_cache);
+               mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
        if (!adapter->pool.fsf_req_erp)
                return -ENOMEM;
 
        adapter->pool.fsf_req_scsi =
-               mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
-                                        zfcp_data.fsf_req_qtcb_cache);
+               mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
        if (!adapter->pool.fsf_req_scsi)
                return -ENOMEM;
 
        adapter->pool.fsf_req_abort =
-               mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
-                                        zfcp_data.fsf_req_qtcb_cache);
+               mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
        if (!adapter->pool.fsf_req_abort)
                return -ENOMEM;
 
        adapter->pool.fsf_req_status_read =
-               mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+               mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
                                            sizeof(struct zfcp_fsf_req));
        if (!adapter->pool.fsf_req_status_read)
                return -ENOMEM;
 
        adapter->pool.data_status_read =
-               mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR,
+               mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
                                         zfcp_data.sr_buffer_cache);
        if (!adapter->pool.data_status_read)
                return -ENOMEM;
 
        adapter->pool.data_gid_pn =
-               mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR,
-                                        zfcp_data.gid_pn_cache);
+               mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
        if (!adapter->pool.data_gid_pn)
                return -ENOMEM;
 
        return 0;
 }
 
-/**
- * zfcp_free_low_mem_buffers - free memory pools of an adapter
- * @adapter: pointer to zfcp_adapter for which memory pools should be freed
- * locking:  zfcp_data.config_sema must be held
- */
-static void
-zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
+       /* zfcp_data.config_sema must be held */
        if (adapter->pool.fsf_req_erp)
                mempool_destroy(adapter->pool.fsf_req_erp);
        if (adapter->pool.fsf_req_scsi)
@@ -962,20 +421,61 @@ static void zfcp_dummy_release(struct device *dev)
        return;
 }
 
-/*
+/**
+ * zfcp_status_read_refill - refill the long-running status_read requests
+ * @adapter: pointer to struct zfcp_adapter whose buffers should be refilled
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * If 16 or more status_read requests are missing, an adapter reopen
+ * is triggered.
+ */
+int zfcp_status_read_refill(struct zfcp_adapter *adapter)
+{
+       while (atomic_read(&adapter->stat_miss) > 0)
+               if (zfcp_fsf_status_read(adapter)) {
+                       if (atomic_read(&adapter->stat_miss) >= 16) {
+                               zfcp_erp_adapter_reopen(adapter, 0, 103, NULL);
+                               return 1;
+                       }
+                       break;
+               } else
+                       atomic_dec(&adapter->stat_miss);
+       return 0;
+}
+
+static void _zfcp_status_read_scheduler(struct work_struct *work)
+{
+       zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
+                                            stat_work));
+}
+
+static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
+{
+       struct zfcp_port *port;
+
+       port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
+                                ZFCP_DID_DIRECTORY_SERVICE);
+       if (IS_ERR(port))
+               return PTR_ERR(port);
+       zfcp_port_put(port);
+
+       return 0;
+}
+
+/**
+ * zfcp_adapter_enqueue - enqueue a new adapter to the list
+ * @ccw_device: pointer to the struct ccw_device
+ *
+ * Returns:    0             if a new adapter was successfully enqueued
+ *             -ENOMEM       if alloc failed
  * Enqueues an adapter at the end of the adapter list in the driver data.
  * All adapter internal structures are set up.
  * Proc-fs entries are also created.
- *
- * returns:    0             if a new adapter was successfully enqueued
- *              ZFCP_KNOWN    if an adapter with this devno was already present
- *             -ENOMEM       if alloc failed
  * locks:      config_sema must be held to serialise changes to the adapter list
  */
-struct zfcp_adapter *
-zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 {
-       int retval = 0;
        struct zfcp_adapter *adapter;
 
        /*
@@ -983,85 +483,58 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
         * are protected by the config_sema, which must be held to get here
         */
 
-       /* try to allocate new adapter data structure (zeroed) */
-       adapter = kzalloc(sizeof (struct zfcp_adapter), GFP_KERNEL);
-       if (!adapter) {
-               ZFCP_LOG_INFO("error: allocation of base adapter "
-                             "structure failed\n");
-               goto out;
-       }
+       adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
+       if (!adapter)
+               return -ENOMEM;
 
        ccw_device->handler = NULL;
-
-       /* save ccw_device pointer */
        adapter->ccw_device = ccw_device;
+       atomic_set(&adapter->refcount, 0);
 
-       retval = zfcp_qdio_allocate_queues(adapter);
-       if (retval)
-               goto queues_alloc_failed;
-
-       retval = zfcp_qdio_allocate(adapter);
-       if (retval)
+       if (zfcp_qdio_allocate(adapter))
                goto qdio_allocate_failed;
 
-       retval = zfcp_allocate_low_mem_buffers(adapter);
-       if (retval) {
-               ZFCP_LOG_INFO("error: pool allocation failed\n");
+       if (zfcp_allocate_low_mem_buffers(adapter))
                goto failed_low_mem_buffers;
-       }
 
-       /* initialise reference count stuff */
-       atomic_set(&adapter->refcount, 0);
+       if (zfcp_reqlist_alloc(adapter))
+               goto failed_low_mem_buffers;
+
+       if (zfcp_adapter_debug_register(adapter))
+               goto debug_register_failed;
+
        init_waitqueue_head(&adapter->remove_wq);
+       init_waitqueue_head(&adapter->erp_thread_wqh);
+       init_waitqueue_head(&adapter->erp_done_wqh);
 
-       /* initialise list of ports */
        INIT_LIST_HEAD(&adapter->port_list_head);
-
-       /* initialise list of ports to be removed */
        INIT_LIST_HEAD(&adapter->port_remove_lh);
+       INIT_LIST_HEAD(&adapter->erp_ready_head);
+       INIT_LIST_HEAD(&adapter->erp_running_head);
 
-       /* initialize list of fsf requests */
        spin_lock_init(&adapter->req_list_lock);
-       retval = zfcp_reqlist_alloc(adapter);
-       if (retval) {
-               ZFCP_LOG_INFO("request list initialization failed\n");
-               goto failed_low_mem_buffers;
-       }
-
-       /* initialize debug locks */
 
        spin_lock_init(&adapter->hba_dbf_lock);
        spin_lock_init(&adapter->san_dbf_lock);
        spin_lock_init(&adapter->scsi_dbf_lock);
        spin_lock_init(&adapter->rec_dbf_lock);
-
-       retval = zfcp_adapter_debug_register(adapter);
-       if (retval)
-               goto debug_register_failed;
-
-       /* initialize error recovery stuff */
+       spin_lock_init(&adapter->req_q.lock);
 
        rwlock_init(&adapter->erp_lock);
-       sema_init(&adapter->erp_ready_sem, 0);
-       INIT_LIST_HEAD(&adapter->erp_ready_head);
-       INIT_LIST_HEAD(&adapter->erp_running_head);
-
-       /* initialize abort lock */
        rwlock_init(&adapter->abort_lock);
 
-       /* initialise some erp stuff */
-       init_waitqueue_head(&adapter->erp_thread_wqh);
-       init_waitqueue_head(&adapter->erp_done_wqh);
+       sema_init(&adapter->erp_ready_sem, 0);
 
-       /* initialize lock of associated request queue */
-       rwlock_init(&adapter->request_queue.queue_lock);
+       INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+       INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
 
        /* mark adapter unusable as long as sysfs registration is not complete */
        atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
 
        dev_set_drvdata(&ccw_device->dev, adapter);
 
-       if (zfcp_sysfs_adapter_create_files(&ccw_device->dev))
+       if (sysfs_create_group(&ccw_device->dev.kobj,
+                              &zfcp_sysfs_adapter_attrs))
                goto sysfs_failed;
 
        adapter->generic_services.parent = &adapter->ccw_device->dev;
@@ -1072,7 +545,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
        if (device_register(&adapter->generic_services))
                goto generic_services_failed;
 
-       /* put allocated adapter at list tail */
        write_lock_irq(&zfcp_data.config_lock);
        atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
        list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
@@ -1080,57 +552,49 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
        zfcp_data.adapters++;
 
-       goto out;
+       zfcp_nameserver_enqueue(adapter);
+
+       return 0;
 
- generic_services_failed:
-       zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
- sysfs_failed:
+generic_services_failed:
+       sysfs_remove_group(&ccw_device->dev.kobj,
+                          &zfcp_sysfs_adapter_attrs);
+sysfs_failed:
        zfcp_adapter_debug_unregister(adapter);
- debug_register_failed:
+debug_register_failed:
        dev_set_drvdata(&ccw_device->dev, NULL);
-       zfcp_reqlist_free(adapter);
- failed_low_mem_buffers:
+       kfree(adapter->req_list);
+failed_low_mem_buffers:
        zfcp_free_low_mem_buffers(adapter);
-       if (qdio_free(ccw_device) != 0)
-               ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
-                               zfcp_get_busid_by_adapter(adapter));
- qdio_allocate_failed:
-       zfcp_qdio_free_queues(adapter);
- queues_alloc_failed:
+qdio_allocate_failed:
+       zfcp_qdio_free(adapter);
        kfree(adapter);
-       adapter = NULL;
- out:
-       return adapter;
+       return -ENOMEM;
 }
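
sysfs_create_group() above replaces the per-attribute helpers from the deleted zfcp_sysfs_adapter.c; zfcp_sysfs_adapter_attrs is expected to be an attribute group defined in the new zfcp_sysfs.c. A minimal sketch of what such a group looks like, with made-up attribute names rather than the driver's real set:

	/* Hedged sketch of a device attribute group; names are illustrative only. */
	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_in_recovery_show(struct device *dev,
						struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* a real attribute would report state */
	}
	static DEVICE_ATTR(in_recovery_example, S_IRUGO, example_in_recovery_show, NULL);

	static struct attribute *example_adapter_attrs[] = {
		&dev_attr_in_recovery_example.attr,
		NULL
	};

	static const struct attribute_group example_adapter_attr_group = {
		.attrs = example_adapter_attrs,
	};

	/* paired with sysfs_create_group()/sysfs_remove_group() on &dev->kobj */
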
 
-/*
- * returns:    0 - struct zfcp_adapter  data structure successfully removed
- *             !0 - struct zfcp_adapter  data structure could not be removed
- *                     (e.g. still used)
+/**
+ * zfcp_adapter_dequeue - remove the adapter from the resource list
+ * @adapter: pointer to struct zfcp_adapter which should be removed
  * locks:      adapter list write lock is assumed to be held by caller
  */
-void
-zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
+void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 {
        int retval = 0;
        unsigned long flags;
 
+       cancel_work_sync(&adapter->scan_work);
+       cancel_work_sync(&adapter->stat_work);
        zfcp_adapter_scsi_unregister(adapter);
        device_unregister(&adapter->generic_services);
-       zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
+       sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+                          &zfcp_sysfs_adapter_attrs);
        dev_set_drvdata(&adapter->ccw_device->dev, NULL);
        /* sanity check: no pending FSF requests */
        spin_lock_irqsave(&adapter->req_list_lock, flags);
        retval = zfcp_reqlist_isempty(adapter);
        spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-       if (!retval) {
-               ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
-                               "%i requests outstanding\n",
-                               zfcp_get_busid_by_adapter(adapter), adapter,
-                               atomic_read(&adapter->reqs_active));
-               retval = -EBUSY;
-               goto out;
-       }
+       if (!retval)
+               return;
 
        zfcp_adapter_debug_unregister(adapter);
 
@@ -1142,26 +606,18 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
        /* decrease number of adapters in list */
        zfcp_data.adapters--;
 
-       ZFCP_LOG_TRACE("adapter %s (%p) removed from list, "
-                      "%i adapters still in list\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      adapter, zfcp_data.adapters);
-
-       retval = qdio_free(adapter->ccw_device);
-       if (retval)
-               ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
-                               zfcp_get_busid_by_adapter(adapter));
+       zfcp_qdio_free(adapter);
 
        zfcp_free_low_mem_buffers(adapter);
-       /* free memory of adapter data structure and queues */
-       zfcp_qdio_free_queues(adapter);
-       zfcp_reqlist_free(adapter);
+       kfree(adapter->req_list);
        kfree(adapter->fc_stats);
        kfree(adapter->stats_reset_data);
-       ZFCP_LOG_TRACE("freeing adapter structure\n");
        kfree(adapter);
- out:
-       return;
+}
+
+static void zfcp_sysfs_port_release(struct device *dev)
+{
+       kfree(container_of(dev, struct zfcp_port, sysfs_device));
 }
 
 /**
@@ -1170,98 +626,90 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
  * @wwpn: WWPN of the remote port to be enqueued
  * @status: initial status for the port
  * @d_id: destination id of the remote port to be enqueued
- * Return: pointer to enqueued port on success, NULL on error
+ * Returns: pointer to enqueued port on success, ERR_PTR on error
  * Locks: config_sema must be held to serialize changes to the port list
  *
  * All port internal structures are set up and the sysfs entry is generated.
  * d_id is used to enqueue ports with a well known address like the Directory
  * Service for nameserver lookup.
  */
-struct zfcp_port *
-zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
-                 u32 d_id)
+struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
+                                    u32 status, u32 d_id)
 {
        struct zfcp_port *port;
-       int check_wwpn;
-
-       check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
-       /*
-        * check that there is no port with this WWPN already in list
-        */
-       if (check_wwpn) {
-               read_lock_irq(&zfcp_data.config_lock);
-               port = zfcp_get_port_by_wwpn(adapter, wwpn);
-               read_unlock_irq(&zfcp_data.config_lock);
-               if (port)
-                       return NULL;
-       }
+       int retval;
+       char *bus_id;
 
-       port = kzalloc(sizeof (struct zfcp_port), GFP_KERNEL);
+       port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
        if (!port)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       /* initialise reference count stuff */
-       atomic_set(&port->refcount, 0);
        init_waitqueue_head(&port->remove_wq);
 
        INIT_LIST_HEAD(&port->unit_list_head);
        INIT_LIST_HEAD(&port->unit_remove_lh);
 
        port->adapter = adapter;
+       port->d_id = d_id;
+       port->wwpn = wwpn;
 
-       if (check_wwpn)
-               port->wwpn = wwpn;
-
-       atomic_set_mask(status, &port->status);
+       /* mark port unusable as long as sysfs registration is not complete */
+       atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
+       atomic_set(&port->refcount, 0);
 
-       /* setup for sysfs registration */
        if (status & ZFCP_STATUS_PORT_WKA) {
                switch (d_id) {
                case ZFCP_DID_DIRECTORY_SERVICE:
-                       snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
-                                "directory");
+                       bus_id = "directory";
                        break;
                case ZFCP_DID_MANAGEMENT_SERVICE:
-                       snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
-                                "management");
+                       bus_id = "management";
                        break;
                case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
-                       snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
-                                "key_distribution");
+                       bus_id = "key_distribution";
                        break;
                case ZFCP_DID_ALIAS_SERVICE:
-                       snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
-                                "alias");
+                       bus_id = "alias";
                        break;
                case ZFCP_DID_TIME_SERVICE:
-                       snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
-                                "time");
+                       bus_id = "time";
                        break;
                default:
                        kfree(port);
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
-               port->d_id = d_id;
+               snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
                port->sysfs_device.parent = &adapter->generic_services;
        } else {
                snprintf(port->sysfs_device.bus_id,
                         BUS_ID_SIZE, "0x%016llx", wwpn);
                port->sysfs_device.parent = &adapter->ccw_device->dev;
        }
+
        port->sysfs_device.release = zfcp_sysfs_port_release;
        dev_set_drvdata(&port->sysfs_device, port);
 
-       /* mark port unusable as long as sysfs registration is not complete */
-       atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+       read_lock_irq(&zfcp_data.config_lock);
+       if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
+               if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
+                       read_unlock_irq(&zfcp_data.config_lock);
+                       goto err_out_free;
+               }
+       read_unlock_irq(&zfcp_data.config_lock);
 
-       if (device_register(&port->sysfs_device)) {
-               kfree(port);
-               return NULL;
-       }
+       if (device_register(&port->sysfs_device))
+               goto err_out_free;
+
+       if (status & ZFCP_STATUS_PORT_WKA)
+               retval = sysfs_create_group(&port->sysfs_device.kobj,
+                                           &zfcp_sysfs_ns_port_attrs);
+       else
+               retval = sysfs_create_group(&port->sysfs_device.kobj,
+                                           &zfcp_sysfs_port_attrs);
 
-       if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) {
+       if (retval) {
                device_unregister(&port->sysfs_device);
-               return NULL;
+               goto err_out;
        }
 
        zfcp_port_get(port);
@@ -1274,15 +722,23 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
                if (!adapter->nameserver_port)
                        adapter->nameserver_port = port;
        adapter->ports++;
+
        write_unlock_irq(&zfcp_data.config_lock);
 
        zfcp_adapter_get(adapter);
-
        return port;
+
+err_out_free:
+       kfree(port);
+err_out:
+       return ERR_PTR(-EINVAL);
 }
 
-void
-zfcp_port_dequeue(struct zfcp_port *port)
+/**
+ * zfcp_port_dequeue - dequeues a port from the port list of the adapter
+ * @port: pointer to struct zfcp_port which should be removed
+ */
+void zfcp_port_dequeue(struct zfcp_port *port)
 {
        zfcp_port_wait(port);
        write_lock_irq(&zfcp_data.config_lock);
@@ -1293,546 +749,53 @@ zfcp_port_dequeue(struct zfcp_port *port)
                fc_remote_port_delete(port->rport);
        port->rport = NULL;
        zfcp_adapter_put(port->adapter);
-       zfcp_sysfs_port_remove_files(&port->sysfs_device,
-                                    atomic_read(&port->status));
-       device_unregister(&port->sysfs_device);
-}
-
-/* Enqueues a nameserver port */
-int
-zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
-{
-       struct zfcp_port *port;
-
-       port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
-                                ZFCP_DID_DIRECTORY_SERVICE);
-       if (!port) {
-               ZFCP_LOG_INFO("error: enqueue of nameserver port for "
-                             "adapter %s failed\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               return -ENXIO;
-       }
-       zfcp_port_put(port);
-
-       return 0;
-}
-
-#undef ZFCP_LOG_AREA
-
-/****************************************************************/
-/******* Fibre Channel Standard related Functions  **************/
-/****************************************************************/
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_FC
-
-static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req)
-{
-       struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fcp_rscn_head *fcp_rscn_head;
-       struct fcp_rscn_element *fcp_rscn_element;
-       struct zfcp_port *port;
-       u16 i;
-       u16 no_entries;
-       u32 range_mask;
-       unsigned long flags;
-
-       fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload;
-       fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload;
-
-       /* see FC-FS */
-       no_entries = (fcp_rscn_head->payload_len / 4);
-
-       for (i = 1; i < no_entries; i++) {
-               /* skip head and start with 1st element */
-               fcp_rscn_element++;
-               switch (fcp_rscn_element->addr_format) {
-               case ZFCP_PORT_ADDRESS:
-                       range_mask = ZFCP_PORTS_RANGE_PORT;
-                       break;
-               case ZFCP_AREA_ADDRESS:
-                       range_mask = ZFCP_PORTS_RANGE_AREA;
-                       break;
-               case ZFCP_DOMAIN_ADDRESS:
-                       range_mask = ZFCP_PORTS_RANGE_DOMAIN;
-                       break;
-               case ZFCP_FABRIC_ADDRESS:
-                       range_mask = ZFCP_PORTS_RANGE_FABRIC;
-                       break;
-               default:
-                       ZFCP_LOG_INFO("incoming RSCN with unknown "
-                                     "address format\n");
-                       continue;
-               }
-               read_lock_irqsave(&zfcp_data.config_lock, flags);
-               list_for_each_entry(port, &adapter->port_list_head, list) {
-                       if (atomic_test_mask
-                           (ZFCP_STATUS_PORT_WKA, &port->status))
-                               continue;
-                       /* Do we know this port? If not skip it. */
-                       if (!atomic_test_mask
-                           (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
-                               ZFCP_LOG_INFO("incoming RSCN, trying to open "
-                                             "port 0x%016Lx\n", port->wwpn);
-                               zfcp_erp_port_reopen(port,
-                                                    ZFCP_STATUS_COMMON_ERP_FAILED,
-                                                    82, fsf_req);
-                               continue;
-                       }
-
-                       /*
-                        * FIXME: race: d_id might being invalidated
-                        * (...DID_DID reset)
-                        */
-                       if ((port->d_id & range_mask)
-                           == (fcp_rscn_element->nport_did & range_mask)) {
-                               ZFCP_LOG_TRACE("reopen did 0x%08x\n",
-                                              fcp_rscn_element->nport_did);
-                               /*
-                                * Unfortunately, an RSCN does not specify the
-                                * type of change a target underwent. We assume
-                                * that it makes sense to reopen the link.
-                                * FIXME: Shall we try to find out more about
-                                * the target and link state before closing it?
-                                * How to accomplish this? (nameserver?)
-                                * Where would such code be put in?
-                                * (inside or outside erp)
-                                */
-                               ZFCP_LOG_INFO("incoming RSCN, trying to open "
-                                             "port 0x%016Lx\n", port->wwpn);
-                               zfcp_test_link(port);
-                       }
-               }
-               read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-       }
-}
-
-static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req)
-{
-       struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fsf_plogi *els_plogi;
-       struct zfcp_port *port;
-       unsigned long flags;
-
-       els_plogi = (struct fsf_plogi *) status_buffer->payload;
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       list_for_each_entry(port, &adapter->port_list_head, list) {
-               if (port->wwpn == (*(wwn_t *) &els_plogi->serv_param.wwpn))
-                       break;
-       }
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-       if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) {
-               ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
-                              "with d_id 0x%06x on adapter %s\n",
-                              status_buffer->d_id,
-                              zfcp_get_busid_by_adapter(adapter));
-       } else {
-               zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req);
-       }
-}
-
-static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req)
-{
-       struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
-       struct zfcp_port *port;
-       unsigned long flags;
-
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       list_for_each_entry(port, &adapter->port_list_head, list) {
-               if (port->wwpn == els_logo->nport_wwpn)
-                       break;
-       }
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-       if (!port || (port->wwpn != els_logo->nport_wwpn)) {
-               ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
-                              "with d_id 0x%06x on adapter %s\n",
-                              status_buffer->d_id,
-                              zfcp_get_busid_by_adapter(adapter));
-       } else {
-               zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req);
-       }
-}
-
-static void
-zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
-                             struct fsf_status_read_buffer *status_buffer)
-{
-       ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
-                       "for adapter %s\n", *(u32 *) (status_buffer->payload),
-                       zfcp_get_busid_by_adapter(adapter));
-
-}
-
-void
-zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
-{
-       struct fsf_status_read_buffer *status_buffer;
-       u32 els_type;
-       struct zfcp_adapter *adapter;
-
-       status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
-       els_type = *(u32 *) (status_buffer->payload);
-       adapter = fsf_req->adapter;
-
-       zfcp_san_dbf_event_incoming_els(fsf_req);
-       if (els_type == LS_PLOGI)
-               zfcp_fsf_incoming_els_plogi(fsf_req);
-       else if (els_type == LS_LOGO)
-               zfcp_fsf_incoming_els_logo(fsf_req);
-       else if ((els_type & 0xffff0000) == LS_RSCN)
-               /* we are only concerned with the command, not the length */
-               zfcp_fsf_incoming_els_rscn(fsf_req);
-       else
-               zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
-}
-
-
-/**
- * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request
- * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data
- * @pool: pointer to mempool_t if non-null memory pool is used for allocation
- */
-static int
-zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
-{
-       struct zfcp_gid_pn_data *data;
-
-       if (pool != NULL) {
-               data = mempool_alloc(pool, GFP_ATOMIC);
-               if (likely(data != NULL)) {
-                       data->ct.pool = pool;
-               }
-       } else {
-               data = kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC);
-       }
-
-        if (NULL == data)
-                return -ENOMEM;
-
-       memset(data, 0, sizeof(*data));
-       sg_init_table(&data->req , 1);
-       sg_init_table(&data->resp , 1);
-        data->ct.req = &data->req;
-        data->ct.resp = &data->resp;
-       data->ct.req_count = data->ct.resp_count = 1;
-       zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req));
-        zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp));
-
-       *gid_pn = data;
-       return 0;
-}
-
-/**
- * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
- * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
- */
-static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
-{
-       if (gid_pn->ct.pool)
-               mempool_free(gid_pn, gid_pn->ct.pool);
+       if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
+               sysfs_remove_group(&port->sysfs_device.kobj,
+                                  &zfcp_sysfs_ns_port_attrs);
        else
-               kmem_cache_free(zfcp_data.gid_pn_cache, gid_pn);
-}
-
-/**
- * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request
- * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
- */
-int
-zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
-{
-       int ret;
-        struct ct_iu_gid_pn_req *ct_iu_req;
-        struct zfcp_gid_pn_data *gid_pn;
-        struct zfcp_adapter *adapter = erp_action->adapter;
-
-       ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn);
-       if (ret < 0) {
-               ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver "
-                             "request failed for adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               goto out;
-       }
-
-       /* setup nameserver request */
-        ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req);
-        ct_iu_req->header.revision = ZFCP_CT_REVISION;
-        ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
-        ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
-        ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
-        ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
-        ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
-       ct_iu_req->wwpn = erp_action->port->wwpn;
-
-        /* setup parameters for send generic command */
-        gid_pn->ct.port = adapter->nameserver_port;
-       gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
-       gid_pn->ct.handler_data = (unsigned long) gid_pn;
-        gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
-       gid_pn->port = erp_action->port;
-
-       ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
-                              erp_action);
-       if (ret) {
-               ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request "
-                              "failed for adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-
-                zfcp_gid_pn_buffers_free(gid_pn);
-       }
-
- out:
-       return ret;
-}
-
-/**
- * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request
- * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data
- */
-static void zfcp_ns_gid_pn_handler(unsigned long data)
-{
-       struct zfcp_port *port;
-        struct zfcp_send_ct *ct;
-       struct ct_iu_gid_pn_req *ct_iu_req;
-       struct ct_iu_gid_pn_resp *ct_iu_resp;
-        struct zfcp_gid_pn_data *gid_pn;
-
-
-       gid_pn = (struct zfcp_gid_pn_data *) data;
-       port = gid_pn->port;
-        ct = &gid_pn->ct;
-       ct_iu_req = zfcp_sg_to_address(ct->req);
-       ct_iu_resp = zfcp_sg_to_address(ct->resp);
-
-       if (ct->status != 0)
-               goto failed;
-
-       if (zfcp_check_ct_response(&ct_iu_resp->header)) {
-               /* FIXME: do we need some specific erp entry points */
-               atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
-               goto failed;
-       }
-       /* paranoia */
-       if (ct_iu_req->wwpn != port->wwpn) {
-               ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver "
-                               "lookup does not match expected wwpn 0x%016Lx "
-                               "for adapter %s\n", ct_iu_req->wwpn, port->wwpn,
-                               zfcp_get_busid_by_port(port));
-               goto mismatch;
-       }
-
-       /* looks like a valid d_id */
-        port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
-       atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
-       ZFCP_LOG_DEBUG("adapter %s:  wwpn=0x%016Lx ---> d_id=0x%06x\n",
-                      zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
-       goto out;
-
- mismatch:
-       ZFCP_LOG_DEBUG("CT IUs do not match:\n");
-       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req,
-                     sizeof(struct ct_iu_gid_pn_req));
-       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp,
-                     sizeof(struct ct_iu_gid_pn_resp));
-
- failed:
-       ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn "
-                       "0x%016Lx for adapter %s\n",
-                       port->wwpn, zfcp_get_busid_by_port(port));
- out:
-        zfcp_gid_pn_buffers_free(gid_pn);
-       return;
+               sysfs_remove_group(&port->sysfs_device.kobj,
+                                  &zfcp_sysfs_port_attrs);
+       device_unregister(&port->sysfs_device);
 }
 
-/* reject CT_IU reason codes acc. to FC-GS-4 */
-static const struct zfcp_rc_entry zfcp_ct_rc[] = {
-       {0x01, "invalid command code"},
-       {0x02, "invalid version level"},
-       {0x03, "logical error"},
-       {0x04, "invalid CT_IU size"},
-       {0x05, "logical busy"},
-       {0x07, "protocol error"},
-       {0x09, "unable to perform command request"},
-       {0x0b, "command not supported"},
-       {0x0d, "server not available"},
-       {0x0e, "session could not be established"},
-       {0xff, "vendor specific error"},
-       {0, NULL},
-};
-
-/* LS_RJT reason codes acc. to FC-FS */
-static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
-       {0x01, "invalid LS_Command code"},
-       {0x03, "logical error"},
-       {0x05, "logical busy"},
-       {0x07, "protocol error"},
-       {0x09, "unable to perform command request"},
-       {0x0b, "command not supported"},
-       {0x0e, "command already in progress"},
-       {0xff, "vendor specific error"},
-       {0, NULL},
-};
-
-/* reject reason codes according to FC-PH/FC-FS */
-static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
-       {0x01, "invalid D_ID"},
-       {0x02, "invalid S_ID"},
-       {0x03, "Nx_Port not available, temporary"},
-       {0x04, "Nx_Port not available, permament"},
-       {0x05, "class not supported"},
-       {0x06, "delimiter usage error"},
-       {0x07, "TYPE not supported"},
-       {0x08, "invalid Link_Control"},
-       {0x09, "invalid R_CTL field"},
-       {0x0a, "invalid F_CTL field"},
-       {0x0b, "invalid OX_ID"},
-       {0x0c, "invalid RX_ID"},
-       {0x0d, "invalid SEQ_ID"},
-       {0x0e, "invalid DF_CTL"},
-       {0x0f, "invalid SEQ_CNT"},
-       {0x10, "invalid parameter field"},
-       {0x11, "exchange error"},
-       {0x12, "protocol error"},
-       {0x13, "incorrect length"},
-       {0x14, "unsupported ACK"},
-       {0x15, "class of service not supported by entity at FFFFFE"},
-       {0x16, "login required"},
-       {0x17, "excessive sequences attempted"},
-       {0x18, "unable to establish exchange"},
-       {0x1a, "fabric path not available"},
-       {0x1b, "invalid VC_ID (class 4)"},
-       {0x1c, "invalid CS_CTL field"},
-       {0x1d, "insufficient resources for VC (class 4)"},
-       {0x1f, "invalid class of service"},
-       {0x20, "preemption request rejected"},
-       {0x21, "preemption not enabled"},
-       {0x22, "multicast error"},
-       {0x23, "multicast error terminate"},
-       {0x24, "process login required"},
-       {0xff, "vendor specific reject"},
-       {0, NULL},
-};
-
 /**
- * zfcp_rc_description - return description for given reaon code
- * @code: reason code
- * @rc_table: table of reason codes and descriptions
+ * zfcp_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist entries which are to be freed
+ * The scatterlist entries are expected to always reference pages.
  */
-static const char *
-zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
+void zfcp_sg_free_table(struct scatterlist *sg, int count)
 {
-       const char *descr = "unknown reason code";
+       int i;
 
-       do {
-               if (code == rc_table->code) {
-                       descr = rc_table->description;
+       for (i = 0; i < count; i++, sg++)
+               if (sg)
+                       free_page((unsigned long) sg_virt(sg));
+               else
                        break;
-               }
-               rc_table++;
-       } while (rc_table->code && rc_table->description);
-
-       return descr;
 }
 
 /**
- * zfcp_check_ct_response - evaluate reason code for CT_IU
- * @rjt: response payload to an CT_IU request
- * Return: 0 for accept CT_IU, 1 for reject CT_IU or invlid response code
+ * zfcp_sg_setup_table - initialize a scatterlist and allocate page buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlist entries which should be assigned page-sized
+ * buffers
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
  */
-int
-zfcp_check_ct_response(struct ct_hdr *rjt)
+int zfcp_sg_setup_table(struct scatterlist *sg, int count)
 {
-       if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT)
-               return 0;
+       void *addr;
+       int i;
 
-       if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) {
-               ZFCP_LOG_NORMAL("error: invalid Generic Service command/"
-                               "response code (0x%04hx)\n",
-                               rjt->cmd_rsp_code);
-               return 1;
+       sg_init_table(sg, count);
+       for (i = 0; i < count; i++, sg++) {
+               addr = (void *) get_zeroed_page(GFP_KERNEL);
+               if (!addr) {
+                       zfcp_sg_free_table(sg, i);
+                       return -ENOMEM;
+               }
+               sg_set_buf(sg, addr, PAGE_SIZE);
        }
-
-       ZFCP_LOG_INFO("Generic Service command rejected\n");
-       ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
-                     zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
-                     (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
-                     (u32) rjt->vendor_unique);
-
-       return 1;
-}
-
-/**
- * zfcp_print_els_rjt - print reject parameter and description for ELS reject
- * @rjt_par: reject parameter acc. to FC-PH/FC-FS
- * @rc_table: table of reason codes and descriptions
- */
-static void
-zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
-                  const struct zfcp_rc_entry *rc_table)
-{
-       ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
-                     zfcp_rc_description(rjt_par->reason_code, rc_table),
-                     (u32) rjt_par->action, (u32) rjt_par->reason_code,
-                     (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
-}
-
-/**
- * zfcp_fsf_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
- * @sq: status qualifier word
- * @rjt_par: reject parameter as described in FC-PH and FC-FS
- * Return: -EROMTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else
- */
-int
-zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
-{
-       int ret = -EIO;
-
-       if (sq == FSF_IOSTAT_NPORT_RJT) {
-               ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
-               zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
-               /* invalid d_id */
-               if (rjt_par->reason_code == 0x01)
-                       ret = -EREMCHG;
-       } else if (sq == FSF_IOSTAT_FABRIC_RJT) {
-               ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
-               zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
-               /* invalid d_id */
-               if (rjt_par->reason_code == 0x01)
-                       ret = -EREMCHG;
-       } else if (sq == FSF_IOSTAT_LS_RJT) {
-               ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
-               zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
-               ret = -EREMOTEIO;
-       } else
-               ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
-
-       return ret;
-}
-
-/**
- * zfcp_plogi_evaluate - evaluate PLOGI playload and copy important fields
- * into zfcp_port structure
- * @port: zfcp_port structure
- * @plogi: plogi payload
- */
-void
-zfcp_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
-{
-       port->maxframe_size = plogi->serv_param.common_serv_param[7] |
-               ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
-       if (plogi->serv_param.class1_serv_param[0] & 0x80)
-               port->supported_classes |= FC_COS_CLASS1;
-       if (plogi->serv_param.class2_serv_param[0] & 0x80)
-               port->supported_classes |= FC_COS_CLASS2;
-       if (plogi->serv_param.class3_serv_param[0] & 0x80)
-               port->supported_classes |= FC_COS_CLASS3;
-       if (plogi->serv_param.class4_serv_param[0] & 0x80)
-               port->supported_classes |= FC_COS_CLASS4;
+       return 0;
 }
-
-#undef ZFCP_LOG_AREA
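
The two scatterlist helpers above are generic building blocks (the CFDC interface added below uses them for its control-file buffer). A minimal usage sketch, assuming the caller simply needs a table of page-sized buffers and that the prototypes live in zfcp_ext.h:

	/* Hedged sketch: building and tearing down an 8-page scatterlist table. */
	#include <linux/errno.h>
	#include <linux/scatterlist.h>
	#include "zfcp_ext.h"	/* assumed to declare zfcp_sg_setup_table/zfcp_sg_free_table */

	#define EXAMPLE_PAGES 8

	static int example_use_sg_table(void)
	{
		struct scatterlist sg[EXAMPLE_PAGES];

		if (zfcp_sg_setup_table(sg, EXAMPLE_PAGES))
			return -ENOMEM;		/* the helper frees partial allocations itself */

		/* ... hand sg / EXAMPLE_PAGES to an FSF request ... */

		zfcp_sg_free_table(sg, EXAMPLE_PAGES);
		return 0;
	}
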
index 66d3b88..391dd29 100644 (file)
@@ -1,64 +1,13 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Registration and callback for the s390 common I/O layer.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #include "zfcp_ext.h"
 
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-static int zfcp_ccw_probe(struct ccw_device *);
-static void zfcp_ccw_remove(struct ccw_device *);
-static int zfcp_ccw_set_online(struct ccw_device *);
-static int zfcp_ccw_set_offline(struct ccw_device *);
-static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct ccw_device *);
-
-static struct ccw_device_id zfcp_ccw_device_id[] = {
-       {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
-                           ZFCP_CONTROL_UNIT_MODEL,
-                           ZFCP_DEVICE_TYPE,
-                           ZFCP_DEVICE_MODEL)},
-       {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
-                           ZFCP_CONTROL_UNIT_MODEL,
-                           ZFCP_DEVICE_TYPE,
-                           ZFCP_DEVICE_MODEL_PRIV)},
-       {},
-};
-
-static struct ccw_driver zfcp_ccw_driver = {
-       .owner       = THIS_MODULE,
-       .name        = ZFCP_NAME,
-       .ids         = zfcp_ccw_device_id,
-       .probe       = zfcp_ccw_probe,
-       .remove      = zfcp_ccw_remove,
-       .set_online  = zfcp_ccw_set_online,
-       .set_offline = zfcp_ccw_set_offline,
-       .notify      = zfcp_ccw_notify,
-       .shutdown    = zfcp_ccw_shutdown,
-       .driver = {
-               .groups = zfcp_driver_attr_groups,
-       },
-};
-
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
@@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
  * In addition the nameserver port will be added to the ports of the adapter
  * and its sysfs representation will be created too.
  */
-static int
-zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 {
-       struct zfcp_adapter *adapter;
        int retval = 0;
 
        down(&zfcp_data.config_sema);
-       adapter = zfcp_adapter_enqueue(ccw_device);
-       if (!adapter)
+       if (zfcp_adapter_enqueue(ccw_device)) {
+               dev_err(&ccw_device->dev,
+                       "Setup of data structures failed.\n");
                retval = -EINVAL;
-       else
-               ZFCP_LOG_DEBUG("Probed adapter %s\n",
-                              zfcp_get_busid_by_adapter(adapter));
+       }
        up(&zfcp_data.config_sema);
        return retval;
 }
@@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device)
  * ports that belong to this adapter. And in addition all resources of this
  * adapter will be freed too.
  */
-static void
-zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 {
        struct zfcp_adapter *adapter;
        struct zfcp_port *port, *p;
@@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
        down(&zfcp_data.config_sema);
        adapter = dev_get_drvdata(&ccw_device->dev);
 
-       ZFCP_LOG_DEBUG("Removing adapter %s\n",
-                      zfcp_get_busid_by_adapter(adapter));
        write_lock_irq(&zfcp_data.config_lock);
        list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
                list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
@@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
  * registered with the SCSI stack, that the QDIO queues will be set up
  * and that the adapter will be opened (asynchronously).
  */
-static int
-zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 {
        struct zfcp_adapter *adapter;
        int retval;
@@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
        adapter = dev_get_drvdata(&ccw_device->dev);
 
        retval = zfcp_erp_thread_setup(adapter);
-       if (retval) {
-               ZFCP_LOG_INFO("error: start of error recovery thread for "
-                             "adapter %s failed\n",
-                             zfcp_get_busid_by_adapter(adapter));
+       if (retval)
                goto out;
-       }
 
        retval = zfcp_adapter_scsi_register(adapter);
        if (retval)
@@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int
-zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 {
        struct zfcp_adapter *adapter;
 
@@ -206,15 +143,14 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 }
 
 /**
- * zfcp_ccw_notify
+ * zfcp_ccw_notify - ccw notify function
  * @ccw_device: pointer to belonging ccw device
  * @event: indicates if adapter was detached or attached
  *
  * This function gets called by the common i/o layer if an adapter has gone
  * or reappeared.
  */
-static int
-zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 {
        struct zfcp_adapter *adapter;
 
@@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
        adapter = dev_get_drvdata(&ccw_device->dev);
        switch (event) {
        case CIO_GONE:
-               ZFCP_LOG_NORMAL("adapter %s: device gone\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&adapter->ccw_device->dev, "device gone\n");
                zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
                break;
        case CIO_NO_PATH:
-               ZFCP_LOG_NORMAL("adapter %s: no path\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&adapter->ccw_device->dev, "no path\n");
                zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
                break;
        case CIO_OPER:
-               ZFCP_LOG_NORMAL("adapter %s: operational again\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_info(&adapter->ccw_device->dev, "operational again\n");
                zfcp_erp_modify_adapter_status(adapter, 11, NULL,
                                               ZFCP_STATUS_COMMON_RUNNING,
                                               ZFCP_SET);
@@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 }
 
 /**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init
-zfcp_ccw_register(void)
-{
-       return ccw_driver_register(&zfcp_ccw_driver);
-}
-
-/**
- * zfcp_ccw_shutdown - gets called on reboot/shutdown
- *
- * Makes sure that QDIO queues are down when the system gets stopped.
+ * zfcp_ccw_shutdown - handle shutdown from cio
+ * @cdev: ccw device of the adapter to shut down
  */
-static void
-zfcp_ccw_shutdown(struct ccw_device *cdev)
+static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
        struct zfcp_adapter *adapter;
 
@@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
        up(&zfcp_data.config_sema);
 }
 
-#undef ZFCP_LOG_AREA
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
+       {},
+};
+
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+static struct ccw_driver zfcp_ccw_driver = {
+       .owner       = THIS_MODULE,
+       .name        = "zfcp",
+       .ids         = zfcp_ccw_device_id,
+       .probe       = zfcp_ccw_probe,
+       .remove      = zfcp_ccw_remove,
+       .set_online  = zfcp_ccw_set_online,
+       .set_offline = zfcp_ccw_set_offline,
+       .notify      = zfcp_ccw_notify,
+       .shutdown    = zfcp_ccw_shutdown,
+};
+
+/**
+ * zfcp_ccw_register - ccw register function
+ *
+ * Registers the driver at the common i/o layer. This function will be called
+ * at module load time/system start.
+ */
+int __init zfcp_ccw_register(void)
+{
+       return ccw_driver_register(&zfcp_ccw_driver);
+}
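
zfcp_ccw_register() only wraps ccw_driver_register(); a minimal sketch of how it is presumably wired into module initialisation (the surrounding init function and its other setup steps are assumptions, not shown in this hunk):

	/* Hedged sketch: hypothetical module init calling the registration helper. */
	#include <linux/module.h>
	#include "zfcp_ext.h"	/* assumed to declare zfcp_ccw_register() */

	static int __init example_zfcp_init(void)
	{
		/* the real init presumably also sets up caches, dbf areas, etc. */
		return zfcp_ccw_register();
	}
	module_init(example_zfcp_init);
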
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
new file mode 100644 (file)
index 0000000..ec2abce
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * zfcp device driver
+ *
+ * Userspace interface for accessing the
+ * Access Control Lists / Control File Data Channel
+ *
+ * Copyright IBM Corporation 2008
+ */
+
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <asm/ccwdev.h>
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+#include "zfcp_fsf.h"
+
+#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL         0x00010001
+#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE          0x00010101
+#define ZFCP_CFDC_CMND_FULL_ACCESS             0x00000201
+#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS       0x00000401
+#define ZFCP_CFDC_CMND_UPLOAD                  0x00010002
+
+#define ZFCP_CFDC_DOWNLOAD                     0x00000001
+#define ZFCP_CFDC_UPLOAD                       0x00000002
+#define ZFCP_CFDC_WITH_CONTROL_FILE            0x00010000
+
+#define ZFCP_CFDC_IOC_MAGIC                     0xDD
+#define ZFCP_CFDC_IOC \
+       _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
+
+/**
+ * struct zfcp_cfdc_data - data for ioctl cfdc interface
+ * @signature: request signature
+ * @devno: FCP adapter device number
+ * @command: command code
+ * @fsf_status: returns status of FSF command to userspace
+ * @fsf_status_qual: returned to userspace
+ * @payloads: access conflicts list
+ * @control_file: access control table
+ */
+struct zfcp_cfdc_data {
+       u32 signature;
+       u32 devno;
+       u32 command;
+       u32 fsf_status;
+       u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+       u8  payloads[256];
+       u8  control_file[0];
+};
+
+static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
+                                   void __user *user_buffer)
+{
+       unsigned int length;
+       unsigned int size = ZFCP_CFDC_MAX_SIZE;
+
+       while (size) {
+               length = min((unsigned int)size, sg->length);
+               if (copy_from_user(sg_virt(sg++), user_buffer, length))
+                       return -EFAULT;
+               user_buffer += length;
+               size -= length;
+       }
+       return 0;
+}
+
+static int zfcp_cfdc_copy_to_user(void __user  *user_buffer,
+                                 struct scatterlist *sg)
+{
+       unsigned int length;
+       unsigned int size = ZFCP_CFDC_MAX_SIZE;
+
+       while (size) {
+               length = min((unsigned int) size, sg->length);
+               if (copy_to_user(user_buffer, sg_virt(sg++), length))
+                       return -EFAULT;
+               user_buffer += length;
+               size -= length;
+       }
+       return 0;
+}
+
+static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
+{
+       struct zfcp_adapter *adapter = NULL, *cur_adapter;
+       struct ccw_dev_id dev_id;
+
+       read_lock_irq(&zfcp_data.config_lock);
+       list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
+               ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
+               if (dev_id.devno == devno) {
+                       adapter = cur_adapter;
+                       zfcp_adapter_get(adapter);
+                       break;
+               }
+       }
+       read_unlock_irq(&zfcp_data.config_lock);
+       return adapter;
+}
+
+static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
+{
+       switch (command) {
+       case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
+               fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+               fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
+               break;
+       case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
+               fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+               fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
+               break;
+       case ZFCP_CFDC_CMND_FULL_ACCESS:
+               fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+               fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
+               break;
+       case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
+               fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+               fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
+               break;
+       case ZFCP_CFDC_CMND_UPLOAD:
+               fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
+               fsf_cfdc->option = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
+                             u8 __user *control_file)
+{
+       int retval;
+       retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
+       if (retval)
+               return retval;
+
+       sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
+
+       if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
+           command & ZFCP_CFDC_DOWNLOAD) {
+               retval = zfcp_cfdc_copy_from_user(sg, control_file);
+               if (retval) {
+                       zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
+                                  struct zfcp_fsf_req *req)
+{
+       data->fsf_status = req->qtcb->header.fsf_status;
+       memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
+              sizeof(union fsf_status_qual));
+       memcpy(&data->payloads, &req->qtcb->bottom.support.els,
+              sizeof(req->qtcb->bottom.support.els));
+}
+
+static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
+                               unsigned long buffer)
+{
+       struct zfcp_cfdc_data *data;
+       struct zfcp_cfdc_data __user *data_user;
+       struct zfcp_adapter *adapter;
+       struct zfcp_fsf_req *req;
+       struct zfcp_fsf_cfdc *fsf_cfdc;
+       int retval;
+
+       if (command != ZFCP_CFDC_IOC)
+               return -ENOTTY;
+
+       data_user = (void __user *) buffer;
+       if (!data_user)
+               return -EINVAL;
+
+       fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
+       if (!fsf_cfdc)
+               return -ENOMEM;
+
+       data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
+       if (!data) {
+               retval = -ENOMEM;
+               goto no_mem_sense;
+       }
+
+       retval = copy_from_user(data, data_user, sizeof(*data));
+       if (retval) {
+               retval = -EFAULT;
+               goto free_buffer;
+       }
+
+       if (data->signature != 0xCFDCACDF) {
+               retval = -EINVAL;
+               goto free_buffer;
+       }
+
+       retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
+
+       adapter = zfcp_cfdc_get_adapter(data->devno);
+       if (!adapter) {
+               retval = -ENXIO;
+               goto free_buffer;
+       }
+
+       retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
+                                   data_user->control_file);
+       if (retval)
+               goto adapter_put;
+       req = zfcp_fsf_control_file(adapter, fsf_cfdc);
+       if (IS_ERR(req)) {
+               retval = PTR_ERR(req);
+               goto free_sg;
+       }
+
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+               retval = -ENXIO;
+               goto free_fsf;
+       }
+
+       zfcp_cfdc_req_to_sense(data, req);
+       retval = copy_to_user(data_user, data, sizeof(*data_user));
+       if (retval) {
+               retval = -EFAULT;
+               goto free_fsf;
+       }
+
+       if (data->command & ZFCP_CFDC_UPLOAD)
+               retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
+                                               fsf_cfdc->sg);
+
+ free_fsf:
+       zfcp_fsf_req_free(req);
+ free_sg:
+       zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
+ adapter_put:
+       zfcp_adapter_put(adapter);
+ free_buffer:
+       kfree(data);
+ no_mem_sense:
+       kfree(fsf_cfdc);
+       return retval;
+}
+
+static const struct file_operations zfcp_cfdc_fops = {
+       .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = zfcp_cfdc_dev_ioctl
+#endif
+};
+
+struct miscdevice zfcp_cfdc_misc = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "zfcp_cfdc",
+       .fops = &zfcp_cfdc_fops,
+};
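
The misc device registers under the name "zfcp_cfdc", so udev will typically create /dev/zfcp_cfdc (an assumption about the target system). A minimal userspace sketch issuing the full-access command, re-declaring the ABI from the structure above; the device path, devno and error handling are purely illustrative:

	/* Hedged userspace sketch for the CFDC ioctl; mirrors the ABI defined above. */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	struct zfcp_cfdc_data {
		uint32_t signature;
		uint32_t devno;
		uint32_t command;
		uint32_t fsf_status;
		uint8_t  fsf_status_qual[16];	/* FSF_STATUS_QUALIFIER_SIZE, assumed 16 */
		uint8_t  payloads[256];
		uint8_t  control_file[0];
	};

	#define ZFCP_CFDC_IOC _IOWR(0xDD, 0, struct zfcp_cfdc_data)

	int main(void)
	{
		struct zfcp_cfdc_data data;
		int fd = open("/dev/zfcp_cfdc", O_RDWR);	/* node name assumed */

		if (fd < 0)
			return 1;
		memset(&data, 0, sizeof(data));
		data.signature = 0xCFDCACDF;
		data.devno = 0x3c00;		/* example FCP adapter device number */
		data.command = 0x00000201;	/* ZFCP_CFDC_CMND_FULL_ACCESS */
		if (ioctl(fd, ZFCP_CFDC_IOC, &data) < 0)
			perror("ZFCP_CFDC_IOC");
		else
			printf("fsf_status: 0x%08x\n", data.fsf_status);
		close(fd);
		return 0;
	}
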
index c8bad67..36169c6 100644 (file)
@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Debug traces for zfcp.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #include <linux/ctype.h>
@@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400);
 MODULE_PARM_DESC(dbfsize,
                 "number of pages for each debug feature area (default 4)");
 
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_OTHER
-
 static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
                             int level, char *from, int from_len)
 {
@@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
               fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
        response->fsf_req_status = fsf_req->status;
        response->sbal_first = fsf_req->sbal_first;
-       response->sbal_curr = fsf_req->sbal_curr;
        response->sbal_last = fsf_req->sbal_last;
+       response->sbal_response = fsf_req->sbal_response;
        response->pool = fsf_req->pool != NULL;
        response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
        strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
        strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-       rec->u.status.failed = adapter->status_read_failed;
+       rec->u.status.failed = atomic_read(&adapter->stat_miss);
        if (status_buffer != NULL) {
                rec->u.status.status_type = status_buffer->status_type;
                rec->u.status.status_subtype = status_buffer->status_subtype;
@@ -355,8 +340,8 @@ static void zfcp_hba_dbf_view_response(char **p,
                      FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
        zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
        zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
-       zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
        zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
+       zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
        zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
 
        switch (r->fsf_command) {
@@ -515,13 +500,13 @@ static const char *zfcp_rec_dbf_ids[] = {
        [52]    = "port boxed close unit",
        [53]    = "port boxed fcp",
        [54]    = "unit boxed fcp",
-       [55]    = "port access denied ct",
-       [56]    = "port access denied els",
-       [57]    = "port access denied open port",
-       [58]    = "port access denied close physical",
-       [59]    = "unit access denied open unit",
+       [55]    = "port access denied",
+       [56]    = "",
+       [57]    = "",
+       [58]    = "",
+       [59]    = "unit access denied",
        [60]    = "shared unit access denied open unit",
-       [61]    = "unit access denied fcp",
+       [61]    = "",
        [62]    = "request timeout",
        [63]    = "adisc link test reject or timeout",
        [64]    = "adisc link test d_id changed",
@@ -546,8 +531,8 @@ static const char *zfcp_rec_dbf_ids[] = {
        [80]    = "exclusive read-only unit access unsupported",
        [81]    = "shared read-write unit access unsupported",
        [82]    = "incoming rscn",
-       [83]    = "incoming plogi",
-       [84]    = "incoming logo",
+       [83]    = "incoming wwpn",
+       [84]    = "",
        [85]    = "online",
        [86]    = "offline",
        [87]    = "ccw device gone",
@@ -586,8 +571,8 @@ static const char *zfcp_rec_dbf_ids[] = {
        [120]   = "unknown fsf command",
        [121]   = "no recommendation for status qualifier",
        [122]   = "status read physical port closed in error",
-       [123]   = "fc service class not supported ct",
-       [124]   = "fc service class not supported els",
+       [123]   = "fc service class not supported",
+       [124]   = "",
        [125]   = "need newer zfcp",
        [126]   = "need newer microcode",
        [127]   = "arbitrated loop not supported",
@@ -595,7 +580,7 @@ static const char *zfcp_rec_dbf_ids[] = {
        [129]   = "qtcb size mismatch",
        [130]   = "unknown fsf status ecd",
        [131]   = "fcp request too big",
-       [132]   = "fc service class not supported fcp",
+       [132]   = "",
        [133]   = "data direction not valid fcp",
        [134]   = "command length not valid fcp",
        [135]   = "status read act update",
@@ -603,13 +588,18 @@ static const char *zfcp_rec_dbf_ids[] = {
        [137]   = "hbaapi port open",
        [138]   = "hbaapi unit open",
        [139]   = "hbaapi unit shutdown",
-       [140]   = "qdio error",
+       [140]   = "qdio error outbound",
        [141]   = "scsi host reset",
        [142]   = "dismissing fsf request for recovery action",
        [143]   = "recovery action timed out",
        [144]   = "recovery action gone",
        [145]   = "recovery action being processed",
        [146]   = "recovery action ready for next step",
+       [147]   = "qdio error inbound",
+       [148]   = "nameserver needed for port scan",
+       [149]   = "port scan",
+       [150]   = "ptp attach",
+       [151]   = "port validation failed",
 };
 
 static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
@@ -670,24 +660,20 @@ static struct debug_view zfcp_rec_dbf_view = {
  * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
  * @id2: identifier for event
  * @adapter: adapter
- * @lock: non-zero value indicates that erp_lock has not yet been acquired
+ * This function assumes that the caller is holding erp_lock.
  */
-void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
+void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
 {
        struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
        unsigned long flags = 0;
        struct list_head *entry;
        unsigned ready = 0, running = 0, total;
 
-       if (lock)
-               read_lock_irqsave(&adapter->erp_lock, flags);
        list_for_each(entry, &adapter->erp_ready_head)
                ready++;
        list_for_each(entry, &adapter->erp_running_head)
                running++;
        total = adapter->erp_total_count;
-       if (lock)
-               read_unlock_irqrestore(&adapter->erp_lock, flags);
 
        spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
        memset(r, 0, sizeof(*r));
@@ -696,10 +682,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
        r->u.thread.total = total;
        r->u.thread.ready = ready;
        r->u.thread.running = running;
-       debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
+       debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
        spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
 
+/**
+ * zfcp_rec_dbf_event_thread_lock - trace event related to recovery thread operation
+ * @id2: identifier for event
+ * @adapter: adapter
+ * This function assumes that the caller does not hold erp_lock.
+ */
+void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
+{
+       unsigned long flags;
+
+       read_lock_irqsave(&adapter->erp_lock, flags);
+       zfcp_rec_dbf_event_thread(id2, adapter);
+       read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
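
A short sketch of how a caller would choose between the two trace variants added above, depending on whether adapter->erp_lock is already held; the wrapper function name and the have_lock parameter are illustrative assumptions, only the two zfcp_rec_dbf_event_thread* calls are from the patch.

/* Illustrative only: pick the variant matching the locking context. */
static void example_trace_thread(struct zfcp_adapter *adapter, int have_lock)
{
	if (have_lock)
		zfcp_rec_dbf_event_thread(2, adapter);      /* erp_lock held by caller */
	else
		zfcp_rec_dbf_event_thread_lock(2, adapter); /* wrapper takes erp_lock */
}
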
+
 static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
                                      struct zfcp_adapter *adapter,
                                      atomic_t *status, atomic_t *erp_count,
@@ -823,7 +824,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
        r->u.action.status = erp_action->status;
        r->u.action.step = erp_action->step;
        r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
-       debug_event(adapter->rec_dbf, 4, r, sizeof(*r));
+       debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
        spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
 
@@ -960,7 +961,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
 
        zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
                               fc_host_port_id(adapter->scsi_host),
-                              *(u8 *)buf->payload, (void *)buf->payload,
+                              buf->payload.data[0], (void *)buf->payload.data,
                               length);
 }
 
@@ -1064,8 +1065,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
                        if (fsf_req != NULL) {
                                fcp_rsp = (struct fcp_rsp_iu *)
                                    &(fsf_req->qtcb->bottom.io.fcp_rsp);
-                               fcp_rsp_info =
-                                   zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
+                               fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
                                fcp_sns_info =
                                    zfcp_get_fcp_sns_info_ptr(fcp_rsp);
 
@@ -1279,5 +1279,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
        adapter->hba_dbf = NULL;
        adapter->rec_dbf = NULL;
 }
-
-#undef ZFCP_LOG_AREA
index 54c34e4..d04aea6 100644 (file)
@@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread {
        u32 total;
        u32 ready;
        u32 running;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_target {
        u64 ref;
@@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target {
        u64 wwpn;
        u64 fcp_lun;
        u32 erp_count;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_trigger {
        u8 want;
@@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger {
        u64 action;
        u64 wwpn;
        u64 fcp_lun;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_action {
        u32 status;
        u32 step;
        u64 action;
        u64 fsf_req;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record {
        u8 id;
@@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record {
                struct zfcp_rec_dbf_record_target target;
                struct zfcp_rec_dbf_record_trigger trigger;
        } u;
-} __attribute__ ((packed));
+};
 
 enum {
        ZFCP_REC_DBF_ID_ACTION,
@@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response {
        u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
        u32 fsf_req_status;
        u8 sbal_first;
-       u8 sbal_curr;
        u8 sbal_last;
+       u8 sbal_response;
        u8 pool;
        u64 erp_action;
        union {
index bda8c77..67f45fc 100644 (file)
@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Global definitions for the zfcp device driver.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #ifndef ZFCP_DEF_H
@@ -26,7 +13,6 @@
 
 #include <linux/init.h>
 #include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
 #include <linux/major.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -53,9 +39,6 @@
 
 /********************* GENERAL DEFINES *********************************/
 
-/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION           "4.8.0"
-
 /**
  * zfcp_sg_to_address - determine kernel address from struct scatterlist
  * @list: struct scatterlist
@@ -93,11 +76,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
 #define ZFCP_DEVICE_MODEL       0x03
 #define ZFCP_DEVICE_MODEL_PRIV 0x04
 
-/* allow as many chained SBALs as are supported by hardware */
-#define ZFCP_MAX_SBALS_PER_REQ         FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_CT_REQ      FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_ELS_REQ     FSF_MAX_SBALS_PER_ELS_REQ
-
 /* DMQ bug workaround: don't use last SBALE */
 #define ZFCP_MAX_SBALES_PER_SBAL       (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 
@@ -106,42 +84,17 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
 
 /* max. number of (data buffer) SBALEs in largest SBAL chain */
 #define ZFCP_MAX_SBALES_PER_REQ                \
-       (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
+       (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
         /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
 
 #define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
         /* max. number of (data buffer) SBALEs in largest SBAL chain
            multiplied with number of sectors per 4k block */
 
-/* FIXME(tune): free space should be one max. SBAL chain plus what? */
-#define ZFCP_QDIO_PCI_INTERVAL         (QDIO_MAX_BUFFERS_PER_Q \
-                                         - (ZFCP_MAX_SBALS_PER_REQ + 4))
-
-#define ZFCP_SBAL_TIMEOUT               (5*HZ)
-
-#define ZFCP_TYPE2_RECOVERY_TIME        8      /* seconds */
-
-/* queue polling (values in microseconds) */
-#define ZFCP_MAX_INPUT_THRESHOLD       5000    /* FIXME: tune */
-#define ZFCP_MAX_OUTPUT_THRESHOLD      1000    /* FIXME: tune */
-#define ZFCP_MIN_INPUT_THRESHOLD       1       /* ignored by QDIO layer */
-#define ZFCP_MIN_OUTPUT_THRESHOLD      1       /* ignored by QDIO layer */
-
-#define QDIO_SCSI_QFMT                 1       /* 1 for FSF */
-#define QBUFF_PER_PAGE                 (PAGE_SIZE / sizeof(struct qdio_buffer))
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
-#define ZFCP_ULP_INFO_VERSION                   26
-#define ZFCP_QTCB_VERSION      FSF_QTCB_CURRENT_VERSION
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS            0x6305
-#define ZFCP_STATUS_READ_FAILED_THRESHOLD      3
-#define ZFCP_STATUS_READS_RECOM                        FSF_STATUS_READS_RECOM
-
-/* Do 1st retry in 1 second, then double the timeout for each following retry */
-#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP  1
-#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES      7
 
 /* timeout value for "default timer" for fsf requests */
 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
@@ -153,17 +106,9 @@ typedef unsigned long long fcp_lun_t;
 /* data length field may be at variable position in FCP-2 FCP_CMND IU */
 typedef unsigned int       fcp_dl_t;
 
-#define ZFCP_FC_SERVICE_CLASS_DEFAULT  FSF_CLASS_3
-
 /* timeout for name-server lookup (in seconds) */
 #define ZFCP_NS_GID_PN_TIMEOUT         10
 
-/* largest SCSI command we can process */
-/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
-#define ZFCP_MAX_SCSI_CMND_LENGTH      255
-/* maximum number of commands in LUN queue (tagged queueing) */
-#define ZFCP_CMND_PER_LUN               32
-
 /* task attribute values in FCP-2 FCP_CMND IU */
 #define SIMPLE_Q       0
 #define HEAD_OF_Q      1
@@ -224,9 +169,9 @@ struct fcp_rsp_iu {
 #define RSP_CODE_TASKMAN_FAILED         5
 
 /* see fc-fs */
-#define LS_RSCN  0x61040000
-#define LS_LOGO  0x05000000
-#define LS_PLOGI 0x03000000
+#define LS_RSCN  0x61
+#define LS_LOGO  0x05
+#define LS_PLOGI 0x03
 
 struct fcp_rscn_head {
         u8  command;
@@ -266,7 +211,6 @@ struct fcp_logo {
  * FC-FS stuff
  */
 #define R_A_TOV                                10 /* seconds */
-#define ZFCP_ELS_TIMEOUT               (2 * R_A_TOV)
 
 #define ZFCP_LS_RLS                    0x0f
 #define ZFCP_LS_ADISC                  0x52
@@ -311,7 +255,10 @@ struct zfcp_rc_entry {
 #define ZFCP_CT_DIRECTORY_SERVICE      0xFC
 #define ZFCP_CT_NAME_SERVER            0x02
 #define ZFCP_CT_SYNCHRONOUS            0x00
+#define ZFCP_CT_SCSI_FCP               0x08
+#define ZFCP_CT_UNABLE_TO_PERFORM_CMD  0x09
 #define ZFCP_CT_GID_PN                 0x0121
+#define ZFCP_CT_GPN_FT                 0x0172
 #define ZFCP_CT_MAX_SIZE               0x1020
 #define ZFCP_CT_ACCEPT                 0x8002
 #define ZFCP_CT_REJECT                 0x8001
@@ -321,107 +268,6 @@ struct zfcp_rc_entry {
  */
 #define ZFCP_CT_TIMEOUT                        (3 * R_A_TOV)
 
-/******************** LOGGING MACROS AND DEFINES *****************************/
-
-/*
- * Logging may be applied on certain kinds of driver operations
- * independently. Additionally, different log-levels are supported for
- * each of these areas.
- */
-
-#define ZFCP_NAME               "zfcp"
-
-/* independent log areas */
-#define ZFCP_LOG_AREA_OTHER    0
-#define ZFCP_LOG_AREA_SCSI     1
-#define ZFCP_LOG_AREA_FSF      2
-#define ZFCP_LOG_AREA_CONFIG   3
-#define ZFCP_LOG_AREA_CIO      4
-#define ZFCP_LOG_AREA_QDIO     5
-#define ZFCP_LOG_AREA_ERP      6
-#define ZFCP_LOG_AREA_FC       7
-
-/* log level values*/
-#define ZFCP_LOG_LEVEL_NORMAL  0
-#define ZFCP_LOG_LEVEL_INFO    1
-#define ZFCP_LOG_LEVEL_DEBUG   2
-#define ZFCP_LOG_LEVEL_TRACE   3
-
-/*
- * this allows removal of logging code by the preprocessor
- * (the most detailed log level still to be compiled in is specified,
- * higher log levels are removed)
- */
-#define ZFCP_LOG_LEVEL_LIMIT   ZFCP_LOG_LEVEL_TRACE
-
-/* get "loglevel" nibble assignment */
-#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
-              ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
-
-/* set "loglevel" nibble */
-#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
-              (value << (zfcp_lognibble << 2))
-
-/* all log-level defaults are combined to generate initial log-level */
-#define ZFCP_LOG_LEVEL_DEFAULTS \
-       (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
-        ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
-
-/* check whether we have the right level for logging */
-#define ZFCP_LOG_CHECK(level) \
-       ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
-
-/* logging routine for zfcp */
-#define _ZFCP_LOG(fmt, args...) \
-       printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
-              __LINE__ , ##args)
-
-#define ZFCP_LOG(level, fmt, args...) \
-do { \
-       if (ZFCP_LOG_CHECK(level)) \
-               _ZFCP_LOG(fmt, ##args); \
-} while (0)
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
-# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
-#else
-# define ZFCP_LOG_NORMAL(fmt, args...) \
-do { \
-       if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
-               printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
-} while (0)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
-# define ZFCP_LOG_INFO(fmt, args...)   do { } while (0)
-#else
-# define ZFCP_LOG_INFO(fmt, args...) \
-do { \
-       if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
-               printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
-} while (0)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
-# define ZFCP_LOG_DEBUG(fmt, args...)  do { } while (0)
-#else
-# define ZFCP_LOG_DEBUG(fmt, args...) \
-       ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
-# define ZFCP_LOG_TRACE(fmt, args...)  do { } while (0)
-#else
-# define ZFCP_LOG_TRACE(fmt, args...) \
-       ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
-#endif
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
@@ -441,6 +287,7 @@ do { \
 #define ZFCP_STATUS_COMMON_ERP_INUSE           0x01000000
 #define ZFCP_STATUS_COMMON_ACCESS_DENIED       0x00800000
 #define ZFCP_STATUS_COMMON_ACCESS_BOXED                0x00400000
+#define ZFCP_STATUS_COMMON_NOESC               0x00200000
 
 /* adapter status */
 #define ZFCP_STATUS_ADAPTER_QDIOUP             0x00000002
@@ -496,77 +343,6 @@ do { \
 #define ZFCP_STATUS_FSFREQ_RETRY                0x00000800
 #define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000
 
-/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
-
-#define ZFCP_MAX_ERPS                   3
-
-#define ZFCP_ERP_FSFREQ_TIMEOUT                (30 * HZ)
-#define ZFCP_ERP_MEMWAIT_TIMEOUT       HZ
-
-#define ZFCP_STATUS_ERP_TIMEDOUT       0x10000000
-#define ZFCP_STATUS_ERP_CLOSE_ONLY     0x01000000
-#define ZFCP_STATUS_ERP_DISMISSING     0x00100000
-#define ZFCP_STATUS_ERP_DISMISSED      0x00200000
-#define ZFCP_STATUS_ERP_LOWMEM         0x00400000
-
-#define ZFCP_ERP_STEP_UNINITIALIZED    0x00000000
-#define ZFCP_ERP_STEP_FSF_XCONFIG      0x00000001
-#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING        0x00000010
-#define ZFCP_ERP_STEP_PORT_CLOSING     0x00000100
-#define ZFCP_ERP_STEP_NAMESERVER_OPEN  0x00000200
-#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP        0x00000400
-#define ZFCP_ERP_STEP_PORT_OPENING     0x00000800
-#define ZFCP_ERP_STEP_UNIT_CLOSING     0x00001000
-#define ZFCP_ERP_STEP_UNIT_OPENING     0x00002000
-
-/* Ordered by escalation level (necessary for proper erp-code operation) */
-#define ZFCP_ERP_ACTION_REOPEN_ADAPTER         0x4
-#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED     0x3
-#define ZFCP_ERP_ACTION_REOPEN_PORT            0x2
-#define ZFCP_ERP_ACTION_REOPEN_UNIT            0x1
-
-#define ZFCP_ERP_ACTION_RUNNING                        0x1
-#define ZFCP_ERP_ACTION_READY                  0x2
-
-#define ZFCP_ERP_SUCCEEDED     0x0
-#define ZFCP_ERP_FAILED                0x1
-#define ZFCP_ERP_CONTINUES     0x2
-#define ZFCP_ERP_EXIT          0x3
-#define ZFCP_ERP_DISMISSED     0x4
-#define ZFCP_ERP_NOMEM         0x5
-
-
-/******************** CFDC SPECIFIC STUFF *****************************/
-
-/* Firewall data channel sense data record */
-struct zfcp_cfdc_sense_data {
-       u32 signature;           /* Request signature */
-       u32 devno;               /* FCP adapter device number */
-       u32 command;             /* Command code */
-       u32 fsf_status;          /* FSF request status and status qualifier */
-       u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-       u8  payloads[256];       /* Access conflicts list */
-       u8  control_file[0];     /* Access control table */
-};
-
-#define ZFCP_CFDC_SIGNATURE                    0xCFDCACDF
-
-#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL         0x00010001
-#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE          0x00010101
-#define ZFCP_CFDC_CMND_FULL_ACCESS             0x00000201
-#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS       0x00000401
-#define ZFCP_CFDC_CMND_UPLOAD                  0x00010002
-
-#define ZFCP_CFDC_DOWNLOAD                     0x00000001
-#define ZFCP_CFDC_UPLOAD                       0x00000002
-#define ZFCP_CFDC_WITH_CONTROL_FILE            0x00010000
-
-#define ZFCP_CFDC_DEV_NAME                     "zfcp_cfdc"
-#define ZFCP_CFDC_DEV_MAJOR                    MISC_MAJOR
-#define ZFCP_CFDC_DEV_MINOR                    MISC_DYNAMIC_MINOR
-
-#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE                127 * 1024
-
 /************************* STRUCTURE DEFINITIONS *****************************/
 
 struct zfcp_fsf_req;
@@ -623,7 +399,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
  * @resp_count: number of elements in response scatter-gather list
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
- * @pool: pointer to memory pool for ct request structure
  * @timeout: FSF timeout for this request
  * @completion: completion for synchronization purposes
  * @status: used to pass error status to calling function
@@ -636,7 +411,6 @@ struct zfcp_send_ct {
        unsigned int resp_count;
        zfcp_send_ct_handler_t handler;
        unsigned long handler_data;
-       mempool_t *pool;
        int timeout;
        struct completion *completion;
        int status;
@@ -685,13 +459,13 @@ struct zfcp_send_els {
 };
 
 struct zfcp_qdio_queue {
-       struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
-       u8                 free_index;        /* index of next free bfr
+       struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
+       u8                 first;             /* index of next free bfr
                                                 in queue (count>0) */
-       atomic_t           free_count;        /* number of free buffers
+       atomic_t           count;             /* number of free buffers
                                                 in queue */
-       rwlock_t           queue_lock;        /* lock for operations on queue */
-        int                distance_from_int; /* SBALs used since PCI indication
+       spinlock_t         lock;              /* lock for operations on queue */
+       int                pci_batch;         /* SBALs since PCI indication
                                                 was last set */
 };
 
@@ -708,6 +482,24 @@ struct zfcp_erp_action {
        struct timer_list timer;
 };
 
+struct fsf_latency_record {
+       u32 min;
+       u32 max;
+       u64 sum;
+};
+
+struct latency_cont {
+       struct fsf_latency_record channel;
+       struct fsf_latency_record fabric;
+       u64 counter;
+};
+
+struct zfcp_latencies {
+       struct latency_cont read;
+       struct latency_cont write;
+       struct latency_cont cmd;
+       spinlock_t lock;
+};
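
A minimal sketch of how one latency sample might be folded into the accumulators just defined; the helper name and the way the channel/fabric values are obtained are assumptions, only the struct layout above is from the patch.

/* Hypothetical accumulator: fold one request's channel and fabric latency
 * values into a latency_cont; the caller is assumed to hold the enclosing
 * zfcp_latencies.lock. min()/max() come from <linux/kernel.h>. */
static void zfcp_latency_sample(struct latency_cont *lat, u32 chan, u32 fab)
{
	lat->channel.min = min(lat->channel.min, chan);
	lat->channel.max = max(lat->channel.max, chan);
	lat->channel.sum += chan;

	lat->fabric.min = min(lat->fabric.min, fab);
	lat->fabric.max = max(lat->fabric.max, fab);
	lat->fabric.sum += fab;

	lat->counter++;
}
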
 
 struct zfcp_adapter {
        struct list_head        list;              /* list of adapters */
@@ -723,24 +515,25 @@ struct zfcp_adapter {
        u32                     adapter_features;  /* FCP channel features */
        u32                     connection_features; /* host connection features */
         u32                    hardware_version;  /* of FCP channel */
+       u16                     timer_ticks;       /* time int for a tick */
        struct Scsi_Host        *scsi_host;        /* Pointer to mid-layer */
        struct list_head        port_list_head;    /* remote port list */
        struct list_head        port_remove_lh;    /* head of ports to be
                                                      removed */
        u32                     ports;             /* number of remote ports */
-       atomic_t                reqs_active;       /* # active FSF reqs */
        unsigned long           req_no;            /* unique FSF req number */
        struct list_head        *req_list;         /* list of pending reqs */
        spinlock_t              req_list_lock;     /* request list lock */
-       struct zfcp_qdio_queue  request_queue;     /* request queue */
+       struct zfcp_qdio_queue  req_q;             /* request queue */
        u32                     fsf_req_seq_no;    /* FSF cmnd seq number */
        wait_queue_head_t       request_wq;        /* can be used to wait for
                                                      more available SBALs */
-       struct zfcp_qdio_queue  response_queue;    /* response queue */
+       struct zfcp_qdio_queue  resp_q;    /* response queue */
        rwlock_t                abort_lock;        /* Protects against SCSI
                                                      stack abort/command
                                                      completion races */
-       u16                     status_read_failed; /* # failed status reads */
+       atomic_t                stat_miss;         /* # missing status reads*/
+       struct work_struct      stat_work;
        atomic_t                status;            /* status of this adapter */
        struct list_head        erp_ready_head;    /* error recovery for this
                                                      adapter/devices */
@@ -774,13 +567,9 @@ struct zfcp_adapter {
        struct fc_host_statistics *fc_stats;
        struct fsf_qtcb_bottom_port *stats_reset_data;
        unsigned long           stats_reset;
+       struct work_struct      scan_work;
 };
 
-/*
- * the struct device sysfs_device must be at the beginning of this structure.
- * pointer to struct device is used to free port structure in release function
- * of the device. don't change!
- */
 struct zfcp_port {
        struct device          sysfs_device;   /* sysfs device */
        struct fc_rport        *rport;         /* rport of fc transport class */
@@ -804,10 +593,6 @@ struct zfcp_port {
        u32                    supported_classes;
 };
 
-/* the struct device sysfs_device must be at the beginning of this structure.
- * pointer to struct device is used to free unit structure in release function
- * of the device. don't change!
- */
 struct zfcp_unit {
        struct device          sysfs_device;   /* sysfs device */
        struct list_head       list;           /* list of logical units */
@@ -822,6 +607,7 @@ struct zfcp_unit {
         struct scsi_device     *device;        /* scsi device struct pointer */
        struct zfcp_erp_action erp_action;     /* pending error recovery */
         atomic_t               erp_counter;
+       struct zfcp_latencies   latencies;
 };
 
 /* FSF request */
@@ -831,19 +617,19 @@ struct zfcp_fsf_req {
        struct zfcp_adapter    *adapter;       /* adapter request belongs to */
        u8                     sbal_number;    /* nr of SBALs free for use */
        u8                     sbal_first;     /* first SBAL for this request */
-       u8                     sbal_last;      /* last possible SBAL for
+       u8                     sbal_last;      /* last SBAL for this request */
+       u8                     sbal_limit;      /* last possible SBAL for
                                                  this request */
-       u8                     sbal_curr;      /* current SBAL during creation
-                                                 of request */
        u8                     sbale_curr;     /* current SBALE during creation
                                                  of request */
+       u8                      sbal_response;  /* SBAL used in interrupt */
        wait_queue_head_t      completion_wq;  /* can be used by a routine
                                                  to wait for completion */
        volatile u32           status;         /* status of this request */
        u32                    fsf_command;    /* FSF Command copy */
        struct fsf_qtcb        *qtcb;          /* address of associated QTCB */
        u32                    seq_no;         /* Sequence number of request */
-       unsigned long          data;           /* private data of request */
+       void                    *data;           /* private data of request */
        struct timer_list     timer;           /* used for erp or scsi er */
        struct zfcp_erp_action *erp_action;    /* used if this request is
                                                  issued on behalf of erp */
@@ -851,10 +637,9 @@ struct zfcp_fsf_req {
                                                  from emergency pool */
        unsigned long long     issued;         /* request sent time (STCK) */
        struct zfcp_unit       *unit;
+       void                    (*handler)(struct zfcp_fsf_req *);
 };
 
-typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
-
 /* driver data */
 struct zfcp_data {
        struct scsi_host_template scsi_host_template;
@@ -873,29 +658,11 @@ struct zfcp_data {
        char                    init_busid[BUS_ID_SIZE];
        wwn_t                   init_wwpn;
        fcp_lun_t               init_fcp_lun;
-       char                    *driver_version;
        struct kmem_cache               *fsf_req_qtcb_cache;
        struct kmem_cache               *sr_buffer_cache;
        struct kmem_cache               *gid_pn_cache;
 };
 
-/**
- * struct zfcp_sg_list - struct describing a scatter-gather list
- * @sg: pointer to array of (struct scatterlist)
- * @count: number of elements in scatter-gather list
- */
-struct zfcp_sg_list {
-       struct scatterlist *sg;
-       unsigned int count;
-};
-
-/* number of elements for various memory pools */
-#define ZFCP_POOL_FSF_REQ_ERP_NR       1
-#define ZFCP_POOL_FSF_REQ_SCSI_NR      1
-#define ZFCP_POOL_FSF_REQ_ABORT_NR     1
-#define ZFCP_POOL_STATUS_READ_NR       ZFCP_STATUS_READS_RECOM
-#define ZFCP_POOL_DATA_GID_PN_NR       1
-
 /* struct used by memory pools for fsf_requests */
 struct zfcp_fsf_req_qtcb {
        struct zfcp_fsf_req fsf_req;
@@ -905,7 +672,6 @@ struct zfcp_fsf_req_qtcb {
 /********************** ZFCP SPECIFIC DEFINES ********************************/
 
 #define ZFCP_REQ_AUTO_CLEANUP  0x00000002
-#define ZFCP_WAIT_FOR_SBAL     0x00000004
 #define ZFCP_REQ_NO_QTCB       0x00000008
 
 #define ZFCP_SET                0x00000100
@@ -916,12 +682,6 @@ struct zfcp_fsf_req_qtcb {
            ((atomic_read(target) & mask) == mask)
 #endif
 
-extern void _zfcp_hex_dump(char *, int);
-#define ZFCP_HEX_DUMP(level, addr, count) \
-               if (ZFCP_LOG_CHECK(level)) { \
-                       _zfcp_hex_dump(addr, count); \
-               }
-
 #define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
 #define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
 #define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
@@ -934,15 +694,6 @@ static inline int zfcp_reqlist_hash(unsigned long req_id)
        return req_id % REQUEST_LIST_SIZE;
 }
 
-static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
-                                   struct zfcp_fsf_req *fsf_req)
-{
-       unsigned int idx;
-
-       idx = zfcp_reqlist_hash(fsf_req->req_id);
-       list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
-}
-
 static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
                                       struct zfcp_fsf_req *fsf_req)
 {
index 8054846..643ac4b 100644 (file)
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Error Recovery Procedures (ERP).
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_ERP
-
 #include "zfcp_ext.h"
 
-static int zfcp_erp_adisc(struct zfcp_port *);
-static void zfcp_erp_adisc_handler(unsigned long);
-
-static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8,
-                                           void *);
-static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8,
-                                               void *);
-static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *);
-static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *);
-
-static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8,
-                                            void *);
-static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8,
-                                            void *);
-
-static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
-static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
-static void zfcp_erp_port_block(struct zfcp_port *, int);
-static void zfcp_erp_port_unblock(struct zfcp_port *);
-static void zfcp_erp_unit_block(struct zfcp_unit *, int);
-static void zfcp_erp_unit_unblock(struct zfcp_unit *);
-
-static int zfcp_erp_thread(void *);
-
-static int zfcp_erp_strategy(struct zfcp_erp_action *);
-
-static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *);
-static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *);
-static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int);
-static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int);
-static int zfcp_erp_strategy_check_port(struct zfcp_port *, int);
-static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
-static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
-                                        struct zfcp_port *,
-                                        struct zfcp_unit *, int);
-static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
-static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
-                                             struct zfcp_port *,
-                                             struct zfcp_unit *, int);
-static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *);
-static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
-
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
-static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_open_fsf_statusread(
-       struct zfcp_erp_action *);
-
-static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *);
-static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *);
-
-static int zfcp_erp_port_strategy(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *);
-static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open_nameserver_wakeup(
-       struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *);
-static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *);
-
-static int zfcp_erp_unit_strategy(struct zfcp_erp_action *);
-static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
-static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
-
-static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
-static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
-static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
-static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
-
-static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
-                                  struct zfcp_port *, struct zfcp_unit *,
-                                  u8 id, void *ref);
-static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
-static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
-                                   struct zfcp_port *, struct zfcp_unit *,
-                                   int);
-
-static void zfcp_erp_action_ready(struct zfcp_erp_action *);
-static int  zfcp_erp_action_exists(struct zfcp_erp_action *);
-
-static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
-static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
-
-static void zfcp_erp_memwait_handler(unsigned long);
+#define ZFCP_MAX_ERPS                   3
 
-/**
- * zfcp_close_qdio - close qdio queues for an adapter
- */
-static void zfcp_close_qdio(struct zfcp_adapter *adapter)
-{
-       struct zfcp_qdio_queue *req_queue;
-       int first, count;
+enum zfcp_erp_act_flags {
+       ZFCP_STATUS_ERP_TIMEDOUT        = 0x10000000,
+       ZFCP_STATUS_ERP_CLOSE_ONLY      = 0x01000000,
+       ZFCP_STATUS_ERP_DISMISSING      = 0x00100000,
+       ZFCP_STATUS_ERP_DISMISSED       = 0x00200000,
+       ZFCP_STATUS_ERP_LOWMEM          = 0x00400000,
+};
 
-       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
-               return;
+enum zfcp_erp_steps {
+       ZFCP_ERP_STEP_UNINITIALIZED     = 0x0000,
+       ZFCP_ERP_STEP_FSF_XCONFIG       = 0x0001,
+       ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
+       ZFCP_ERP_STEP_PORT_CLOSING      = 0x0100,
+       ZFCP_ERP_STEP_NAMESERVER_OPEN   = 0x0200,
+       ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
+       ZFCP_ERP_STEP_PORT_OPENING      = 0x0800,
+       ZFCP_ERP_STEP_UNIT_CLOSING      = 0x1000,
+       ZFCP_ERP_STEP_UNIT_OPENING      = 0x2000,
+};
 
-       /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-       req_queue = &adapter->request_queue;
-       write_lock_irq(&req_queue->queue_lock);
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
-       write_unlock_irq(&req_queue->queue_lock);
-
-       while (qdio_shutdown(adapter->ccw_device,
-                            QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
-               ssleep(1);
-
-       /* cleanup used outbound sbals */
-       count = atomic_read(&req_queue->free_count);
-       if (count < QDIO_MAX_BUFFERS_PER_Q) {
-               first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q;
-               count = QDIO_MAX_BUFFERS_PER_Q - count;
-               zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
-       }
-       req_queue->free_index = 0;
-       atomic_set(&req_queue->free_count, 0);
-       req_queue->distance_from_int = 0;
-       adapter->response_queue.free_index = 0;
-       atomic_set(&adapter->response_queue.free_count, 0);
+enum zfcp_erp_act_type {
+       ZFCP_ERP_ACTION_REOPEN_UNIT        = 1,
+       ZFCP_ERP_ACTION_REOPEN_PORT        = 2,
+       ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+       ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
+};
+
+enum zfcp_erp_act_state {
+       ZFCP_ERP_ACTION_RUNNING = 1,
+       ZFCP_ERP_ACTION_READY   = 2,
+};
+
+enum zfcp_erp_act_result {
+       ZFCP_ERP_SUCCEEDED = 0,
+       ZFCP_ERP_FAILED    = 1,
+       ZFCP_ERP_CONTINUES = 2,
+       ZFCP_ERP_EXIT      = 3,
+       ZFCP_ERP_DISMISSED = 4,
+       ZFCP_ERP_NOMEM     = 5,
+};
+
+static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
+{
+       zfcp_erp_modify_adapter_status(adapter, 15, NULL,
+                                      ZFCP_STATUS_COMMON_UNBLOCKED | mask,
+                                      ZFCP_CLEAR);
 }
 
-/**
- * zfcp_close_fsf - stop FSF operations for an adapter
- *
- * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of
- * requests waiting for completion; especially this returns SCSI commands
- * with error state).
- */
-static void zfcp_close_fsf(struct zfcp_adapter *adapter)
+static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
 {
-       /* close queues to ensure that buffers are not accessed by adapter */
-       zfcp_close_qdio(adapter);
-       zfcp_fsf_req_dismiss_all(adapter);
-       /* reset FSF request sequence number */
-       adapter->fsf_req_seq_no = 0;
-       /* all ports and units are closed */
-       zfcp_erp_modify_adapter_status(adapter, 24, NULL,
-                                      ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
+       struct zfcp_erp_action *curr_act;
+
+       list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
+               if (act == curr_act)
+                       return ZFCP_ERP_ACTION_RUNNING;
+       return 0;
 }
 
-/**
- * zfcp_fsf_request_timeout_handler - called if a request timed out
- * @data: pointer to adapter for handler function
- *
- * This function needs to be called if requests (ELS, Generic Service,
- * or SCSI commands) exceed a certain time limit. The assumption is
- * that after the time limit the adapter get stuck. So we trigger a reopen of
- * the adapter.
- */
-static void zfcp_fsf_request_timeout_handler(unsigned long data)
+static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
 {
-       struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
-       zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
-                               NULL);
+       struct zfcp_adapter *adapter = act->adapter;
+
+       list_move(&act->list, &act->adapter->erp_ready_head);
+       zfcp_rec_dbf_event_action(146, act);
+       up(&adapter->erp_ready_sem);
+       zfcp_rec_dbf_event_thread(2, adapter);
 }
 
-void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
 {
-       fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
-       fsf_req->timer.data = (unsigned long) fsf_req->adapter;
-       fsf_req->timer.expires = jiffies + timeout;
-       add_timer(&fsf_req->timer);
+       act->status |= ZFCP_STATUS_ERP_DISMISSED;
+       if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
+               zfcp_erp_action_ready(act);
 }
 
-/*
- * function:
- *
- * purpose:    called if an adapter failed,
- *             initiates adapter recovery which is done
- *             asynchronously
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
- */
-static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter,
-                                           int clear_mask, u8 id, void *ref)
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
 {
-       int retval;
+       if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+               zfcp_erp_action_dismiss(&unit->erp_action);
+}
 
-       ZFCP_LOG_DEBUG("reopen adapter %s\n",
-                      zfcp_get_busid_by_adapter(adapter));
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+{
+       struct zfcp_unit *unit;
 
-       zfcp_erp_adapter_block(adapter, clear_mask);
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+               zfcp_erp_action_dismiss(&port->erp_action);
+       else
+               list_for_each_entry(unit, &port->unit_list_head, list)
+                   zfcp_erp_action_dismiss_unit(unit);
+}
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
-               ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n",
-                              zfcp_get_busid_by_adapter(adapter));
-               /* ensure propagation of failed status to new devices */
-               zfcp_erp_adapter_failed(adapter, 13, NULL);
-               retval = -EIO;
-               goto out;
-       }
-       retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-                                        adapter, NULL, NULL, id, ref);
+static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+{
+       struct zfcp_port *port;
 
- out:
-       return retval;
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+               zfcp_erp_action_dismiss(&adapter->erp_action);
+       else
+               list_for_each_entry(port, &adapter->port_list_head, list)
+                   zfcp_erp_action_dismiss_port(port);
 }
 
-/*
- * function:
- *
- * purpose:    Wrappper for zfcp_erp_adapter_reopen_internal
- *              used to ensure the correct locking
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
- */
-int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask,
-                           u8 id, void *ref)
+static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+                                struct zfcp_port *port,
+                                struct zfcp_unit *unit)
 {
-       int retval;
-       unsigned long flags;
+       int need = want;
+       int u_status, p_status, a_status;
 
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       write_lock(&adapter->erp_lock);
-       retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref);
-       write_unlock(&adapter->erp_lock);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+       switch (want) {
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               u_status = atomic_read(&unit->status);
+               if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+                       return 0;
+               p_status = atomic_read(&port->status);
+               if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
+                     p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+                       return 0;
+               if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+                       need = ZFCP_ERP_ACTION_REOPEN_PORT;
+               /* fall through */
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+               p_status = atomic_read(&port->status);
+               if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+                       return 0;
+               a_status = atomic_read(&adapter->status);
+               if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
+                     a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+                       return 0;
+               if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+                       need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
+               /* fall through */
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               a_status = atomic_read(&adapter->status);
+               if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+                       return 0;
+       }
 
-       return retval;
+       return need;
 }
 
-int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask,
-                             u8 id, void *ref)
+static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
+                                                 struct zfcp_adapter *adapter,
+                                                 struct zfcp_port *port,
+                                                 struct zfcp_unit *unit)
 {
-       int retval;
+       struct zfcp_erp_action *erp_action;
+       u32 status = 0;
 
-       retval = zfcp_erp_adapter_reopen(adapter,
-                                        ZFCP_STATUS_COMMON_RUNNING |
-                                        ZFCP_STATUS_COMMON_ERP_FAILED |
-                                        clear_mask, id, ref);
+       switch (need) {
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               zfcp_unit_get(unit);
+               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
+               erp_action = &unit->erp_action;
+               if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
+                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+               break;
 
-       return retval;
-}
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+               zfcp_port_get(port);
+               zfcp_erp_action_dismiss_port(port);
+               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+               erp_action = &port->erp_action;
+               if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
+                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+               break;
 
-int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id,
-                          void *ref)
-{
-       int retval;
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               zfcp_adapter_get(adapter);
+               zfcp_erp_action_dismiss_adapter(adapter);
+               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+               erp_action = &adapter->erp_action;
+               if (!(atomic_read(&adapter->status) &
+                     ZFCP_STATUS_COMMON_RUNNING))
+                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+               break;
 
-       retval = zfcp_erp_port_reopen(port,
-                                     ZFCP_STATUS_COMMON_RUNNING |
-                                     ZFCP_STATUS_COMMON_ERP_FAILED |
-                                     clear_mask, id, ref);
+       default:
+               return NULL;
+       }
 
-       return retval;
+       memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+       erp_action->adapter = adapter;
+       erp_action->port = port;
+       erp_action->unit = unit;
+       erp_action->action = need;
+       erp_action->status = status;
+
+       return erp_action;
 }
 
-int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id,
-                          void *ref)
+static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+                                  struct zfcp_port *port,
+                                  struct zfcp_unit *unit, u8 id, void *ref)
 {
-       int retval;
+       int retval = 1, need;
+       struct zfcp_erp_action *act = NULL;
+
+       if (!(atomic_read(&adapter->status) &
+             ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
+               return -EIO;
 
-       retval = zfcp_erp_unit_reopen(unit,
-                                     ZFCP_STATUS_COMMON_RUNNING |
-                                     ZFCP_STATUS_COMMON_ERP_FAILED |
-                                     clear_mask, id, ref);
+       need = zfcp_erp_required_act(want, adapter, port, unit);
+       if (!need)
+               goto out;
 
+       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+       act = zfcp_erp_setup_act(need, adapter, port, unit);
+       if (!act)
+               goto out;
+       ++adapter->erp_total_count;
+       list_add_tail(&act->list, &adapter->erp_ready_head);
+       up(&adapter->erp_ready_sem);
+       zfcp_rec_dbf_event_thread(1, adapter);
+       retval = 0;
+ out:
+       zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
+                                  adapter, port, unit);
        return retval;
 }
 
-
-/**
- * zfcp_erp_adisc - send ADISC ELS command
- * @port: port structure
- */
-static int
-zfcp_erp_adisc(struct zfcp_port *port)
+static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+                                   int clear_mask, u8 id, void *ref)
 {
-       struct zfcp_adapter *adapter = port->adapter;
-       struct zfcp_send_els *send_els;
-       struct zfcp_ls_adisc *adisc;
-       void *address = NULL;
-       int retval = 0;
-
-       send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
-       if (send_els == NULL)
-               goto nomem;
-
-       send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
-       if (send_els->req == NULL)
-               goto nomem;
-       sg_init_table(send_els->req, 1);
-
-       send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
-       if (send_els->resp == NULL)
-               goto nomem;
-       sg_init_table(send_els->resp, 1);
-
-       address = (void *) get_zeroed_page(GFP_ATOMIC);
-       if (address == NULL)
-               goto nomem;
-
-       zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc));
-       address += PAGE_SIZE >> 1;
-       zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc));
-       send_els->req_count = send_els->resp_count = 1;
-
-       send_els->adapter = adapter;
-       send_els->port = port;
-       send_els->d_id = port->d_id;
-       send_els->handler = zfcp_erp_adisc_handler;
-       send_els->handler_data = (unsigned long) send_els;
-
-       adisc = zfcp_sg_to_address(send_els->req);
-       send_els->ls_code = adisc->code = ZFCP_LS_ADISC;
-
-       /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
-          without FC-AL-2 capability, so we don't set it */
-       adisc->wwpn = fc_host_port_name(adapter->scsi_host);
-       adisc->wwnn = fc_host_node_name(adapter->scsi_host);
-       adisc->nport_id = fc_host_port_id(adapter->scsi_host);
-       ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x "
-                     "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
-                     "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
-                     adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
-                     (wwn_t) adisc->wwnn, adisc->hard_nport_id,
-                     adisc->nport_id);
-
-       retval = zfcp_fsf_send_els(send_els);
-       if (retval != 0) {
-               ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
-                               "0x%06x on adapter %s\n", send_els->d_id,
-                               zfcp_get_busid_by_adapter(adapter));
-               goto freemem;
-       }
+       zfcp_erp_adapter_block(adapter, clear_mask);
 
-       goto out;
-
- nomem:
-       retval = -ENOMEM;
- freemem:
-       if (address != NULL)
-               __free_pages(sg_page(send_els->req), 0);
-       if (send_els != NULL) {
-               kfree(send_els->req);
-               kfree(send_els->resp);
-               kfree(send_els);
+       /* ensure propagation of failed status to new devices */
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+               zfcp_erp_adapter_failed(adapter, 13, NULL);
+               return -EIO;
        }
- out:
-       return retval;
+       return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+                                      adapter, NULL, NULL, id, ref);
 }
 
-
 /**
- * zfcp_erp_adisc_handler - handler for ADISC ELS command
- * @data: pointer to struct zfcp_send_els
- *
- * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
+ * zfcp_erp_adapter_reopen - Reopen adapter.
+ * @adapter: Adapter to reopen.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
  */
-static void
-zfcp_erp_adisc_handler(unsigned long data)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
+                            u8 id, void *ref)
 {
-       struct zfcp_send_els *send_els;
-       struct zfcp_port *port;
-       struct zfcp_adapter *adapter;
-       u32 d_id;
-       struct zfcp_ls_adisc_acc *adisc;
-
-       send_els = (struct zfcp_send_els *) data;
-       adapter = send_els->adapter;
-       port = send_els->port;
-       d_id = send_els->d_id;
-
-       /* request rejected or timed out */
-       if (send_els->status != 0) {
-               ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
-                               "force physical port reopen "
-                               "(adapter %s, port d_id=0x%06x)\n",
-                               zfcp_get_busid_by_adapter(adapter), d_id);
-               if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL))
-                       ZFCP_LOG_NORMAL("failed reopen of port "
-                                       "(adapter %s, wwpn=0x%016Lx)\n",
-                                       zfcp_get_busid_by_port(port),
-                                       port->wwpn);
-               goto out;
-       }
-
-       adisc = zfcp_sg_to_address(send_els->resp);
-
-       ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id "
-                     "0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
-                     "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
-                     d_id, fc_host_port_id(adapter->scsi_host),
-                     (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
-                     adisc->hard_nport_id, adisc->nport_id);
-
-       /* set wwnn for port */
-       if (port->wwnn == 0)
-               port->wwnn = adisc->wwnn;
-
-       if (port->wwpn != adisc->wwpn) {
-               ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
-                               "port (adapter %s, wwpn=0x%016Lx, "
-                               "adisc_resp_wwpn=0x%016Lx)\n",
-                               zfcp_get_busid_by_port(port),
-                               port->wwpn, (wwn_t) adisc->wwpn);
-               if (zfcp_erp_port_reopen(port, 0, 64, NULL))
-                       ZFCP_LOG_NORMAL("failed reopen of port "
-                                       "(adapter %s, wwpn=0x%016Lx)\n",
-                                       zfcp_get_busid_by_port(port),
-                                       port->wwpn);
-       }
+       unsigned long flags;
 
- out:
-       zfcp_port_put(port);
-       __free_pages(sg_page(send_els->req), 0);
-       kfree(send_els->req);
-       kfree(send_els->resp);
-       kfree(send_els);
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       write_lock(&adapter->erp_lock);
+       _zfcp_erp_adapter_reopen(adapter, clear, id, ref);
+       write_unlock(&adapter->erp_lock);
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
-
 /**
- * zfcp_test_link - lightweight link test procedure
- * @port: port to be tested
- *
- * Test status of a link to a remote port using the ELS command ADISC.
+ * zfcp_erp_adapter_shutdown - Shutdown adapter.
+ * @adapter: Adapter to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
  */
-int
-zfcp_test_link(struct zfcp_port *port)
+void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
+                              u8 id, void *ref)
 {
-       int retval;
-
-       zfcp_port_get(port);
-       retval = zfcp_erp_adisc(port);
-       if (retval != 0 && retval != -EBUSY) {
-               zfcp_port_put(port);
-               ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
-                               "on adapter %s\n ", port->wwpn,
-                               zfcp_get_busid_by_port(port));
-               retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
-               if (retval != 0) {
-                       ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
-                                       "on adapter %s failed\n", port->wwpn,
-                                       zfcp_get_busid_by_port(port));
-                       retval = -EPERM;
-               }
-       }
-
-       return retval;
+       int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+       zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
 }
 
-
-/*
- * function:
- *
- * purpose:    called if a port failed to be opened normally
- *             initiates Forced Reopen recovery which is done
- *             asynchronously
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
+/**
+ * zfcp_erp_port_shutdown - Shutdown port
+ * @port: Port to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
  */
-static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port,
-                                               int clear_mask, u8 id,
-                                               void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
 {
-       int retval;
+       int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+       zfcp_erp_port_reopen(port, clear | flags, id, ref);
+}
 
-       ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n",
-                      port->wwpn, zfcp_get_busid_by_port(port));
+/**
+ * zfcp_erp_unit_shutdown - Shutdown unit
+ * @unit: Unit to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
+ */
+void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
+{
+       int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+       zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
+}
 
-       zfcp_erp_port_block(port, clear_mask);
+static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
+{
+       zfcp_erp_modify_port_status(port, 17, NULL,
+                                   ZFCP_STATUS_COMMON_UNBLOCKED | clear,
+                                   ZFCP_CLEAR);
+}
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
-               ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx "
-                              "on adapter %s\n", port->wwpn,
-                              zfcp_get_busid_by_port(port));
-               retval = -EIO;
-               goto out;
-       }
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
+                                        int clear, u8 id, void *ref)
+{
+       zfcp_erp_port_block(port, clear);
 
-       retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-                                        port->adapter, port, NULL, id, ref);
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+               return;
 
- out:
-       return retval;
+       zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+                               port->adapter, port, NULL, id, ref);
 }
 
-/*
- * function:
- *
- * purpose:    Wrappper for zfcp_erp_port_forced_reopen_internal
- *              used to ensure the correct locking
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
+/**
+ * zfcp_erp_port_forced_reopen - Forced close of port and open again
+ * @port: Port to force close and to reopen.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
  */
-int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id,
-                               void *ref)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
+                                void *ref)
 {
-       int retval;
        unsigned long flags;
-       struct zfcp_adapter *adapter;
+       struct zfcp_adapter *adapter = port->adapter;
 
-       adapter = port->adapter;
        read_lock_irqsave(&zfcp_data.config_lock, flags);
        write_lock(&adapter->erp_lock);
-       retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id,
-                                                     ref);
+       _zfcp_erp_port_forced_reopen(port, clear, id, ref);
        write_unlock(&adapter->erp_lock);
        read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-       return retval;
 }
 
-/*
- * function:
- *
- * purpose:    called if a port is to be opened
- *             initiates Reopen recovery which is done
- *             asynchronously
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
- */
-static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask,
-                                        u8 id, void *ref)
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
+                                void *ref)
 {
-       int retval;
-
-       ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
-                      port->wwpn, zfcp_get_busid_by_port(port));
+       zfcp_erp_port_block(port, clear);
 
-       zfcp_erp_port_block(port, clear_mask);
-
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
-               ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
-                              "on adapter %s\n", port->wwpn,
-                              zfcp_get_busid_by_port(port));
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
                /* ensure propagation of failed status to new devices */
                zfcp_erp_port_failed(port, 14, NULL);
-               retval = -EIO;
-               goto out;
+               return -EIO;
        }
 
-       retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-                                        port->adapter, port, NULL, id, ref);
-
- out:
-       return retval;
+       return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+                                      port->adapter, port, NULL, id, ref);
 }
 
 /**
- * zfcp_erp_port_reopen - initiate reopen of a remote port
- * @port: port to be reopened
- * @clear_mask: specifies flags in port status to be cleared
- * Return: 0 on success, < 0 on error
+ * zfcp_erp_port_reopen - trigger remote port recovery
+ * @port: port to recover
+ * @clear_mask: flags in port status to be cleared
  *
- * This is a wrappper function for zfcp_erp_port_reopen_internal. It ensures
- * correct locking. An error recovery task is initiated to do the reopen.
- * To wait for the completion of the reopen zfcp_erp_wait should be used.
+ * Returns 0 if recovery has been triggered, < 0 if not.
  */
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id,
-                        void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
 {
-       int retval;
        unsigned long flags;
+       int retval;
        struct zfcp_adapter *adapter = port->adapter;
 
        read_lock_irqsave(&zfcp_data.config_lock, flags);
        write_lock(&adapter->erp_lock);
-       retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref);
+       retval = _zfcp_erp_port_reopen(port, clear, id, ref);
        write_unlock(&adapter->erp_lock);
        read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
        return retval;
 }
 
-/*
- * function:
- *
- * purpose:    called if a unit is to be opened
- *             initiates Reopen recovery which is done
- *             asynchronously
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
- */
-static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
-                                        u8 id, void *ref)
+static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
 {
-       int retval;
-       struct zfcp_adapter *adapter = unit->port->adapter;
+       zfcp_erp_modify_unit_status(unit, 19, NULL,
+                                   ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
+                                   ZFCP_CLEAR);
+}
 
-       ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx "
-                      "on adapter %s\n", unit->fcp_lun,
-                      unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
+                                 void *ref)
+{
+       struct zfcp_adapter *adapter = unit->port->adapter;
 
-       zfcp_erp_unit_block(unit, clear_mask);
+       zfcp_erp_unit_block(unit, clear);
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
-               ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx "
-                              "on port 0x%016Lx on adapter %s\n",
-                              unit->fcp_lun, unit->port->wwpn,
-                              zfcp_get_busid_by_unit(unit));
-               retval = -EIO;
-               goto out;
-       }
+       if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+               return;
 
-       retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
-                                        adapter, unit->port, unit, id, ref);
- out:
-       return retval;
+       zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
+                               adapter, unit->port, unit, id, ref);
 }
 
 /**
@@ -643,807 +408,655 @@ static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
  * @unit: unit to be reopened
  * @clear_mask: specifies flags in unit status to be cleared
  * Return: 0 on success, < 0 on error
- *
- * This is a wrappper for zfcp_erp_unit_reopen_internal. It ensures correct
- * locking. An error recovery task is initiated to do the reopen.
- * To wait for the completion of the reopen zfcp_erp_wait should be used.
  */
-int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id,
-                        void *ref)
+void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref)
 {
-       int retval;
        unsigned long flags;
-       struct zfcp_adapter *adapter;
-       struct zfcp_port *port;
-
-       port = unit->port;
-       adapter = port->adapter;
+       struct zfcp_port *port = unit->port;
+       struct zfcp_adapter *adapter = port->adapter;
 
        read_lock_irqsave(&zfcp_data.config_lock, flags);
        write_lock(&adapter->erp_lock);
-       retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref);
+       _zfcp_erp_unit_reopen(unit, clear, id, ref);
        write_unlock(&adapter->erp_lock);
        read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-       return retval;
 }
 
-/**
- * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
- */
-static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
+static int status_change_set(unsigned long mask, atomic_t *status)
 {
-       zfcp_erp_modify_adapter_status(adapter, 15, NULL,
-                                      ZFCP_STATUS_COMMON_UNBLOCKED |
-                                      clear_mask, ZFCP_CLEAR);
+       return (atomic_read(status) ^ mask) & mask;
 }
 
-/* FIXME: isn't really atomic */
-/*
- * returns the mask which has not been set so far, i.e.
- * 0 if no bit has been changed, !0 if some bit has been changed
- */
-static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v)
+static int status_change_clear(unsigned long mask, atomic_t *status)
 {
-       int changed_bits = (atomic_read(v) /*XOR*/^ mask) & mask;
-       atomic_set_mask(mask, v);
-       return changed_bits;
+       return atomic_read(status) & mask;
 }
 
-/* FIXME: isn't really atomic */
-/*
- * returns the mask which has not been cleared so far, i.e.
- * 0 if no bit has been changed, !0 if some bit has been changed
- */
-static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v)
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
-       int changed_bits = atomic_read(v) & mask;
-       atomic_clear_mask(mask, v);
-       return changed_bits;
+       if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
+               zfcp_rec_dbf_event_adapter(16, NULL, adapter);
+       atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
-/**
- * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
- */
-static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
-       if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                                    &adapter->status))
-               zfcp_rec_dbf_event_adapter(16, NULL, adapter);
+       if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
+               zfcp_rec_dbf_event_port(18, NULL, port);
+       atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
-/*
- * function:
- *
- * purpose:    disable I/O,
- *             return any open requests and clean them up,
- *             aim: no pending and incoming I/O
- *
- * returns:
- */
-static void
-zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
+static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
 {
-       zfcp_erp_modify_port_status(port, 17, NULL,
-                                   ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
-                                   ZFCP_CLEAR);
-}
-
-/*
- * function:
- *
- * purpose:    enable I/O
- *
- * returns:
- */
-static void
-zfcp_erp_port_unblock(struct zfcp_port *port)
-{
-       if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                                    &port->status))
-               zfcp_rec_dbf_event_port(18, NULL, port);
-}
-
-/*
- * function:
- *
- * purpose:    disable I/O,
- *             return any open requests and clean them up,
- *             aim: no pending and incoming I/O
- *
- * returns:
- */
-static void
-zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
-{
-       zfcp_erp_modify_unit_status(unit, 19, NULL,
-                                   ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
-                                   ZFCP_CLEAR);
-}
-
-/*
- * function:
- *
- * purpose:    enable I/O
- *
- * returns:
- */
-static void
-zfcp_erp_unit_unblock(struct zfcp_unit *unit)
-{
-       if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                                    &unit->status))
+       if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
                zfcp_rec_dbf_event_unit(20, NULL, unit);
+       atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
 }
 
-static void
-zfcp_erp_action_ready(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       zfcp_erp_action_to_ready(erp_action);
-       up(&adapter->erp_ready_sem);
-       zfcp_rec_dbf_event_thread(2, adapter, 0);
+       list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
+       zfcp_rec_dbf_event_action(145, erp_action);
 }
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:    <0                      erp_action not found in any list
- *             ZFCP_ERP_ACTION_READY   erp_action is in ready list
- *             ZFCP_ERP_ACTION_RUNNING erp_action is in running list
- *
- * locks:      erp_lock must be held
- */
-static int
-zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
 {
-       int retval = -EINVAL;
-       struct list_head *entry;
-       struct zfcp_erp_action *entry_erp_action;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       /* search in running list */
-       list_for_each(entry, &adapter->erp_running_head) {
-               entry_erp_action =
-                   list_entry(entry, struct zfcp_erp_action, list);
-               if (entry_erp_action == erp_action) {
-                       retval = ZFCP_ERP_ACTION_RUNNING;
-                       goto out;
-               }
-       }
-       /* search in ready list */
-       list_for_each(entry, &adapter->erp_ready_head) {
-               entry_erp_action =
-                   list_entry(entry, struct zfcp_erp_action, list);
-               if (entry_erp_action == erp_action) {
-                       retval = ZFCP_ERP_ACTION_READY;
-                       goto out;
-               }
-       }
-
- out:
-       return retval;
-}
+       struct zfcp_adapter *adapter = act->adapter;
 
-/*
- * purpose:    checks current status of action (timed out, dismissed, ...)
- *             and does appropriate preparations (dismiss fsf request, ...)
- *
- * locks:      called under erp_lock (disabled interrupts)
- */
-static void
-zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
-{
-       struct zfcp_adapter *adapter = erp_action->adapter;
+       if (!act->fsf_req)
+               return;
 
-       if (erp_action->fsf_req) {
-               /* take lock to ensure that request is not deleted meanwhile */
-               spin_lock(&adapter->req_list_lock);
-               if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) &&
-                   erp_action->fsf_req->erp_action == erp_action) {
-                       /* fsf_req still exists */
-                       /* dismiss fsf_req of timed out/dismissed erp_action */
-                       if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
-                                                 ZFCP_STATUS_ERP_TIMEDOUT)) {
-                               erp_action->fsf_req->status |=
-                                       ZFCP_STATUS_FSFREQ_DISMISSED;
-                               zfcp_rec_dbf_event_action(142, erp_action);
-                       }
-                       if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
-                               zfcp_rec_dbf_event_action(143, erp_action);
-                               ZFCP_LOG_NORMAL("error: erp step timed out "
-                                               "(action=%d, fsf_req=%p)\n ",
-                                               erp_action->action,
-                                               erp_action->fsf_req);
-                       }
-                       /*
-                        * If fsf_req is neither dismissed nor completed
-                        * then keep it running asynchronously and don't mess
-                        * with the association of erp_action and fsf_req.
-                        */
-                       if (erp_action->fsf_req->status &
-                                       (ZFCP_STATUS_FSFREQ_COMPLETED |
-                                              ZFCP_STATUS_FSFREQ_DISMISSED)) {
-                               /* forget about association between fsf_req
-                                  and erp_action */
-                               erp_action->fsf_req = NULL;
-                       }
-               } else {
-                       /*
-                        * even if this fsf_req has gone, forget about
-                        * association between erp_action and fsf_req
-                        */
-                       erp_action->fsf_req = NULL;
+       spin_lock(&adapter->req_list_lock);
+       if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
+           act->fsf_req->erp_action == act) {
+               if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
+                                  ZFCP_STATUS_ERP_TIMEDOUT)) {
+                       act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+                       zfcp_rec_dbf_event_action(142, act);
                }
-               spin_unlock(&adapter->req_list_lock);
-       }
-}
-
-/**
- * zfcp_erp_async_handler_nolock - complete erp_action
- *
- * Used for normal completion, time-out, dismissal and failure after
- * low memory condition.
- */
-static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
-                                         unsigned long set_mask)
-{
-       if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
-               erp_action->status |= set_mask;
-               zfcp_erp_action_ready(erp_action);
-       } else {
-               /* action is ready or gone - nothing to do */
-       }
+               if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+                       zfcp_rec_dbf_event_action(143, act);
+               if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
+                                           ZFCP_STATUS_FSFREQ_DISMISSED))
+                       act->fsf_req = NULL;
+       } else
+               act->fsf_req = NULL;
+       spin_unlock(&adapter->req_list_lock);
 }
 
 /**
- * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
+ * zfcp_erp_notify - Trigger ERP action.
+ * @erp_action: ERP action to continue.
+ * @set_mask: ERP action status flags to set.
  */
-void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
-                           unsigned long set_mask)
+void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
 {
        struct zfcp_adapter *adapter = erp_action->adapter;
        unsigned long flags;
 
        write_lock_irqsave(&adapter->erp_lock, flags);
-       zfcp_erp_async_handler_nolock(erp_action, set_mask);
+       if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
+               erp_action->status |= set_mask;
+               zfcp_erp_action_ready(erp_action);
+       }
        write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-/*
- * purpose:    is called for erp_action which was slept waiting for
- *             memory becoming avaliable,
- *             will trigger that this action will be continued
+/**
+ * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
+ * @data: ERP action (from timer data)
  */
-static void
-zfcp_erp_memwait_handler(unsigned long data)
+void zfcp_erp_timeout_handler(unsigned long data)
 {
-       struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
-
-       zfcp_erp_async_handler(erp_action, 0);
+       struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
+       zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
 }
 
-/*
- * purpose:    is called if an asynchronous erp step timed out,
- *             action gets an appropriate flag and will be processed
- *             accordingly
- */
-static void zfcp_erp_timeout_handler(unsigned long data)
+static void zfcp_erp_memwait_handler(unsigned long data)
 {
-       struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
-
-       zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
+       zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
 }
 
-/**
- * zfcp_erp_action_dismiss - dismiss an erp_action
- *
- * adapter->erp_lock must be held
- *
- * Dismissal of an erp_action is usually required if an erp_action of
- * higher priority is generated.
- */
-static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 {
-       erp_action->status |= ZFCP_STATUS_ERP_DISMISSED;
-       if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING)
-               zfcp_erp_action_ready(erp_action);
+       init_timer(&erp_action->timer);
+       erp_action->timer.function = zfcp_erp_memwait_handler;
+       erp_action->timer.data = (unsigned long) erp_action;
+       erp_action->timer.expires = jiffies + HZ;
+       add_timer(&erp_action->timer);
 }
 
-int
-zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
+static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
+                                     int clear, u8 id, void *ref)
 {
-       int retval = 0;
-
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-
-       retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
-       if (retval < 0) {
-               ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
-                               "adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
-       } else {
-               wait_event(adapter->erp_thread_wqh,
-                          atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
-                                           &adapter->status));
-       }
+       struct zfcp_port *port;
 
-       return (retval < 0);
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
+                       _zfcp_erp_port_reopen(port, clear, id, ref);
 }
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:
- *
- * context:    process (i.e. proc-fs or rmmod/insmod)
- *
- * note:       The caller of this routine ensures that the specified
- *             adapter has been shut down and that this operation
- *             has been completed. Thus, there are no pending erp_actions
- *             which would need to be handled here.
- */
-int
-zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
+static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
+                                     void *ref)
 {
-       int retval = 0;
-
-       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
-       up(&adapter->erp_ready_sem);
-       zfcp_rec_dbf_event_thread(2, adapter, 1);
-
-       wait_event(adapter->erp_thread_wqh,
-                  !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
-                                    &adapter->status));
-
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
-                         &adapter->status);
+       struct zfcp_unit *unit;
 
-       return retval;
+       list_for_each_entry(unit, &port->unit_list_head, list)
+               _zfcp_erp_unit_reopen(unit, clear, id, ref);
 }
 
-/*
- * purpose:    is run as a kernel thread,
- *             goes through list of error recovery actions of associated adapter
- *             and delegates single action to execution
- *
- * returns:    0
- */
-static int
-zfcp_erp_thread(void *data)
+static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
 {
-       struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
-       struct list_head *next;
-       struct zfcp_erp_action *erp_action;
-       unsigned long flags;
+       struct zfcp_adapter *adapter = act->adapter;
+       struct zfcp_port *port = act->port;
+       struct zfcp_unit *unit = act->unit;
+       u32 status = act->status;
 
-       daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter));
-       /* Block all signals */
-       siginitsetinv(&current->blocked, 0);
-       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-       wake_up(&adapter->erp_thread_wqh);
+       /* initiate follow-up actions depending on success of finished action */
+       switch (act->action) {
 
-       while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
-                                &adapter->status)) {
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               if (status == ZFCP_ERP_SUCCEEDED)
+                       _zfcp_erp_port_reopen_all(adapter, 0, 70, NULL);
+               else
+                       _zfcp_erp_adapter_reopen(adapter, 0, 71, NULL);
+               break;
 
-               write_lock_irqsave(&adapter->erp_lock, flags);
-               next = adapter->erp_ready_head.next;
-               write_unlock_irqrestore(&adapter->erp_lock, flags);
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+               if (status == ZFCP_ERP_SUCCEEDED)
+                       _zfcp_erp_port_reopen(port, 0, 72, NULL);
+               else
+                       _zfcp_erp_adapter_reopen(adapter, 0, 73, NULL);
+               break;
 
-               if (next != &adapter->erp_ready_head) {
-                       erp_action =
-                           list_entry(next, struct zfcp_erp_action, list);
-                       /*
-                        * process action (incl. [re]moving it
-                        * from 'ready' queue)
-                        */
-                       zfcp_erp_strategy(erp_action);
-               }
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+               if (status == ZFCP_ERP_SUCCEEDED)
+                       _zfcp_erp_unit_reopen_all(port, 0, 74, NULL);
+               else
+                       _zfcp_erp_port_forced_reopen(port, 0, 75, NULL);
+               break;
 
-               /*
-                * sleep as long as there is nothing to do, i.e.
-                * no action in 'ready' queue to be processed and
-                * thread is not to be killed
-                */
-               zfcp_rec_dbf_event_thread(4, adapter, 1);
-               down_interruptible(&adapter->erp_ready_sem);
-               zfcp_rec_dbf_event_thread(5, adapter, 1);
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               if (status != ZFCP_ERP_SUCCEEDED)
+                       _zfcp_erp_port_reopen(unit->port, 0, 76, NULL);
+               break;
        }
-
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-       wake_up(&adapter->erp_thread_wqh);
-
-       return 0;
 }
 
-/*
- * function:
- *
- * purpose:    drives single error recovery action and schedules higher and
- *             subordinate actions, if necessary
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_SUCCEEDED      - action finished successfully (deqd)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully (deqd)
- *             ZFCP_ERP_EXIT           - action finished (dequeued), offline
- *             ZFCP_ERP_DISMISSED      - action canceled (dequeued)
- */
-static int
-zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 {
-       int retval = 0;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-       struct zfcp_port *port = erp_action->port;
-       struct zfcp_unit *unit = erp_action->unit;
-       int action = erp_action->action;
-       u32 status = erp_action->status;
        unsigned long flags;
 
-       /* serialise dismissing, timing out, moving, enqueueing */
        read_lock_irqsave(&zfcp_data.config_lock, flags);
-       write_lock(&adapter->erp_lock);
-
-       /* dequeue dismissed action and leave, if required */
-       retval = zfcp_erp_strategy_check_action(erp_action, retval);
-       if (retval == ZFCP_ERP_DISMISSED) {
-               goto unlock;
+       read_lock(&adapter->erp_lock);
+       if (list_empty(&adapter->erp_ready_head) &&
+           list_empty(&adapter->erp_running_head)) {
+                       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+                                         &adapter->status);
+                       wake_up(&adapter->erp_done_wqh);
        }
+       read_unlock(&adapter->erp_lock);
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+}
 
-       /*
-        * move action to 'running' queue before processing it
-        * (to avoid a race condition regarding moving the
-        * action to the 'running' queue and back)
-        */
-       zfcp_erp_action_to_running(erp_action);
+static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
+{
+       if (zfcp_qdio_open(act->adapter))
+               return ZFCP_ERP_FAILED;
+       init_waitqueue_head(&act->adapter->request_wq);
+       atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
+       return ZFCP_ERP_SUCCEEDED;
+}
 
-       /*
-        * try to process action as far as possible,
-        * no lock to allow for blocking operations (kmalloc, qdio, ...),
-        * afterwards the lock is required again for the following reasons:
-        * - dequeueing of finished action and enqueueing of
-        *   follow-up actions must be atomic so that any other
-        *   reopen-routine does not believe there is nothing to do
-        *   and that it is safe to enqueue something else,
-        * - we want to force any control thread which is dismissing
-        *   actions to finish this before we decide about
-        *   necessary steps to be taken here further
-        */
-       write_unlock(&adapter->erp_lock);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-       retval = zfcp_erp_strategy_do_action(erp_action);
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       write_lock(&adapter->erp_lock);
+static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
+{
+       struct zfcp_port *port;
+       port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
+                                adapter->peer_d_id);
+       if (IS_ERR(port)) /* error or port already attached */
+               return;
+       _zfcp_erp_port_reopen(port, 0, 150, NULL);
+}
 
-       /*
-        * check for dismissed status again to avoid follow-up actions,
-        * failing of targets and so on for dismissed actions,
-        * we go through down() here because there has been an up()
-        */
-       if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
-               retval = ZFCP_ERP_CONTINUES;
+static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+{
+       int retries;
+       int sleep = 1;
+       struct zfcp_adapter *adapter = erp_action->adapter;
 
-       switch (retval) {
-       case ZFCP_ERP_NOMEM:
-               /* no memory to continue immediately, let it sleep */
-               if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
-                       ++adapter->erp_low_mem_count;
-                       erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
-               }
-               /* This condition is true if there is no memory available
-                  for any erp_action on this adapter. This implies that there
-                  are no elements in the memory pool(s) left for erp_actions.
-                  This might happen if an erp_action that used a memory pool
-                  element was timed out.
-                */
-               if (adapter->erp_total_count == adapter->erp_low_mem_count) {
-                       ZFCP_LOG_NORMAL("error: no mempool elements available, "
-                                       "restarting I/O on adapter %s "
-                                       "to free mempool\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                       zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL);
-               } else {
-               retval = zfcp_erp_strategy_memwait(erp_action);
-               }
-               goto unlock;
-       case ZFCP_ERP_CONTINUES:
-               /* leave since this action runs asynchronously */
-               if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
-                       --adapter->erp_low_mem_count;
-                       erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+
+       for (retries = 7; retries; retries--) {
+               atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+                                 &adapter->status);
+               write_lock_irq(&adapter->erp_lock);
+               zfcp_erp_action_to_running(erp_action);
+               write_unlock_irq(&adapter->erp_lock);
+               if (zfcp_fsf_exchange_config_data(erp_action)) {
+                       atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+                                         &adapter->status);
+                       return ZFCP_ERP_FAILED;
                }
-               goto unlock;
-       }
-       /* ok, finished action (whatever its result is) */
 
-       /* check for unrecoverable targets */
-       retval = zfcp_erp_strategy_check_target(erp_action, retval);
+               zfcp_rec_dbf_event_thread_lock(6, adapter);
+               down(&adapter->erp_ready_sem);
+               zfcp_rec_dbf_event_thread_lock(7, adapter);
+               if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
+                       break;
 
-       /* action must be dequeued (here to allow for further ones) */
-       zfcp_erp_action_dequeue(erp_action);
+               if (!(atomic_read(&adapter->status) &
+                     ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
+                       break;
 
-       /*
-        * put this target through the erp mill again if someone has
-        * requested to change the status of a target being online
-        * to offline or the other way around
-        * (old retval is preserved if nothing has to be done here)
-        */
-       retval = zfcp_erp_strategy_statechange(action, status, adapter,
-                                              port, unit, retval);
-
-       /*
-        * leave if target is in permanent error state or if
-        * action is repeated in order to process state change
-        */
-       if (retval == ZFCP_ERP_EXIT) {
-               goto unlock;
+               ssleep(sleep);
+               sleep *= 2;
        }
 
-       /* trigger follow up actions */
-       zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval);
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+                         &adapter->status);
 
- unlock:
-       write_unlock(&adapter->erp_lock);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+       if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
+               return ZFCP_ERP_FAILED;
 
-       if (retval != ZFCP_ERP_CONTINUES)
-               zfcp_erp_action_cleanup(action, adapter, port, unit, retval);
-
-       /*
-        * a few tasks remain when the erp queues are empty
-        * (don't do that if the last action evaluated was dismissed
-        * since this clearly indicates that there is more to come) :
-        * - close the name server port if it is open yet
-        *   (enqueues another [probably] final action)
-        * - otherwise, wake up whoever wants to be woken when we are
-        *   done with erp
-        */
-       if (retval != ZFCP_ERP_DISMISSED)
-               zfcp_erp_strategy_check_queues(adapter);
+       if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+               zfcp_erp_enqueue_ptp_port(adapter);
 
-       return retval;
+       return ZFCP_ERP_SUCCEEDED;
 }
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_DISMISSED      - if action has been dismissed
- *             retval                  - otherwise
- */
-static int
-zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
+static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
 {
-       zfcp_erp_strategy_check_fsfreq(erp_action);
+       int ret;
+       struct zfcp_adapter *adapter = act->adapter;
 
-       if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
-               zfcp_erp_action_dequeue(erp_action);
-               retval = ZFCP_ERP_DISMISSED;
-       }
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
 
-       return retval;
+       write_lock_irq(&adapter->erp_lock);
+       zfcp_erp_action_to_running(act);
+       write_unlock_irq(&adapter->erp_lock);
+
+       ret = zfcp_fsf_exchange_port_data(act);
+       if (ret == -EOPNOTSUPP)
+               return ZFCP_ERP_SUCCEEDED;
+       if (ret)
+               return ZFCP_ERP_FAILED;
+
+       zfcp_rec_dbf_event_thread_lock(8, adapter);
+       down(&adapter->erp_ready_sem);
+       zfcp_rec_dbf_event_thread_lock(9, adapter);
+       if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+               return ZFCP_ERP_FAILED;
+
+       return ZFCP_ERP_SUCCEEDED;
 }
 
-static int
-zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
 {
-       int retval = ZFCP_ERP_FAILED;
+       if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
+               return ZFCP_ERP_FAILED;
 
-       /*
-        * try to execute/continue action as far as possible,
-        * note: no lock in subsequent strategy routines
-        * (this allows these routine to call schedule, e.g.
-        * kmalloc with such flags or qdio_initialize & friends)
-        * Note: in case of timeout, the separate strategies will fail
-        * anyhow. No need for a special action. Even worse, a nameserver
-        * failure would not wake up waiting ports without the call.
-        */
-       switch (erp_action->action) {
+       if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
+               return ZFCP_ERP_FAILED;
 
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               retval = zfcp_erp_adapter_strategy(erp_action);
-               break;
+       atomic_set(&act->adapter->stat_miss, 16);
+       if (zfcp_status_read_refill(act->adapter))
+               return ZFCP_ERP_FAILED;
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-               retval = zfcp_erp_port_forced_strategy(erp_action);
-               break;
+       return ZFCP_ERP_SUCCEEDED;
+}
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               retval = zfcp_erp_port_strategy(erp_action);
-               break;
+static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
+                                            int close)
+{
+       int retval = ZFCP_ERP_SUCCEEDED;
+       struct zfcp_adapter *adapter = act->adapter;
 
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               retval = zfcp_erp_unit_strategy(erp_action);
-               break;
+       if (close)
+               goto close_only;
 
-       default:
-               ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
-                               "adapter %s (action=%d)\n",
-                               zfcp_get_busid_by_adapter(erp_action->adapter),
-                               erp_action->action);
-       }
+       retval = zfcp_erp_adapter_strategy_open_qdio(act);
+       if (retval != ZFCP_ERP_SUCCEEDED)
+               goto failed_qdio;
+
+       retval = zfcp_erp_adapter_strategy_open_fsf(act);
+       if (retval != ZFCP_ERP_SUCCEEDED)
+               goto failed_openfcp;
+
+       atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
+       schedule_work(&act->adapter->scan_work);
+
+       return ZFCP_ERP_SUCCEEDED;
+
+ close_only:
+       atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+                         &act->adapter->status);
 
+ failed_openfcp:
+       /* close queues to ensure that buffers are not accessed by adapter */
+       zfcp_qdio_close(adapter);
+       zfcp_fsf_req_dismiss_all(adapter);
+       adapter->fsf_req_seq_no = 0;
+       /* all ports and units are closed */
+       zfcp_erp_modify_adapter_status(adapter, 24, NULL,
+                                      ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
+ failed_qdio:
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+                         ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+                         ZFCP_STATUS_ADAPTER_XPORT_OK,
+                         &act->adapter->status);
        return retval;
 }
 
-/*
- * function:
- *
- * purpose:    triggers retry of this action after a certain amount of time
- *             by means of timer provided by erp_action
- *
- * returns:    ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
- */
-static int
-zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
 {
-       int retval = ZFCP_ERP_CONTINUES;
+       int retval;
 
-       init_timer(&erp_action->timer);
-       erp_action->timer.function = zfcp_erp_memwait_handler;
-       erp_action->timer.data = (unsigned long) erp_action;
-       erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT;
-       add_timer(&erp_action->timer);
+       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
+       zfcp_erp_adapter_strategy_generic(act, 1); /* close */
+       atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
+       if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+               return ZFCP_ERP_EXIT;
+
+       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
+       retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
+       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
+
+       if (retval == ZFCP_ERP_FAILED)
+               ssleep(8);
 
        return retval;
 }
 
-/*
- * function:    zfcp_erp_adapter_failed
- *
- * purpose:     sets the adapter and all underlying devices to ERP_FAILED
- *
- */
-void
-zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
+static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
 {
-       zfcp_erp_modify_adapter_status(adapter, id, ref,
-                                      ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-       ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
-                       zfcp_get_busid_by_adapter(adapter));
+       int retval;
+
+       retval = zfcp_fsf_close_physical_port(act);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
+       if (retval)
+               return ZFCP_ERP_FAILED;
+
+       return ZFCP_ERP_CONTINUES;
 }
 
-/*
- * function:    zfcp_erp_port_failed
- *
- * purpose:     sets the port and all underlying devices to ERP_FAILED
- *
- */
-void
-zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
+static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
 {
-       zfcp_erp_modify_port_status(port, id, ref,
-                                   ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
+                         ZFCP_STATUS_COMMON_CLOSING |
+                         ZFCP_STATUS_COMMON_ACCESS_DENIED |
+                         ZFCP_STATUS_PORT_DID_DID |
+                         ZFCP_STATUS_PORT_PHYS_CLOSING |
+                         ZFCP_STATUS_PORT_INVALID_WWPN,
+                         &port->status);
+}
 
-       if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
-               ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
-                               "port d_id=0x%06x)\n",
-                               zfcp_get_busid_by_port(port), port->d_id);
-       else
-               ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
-                               zfcp_get_busid_by_port(port), port->wwpn);
+static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+{
+       struct zfcp_port *port = erp_action->port;
+       int status = atomic_read(&port->status);
+
+       switch (erp_action->step) {
+       case ZFCP_ERP_STEP_UNINITIALIZED:
+               zfcp_erp_port_strategy_clearstati(port);
+               if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
+                   (status & ZFCP_STATUS_COMMON_OPEN))
+                       return zfcp_erp_port_forced_strategy_close(erp_action);
+               else
+                       return ZFCP_ERP_FAILED;
+
+       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+               if (status & ZFCP_STATUS_PORT_PHYS_OPEN)
+                       return ZFCP_ERP_SUCCEEDED;
+       }
+       return ZFCP_ERP_FAILED;
 }
 
-/*
- * function:    zfcp_erp_unit_failed
- *
- * purpose:     sets the unit to ERP_FAILED
- *
- */
-void
-zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
+static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
 {
-       zfcp_erp_modify_unit_status(unit, id, ref,
-                                   ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+       int retval;
 
-       ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
-                       " on adapter %s\n", unit->fcp_lun,
-                       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+       retval = zfcp_fsf_close_port(erp_action);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
+       if (retval)
+               return ZFCP_ERP_FAILED;
+       return ZFCP_ERP_CONTINUES;
 }
 
-/*
- * function:   zfcp_erp_strategy_check_target
- *
- * purpose:    increments the erp action count on the device currently in
- *              recovery if the action failed or resets the count in case of
- *              success. If a maximum count is exceeded the device is marked
- *              as ERP_FAILED.
- *             The 'blocked' state of a target which has been recovered
- *              successfully is reset.
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (not considered)
- *             ZFCP_ERP_SUCCEEDED      - action finished successfully
- *             ZFCP_ERP_EXIT           - action failed and will not continue
- */
-static int
-zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result)
+static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
 {
-       struct zfcp_adapter *adapter = erp_action->adapter;
-       struct zfcp_port *port = erp_action->port;
-       struct zfcp_unit *unit = erp_action->unit;
+       int retval;
 
-       switch (erp_action->action) {
+       retval = zfcp_fsf_open_port(erp_action);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
+       if (retval)
+               return ZFCP_ERP_FAILED;
+       return ZFCP_ERP_CONTINUES;
+}
 
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               result = zfcp_erp_strategy_check_unit(unit, result);
-               break;
+static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
+{
+       unsigned long flags;
+       struct zfcp_adapter *adapter = ns_act->adapter;
+       struct zfcp_erp_action *act, *tmp;
+       int status;
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               result = zfcp_erp_strategy_check_port(port, result);
-               break;
+       read_lock_irqsave(&adapter->erp_lock, flags);
+       list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
+               if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
+                       status = atomic_read(&adapter->nameserver_port->status);
+                       if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
+                               zfcp_erp_port_failed(act->port, 27, NULL);
+                       zfcp_erp_action_ready(act);
+               }
+       }
+       read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
 
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               result = zfcp_erp_strategy_check_adapter(adapter, result);
-               break;
+static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
+{
+       int retval;
+
+       switch (act->step) {
+       case ZFCP_ERP_STEP_UNINITIALIZED:
+       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+       case ZFCP_ERP_STEP_PORT_CLOSING:
+               return zfcp_erp_port_strategy_open_port(act);
+
+       case ZFCP_ERP_STEP_PORT_OPENING:
+               if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
+                       retval = ZFCP_ERP_SUCCEEDED;
+               else
+                       retval = ZFCP_ERP_FAILED;
+               /* this is needed anyway  */
+               zfcp_erp_port_strategy_open_ns_wake(act);
+               return retval;
+
+       default:
+               return ZFCP_ERP_FAILED;
        }
+}
 
-       return result;
+static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
+{
+       int retval;
+
+       retval = zfcp_fc_ns_gid_pn_request(act);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
+       if (retval)
+               return ZFCP_ERP_FAILED;
+       return ZFCP_ERP_CONTINUES;
 }
 
-static int
-zfcp_erp_strategy_statechange(int action,
-                             u32 status,
-                             struct zfcp_adapter *adapter,
-                             struct zfcp_port *port,
-                             struct zfcp_unit *unit, int retval)
+static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
 {
-       switch (action) {
+       struct zfcp_adapter *adapter = act->adapter;
+       struct zfcp_port *port = act->port;
 
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               if (zfcp_erp_strategy_statechange_detected(&adapter->status,
-                                                          status)) {
-                       zfcp_erp_adapter_reopen_internal(adapter,
-                                               ZFCP_STATUS_COMMON_ERP_FAILED,
-                                               67, NULL);
-                       retval = ZFCP_ERP_EXIT;
+       if (port->wwpn != adapter->peer_wwpn) {
+               dev_err(&adapter->ccw_device->dev,
+                       "Failed to open port 0x%016Lx, "
+                       "Peer WWPN 0x%016Lx does not "
+                       "match.\n", port->wwpn,
+                       adapter->peer_wwpn);
+               zfcp_erp_port_failed(port, 25, NULL);
+               return ZFCP_ERP_FAILED;
+       }
+       port->d_id = adapter->peer_d_id;
+       atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
+       return zfcp_erp_port_strategy_open_port(act);
+}
+
+static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+{
+       struct zfcp_adapter *adapter = act->adapter;
+       struct zfcp_port *port = act->port;
+       struct zfcp_port *ns_port = adapter->nameserver_port;
+       int p_status = atomic_read(&port->status);
+
+       switch (act->step) {
+       case ZFCP_ERP_STEP_UNINITIALIZED:
+       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+       case ZFCP_ERP_STEP_PORT_CLOSING:
+               if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+                       return zfcp_erp_open_ptp_port(act);
+               if (!ns_port) {
+                       dev_err(&adapter->ccw_device->dev,
+                               "Nameserver port unavailable.\n");
+                       return ZFCP_ERP_FAILED;
                }
-               break;
+               if (!(atomic_read(&ns_port->status) &
+                     ZFCP_STATUS_COMMON_UNBLOCKED)) {
+                       /* nameserver port may live again */
+                       atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
+                                       &ns_port->status);
+                       if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
+                               act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
+                               return ZFCP_ERP_CONTINUES;
+                       }
+                       return ZFCP_ERP_FAILED;
+               }
+               /* else nameserver port is already open, fall through */
+       case ZFCP_ERP_STEP_NAMESERVER_OPEN:
+               if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
+                       return ZFCP_ERP_FAILED;
+               return zfcp_erp_port_strategy_open_lookup(act);
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               if (zfcp_erp_strategy_statechange_detected(&port->status,
-                                                          status)) {
-                       zfcp_erp_port_reopen_internal(port,
-                                               ZFCP_STATUS_COMMON_ERP_FAILED,
-                                               68, NULL);
-                       retval = ZFCP_ERP_EXIT;
+       case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
+               if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
+                       if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
+                               zfcp_erp_port_failed(port, 26, NULL);
+                               return ZFCP_ERP_EXIT;
+                       }
+                       return ZFCP_ERP_FAILED;
                }
+               return zfcp_erp_port_strategy_open_port(act);
+
+       case ZFCP_ERP_STEP_PORT_OPENING:
+               /* D_ID might have changed during open */
+               if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
+                   (p_status & ZFCP_STATUS_PORT_DID_DID))
+                       return ZFCP_ERP_SUCCEEDED;
+               /* fall through otherwise */
+       }
+       return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
+{
+       if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
+               return zfcp_erp_port_strategy_open_nameserver(act);
+       return zfcp_erp_port_strategy_open_common(act);
+}
+
+static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+{
+       struct zfcp_port *port = erp_action->port;
+
+       switch (erp_action->step) {
+       case ZFCP_ERP_STEP_UNINITIALIZED:
+               zfcp_erp_port_strategy_clearstati(port);
+               if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+                       return zfcp_erp_port_strategy_close(erp_action);
                break;
 
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               if (zfcp_erp_strategy_statechange_detected(&unit->status,
-                                                          status)) {
-                       zfcp_erp_unit_reopen_internal(unit,
-                                               ZFCP_STATUS_COMMON_ERP_FAILED,
-                                               69, NULL);
-                       retval = ZFCP_ERP_EXIT;
-               }
+       case ZFCP_ERP_STEP_PORT_CLOSING:
+               if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+                       return ZFCP_ERP_FAILED;
                break;
        }
+       if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+               return ZFCP_ERP_EXIT;
+       else
+               return zfcp_erp_port_strategy_open(erp_action);
 
-       return retval;
+       return ZFCP_ERP_FAILED;
+}
+
+static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
+{
+       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
+                         ZFCP_STATUS_COMMON_CLOSING |
+                         ZFCP_STATUS_COMMON_ACCESS_DENIED |
+                         ZFCP_STATUS_UNIT_SHARED |
+                         ZFCP_STATUS_UNIT_READONLY,
+                         &unit->status);
+}
+
+static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
+{
+       int retval = zfcp_fsf_close_unit(erp_action);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
+       if (retval)
+               return ZFCP_ERP_FAILED;
+       return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
+{
+       int retval = zfcp_fsf_open_unit(erp_action);
+       if (retval == -ENOMEM)
+               return ZFCP_ERP_NOMEM;
+       erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
+       if (retval)
+                       return ZFCP_ERP_FAILED;
+       return ZFCP_ERP_CONTINUES;
 }
 
-static int
-zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
+static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
 {
-       return
-           /* take it online */
-           (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
-            (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
-           /* take it offline */
-           (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
-            !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
+       struct zfcp_unit *unit = erp_action->unit;
+
+       switch (erp_action->step) {
+       case ZFCP_ERP_STEP_UNINITIALIZED:
+               zfcp_erp_unit_strategy_clearstati(unit);
+               if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+                       return zfcp_erp_unit_strategy_close(erp_action);
+               /* already closed, fall through */
+       case ZFCP_ERP_STEP_UNIT_CLOSING:
+               if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+                       return ZFCP_ERP_FAILED;
+               if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+                       return ZFCP_ERP_EXIT;
+               return zfcp_erp_unit_strategy_open(erp_action);
+
+       case ZFCP_ERP_STEP_UNIT_OPENING:
+               if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+                       return ZFCP_ERP_SUCCEEDED;
+       }
+       return ZFCP_ERP_FAILED;
 }
 
-static int
-zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
+static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
 {
        switch (result) {
        case ZFCP_ERP_SUCCEEDED :
@@ -1455,1605 +1068,614 @@ zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
                if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
                        zfcp_erp_unit_failed(unit, 21, NULL);
                break;
-       case ZFCP_ERP_EXIT :
-               /* nothing */
-               break;
        }
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
-               zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
+       if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+               zfcp_erp_unit_block(unit, 0);
                result = ZFCP_ERP_EXIT;
        }
-
        return result;
 }
 
-static int
-zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
 {
        switch (result) {
        case ZFCP_ERP_SUCCEEDED :
                atomic_set(&port->erp_counter, 0);
                zfcp_erp_port_unblock(port);
                break;
+
        case ZFCP_ERP_FAILED :
+               if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
+                       zfcp_erp_port_block(port, 0);
+                       result = ZFCP_ERP_EXIT;
+               }
                atomic_inc(&port->erp_counter);
                if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
                        zfcp_erp_port_failed(port, 22, NULL);
                break;
-       case ZFCP_ERP_EXIT :
-               /* nothing */
-               break;
        }
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
-               zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+               zfcp_erp_port_block(port, 0);
                result = ZFCP_ERP_EXIT;
        }
-
        return result;
 }
 
-static int
-zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
+static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
+                                          int result)
 {
        switch (result) {
        case ZFCP_ERP_SUCCEEDED :
                atomic_set(&adapter->erp_counter, 0);
                zfcp_erp_adapter_unblock(adapter);
                break;
+
        case ZFCP_ERP_FAILED :
                atomic_inc(&adapter->erp_counter);
                if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
                        zfcp_erp_adapter_failed(adapter, 23, NULL);
                break;
-       case ZFCP_ERP_EXIT :
-               /* nothing */
-               break;
        }
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
-               zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+               zfcp_erp_adapter_block(adapter, 0);
                result = ZFCP_ERP_EXIT;
        }
-
        return result;
 }
 
-struct zfcp_erp_add_work {
-       struct zfcp_unit  *unit;
-       struct work_struct work;
-};
-
-/**
- * zfcp_erp_scsi_scan
- * @data: pointer to a struct zfcp_erp_add_work
- *
- * Registers a logical unit with the SCSI stack.
- */
-static void zfcp_erp_scsi_scan(struct work_struct *work)
+static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
+                                         int result)
 {
-       struct zfcp_erp_add_work *p =
-               container_of(work, struct zfcp_erp_add_work, work);
-       struct zfcp_unit *unit = p->unit;
-       struct fc_rport *rport = unit->port->rport;
-       scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
-                        unit->scsi_lun, 0);
-       atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
-       zfcp_unit_put(unit);
-       kfree(p);
-}
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_port *port = erp_action->port;
+       struct zfcp_unit *unit = erp_action->unit;
 
-/**
- * zfcp_erp_schedule_work
- * @unit: pointer to unit which should be registered with SCSI stack
- *
- * Schedules work which registers a unit with the SCSI stack
- */
-static void
-zfcp_erp_schedule_work(struct zfcp_unit *unit)
-{
-       struct zfcp_erp_add_work *p;
+       switch (erp_action->action) {
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p) {
-               ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
-                               "the FCP-LUN 0x%Lx connected to "
-                               "the port with WWPN 0x%Lx connected to "
-                               "the adapter %s with the SCSI stack.\n",
-                               unit->fcp_lun,
-                               unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit));
-               return;
-       }
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               result = zfcp_erp_strategy_check_unit(unit, result);
+               break;
 
-       zfcp_unit_get(unit);
-       atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
-       INIT_WORK(&p->work, zfcp_erp_scsi_scan);
-       p->unit = unit;
-       schedule_work(&p->work);
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+               result = zfcp_erp_strategy_check_port(port, result);
+               break;
+
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               result = zfcp_erp_strategy_check_adapter(adapter, result);
+               break;
+       }
+       return result;
 }
 
-/*
- * function:
- *
- * purpose:    remaining things in good cases,
- *             escalation in bad cases
- *
- * returns:
- */
-static int
-zfcp_erp_strategy_followup_actions(int action,
-                                  struct zfcp_adapter *adapter,
-                                  struct zfcp_port *port,
-                                  struct zfcp_unit *unit, int status)
+static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
 {
-       /* initiate follow-up actions depending on success of finished action */
-       switch (action) {
+       int status = atomic_read(target_status);
+
+       if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+           (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+               return 1; /* take it online */
+
+       if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
+           !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+               return 1; /* take it offline */
+
+       return 0;
+}
+
+static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+{
+       int action = act->action;
+       struct zfcp_adapter *adapter = act->adapter;
+       struct zfcp_port *port = act->port;
+       struct zfcp_unit *unit = act->unit;
+       u32 erp_status = act->status;
 
+       switch (action) {
        case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               if (status == ZFCP_ERP_SUCCEEDED)
-                       zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL);
-               else
-                       zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL);
+               if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
+                       _zfcp_erp_adapter_reopen(adapter,
+                                                ZFCP_STATUS_COMMON_ERP_FAILED,
+                                                67, NULL);
+                       return ZFCP_ERP_EXIT;
+               }
                break;
 
        case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-               if (status == ZFCP_ERP_SUCCEEDED)
-                       zfcp_erp_port_reopen_internal(port, 0, 72, NULL);
-               else
-                       zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL);
-               break;
-
        case ZFCP_ERP_ACTION_REOPEN_PORT:
-               if (status == ZFCP_ERP_SUCCEEDED)
-                       zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL);
-               else
-                       zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL);
+               if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
+                       _zfcp_erp_port_reopen(port,
+                                             ZFCP_STATUS_COMMON_ERP_FAILED,
+                                             68, NULL);
+                       return ZFCP_ERP_EXIT;
+               }
                break;
 
        case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               /* Nothing to do if status == ZFCP_ERP_SUCCEEDED */
-               if (status != ZFCP_ERP_SUCCEEDED)
-                       zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL);
+               if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
+                       _zfcp_erp_unit_reopen(unit,
+                                             ZFCP_STATUS_COMMON_ERP_FAILED,
+                                             69, NULL);
+                       return ZFCP_ERP_EXIT;
+               }
                break;
        }
-
-       return 0;
+       return ret;
 }
 
-static int
-zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
+static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 {
-       unsigned long flags;
+       struct zfcp_adapter *adapter = erp_action->adapter;
 
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       read_lock(&adapter->erp_lock);
-       if (list_empty(&adapter->erp_ready_head) &&
-           list_empty(&adapter->erp_running_head)) {
-                       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
-                                         &adapter->status);
-                       wake_up(&adapter->erp_done_wqh);
+       adapter->erp_total_count--;
+       if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+               adapter->erp_low_mem_count--;
+               erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
        }
-       read_unlock(&adapter->erp_lock);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
-       return 0;
-}
+       list_del(&erp_action->list);
+       zfcp_rec_dbf_event_action(144, erp_action);
 
-/**
- * zfcp_erp_wait - wait for completion of error recovery on an adapter
- * @adapter: adapter for which to wait for completion of its error recovery
- * Return: 0
- */
-int
-zfcp_erp_wait(struct zfcp_adapter *adapter)
-{
-       int retval = 0;
+       switch (erp_action->action) {
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+                                 &erp_action->unit->status);
+               break;
 
-       wait_event(adapter->erp_done_wqh,
-                  !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
-                                    &adapter->status));
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+                                 &erp_action->port->status);
+               break;
 
-       return retval;
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+                                 &erp_action->adapter->status);
+               break;
+       }
 }
 
-void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
-                                   void *ref, u32 mask, int set_or_clear)
-{
-       struct zfcp_port *port;
-       u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
-
-       if (set_or_clear == ZFCP_SET) {
-               changed = atomic_test_and_set_mask(mask, &adapter->status);
-       } else {
-               changed = atomic_test_and_clear_mask(mask, &adapter->status);
-               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
-                       atomic_set(&adapter->erp_counter, 0);
-       }
-       if (changed)
-               zfcp_rec_dbf_event_adapter(id, ref, adapter);
+struct zfcp_erp_add_work {
+       struct zfcp_unit  *unit;
+       struct work_struct work;
+};
 
-       /* Deal with all underlying devices, only pass common_mask */
-       if (common_mask)
-               list_for_each_entry(port, &adapter->port_list_head, list)
-                       zfcp_erp_modify_port_status(port, id, ref, common_mask,
-                                                   set_or_clear);
+static void zfcp_erp_scsi_scan(struct work_struct *work)
+{
+       struct zfcp_erp_add_work *p =
+               container_of(work, struct zfcp_erp_add_work, work);
+       struct zfcp_unit *unit = p->unit;
+       struct fc_rport *rport = unit->port->rport;
+       scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
+                        unit->scsi_lun, 0);
+       atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
+       zfcp_unit_put(unit);
+       kfree(p);
 }
 
-/*
- * function:   zfcp_erp_modify_port_status
- *
- * purpose:    sets the port and all underlying devices to ERP_FAILED
- *
- */
-void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
-                                u32 mask, int set_or_clear)
+static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
 {
-       struct zfcp_unit *unit;
-       u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
+       struct zfcp_erp_add_work *p;
 
-       if (set_or_clear == ZFCP_SET) {
-               changed = atomic_test_and_set_mask(mask, &port->status);
-       } else {
-               changed = atomic_test_and_clear_mask(mask, &port->status);
-               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
-                       atomic_set(&port->erp_counter, 0);
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p) {
+               dev_err(&unit->port->adapter->ccw_device->dev,
+                       "Out of resources. Could not register unit "
+                       "0x%016Lx on port 0x%016Lx with SCSI stack.\n",
+                       unit->fcp_lun, unit->port->wwpn);
+               return;
        }
-       if (changed)
-               zfcp_rec_dbf_event_port(id, ref, port);
 
-       /* Modify status of all underlying devices, only pass common mask */
-       if (common_mask)
-               list_for_each_entry(unit, &port->unit_list_head, list)
-                       zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
-                                                   set_or_clear);
+       zfcp_unit_get(unit);
+       atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
+       INIT_WORK(&p->work, zfcp_erp_scsi_scan);
+       p->unit = unit;
+       schedule_work(&p->work);
 }
 
-/*
- * function:   zfcp_erp_modify_unit_status
- *
- * purpose:    sets the unit to ERP_FAILED
- *
- */
-void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
-                                u32 mask, int set_or_clear)
+static void zfcp_erp_rport_register(struct zfcp_port *port)
 {
-       u32 changed;
-
-       if (set_or_clear == ZFCP_SET) {
-               changed = atomic_test_and_set_mask(mask, &unit->status);
-       } else {
-               changed = atomic_test_and_clear_mask(mask, &unit->status);
-               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
-                       atomic_set(&unit->erp_counter, 0);
-               }
+       struct fc_rport_identifiers ids;
+       ids.node_name = port->wwnn;
+       ids.port_name = port->wwpn;
+       ids.port_id = port->d_id;
+       ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+       port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+       if (!port->rport) {
+               dev_err(&port->adapter->ccw_device->dev,
+                       "Failed registration of rport "
+                       "0x%016Lx.\n", port->wwpn);
+               return;
        }
-       if (changed)
-               zfcp_rec_dbf_event_unit(id, ref, unit);
-}
-
-/*
- * function:
- *
- * purpose:    Wrapper for zfcp_erp_port_reopen_all_internal
- *              used to ensure the correct locking
- *
- * returns:    0       - initiated action successfully
- *             <0      - failed to initiate action
- */
-int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask,
-                            u8 id, void *ref)
-{
-       int retval;
-       unsigned long flags;
-
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       write_lock(&adapter->erp_lock);
-       retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id,
-                                                  ref);
-       write_unlock(&adapter->erp_lock);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
-       return retval;
+       scsi_target_unblock(&port->rport->dev);
+       port->rport->maxframe_size = port->maxframe_size;
+       port->rport->supported_classes = port->supported_classes;
 }
 
-static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter,
-                                            int clear_mask, u8 id, void *ref)
+static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
 {
-       int retval = 0;
        struct zfcp_port *port;
-
        list_for_each_entry(port, &adapter->port_list_head, list)
-               if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
-                       zfcp_erp_port_reopen_internal(port, clear_mask, id,
-                                                     ref);
-
-       return retval;
+               if (port->rport && !(atomic_read(&port->status) &
+                                       ZFCP_STATUS_PORT_WKA)) {
+                       fc_remote_port_delete(port->rport);
+                       port->rport = NULL;
+               }
 }
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:    FIXME
- */
-static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port,
-                                            int clear_mask, u8 id, void *ref)
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
-       int retval = 0;
-       struct zfcp_unit *unit;
+       struct zfcp_adapter *adapter = act->adapter;
+       struct zfcp_port *port = act->port;
+       struct zfcp_unit *unit = act->unit;
 
-       list_for_each_entry(unit, &port->unit_list_head, list)
-               zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref);
+       switch (act->action) {
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               if ((result == ZFCP_ERP_SUCCEEDED) &&
+                   !unit->device && port->rport) {
+                       atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
+                                       &unit->status);
+                       if (!(atomic_read(&unit->status) &
+                             ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
+                               zfcp_erp_schedule_work(unit);
+               }
+               zfcp_unit_put(unit);
+               break;
 
-       return retval;
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+               if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) {
+                       zfcp_port_put(port);
+                       return;
+               }
+               if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport)
+                       zfcp_erp_rport_register(port);
+               if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
+                       fc_remote_port_delete(port->rport);
+                       port->rport = NULL;
+               }
+               zfcp_port_put(port);
+               break;
+
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               if (result != ZFCP_ERP_SUCCEEDED)
+                       zfcp_erp_rports_del(adapter);
+               zfcp_adapter_put(adapter);
+               break;
+       }
 }
 
-/*
- * function:
- *
- * purpose:    this routine executes the 'Reopen Adapter' action
- *             (the entire action is processed synchronously, since
- *             there are no actions which might be run concurrently
- *             per definition)
- *
- * returns:    ZFCP_ERP_SUCCEEDED      - action finished successfully
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
 {
-       int retval;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       retval = zfcp_erp_adapter_strategy_close(erp_action);
-       if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
-               retval = ZFCP_ERP_EXIT;
-       else
-               retval = zfcp_erp_adapter_strategy_open(erp_action);
-
-       if (retval == ZFCP_ERP_FAILED) {
-               ZFCP_LOG_INFO("Waiting to allow the adapter %s "
-                             "to recover itself\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               ssleep(ZFCP_TYPE2_RECOVERY_TIME);
+       switch (erp_action->action) {
+       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+               return zfcp_erp_adapter_strategy(erp_action);
+       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+               return zfcp_erp_port_forced_strategy(erp_action);
+       case ZFCP_ERP_ACTION_REOPEN_PORT:
+               return zfcp_erp_port_strategy(erp_action);
+       case ZFCP_ERP_ACTION_REOPEN_UNIT:
+               return zfcp_erp_unit_strategy(erp_action);
        }
-
-       return retval;
+       return ZFCP_ERP_FAILED;
 }
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_SUCCEEDED      - action finished successfully
- *              ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 {
        int retval;
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       unsigned long flags;
 
-       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING,
-                       &erp_action->adapter->status);
-       retval = zfcp_erp_adapter_strategy_generic(erp_action, 1);
-       atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING,
-                         &erp_action->adapter->status);
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       write_lock(&adapter->erp_lock);
 
-       return retval;
-}
+       zfcp_erp_strategy_check_fsfreq(erp_action);
 
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_SUCCEEDED      - action finished successfully
- *              ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action)
-{
-       int retval;
+       if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
+               zfcp_erp_action_dequeue(erp_action);
+               retval = ZFCP_ERP_DISMISSED;
+               goto unlock;
+       }
 
-       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING,
-                       &erp_action->adapter->status);
-       retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
-       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING,
-                         &erp_action->adapter->status);
+       zfcp_erp_action_to_running(erp_action);
 
-       return retval;
-}
+       /* no lock to allow for blocking operations */
+       write_unlock(&adapter->erp_lock);
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+       retval = zfcp_erp_strategy_do_action(erp_action);
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       write_lock(&adapter->erp_lock);
 
-/*
- * function:    zfcp_register_adapter
- *
- * purpose:    allocate the irq associated with this devno and register
- *             the FSF adapter with the SCSI stack
- *
- * returns:
- */
-static int
-zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
-{
-       int retval = ZFCP_ERP_SUCCEEDED;
+       if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
+               retval = ZFCP_ERP_CONTINUES;
 
-       if (close)
-               goto close_only;
-
-       retval = zfcp_erp_adapter_strategy_open_qdio(erp_action);
-       if (retval != ZFCP_ERP_SUCCEEDED)
-               goto failed_qdio;
-
-       retval = zfcp_erp_adapter_strategy_open_fsf(erp_action);
-       if (retval != ZFCP_ERP_SUCCEEDED)
-               goto failed_openfcp;
-
-       atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status);
-       goto out;
-
- close_only:
-       atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
-                         &erp_action->adapter->status);
-
- failed_openfcp:
-       zfcp_close_fsf(erp_action->adapter);
- failed_qdio:
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
-                         ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-                         ZFCP_STATUS_ADAPTER_XPORT_OK,
-                         &erp_action->adapter->status);
- out:
-       return retval;
-}
-
-/*
- * function:    zfcp_qdio_init
- *
- * purpose:    setup QDIO operation for specified adapter
- *
- * returns:    0 - successful setup
- *             !0 - failed setup
- */
-static int
-zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-       int i;
-       volatile struct qdio_buffer_element *sbale;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
-               ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
-                               "adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               goto failed_sanity;
-       }
-
-       if (qdio_establish(&adapter->qdio_init_data) != 0) {
-               ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
-                             "on adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               goto failed_qdio_establish;
-       }
-
-       if (qdio_activate(adapter->ccw_device, 0) != 0) {
-               ZFCP_LOG_INFO("error: activation of QDIO queues failed "
-                             "on adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               goto failed_qdio_activate;
-       }
-
-       /*
-        * put buffers into response queue,
-        */
-       for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
-               sbale = &(adapter->response_queue.buffer[i]->element[0]);
-               sbale->length = 0;
-               sbale->flags = SBAL_FLAGS_LAST_ENTRY;
-               sbale->addr = NULL;
-       }
-
-       ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
-                      "queue_no=%i, index_in_queue=%i, count=%i)\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
-
-       retval = do_QDIO(adapter->ccw_device,
-                        QDIO_FLAG_SYNC_INPUT,
-                        0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
-
-       if (retval) {
-               ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
-                               retval);
-               goto failed_do_qdio;
-       } else {
-               adapter->response_queue.free_index = 0;
-               atomic_set(&adapter->response_queue.free_count, 0);
-               ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
-                              "response queue\n", QDIO_MAX_BUFFERS_PER_Q);
-       }
-       /* set index of first available SBALS / number of available SBALS */
-       adapter->request_queue.free_index = 0;
-       atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
-       adapter->request_queue.distance_from_int = 0;
-
-       /* initialize waitqueue used to wait for free SBALs in requests queue */
-       init_waitqueue_head(&adapter->request_wq);
-
-       /* ok, we did it - skip all cleanups for different failures */
-       atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
-       retval = ZFCP_ERP_SUCCEEDED;
-       goto out;
-
- failed_do_qdio:
-       /* NOP */
-
- failed_qdio_activate:
-       while (qdio_shutdown(adapter->ccw_device,
-                            QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
-               ssleep(1);
-
- failed_qdio_establish:
- failed_sanity:
-       retval = ZFCP_ERP_FAILED;
-
- out:
-       return retval;
-}
-
-
-static int
-zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
-       if (retval == ZFCP_ERP_FAILED)
-               return ZFCP_ERP_FAILED;
-
-       retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
-       if (retval == ZFCP_ERP_FAILED)
-               return ZFCP_ERP_FAILED;
-
-       return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
-}
-
-static int
-zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
-{
-       int retval = ZFCP_ERP_SUCCEEDED;
-       int retries;
-       int sleep = ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
-
-       for (retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES; retries; retries--) {
-               atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
-                                 &adapter->status);
-               ZFCP_LOG_DEBUG("Doing exchange config data\n");
-               write_lock_irq(&adapter->erp_lock);
-               zfcp_erp_action_to_running(erp_action);
-               write_unlock_irq(&adapter->erp_lock);
-               if (zfcp_fsf_exchange_config_data(erp_action)) {
-                       retval = ZFCP_ERP_FAILED;
-                       ZFCP_LOG_INFO("error:  initiation of exchange of "
-                                     "configuration data failed for "
-                                     "adapter %s\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       break;
-               }
-               ZFCP_LOG_DEBUG("Xchange underway\n");
-
-               /*
-                * Why this works:
-                * Both the normal completion handler as well as the timeout
-                * handler will do an 'up' when the 'exchange config data'
-                * request completes or times out. Thus, the signal to go on
-                * won't be lost utilizing this semaphore.
-                * Furthermore, this 'adapter_reopen' action is
-                * guaranteed to be the only action being there (highest action
-                * which prevents other actions from being created).
-                * Resulting from that, the wake signal recognized here
-                * _must_ be the one belonging to the 'exchange config
-                * data' request.
-                */
-               zfcp_rec_dbf_event_thread(6, adapter, 1);
-               down(&adapter->erp_ready_sem);
-               zfcp_rec_dbf_event_thread(7, adapter, 1);
-               if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
-                       ZFCP_LOG_INFO("error: exchange of configuration data "
-                                     "for adapter %s timed out\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       break;
-               }
-
-               if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
-                                    &adapter->status))
-                       break;
-
-               ZFCP_LOG_DEBUG("host connection still initialising... "
-                              "waiting and retrying...\n");
-               /* sleep a little bit before retry */
-               ssleep(sleep);
-               sleep *= 2;
-       }
-
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
-                         &adapter->status);
-
-       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-                             &adapter->status)) {
-               ZFCP_LOG_INFO("error: exchange of configuration data for "
-                             "adapter %s failed\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               retval = ZFCP_ERP_FAILED;
-       }
-
-       return retval;
-}
-
-static int
-zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
-{
-       int ret;
-       struct zfcp_adapter *adapter;
-
-       adapter = erp_action->adapter;
-       atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-
-       write_lock_irq(&adapter->erp_lock);
-       zfcp_erp_action_to_running(erp_action);
-       write_unlock_irq(&adapter->erp_lock);
-
-       ret = zfcp_fsf_exchange_port_data(erp_action);
-       if (ret == -EOPNOTSUPP) {
-               return ZFCP_ERP_SUCCEEDED;
-       } else if (ret) {
-               return ZFCP_ERP_FAILED;
-       }
-
-       ret = ZFCP_ERP_SUCCEEDED;
-       zfcp_rec_dbf_event_thread(8, adapter, 1);
-       down(&adapter->erp_ready_sem);
-       zfcp_rec_dbf_event_thread(9, adapter, 1);
-       if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
-               ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
-                             "%s)\n", zfcp_get_busid_by_adapter(adapter));
-               ret = ZFCP_ERP_FAILED;
-       }
-
-       /* don't treat as error for the sake of compatibility */
-       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
-               ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
-                             "%s\n", zfcp_get_busid_by_adapter(adapter));
-
-       return ret;
-}
-
-static int
-zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
-                                             *erp_action)
-{
-       int retval = ZFCP_ERP_SUCCEEDED;
-       int temp_ret;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-       int i;
-
-       adapter->status_read_failed = 0;
-       for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
-               temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL);
-               if (temp_ret < 0) {
-                       ZFCP_LOG_INFO("error: set-up of unsolicited status "
-                                     "notification failed on adapter %s\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       retval = ZFCP_ERP_FAILED;
-                       i--;
-                       break;
-               }
-       }
-
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:    this routine executes the 'Reopen Physical Port' action
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_SUCCEEDED      - action finished successfully
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
-{
-       int retval = ZFCP_ERP_FAILED;
-       struct zfcp_port *port = erp_action->port;
-
-       switch (erp_action->step) {
-
-               /*
-                * FIXME:
-                * the ULP spec. begs for waiting for outstanding commands
-                */
-       case ZFCP_ERP_STEP_UNINITIALIZED:
-               zfcp_erp_port_strategy_clearstati(port);
-               /*
-                * it would be sufficient to test only the normal open flag
-                * since the phys. open flag cannot be set if the normal
-                * open flag is unset - however, this is for readability ...
-                */
-               if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN |
-                                     ZFCP_STATUS_COMMON_OPEN),
-                                    &port->status)) {
-                       ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
-                                      "close physical\n", port->wwpn);
-                       retval =
-                           zfcp_erp_port_forced_strategy_close(erp_action);
-               } else
-                       retval = ZFCP_ERP_FAILED;
-               break;
-
-       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
-               if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN,
-                                    &port->status)) {
-                       ZFCP_LOG_DEBUG("close physical failed for port "
-                                      "0x%016Lx\n", port->wwpn);
-                       retval = ZFCP_ERP_FAILED;
-               } else
-                       retval = ZFCP_ERP_SUCCEEDED;
-               break;
-       }
-
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:    this routine executes the 'Reopen Port' action
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_SUCCEEDED      - action finished successfully
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
-{
-       int retval = ZFCP_ERP_FAILED;
-       struct zfcp_port *port = erp_action->port;
-
-       switch (erp_action->step) {
-
-               /*
-                * FIXME:
-                * the ULP spec. begs for waiting for outstanding commands
-                */
-       case ZFCP_ERP_STEP_UNINITIALIZED:
-               zfcp_erp_port_strategy_clearstati(port);
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
-                       ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
-                                      "close\n", port->wwpn);
-                       retval = zfcp_erp_port_strategy_close(erp_action);
-                       goto out;
-               }               /* else it's already closed, open it */
-               break;
-
-       case ZFCP_ERP_STEP_PORT_CLOSING:
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
-                       ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n",
-                                      port->wwpn);
-                       retval = ZFCP_ERP_FAILED;
-                       goto out;
-               }               /* else it's closed now, open it */
-               break;
-       }
-       if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
-               retval = ZFCP_ERP_EXIT;
-       else
-               retval = zfcp_erp_port_strategy_open(erp_action);
-
- out:
-       return retval;
-}
-
-static int
-zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       if (atomic_test_mask(ZFCP_STATUS_PORT_WKA,
-                            &erp_action->port->status))
-               retval = zfcp_erp_port_strategy_open_nameserver(erp_action);
-       else
-               retval = zfcp_erp_port_strategy_open_common(erp_action);
-
-       return retval;
-}
-
-static int
-zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
-{
-       int retval = 0;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-       struct zfcp_port *port = erp_action->port;
-
-       switch (erp_action->step) {
-
-       case ZFCP_ERP_STEP_UNINITIALIZED:
-       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
-       case ZFCP_ERP_STEP_PORT_CLOSING:
-               if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) {
-                       if (port->wwpn != adapter->peer_wwpn) {
-                               ZFCP_LOG_NORMAL("Failed to open port 0x%016Lx "
-                                               "on adapter %s.\nPeer WWPN "
-                                               "0x%016Lx does not match\n",
-                                               port->wwpn,
-                                               zfcp_get_busid_by_adapter(adapter),
-                                               adapter->peer_wwpn);
-                               zfcp_erp_port_failed(port, 25, NULL);
-                               retval = ZFCP_ERP_FAILED;
-                               break;
-                       }
-                       port->d_id = adapter->peer_d_id;
-                       atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
-                       retval = zfcp_erp_port_strategy_open_port(erp_action);
-                       break;
-               }
-               if (!(adapter->nameserver_port)) {
-                       retval = zfcp_nameserver_enqueue(adapter);
-                       if (retval != 0) {
-                               ZFCP_LOG_NORMAL("error: nameserver port "
-                                               "unavailable for adapter %s\n",
-                                               zfcp_get_busid_by_adapter(adapter));
-                               retval = ZFCP_ERP_FAILED;
-                               break;
-                       }
-               }
-               if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                                     &adapter->nameserver_port->status)) {
-                       ZFCP_LOG_DEBUG("nameserver port is not open -> open "
-                                      "nameserver port\n");
-                       /* nameserver port may live again */
-                       atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
-                                       &adapter->nameserver_port->status);
-                       if (zfcp_erp_port_reopen(adapter->nameserver_port, 0,
-                                                77, erp_action) >= 0) {
-                               erp_action->step =
-                                       ZFCP_ERP_STEP_NAMESERVER_OPEN;
-                               retval = ZFCP_ERP_CONTINUES;
-                       } else
-                               retval = ZFCP_ERP_FAILED;
-                       break;
-               }
-               /* else nameserver port is already open, fall through */
-       case ZFCP_ERP_STEP_NAMESERVER_OPEN:
-               if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
-                                     &adapter->nameserver_port->status)) {
-                       ZFCP_LOG_DEBUG("open failed for nameserver port\n");
-                       retval = ZFCP_ERP_FAILED;
-               } else {
-                       ZFCP_LOG_DEBUG("nameserver port is open -> "
-                                      "nameserver look-up for port 0x%016Lx\n",
-                                      port->wwpn);
-                       retval = zfcp_erp_port_strategy_open_common_lookup
-                               (erp_action);
-               }
-               break;
-
-       case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
-               if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
-                       if (atomic_test_mask
-                           (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
-                               ZFCP_LOG_DEBUG("nameserver look-up failed "
-                                              "for port 0x%016Lx "
-                                              "(misconfigured WWPN?)\n",
-                                              port->wwpn);
-                               zfcp_erp_port_failed(port, 26, NULL);
-                               retval = ZFCP_ERP_EXIT;
-                       } else {
-                               ZFCP_LOG_DEBUG("nameserver look-up failed for "
-                                              "port 0x%016Lx\n", port->wwpn);
-                               retval = ZFCP_ERP_FAILED;
-                       }
-               } else {
-                       ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> "
-                                      "trying open\n", port->wwpn, port->d_id);
-                       retval = zfcp_erp_port_strategy_open_port(erp_action);
-               }
-               break;
-
-       case ZFCP_ERP_STEP_PORT_OPENING:
-               /* D_ID might have changed during open */
-               if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN |
-                                     ZFCP_STATUS_PORT_DID_DID),
-                                    &port->status)) {
-                       ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn);
-                       retval = ZFCP_ERP_SUCCEEDED;
-               } else {
-                       ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n",
-                                      port->wwpn);
-                       retval = ZFCP_ERP_FAILED;
-               }
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
-                               erp_action->step);
-               retval = ZFCP_ERP_FAILED;
-       }
-
-       return retval;
-}
-
-static int
-zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-       struct zfcp_port *port = erp_action->port;
-
-       switch (erp_action->step) {
-
-       case ZFCP_ERP_STEP_UNINITIALIZED:
-       case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
-       case ZFCP_ERP_STEP_PORT_CLOSING:
-               ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n",
-                              port->wwpn, port->d_id);
-               retval = zfcp_erp_port_strategy_open_port(erp_action);
-               break;
-
-       case ZFCP_ERP_STEP_PORT_OPENING:
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
-                       ZFCP_LOG_DEBUG("WKA port is open\n");
-                       retval = ZFCP_ERP_SUCCEEDED;
-               } else {
-                       ZFCP_LOG_DEBUG("open failed for WKA port\n");
-                       retval = ZFCP_ERP_FAILED;
-               }
-               /* this is needed anyway (don't care for retval of wakeup) */
-               ZFCP_LOG_DEBUG("continue other open port operations\n");
-               zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
-                               erp_action->step);
-               retval = ZFCP_ERP_FAILED;
-       }
-
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:    makes the erp thread continue with reopen (physical) port
- *             actions which have been paused until the name server port
- *             is opened (or failed)
- *
- * returns:    0       (a kind of void retval, its not used)
- */
-static int
-zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action
-                                             *ns_erp_action)
-{
-       int retval = 0;
-       unsigned long flags;
-       struct zfcp_adapter *adapter = ns_erp_action->adapter;
-       struct zfcp_erp_action *erp_action, *tmp;
-
-       read_lock_irqsave(&adapter->erp_lock, flags);
-       list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
-                                list) {
-               if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
-                       if (atomic_test_mask(
-                                   ZFCP_STATUS_COMMON_ERP_FAILED,
-                                   &adapter->nameserver_port->status))
-                               zfcp_erp_port_failed(erp_action->port, 27,
-                                                    NULL);
-                       zfcp_erp_action_ready(erp_action);
-               }
-       }
-       read_unlock_irqrestore(&adapter->erp_lock, flags);
-
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_fsf_close_physical_port(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
-       if (retval != 0) {
-               /* could not send 'close', fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
-       }
-       retval = ZFCP_ERP_CONTINUES;
- out:
-       return retval;
-}
-
-static int
-zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
-{
-       int retval = 0;
-
-       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
-                         ZFCP_STATUS_COMMON_CLOSING |
-                         ZFCP_STATUS_COMMON_ACCESS_DENIED |
-                         ZFCP_STATUS_PORT_DID_DID |
-                         ZFCP_STATUS_PORT_PHYS_CLOSING |
-                         ZFCP_STATUS_PORT_INVALID_WWPN,
-                         &port->status);
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_fsf_close_port(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
-       if (retval != 0) {
-               /* could not send 'close', fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
-       }
-       retval = ZFCP_ERP_CONTINUES;
- out:
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_fsf_open_port(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
-       if (retval != 0) {
-               /* could not send 'open', fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
-       }
-       retval = ZFCP_ERP_CONTINUES;
- out:
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_ns_gid_pn_request(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
-       if (retval != 0) {
-               /* could not send nameserver request, fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
-       }
-       retval = ZFCP_ERP_CONTINUES;
- out:
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:    this routine executes the 'Reopen Unit' action
- *             currently no retries
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_SUCCEEDED      - action finished successfully
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
-{
-       int retval = ZFCP_ERP_FAILED;
-       struct zfcp_unit *unit = erp_action->unit;
-
-       switch (erp_action->step) {
-
-               /*
-                * FIXME:
-                * the ULP spec. begs for waiting for outstanding commands
-                */
-       case ZFCP_ERP_STEP_UNINITIALIZED:
-               zfcp_erp_unit_strategy_clearstati(unit);
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
-                       ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> "
-                                      "trying close\n", unit->fcp_lun);
-                       retval = zfcp_erp_unit_strategy_close(erp_action);
-                       break;
+       switch (retval) {
+       case ZFCP_ERP_NOMEM:
+               if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
+                       ++adapter->erp_low_mem_count;
+                       erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
                }
-               /* else it's already closed, fall through */
-       case ZFCP_ERP_STEP_UNIT_CLOSING:
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
-                       ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n",
-                                      unit->fcp_lun);
-                       retval = ZFCP_ERP_FAILED;
-               } else {
-                       if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
-                               retval = ZFCP_ERP_EXIT;
-                       else {
-                               ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> "
-                                              "trying open\n", unit->fcp_lun);
-                               retval =
-                                   zfcp_erp_unit_strategy_open(erp_action);
-                       }
+               if (adapter->erp_total_count == adapter->erp_low_mem_count)
+                       _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL);
+               else {
+                       zfcp_erp_strategy_memwait(erp_action);
+                       retval = ZFCP_ERP_CONTINUES;
                }
-               break;
+               goto unlock;
 
-       case ZFCP_ERP_STEP_UNIT_OPENING:
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
-                       ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n",
-                                      unit->fcp_lun);
-                       retval = ZFCP_ERP_SUCCEEDED;
-               } else {
-                       ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n",
-                                      unit->fcp_lun);
-                       retval = ZFCP_ERP_FAILED;
+       case ZFCP_ERP_CONTINUES:
+               if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+                       --adapter->erp_low_mem_count;
+                       erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
                }
-               break;
-       }
-
-       return retval;
-}
-
-static int
-zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
-{
-       int retval = 0;
-
-       atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
-                         ZFCP_STATUS_COMMON_CLOSING |
-                         ZFCP_STATUS_COMMON_ACCESS_DENIED |
-                         ZFCP_STATUS_UNIT_SHARED |
-                         ZFCP_STATUS_UNIT_READONLY,
-                         &unit->status);
-
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_fsf_close_unit(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
-       if (retval != 0) {
-               /* could not send 'close', fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
-       }
-       retval = ZFCP_ERP_CONTINUES;
-
- out:
-       return retval;
-}
-
-/*
- * function:
- *
- * purpose:
- *
- * returns:    ZFCP_ERP_CONTINUES      - action continues (asynchronously)
- *             ZFCP_ERP_FAILED         - action finished unsuccessfully
- */
-static int
-zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
-{
-       int retval;
-
-       retval = zfcp_fsf_open_unit(erp_action);
-       if (retval == -ENOMEM) {
-               retval = ZFCP_ERP_NOMEM;
-               goto out;
-       }
-       erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
-       if (retval != 0) {
-               /* could not send 'open', fail */
-               retval = ZFCP_ERP_FAILED;
-               goto out;
+               goto unlock;
        }
-       retval = ZFCP_ERP_CONTINUES;
- out:
-       return retval;
-}
-
-void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req)
-{
-       BUG_ON(!fsf_req->erp_action);
-       fsf_req->timer.function = zfcp_erp_timeout_handler;
-       fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
-       fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
-       add_timer(&fsf_req->timer);
-}
-
-/*
- * function:
- *
- * purpose:    enqueue the specified error recovery action, if needed
- *
- * returns:
- */
-static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
-                                  struct zfcp_port *port,
-                                  struct zfcp_unit *unit, u8 id, void *ref)
-{
-       int retval = 1, need = want;
-       struct zfcp_erp_action *erp_action = NULL;
-       u32 status = 0;
-
-       /*
-        * We need some rules here which check whether we really need
-        * this action or whether we should just drop it.
-        * E.g. if there is an unfinished 'Reopen Port' request then we drop a
-        * 'Reopen Unit' request for an associated unit since we can't
-        * satisfy this request now. A 'Reopen Port' action will trigger
-        * 'Reopen Unit' actions when it completes.
-        * Thus, there are only actions in the queue which can immediately be
-        * executed. This makes the processing of the action queue more
-        * efficient.
-        */
-
-       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
-                             &adapter->status))
-               return -EIO;
-
-       /* check whether we really need this */
-       switch (want) {
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               if (atomic_test_mask
-                   (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
-                       goto out;
-               }
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_RUNNING, &port->status) ||
-                   atomic_test_mask
-                   (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
-                       goto out;
-               }
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
-                       need = ZFCP_ERP_ACTION_REOPEN_PORT;
-               /* fall through !!! */
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               if (atomic_test_mask
-                   (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
-                       goto out;
-               }
-               /* fall through !!! */
+       retval = zfcp_erp_strategy_check_target(erp_action, retval);
+       zfcp_erp_action_dequeue(erp_action);
+       retval = zfcp_erp_strategy_statechange(erp_action, retval);
+       if (retval == ZFCP_ERP_EXIT)
+               goto unlock;
+       zfcp_erp_strategy_followup_actions(erp_action);
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-               if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
-                                    &port->status)) {
-                       if (port->erp_action.action !=
-                           ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
-                               ZFCP_LOG_INFO("dropped erp action %i (port "
-                                             "0x%016Lx, action in use: %i)\n",
-                                             want, port->wwpn,
-                                             port->erp_action.action);
-                       }
-                       goto out;
-               }
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
-                   atomic_test_mask
-                   (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
-                       goto out;
-               }
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
-                       need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
-               /* fall through !!! */
+ unlock:
+       write_unlock(&adapter->erp_lock);
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               if (atomic_test_mask
-                   (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
-                       goto out;
-               }
-               break;
+       if (retval != ZFCP_ERP_CONTINUES)
+               zfcp_erp_action_cleanup(erp_action, retval);
 
-       default:
-               ZFCP_LOG_NORMAL("bug: unknown erp action requested "
-                               "on adapter %s (action=%d)\n",
-                               zfcp_get_busid_by_adapter(adapter), want);
-               goto out;
-       }
+       return retval;
+}
 
-       /* check whether we need something stronger first */
-       if (need) {
-               ZFCP_LOG_DEBUG("stronger erp action %d needed before "
-                              "erp action %d on adapter %s\n",
-                              need, want, zfcp_get_busid_by_adapter(adapter));
-       }
+static int zfcp_erp_thread(void *data)
+{
+       struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+       struct list_head *next;
+       struct zfcp_erp_action *act;
+       unsigned long flags;
 
-       /* mark adapter to have some error recovery pending */
-       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+       daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id);
+       /* Block all signals */
+       siginitsetinv(&current->blocked, 0);
+       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+       wake_up(&adapter->erp_thread_wqh);
 
-       /* setup error recovery action */
-       switch (need) {
+       while (!(atomic_read(&adapter->status) &
+                ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
+               write_lock_irqsave(&adapter->erp_lock, flags);
+               next = adapter->erp_ready_head.next;
+               write_unlock_irqrestore(&adapter->erp_lock, flags);
 
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               zfcp_unit_get(unit);
-               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
-               erp_action = &unit->erp_action;
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_RUNNING, &unit->status))
-                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
-               break;
+               if (next != &adapter->erp_ready_head) {
+                       act = list_entry(next, struct zfcp_erp_action, list);
 
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-               zfcp_port_get(port);
-               zfcp_erp_action_dismiss_port(port);
-               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
-               erp_action = &port->erp_action;
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_RUNNING, &port->status))
-                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
-               break;
+                       /* there is more to come after dismissal, no notify */
+                       if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
+                               zfcp_erp_wakeup(adapter);
+               }
 
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               zfcp_adapter_get(adapter);
-               zfcp_erp_action_dismiss_adapter(adapter);
-               atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
-               erp_action = &adapter->erp_action;
-               if (!atomic_test_mask
-                   (ZFCP_STATUS_COMMON_RUNNING, &adapter->status))
-                       status = ZFCP_STATUS_ERP_CLOSE_ONLY;
-               break;
+               zfcp_rec_dbf_event_thread(4, adapter);
+               down_interruptible(&adapter->erp_ready_sem);
+               zfcp_rec_dbf_event_thread(5, adapter);
        }
 
-       memset(erp_action, 0, sizeof (struct zfcp_erp_action));
-       erp_action->adapter = adapter;
-       erp_action->port = port;
-       erp_action->unit = unit;
-       erp_action->action = need;
-       erp_action->status = status;
-
-       ++adapter->erp_total_count;
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+       wake_up(&adapter->erp_thread_wqh);
 
-       /* finally put it into 'ready' queue and kick erp thread */
-       list_add_tail(&erp_action->list, &adapter->erp_ready_head);
-       up(&adapter->erp_ready_sem);
-       zfcp_rec_dbf_event_thread(1, adapter, 0);
-       retval = 0;
- out:
-       zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action,
-                                  adapter, port, unit);
-       return retval;
+       return 0;
 }
 
-static int
-zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
+/**
+ * zfcp_erp_thread_setup - Start ERP thread for adapter
+ * @adapter: Adapter to start the ERP thread for
+ *
+ * Returns 0 on success or error code from kernel_thread()
+ */
+int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
 {
-       int retval = 0;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-
-       --adapter->erp_total_count;
-       if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
-               --adapter->erp_low_mem_count;
-               erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
-       }
-
-       list_del(&erp_action->list);
-       zfcp_rec_dbf_event_action(144, erp_action);
+       int retval;
 
-       switch (erp_action->action) {
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
-                                 &erp_action->unit->status);
-               break;
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
-                                 &erp_action->port->status);
-               break;
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
-                                 &erp_action->adapter->status);
-               break;
-       default:
-               /* bug */
-               break;
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+       retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
+       if (retval < 0) {
+               dev_err(&adapter->ccw_device->dev,
+                       "Creation of ERP thread failed.\n");
+               return retval;
        }
-       return retval;
+       wait_event(adapter->erp_thread_wqh,
+                  atomic_read(&adapter->status) &
+                       ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
+       return 0;
 }
 
 /**
- * zfcp_erp_action_cleanup
+ * zfcp_erp_thread_kill - Stop ERP thread.
+ * @adapter: Adapter where the ERP thread should be stopped.
  *
- * Register unit with scsi stack if appropriate and fix reference counts.
- * Note: Temporary units are not registered with scsi stack.
+ * The caller of this routine ensures that the specified adapter has
+ * been shut down and that the shutdown has completed. Thus, there
+ * are no pending erp_actions left that would need to be handled here.
  */
-static void
-zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
-                       struct zfcp_port *port, struct zfcp_unit *unit,
-                       int result)
+void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
 {
-       switch (action) {
-       case ZFCP_ERP_ACTION_REOPEN_UNIT:
-               if ((result == ZFCP_ERP_SUCCEEDED)
-                   && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
-                                         &unit->status))
-                   && !unit->device
-                   && port->rport) {
-                       atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
-                                       &unit->status);
-                       if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
-                                            &unit->status) == 0)
-                               zfcp_erp_schedule_work(unit);
-               }
-               zfcp_unit_put(unit);
-               break;
-       case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-       case ZFCP_ERP_ACTION_REOPEN_PORT:
-               if (atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN,
-                                    &port->status)) {
-                       zfcp_port_put(port);
-                       break;
-               }
+       atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
+       up(&adapter->erp_ready_sem);
+       zfcp_rec_dbf_event_thread_lock(2, adapter);
 
-               if ((result == ZFCP_ERP_SUCCEEDED)
-                   && !port->rport) {
-                       struct fc_rport_identifiers ids;
-                       ids.node_name = port->wwnn;
-                       ids.port_name = port->wwpn;
-                       ids.port_id = port->d_id;
-                       ids.roles = FC_RPORT_ROLE_FCP_TARGET;
-                       port->rport =
-                               fc_remote_port_add(adapter->scsi_host, 0, &ids);
-                       if (!port->rport)
-                               ZFCP_LOG_NORMAL("failed registration of rport "
-                                               "(adapter %s, wwpn=0x%016Lx)\n",
-                                               zfcp_get_busid_by_port(port),
-                                               port->wwpn);
-                       else {
-                               scsi_target_unblock(&port->rport->dev);
-                               port->rport->maxframe_size = port->maxframe_size;
-                               port->rport->supported_classes =
-                                       port->supported_classes;
-                       }
-               }
-               if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
-                       fc_remote_port_delete(port->rport);
-                       port->rport = NULL;
-               }
-               zfcp_port_put(port);
-               break;
-       case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-               if (result != ZFCP_ERP_SUCCEEDED) {
-                       list_for_each_entry(port, &adapter->port_list_head, list)
-                               if (port->rport &&
-                                   !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
-                                                     &port->status)) {
-                                       fc_remote_port_delete(port->rport);
-                                       port->rport = NULL;
-                               }
-               }
-               zfcp_adapter_put(adapter);
-               break;
-       default:
-               break;
-       }
+       wait_event(adapter->erp_thread_wqh,
+                  !(atomic_read(&adapter->status) &
+                               ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
+
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
+                         &adapter->status);
 }
 
+/**
+ * zfcp_erp_adapter_failed - Set adapter status to failed.
+ * @adapter: Failed adapter.
+ * @id: Event id for debug trace.
+ * @ref: Reference for debug trace.
+ */
+void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
+{
+       zfcp_erp_modify_adapter_status(adapter, id, ref,
+                                      ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+       dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
+}
 
-static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+/**
+ * zfcp_erp_port_failed - Set port status to failed.
+ * @port: Failed port.
+ * @id: Event id for debug trace.
+ * @ref: Reference for debug trace.
+ */
+void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
 {
-       struct zfcp_port *port;
+       zfcp_erp_modify_port_status(port, id, ref,
+                                   ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status))
-               zfcp_erp_action_dismiss(&adapter->erp_action);
+       if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
+               dev_err(&port->adapter->ccw_device->dev,
+                       "Port ERP failed for WKA port d_id=0x%06x.\n",
+                       port->d_id);
        else
-               list_for_each_entry(port, &adapter->port_list_head, list)
-                   zfcp_erp_action_dismiss_port(port);
+               dev_err(&port->adapter->ccw_device->dev,
+                       "Port ERP failed for port wwpn=0x%016Lx.\n",
+                       port->wwpn);
 }
 
-static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+/**
+ * zfcp_erp_unit_failed - Set unit status to failed.
+ * @unit: Failed unit.
+ * @id: Event id for debug trace.
+ * @ref: Reference for debug trace.
+ */
+void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
 {
-       struct zfcp_unit *unit;
+       zfcp_erp_modify_unit_status(unit, id, ref,
+                                   ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
 
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status))
-               zfcp_erp_action_dismiss(&port->erp_action);
-       else
-               list_for_each_entry(unit, &port->unit_list_head, list)
-                   zfcp_erp_action_dismiss_unit(unit);
+       dev_err(&unit->port->adapter->ccw_device->dev,
+               "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
+               unit->fcp_lun, unit->port->wwpn);
 }
 
-static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+/**
+ * zfcp_erp_wait - wait for completion of error recovery on an adapter
+ * @adapter: adapter for which to wait for completion of its error recovery
+ */
+void zfcp_erp_wait(struct zfcp_adapter *adapter)
 {
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
-               zfcp_erp_action_dismiss(&unit->erp_action);
+       wait_event(adapter->erp_done_wqh,
+                  !(atomic_read(&adapter->status) &
+                       ZFCP_STATUS_ADAPTER_ERP_PENDING));
 }
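
A hedged sketch of the usual pairing with zfcp_erp_wait(): trigger a recovery and then block until ZFCP_STATUS_ADAPTER_ERP_PENDING is cleared. The wrapper name is hypothetical, the trace id 0 is arbitrary, and the clear mask mirrors the ZFCP_STATUS_COMMON_ERP_FAILED usage seen elsewhere in this patch.

/* Illustration only: reopen the adapter and wait for error recovery
 * to finish before using it again. */
static void example_reopen_and_wait(struct zfcp_adapter *adapter)
{
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				0, NULL);	/* arbitrary trace id, no ref */
	zfcp_erp_wait(adapter);			/* sleeps on erp_done_wqh */
}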
 
-static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+/**
+ * zfcp_erp_modify_adapter_status - change adapter status bits
+ * @adapter: adapter to change the status
+ * @id: id for the debug trace
+ * @ref: reference for the debug trace
+ * @mask: status bits to change
+ * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
+ *
+ * Changes in common status bits are propagated to attached ports and units.
+ */
+void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
+                                   void *ref, u32 mask, int set_or_clear)
 {
-       list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
-       zfcp_rec_dbf_event_action(145, erp_action);
+       struct zfcp_port *port;
+       u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+       if (set_or_clear == ZFCP_SET) {
+               if (status_change_set(mask, &adapter->status))
+                       zfcp_rec_dbf_event_adapter(id, ref, adapter);
+               atomic_set_mask(mask, &adapter->status);
+       } else {
+               if (status_change_clear(mask, &adapter->status))
+                       zfcp_rec_dbf_event_adapter(id, ref, adapter);
+               atomic_clear_mask(mask, &adapter->status);
+               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+                       atomic_set(&adapter->erp_counter, 0);
+       }
+
+       if (common_mask)
+               list_for_each_entry(port, &adapter->port_list_head, list)
+                       zfcp_erp_modify_port_status(port, id, ref, common_mask,
+                                                   set_or_clear);
+}
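
A small usage sketch of the set/clear interface, assuming ZFCP_STATUS_COMMON_ERP_FAILED is part of ZFCP_COMMON_FLAGS so that the change propagates to the attached ports and units; the trace id 0 is arbitrary and only used for illustration.

/* Illustration only: mark the whole adapter as failed, later clear the
 * flag again; clearing ERP_FAILED also resets adapter->erp_counter. */
zfcp_erp_modify_adapter_status(adapter, 0, NULL,
			       ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
/* ... */
zfcp_erp_modify_adapter_status(adapter, 0, NULL,
			       ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_CLEAR);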
+
+/**
+ * zfcp_erp_modify_port_status - change port status bits
+ * @port: port to change the status bits
+ * @id: id for the debug trace
+ * @ref: reference for the debug trace
+ * @mask: status bits to change
+ * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
+ *
+ * Changes in common status bits are propagated to attached units.
+ */
+void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
+                                u32 mask, int set_or_clear)
+{
+       struct zfcp_unit *unit;
+       u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+       if (set_or_clear == ZFCP_SET) {
+               if (status_change_set(mask, &port->status))
+                       zfcp_rec_dbf_event_port(id, ref, port);
+               atomic_set_mask(mask, &port->status);
+       } else {
+               if (status_change_clear(mask, &port->status))
+                       zfcp_rec_dbf_event_port(id, ref, port);
+               atomic_clear_mask(mask, &port->status);
+               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+                       atomic_set(&port->erp_counter, 0);
+       }
+
+       if (common_mask)
+               list_for_each_entry(unit, &port->unit_list_head, list)
+                       zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
+                                                   set_or_clear);
 }
 
-static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
+/**
+ * zfcp_erp_modify_unit_status - change unit status bits
+ * @unit: unit to change the status bits
+ * @id: id for the debug trace
+ * @ref: reference for the debug trace
+ * @mask: status bits to change
+ * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
+ */
+void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
+                                u32 mask, int set_or_clear)
 {
-       list_move(&erp_action->list, &erp_action->adapter->erp_ready_head);
-       zfcp_rec_dbf_event_action(146, erp_action);
+       if (set_or_clear == ZFCP_SET) {
+               if (status_change_set(mask, &unit->status))
+                       zfcp_rec_dbf_event_unit(id, ref, unit);
+               atomic_set_mask(mask, &unit->status);
+       } else {
+               if (status_change_clear(mask, &unit->status))
+                       zfcp_rec_dbf_event_unit(id, ref, unit);
+               atomic_clear_mask(mask, &unit->status);
+               if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+                       atomic_set(&unit->erp_counter, 0);
+       }
 }
 
+/**
+ * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
+ * @port: The "boxed" port.
+ * @id: The debug trace id.
+ * @ref: Reference for the debug trace.
+ */
 void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
 {
        unsigned long flags;
@@ -3065,6 +1687,12 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
        zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
+/**
+ * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
+ * @unit: The "boxed" unit.
+ * @id: The debug trace id.
+ * @ref: Reference for the debug trace.
+ */
 void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
 {
        zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3072,6 +1700,15 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
        zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
+/**
+ * zfcp_erp_port_access_denied - Adapter denied access to port.
+ * @port: port where access has been denied
+ * @id: id for debug trace
+ * @ref: reference for debug trace
+ *
+ * Since the adapter has denied access, stop using the port and the
+ * attached units.
+ */
 void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
 {
        unsigned long flags;
@@ -3083,6 +1720,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
        read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
+/**
+ * zfcp_erp_unit_access_denied - Adapter denied access to unit.
+ * @unit: unit where access has been denied
+ * @id: id for debug trace
+ * @ref: reference for debug trace
+ *
+ * Since the adapter has denied access, stop using the unit.
+ */
 void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
 {
        zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3090,67 +1735,54 @@ void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
                                    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
 }
 
-void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
-                                    void *ref)
+static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
+                                        void *ref)
 {
-       struct zfcp_port *port;
-       unsigned long flags;
-
-       if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+       int status = atomic_read(&unit->status);
+       if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
+                       ZFCP_STATUS_COMMON_ACCESS_BOXED)))
                return;
 
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       if (adapter->nameserver_port)
-               zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
-       list_for_each_entry(port, &adapter->port_list_head, list)
-               if (port != adapter->nameserver_port)
-                       zfcp_erp_port_access_changed(port, id, ref);
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+       zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
-void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref)
+static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
+                                        void *ref)
 {
-       struct zfcp_adapter *adapter = port->adapter;
        struct zfcp_unit *unit;
+       int status = atomic_read(&port->status);
 
-       if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
-                             &port->status) &&
-           !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
-                             &port->status)) {
-               if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
+       if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
+                       ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
+               if (!(status & ZFCP_STATUS_PORT_WKA))
                        list_for_each_entry(unit, &port->unit_list_head, list)
                                zfcp_erp_unit_access_changed(unit, id, ref);
                return;
        }
 
-       ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s "
-                       "(due to ACT update)\n",
-                       port->wwpn, zfcp_get_busid_by_adapter(adapter));
-       if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
-               ZFCP_LOG_NORMAL("failed reopen of port"
-                               "(adapter %s, wwpn=0x%016Lx)\n",
-                               zfcp_get_busid_by_adapter(adapter), port->wwpn);
+       zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
-void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref)
+/**
+ * zfcp_erp_adapter_access_changed - Process change in adapter ACT
+ * @adapter: Adapter where the Access Control Table (ACT) changed
+ * @id: Id for debug trace
+ * @ref: Reference for debug trace
+ */
+void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
+                                    void *ref)
 {
-       struct zfcp_adapter *adapter = unit->port->adapter;
+       struct zfcp_port *port;
+       unsigned long flags;
 
-       if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
-                             &unit->status) &&
-           !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
-                             &unit->status))
+       if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
                return;
 
-       ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx "
-                       " on adapter %s (due to ACT update)\n",
-                       unit->fcp_lun, unit->port->wwpn,
-                       zfcp_get_busid_by_adapter(adapter));
-       if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
-               ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, "
-                               "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               unit->port->wwpn, unit->fcp_lun);
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       if (adapter->nameserver_port)
+               zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if (port != adapter->nameserver_port)
+                       zfcp_erp_port_access_changed(port, id, ref);
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
-
-#undef ZFCP_LOG_AREA
index 6abf178..8065b2b 100644 (file)
@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * External function declarations.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #ifndef ZFCP_EXT_H
 
 #include "zfcp_def.h"
 
-extern struct zfcp_data zfcp_data;
-
-/******************************** SYSFS  *************************************/
-extern struct attribute_group *zfcp_driver_attr_groups[];
-extern int  zfcp_sysfs_adapter_create_files(struct device *);
-extern void zfcp_sysfs_adapter_remove_files(struct device *);
-extern int  zfcp_sysfs_port_create_files(struct device *, u32);
-extern void zfcp_sysfs_port_remove_files(struct device *, u32);
-extern int  zfcp_sysfs_unit_create_files(struct device *);
-extern void zfcp_sysfs_unit_remove_files(struct device *);
-extern void zfcp_sysfs_port_release(struct device *);
-extern void zfcp_sysfs_unit_release(struct device *);
-
-/**************************** CONFIGURATION  *********************************/
-extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
-extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
-extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
-struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
-extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
-extern int    zfcp_adapter_debug_register(struct zfcp_adapter *);
-extern void   zfcp_adapter_dequeue(struct zfcp_adapter *);
-extern void   zfcp_adapter_debug_unregister(struct zfcp_adapter *);
-extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
-                                          u32, u32);
-extern void   zfcp_port_dequeue(struct zfcp_port *);
+/* zfcp_aux.c */
+extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
+                                             fcp_lun_t);
+extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
+                                              wwn_t);
+extern int zfcp_adapter_enqueue(struct ccw_device *);
+extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
+extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
+                                          u32);
+extern void zfcp_port_dequeue(struct zfcp_port *);
 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
-extern void   zfcp_unit_dequeue(struct zfcp_unit *);
-
-/******************************* S/390 IO ************************************/
-extern int  zfcp_ccw_register(void);
-
-extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
-extern int  zfcp_qdio_allocate(struct zfcp_adapter *);
-extern int  zfcp_qdio_allocate_queues(struct zfcp_adapter *);
-extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
-extern int  zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
-                                   struct zfcp_fsf_req *);
-
-extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
-       (struct zfcp_fsf_req *, int, int);
-extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr
-       (struct zfcp_fsf_req *);
-extern int zfcp_qdio_sbals_from_sg
-       (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int);
-extern int zfcp_qdio_sbals_from_scsicmnd
-       (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *);
-
-
-/******************************** FSF ****************************************/
-extern int  zfcp_fsf_open_port(struct zfcp_erp_action *);
-extern int  zfcp_fsf_close_port(struct zfcp_erp_action *);
-extern int  zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
-
-extern int  zfcp_fsf_open_unit(struct zfcp_erp_action *);
-extern int  zfcp_fsf_close_unit(struct zfcp_erp_action *);
-
-extern int  zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int  zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
-                                              struct fsf_qtcb_bottom_config *);
-extern int  zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
-extern int  zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
-                                             struct fsf_qtcb_bottom_port *);
-extern int  zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
-                                 u32, u32, struct zfcp_sg_list *);
-extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
-extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
-extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
-extern int  zfcp_fsf_status_read(struct zfcp_adapter *, int);
-extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
-                              unsigned long *, struct zfcp_fsf_req **);
-extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
-                           struct zfcp_erp_action *);
-extern int zfcp_fsf_send_els(struct zfcp_send_els *);
-extern int  zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
-                                          struct zfcp_unit *,
-                                          struct scsi_cmnd *, int, int);
-extern int  zfcp_fsf_req_complete(struct zfcp_fsf_req *);
-extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
-extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
-       struct zfcp_adapter *, struct zfcp_unit *, u8, int);
-extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
-       unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
-
-/******************************* FC/FCP **************************************/
-extern int  zfcp_nameserver_enqueue(struct zfcp_adapter *);
-extern int  zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
-extern int  zfcp_check_ct_response(struct ct_hdr *);
-extern int  zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
-extern void zfcp_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
-
-/******************************* SCSI ****************************************/
-extern int  zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
-extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
-extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
-extern void set_host_byte(int *, char);
-extern void set_driver_byte(int *, char);
-extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
-extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
-
-extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
-                                  struct scsi_cmnd *, int);
-extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
-extern struct fc_function_template zfcp_transport_functions;
-
-/******************************** ERP ****************************************/
-extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
-                                          u32, int);
-extern int  zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
-extern int  zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
-extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
-
-extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
-                                       int);
-extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
-extern int  zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
-extern int  zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
-extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
-extern int  zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *);
-
-extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
-                                       int);
-extern int  zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
-extern int  zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
-extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
-
-extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
-extern int  zfcp_erp_thread_kill(struct zfcp_adapter *);
-extern int  zfcp_erp_wait(struct zfcp_adapter *);
-extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
-
-extern int  zfcp_test_link(struct zfcp_port *);
-
-extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref);
-extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref);
-extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref);
-extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref);
-extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
-extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *);
-extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *);
-
-/******************************** AUX ****************************************/
-extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter,
-                                     int lock);
-extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port);
-extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit);
-extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need,
-                                      void *action, struct zfcp_adapter *,
+extern void zfcp_unit_dequeue(struct zfcp_unit *);
+extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
+extern void zfcp_sg_free_table(struct scatterlist *, int);
+extern int zfcp_sg_setup_table(struct scatterlist *, int);
+
+/* zfcp_ccw.c */
+extern int zfcp_ccw_register(void);
+
+/* zfcp_cfdc.c */
+extern struct miscdevice zfcp_cfdc_misc;
+
+/* zfcp_dbf.c */
+extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
+extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *);
+extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *);
+extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *,
+                                      struct zfcp_adapter *,
                                       struct zfcp_port *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *);
-
+extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
 extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
 extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
                                         struct fsf_status_read_buffer *);
 extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
                                    unsigned int, unsigned int, unsigned int,
                                    int, int);
-
 extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
 extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
 extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
 extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
 extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
-
 extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
                                       struct scsi_cmnd *,
                                       struct zfcp_fsf_req *);
@@ -198,6 +64,101 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
                                      unsigned long);
 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
                                         struct scsi_cmnd *);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
+
+/* zfcp_erp.c */
+extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
+                                          u32, int);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
+extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
+                                       int);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
+                                       int);
+extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
+extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
+extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
+extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
+extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
+extern void zfcp_erp_wait(struct zfcp_adapter *);
+extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *);
+extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
+extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_timeout_handler(unsigned long);
+
+/* zfcp_fc.c */
+extern int zfcp_scan_ports(struct zfcp_adapter *);
+extern void _zfcp_scan_ports_later(struct work_struct *);
+extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
+extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
+extern void zfcp_test_link(struct zfcp_port *);
+
+/* zfcp_fsf.c */
+extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
+                                             struct fsf_qtcb_bottom_config *);
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
+                                           struct fsf_qtcb_bottom_port *);
+extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
+                                                 struct zfcp_fsf_cfdc *);
+extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_adapter *);
+extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
+extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
+                           struct zfcp_erp_action *);
+extern int zfcp_fsf_send_els(struct zfcp_send_els *);
+extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
+                                         struct zfcp_unit *,
+                                         struct scsi_cmnd *, int, int);
+extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
+extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *,
+                                                 struct zfcp_unit *, u8, int);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
+                                                      struct zfcp_adapter *,
+                                                      struct zfcp_unit *, int);
+
+/* zfcp_qdio.c */
+extern int zfcp_qdio_allocate(struct zfcp_adapter *);
+extern void zfcp_qdio_free(struct zfcp_adapter *);
+extern int zfcp_qdio_send(struct zfcp_fsf_req *);
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
+                                               struct zfcp_fsf_req *);
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
+                                               struct zfcp_fsf_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
+                                  struct scatterlist *, int);
+extern int zfcp_qdio_open(struct zfcp_adapter *);
+extern void zfcp_qdio_close(struct zfcp_adapter *);
+
+/* zfcp_scsi.c */
+extern struct zfcp_data zfcp_data;
+extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
+extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
+extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
+extern struct fc_function_template zfcp_transport_functions;
+
+/* zfcp_sysfs.c */
+extern struct attribute_group zfcp_sysfs_unit_attrs;
+extern struct attribute_group zfcp_sysfs_adapter_attrs;
+extern struct attribute_group zfcp_sysfs_ns_port_attrs;
+extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 
 #endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
new file mode 100644 (file)
index 0000000..e984469
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related functions for the zfcp device driver.
+ *
+ * Copyright IBM Corporation 2008
+ */
+
+#include "zfcp_ext.h"
+
+struct ct_iu_gpn_ft_req {
+       struct ct_hdr header;
+       u8 flags;
+       u8 domain_id_scope;
+       u8 area_id_scope;
+       u8 fc4_type;
+} __attribute__ ((packed));
+
+struct gpn_ft_resp_acc {
+       u8 control;
+       u8 port_id[3];
+       u8 reserved[4];
+       u64 wwpn;
+} __attribute__ ((packed));
+
+#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
+                               / sizeof(struct gpn_ft_resp_acc))
+#define ZFCP_GPN_FT_BUFFERS 4
+#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
+
+struct ct_iu_gpn_ft_resp {
+       struct ct_hdr header;
+       struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
+} __attribute__ ((packed));
+
+struct zfcp_gpn_ft {
+       struct zfcp_send_ct ct;
+       struct scatterlist sg_req;
+       struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
+};
+
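+/* look up a remote port by destination ID, skipping ports being removed */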
+static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter,
+                                             u32 d_id)
+{
+       struct zfcp_port *port;
+
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if ((port->d_id == d_id) &&
+                   !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
+                       return port;
+       return NULL;
+}
+
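+/* reopen ports that have no d_id yet and test the link of ports whose
+ * d_id falls into the address range affected by the RSCN */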
+static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
+                                  struct fcp_rscn_element *elem)
+{
+       unsigned long flags;
+       struct zfcp_port *port;
+
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
+               if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
+                       continue;
+               /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
+               if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
+                       /* Try to connect to unused ports anyway. */
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            82, fsf_req);
+               else if ((port->d_id & range) == (elem->nport_did & range))
+                       /* Check connection status for connected ports */
+                       zfcp_test_link(port);
+       }
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+}
+
+static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
+{
+       struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+       struct fcp_rscn_head *fcp_rscn_head;
+       struct fcp_rscn_element *fcp_rscn_element;
+       u16 i;
+       u16 no_entries;
+       u32 range_mask;
+
+       fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
+       fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
+
+       /* see FC-FS */
+       no_entries = fcp_rscn_head->payload_len /
+                       sizeof(struct fcp_rscn_element);
+
+       for (i = 1; i < no_entries; i++) {
+               /* skip head and start with 1st element */
+               fcp_rscn_element++;
+               switch (fcp_rscn_element->addr_format) {
+               case ZFCP_PORT_ADDRESS:
+                       range_mask = ZFCP_PORTS_RANGE_PORT;
+                       break;
+               case ZFCP_AREA_ADDRESS:
+                       range_mask = ZFCP_PORTS_RANGE_AREA;
+                       break;
+               case ZFCP_DOMAIN_ADDRESS:
+                       range_mask = ZFCP_PORTS_RANGE_DOMAIN;
+                       break;
+               case ZFCP_FABRIC_ADDRESS:
+                       range_mask = ZFCP_PORTS_RANGE_FABRIC;
+                       break;
+               default:
+                       continue;
+               }
+               _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
+       }
+       schedule_work(&fsf_req->adapter->scan_work);
+}
+
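+/* incoming PLOGI and LOGO name a peer WWPN: force a reopen of the
+ * matching port, if one is configured */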
+static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct zfcp_port *port;
+       unsigned long flags;
+
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if (port->wwpn == wwpn)
+                       break;
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+       if (port && (port->wwpn == wwpn))
+               zfcp_erp_port_forced_reopen(port, 0, 83, req);
+}
+
+static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
+{
+       struct fsf_status_read_buffer *status_buffer =
+               (struct fsf_status_read_buffer *)req->data;
+       struct fsf_plogi *els_plogi =
+               (struct fsf_plogi *) status_buffer->payload.data;
+
+       zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
+}
+
+static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
+{
+       struct fsf_status_read_buffer *status_buffer =
+               (struct fsf_status_read_buffer *)req->data;
+       struct fcp_logo *els_logo =
+               (struct fcp_logo *) status_buffer->payload.data;
+
+       zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
+}
+
+/**
+ * zfcp_fc_incoming_els - handle incoming ELS
+ * @fsf_req: request which contains incoming ELS
+ */
+void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+       struct fsf_status_read_buffer *status_buffer =
+               (struct fsf_status_read_buffer *) fsf_req->data;
+       unsigned int els_type = status_buffer->payload.data[0];
+
+       zfcp_san_dbf_event_incoming_els(fsf_req);
+       if (els_type == LS_PLOGI)
+               zfcp_fc_incoming_plogi(fsf_req);
+       else if (els_type == LS_LOGO)
+               zfcp_fc_incoming_logo(fsf_req);
+       else if (els_type == LS_RSCN)
+               zfcp_fc_incoming_rscn(fsf_req);
+}
+
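+/* GID_PN completion handler: store the d_id returned by the nameserver */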
+static void zfcp_ns_gid_pn_handler(unsigned long data)
+{
+       struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
+       struct zfcp_send_ct *ct = &gid_pn->ct;
+       struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
+       struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
+       struct zfcp_port *port = gid_pn->port;
+
+       if (ct->status)
+               goto out;
+       if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
+               atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
+               goto out;
+       }
+       /* paranoia */
+       if (ct_iu_req->wwpn != port->wwpn)
+               goto out;
+       /* looks like a valid d_id */
+       port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
+       atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
+out:
+       mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
+}
+
+/**
+ * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
+ * return: -ENOMEM on error, 0 otherwise
+ */
+int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
+{
+       int ret;
+       struct zfcp_gid_pn_data *gid_pn;
+       struct zfcp_adapter *adapter = erp_action->adapter;
+
+       gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
+       if (!gid_pn)
+               return -ENOMEM;
+
+       memset(gid_pn, 0, sizeof(*gid_pn));
+
+       /* setup parameters for send generic command */
+       gid_pn->port = erp_action->port;
+       gid_pn->ct.port = adapter->nameserver_port;
+       gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
+       gid_pn->ct.handler_data = (unsigned long) gid_pn;
+       gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
+       gid_pn->ct.req = &gid_pn->req;
+       gid_pn->ct.resp = &gid_pn->resp;
+       gid_pn->ct.req_count = 1;
+       gid_pn->ct.resp_count = 1;
+       sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
+                   sizeof(struct ct_iu_gid_pn_req));
+       sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
+                   sizeof(struct ct_iu_gid_pn_resp));
+
+       /* setup nameserver request */
+       gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
+       gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
+       gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
+       gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
+       gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
+       gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
+       gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
+
+       ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
+                              erp_action);
+       if (ret)
+               mempool_free(gid_pn, adapter->pool.data_gid_pn);
+       return ret;
+}
+
+/**
+ * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
+ * @port: zfcp_port structure
+ * @plogi: plogi payload
+ *
+ * Evaluate PLOGI payload and copy important fields into zfcp_port structure
+ */
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
+{
+       port->maxframe_size = plogi->serv_param.common_serv_param[7] |
+               ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
+       if (plogi->serv_param.class1_serv_param[0] & 0x80)
+               port->supported_classes |= FC_COS_CLASS1;
+       if (plogi->serv_param.class2_serv_param[0] & 0x80)
+               port->supported_classes |= FC_COS_CLASS2;
+       if (plogi->serv_param.class3_serv_param[0] & 0x80)
+               port->supported_classes |= FC_COS_CLASS3;
+       if (plogi->serv_param.class4_serv_param[0] & 0x80)
+               port->supported_classes |= FC_COS_CLASS4;
+}
+
+struct zfcp_els_adisc {
+       struct zfcp_send_els els;
+       struct scatterlist req;
+       struct scatterlist resp;
+       struct zfcp_ls_adisc ls_adisc;
+       struct zfcp_ls_adisc_acc ls_adisc_acc;
+};
+
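+/* ADISC completion handler: recover the port if the request failed or the
+ * peer WWPN changed */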
+static void zfcp_fc_adisc_handler(unsigned long data)
+{
+       struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
+       struct zfcp_port *port = adisc->els.port;
+       struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
+
+       if (adisc->els.status) {
+               /* request rejected or timed out */
+               zfcp_erp_port_forced_reopen(port, 0, 63, NULL);
+               goto out;
+       }
+
+       if (!port->wwnn)
+               port->wwnn = ls_adisc->wwnn;
+
+       if (port->wwpn != ls_adisc->wwpn)
+               zfcp_erp_port_reopen(port, 0, 64, NULL);
+
+ out:
+       zfcp_port_put(port);
+       kfree(adisc);
+}
+
+static int zfcp_fc_adisc(struct zfcp_port *port)
+{
+       struct zfcp_els_adisc *adisc;
+       struct zfcp_adapter *adapter = port->adapter;
+
+       adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
+       if (!adisc)
+               return -ENOMEM;
+
+       adisc->els.req = &adisc->req;
+       adisc->els.resp = &adisc->resp;
+       sg_init_one(adisc->els.req, &adisc->ls_adisc,
+                   sizeof(struct zfcp_ls_adisc));
+       sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
+                   sizeof(struct zfcp_ls_adisc_acc));
+
+       adisc->els.req_count = 1;
+       adisc->els.resp_count = 1;
+       adisc->els.adapter = adapter;
+       adisc->els.port = port;
+       adisc->els.d_id = port->d_id;
+       adisc->els.handler = zfcp_fc_adisc_handler;
+       adisc->els.handler_data = (unsigned long) adisc;
+       adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
+
+       /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
+          without FC-AL-2 capability, so we don't set it */
+       adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
+       adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
+       adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
+
+       return zfcp_fsf_send_els(&adisc->els);
+}
+
+/**
+ * zfcp_test_link - lightweight link test procedure
+ * @port: port to be tested
+ *
+ * Test status of a link to a remote port using the ELS command ADISC.
+ * If there is a problem with the remote port, error recovery steps
+ * will be triggered.
+ */
+void zfcp_test_link(struct zfcp_port *port)
+{
+       int retval;
+
+       zfcp_port_get(port);
+       retval = zfcp_fc_adisc(port);
+       if (retval == 0 || retval == -EBUSY)
+               return;
+
+       /* sending ADISC was not possible, force port recovery */
+       zfcp_port_put(port);
+       zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
+}
+
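+/* make sure the nameserver port is open and unblocked before scanning */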
+static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
+{
+       int ret;
+
+       if (!adapter->nameserver_port)
+               return -EINTR;
+
+       if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+                              &adapter->nameserver_port->status)) {
+               ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
+                                          NULL);
+               if (ret)
+                       return ret;
+               zfcp_erp_wait(adapter);
+               zfcp_port_put(adapter->nameserver_port);
+       }
+       return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+                                 &adapter->nameserver_port->status);
+}
+
+static void zfcp_gpn_ft_handler(unsigned long _done)
+{
+       complete((struct completion *)_done);
+}
+
+static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
+{
+       struct scatterlist *sg = &gpn_ft->sg_req;
+
+       kfree(sg_virt(sg)); /* free request buffer */
+       zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
+
+       kfree(gpn_ft);
+}
+
+static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
+{
+       struct zfcp_gpn_ft *gpn_ft;
+       struct ct_iu_gpn_ft_req *req;
+
+       gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
+       if (!gpn_ft)
+               return NULL;
+
+       req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
+       if (!req) {
+               kfree(gpn_ft);
+               gpn_ft = NULL;
+               goto out;
+       }
+       sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
+
+       if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
+               zfcp_free_sg_env(gpn_ft);
+               gpn_ft = NULL;
+       }
+out:
+       return gpn_ft;
+}
+
+
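+/* build the GPN_FT request and wait synchronously for its completion */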
+static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
+                                 struct zfcp_adapter *adapter)
+{
+       struct zfcp_send_ct *ct = &gpn_ft->ct;
+       struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+       struct completion done;
+       int ret;
+
+       /* prepare CT IU for GPN_FT */
+       req->header.revision = ZFCP_CT_REVISION;
+       req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
+       req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
+       req->header.options = ZFCP_CT_SYNCHRONOUS;
+       req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
+       req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
+                                       (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
+       req->flags = 0;
+       req->domain_id_scope = 0;
+       req->area_id_scope = 0;
+       req->fc4_type = ZFCP_CT_SCSI_FCP;
+
+       /* prepare zfcp_send_ct */
+       ct->port = adapter->nameserver_port;
+       ct->handler = zfcp_gpn_ft_handler;
+       ct->handler_data = (unsigned long)&done;
+       ct->timeout = 10;
+       ct->req = &gpn_ft->sg_req;
+       ct->resp = gpn_ft->sg_resp;
+       ct->req_count = 1;
+       ct->resp_count = ZFCP_GPN_FT_BUFFERS;
+
+       init_completion(&done);
+       ret = zfcp_fsf_send_ct(ct, NULL, NULL);
+       if (!ret)
+               wait_for_completion(&done);
+       return ret;
+}
+
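+/* after the scan, remove ports that have neither supported classes nor
+ * attached units */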
+static void zfcp_validate_port(struct zfcp_port *port)
+{
+       struct zfcp_adapter *adapter = port->adapter;
+
+       atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+
+       if (port == adapter->nameserver_port)
+               return;
+       if ((port->supported_classes != 0) || (port->units != 0)) {
+               zfcp_port_put(port);
+               return;
+       }
+       zfcp_erp_port_shutdown(port, 0, 151, NULL);
+       zfcp_erp_wait(adapter);
+       zfcp_port_put(port);
+       zfcp_port_dequeue(port);
+}
+
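+/* walk the GPN_FT accept entries and enqueue ports not yet known */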
+static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
+{
+       struct zfcp_send_ct *ct = &gpn_ft->ct;
+       struct scatterlist *sg = gpn_ft->sg_resp;
+       struct ct_hdr *hdr = sg_virt(sg);
+       struct gpn_ft_resp_acc *acc = sg_virt(sg);
+       struct zfcp_adapter *adapter = ct->port->adapter;
+       struct zfcp_port *port, *tmp;
+       u32 d_id;
+       int ret = 0, x;
+
+       if (ct->status)
+               return -EIO;
+
+       if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
+               if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
+                       return -EAGAIN; /* might be a temporary condition */
+               return -EIO;
+       }
+
+       if (hdr->max_res_size)
+               return -E2BIG;
+
+       down(&zfcp_data.config_sema);
+
+       /* first entry is the header */
+       for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) {
+               if (x % (ZFCP_GPN_FT_ENTRIES + 1))
+                       acc++;
+               else
+                       acc = sg_virt(++sg);
+
+               d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
+                      acc->port_id[2];
+
+               /* skip the adapter's port and known remote ports */
+               if (acc->wwpn == fc_host_port_name(adapter->scsi_host) ||
+                    zfcp_get_port_by_did(adapter, d_id))
+                       continue;
+
+               port = zfcp_port_enqueue(adapter, acc->wwpn,
+                                        ZFCP_STATUS_PORT_DID_DID |
+                                        ZFCP_STATUS_COMMON_NOESC, d_id);
+               if (IS_ERR(port))
+                       ret = PTR_ERR(port);
+               else
+                       zfcp_erp_port_reopen(port, 0, 149, NULL);
+               if (acc->control & 0x80) /* last entry */
+                       break;
+       }
+
+       zfcp_erp_wait(adapter);
+       list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
+               zfcp_validate_port(port);
+       up(&zfcp_data.config_sema);
+       return ret;
+}
+
+/**
+ * zfcp_scan_ports - scan remote ports and attach new ports
+ * @adapter: pointer to struct zfcp_adapter
+ */
+int zfcp_scan_ports(struct zfcp_adapter *adapter)
+{
+       int ret, i;
+       struct zfcp_gpn_ft *gpn_ft;
+
+       zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
+       if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
+               return 0;
+
+       ret = zfcp_scan_get_nameserver(adapter);
+       if (ret)
+               return ret;
+
+       gpn_ft = zfcp_alloc_sg_env();
+       if (!gpn_ft)
+               return -ENOMEM;
+
+       for (i = 0; i < 3; i++) {
+               ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
+               if (!ret) {
+                       ret = zfcp_scan_eval_gpn_ft(gpn_ft);
+                       if (ret == -EAGAIN)
+                               ssleep(1);
+                       else
+                               break;
+               }
+       }
+       zfcp_free_sg_env(gpn_ft);
+
+       return ret;
+}
+
+
+void _zfcp_scan_ports_later(struct work_struct *work)
+{
+       zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
+}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b2ea4ea..19c1ca9 100644 (file)
@@ -1,54 +1,37 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Implementation of FSF commands.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #include "zfcp_ext.h"
 
-static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *);
-static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_send_fcp_command_task_management_handler(
-       struct zfcp_fsf_req *);
-static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *);
-static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *);
-static inline int zfcp_fsf_req_sbal_check(
-       unsigned long *, struct zfcp_qdio_queue *, int);
-static inline int zfcp_use_one_sbal(
-       struct scatterlist *, int, struct scatterlist *, int);
-static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
-static int zfcp_fsf_req_send(struct zfcp_fsf_req *);
-static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
-static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
-static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
-static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8,
-       struct fsf_link_down_info *);
-static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
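+/* an expired request timer triggers adapter recovery */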
+static void zfcp_fsf_request_timeout_handler(unsigned long data)
+{
+       struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+       zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
+                               NULL);
+}
+
+static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
+                                unsigned long timeout)
+{
+       fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
+       fsf_req->timer.data = (unsigned long) fsf_req->adapter;
+       fsf_req->timer.expires = jiffies + timeout;
+       add_timer(&fsf_req->timer);
+}
+
+static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
+{
+       BUG_ON(!fsf_req->erp_action);
+       fsf_req->timer.function = zfcp_erp_timeout_handler;
+       fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
+       fsf_req->timer.expires = jiffies + 30 * HZ;
+       add_timer(&fsf_req->timer);
+}
 
 /* association between FSF command and FSF QTCB type */
 static u32 fsf_qtcb_type[] = {
@@ -67,96 +50,77 @@ static u32 fsf_qtcb_type[] = {
        [FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
 };
 
-static const char zfcp_act_subtable_type[5][8] = {
+static const char *zfcp_act_subtable_type[] = {
        "unknown", "OS", "WWPN", "DID", "LUN"
 };
 
-/****************************************************************/
-/*************** FSF related Functions  *************************/
-/****************************************************************/
-
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_FSF
-
-/*
- * function:   zfcp_fsf_req_alloc
- *
- * purpose:     Obtains an fsf_req and potentially a qtcb (for all but
- *              unsolicited requests) via helper functions
- *              Does some initial fsf request set-up.
- *
- * returns:    pointer to allocated fsf_req if successfull
- *              NULL otherwise
- *
- * locks:       none
- *
- */
-static struct zfcp_fsf_req *
-zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
+static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
 {
-       size_t size;
-       void *ptr;
-       struct zfcp_fsf_req *fsf_req = NULL;
-
-       if (req_flags & ZFCP_REQ_NO_QTCB)
-               size = sizeof(struct zfcp_fsf_req);
-       else
-               size = sizeof(struct zfcp_fsf_req_qtcb);
-
-       if (likely(pool))
-               ptr = mempool_alloc(pool, GFP_ATOMIC);
-       else {
-               if (req_flags & ZFCP_REQ_NO_QTCB)
-                       ptr = kmalloc(size, GFP_ATOMIC);
-               else
-                       ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
-                                              GFP_ATOMIC);
-       }
-
-       if (unlikely(!ptr))
-               goto out;
+       u16 subtable = table >> 16;
+       u16 rule = table & 0xffff;
 
-       memset(ptr, 0, size);
+       if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type))
+               dev_warn(&adapter->ccw_device->dev,
+                        "Access denied in subtable %s, rule %d.\n",
+                        zfcp_act_subtable_type[subtable], rule);
+}
 
-       if (req_flags & ZFCP_REQ_NO_QTCB) {
-               fsf_req = (struct zfcp_fsf_req *) ptr;
-       } else {
-               fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req;
-               fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb;
-       }
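+/* report an access-denied condition for a port and fail the request */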
+static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
+                                       struct zfcp_port *port)
+{
+       struct fsf_qtcb_header *header = &req->qtcb->header;
+       dev_warn(&req->adapter->ccw_device->dev,
+                "Access denied, cannot send command to port 0x%016Lx.\n",
+                port->wwpn);
+       zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
+       zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
+       zfcp_erp_port_access_denied(port, 55, req);
+       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
 
-       fsf_req->pool = pool;
+static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
+                                       struct zfcp_unit *unit)
+{
+       struct fsf_qtcb_header *header = &req->qtcb->header;
+       dev_warn(&req->adapter->ccw_device->dev,
+                "Access denied for unit 0x%016Lx on port 0x%016Lx.\n",
+                unit->fcp_lun, unit->port->wwpn);
+       zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
+       zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
+       zfcp_erp_unit_access_denied(unit, 59, req);
+       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
 
- out:
-       return fsf_req;
+static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
+{
+       dev_err(&req->adapter->ccw_device->dev,
+               "Required FC class not supported by adapter, "
+               "shutting down adapter.\n");
+       zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
+       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
-/*
- * function:   zfcp_fsf_req_free
- *
- * purpose:     Frees the memory of an fsf_req (and potentially a qtcb) or
- *              returns it into the pool via helper functions.
- *
- * returns:     sod all
- *
- * locks:       none
+/**
+ * zfcp_fsf_req_free - free memory used by fsf request
+ * @req: pointer to struct zfcp_fsf_req
  */
-void
-zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
+void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 {
-       if (likely(fsf_req->pool)) {
-               mempool_free(fsf_req, fsf_req->pool);
+       if (likely(req->pool)) {
+               mempool_free(req, req->pool);
                return;
        }
 
-       if (fsf_req->qtcb) {
-               kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
+       if (req->qtcb) {
+               kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
                return;
        }
-
-       kfree(fsf_req);
 }
 
-/*
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
+ * @adapter: pointer to struct zfcp_adapter
+ *
  * Never ever call this without shutting down the adapter first.
  * Otherwise the adapter would continue using and corrupting s390 storage.
  * Included BUG_ON() call to ensure this is done.
@@ -164,2353 +128,1359 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 {
-       struct zfcp_fsf_req *fsf_req, *tmp;
+       struct zfcp_fsf_req *req, *tmp;
        unsigned long flags;
        LIST_HEAD(remove_queue);
        unsigned int i;
 
-       BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status));
+       BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
        spin_lock_irqsave(&adapter->req_list_lock, flags);
-       atomic_set(&adapter->reqs_active, 0);
        for (i = 0; i < REQUEST_LIST_SIZE; i++)
                list_splice_init(&adapter->req_list[i], &remove_queue);
        spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-       list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) {
-               list_del(&fsf_req->list);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-               zfcp_fsf_req_complete(fsf_req);
-       }
-}
-
-/*
- * function:    zfcp_fsf_req_complete
- *
- * purpose:    Updates active counts and timers for openfcp-reqs
- *              May cleanup request after req_eval returns
- *
- * returns:    0 - success
- *             !0 - failure
- *
- * context:
- */
-int
-zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
-{
-       int retval = 0;
-       int cleanup;
-
-       if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
-               ZFCP_LOG_DEBUG("Status read response received\n");
-               /*
-                * Note: all cleanup handling is done in the callchain of
-                * the function call-chain below.
-                */
-               zfcp_fsf_status_read_handler(fsf_req);
-               goto out;
-       } else {
-               del_timer(&fsf_req->timer);
-               zfcp_fsf_protstatus_eval(fsf_req);
-       }
-
-       /*
-        * fsf_req may be deleted due to waking up functions, so
-        * cleanup is saved here and used later
-        */
-       if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
-               cleanup = 1;
-       else
-               cleanup = 0;
-
-       fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
-
-       /* cleanup request if requested by initiator */
-       if (likely(cleanup)) {
-               ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req);
-               /*
-                * lock must not be held here since it will be
-                * grabed by the called routine, too
-                */
-               zfcp_fsf_req_free(fsf_req);
-       } else {
-               /* notify initiator waiting for the requests completion */
-               ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req);
-               /*
-                * FIXME: Race! We must not access fsf_req here as it might have been
-                * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
-                * flag. It's an improbable case. But, we have the same paranoia for
-                * the cleanup flag already.
-                * Might better be handled using complete()?
-                * (setting the flag and doing wakeup ought to be atomic
-                *  with regard to checking the flag as long as waitqueue is
-                *  part of the to be released structure)
-                */
-               wake_up(&fsf_req->completion_wq);
-       }
-
- out:
-       return retval;
-}
-
-/*
- * function:    zfcp_fsf_protstatus_eval
- *
- * purpose:    evaluates the QTCB of the finished FSF request
- *             and initiates appropriate actions
- *             (usually calling FSF command specific handlers)
- *
- * returns:
- *
- * context:
- *
- * locks:
- */
-static int
-zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
-{
-       int retval = 0;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fsf_qtcb *qtcb = fsf_req->qtcb;
-       union fsf_prot_status_qual *prot_status_qual =
-               &qtcb->prefix.prot_status_qual;
-
-       zfcp_hba_dbf_event_fsf_response(fsf_req);
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-               ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
-                              (unsigned long) fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-                       ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
-               goto skip_protstatus;
-       }
-
-       /* evaluate FSF Protocol Status */
-       switch (qtcb->prefix.prot_status) {
-
-       case FSF_PROT_GOOD:
-       case FSF_PROT_FSF_STATUS_PRESENTED:
-               break;
-
-       case FSF_PROT_QTCB_VERSION_ERROR:
-               ZFCP_LOG_NORMAL("error: The adapter %s contains "
-                               "microcode of version 0x%x, the device driver "
-                               "only supports 0x%x. Aborting.\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               prot_status_qual->version_error.fsf_version,
-                               ZFCP_QTCB_VERSION);
-               zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_SEQ_NUMB_ERROR:
-               ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
-                               "driver (0x%x) and adapter %s (0x%x). "
-                               "Restarting all operations on this adapter.\n",
-                               qtcb->prefix.req_seq_no,
-                               zfcp_get_busid_by_adapter(adapter),
-                               prot_status_qual->sequence_error.exp_req_seq_no);
-               zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_UNSUPP_QTCB_TYPE:
-               ZFCP_LOG_NORMAL("error: Packet header type used by the "
-                               "device driver is incompatible with "
-                               "that used on adapter %s. "
-                               "Stopping all operations on this adapter.\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
-                               &(adapter->status));
-               break;
-
-       case FSF_PROT_DUPLICATE_REQUEST_ID:
-                       ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
-                                       "to the adapter %s is ambiguous. "
-                               "Stopping all operations on this adapter.\n",
-                               *(unsigned long long*)
-                               (&qtcb->bottom.support.req_handle),
-                                       zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_LINK_DOWN:
-               zfcp_fsf_link_down_info_eval(fsf_req, 37,
-                                            &prot_status_qual->link_down_info);
-               /* FIXME: reopening adapter now? better wait for link up */
-               zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_REEST_QUEUE:
-               ZFCP_LOG_NORMAL("The local link to adapter with "
-                             "%s was re-plugged. "
-                             "Re-starting operations on this adapter.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               /* All ports should be marked as ready to run again */
-               zfcp_erp_modify_adapter_status(adapter, 28, NULL,
-                                              ZFCP_STATUS_COMMON_RUNNING,
-                                              ZFCP_SET);
-               zfcp_erp_adapter_reopen(adapter,
-                                       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
-                                       | ZFCP_STATUS_COMMON_ERP_FAILED,
-                                       99, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PROT_ERROR_STATE:
-               ZFCP_LOG_NORMAL("error: The adapter %s "
-                               "has entered the error state. "
-                               "Restarting all operations on this "
-                               "adapter.\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: Transfer protocol status information "
-                               "provided by the adapter %s "
-                               "is not compatible with the device driver. "
-                               "Stopping all operations on this adapter. "
-                               "(debug info 0x%x).\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               qtcb->prefix.prot_status);
-               zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+       list_for_each_entry_safe(req, tmp, &remove_queue, list) {
+               list_del(&req->list);
+               req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+               zfcp_fsf_req_complete(req);
        }
-
- skip_protstatus:
-       /*
-        * always call specific handlers to give them a chance to do
-        * something meaningful even in error cases
-        */
-       zfcp_fsf_fsfstatus_eval(fsf_req);
-       return retval;
 }
 
-/*
- * function:   zfcp_fsf_fsfstatus_eval
- *
- * purpose:    evaluates FSF status of completed FSF request
- *             and acts accordingly
- *
- * returns:
- */
-static int
-zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
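+/* unsolicited status: a remote port was closed; reopen or shut it down */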
+static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 {
-       int retval = 0;
-
-       if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-               goto skip_fsfstatus;
-       }
-
-       /* evaluate FSF Status */
-       switch (fsf_req->qtcb->header.fsf_status) {
-       case FSF_UNKNOWN_COMMAND:
-               ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
-                               "not known by the adapter %s "
-                               "Stopping all operations on this adapter. "
-                               "(debug info 0x%x).\n",
-                               zfcp_get_busid_by_adapter(fsf_req->adapter),
-                               fsf_req->qtcb->header.fsf_command);
-               zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_FCP_RSP_AVAILABLE:
-               ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
-                              "SCSI stack.\n");
-               break;
-
-       case FSF_ADAPTER_STATUS_AVAILABLE:
-               zfcp_fsf_fsfstatus_qual_eval(fsf_req);
-               break;
-       }
-
- skip_fsfstatus:
-       /*
-        * always call specific handlers to give them a chance to do
-        * something meaningful even in error cases
-        */
-       zfcp_fsf_req_dispatch(fsf_req);
+       struct fsf_status_read_buffer *sr_buf = req->data;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct zfcp_port *port;
+       int d_id = sr_buf->d_id & ZFCP_DID_MASK;
+       unsigned long flags;
 
-       return retval;
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       list_for_each_entry(port, &adapter->port_list_head, list)
+               if (port->d_id == d_id) {
+                       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+                       switch (sr_buf->status_subtype) {
+                       case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
+                               zfcp_erp_port_reopen(port, 0, 101, req);
+                               break;
+                       case FSF_STATUS_READ_SUB_ERROR_PORT:
+                               zfcp_erp_port_shutdown(port, 0, 122, req);
+                               break;
+                       }
+                       return;
+               }
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
-/*
- * function:   zfcp_fsf_fsfstatus_qual_eval
- *
- * purpose:    evaluates FSF status-qualifier of completed FSF request
- *             and acts accordingly
- *
- * returns:
- */
-static int
-zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
 {
-       int retval = 0;
-
-       switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
-       case FSF_SQ_FCP_RSP_AVAILABLE:
-               break;
-       case FSF_SQ_RETRY_IF_POSSIBLE:
-               /* The SCSI-stack may now issue retries or escalate */
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       case FSF_SQ_COMMAND_ABORTED:
-               /* Carry the aborted state on to upper layer */
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       case FSF_SQ_NO_RECOM:
-               ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
-                               "problem on the adapter %s "
-                               "Stopping all operations on this adapter. ",
-                               zfcp_get_busid_by_adapter(fsf_req->adapter));
-               zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       case FSF_SQ_ULP_PROGRAMMING_ERROR:
-               ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
-                               "(adapter %s)\n",
-                               zfcp_get_busid_by_adapter(fsf_req->adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-       case FSF_SQ_NO_RETRY_POSSIBLE:
-       case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-               /* dealt with in the respective functions */
-               break;
-       default:
-               ZFCP_LOG_NORMAL("bug: Additional status info could "
-                               "not be interpreted properly.\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
-                             (char *) &fsf_req->qtcb->header.fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       }
-
-       return retval;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_status_read_buffer *sr_buf = req->data;
+       struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
+
+       dev_warn(&adapter->ccw_device->dev,
+                "Warning: bit error threshold data "
+                "received for the adapter: "
+                "link failures = %i, loss of sync errors = %i, "
+                "loss of signal errors = %i, "
+                "primitive sequence errors = %i, "
+                "invalid transmission word errors = %i, "
+                "CRC errors = %i).\n",
+                err->link_failure_error_count,
+                err->loss_of_sync_error_count,
+                err->loss_of_signal_error_count,
+                err->primitive_sequence_error_count,
+                err->invalid_transmission_word_error_count,
+                err->crc_error_count);
+       dev_warn(&adapter->ccw_device->dev,
+                "Additional bit error threshold data of the adapter: "
+                "primitive sequence event time-outs = %i, "
+                "elastic buffer overrun errors = %i, "
+                "advertised receive buffer-to-buffer credit = %i, "
+                "current receice buffer-to-buffer credit = %i, "
+                "advertised transmit buffer-to-buffer credit = %i, "
+                "current transmit buffer-to-buffer credit = %i).\n",
+                err->primitive_sequence_event_timeout_count,
+                err->elastic_buffer_overrun_error_count,
+                err->advertised_receive_b2b_credit,
+                err->current_receive_b2b_credit,
+                err->advertised_transmit_b2b_credit,
+                err->current_transmit_b2b_credit);
 }
 
-/**
- * zfcp_fsf_link_down_info_eval - evaluate link down information block
- */
-static void
-zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id,
-                            struct fsf_link_down_info *link_down)
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
+                                        struct fsf_link_down_info *link_down)
 {
-       struct zfcp_adapter *adapter = fsf_req->adapter;
+       struct zfcp_adapter *adapter = req->adapter;
 
-       if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
-                            &adapter->status))
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
                return;
 
        atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
-       if (link_down == NULL)
+       if (!link_down)
                goto out;
 
        switch (link_down->error_code) {
        case FSF_PSQ_LINK_NO_LIGHT:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(no light detected)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: no light detected.\n");
                break;
        case FSF_PSQ_LINK_WRAP_PLUG:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(wrap plug detected)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: wrap plug detected.\n");
                break;
        case FSF_PSQ_LINK_NO_FCP:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(adjacent node on link does not support FCP)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "adjacent node on link does not support FCP.\n");
                break;
        case FSF_PSQ_LINK_FIRMWARE_UPDATE:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(firmware update in progress)\n",
-                               zfcp_get_busid_by_adapter(adapter));
-                       break;
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "firmware update in progress.\n");
+               break;
        case FSF_PSQ_LINK_INVALID_WWPN:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(duplicate or invalid WWPN detected)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "duplicate or invalid WWPN detected.\n");
                break;
        case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(no support for NPIV by Fabric)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "no support for NPIV by Fabric.\n");
                break;
        case FSF_PSQ_LINK_NO_FCP_RESOURCES:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(out of resource in FCP daughtercard)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "out of resource in FCP daughtercard.\n");
                break;
        case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(out of resource in Fabric)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "out of resource in Fabric.\n");
                break;
        case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(unable to Fabric login)\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link is down: "
+                        "unable to login to Fabric.\n");
                break;
        case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
-               ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "WWPN assignment file corrupted on adapter.\n");
                break;
        case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
-               ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "Mode table corrupted on adapter.\n");
                break;
        case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
-               ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "No WWPN for assignment table on adapter.\n");
                break;
        default:
-               ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-                               "(warning: unknown reason code %d)\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               link_down->error_code);
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The local link to adapter is down.\n");
        }
+out:
+       zfcp_erp_adapter_failed(adapter, id, req);
+}
 
-       if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-               ZFCP_LOG_DEBUG("Debug information to link down: "
-                              "primary_status=0x%02x "
-                              "ioerr_code=0x%02x "
-                              "action_code=0x%02x "
-                              "reason_code=0x%02x "
-                              "explanation_code=0x%02x "
-                              "vendor_specific_code=0x%02x\n",
-                               link_down->primary_status,
-                               link_down->ioerr_code,
-                               link_down->action_code,
-                               link_down->reason_code,
-                               link_down->explanation_code,
-                               link_down->vendor_specific_code);
-
- out:
-       zfcp_erp_adapter_failed(adapter, id, fsf_req);
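+/* unsolicited status: the local link is down; evaluate the reason */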
+static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_status_read_buffer *sr_buf = req->data;
+       struct fsf_link_down_info *ldi =
+               (struct fsf_link_down_info *) &sr_buf->payload;
+
+       switch (sr_buf->status_subtype) {
+       case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+               dev_warn(&adapter->ccw_device->dev,
+                        "Physical link is down.\n");
+               zfcp_fsf_link_down_info_eval(req, 38, ldi);
+               break;
+       case FSF_STATUS_READ_SUB_FDISC_FAILED:
+               dev_warn(&adapter->ccw_device->dev,
+                        "Local link is down "
+                        "due to failed FDISC login.\n");
+               zfcp_fsf_link_down_info_eval(req, 39, ldi);
+               break;
+       case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
+               dev_warn(&adapter->ccw_device->dev,
+                        "Local link is down "
+                        "due to firmware update on adapter.\n");
+               zfcp_fsf_link_down_info_eval(req, 40, NULL);
+       }
 }
 
-/*
- * function:   zfcp_fsf_req_dispatch
- *
- * purpose:    calls the appropriate command specific handler
- *
- * returns:
- */
-static int
-zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
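+/* dispatch an unsolicited status buffer according to its status type */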
+static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 {
-       struct zfcp_erp_action *erp_action = fsf_req->erp_action;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       int retval = 0;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_status_read_buffer *sr_buf = req->data;
 
+       if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+               zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
+               mempool_free(sr_buf, adapter->pool.data_status_read);
+               zfcp_fsf_req_free(req);
+               return;
+       }
 
-       switch (fsf_req->fsf_command) {
+       zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
 
-       case FSF_QTCB_FCP_CMND:
-               zfcp_fsf_send_fcp_command_handler(fsf_req);
+       switch (sr_buf->status_type) {
+       case FSF_STATUS_READ_PORT_CLOSED:
+               zfcp_fsf_status_read_port_closed(req);
                break;
-
-       case FSF_QTCB_ABORT_FCP_CMND:
-               zfcp_fsf_abort_fcp_command_handler(fsf_req);
+       case FSF_STATUS_READ_INCOMING_ELS:
+               zfcp_fc_incoming_els(req);
                break;
-
-       case FSF_QTCB_SEND_GENERIC:
-               zfcp_fsf_send_ct_handler(fsf_req);
+       case FSF_STATUS_READ_SENSE_DATA_AVAIL:
                break;
-
-       case FSF_QTCB_OPEN_PORT_WITH_DID:
-               zfcp_fsf_open_port_handler(fsf_req);
+       case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+               zfcp_fsf_bit_error_threshold(req);
                break;
-
-       case FSF_QTCB_OPEN_LUN:
-               zfcp_fsf_open_unit_handler(fsf_req);
+       case FSF_STATUS_READ_LINK_DOWN:
+               zfcp_fsf_status_read_link_down(req);
                break;
-
-       case FSF_QTCB_CLOSE_LUN:
-               zfcp_fsf_close_unit_handler(fsf_req);
+       case FSF_STATUS_READ_LINK_UP:
+               dev_info(&adapter->ccw_device->dev,
+                        "Local link was replugged.\n");
+               /* All ports should be marked as ready to run again */
+               zfcp_erp_modify_adapter_status(adapter, 30, NULL,
+                                              ZFCP_STATUS_COMMON_RUNNING,
+                                              ZFCP_SET);
+               zfcp_erp_adapter_reopen(adapter,
+                                       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+                                       ZFCP_STATUS_COMMON_ERP_FAILED,
+                                       102, req);
                break;
-
-       case FSF_QTCB_CLOSE_PORT:
-               zfcp_fsf_close_port_handler(fsf_req);
+       case FSF_STATUS_READ_NOTIFICATION_LOST:
+               if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
+                       zfcp_erp_adapter_access_changed(adapter, 135, req);
+               if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
+                       schedule_work(&adapter->scan_work);
                break;
-
-       case FSF_QTCB_CLOSE_PHYSICAL_PORT:
-               zfcp_fsf_close_physical_port_handler(fsf_req);
+       case FSF_STATUS_READ_CFDC_UPDATED:
+               zfcp_erp_adapter_access_changed(adapter, 136, req);
                break;
-
-       case FSF_QTCB_EXCHANGE_CONFIG_DATA:
-               zfcp_fsf_exchange_config_data_handler(fsf_req);
+       case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+               adapter->adapter_features = sr_buf->payload.word[0];
                break;
+       }
 
-       case FSF_QTCB_EXCHANGE_PORT_DATA:
-               zfcp_fsf_exchange_port_data_handler(fsf_req);
-               break;
+       mempool_free(sr_buf, adapter->pool.data_status_read);
+       zfcp_fsf_req_free(req);
 
-       case FSF_QTCB_SEND_ELS:
-               zfcp_fsf_send_els_handler(fsf_req);
-               break;
+       atomic_inc(&adapter->stat_miss);
+       schedule_work(&adapter->stat_work);
+}
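/* Editorial sketch, not part of the patch: one plausible shape for the
 * worker behind adapter->stat_work.  The handler above only counts the
 * consumed unsolicited status buffer in adapter->stat_miss, so the worker
 * merely has to replay zfcp_fsf_status_read() until the count is drained.
 * The function name is hypothetical; the driver's real worker may also
 * escalate to adapter recovery when the refill keeps failing.
 */
static void zfcp_status_read_refill_sketch(struct work_struct *work)
{
        struct zfcp_adapter *adapter =
                container_of(work, struct zfcp_adapter, stat_work);

        while (atomic_read(&adapter->stat_miss) > 0) {
                if (zfcp_fsf_status_read(adapter))
                        break;          /* retry after the next completion */
                atomic_dec(&adapter->stat_miss);
        }
}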
 
-       case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-               zfcp_fsf_control_file_handler(fsf_req);
+static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
+{
+       switch (req->qtcb->header.fsf_status_qual.word[0]) {
+       case FSF_SQ_FCP_RSP_AVAILABLE:
+       case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+       case FSF_SQ_NO_RETRY_POSSIBLE:
+       case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+               return;
+       case FSF_SQ_COMMAND_ABORTED:
+               req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
                break;
-
-       case FSF_QTCB_UPLOAD_CONTROL_FILE:
-               zfcp_fsf_control_file_handler(fsf_req);
+       case FSF_SQ_NO_RECOM:
+               dev_err(&req->adapter->ccw_device->dev,
+                       "No recommendation could be given for a "
+                       "problem on the adapter.\n");
+               zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
                break;
-
-       default:
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
-                               "not supported by the adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
-                       ZFCP_LOG_NORMAL
-                           ("bug: Command issued by the device driver differs "
-                            "from the command returned by the adapter %s "
-                            "(debug info 0x%x, 0x%x).\n",
-                            zfcp_get_busid_by_adapter(adapter),
-                            fsf_req->fsf_command,
-                            fsf_req->qtcb->header.fsf_command);
        }
-
-       if (!erp_action)
-               return retval;
-
-       zfcp_erp_async_handler(erp_action, 0);
-
-       return retval;
+       /* all cases that did not return above set FSFREQ_ERROR */
+       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
-/*
- * function:    zfcp_fsf_status_read
- *
- * purpose:    initiates a Status Read command at the specified adapter
- *
- * returns:
- */
-int
-zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
+static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
 {
-       struct zfcp_fsf_req *fsf_req;
-       struct fsf_status_read_buffer *status_buffer;
-       unsigned long lock_flags;
-       volatile struct qdio_buffer_element *sbale;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
-                                    req_flags | ZFCP_REQ_NO_QTCB,
-                                    adapter->pool.fsf_req_status_read,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create unsolicited status "
-                             "buffer for adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               goto failed_req_create;
-       }
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
-        sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-        fsf_req->sbale_curr = 2;
+       if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+               return;
 
-       status_buffer =
-               mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
-       if (!status_buffer) {
-               ZFCP_LOG_NORMAL("bug: could not get some buffer\n");
-               goto failed_buf;
+       switch (req->qtcb->header.fsf_status) {
+       case FSF_UNKNOWN_COMMAND:
+               dev_err(&req->adapter->ccw_device->dev,
+                       "Command issued by the device driver (0x%x) is "
+                       "not known by the adapter.\n",
+                       req->qtcb->header.fsf_command);
+               zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               break;
+       case FSF_ADAPTER_STATUS_AVAILABLE:
+               zfcp_fsf_fsfstatus_qual_eval(req);
+               break;
        }
-       memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
-       fsf_req->data = (unsigned long) status_buffer;
-
-       /* insert pointer to respective buffer */
-       sbale = zfcp_qdio_sbale_curr(fsf_req);
-       sbale->addr = (void *) status_buffer;
-       sbale->length = sizeof(struct fsf_status_read_buffer);
+}
 
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (retval) {
-               ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
-                              "environment.\n");
-               goto failed_req_send;
-       }
+static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_qtcb *qtcb = req->qtcb;
+       union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
 
-       ZFCP_LOG_TRACE("Status Read request initiated (adapter%s)\n",
-                      zfcp_get_busid_by_adapter(adapter));
-       goto out;
+       zfcp_hba_dbf_event_fsf_response(req);
 
- failed_req_send:
-       mempool_free(status_buffer, adapter->pool.data_status_read);
+       if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                       ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+               return;
+       }
 
- failed_buf:
-       zfcp_fsf_req_free(fsf_req);
- failed_req_create:
-       zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
- out:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-       return retval;
+       switch (qtcb->prefix.prot_status) {
+       case FSF_PROT_GOOD:
+       case FSF_PROT_FSF_STATUS_PRESENTED:
+               return;
+       case FSF_PROT_QTCB_VERSION_ERROR:
+               dev_err(&adapter->ccw_device->dev,
+                       "The QTCB version requested by zfcp (0x%x) is not "
+                       "supported by the FCP adapter (lowest supported "
+                       "0x%x, highest supported 0x%x).\n",
+                       FSF_QTCB_CURRENT_VERSION, psq->word[0],
+                       psq->word[1]);
+               zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
+               break;
+       case FSF_PROT_ERROR_STATE:
+       case FSF_PROT_SEQ_NUMB_ERROR:
+               zfcp_erp_adapter_reopen(adapter, 0, 98, req);
+               req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+               break;
+       case FSF_PROT_UNSUPP_QTCB_TYPE:
+               dev_err(&adapter->ccw_device->dev,
+                       "Packet header type used by the device driver is "
+                       "incompatible with that used on the adapter.\n");
+               zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
+               break;
+       case FSF_PROT_HOST_CONNECTION_INITIALIZING:
+               atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+                               &adapter->status);
+               break;
+       case FSF_PROT_DUPLICATE_REQUEST_ID:
+               dev_err(&adapter->ccw_device->dev,
+                       "The request identifier 0x%Lx is ambiguous.\n",
+                       (unsigned long long)qtcb->bottom.support.req_handle);
+               zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
+               break;
+       case FSF_PROT_LINK_DOWN:
+               zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
+               /* FIXME: reopening adapter now? better wait for link up */
+               zfcp_erp_adapter_reopen(adapter, 0, 79, req);
+               break;
+       case FSF_PROT_REEST_QUEUE:
+               /* All ports should be marked as ready to run again */
+               zfcp_erp_modify_adapter_status(adapter, 28, NULL,
+                                              ZFCP_STATUS_COMMON_RUNNING,
+                                              ZFCP_SET);
+               zfcp_erp_adapter_reopen(adapter,
+                                       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+                                       ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
+               break;
+       default:
+               dev_err(&adapter->ccw_device->dev,
+                       "Transfer protocol status information"
+                       "provided by the adapter (0x%x) "
+                       "is not compatible with the device driver.\n",
+                       qtcb->prefix.prot_status);
+               zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
+       }
+       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
-static int
-zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
+/**
+ * zfcp_fsf_req_complete - process completion of a FSF request
+ * @fsf_req: The FSF request that has been completed.
+ *
+ * When a request has been completed either from the FCP adapter,
+ * or it has been dismissed due to a queue shutdown, this function
+ * is called to process the completion status and trigger further
+ * events related to the FSF request.
+ */
+void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
-       struct fsf_status_read_buffer *status_buffer;
-       struct zfcp_adapter *adapter;
-       struct zfcp_port *port;
-       unsigned long flags;
+       if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+               zfcp_fsf_status_read_handler(req);
+               return;
+       }
 
-       status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
-       adapter = fsf_req->adapter;
+       del_timer(&req->timer);
+       zfcp_fsf_protstatus_eval(req);
+       zfcp_fsf_fsfstatus_eval(req);
+       req->handler(req);
 
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       list_for_each_entry(port, &adapter->port_list_head, list)
-           if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK))
-               break;
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+       if (req->erp_action)
+               zfcp_erp_notify(req->erp_action, 0);
+       req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
 
-       if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
-               ZFCP_LOG_NORMAL("bug: Reopen port indication received for "
-                               "nonexisting port with d_id 0x%06x on "
-                               "adapter %s. Ignored.\n",
-                               status_buffer->d_id & ZFCP_DID_MASK,
-                               zfcp_get_busid_by_adapter(adapter));
-               goto out;
-       }
+       if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+               zfcp_fsf_req_free(req);
+       else
+       /* notify initiator waiting for the requests completion */
+       /*
+        * FIXME: Race! We must not access fsf_req here as it might have been
+        * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
+        * flag. It's an improbable case. But, we have the same paranoia for
+        * the cleanup flag already.
+        * Might better be handled using complete()?
+        * (setting the flag and doing wakeup ought to be atomic
+        *  with regard to checking the flag as long as waitqueue is
+        *  part of the to be released structure)
+        */
+               wake_up(&req->completion_wq);
+}
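/* Editorial sketch, not part of the patch: the <linux/completion.h> based
 * alternative hinted at in the FIXME above.  complete() performs "mark done"
 * and "wake the waiter" under one lock, so the waiter cannot free the
 * request between the status update and the wake-up.  The names below are
 * hypothetical and only illustrate the idea.
 */
struct zfcp_fsf_req_sketch {
        struct completion done;         /* would replace completion_wq + COMPLETED flag */
};

static void zfcp_fsf_req_sketch_complete(struct zfcp_fsf_req_sketch *req)
{
        complete(&req->done);           /* waiter in wait_for_completion() may now free req */
}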
+
+static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+{
+       struct fsf_qtcb_bottom_config *bottom;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct Scsi_Host *shost = adapter->scsi_host;
 
-       switch (status_buffer->status_subtype) {
+       bottom = &req->qtcb->bottom.config;
 
-       case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
-               zfcp_erp_port_reopen(port, 0, 101, fsf_req);
-               break;
+       if (req->data)
+               memcpy(req->data, bottom, sizeof(*bottom));
 
-       case FSF_STATUS_READ_SUB_ERROR_PORT:
-               zfcp_erp_port_shutdown(port, 0, 122, fsf_req);
-               break;
+       fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
+       fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
+       fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+       fc_host_speed(shost) = bottom->fc_link_speed;
+       fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+       adapter->hydra_version = bottom->adapter_type;
+       adapter->timer_ticks = bottom->timer_interval;
 
+       if (fc_host_permanent_port_name(shost) == -1)
+               fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
+       switch (bottom->fc_topology) {
+       case FSF_TOPO_P2P:
+               adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
+               adapter->peer_wwpn = bottom->plogi_payload.wwpn;
+               adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+               fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+               if (req->erp_action)
+                       dev_info(&adapter->ccw_device->dev,
+                                "Point-to-Point fibrechannel "
+                                "configuration detected.\n");
+               break;
+       case FSF_TOPO_FABRIC:
+               fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+               if (req->erp_action)
+                       dev_info(&adapter->ccw_device->dev,
+                                "Switched fabric fibrechannel "
+                                "network detected.\n");
+               break;
+       case FSF_TOPO_AL:
+               fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+               dev_err(&adapter->ccw_device->dev,
+                       "Unsupported arbitrated loop fibrechannel "
+                       "topology detected, shutting down "
+                       "adapter.\n");
+               zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
+               return -EIO;
        default:
-               ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
-                               "for a reopen indication on port with "
-                               "d_id 0x%06x on the adapter %s. "
-                               "Ignored. (debug info 0x%x)\n",
-                               status_buffer->d_id,
-                               zfcp_get_busid_by_adapter(adapter),
-                               status_buffer->status_subtype);
+               fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+               dev_err(&adapter->ccw_device->dev,
+                       "The fibrechannel topology reported by the"
+                       " adapter is not known by the zfcp driver,"
+                       " shutting down adapter.\n");
+               zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
+               return -EIO;
        }
- out:
+
        return 0;
 }
 
-/*
- * function:    zfcp_fsf_status_read_handler
- *
- * purpose:    is called for finished Open Port command
- *
- * returns:
- */
-static int
-zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 {
-       int retval = 0;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fsf_status_read_buffer *status_buffer =
-               (struct fsf_status_read_buffer *) fsf_req->data;
-       struct fsf_bit_error_payload *fsf_bit_error;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-               zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
-               mempool_free(status_buffer, adapter->pool.data_status_read);
-               zfcp_fsf_req_free(fsf_req);
-               goto out;
-       }
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_qtcb *qtcb = req->qtcb;
+       struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
+       struct Scsi_Host *shost = adapter->scsi_host;
 
-       zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+               return;
 
-       switch (status_buffer->status_type) {
+       adapter->fsf_lic_version = bottom->lic_version;
+       adapter->adapter_features = bottom->adapter_features;
+       adapter->connection_features = bottom->connection_features;
+       adapter->peer_wwpn = 0;
+       adapter->peer_wwnn = 0;
+       adapter->peer_d_id = 0;
 
-       case FSF_STATUS_READ_PORT_CLOSED:
-               zfcp_fsf_status_read_port_closed(fsf_req);
-               break;
+       switch (qtcb->header.fsf_status) {
+       case FSF_GOOD:
+               if (zfcp_fsf_exchange_config_evaluate(req))
+                       return;
 
-       case FSF_STATUS_READ_INCOMING_ELS:
-               zfcp_fsf_incoming_els(fsf_req);
+               if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
+                       dev_err(&adapter->ccw_device->dev,
+                               "Maximum QTCB size (%d bytes) allowed by "
+                               "the adapter is lower than the minimum "
+                               "required by the driver (%ld bytes).\n",
+                               bottom->max_qtcb_size,
+                               sizeof(struct fsf_qtcb));
+                       zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
+                       return;
+               }
+               atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+                               &adapter->status);
                break;
+       case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+               fc_host_node_name(shost) = 0;
+               fc_host_port_name(shost) = 0;
+               fc_host_port_id(shost) = 0;
+               fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+               fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+               adapter->hydra_version = 0;
 
-       case FSF_STATUS_READ_SENSE_DATA_AVAIL:
-               ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               break;
+               atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+                               &adapter->status);
 
-       case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-               fsf_bit_error = (struct fsf_bit_error_payload *)
-                       status_buffer->payload;
-               ZFCP_LOG_NORMAL("Warning: bit error threshold data "
-                   "received (adapter %s, "
-                   "link failures = %i, loss of sync errors = %i, "
-                   "loss of signal errors = %i, "
-                   "primitive sequence errors = %i, "
-                   "invalid transmission word errors = %i, "
-                   "CRC errors = %i)\n",
-                   zfcp_get_busid_by_adapter(adapter),
-                   fsf_bit_error->link_failure_error_count,
-                   fsf_bit_error->loss_of_sync_error_count,
-                   fsf_bit_error->loss_of_signal_error_count,
-                   fsf_bit_error->primitive_sequence_error_count,
-                   fsf_bit_error->invalid_transmission_word_error_count,
-                   fsf_bit_error->crc_error_count);
-               ZFCP_LOG_INFO("Additional bit error threshold data "
-                   "(adapter %s, "
-                   "primitive sequence event time-outs = %i, "
-                   "elastic buffer overrun errors = %i, "
-                   "advertised receive buffer-to-buffer credit = %i, "
-                   "current receice buffer-to-buffer credit = %i, "
-                   "advertised transmit buffer-to-buffer credit = %i, "
-                   "current transmit buffer-to-buffer credit = %i)\n",
-                   zfcp_get_busid_by_adapter(adapter),
-                   fsf_bit_error->primitive_sequence_event_timeout_count,
-                   fsf_bit_error->elastic_buffer_overrun_error_count,
-                   fsf_bit_error->advertised_receive_b2b_credit,
-                   fsf_bit_error->current_receive_b2b_credit,
-                   fsf_bit_error->advertised_transmit_b2b_credit,
-                   fsf_bit_error->current_transmit_b2b_credit);
+               zfcp_fsf_link_down_info_eval(req, 42,
+                       &qtcb->header.fsf_status_qual.link_down_info);
                break;
+       default:
+               zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
+               return;
+       }
 
-       case FSF_STATUS_READ_LINK_DOWN:
-               switch (status_buffer->status_subtype) {
-               case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
-                       ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       zfcp_fsf_link_down_info_eval(fsf_req, 38,
-                               (struct fsf_link_down_info *)
-                               &status_buffer->payload);
-                       break;
-               case FSF_STATUS_READ_SUB_FDISC_FAILED:
-                       ZFCP_LOG_INFO("Local link to adapter %s is down "
-                                     "due to failed FDISC login\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       zfcp_fsf_link_down_info_eval(fsf_req, 39,
-                               (struct fsf_link_down_info *)
-                               &status_buffer->payload);
-                       break;
-               case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
-                       ZFCP_LOG_INFO("Local link to adapter %s is down "
-                                     "due to firmware update on adapter\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL);
-                       break;
-               default:
-                       ZFCP_LOG_INFO("Local link to adapter %s is down "
-                                     "due to unknown reason\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL);
-               };
-               break;
+       if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+               adapter->hardware_version = bottom->hardware_version;
+               memcpy(fc_host_serial_number(shost), bottom->serial_number,
+                      min(FC_SERIAL_NUMBER_SIZE, 17));
+               EBCASC(fc_host_serial_number(shost),
+                      min(FC_SERIAL_NUMBER_SIZE, 17));
+       }
 
-       case FSF_STATUS_READ_LINK_UP:
-               ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
-                               "Restarting operations on this adapter\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               /* All ports should be marked as ready to run again */
-               zfcp_erp_modify_adapter_status(adapter, 30, NULL,
-                                              ZFCP_STATUS_COMMON_RUNNING,
-                                              ZFCP_SET);
-               zfcp_erp_adapter_reopen(adapter,
-                                       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
-                                       | ZFCP_STATUS_COMMON_ERP_FAILED,
-                                       102, fsf_req);
-               break;
+       if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
+               dev_err(&adapter->ccw_device->dev,
+                       "The adapter only supports newer control block "
+                       "versions, try updated device driver.\n");
+               zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
+               return;
+       }
+       if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
+               dev_err(&adapter->ccw_device->dev,
+                       "The adapter only supports older control block "
+                       "versions, consider a microcode upgrade.\n");
+               zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
+       }
+}
 
-       case FSF_STATUS_READ_NOTIFICATION_LOST:
-               ZFCP_LOG_NORMAL("Unsolicited status notification(s) lost: "
-                               "adapter %s%s%s%s%s%s%s%s%s\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_INCOMING_ELS) ?
-                                       ", incoming ELS" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_SENSE_DATA) ?
-                                       ", sense data" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_LINK_STATUS) ?
-                                       ", link status change" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_PORT_CLOSED) ?
-                                       ", port close" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD) ?
-                                       ", bit error exception" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_ACT_UPDATED) ?
-                                       ", ACT update" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_ACT_HARDENED) ?
-                                       ", ACT hardening" : "",
-                               (status_buffer->status_subtype &
-                                       FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT) ?
-                                       ", adapter feature change" : "");
-
-               if (status_buffer->status_subtype &
-                   FSF_STATUS_READ_SUB_ACT_UPDATED)
-                       zfcp_erp_adapter_access_changed(adapter, 135, fsf_req);
-               break;
+static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
+       struct Scsi_Host *shost = adapter->scsi_host;
 
-       case FSF_STATUS_READ_CFDC_UPDATED:
-               ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_access_changed(adapter, 136, fsf_req);
-               break;
+       if (req->data)
+               memcpy(req->data, bottom, sizeof(*bottom));
 
-       case FSF_STATUS_READ_CFDC_HARDENED:
-               switch (status_buffer->status_subtype) {
-               case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
-                       ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       break;
-               case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
-                       ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
-                                     "to the secondary SE\n",
-                               zfcp_get_busid_by_adapter(adapter));
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-               }
-               break;
+       if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+               fc_host_permanent_port_name(shost) = bottom->wwpn;
+       else
+               fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+       fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+       fc_host_supported_speeds(shost) = bottom->supported_speed;
+}
 
-       case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
-               ZFCP_LOG_INFO("List of supported features on adapter %s has "
-                             "been changed from 0x%08X to 0x%08X\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             *(u32*) (status_buffer->payload + 4),
-                             *(u32*) (status_buffer->payload));
-               adapter->adapter_features = *(u32*) status_buffer->payload;
-               break;
+static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct fsf_qtcb *qtcb = req->qtcb;
 
-       default:
-               ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
-                               "type was received (debug info 0x%x)\n",
-                               status_buffer->status_type);
-               ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
-                              status_buffer);
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) status_buffer,
-                             sizeof (struct fsf_status_read_buffer));
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+               return;
+
+       switch (qtcb->header.fsf_status) {
+       case FSF_GOOD:
+               zfcp_fsf_exchange_port_evaluate(req);
+               atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+               break;
+       case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+               zfcp_fsf_exchange_port_evaluate(req);
+               atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+               zfcp_fsf_link_down_info_eval(req, 43,
+                       &qtcb->header.fsf_status_qual.link_down_info);
                break;
        }
-       mempool_free(status_buffer, adapter->pool.data_status_read);
-       zfcp_fsf_req_free(fsf_req);
-       /*
-        * recycle buffer and start new request repeat until outbound
-        * queue is empty or adapter shutdown is requested
-        */
-       /*
-        * FIXME(qdio):
-        * we may wait in the req_create for 5s during shutdown, so
-        * qdio_cleanup will have to wait at least that long before returning
-        * with failure to allow us a proper cleanup under all circumstances
-        */
-       /*
-        * FIXME:
-        * allocation failure possible? (Is this code needed?)
-        */
-       retval = zfcp_fsf_status_read(adapter, 0);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("Failed to create unsolicited status read "
-                             "request for the adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               /* temporary fix to avoid status read buffer shortage */
-               adapter->status_read_failed++;
-               if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
-                   < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
-                       ZFCP_LOG_INFO("restart adapter %s due to status read "
-                                     "buffer shortage\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                       zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req);
-               }
-       }
- out:
-       return retval;
 }
 
-/*
- * function:    zfcp_fsf_abort_fcp_command
- *
- * purpose:    tells FSF to abort a running SCSI command
- *
- * returns:    address of initiated FSF request
- *             NULL - request could not be initiated
- *
- * FIXME(design): should be watched by a timeout !!!
- * FIXME(design) shouldn't this be modified to return an int
- *               also...don't know how though
- */
-struct zfcp_fsf_req *
-zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
-                          struct zfcp_adapter *adapter,
-                          struct zfcp_unit *unit, int req_flags)
+static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue)
+{
+       spin_lock(&queue->lock);
+       if (atomic_read(&queue->count))
+               return 1;
+       spin_unlock(&queue->lock);
+       return 0;
+}
+
+static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+{
+       long ret;
+       struct zfcp_qdio_queue *req_q = &adapter->req_q;
+
+       spin_unlock(&req_q->lock);
+       ret = wait_event_interruptible_timeout(adapter->request_wq,
+                                       zfcp_fsf_sbal_check(req_q), 5 * HZ);
+       if (ret > 0)
+               return 0;
+
+       spin_lock(&req_q->lock);
+       return -EIO;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
+{
+       struct zfcp_fsf_req *req;
+       req = mempool_alloc(pool, GFP_ATOMIC);
+       if (!req)
+               return NULL;
+       memset(req, 0, sizeof(*req));
+       return req;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
+{
+       struct zfcp_fsf_req_qtcb *qtcb;
+
+       if (likely(pool))
+               qtcb = mempool_alloc(pool, GFP_ATOMIC);
+       else
+               qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
+                                       GFP_ATOMIC);
+       if (unlikely(!qtcb))
+               return NULL;
+
+       memset(qtcb, 0, sizeof(*qtcb));
+       qtcb->fsf_req.qtcb = &qtcb->qtcb;
+       qtcb->fsf_req.pool = pool;
+
+       return &qtcb->fsf_req;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
+                                               u32 fsf_cmd, int req_flags,
+                                               mempool_t *pool)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req = NULL;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
-                                    req_flags, adapter->pool.fsf_req_abort,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Failed to create an abort command "
-                             "request for lun 0x%016Lx on port 0x%016Lx "
-                             "on adapter %s.\n",
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_adapter(adapter));
-               goto out;
+
+       struct zfcp_fsf_req *req;
+       struct zfcp_qdio_queue *req_q = &adapter->req_q;
+
+       if (req_flags & ZFCP_REQ_NO_QTCB)
+               req = zfcp_fsf_alloc_noqtcb(pool);
+       else
+               req = zfcp_fsf_alloc_qtcb(pool);
+
+       if (unlikely(!req))
+               return ERR_PTR(-EIO);
+
+       if (adapter->req_no == 0)
+               adapter->req_no++;
+
+       INIT_LIST_HEAD(&req->list);
+       init_timer(&req->timer);
+       init_waitqueue_head(&req->completion_wq);
+
+       req->adapter = adapter;
+       req->fsf_command = fsf_cmd;
+       req->req_id = adapter->req_no++;
+       req->sbal_number = 1;
+       req->sbal_first = req_q->first;
+       req->sbal_last = req_q->first;
+       req->sbale_curr = 1;
+
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].addr = (void *) req->req_id;
+       sbale[0].flags |= SBAL_FLAGS0_COMMAND;
+
+       if (likely(req->qtcb)) {
+               req->qtcb->prefix.req_id = req->req_id;
+               req->qtcb->prefix.ulp_info = 26;
+               req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+               req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
+               req->qtcb->header.req_handle = req->req_id;
+               req->qtcb->header.fsf_command = req->fsf_command;
+               req->seq_no = adapter->fsf_req_seq_no;
+               req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+               sbale[1].addr = (void *) req->qtcb;
+               sbale[1].length = sizeof(struct fsf_qtcb);
        }
 
-       if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                       &unit->status)))
-               goto unit_blocked;
+       if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
+               zfcp_fsf_req_free(req);
+               return ERR_PTR(-EIO);
+       }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
+               req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 
-       fsf_req->data = (unsigned long) unit;
+       return req;
+}
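/* Editorial sketch, not part of the patch: the calling convention that the
 * request constructors in this file share.  A caller takes the request
 * queue lock, waits for a free SBAL, builds the request and drops the lock
 * only after zfcp_fsf_req_send(); zfcp_fsf_status_read() further down is a
 * concrete instance.  The wrapper name is hypothetical.
 */
static int zfcp_fsf_send_simple_sketch(struct zfcp_adapter *adapter, u32 fsf_cmd)
{
        struct zfcp_fsf_req *req;
        int retval = -EIO;

        spin_lock(&adapter->req_q.lock);
        if (zfcp_fsf_req_sbal_get(adapter))
                goto out;

        req = zfcp_fsf_req_create(adapter, fsf_cmd, ZFCP_REQ_AUTO_CLEANUP, NULL);
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
                goto out;
        }

        /* ... set req->handler, QTCB fields and SBALEs for fsf_cmd here ... */

        retval = zfcp_fsf_req_send(req);
        if (retval)
                zfcp_fsf_req_free(req);
out:
        spin_unlock(&adapter->req_q.lock);
        return retval;
}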
 
-       /* set handles of unit and its parent port in QTCB */
-       fsf_req->qtcb->header.lun_handle = unit->handle;
-       fsf_req->qtcb->header.port_handle = unit->port->handle;
+static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+{
+       struct zfcp_adapter *adapter = req->adapter;
+       struct zfcp_qdio_queue *req_q = &adapter->req_q;
+       int idx;
 
-       /* set handle of request which should be aborted */
-       fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+       /* put allocated FSF request into hash table */
+       spin_lock(&adapter->req_list_lock);
+       idx = zfcp_reqlist_hash(req->req_id);
+       list_add_tail(&req->list, &adapter->req_list[idx]);
+       spin_unlock(&adapter->req_list_lock);
 
-       zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (!retval)
-               goto out;
+       req->issued = get_clock();
+       if (zfcp_qdio_send(req)) {
+               /* Queues are down..... */
+               del_timer(&req->timer);
+               spin_lock(&adapter->req_list_lock);
+               zfcp_reqlist_remove(adapter, req);
+               spin_unlock(&adapter->req_list_lock);
+               /* undo changes in request queue made for this request */
+               atomic_add(req->sbal_number, &req_q->count);
+               req_q->first -= req->sbal_number;
+               req_q->first += QDIO_MAX_BUFFERS_PER_Q;
+               req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
+               zfcp_erp_adapter_reopen(adapter, 0, 116, req);
+               return -EIO;
+       }
 
- unit_blocked:
-               zfcp_fsf_req_free(fsf_req);
-               fsf_req = NULL;
+       /* Don't increase for unsolicited status */
+       if (req->qtcb)
+               adapter->fsf_req_seq_no++;
 
- out:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-       return fsf_req;
+       return 0;
 }
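/* Editorial sketch, not part of the patch: the lookup that pairs with the
 * hash-table insertion above.  The response path must map the request id
 * reported by the adapter back to its zfcp_fsf_req; callers would hold
 * adapter->req_list_lock.  The helper name is hypothetical.
 */
static struct zfcp_fsf_req *zfcp_reqlist_find_sketch(struct zfcp_adapter *adapter,
                                                     unsigned long req_id)
{
        struct zfcp_fsf_req *req;
        int idx = zfcp_reqlist_hash(req_id);

        list_for_each_entry(req, &adapter->req_list[idx], list)
                if (req->req_id == req_id)
                        return req;
        return NULL;
}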
 
-/*
- * function:    zfcp_fsf_abort_fcp_command_handler
- *
- * purpose:    is called for finished Abort FCP Command request
- *
- * returns:
+/**
+ * zfcp_fsf_status_read - send status read request
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: 0 on success, a negative error code otherwise
  */
-static int
-zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
+int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
 {
-       int retval = -EINVAL;
-       struct zfcp_unit *unit;
-       union fsf_status_qual *fsf_stat_qual =
-               &new_fsf_req->qtcb->header.fsf_status_qual;
+       struct zfcp_fsf_req *req;
+       struct fsf_status_read_buffer *sr_buf;
+       volatile struct qdio_buffer_element *sbale;
+       int retval = -EIO;
 
-       if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
-               goto skip_fsfstatus;
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
+                                 ZFCP_REQ_NO_QTCB,
+                                 adapter->pool.fsf_req_status_read);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
+       }
+
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
+       sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
+       req->sbale_curr = 2;
+
+       sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
+       if (!sr_buf) {
+               retval = -ENOMEM;
+               goto failed_buf;
        }
+       memset(sr_buf, 0, sizeof(*sr_buf));
+       req->data = sr_buf;
+       sbale = zfcp_qdio_sbale_curr(req);
+       sbale->addr = (void *) sr_buf;
+       sbale->length = sizeof(*sr_buf);
 
-       unit = (struct zfcp_unit *) new_fsf_req->data;
+       retval = zfcp_fsf_req_send(req);
+       if (retval)
+               goto failed_req_send;
 
-       /* evaluate FSF status in QTCB */
-       switch (new_fsf_req->qtcb->header.fsf_status) {
+       goto out;
+
+failed_req_send:
+       mempool_free(sr_buf, adapter->pool.data_status_read);
+failed_buf:
+       zfcp_fsf_req_free(req);
+       zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return retval;
+}
+
+static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+{
+       struct zfcp_unit *unit = req->data;
+       union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
 
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+               return;
+
+       switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
-               if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-                       /*
-                        * In this case a command that was sent prior to a port
-                        * reopen was aborted (handles are different). This is
-                        * fine.
-                        */
-               } else {
-                       ZFCP_LOG_INFO("Temporary port identifier 0x%x for "
-                                     "port 0x%016Lx on adapter %s invalid. "
-                                     "This may happen occasionally.\n",
-                                     unit->port->handle,
-                                     unit->port->wwpn,
-                                     zfcp_get_busid_by_unit(unit));
-                       ZFCP_LOG_INFO("status qualifier:\n");
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-                                     (char *) &new_fsf_req->qtcb->header.
-                                     fsf_status_qual,
-                                     sizeof (union fsf_status_qual));
-                       /* Let's hope this sorts out the mess */
+               if (fsq->word[0] == fsq->word[1]) {
                        zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
-                                               new_fsf_req);
-                       new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                                               req);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                }
                break;
-
        case FSF_LUN_HANDLE_NOT_VALID:
-               if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-                       /*
-                        * In this case a command that was sent prior to a unit
-                        * reopen was aborted (handles are different).
-                        * This is fine.
-                        */
-               } else {
-                       ZFCP_LOG_INFO
-                           ("Warning: Temporary LUN identifier 0x%x of LUN "
-                            "0x%016Lx on port 0x%016Lx on adapter %s is "
-                            "invalid. This may happen in rare cases. "
-                            "Trying to re-establish link.\n",
-                            unit->handle,
-                            unit->fcp_lun,
-                            unit->port->wwpn,
-                            zfcp_get_busid_by_unit(unit));
-                       ZFCP_LOG_DEBUG("Status qualifier data:\n");
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                                     (char *) &new_fsf_req->qtcb->header.
-                                     fsf_status_qual,
-                                     sizeof (union fsf_status_qual));
-                       /* Let's hope this sorts out the mess */
-                       zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req);
-                       new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               if (fsq->word[0] == fsq->word[1]) {
+                       zfcp_erp_port_reopen(unit->port, 0, 105, req);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                }
                break;
-
        case FSF_FCP_COMMAND_DOES_NOT_EXIST:
-               retval = 0;
-               new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
+               req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
                break;
-
        case FSF_PORT_BOXED:
-               ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to "
-                             "be reopened\n", unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit));
-               zfcp_erp_port_boxed(unit->port, 47, new_fsf_req);
-               new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-                   | ZFCP_STATUS_FSFREQ_RETRY;
+               zfcp_erp_port_boxed(unit->port, 47, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
        case FSF_LUN_BOXED:
-                ZFCP_LOG_INFO(
-                        "unit 0x%016Lx on port 0x%016Lx on adapter %s needs "
-                        "to be reopened\n",
-                        unit->fcp_lun, unit->port->wwpn,
-                        zfcp_get_busid_by_unit(unit));
-               zfcp_erp_unit_boxed(unit, 48, new_fsf_req);
-                new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-                        | ZFCP_STATUS_FSFREQ_RETRY;
+               zfcp_erp_unit_boxed(unit, 48, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                 break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
-               switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+               switch (fsq->word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
                        zfcp_test_link(unit->port);
-                       new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* SCSI stack will escalate */
-                       new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("bug: Wrong status qualifier 0x%x arrived.\n",
-                            new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                }
                break;
-
        case FSF_GOOD:
-               retval = 0;
-               new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               new_fsf_req->qtcb->header.fsf_status);
+               req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
                break;
        }
- skip_fsfstatus:
-       return retval;
 }
 
 /**
- * zfcp_use_one_sbal - checks whether req buffer and resp bother each fit into
- *     one SBALE
- * Two scatter-gather lists are passed, one for the reqeust and one for the
- * response.
+ * zfcp_fsf_abort_fcp_command - abort running SCSI command
+ * @old_req_id: unsigned long, id of the request to abort
+ * @adapter: pointer to struct zfcp_adapter
+ * @unit: pointer to struct zfcp_unit
+ * @req_flags: integer specifying the request flags
+ * Returns: pointer to struct zfcp_fsf_req
+ *
+ * FIXME(design): should be watched by a timeout !!!
  */
-static inline int
-zfcp_use_one_sbal(struct scatterlist *req, int req_count,
-                  struct scatterlist *resp, int resp_count)
-{
-        return ((req_count == 1) &&
-               (resp_count == 1) &&
-                (((unsigned long) zfcp_sg_to_address(&req[0]) &
-                 PAGE_MASK) ==
-                ((unsigned long) (zfcp_sg_to_address(&req[0]) +
-                                  req[0].length - 1) & PAGE_MASK)) &&
-                (((unsigned long) zfcp_sg_to_address(&resp[0]) &
-                 PAGE_MASK) ==
-                 ((unsigned long) (zfcp_sg_to_address(&resp[0]) +
-                                  resp[0].length - 1) & PAGE_MASK)));
-}
 
-/**
- * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
- * @ct: pointer to struct zfcp_send_ct which conatins all needed data for
- *     the request
- * @pool: pointer to memory pool, if non-null this pool is used to allocate
- *     a struct zfcp_fsf_req
- * @erp_action: pointer to erp_action, if non-null the Generic Service request
- *     is sent within error recovery
- */
-int
-zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
-                struct zfcp_erp_action *erp_action)
+struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
+                                               struct zfcp_adapter *adapter,
+                                               struct zfcp_unit *unit,
+                                               int req_flags)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_port *port;
-       struct zfcp_adapter *adapter;
-        struct zfcp_fsf_req *fsf_req;
-        unsigned long lock_flags;
-        int bytes;
-       int ret = 0;
-
-       port = ct->port;
-       adapter = port->adapter;
-
-       ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
-                                 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                 pool, &lock_flags, &fsf_req);
-       if (ret < 0) {
-                ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for "
-                             "adapter: %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               goto failed_req;
-       }
+       struct zfcp_fsf_req *req = NULL;
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        if (zfcp_use_one_sbal(ct->req, ct->req_count,
-                              ct->resp, ct->resp_count)){
-                /* both request buffer and response buffer
-                   fit into one sbale each */
-                sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-                sbale[2].addr = zfcp_sg_to_address(&ct->req[0]);
-                sbale[2].length = ct->req[0].length;
-                sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
-                sbale[3].length = ct->resp[0].length;
-                sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-       } else if (adapter->adapter_features &
-                   FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
-                /* try to use chained SBALs */
-                bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-                                                SBAL_FLAGS0_TYPE_WRITE_READ,
-                                                ct->req, ct->req_count,
-                                                ZFCP_MAX_SBALS_PER_CT_REQ);
-                if (bytes <= 0) {
-                        ZFCP_LOG_INFO("error: creation of CT request failed "
-                                     "on adapter %s\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                        if (bytes == 0)
-                                ret = -ENOMEM;
-                        else
-                                ret = bytes;
-
-                        goto failed_send;
-                }
-                fsf_req->qtcb->bottom.support.req_buf_length = bytes;
-                fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
-                bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-                                                SBAL_FLAGS0_TYPE_WRITE_READ,
-                                                ct->resp, ct->resp_count,
-                                                ZFCP_MAX_SBALS_PER_CT_REQ);
-                if (bytes <= 0) {
-                        ZFCP_LOG_INFO("error: creation of CT request failed "
-                                     "on adapter %s\n",
-                                     zfcp_get_busid_by_adapter(adapter));
-                        if (bytes == 0)
-                                ret = -ENOMEM;
-                        else
-                                ret = bytes;
-
-                        goto failed_send;
-                }
-                fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
-        } else {
-                /* reject send generic request */
-               ZFCP_LOG_INFO(
-                       "error: microcode does not support chained SBALs,"
-                        "CT request too big (adapter %s)\n",
-                       zfcp_get_busid_by_adapter(adapter));
-                ret = -EOPNOTSUPP;
-                goto failed_send;
-        }
-
-       /* settings in QTCB */
-       fsf_req->qtcb->header.port_handle = port->handle;
-       fsf_req->qtcb->bottom.support.service_class =
-               ZFCP_FC_SERVICE_CLASS_DEFAULT;
-       fsf_req->qtcb->bottom.support.timeout = ct->timeout;
-        fsf_req->data = (unsigned long) ct;
-
-       zfcp_san_dbf_event_ct_request(fsf_req);
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
+                                 req_flags, adapter->pool.fsf_req_abort);
+       if (unlikely(IS_ERR(req)))
+               goto out;
 
-       if (erp_action) {
-               erp_action->fsf_req = fsf_req;
-               fsf_req->erp_action = erp_action;
-               zfcp_erp_start_timer(fsf_req);
-       } else
-               zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
+       if (unlikely(!(atomic_read(&unit->status) &
+                      ZFCP_STATUS_COMMON_UNBLOCKED)))
+               goto out_error_free;
 
-       ret = zfcp_fsf_req_send(fsf_req);
-       if (ret) {
-               ZFCP_LOG_DEBUG("error: initiation of CT request failed "
-                              "(adapter %s, port 0x%016Lx)\n",
-                              zfcp_get_busid_by_adapter(adapter), port->wwpn);
-               goto failed_send;
-       }
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(adapter), port->wwpn);
-       goto out;
+       req->data = unit;
+       req->handler = zfcp_fsf_abort_fcp_command_handler;
+       req->qtcb->header.lun_handle = unit->handle;
+       req->qtcb->header.port_handle = unit->port->handle;
+       req->qtcb->bottom.support.req_handle = (u64) old_req_id;
 
- failed_send:
-       zfcp_fsf_req_free(fsf_req);
-        if (erp_action != NULL) {
-                erp_action->fsf_req = NULL;
-        }
- failed_req:
- out:
-        write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                               lock_flags);
-       return ret;
+       zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+       if (!zfcp_fsf_req_send(req))
+               goto out;
+
+out_error_free:
+       zfcp_fsf_req_free(req);
+       req = NULL;
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return req;
 }
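/* Editorial sketch, not part of the patch: how a SCSI error-handling caller
 * can consume the request returned above when ZFCP_REQ_AUTO_CLEANUP is not
 * set.  It waits until zfcp_fsf_req_complete() marks the request completed,
 * then checks the abort status bits set by the abort handler.  The function
 * name is hypothetical.
 */
static int zfcp_wait_for_abort_sketch(struct zfcp_fsf_req *req)
{
        int retval;

        wait_event(req->completion_wq,
                   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);

        if (req->status & (ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED |
                           ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED))
                retval = 0;     /* command aborted or no longer pending */
        else
                retval = -EIO;  /* abort did not succeed, escalate */

        zfcp_fsf_req_free(req);
        return retval;
}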
 
-/**
- * zfcp_fsf_send_ct_handler - handler for Generic Service requests
- * @fsf_req: pointer to struct zfcp_fsf_req
- *
- * Data specific for the Generic Service request is passed using
- * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
- * Usually a specific handler for the CT request is called which is
- * found in this structure.
- */
-static int
-zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 {
-       struct zfcp_port *port;
-       struct zfcp_adapter *adapter;
-       struct zfcp_send_ct *send_ct;
-       struct fsf_qtcb_header *header;
-       struct fsf_qtcb_bottom_support *bottom;
-       int retval = -EINVAL;
-       u16 subtable, rule, counter;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct zfcp_send_ct *send_ct = req->data;
+       struct zfcp_port *port = send_ct->port;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
 
-       adapter = fsf_req->adapter;
-       send_ct = (struct zfcp_send_ct *) fsf_req->data;
-       port = send_ct->port;
-       header = &fsf_req->qtcb->header;
-       bottom = &fsf_req->qtcb->bottom.support;
+       send_ct->status = -EINVAL;
 
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
 
-       /* evaluate FSF status in QTCB */
        switch (header->fsf_status) {
-
         case FSF_GOOD:
-               zfcp_san_dbf_event_ct_response(fsf_req);
-                retval = 0;
+               zfcp_san_dbf_event_ct_response(req);
+               send_ct->status = 0;
                break;
-
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-               ZFCP_LOG_INFO("error: adapter %s does not support fc "
-                             "class %d.\n",
-                             zfcp_get_busid_by_port(port),
-                             ZFCP_FC_SERVICE_CLASS_DEFAULT);
-               /* stop operation for this adapter */
-               zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_fsf_class_not_supp(req);
                break;
-
         case FSF_ADAPTER_STATUS_AVAILABLE:
                 switch (header->fsf_status_qual.word[0]){
                 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* reopening link to port */
                        zfcp_test_link(port);
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
                 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
-                default:
-                       ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x "
-                                     "arrived.\n",
-                                     header->fsf_status_qual.word[0]);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                 }
                 break;
-
        case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("access denied, cannot send generic service "
-                               "command (adapter %s, port d_id=0x%06x)\n",
-                               zfcp_get_busid_by_port(port), port->d_id);
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                                       ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               zfcp_erp_port_access_denied(port, 55, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-        case FSF_GENERIC_COMMAND_REJECTED:
-               ZFCP_LOG_INFO("generic service command rejected "
-                             "(adapter %s, port d_id=0x%06x)\n",
-                             zfcp_get_busid_by_port(port), port->d_id);
-               ZFCP_LOG_INFO("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_fsf_access_denied_port(req, port);
                break;
-
-        case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port "
-                              "0x%016Lx on adapter %s invalid. This may "
-                              "happen occasionally.\n", port->handle,
-                              port->wwpn, zfcp_get_busid_by_port(port));
-               ZFCP_LOG_INFO("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
         case FSF_PORT_BOXED:
-               ZFCP_LOG_INFO("port needs to be reopened "
-                             "(adapter %s, port d_id=0x%06x)\n",
-                             zfcp_get_busid_by_port(port), port->d_id);
-               zfcp_erp_port_boxed(port, 49, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-                   | ZFCP_STATUS_FSFREQ_RETRY;
+               zfcp_erp_port_boxed(port, 49, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
-       /* following states should never occure, all cases avoided
-          in zfcp_fsf_send_ct - but who knows ... */
+       case FSF_PORT_HANDLE_NOT_VALID:
+               zfcp_erp_adapter_reopen(adapter, 0, 106, req);
+       case FSF_GENERIC_COMMAND_REJECTED:
        case FSF_PAYLOAD_SIZE_MISMATCH:
-               ZFCP_LOG_INFO("payload size mismatch (adapter: %s, "
-                             "req_buf_length=%d, resp_buf_length=%d)\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             bottom->req_buf_length, bottom->resp_buf_length);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
        case FSF_REQUEST_SIZE_TOO_LARGE:
-               ZFCP_LOG_INFO("request size too large (adapter: %s, "
-                             "req_buf_length=%d)\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             bottom->req_buf_length);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
        case FSF_RESPONSE_SIZE_TOO_LARGE:
-               ZFCP_LOG_INFO("response size too large (adapter: %s, "
-                             "resp_buf_length=%d)\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             bottom->resp_buf_length);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
        case FSF_SBAL_MISMATCH:
-               ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
-                             "resp_buf_length=%d)\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             bottom->req_buf_length, bottom->resp_buf_length);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n", header->fsf_status);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
        }
 
 skip_fsfstatus:
-       send_ct->status = retval;
-
-       if (send_ct->handler != NULL)
+       if (send_ct->handler)
                send_ct->handler(send_ct->handler_data);
+}
 
-       return retval;
+static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
+                               struct scatterlist *sg_req,
+                               struct scatterlist *sg_resp, int max_sbals)
+{
+       int bytes;
+
+       bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+                                       sg_req, max_sbals);
+       if (bytes <= 0)
+               return -ENOMEM;
+       req->qtcb->bottom.support.req_buf_length = bytes;
+       req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+
+       bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+                                       sg_resp, max_sbals);
+       if (bytes <= 0)
+               return -ENOMEM;
+       req->qtcb->bottom.support.resp_buf_length = bytes;
+
+       return 0;
 }
 
 /**
- * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
- * @els: pointer to struct zfcp_send_els which contains all needed data for
- *     the command.
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @ct: pointer to struct zfcp_send_ct with data for request
+ * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @erp_action: if non-null the Generic Service request is sent within ERP
  */
-int
-zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
+                    struct zfcp_erp_action *erp_action)
 {
-       volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       u32 d_id;
-       struct zfcp_adapter *adapter;
-       unsigned long lock_flags;
-        int bytes;
-       int ret = 0;
+       struct zfcp_port *port = ct->port;
+       struct zfcp_adapter *adapter = port->adapter;
+       struct zfcp_fsf_req *req;
+       int ret = -EIO;
 
-       d_id = els->d_id;
-       adapter = els->adapter;
-
-        ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
-                                 ZFCP_REQ_AUTO_CLEANUP,
-                                 NULL, &lock_flags, &fsf_req);
-       if (ret < 0) {
-                ZFCP_LOG_INFO("error: creation of ELS request failed "
-                             "(adapter %s, port d_id: 0x%06x)\n",
-                              zfcp_get_busid_by_adapter(adapter), d_id);
-                goto failed_req;
-       }
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
 
-       if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                       &els->port->status))) {
-               ret = -EBUSY;
-               goto port_blocked;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
+                                 ZFCP_REQ_AUTO_CLEANUP, pool);
+       if (unlikely(IS_ERR(req))) {
+               ret = PTR_ERR(req);
+               goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        if (zfcp_use_one_sbal(els->req, els->req_count,
-                              els->resp, els->resp_count)){
-                /* both request buffer and response buffer
-                   fit into one sbale each */
-                sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-                sbale[2].addr = zfcp_sg_to_address(&els->req[0]);
-                sbale[2].length = els->req[0].length;
-                sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
-                sbale[3].length = els->resp[0].length;
-                sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-       } else if (adapter->adapter_features &
-                   FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
-                /* try to use chained SBALs */
-                bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-                                                SBAL_FLAGS0_TYPE_WRITE_READ,
-                                                els->req, els->req_count,
-                                                ZFCP_MAX_SBALS_PER_ELS_REQ);
-                if (bytes <= 0) {
-                        ZFCP_LOG_INFO("error: creation of ELS request failed "
-                                     "(adapter %s, port d_id: 0x%06x)\n",
-                                     zfcp_get_busid_by_adapter(adapter), d_id);
-                        if (bytes == 0) {
-                                ret = -ENOMEM;
-                        } else {
-                                ret = bytes;
-                        }
-                        goto failed_send;
-                }
-                fsf_req->qtcb->bottom.support.req_buf_length = bytes;
-                fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
-                bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-                                                SBAL_FLAGS0_TYPE_WRITE_READ,
-                                                els->resp, els->resp_count,
-                                                ZFCP_MAX_SBALS_PER_ELS_REQ);
-                if (bytes <= 0) {
-                        ZFCP_LOG_INFO("error: creation of ELS request failed "
-                                     "(adapter %s, port d_id: 0x%06x)\n",
-                                     zfcp_get_busid_by_adapter(adapter), d_id);
-                        if (bytes == 0) {
-                                ret = -ENOMEM;
-                        } else {
-                                ret = bytes;
-                        }
-                        goto failed_send;
-                }
-                fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
-        } else {
-                /* reject request */
-               ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
-                              ", ELS request too big (adapter %s, "
-                             "port d_id: 0x%06x)\n",
-                             zfcp_get_busid_by_adapter(adapter), d_id);
-                ret = -EOPNOTSUPP;
-                goto failed_send;
-        }
-
-       /* settings in QTCB */
-       fsf_req->qtcb->bottom.support.d_id = d_id;
-       fsf_req->qtcb->bottom.support.service_class =
-               ZFCP_FC_SERVICE_CLASS_DEFAULT;
-       fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
-       fsf_req->data = (unsigned long) els;
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-
-       zfcp_san_dbf_event_els_request(fsf_req);
-
-       zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-       ret = zfcp_fsf_req_send(fsf_req);
-       if (ret) {
-               ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
-                              "(adapter %s, port d_id: 0x%06x)\n",
-                              zfcp_get_busid_by_adapter(adapter), d_id);
+       ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
+                                  FSF_MAX_SBALS_PER_REQ);
+       if (ret)
                goto failed_send;
-       }
 
-       ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
-                      "0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
-       goto out;
+       req->handler = zfcp_fsf_send_ct_handler;
+       req->qtcb->header.port_handle = port->handle;
+       req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+       req->qtcb->bottom.support.timeout = ct->timeout;
+       req->data = ct;
+
+       zfcp_san_dbf_event_ct_request(req);
 
- port_blocked:
- failed_send:
-       zfcp_fsf_req_free(fsf_req);
+       if (erp_action) {
+               erp_action->fsf_req = req;
+               req->erp_action = erp_action;
+               zfcp_fsf_start_erp_timer(req);
+       } else
+               zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 
- failed_req:
- out:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                               lock_flags);
+       ret = zfcp_fsf_req_send(req);
+       if (ret)
+               goto failed_send;
 
-        return ret;
+       goto out;
+
+failed_send:
+       zfcp_fsf_req_free(req);
+       if (erp_action)
+               erp_action->fsf_req = NULL;
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return ret;
 }
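
For orientation, a minimal caller-side sketch of the reworked CT interface above. The struct zfcp_send_ct field names (port, req, resp, timeout, handler, handler_data, status) are taken from this hunk; my_ct_job, my_ct_done(), the handler prototype and the timeout unit are assumptions, and the usual zfcp-internal headers are presumed to be included:

/* hypothetical caller, not part of this patch */
struct my_ct_job {
	struct zfcp_send_ct ct;
	struct scatterlist sg_req;
	struct scatterlist sg_resp;
};

static void my_ct_done(unsigned long data)
{
	struct my_ct_job *job = (struct my_ct_job *) data;

	/* ct.status has been set by zfcp_fsf_send_ct_handler() */
	if (job->ct.status)
		pr_debug("CT request failed: %d\n", job->ct.status);
	kfree(job);
}

static int my_send_ct(struct zfcp_port *port, void *req_buf, size_t req_len,
		      void *resp_buf, size_t resp_len)
{
	struct my_ct_job *job;
	int ret;

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	sg_init_one(&job->sg_req, req_buf, req_len);	/* CT_IU request */
	sg_init_one(&job->sg_resp, resp_buf, resp_len);	/* CT_IU response */

	job->ct.port = port;
	job->ct.req = &job->sg_req;
	job->ct.resp = &job->sg_resp;
	job->ct.timeout = 10;			/* assumed unit */
	job->ct.handler = my_ct_done;		/* called from the FSF handler */
	job->ct.handler_data = (unsigned long) job;

	/* no private mempool, not tied to an ERP action */
	ret = zfcp_fsf_send_ct(&job->ct, NULL, NULL);
	if (ret)
		kfree(job);
	return ret;
}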
 
-/**
- * zfcp_fsf_send_els_handler - handler for ELS commands
- * @fsf_req: pointer to struct zfcp_fsf_req
- *
- * Data specific for the ELS command is passed using
- * fsf_req->data. There we find the pointer to struct zfcp_send_els.
- * Usually a specific handler for the ELS command is called which is
- * found in this structure.
- */
-static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 {
-       struct zfcp_adapter *adapter;
-       struct zfcp_port *port;
-       u32 d_id;
-       struct fsf_qtcb_header *header;
-       struct fsf_qtcb_bottom_support *bottom;
-       struct zfcp_send_els *send_els;
-       int retval = -EINVAL;
-       u16 subtable, rule, counter;
-
-       send_els = (struct zfcp_send_els *) fsf_req->data;
-       adapter = send_els->adapter;
-       port = send_els->port;
-       d_id = send_els->d_id;
-       header = &fsf_req->qtcb->header;
-       bottom = &fsf_req->qtcb->bottom.support;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+       struct zfcp_send_els *send_els = req->data;
+       struct zfcp_port *port = send_els->port;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
+
+       send_els->status = -EINVAL;
+
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
 
        switch (header->fsf_status) {
-
        case FSF_GOOD:
-               zfcp_san_dbf_event_els_response(fsf_req);
-               retval = 0;
+               zfcp_san_dbf_event_els_response(req);
+               send_els->status = 0;
                break;
-
        case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-               ZFCP_LOG_INFO("error: adapter %s does not support fc "
-                             "class %d.\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             ZFCP_FC_SERVICE_CLASS_DEFAULT);
-               /* stop operation for this adapter */
-               zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_fsf_class_not_supp(req);
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
                switch (header->fsf_status_qual.word[0]){
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
                        if (port && (send_els->ls_code != ZFCP_LS_ADISC))
                                zfcp_test_link(port);
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
+                       /* fall through */
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       retval =
-                         zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
-                                             (struct zfcp_ls_rjt_par *)
-                                             &header->fsf_status_qual.word[2]);
-                       break;
                case FSF_SQ_RETRY_IF_POSSIBLE:
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
-               default:
-                       ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n",
-                                     header->fsf_status_qual.word[0]);
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-                               (char*)header->fsf_status_qual.word, 16);
                }
                break;
-
        case FSF_ELS_COMMAND_REJECTED:
-               ZFCP_LOG_INFO("ELS has been rejected because command filter "
-                             "prohibited sending "
-                             "(adapter: %s, port d_id: 0x%06x)\n",
-                             zfcp_get_busid_by_adapter(adapter), d_id);
-
-               break;
-
        case FSF_PAYLOAD_SIZE_MISMATCH:
-               ZFCP_LOG_INFO(
-                       "ELS request size and ELS response size must be either "
-                       "both 0, or both greater than 0 "
-                       "(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n",
-                       zfcp_get_busid_by_adapter(adapter),
-                       bottom->req_buf_length,
-                       bottom->resp_buf_length);
-               break;
-
        case FSF_REQUEST_SIZE_TOO_LARGE:
-               ZFCP_LOG_INFO(
-                       "Length of the ELS request buffer, "
-                       "specified in QTCB bottom, "
-                       "exceeds the size of the buffers "
-                       "that have been allocated for ELS request data "
-                       "(adapter: %s, req_buf_length=%d)\n",
-                       zfcp_get_busid_by_adapter(adapter),
-                       bottom->req_buf_length);
-               break;
-
        case FSF_RESPONSE_SIZE_TOO_LARGE:
-               ZFCP_LOG_INFO(
-                       "Length of the ELS response buffer, "
-                       "specified in QTCB bottom, "
-                       "exceeds the size of the buffers "
-                       "that have been allocated for ELS response data "
-                       "(adapter: %s, resp_buf_length=%d)\n",
-                       zfcp_get_busid_by_adapter(adapter),
-                       bottom->resp_buf_length);
                break;
-
-       case FSF_SBAL_MISMATCH:
-               /* should never occure, avoided in zfcp_fsf_send_els */
-               ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
-                             "resp_buf_length=%d)\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             bottom->req_buf_length, bottom->resp_buf_length);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
        case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
-                               "(adapter %s, port d_id=0x%06x)\n",
-                               zfcp_get_busid_by_adapter(adapter), d_id);
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               if (port != NULL)
-                       zfcp_erp_port_access_denied(port, 56, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL(
-                       "bug: An unknown FSF Status was presented "
-                       "(adapter: %s, fsf_status=0x%08x)\n",
-                       zfcp_get_busid_by_adapter(adapter),
-                       header->fsf_status);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-       }
-
-skip_fsfstatus:
-       send_els->status = retval;
-
-       if (send_els->handler)
-               send_els->handler(send_els->handler_data);
-
-       return retval;
-}
-
-int
-zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
-{
-       volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       struct zfcp_adapter *adapter = erp_action->adapter;
-       unsigned long lock_flags;
-       int retval;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter,
-                                    FSF_QTCB_EXCHANGE_CONFIG_DATA,
-                                    ZFCP_REQ_AUTO_CLEANUP,
-                                    adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Could not create exchange configuration "
-                             "data request for adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                                       lock_flags);
-               return retval;
-       }
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-       fsf_req->qtcb->bottom.config.feature_selection =
-                       FSF_FEATURE_CFDC |
-                       FSF_FEATURE_LUN_SHARING |
-                       FSF_FEATURE_NOTIFICATION_LOST |
-                       FSF_FEATURE_UPDATE_ALERT;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
-
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(fsf_req);
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                               lock_flags);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Could not send exchange configuration "
-                             "data command on the adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               zfcp_fsf_req_free(fsf_req);
-               erp_action->fsf_req = NULL;
-       }
-       else
-               ZFCP_LOG_DEBUG("exchange configuration data request initiated "
-                              "(adapter %s)\n",
-                              zfcp_get_busid_by_adapter(adapter));
-
-       return retval;
-}
-
-int
-zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
-                               struct fsf_qtcb_bottom_config *data)
-{
-       volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-                                    ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
-                                    &fsf_req);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Could not create exchange configuration "
-                             "data request for adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                                       lock_flags);
-               return retval;
-       }
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-       fsf_req->qtcb->bottom.config.feature_selection =
-                       FSF_FEATURE_CFDC |
-                       FSF_FEATURE_LUN_SHARING |
-                       FSF_FEATURE_NOTIFICATION_LOST |
-                       FSF_FEATURE_UPDATE_ALERT;
-
-       if (data)
-               fsf_req->data = (unsigned long) data;
-
-       zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-       retval = zfcp_fsf_req_send(fsf_req);
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                               lock_flags);
-       if (retval)
-               ZFCP_LOG_INFO("error: Could not send exchange configuration "
-                             "data command on the adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-       else
-               wait_event(fsf_req->completion_wq,
-                          fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
-
-       zfcp_fsf_req_free(fsf_req);
-
-       return retval;
-}
-
-/**
- * zfcp_fsf_exchange_config_evaluate
- * @fsf_req: fsf_req which belongs to xchg config data request
- * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1)
- *
- * returns: -EIO on error, 0 otherwise
- */
-static int
-zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
-{
-       struct fsf_qtcb_bottom_config *bottom;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct Scsi_Host *shost = adapter->scsi_host;
-
-       bottom = &fsf_req->qtcb->bottom.config;
-       ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
-                      bottom->low_qtcb_version, bottom->high_qtcb_version);
-       adapter->fsf_lic_version = bottom->lic_version;
-       adapter->adapter_features = bottom->adapter_features;
-       adapter->connection_features = bottom->connection_features;
-       adapter->peer_wwpn = 0;
-       adapter->peer_wwnn = 0;
-       adapter->peer_d_id = 0;
-
-       if (xchg_ok) {
-
-               if (fsf_req->data)
-                       memcpy((struct fsf_qtcb_bottom_config *) fsf_req->data,
-                               bottom, sizeof (struct fsf_qtcb_bottom_config));
-
-               fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
-               fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
-               fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
-               fc_host_speed(shost) = bottom->fc_link_speed;
-               fc_host_supported_classes(shost) =
-                               FC_COS_CLASS2 | FC_COS_CLASS3;
-               adapter->hydra_version = bottom->adapter_type;
-               if (fc_host_permanent_port_name(shost) == -1)
-                       fc_host_permanent_port_name(shost) =
-                               fc_host_port_name(shost);
-               if (bottom->fc_topology == FSF_TOPO_P2P) {
-                       adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
-                       adapter->peer_wwpn = bottom->plogi_payload.wwpn;
-                       adapter->peer_wwnn = bottom->plogi_payload.wwnn;
-                       fc_host_port_type(shost) = FC_PORTTYPE_PTP;
-               } else if (bottom->fc_topology == FSF_TOPO_FABRIC)
-                       fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
-               else if (bottom->fc_topology == FSF_TOPO_AL)
-                       fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
-               else
-                       fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-       } else {
-               fc_host_node_name(shost) = 0;
-               fc_host_port_name(shost) = 0;
-               fc_host_port_id(shost) = 0;
-               fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
-               fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-               adapter->hydra_version = 0;
-       }
-
-       if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
-               adapter->hardware_version = bottom->hardware_version;
-               memcpy(fc_host_serial_number(shost), bottom->serial_number,
-                      min(FC_SERIAL_NUMBER_SIZE, 17));
-               EBCASC(fc_host_serial_number(shost),
-                      min(FC_SERIAL_NUMBER_SIZE, 17));
-       }
-
-       if (fsf_req->erp_action)
-               ZFCP_LOG_NORMAL("The adapter %s reported the following "
-                               "characteristics:\n"
-                               "WWNN 0x%016Lx, WWPN 0x%016Lx, "
-                               "S_ID 0x%06x,\n"
-                               "adapter version 0x%x, "
-                               "LIC version 0x%x, "
-                               "FC link speed %d Gb/s\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               (wwn_t) fc_host_node_name(shost),
-                               (wwn_t) fc_host_port_name(shost),
-                               fc_host_port_id(shost),
-                               adapter->hydra_version,
-                               adapter->fsf_lic_version,
-                               fc_host_speed(shost));
-       if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
-               ZFCP_LOG_NORMAL("error: the adapter %s "
-                               "only supports newer control block "
-                               "versions in comparison to this device "
-                               "driver (try updated device driver)\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req);
-               return -EIO;
-       }
-       if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
-               ZFCP_LOG_NORMAL("error: the adapter %s "
-                               "only supports older control block "
-                               "versions than this device driver uses"
-                               "(consider a microcode upgrade)\n",
-                               zfcp_get_busid_by_adapter(adapter));
-               zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req);
-               return -EIO;
+               zfcp_fsf_access_denied_port(req, port);
+               break;
+       case FSF_SBAL_MISMATCH:
+               /* should never occur, avoided in zfcp_fsf_send_els */
+               /* fall through */
+       default:
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               break;
        }
-       return 0;
+skip_fsfstatus:
+       if (send_els->handler)
+               send_els->handler(send_els->handler_data);
 }
 
 /**
- * function:    zfcp_fsf_exchange_config_data_handler
- *
- * purpose:     is called for finished Exchange Configuration Data command
- *
- * returns:
+ * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @els: pointer to struct zfcp_send_els with data for the command
  */
-static int
-zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
+int zfcp_fsf_send_els(struct zfcp_send_els *els)
 {
-       struct fsf_qtcb_bottom_config *bottom;
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fsf_qtcb *qtcb = fsf_req->qtcb;
+       struct zfcp_fsf_req *req;
+       struct zfcp_adapter *adapter = els->adapter;
+       struct fsf_qtcb_bottom_support *bottom;
+       int ret = -EIO;
 
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-               return -EIO;
+       if (unlikely(!(atomic_read(&els->port->status) &
+                      ZFCP_STATUS_COMMON_UNBLOCKED)))
+               return -EBUSY;
 
-       switch (qtcb->header.fsf_status) {
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
+                                 ZFCP_REQ_AUTO_CLEANUP, NULL);
+       if (unlikely(IS_ERR(req))) {
+               ret = PTR_ERR(req);
+               goto out;
+       }
 
-       case FSF_GOOD:
-               if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
-                       return -EIO;
-
-               switch (fc_host_port_type(adapter->scsi_host)) {
-               case FC_PORTTYPE_PTP:
-                       ZFCP_LOG_NORMAL("Point-to-Point fibrechannel "
-                                       "configuration detected at adapter %s\n"
-                                       "Peer WWNN 0x%016llx, "
-                                       "peer WWPN 0x%016llx, "
-                                       "peer d_id 0x%06x\n",
-                                       zfcp_get_busid_by_adapter(adapter),
-                                       adapter->peer_wwnn,
-                                       adapter->peer_wwpn,
-                                       adapter->peer_d_id);
-                       break;
-               case FC_PORTTYPE_NLPORT:
-                       ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel "
-                                       "topology detected at adapter %s "
-                                       "unsupported, shutting down adapter\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                       zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
-                       return -EIO;
-               case FC_PORTTYPE_NPORT:
-                       if (fsf_req->erp_action)
-                               ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
-                                               "network detected at adapter "
-                                               "%s.\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL("bug: The fibrechannel topology "
-                                       "reported by the exchange "
-                                       "configuration command for "
-                                       "the adapter %s is not "
-                                       "of a type known to the zfcp "
-                                       "driver, shutting down adapter\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                       zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req);
-                       return -EIO;
-               }
-               bottom = &qtcb->bottom.config;
-               if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
-                       ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
-                                       "allowed by the adapter %s "
-                                       "is lower than the minimum "
-                                       "required by the driver (%ld bytes).\n",
-                                       bottom->max_qtcb_size,
-                                       zfcp_get_busid_by_adapter(adapter),
-                                       sizeof(struct fsf_qtcb));
-                       zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req);
-                       return -EIO;
-               }
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-                               &adapter->status);
-               break;
-       case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-               if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
-                       return -EIO;
+       ret = zfcp_fsf_setup_sbals(req, els->req, els->resp,
+                                  FSF_MAX_SBALS_PER_ELS_REQ);
+       if (ret)
+               goto failed_send;
 
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-                               &adapter->status);
+       bottom = &req->qtcb->bottom.support;
+       req->handler = zfcp_fsf_send_els_handler;
+       bottom->d_id = els->d_id;
+       bottom->service_class = FSF_CLASS_3;
+       bottom->timeout = 2 * R_A_TOV;
+       req->data = els;
 
-               zfcp_fsf_link_down_info_eval(fsf_req, 42,
-                       &qtcb->header.fsf_status_qual.link_down_info);
-               break;
-       default:
-               zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req);
-               return -EIO;
-       }
-       return 0;
+       zfcp_san_dbf_event_els_request(req);
+
+       zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+       ret = zfcp_fsf_req_send(req);
+       if (ret)
+               goto failed_send;
+
+       goto out;
+
+failed_send:
+       zfcp_fsf_req_free(req);
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return ret;
 }
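
A similar, hedged sketch for the new ELS entry point. The struct zfcp_send_els field names (adapter, port, d_id, req, resp, ls_code, handler, handler_data, status) come from this hunk; my_els_done() and the caller-provided storage are illustrative. Note that the ELS timeout is now fixed to 2 * R_A_TOV inside zfcp_fsf_send_els():

/* hypothetical caller, not part of this patch */
static void my_els_done(unsigned long data)
{
	struct zfcp_send_els *els = (struct zfcp_send_els *) data;

	if (els->status)
		pr_debug("ELS 0x%x to d_id 0x%06x failed\n",
			 els->ls_code, els->d_id);
}

/* *els and the scatterlists must stay valid until my_els_done() has run */
static int my_send_els(struct zfcp_send_els *els, struct zfcp_port *port,
		       struct scatterlist *sg_req, struct scatterlist *sg_resp,
		       u32 ls_code)
{
	memset(els, 0, sizeof(*els));
	els->adapter = port->adapter;
	els->port = port;
	els->d_id = port->d_id;
	els->req = sg_req;		/* ELS request payload */
	els->resp = sg_resp;		/* ELS response buffer */
	els->ls_code = ls_code;
	els->handler = my_els_done;
	els->handler_data = (unsigned long) els;

	return zfcp_fsf_send_els(els);
}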
 
-/**
- * zfcp_fsf_exchange_port_data - request information about local port
- * @erp_action: ERP action for the adapter for which port data is requested
- */
-int
-zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
+       struct zfcp_fsf_req *req;
        struct zfcp_adapter *adapter = erp_action->adapter;
-       unsigned long lock_flags;
-       int retval;
+       int retval = -EIO;
 
-       if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
-               ZFCP_LOG_INFO("error: exchange port data "
-                             "command not supported by adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               return -EOPNOTSUPP;
-       }
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-                                    ZFCP_REQ_AUTO_CLEANUP,
-                                    adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Out of resources. Could not create an "
-                             "exchange port data request for "
-                             "the adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                                       lock_flags);
-               return retval;
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter,
+                                 FSF_QTCB_EXCHANGE_CONFIG_DATA,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(req);
        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       erp_action->fsf_req = fsf_req;
-       fsf_req->erp_action = erp_action;
-       zfcp_erp_start_timer(fsf_req);
-
-       retval = zfcp_fsf_req_send(fsf_req);
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+       req->qtcb->bottom.config.feature_selection =
+                       FSF_FEATURE_CFDC |
+                       FSF_FEATURE_LUN_SHARING |
+                       FSF_FEATURE_NOTIFICATION_LOST |
+                       FSF_FEATURE_UPDATE_ALERT;
+       req->erp_action = erp_action;
+       req->handler = zfcp_fsf_exchange_config_data_handler;
+       erp_action->fsf_req = req;
 
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
        if (retval) {
-               ZFCP_LOG_INFO("error: Could not send an exchange port data "
-                             "command on the adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               zfcp_fsf_req_free(fsf_req);
+               zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
        }
-       else
-               ZFCP_LOG_DEBUG("exchange port data request initiated "
-                              "(adapter %s)\n",
-                              zfcp_get_busid_by_adapter(adapter));
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
 
-
-/**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * and wait until information is ready
- */
-int
-zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
-                               struct fsf_qtcb_bottom_port *data)
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+                                      struct fsf_qtcb_bottom_config *data)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval;
-
-       if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
-               ZFCP_LOG_INFO("error: exchange port data "
-                             "command not supported by adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               return -EOPNOTSUPP;
-       }
+       struct zfcp_fsf_req *req = NULL;
+       int retval = -EIO;
 
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-                               0, NULL, &lock_flags, &fsf_req);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Out of resources. Could not create an "
-                             "exchange port data request for "
-                             "the adapter %s.\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-                                       lock_flags);
-               return retval;
-       }
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
 
-       if (data)
-               fsf_req->data = (unsigned long) data;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+                                 0, NULL);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
+       }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(req);
        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       req->handler = zfcp_fsf_exchange_config_data_handler;
 
-       zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-       retval = zfcp_fsf_req_send(fsf_req);
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+       req->qtcb->bottom.config.feature_selection =
+                       FSF_FEATURE_CFDC |
+                       FSF_FEATURE_LUN_SHARING |
+                       FSF_FEATURE_NOTIFICATION_LOST |
+                       FSF_FEATURE_UPDATE_ALERT;
 
-       if (retval)
-               ZFCP_LOG_INFO("error: Could not send an exchange port data "
-                             "command on the adapter %s\n",
-                             zfcp_get_busid_by_adapter(adapter));
-       else
-               wait_event(fsf_req->completion_wq,
-                          fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+       if (data)
+               req->data = data;
 
-       zfcp_fsf_req_free(fsf_req);
+       zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+       retval = zfcp_fsf_req_send(req);
+out:
+       spin_unlock(&adapter->req_q.lock);
+       if (!retval)
+               wait_event(req->completion_wq,
+                          req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+
+       zfcp_fsf_req_free(req);
 
        return retval;
 }
 
 /**
- * zfcp_fsf_exchange_port_evaluate
- * @fsf_req: fsf_req which belongs to xchg port data request
- * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
+ * zfcp_fsf_exchange_port_data - request information about local port
+ * @erp_action: ERP action for the adapter for which port data is requested
+ * Returns: 0 on success, error otherwise
  */
-static void
-zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
+int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
-       struct zfcp_adapter *adapter;
-       struct fsf_qtcb_bottom_port *bottom;
-       struct Scsi_Host *shost;
-
-       adapter = fsf_req->adapter;
-       bottom = &fsf_req->qtcb->bottom.port;
-       shost = adapter->scsi_host;
-
-       if (fsf_req->data)
-               memcpy((struct fsf_qtcb_bottom_port*) fsf_req->data, bottom,
-                       sizeof(struct fsf_qtcb_bottom_port));
+       volatile struct qdio_buffer_element *sbale;
+       struct zfcp_fsf_req *req;
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       int retval = -EIO;
 
-       if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-               fc_host_permanent_port_name(shost) = bottom->wwpn;
-       else
-               fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-       fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-       fc_host_supported_speeds(shost) = bottom->supported_speed;
-}
+       if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+               return -EOPNOTSUPP;
 
-/**
- * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
- * @fsf_req: pointer to struct zfcp_fsf_req
- */
-static void
-zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
-{
-       struct zfcp_adapter *adapter;
-       struct fsf_qtcb *qtcb;
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
+       }
 
-       adapter = fsf_req->adapter;
-       qtcb = fsf_req->qtcb;
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-               return;
+       req->handler = zfcp_fsf_exchange_port_data_handler;
+       req->erp_action = erp_action;
+       erp_action->fsf_req = req;
 
-       switch (qtcb->header.fsf_status) {
-        case FSF_GOOD:
-               zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-               break;
-       case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-               zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-               zfcp_fsf_link_down_info_eval(fsf_req, 43,
-                       &qtcb->header.fsf_status_qual.link_down_info);
-                break;
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
+       if (retval) {
+               zfcp_fsf_req_free(req);
+               erp_action->fsf_req = NULL;
        }
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return retval;
 }
 
-
-/*
- * function:    zfcp_fsf_open_port
- *
- * purpose:
- *
- * returns:    address of initiated FSF request
- *             NULL - request could not be initiated
+/**
+ * zfcp_fsf_exchange_port_data_sync - request information about local port
+ * @adapter: pointer to struct zfcp_adapter
+ * @data: pointer to struct fsf_qtcb_bottom_port
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+                                    struct fsf_qtcb_bottom_port *data)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(erp_action->adapter,
-                                    FSF_QTCB_OPEN_PORT_WITH_DID,
-                                    ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                    erp_action->adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create open port request "
-                             "for port 0x%016Lx on adapter %s.\n",
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
-               goto out;
-       }
+       struct zfcp_fsf_req *req = NULL;
+       int retval = -EIO;
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+               return -EOPNOTSUPP;
 
-       fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
-       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
-       fsf_req->data = (unsigned long) erp_action->port;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
 
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (retval) {
-               ZFCP_LOG_INFO("error: Could not send open port request for "
-                             "port 0x%016Lx on adapter %s.\n",
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
-               zfcp_fsf_req_free(fsf_req);
-               erp_action->fsf_req = NULL;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
+                                 NULL);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
                goto out;
        }
 
-       ZFCP_LOG_DEBUG("open port request initiated "
-                      "(adapter %s,  port 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(erp_action->adapter),
-                      erp_action->port->wwpn);
- out:
-       write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-                               lock_flags);
+       if (data)
+               req->data = data;
+
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+       req->handler = zfcp_fsf_exchange_port_data_handler;
+       zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+       retval = zfcp_fsf_req_send(req);
+out:
+       spin_unlock(&adapter->req_q.lock);
+       if (!retval)
+               wait_event(req->completion_wq,
+                          req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+       zfcp_fsf_req_free(req);
+
        return retval;
 }
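
Finally, a short sketch of the synchronous port-data exchange, e.g. from a process-context management path. The fsf_qtcb_bottom_port members used here (wwpn, maximum_frame_size, supported_speed) appear in the code removed above; the caller itself is hypothetical:

/* hypothetical caller, not part of this patch */
static int my_show_port_info(struct zfcp_adapter *adapter)
{
	struct fsf_qtcb_bottom_port *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* returns -EOPNOTSUPP without FSF_FEATURE_HBAAPI_MANAGEMENT,
	 * otherwise blocks until the FSF request has completed */
	ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
	if (!ret)
		pr_info("wwpn 0x%016llx, max frame %u, speeds 0x%x\n",
			(unsigned long long) data->wwpn,
			data->maximum_frame_size, data->supported_speed);

	kfree(data);
	return ret;
}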
 
-/*
- * function:    zfcp_fsf_open_port_handler
- *
- * purpose:    is called for finished Open Port command
- *
- * returns:
- */
-static int
-zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_port *port;
+       struct zfcp_port *port = req->data;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
        struct fsf_plogi *plogi;
-       struct fsf_qtcb_header *header;
-       u16 subtable, rule, counter;
-
-       port = (struct zfcp_port *) fsf_req->data;
-       header = &fsf_req->qtcb->header;
 
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* don't change port status in our bookkeeping */
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
-       }
 
-       /* evaluate FSF status in QTCB */
        switch (header->fsf_status) {
-
        case FSF_PORT_ALREADY_OPEN:
-               ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
-                               "is already open.\n",
-                               port->wwpn, zfcp_get_busid_by_port(port));
-               /*
-                * This is a bug, however operation should continue normally
-                * if it is simply ignored
-                */
                break;
-
        case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx "
-                               "on adapter %s\n",
-                               port->wwpn, zfcp_get_busid_by_port(port));
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               zfcp_erp_port_access_denied(port, 57, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_fsf_access_denied_port(req, port);
                break;
-
        case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
-               ZFCP_LOG_INFO("error: The FSF adapter is out of resources. "
-                             "The remote port 0x%016Lx on adapter %s "
-                             "could not be opened. Disabling it.\n",
-                             port->wwpn, zfcp_get_busid_by_port(port));
-               zfcp_erp_port_failed(port, 31, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               dev_warn(&req->adapter->ccw_device->dev,
+                        "The adapter is out of resources. The remote port "
+                        "0x%016Lx could not be opened, disabling it.\n",
+                        port->wwpn);
+               zfcp_erp_port_failed(port, 31, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
                switch (header->fsf_status_qual.word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                case FSF_SQ_NO_RETRY_POSSIBLE:
-                       ZFCP_LOG_NORMAL("The remote port 0x%016Lx on "
-                                       "adapter %s could not be opened. "
-                                       "Disabling it.\n",
-                                       port->wwpn,
-                                       zfcp_get_busid_by_port(port));
-                       zfcp_erp_port_failed(port, 32, fsf_req);
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("bug: Wrong status qualifier 0x%x arrived.\n",
-                            header->fsf_status_qual.word[0]);
+                       dev_warn(&req->adapter->ccw_device->dev,
+                                "The remote port 0x%016Lx could not be "
+                                "opened. Disabling it.\n", port->wwpn);
+                       zfcp_erp_port_failed(port, 32, req);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                }
                break;
-
        case FSF_GOOD:
-               /* save port handle assigned by FSF */
                port->handle = header->port_handle;
-               ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s "
-                             "was opened, it's port handle is 0x%x\n",
-                             port->wwpn, zfcp_get_busid_by_port(port),
-                             port->handle);
-               /* mark port as open */
                atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
                                ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
                atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
                                  ZFCP_STATUS_COMMON_ACCESS_BOXED,
                                  &port->status);
-               retval = 0;
                /* check whether D_ID has changed during open */
                /*
                 * FIXME: This check is not airtight, as the FCP channel does
@@ -2526,320 +1496,168 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
                 * another GID_PN straight after a port has been opened.
                 * Alternately, an ADISC/PDISC ELS should suffice, as well.
                 */
-               plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els;
-               if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status))
-               {
-                       if (fsf_req->qtcb->bottom.support.els1_length <
-                           sizeof (struct fsf_plogi)) {
-                               ZFCP_LOG_INFO(
-                                       "warning: insufficient length of "
-                                       "PLOGI payload (%i)\n",
-                                       fsf_req->qtcb->bottom.support.els1_length);
-                               /* skip sanity check and assume wwpn is ok */
-                       } else {
-                               if (plogi->serv_param.wwpn != port->wwpn) {
-                                       ZFCP_LOG_INFO("warning: d_id of port "
-                                                     "0x%016Lx changed during "
-                                                     "open\n", port->wwpn);
-                                       atomic_clear_mask(
-                                               ZFCP_STATUS_PORT_DID_DID,
-                                               &port->status);
-                               } else {
-                                       port->wwnn = plogi->serv_param.wwnn;
-                                       zfcp_plogi_evaluate(port, plogi);
-                               }
+               if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
+                       break;
+
+               plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+               if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
+                       if (plogi->serv_param.wwpn != port->wwpn)
+                               atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
+                                                 &port->status);
+                       else {
+                               port->wwnn = plogi->serv_param.wwnn;
+                               zfcp_fc_plogi_evaluate(port, plogi);
                        }
                }
                break;
-
        case FSF_UNKNOWN_OP_SUBTYPE:
-               /* should never occure, subtype not set in zfcp_fsf_open_port */
-               ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, "
-                             "op_subtype=0x%x)\n",
-                             zfcp_get_busid_by_port(port),
-                             fsf_req->qtcb->bottom.support.operation_subtype);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               header->fsf_status);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
        }
 
- skip_fsfstatus:
+skip_fsfstatus:
        atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
-       return retval;
 }
 
-/*
- * function:    zfcp_fsf_close_port
- *
- * purpose:     submit FSF command "close port"
- *
- * returns:     address of initiated FSF request
- *              NULL - request could not be initiated
+/**
+ * zfcp_fsf_open_port - create and send open port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(erp_action->adapter,
-                                    FSF_QTCB_CLOSE_PORT,
-                                    ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                    erp_action->adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create a close port request "
-                             "for port 0x%016Lx on adapter %s.\n",
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_fsf_req *req;
+       int retval = -EIO;
+
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+
+       req = zfcp_fsf_req_create(adapter,
+                                 FSF_QTCB_OPEN_PORT_WITH_DID,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
                goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
-       fsf_req->data = (unsigned long) erp_action->port;
-       fsf_req->erp_action = erp_action;
-       fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
-
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(fsf_req);
+       req->handler = zfcp_fsf_open_port_handler;
+       req->qtcb->bottom.support.d_id = erp_action->port->d_id;
+       req->data = erp_action->port;
+       req->erp_action = erp_action;
+       erp_action->fsf_req = req;
+       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
+
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
        if (retval) {
-               ZFCP_LOG_INFO("error: Could not send a close port request for "
-                             "port 0x%016Lx on adapter %s.\n",
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
-               zfcp_fsf_req_free(fsf_req);
+               zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
-               goto out;
        }
-
-       ZFCP_LOG_TRACE("close port request initiated "
-                      "(adapter %s, port 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(erp_action->adapter),
-                      erp_action->port->wwpn);
- out:
-       write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-                               lock_flags);
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
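All of the rewritten ERP submitters in this file (open port above, and the close port, close physical port, open unit and close unit variants below) share one shape: take adapter->req_q.lock, check for a free SBAL, create the request, wire up handler, data and erp_action, start the ERP timer, send, and undo the wiring if the send fails. A hedged, userspace-only approximation of that shape is sketched below; the queue/request structs, submit(), fake_send() and done() are invented names for illustration, and a pthread mutex stands in for the request-queue spinlock.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req;
typedef void (*req_handler_t)(struct req *);

struct req {
	req_handler_t handler;	/* completion callback, cf. req->handler */
	void *data;		/* object the request acts on, cf. req->data */
};

struct queue {
	pthread_mutex_t lock;	/* cf. adapter->req_q.lock */
	int free_slots;		/* cf. free SBALs / zfcp_fsf_req_sbal_get() */
};

static int submit(struct queue *q, void *obj, req_handler_t handler,
		  int (*send)(struct req *))
{
	struct req *req;
	int retval = -EIO;

	pthread_mutex_lock(&q->lock);
	if (!q->free_slots)		/* no room in the request queue */
		goto out;

	req = calloc(1, sizeof(*req));	/* cf. zfcp_fsf_req_create() */
	if (!req) {
		retval = -ENOMEM;
		goto out;
	}
	req->handler = handler;
	req->data = obj;

	retval = send(req);		/* cf. zfcp_fsf_req_send() */
	if (retval)
		free(req);		/* undo the setup on failure */
out:
	pthread_mutex_unlock(&q->lock);
	return retval;
}

static void done(struct req *req)
{
	printf("completed request for object %p\n", req->data);
}

static int fake_send(struct req *req)
{
	req->handler(req);		/* pretend the channel completed it */
	free(req);
	return 0;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1 };
	int obj = 42;

	return submit(&q, &obj, done, fake_send) ? 1 : 0;
}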
 
-/*
- * function:    zfcp_fsf_close_port_handler
- *
- * purpose:     is called for finished Close Port FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_port *port;
+       struct zfcp_port *port = req->data;
 
-       port = (struct zfcp_port *) fsf_req->data;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* don't change port status in our bookkeeping */
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
-       }
-
-       /* evaluate FSF status in QTCB */
-       switch (fsf_req->qtcb->header.fsf_status) {
 
+       switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-                             "0x%016Lx on adapter %s invalid. This may happen "
-                             "occasionally.\n", port->handle,
-                             port->wwpn, zfcp_get_busid_by_port(port));
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &fsf_req->qtcb->header.fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
-               /* Note: FSF has actually closed the port in this case.
-                * The status code is just daft. Fingers crossed for a change
-                */
-               retval = 0;
                break;
-
        case FSF_GOOD:
-               ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, "
-                              "port handle 0x%x\n", port->wwpn,
-                              zfcp_get_busid_by_port(port), port->handle);
-               zfcp_erp_modify_port_status(port, 33, fsf_req,
+               zfcp_erp_modify_port_status(port, 33, req,
                                            ZFCP_STATUS_COMMON_OPEN,
                                            ZFCP_CLEAR);
-               retval = 0;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               fsf_req->qtcb->header.fsf_status);
                break;
        }
 
- skip_fsfstatus:
+skip_fsfstatus:
        atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
-       return retval;
 }
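Throughout these handlers the per-object state is tracked as bit flags in an atomic status word: a flag such as ZFCP_STATUS_COMMON_CLOSING is set before the request is sent and cleared or updated in the completion handler via atomic_set_mask()/atomic_clear_mask(). As a rough, self-contained illustration only, the same set/clear idiom expressed with C11 atomics (the FLAG_* names below are made up, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_OPEN	0x1u	/* illustrative, cf. ZFCP_STATUS_COMMON_OPEN */
#define FLAG_CLOSING	0x2u	/* illustrative, cf. ZFCP_STATUS_COMMON_CLOSING */

int main(void)
{
	atomic_uint status = 0;

	/* submit path: mark the object as closing, cf. atomic_set_mask() */
	atomic_fetch_or(&status, FLAG_CLOSING);

	/* completion handler: drop OPEN and CLOSING again,
	 * cf. atomic_clear_mask() */
	atomic_fetch_and(&status, ~(FLAG_OPEN | FLAG_CLOSING));

	printf("final status = 0x%x\n", atomic_load(&status));
	return 0;
}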
 
-/*
- * function:    zfcp_fsf_close_physical_port
- *
- * purpose:     submit FSF command "close physical port"
- *
- * returns:     address of initiated FSF request
- *              NULL - request could not be initiated
+/**
+ * zfcp_fsf_close_port - create and send close port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(erp_action->adapter,
-                                    FSF_QTCB_CLOSE_PHYSICAL_PORT,
-                                    ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                    erp_action->adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create close physical port "
-                             "request (adapter %s, port 0x%016Lx)\n",
-                             zfcp_get_busid_by_adapter(erp_action->adapter),
-                             erp_action->port->wwpn);
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_fsf_req *req;
+       int retval = -EIO;
+
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
 
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
                goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(req);
        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       /* mark port as being closed */
-       atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
-                       &erp_action->port->status);
-       /* save a pointer to this port */
-       fsf_req->data = (unsigned long) erp_action->port;
-       fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
-
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(fsf_req);
+       req->handler = zfcp_fsf_close_port_handler;
+       req->data = erp_action->port;
+       req->erp_action = erp_action;
+       req->qtcb->header.port_handle = erp_action->port->handle;
+       erp_action->fsf_req = req;
+       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
+
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
        if (retval) {
-               ZFCP_LOG_INFO("error: Could not send close physical port "
-                             "request (adapter %s, port 0x%016Lx)\n",
-                             zfcp_get_busid_by_adapter(erp_action->adapter),
-                             erp_action->port->wwpn);
-               zfcp_fsf_req_free(fsf_req);
+               zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
-               goto out;
        }
-
-       ZFCP_LOG_TRACE("close physical port request initiated "
-                      "(adapter %s, port 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(erp_action->adapter),
-                      erp_action->port->wwpn);
- out:
-       write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-                               lock_flags);
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
 
-/*
- * function:    zfcp_fsf_close_physical_port_handler
- *
- * purpose:     is called for finished Close Physical Port FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_port *port;
+       struct zfcp_port *port = req->data;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
        struct zfcp_unit *unit;
-       struct fsf_qtcb_header *header;
-       u16 subtable, rule, counter;
 
-       port = (struct zfcp_port *) fsf_req->data;
-       header = &fsf_req->qtcb->header;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* don't change port status in our bookkeeping */
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
-       }
 
-       /* evaluate FSF status in QTCB */
        switch (header->fsf_status) {
-
        case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid"
-                             "(adapter %s, port 0x%016Lx). "
-                             "This may happen occasionally.\n",
-                             port->handle,
-                             zfcp_get_busid_by_port(port),
-                             port->wwpn);
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("Access denied, cannot close "
-                               "physical port 0x%016Lx on adapter %s\n",
-                               port->wwpn, zfcp_get_busid_by_port(port));
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               zfcp_erp_port_access_denied(port, 58, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_fsf_access_denied_port(req, port);
                break;
-
        case FSF_PORT_BOXED:
-               ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter "
-                              "%s needs to be reopened but it was attempted "
-                              "to close it physically.\n",
-                              port->wwpn,
-                              zfcp_get_busid_by_port(port));
-               zfcp_erp_port_boxed(port, 50, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-                       ZFCP_STATUS_FSFREQ_RETRY;
-
+               zfcp_erp_port_boxed(port, 50, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                /* can't use generic zfcp_erp_modify_port_status because
                 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
                atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
@@ -2847,154 +1665,88 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
                        atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
                                          &unit->status);
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
                switch (header->fsf_status_qual.word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* This will now be escalated by ERP */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
+                       /* fall through */
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("bug: Wrong status qualifier 0x%x arrived.\n",
-                            header->fsf_status_qual.word[0]);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                }
                break;
-
        case FSF_GOOD:
-               ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s "
-                              "physically closed, port handle 0x%x\n",
-                              port->wwpn,
-                              zfcp_get_busid_by_port(port), port->handle);
                /* can't use generic zfcp_erp_modify_port_status because
                 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
                 */
                atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
                list_for_each_entry(unit, &port->unit_list_head, list)
-                   atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
-               retval = 0;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               header->fsf_status);
+                       atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+                                         &unit->status);
                break;
        }
-
- skip_fsfstatus:
+skip_fsfstatus:
        atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
-       return retval;
 }
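In the FSF_ADAPTER_STATUS_AVAILABLE branches above and below, the rework collapses the status qualifiers onto a shared error path by stacking case labels and, where one qualifier needs an extra step first (e.g. zfcp_test_link()), by falling through deliberately with an explicit fall-through comment. A tiny stand-alone C sketch of that control flow (the enum and function names are invented for illustration):

#include <stdio.h>

enum qualifier { Q_LINK_TEST, Q_ULP_ERP, Q_OTHER };	/* illustrative names */

static int evaluate(enum qualifier q)
{
	int error = 0;

	switch (q) {
	case Q_LINK_TEST:
		printf("retesting the link\n");	/* extra step, cf. zfcp_test_link() */
		/* fall through */
	case Q_ULP_ERP:
		error = 1;	/* cf. setting ZFCP_STATUS_FSFREQ_ERROR */
		break;
	case Q_OTHER:
		break;
	}
	return error;
}

int main(void)
{
	printf("link test -> %d, ulp erp -> %d, other -> %d\n",
	       evaluate(Q_LINK_TEST), evaluate(Q_ULP_ERP), evaluate(Q_OTHER));
	return 0;
}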
 
-/*
- * function:    zfcp_fsf_open_unit
- *
- * purpose:
- *
- * returns:
- *
- * assumptions:        This routine does not check whether the associated
- *             remote port has already been opened. This should be
- *             done by calling routines. Otherwise some status
- *             may be presented by FSF
+/**
+ * zfcp_fsf_close_physical_port - close physical port
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success
  */
-int
-zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(erp_action->adapter,
-                                    FSF_QTCB_OPEN_LUN,
-                                    ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                    erp_action->adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create open unit request for "
-                             "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
-                             erp_action->unit->fcp_lun,
-                             erp_action->unit->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_fsf_req *req;
+       int retval = -EIO;
+
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
                goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-        sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-        sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-       fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
-       if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
-               fsf_req->qtcb->bottom.support.option =
-                       FSF_OPEN_LUN_SUPPRESS_BOXING;
-       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
-       fsf_req->data = (unsigned long) erp_action->unit;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
+       req->data = erp_action->port;
+       req->qtcb->header.port_handle = erp_action->port->handle;
+       req->erp_action = erp_action;
+       req->handler = zfcp_fsf_close_physical_port_handler;
+       erp_action->fsf_req = req;
+       atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
+                       &erp_action->port->status);
 
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(erp_action->fsf_req);
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
        if (retval) {
-               ZFCP_LOG_INFO("error: Could not send an open unit request "
-                             "on the adapter %s, port 0x%016Lx for "
-                             "unit 0x%016Lx\n",
-                             zfcp_get_busid_by_adapter(erp_action->adapter),
-                             erp_action->port->wwpn,
-                             erp_action->unit->fcp_lun);
-               zfcp_fsf_req_free(fsf_req);
+               zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
-               goto out;
        }
-
-       ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, "
-                      "port 0x%016Lx, unit 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(erp_action->adapter),
-                      erp_action->port->wwpn, erp_action->unit->fcp_lun);
- out:
-       write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-                               lock_flags);
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
 
-/*
- * function:    zfcp_fsf_open_unit_handler
- *
- * purpose:    is called for finished Open LUN command
- *
- * returns:
- */
-static int
-zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_adapter *adapter;
-       struct zfcp_unit *unit;
-       struct fsf_qtcb_header *header;
-       struct fsf_qtcb_bottom_support *bottom;
-       struct fsf_queue_designator *queue_designator;
-       u16 subtable, rule, counter;
+       struct zfcp_adapter *adapter = req->adapter;
+       struct zfcp_unit *unit = req->data;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
+       struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+       struct fsf_queue_designator *queue_designator =
+                               &header->fsf_status_qual.fsf_queue_designator;
        int exclusive, readwrite;
 
-       unit = (struct zfcp_unit *) fsf_req->data;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* don't change unit status in our bookkeeping */
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
-       }
-
-       adapter = fsf_req->adapter;
-       header = &fsf_req->qtcb->header;
-       bottom = &fsf_req->qtcb->bottom.support;
-       queue_designator = &header->fsf_status_qual.fsf_queue_designator;
 
        atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
                          ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -3002,155 +1754,65 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
                          ZFCP_STATUS_UNIT_READONLY,
                          &unit->status);
 
-       /* evaluate FSF status in QTCB */
        switch (header->fsf_status) {
 
        case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary port identifier 0x%x "
-                             "for port 0x%016Lx on adapter %s invalid "
-                             "This may happen occasionally\n",
-                             unit->port->handle,
-                             unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
+               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
+               /* fall through */
        case FSF_LUN_ALREADY_OPEN:
-               ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on "
-                               "remote port 0x%016Lx on adapter %s twice.\n",
-                               unit->fcp_lun,
-                               unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on "
-                               "remote port 0x%016Lx on adapter %s\n",
-                               unit->fcp_lun, unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit));
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               zfcp_erp_unit_access_denied(unit, 59, fsf_req);
+               zfcp_fsf_access_denied_unit(req, unit);
                atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
-                atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
                break;
-
        case FSF_PORT_BOXED:
-               ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-                              "needs to be reopened\n",
-                              unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-               zfcp_erp_port_boxed(unit->port, 51, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-                       ZFCP_STATUS_FSFREQ_RETRY;
+               zfcp_erp_port_boxed(unit->port, 51, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
        case FSF_LUN_SHARING_VIOLATION:
-               if (header->fsf_status_qual.word[0] != 0) {
-                       ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port "
-                                       "with WWPN 0x%Lx "
-                                       "connected to the adapter %s "
-                                       "is already in use in LPAR%d, CSS%d\n",
-                                       unit->fcp_lun,
-                                       unit->port->wwpn,
-                                       zfcp_get_busid_by_unit(unit),
-                                       queue_designator->hla,
-                                       queue_designator->cssid);
-               } else {
-                       subtable = header->fsf_status_qual.halfword[4];
-                       rule = header->fsf_status_qual.halfword[5];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
-                                               "remote port with WWPN 0x%Lx "
-                                               "connected to the adapter %s "
-                                               "is denied (%s rule %d)\n",
-                                               unit->fcp_lun,
-                                               unit->port->wwpn,
-                                               zfcp_get_busid_by_unit(unit),
-                                               zfcp_act_subtable_type[subtable],
-                                               rule);
-                               break;
-                       }
-               }
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_unit_access_denied(unit, 60, fsf_req);
+               if (header->fsf_status_qual.word[0])
+                       dev_warn(&adapter->ccw_device->dev,
+                                "FCP-LUN 0x%Lx at the remote port "
+                                "with WWPN 0x%Lx "
+                                "connected to the adapter "
+                                "is already in use in LPAR%d, CSS%d.\n",
+                                unit->fcp_lun,
+                                unit->port->wwpn,
+                                queue_designator->hla,
+                                queue_designator->cssid);
+               else
+                       zfcp_act_eval_err(adapter,
+                                         header->fsf_status_qual.word[2]);
+               zfcp_erp_unit_access_denied(unit, 60, req);
                atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
                atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
-               ZFCP_LOG_INFO("error: The adapter ran out of resources. "
-                             "There is no handle (temporary port identifier) "
-                             "available for unit 0x%016Lx on port 0x%016Lx "
-                             "on adapter %s\n",
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit));
-               zfcp_erp_unit_failed(unit, 34, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               dev_warn(&adapter->ccw_device->dev,
+                        "The adapter ran out of resources. There is no "
+                        "handle available for unit 0x%016Lx on port 0x%016Lx.",
+                        unit->fcp_lun, unit->port->wwpn);
+               zfcp_erp_unit_failed(unit, 34, req);
+               /* fall through */
+       case FSF_INVALID_COMMAND_OPTION:
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
                switch (header->fsf_status_qual.word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* Re-establish link to port */
                        zfcp_test_link(unit->port);
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
+                       /* fall through */
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("bug: Wrong status qualifier 0x%x arrived.\n",
-                            header->fsf_status_qual.word[0]);
                }
                break;
 
-       case FSF_INVALID_COMMAND_OPTION:
-               ZFCP_LOG_NORMAL(
-                       "Invalid option 0x%x has been specified "
-                       "in QTCB bottom sent to the adapter %s\n",
-                       bottom->option,
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EINVAL;
-               break;
-
        case FSF_GOOD:
-               /* save LUN handle assigned by FSF */
                unit->handle = header->lun_handle;
-               ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on "
-                              "adapter %s opened, port handle 0x%x\n",
-                              unit->fcp_lun,
-                              unit->port->wwpn,
-                              zfcp_get_busid_by_unit(unit),
-                              unit->handle);
-               /* mark unit as open */
                atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
 
                if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
@@ -3168,1528 +1830,629 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
                        if (!readwrite) {
                                atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
                                                &unit->status);
-                               ZFCP_LOG_NORMAL("read-only access for unit "
-                                               "(adapter %s, wwpn=0x%016Lx, "
-                                               "fcp_lun=0x%016Lx)\n",
-                                               zfcp_get_busid_by_unit(unit),
-                                               unit->port->wwpn,
-                                               unit->fcp_lun);
+                               dev_info(&adapter->ccw_device->dev,
+                                        "Read-only access for unit 0x%016Lx "
+                                        "on port 0x%016Lx.\n",
+                                        unit->fcp_lun, unit->port->wwpn);
                        }
 
                        if (exclusive && !readwrite) {
-                               ZFCP_LOG_NORMAL("exclusive access of read-only "
-                                               "unit not supported\n");
-                               zfcp_erp_unit_failed(unit, 35, fsf_req);
-                               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                               zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req);
+                               dev_err(&adapter->ccw_device->dev,
+                                       "Exclusive access of read-only unit "
+                                       "0x%016Lx on port 0x%016Lx not "
+                                       "supported, disabling unit.\n",
+                                       unit->fcp_lun, unit->port->wwpn);
+                               zfcp_erp_unit_failed(unit, 35, req);
+                               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                               zfcp_erp_unit_shutdown(unit, 0, 80, req);
                        } else if (!exclusive && readwrite) {
-                               ZFCP_LOG_NORMAL("shared access of read-write "
-                                               "unit not supported\n");
-                               zfcp_erp_unit_failed(unit, 36, fsf_req);
-                               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                               zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req);
+                               dev_err(&adapter->ccw_device->dev,
+                                       "Shared access of read-write unit "
+                                       "0x%016Lx on port 0x%016Lx not "
+                                       "supported, disabling unit.\n",
+                                       unit->fcp_lun, unit->port->wwpn);
+                               zfcp_erp_unit_failed(unit, 36, req);
+                               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+                               zfcp_erp_unit_shutdown(unit, 0, 81, req);
                        }
                }
-
-               retval = 0;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               header->fsf_status);
                break;
        }
 
- skip_fsfstatus:
+skip_fsfstatus:
        atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
-       return retval;
 }
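The FSF_GOOD branch of the open-unit handler above validates the access mode reported by the channel when NPIV is not in use: a unit may be opened exclusive and read-write, or shared and read-only, but the two mixed combinations are treated as errors and the unit is shut down. The check reduces to a small truth table; a self-contained sketch follows (classify() is an invented helper, not driver code, and the driver's status flags and ERP calls are omitted):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the exclusive/readwrite check in the
 * open-unit handler without the driver's bookkeeping. */
static const char *classify(bool exclusive, bool readwrite)
{
	if (exclusive && !readwrite)
		return "error: exclusive access to a read-only unit";
	if (!exclusive && readwrite)
		return "error: shared access to a read-write unit";
	return readwrite ? "ok: exclusive read-write"
			 : "ok: shared read-only";
}

int main(void)
{
	for (int e = 0; e < 2; e++)
		for (int rw = 0; rw < 2; rw++)
			printf("exclusive=%d readwrite=%d -> %s\n",
			       e, rw, classify(e, rw));
	return 0;
}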
 
-/*
- * function:    zfcp_fsf_close_unit
- *
- * purpose:
- *
- * returns:    address of fsf_req - request successfully initiated
- *             NULL -
- *
- * assumptions: This routine does not check whether the associated
- *              remote port/lun has already been opened. This should be
- *              done by calling routines. Otherwise some status
- *              may be presented by FSF
+/**
+ * zfcp_fsf_open_unit - open unit
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 {
        volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req;
-       unsigned long lock_flags;
-       int retval = 0;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(erp_action->adapter,
-                                    FSF_QTCB_CLOSE_LUN,
-                                    ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-                                    erp_action->adapter->pool.fsf_req_erp,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create close unit request for "
-                             "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
-                             erp_action->unit->fcp_lun,
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_fsf_req *req;
+       int retval = -EIO;
+
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
                goto out;
        }
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-       fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
-       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
-       fsf_req->data = (unsigned long) erp_action->unit;
-       fsf_req->erp_action = erp_action;
-       erp_action->fsf_req = fsf_req;
+       req->qtcb->header.port_handle = erp_action->port->handle;
+       req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
+       req->handler = zfcp_fsf_open_unit_handler;
+       req->data = erp_action->unit;
+       req->erp_action = erp_action;
+       erp_action->fsf_req = req;
+
+       if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
+               req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
+
+       atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
 
-       zfcp_erp_start_timer(fsf_req);
-       retval = zfcp_fsf_req_send(erp_action->fsf_req);
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
        if (retval) {
-               ZFCP_LOG_INFO("error: Could not send a close unit request for "
-                             "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n",
-                             erp_action->unit->fcp_lun,
-                             erp_action->port->wwpn,
-                             zfcp_get_busid_by_adapter(erp_action->adapter));
-               zfcp_fsf_req_free(fsf_req);
+               zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
-               goto out;
        }
-
-       ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, "
-                      "port 0x%016Lx, unit 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(erp_action->adapter),
-                      erp_action->port->wwpn, erp_action->unit->fcp_lun);
- out:
-       write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-                               lock_flags);
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
 
-/*
- * function:    zfcp_fsf_close_unit_handler
- *
- * purpose:     is called for finished Close LUN FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_unit *unit;
+       struct zfcp_unit *unit = req->data;
 
-       unit = (struct zfcp_unit *) fsf_req->data;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               /* don't change unit status in our bookkeeping */
+       if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                goto skip_fsfstatus;
-       }
-
-       /* evaluate FSF status in QTCB */
-       switch (fsf_req->qtcb->header.fsf_status) {
 
+       switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-                             "0x%016Lx on adapter %s invalid. This may "
-                             "happen in rare circumstances\n",
-                             unit->port->handle,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit));
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &fsf_req->qtcb->header.fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_LUN_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit "
-                             "0x%016Lx on port 0x%016Lx on adapter %s is "
-                             "invalid. This may happen occasionally.\n",
-                             unit->handle,
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit));
-               ZFCP_LOG_DEBUG("Status qualifier data:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &fsf_req->qtcb->header.fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               zfcp_erp_port_reopen(unit->port, 0, 111, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
        case FSF_PORT_BOXED:
-               ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-                              "needs to be reopened\n",
-                              unit->port->wwpn,
-                              zfcp_get_busid_by_unit(unit));
-               zfcp_erp_port_boxed(unit->port, 52, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-                       ZFCP_STATUS_FSFREQ_RETRY;
+               zfcp_erp_port_boxed(unit->port, 52, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
        case FSF_ADAPTER_STATUS_AVAILABLE:
-               switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+               switch (req->qtcb->header.fsf_status_qual.word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* re-establish link to port */
                        zfcp_test_link(unit->port);
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
+                       /* fall through */
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* ERP strategy will escalate */
-                       fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("bug: Wrong status qualifier 0x%x arrived.\n",
-                            fsf_req->qtcb->header.fsf_status_qual.word[0]);
+                       req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
                }
                break;
-
        case FSF_GOOD:
-               ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s "
-                              "closed, port handle 0x%x\n",
-                              unit->fcp_lun,
-                              unit->port->wwpn,
-                              zfcp_get_busid_by_unit(unit),
-                              unit->handle);
-               /* mark unit as closed */
                atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
-               retval = 0;
-               break;
-
-       default:
-               ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-                               "(debug info 0x%x)\n",
-                               fsf_req->qtcb->header.fsf_status);
                break;
        }
-
- skip_fsfstatus:
+skip_fsfstatus:
        atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
-       return retval;
 }
 
 /**
- * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
- * @adapter: adapter where scsi command is issued
- * @unit: unit where command is sent to
- * @scsi_cmnd: scsi command to be sent
- * @timer: timer to be started when request is initiated
- * @req_flags: flags for fsf_request
+ * zfcp_fsf_close_unit - close zfcp unit
+ * @erp_action: pointer to struct zfcp_unit
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
-                              struct zfcp_unit *unit,
-                              struct scsi_cmnd * scsi_cmnd,
-                              int use_timer, int req_flags)
+int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 {
-       struct zfcp_fsf_req *fsf_req = NULL;
-       struct fcp_cmnd_iu *fcp_cmnd_iu;
-       unsigned int sbtype;
-       unsigned long lock_flags;
-       int real_bytes = 0;
-       int retval = 0;
-       int mask;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
-                                    adapter->pool.fsf_req_scsi,
-                                    &lock_flags, &fsf_req);
-       if (unlikely(retval < 0)) {
-               ZFCP_LOG_DEBUG("error: Could not create FCP command request "
-                              "for unit 0x%016Lx on port 0x%016Lx on "
-                              "adapter %s\n",
-                              unit->fcp_lun,
-                              unit->port->wwpn,
-                              zfcp_get_busid_by_adapter(adapter));
-               goto failed_req_create;
-       }
-
-       if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                       &unit->status))) {
-               retval = -EBUSY;
-               goto unit_blocked;
-       }
-
-       zfcp_unit_get(unit);
-       fsf_req->unit = unit;
-
-       /* associate FSF request with SCSI request (for look up on abort) */
-       scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
-
-       /* associate SCSI command with FSF request */
-       fsf_req->data = (unsigned long) scsi_cmnd;
-
-       /* set handles of unit and its parent port in QTCB */
-       fsf_req->qtcb->header.lun_handle = unit->handle;
-       fsf_req->qtcb->header.port_handle = unit->port->handle;
-
-       /* FSF does not define the structure of the FCP_CMND IU */
-       fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-           &(fsf_req->qtcb->bottom.io.fcp_cmnd);
-
-       /*
-        * set depending on data direction:
-        *      data direction bits in SBALE (SB Type)
-        *      data direction bits in QTCB
-        *      data direction bits in FCP_CMND IU
-        */
-       switch (scsi_cmnd->sc_data_direction) {
-       case DMA_NONE:
-               fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-               /*
-                * FIXME(qdio):
-                * what is the correct type for commands
-                * without 'real' data buffers?
-                */
-               sbtype = SBAL_FLAGS0_TYPE_READ;
-               break;
-       case DMA_FROM_DEVICE:
-               fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-               sbtype = SBAL_FLAGS0_TYPE_READ;
-               fcp_cmnd_iu->rddata = 1;
-               break;
-       case DMA_TO_DEVICE:
-               fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
-               sbtype = SBAL_FLAGS0_TYPE_WRITE;
-               fcp_cmnd_iu->wddata = 1;
-               break;
-       case DMA_BIDIRECTIONAL:
-       default:
-               /*
-                * dummy, catch this condition earlier
-                * in zfcp_scsi_queuecommand
-                */
-               goto failed_scsi_cmnd;
-       }
-
-       /* set FC service class in QTCB (3 per default) */
-       fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
-
-       /* set FCP_LUN in FCP_CMND IU in QTCB */
-       fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
-
-       mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED;
-
-       /* set task attributes in FCP_CMND IU in QTCB */
-       if (likely((scsi_cmnd->device->simple_tags) ||
-                  (atomic_test_mask(mask, &unit->status))))
-               fcp_cmnd_iu->task_attribute = SIMPLE_Q;
-       else
-               fcp_cmnd_iu->task_attribute = UNTAGGED;
-
-       /* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
-       if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) {
-               fcp_cmnd_iu->add_fcp_cdb_length
-                   = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
-               ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
-                              "additional FCP_CDB length is 0x%x "
-                              "(shifted right 2 bits)\n",
-                              scsi_cmnd->cmd_len,
-                              fcp_cmnd_iu->add_fcp_cdb_length);
-       }
-       /*
-        * copy SCSI CDB (including additional length, if any) to
-        * FCP_CDB in FCP_CMND IU in QTCB
-        */
-       memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
-
-       /* FCP CMND IU length in QTCB */
-       fsf_req->qtcb->bottom.io.fcp_cmnd_length =
-               sizeof (struct fcp_cmnd_iu) +
-               fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
+       volatile struct qdio_buffer_element *sbale;
+       struct zfcp_adapter *adapter = erp_action->adapter;
+       struct zfcp_fsf_req *req;
+       int retval = -EIO;
 
-       /* generate SBALEs from data buffer */
-       real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd);
-       if (unlikely(real_bytes < 0)) {
-               if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
-                       ZFCP_LOG_DEBUG(
-                               "Data did not fit into available buffer(s), "
-                              "waiting for more...\n");
-                       retval = -EIO;
-               } else {
-                       ZFCP_LOG_NORMAL("error: No truncation implemented but "
-                                       "required. Shutting down unit "
-                                       "(adapter %s, port 0x%016Lx, "
-                                       "unit 0x%016Lx)\n",
-                                       zfcp_get_busid_by_unit(unit),
-                                       unit->port->wwpn,
-                                       unit->fcp_lun);
-                       zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req);
-                       retval = -EINVAL;
-               }
-               goto no_fit;
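+       /* req_q.lock serializes SBAL use and request submission */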
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
+                                 ZFCP_REQ_AUTO_CLEANUP,
+                                 adapter->pool.fsf_req_erp);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
        }
 
-       /* set length of FCP data length in FCP_CMND IU in QTCB */
-       zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       ZFCP_LOG_DEBUG("Sending SCSI command:\n");
-       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                     (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+       req->qtcb->header.port_handle = erp_action->port->handle;
+       req->qtcb->header.lun_handle = erp_action->unit->handle;
+       req->handler = zfcp_fsf_close_unit_handler;
+       req->data = erp_action->unit;
+       req->erp_action = erp_action;
+       erp_action->fsf_req = req;
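+       /* mark the unit as closing while the close request is pending */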
+       atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
 
-       if (use_timer)
-               zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (unlikely(retval < 0)) {
-               ZFCP_LOG_INFO("error: Could not send FCP command request "
-                             "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             unit->port->wwpn,
-                             unit->fcp_lun);
-               goto send_failed;
+       zfcp_fsf_start_erp_timer(req);
+       retval = zfcp_fsf_req_send(req);
+       if (retval) {
+               zfcp_fsf_req_free(req);
+               erp_action->fsf_req = NULL;
        }
-
-       ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, "
-                      "port 0x%016Lx, unit 0x%016Lx)\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      unit->port->wwpn,
-                      unit->fcp_lun);
-       goto success;
-
- send_failed:
- no_fit:
- failed_scsi_cmnd:
-       zfcp_unit_put(unit);
- unit_blocked:
-       zfcp_fsf_req_free(fsf_req);
-       fsf_req = NULL;
-       scsi_cmnd->host_scribble = NULL;
- success:
- failed_req_create:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+out:
+       spin_unlock(&adapter->req_q.lock);
        return retval;
 }
 
-struct zfcp_fsf_req *
-zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
-                                         struct zfcp_unit *unit,
-                                         u8 tm_flags, int req_flags)
+static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
 {
-       struct zfcp_fsf_req *fsf_req = NULL;
-       int retval = 0;
-       struct fcp_cmnd_iu *fcp_cmnd_iu;
-       unsigned long lock_flags;
-       volatile struct qdio_buffer_element *sbale;
-
-       /* setup new FSF request */
-       retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
-                                    adapter->pool.fsf_req_scsi,
-                                    &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create FCP command (task "
-                             "management) request for adapter %s, port "
-                             " 0x%016Lx, unit 0x%016Lx.\n",
-                             zfcp_get_busid_by_adapter(adapter),
-                             unit->port->wwpn, unit->fcp_lun);
-               goto out;
-       }
-
-       if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-                       &unit->status)))
-               goto unit_blocked;
-
-       /*
-        * Used to decide on proper handler in the return path,
-        * could be either zfcp_fsf_send_fcp_command_task_handler or
-        * zfcp_fsf_send_fcp_command_task_management_handler */
-
-       fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
-
-       /*
-        * hold a pointer to the unit being target of this
-        * task management request
-        */
-       fsf_req->data = (unsigned long) unit;
-
-       /* set FSF related fields in QTCB */
-       fsf_req->qtcb->header.lun_handle = unit->handle;
-       fsf_req->qtcb->header.port_handle = unit->port->handle;
-       fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-       fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
-       fsf_req->qtcb->bottom.io.fcp_cmnd_length =
-               sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-       /* set FCP related fields in FCP_CMND IU in QTCB */
-       fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-               &(fsf_req->qtcb->bottom.io.fcp_cmnd);
-       fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
-       fcp_cmnd_iu->task_management_flags = tm_flags;
-
-       zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (!retval)
-               goto out;
-
- unit_blocked:
-       zfcp_fsf_req_free(fsf_req);
-       fsf_req = NULL;
-
- out:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-       return fsf_req;
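+       /* accumulate sum and track min/max of the reported latency */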
+       lat_rec->sum += lat;
+       lat_rec->min = min(lat_rec->min, lat);
+       lat_rec->max = max(lat_rec->max, lat);
 }
 
-/*
- * function:    zfcp_fsf_send_fcp_command_handler
- *
- * purpose:    is called for finished Send FCP Command
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
 {
-       int retval = -EINVAL;
-       struct zfcp_unit *unit;
-       struct fsf_qtcb_header *header;
-       u16 subtable, rule, counter;
-
-       header = &fsf_req->qtcb->header;
-
-       if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
-               unit = (struct zfcp_unit *) fsf_req->data;
-       else
-               unit = fsf_req->unit;
-
-       if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-               /* go directly to calls of special handlers */
-               goto skip_fsfstatus;
-       }
-
-       /* evaluate FSF status in QTCB */
-       switch (header->fsf_status) {
-
-       case FSF_PORT_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-                             "0x%016Lx on adapter %s invalid\n",
-                             unit->port->handle,
-                             unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_LUN_HANDLE_NOT_VALID:
-               ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit "
-                             "0x%016Lx on port 0x%016Lx on adapter %s is "
-                             "invalid. This may happen occasionally.\n",
-                             unit->handle,
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit));
-               ZFCP_LOG_NORMAL("Status qualifier data:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_HANDLE_MISMATCH:
-               ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed "
-                               "unexpectedly. (adapter %s, port 0x%016Lx, "
-                               "unit 0x%016Lx)\n",
-                               unit->port->handle,
-                               zfcp_get_busid_by_unit(unit),
-                               unit->port->wwpn,
-                               unit->fcp_lun);
-               ZFCP_LOG_NORMAL("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-               ZFCP_LOG_INFO("error: adapter %s does not support fc "
-                             "class %d.\n",
-                             zfcp_get_busid_by_unit(unit),
-                             ZFCP_FC_SERVICE_CLASS_DEFAULT);
-               /* stop operation for this adapter */
-               zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_FCPLUN_NOT_VALID:
-               ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on "
-                               "adapter %s does not have correct unit "
-                               "handle 0x%x\n",
-                               unit->fcp_lun,
-                               unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit),
-                               unit->handle);
-               ZFCP_LOG_DEBUG("status qualifier:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (char *) &header->fsf_status_qual,
-                             sizeof (union fsf_status_qual));
-               zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_ACCESS_DENIED:
-               ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to "
-                               "unit 0x%016Lx on port 0x%016Lx on "
-                               "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit));
-               for (counter = 0; counter < 2; counter++) {
-                       subtable = header->fsf_status_qual.halfword[counter * 2];
-                       rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-                       switch (subtable) {
-                       case FSF_SQ_CFDC_SUBTABLE_OS:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-                       case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-                       case FSF_SQ_CFDC_SUBTABLE_LUN:
-                               ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-                                       zfcp_act_subtable_type[subtable], rule);
-                               break;
-                       }
-               }
-               zfcp_erp_unit_access_denied(unit, 61, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_DIRECTION_INDICATOR_NOT_VALID:
-               ZFCP_LOG_INFO("bug: Invalid data direction given for unit "
-                             "0x%016Lx on port 0x%016Lx on adapter %s "
-                             "(debug info %d)\n",
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit),
-                             fsf_req->qtcb->bottom.io.data_direction);
-               /* stop operation for this adapter */
-               zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_CMND_LENGTH_NOT_VALID:
-               ZFCP_LOG_NORMAL
-                   ("bug: An invalid control-data-block length field "
-                    "was found in a command for unit 0x%016Lx on port "
-                    "0x%016Lx on adapter %s " "(debug info %d)\n",
-                    unit->fcp_lun, unit->port->wwpn,
-                    zfcp_get_busid_by_unit(unit),
-                    fsf_req->qtcb->bottom.io.fcp_cmnd_length);
-               /* stop operation for this adapter */
-               zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               break;
-
-       case FSF_PORT_BOXED:
-               ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-                              "needs to be reopened\n",
-                              unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-               zfcp_erp_port_boxed(unit->port, 53, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-                       ZFCP_STATUS_FSFREQ_RETRY;
-               break;
+       struct fsf_qual_latency_info *lat_inf;
+       struct latency_cont *lat;
+       struct zfcp_unit *unit = req->unit;
+       unsigned long flags;
 
-       case FSF_LUN_BOXED:
-               ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, "
-                               "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
-                               zfcp_get_busid_by_unit(unit),
-                               unit->port->wwpn, unit->fcp_lun);
-               zfcp_erp_unit_boxed(unit, 54, fsf_req);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-                       | ZFCP_STATUS_FSFREQ_RETRY;
-               break;
+       lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
 
-       case FSF_ADAPTER_STATUS_AVAILABLE:
-               switch (header->fsf_status_qual.word[0]) {
-               case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-                       /* re-establish link to port */
-                       zfcp_test_link(unit->port);
-                       break;
-               case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-                       /* FIXME(hw) need proper specs for proper action */
-                       /* let scsi stack deal with retries and escalation */
-                       break;
-               default:
-                       ZFCP_LOG_NORMAL
-                           ("Unknown status qualifier 0x%x arrived.\n",
-                            header->fsf_status_qual.word[0]);
-                       break;
-               }
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
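+       /* select the per-unit latency bucket matching the data direction */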
+       switch (req->qtcb->bottom.io.data_direction) {
+       case FSF_DATADIR_READ:
+               lat = &unit->latencies.read;
                break;
-
-       case FSF_GOOD:
+       case FSF_DATADIR_WRITE:
+               lat = &unit->latencies.write;
                break;
-
-       case FSF_FCP_RSP_AVAILABLE:
+       case FSF_DATADIR_CMND:
+               lat = &unit->latencies.cmd;
                break;
+       default:
+               return;
        }
 
- skip_fsfstatus:
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) {
-               retval =
-                   zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
-       } else {
-               retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
-               fsf_req->unit = NULL;
-               zfcp_unit_put(unit);
-       }
-       return retval;
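+       /* update the shared statistics under the per-unit latency lock */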
+       spin_lock_irqsave(&unit->latencies.lock, flags);
+       zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
+       zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
+       lat->counter++;
+       spin_unlock_irqrestore(&unit->latencies.lock, flags);
 }
 
-/*
- * function:    zfcp_fsf_send_fcp_command_task_handler
- *
- * purpose:    evaluates FCP_RSP IU
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
-       int retval = 0;
-       struct scsi_cmnd *scpnt;
+       struct scsi_cmnd *scpnt = req->data;
        struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-           &(fsf_req->qtcb->bottom.io.fcp_rsp);
-       struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-           &(fsf_req->qtcb->bottom.io.fcp_cmnd);
+           &(req->qtcb->bottom.io.fcp_rsp);
        u32 sns_len;
-       char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
+       char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
        unsigned long flags;
-       struct zfcp_unit *unit = fsf_req->unit;
-
-       read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
-       scpnt = (struct scsi_cmnd *) fsf_req->data;
-       if (unlikely(!scpnt)) {
-               ZFCP_LOG_DEBUG
-                   ("Command with fsf_req %p is not associated to "
-                    "a scsi command anymore. Aborted?\n", fsf_req);
-               goto out;
-       }
-       if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
-               /* FIXME: (design) mid-layer should handle DID_ABORT like
-                *        DID_SOFT_ERROR by retrying the request for devices
-                *        that allow retries.
-                */
-               ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
-               set_host_byte(&scpnt->result, DID_SOFT_ERROR);
-               set_driver_byte(&scpnt->result, SUGGEST_RETRY);
+
+       if (unlikely(!scpnt))
+               return;
+
+       read_lock_irqsave(&req->adapter->abort_lock, flags);
+
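+       /* report aborted requests as soft errors so the midlayer may retry */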
+       if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
+               set_host_byte(scpnt, DID_SOFT_ERROR);
+               set_driver_byte(scpnt, SUGGEST_RETRY);
                goto skip_fsfstatus;
        }
 
-       if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-               ZFCP_LOG_DEBUG("Setting DID_ERROR\n");
-               set_host_byte(&scpnt->result, DID_ERROR);
+       if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+               set_host_byte(scpnt, DID_ERROR);
                goto skip_fsfstatus;
        }
 
-       /* set message byte of result in SCSI command */
-       scpnt->result |= COMMAND_COMPLETE << 8;
+       set_msg_byte(scpnt, COMMAND_COMPLETE);
 
-       /*
-        * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
-        * of result in SCSI command
-        */
        scpnt->result |= fcp_rsp_iu->scsi_status;
-       if (unlikely(fcp_rsp_iu->scsi_status)) {
-               /* DEBUG */
-               ZFCP_LOG_DEBUG("status for SCSI Command:\n");
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             scpnt->cmnd, scpnt->cmd_len);
-               ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
-                               fcp_rsp_iu->scsi_status);
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu));
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                             zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
-                             fcp_rsp_iu->fcp_sns_len);
-       }
 
-       /* check FCP_RSP_INFO */
-       if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
-               ZFCP_LOG_DEBUG("rsp_len is valid\n");
-               switch (fcp_rsp_info[3]) {
-               case RSP_CODE_GOOD:
-                       /* ok, continue */
-                       ZFCP_LOG_TRACE("no failure or Task Management "
-                                      "Function complete\n");
-                       set_host_byte(&scpnt->result, DID_OK);
-                       break;
-               case RSP_CODE_LENGTH_MISMATCH:
-                       /* hardware bug */
-                       ZFCP_LOG_NORMAL("bug: FCP response code indictates "
-                                       "that the fibrechannel protocol data "
-                                       "length differs from the burst length. "
-                                       "The problem occured on unit 0x%016Lx "
-                                       "on port 0x%016Lx on adapter %s",
-                                       unit->fcp_lun,
-                                       unit->port->wwpn,
-                                       zfcp_get_busid_by_unit(unit));
-                       /* dump SCSI CDB as prepared by zfcp */
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                                     (char *) &fsf_req->qtcb->
-                                     bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-                       set_host_byte(&scpnt->result, DID_ERROR);
-                       goto skip_fsfstatus;
-               case RSP_CODE_FIELD_INVALID:
-                       /* driver or hardware bug */
-                       ZFCP_LOG_NORMAL("bug: FCP response code indictates "
-                                       "that the fibrechannel protocol data "
-                                       "fields were incorrectly set up. "
-                                       "The problem occured on the unit "
-                                       "0x%016Lx on port 0x%016Lx on "
-                                       "adapter %s",
-                                       unit->fcp_lun,
-                                       unit->port->wwpn,
-                                       zfcp_get_busid_by_unit(unit));
-                       /* dump SCSI CDB as prepared by zfcp */
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                                     (char *) &fsf_req->qtcb->
-                                     bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-                       set_host_byte(&scpnt->result, DID_ERROR);
-                       goto skip_fsfstatus;
-               case RSP_CODE_RO_MISMATCH:
-                       /* hardware bug */
-                       ZFCP_LOG_NORMAL("bug: The FCP response code indicates "
-                                       "that conflicting  values for the "
-                                       "fibrechannel payload offset from the "
-                                       "header were found. "
-                                       "The problem occured on unit 0x%016Lx "
-                                       "on port 0x%016Lx on adapter %s.\n",
-                                       unit->fcp_lun,
-                                       unit->port->wwpn,
-                                       zfcp_get_busid_by_unit(unit));
-                       /* dump SCSI CDB as prepared by zfcp */
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                                     (char *) &fsf_req->qtcb->
-                                     bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-                       set_host_byte(&scpnt->result, DID_ERROR);
-                       goto skip_fsfstatus;
-               default:
-                       ZFCP_LOG_NORMAL("bug: An invalid FCP response "
-                                       "code was detected for a command. "
-                                       "The problem occured on the unit "
-                                       "0x%016Lx on port 0x%016Lx on "
-                                       "adapter %s (debug info 0x%x)\n",
-                                       unit->fcp_lun,
-                                       unit->port->wwpn,
-                                       zfcp_get_busid_by_unit(unit),
-                                       fcp_rsp_info[3]);
-                       /* dump SCSI CDB as prepared by zfcp */
-                       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-                                     (char *) &fsf_req->qtcb->
-                                     bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-                       set_host_byte(&scpnt->result, DID_ERROR);
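+       /* record channel and fabric latencies when the adapter reports them */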
+       if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
+               zfcp_fsf_req_latency(req);
+
+       if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
+               if (fcp_rsp_info[3] == RSP_CODE_GOOD)
+                       set_host_byte(scpnt, DID_OK);
+               else {
+                       set_host_byte(scpnt, DID_ERROR);
                        goto skip_fsfstatus;
                }
        }
 
-       /* check for sense data */
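+       /* copy sense data, clamped to the FCP response and the SCSI buffer */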
        if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
-               sns_len = FSF_FCP_RSP_SIZE -
-                   sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len;
-               ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n",
-                              sns_len);
+               sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
+                         fcp_rsp_iu->fcp_rsp_len;
                sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
-               ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n",
-                              SCSI_SENSE_BUFFERSIZE);
                sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-               ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
-                              scpnt->result);
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-                             scpnt->cmnd, scpnt->cmd_len);
 
-               ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
-                              fcp_rsp_iu->fcp_sns_len);
                memcpy(scpnt->sense_buffer,
                       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
-               ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-                             (void *)scpnt->sense_buffer, sns_len);
-       }
-
-       /* check for overrun */
-       if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) {
-               ZFCP_LOG_INFO("A data overrun was detected for a command. "
-                             "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
-                             "The response data length is "
-                             "%d, the original length was %d.\n",
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit),
-                             fcp_rsp_iu->fcp_resid,
-                             (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
        }
 
-       /* check for underrun */
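+       /* data underrun: report the residual and flag an error on underflow */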
        if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
-               ZFCP_LOG_INFO("A data underrun was detected for a command. "
-                             "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
-                             "The response data length is "
-                             "%d, the original length was %d.\n",
-                             unit->fcp_lun,
-                             unit->port->wwpn,
-                             zfcp_get_busid_by_unit(unit),
-                             fcp_rsp_iu->fcp_resid,
-                             (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
-
                scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
                if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
                    scpnt->underflow)
-                       set_host_byte(&scpnt->result, DID_ERROR);
+                       set_host_byte(scpnt, DID_ERROR);
        }
-
- skip_fsfstatus:
-       ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
-
+skip_fsfstatus:
        if (scpnt->result != 0)
-               zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req);
+               zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
        else if (scpnt->retries > 0)
-               zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req);
+               zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
        else
-               zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req);
+               zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
 
-       /* cleanup pointer (need this especially for abort) */
        scpnt->host_scribble = NULL;
-
-       /* always call back */
        (scpnt->scsi_done) (scpnt);
-
        /*
         * We must hold this lock until scsi_done has been called.
         * Otherwise we may call scsi_done after abort regarding this
         * command has completed.
         * Note: scsi_done must not block!
         */
- out:
-       read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
-       return retval;
+       read_unlock_irqrestore(&req->adapter->abort_lock, flags);
 }
 
-/*
- * function:    zfcp_fsf_send_fcp_command_task_management_handler
- *
- * purpose:    evaluates FCP_RSP IU
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
 {
-       int retval = 0;
        struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-           &(fsf_req->qtcb->bottom.io.fcp_rsp);
-       char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
-       struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
-
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-               goto skip_fsfstatus;
-       }
-
-       /* check FCP_RSP_INFO */
-       switch (fcp_rsp_info[3]) {
-       case RSP_CODE_GOOD:
-               /* ok, continue */
-               ZFCP_LOG_DEBUG("no failure or Task Management "
-                              "Function complete\n");
-               break;
-       case RSP_CODE_TASKMAN_UNSUPP:
-               ZFCP_LOG_NORMAL("bug: A reuested task management function "
-                               "is not supported on the target device "
-                               "unit 0x%016Lx, port 0x%016Lx, adapter %s\n ",
-                               unit->fcp_lun,
-                               unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
-               break;
-       case RSP_CODE_TASKMAN_FAILED:
-               ZFCP_LOG_NORMAL("bug: A reuested task management function "
-                               "failed to complete successfully. "
-                               "unit 0x%016Lx, port 0x%016Lx, adapter %s.\n",
-                               unit->fcp_lun,
-                               unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-               break;
-       default:
-               ZFCP_LOG_NORMAL("bug: An invalid FCP response "
-                               "code was detected for a command. "
-                               "unit 0x%016Lx, port 0x%016Lx, adapter %s "
-                               "(debug info 0x%x)\n",
-                               unit->fcp_lun,
-                               unit->port->wwpn,
-                               zfcp_get_busid_by_unit(unit),
-                               fcp_rsp_info[3]);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-       }
+           &(req->qtcb->bottom.io.fcp_rsp);
+       char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
 
-      skip_fsfstatus:
-       return retval;
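+       /* any error or bad response code fails the task management function */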
+       if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
+            (req->status & ZFCP_STATUS_FSFREQ_ERROR))
+               req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
 }
 
 
-/*
- * function:    zfcp_fsf_control_file
- *
- * purpose:     Initiator of the control file upload/download FSF requests
- *
- * returns:     0           - FSF request is successfuly created and queued
- *              -EOPNOTSUPP - The FCP adapter does not have Control File support
- *              -EINVAL     - Invalid direction specified
- *              -ENOMEM     - Insufficient memory
- *              -EPERM      - Cannot create FSF request or place it in QDIO queue
- */
-int
-zfcp_fsf_control_file(struct zfcp_adapter *adapter,
-                      struct zfcp_fsf_req **fsf_req_ptr,
-                      u32 fsf_command,
-                      u32 option,
-                      struct zfcp_sg_list *sg_list)
+static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
 {
-       struct zfcp_fsf_req *fsf_req;
-       struct fsf_qtcb_bottom_support *bottom;
-       volatile struct qdio_buffer_element *sbale;
-       unsigned long lock_flags;
-       int req_flags = 0;
-       int direction;
-       int retval = 0;
-
-       if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
-               ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               retval = -EOPNOTSUPP;
-               goto out;
-       }
-
-       switch (fsf_command) {
-
-       case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-               direction = SBAL_FLAGS0_TYPE_WRITE;
-               if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
-                   (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
-                       req_flags = ZFCP_WAIT_FOR_SBAL;
-               break;
-
-       case FSF_QTCB_UPLOAD_CONTROL_FILE:
-               direction = SBAL_FLAGS0_TYPE_READ;
-               break;
-
-       default:
-               ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
-               retval = -EINVAL;
-               goto out;
-       }
-
-       retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
-                                    NULL, &lock_flags, &fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("error: Could not create FSF request for the "
-                             "adapter %s\n",
-                       zfcp_get_busid_by_adapter(adapter));
-               retval = -EPERM;
-               goto unlock_queue_lock;
-       }
-
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-       sbale[0].flags |= direction;
-
-       bottom = &fsf_req->qtcb->bottom.support;
-       bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
-       bottom->option = option;
-
-       if (sg_list->count > 0) {
-               int bytes;
-
-               bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
-                                               sg_list->sg, sg_list->count,
-                                               ZFCP_MAX_SBALS_PER_REQ);
-                if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
-                       ZFCP_LOG_INFO(
-                               "error: Could not create sufficient number of "
-                               "SBALS for an FSF request to the adapter %s\n",
-                               zfcp_get_busid_by_adapter(adapter));
-                       retval = -ENOMEM;
-                       goto free_fsf_req;
-               }
-       } else
-               sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-       zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-       retval = zfcp_fsf_req_send(fsf_req);
-       if (retval < 0) {
-               ZFCP_LOG_INFO("initiation of cfdc up/download failed"
-                             "(adapter %s)\n",
-                             zfcp_get_busid_by_adapter(adapter));
-               retval = -EPERM;
-               goto free_fsf_req;
-       }
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-
-       ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the "
-                       "adapter %s\n",
-                       fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
-                       "download" : "upload",
-                       zfcp_get_busid_by_adapter(adapter));
-
-       wait_event(fsf_req->completion_wq,
-                  fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
-
-       *fsf_req_ptr = fsf_req;
-       goto out;
-
- free_fsf_req:
-       zfcp_fsf_req_free(fsf_req);
- unlock_queue_lock:
-       write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
- out:
-       return retval;
-}
-
+       struct zfcp_unit *unit;
+       struct fsf_qtcb_header *header = &req->qtcb->header;
 
-/*
- * function:    zfcp_fsf_control_file_handler
- *
- * purpose:     Handler of the control file upload/download FSF requests
- *
- * returns:     0       - FSF request successfuly processed
- *              -EAGAIN - Operation has to be repeated because of a temporary problem
- *              -EACCES - There is no permission to execute an operation
- *              -EPERM  - The control file is not in a right format
- *              -EIO    - There is a problem with the FCP adapter
- *              -EINVAL - Invalid operation
- *              -EFAULT - User space memory I/O operation fault
- */
-static int
-zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req)
-{
-       struct zfcp_adapter *adapter = fsf_req->adapter;
-       struct fsf_qtcb_header *header = &fsf_req->qtcb->header;
-       struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support;
-       int retval = 0;
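+       /* task management requests keep their target unit in req->data */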
+       if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
+               unit = req->data;
+       else
+               unit = req->unit;
 
-       if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-               retval = -EINVAL;
+       if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
                goto skip_fsfstatus;
-       }
 
        switch (header->fsf_status) {
-
-       case FSF_GOOD:
-               ZFCP_LOG_NORMAL(
-                       "The FSF request has been successfully completed "
-                       "on the adapter %s\n",
-                       zfcp_get_busid_by_adapter(adapter));
-               break;
-
-       case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
-               if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
-                       switch (header->fsf_status_qual.word[0]) {
-
-                       case FSF_SQ_CFDC_HARDENED_ON_SE:
-                               ZFCP_LOG_NORMAL(
-                                       "CFDC on the adapter %s has being "
-                                       "hardened on primary and secondary SE\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                               break;
-
-                       case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
-                               ZFCP_LOG_NORMAL(
-                                       "CFDC of the adapter %s could not "
-                                       "be saved on the SE\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                               break;
-
-                       case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
-                               ZFCP_LOG_NORMAL(
-                                       "CFDC of the adapter %s could not "
-                                       "be copied to the secondary SE\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                               break;
-
-                       default:
-                               ZFCP_LOG_NORMAL(
-                                       "CFDC could not be hardened "
-                                       "on the adapter %s\n",
-                                       zfcp_get_busid_by_adapter(adapter));
-                       }
-               }
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EAGAIN;
-               break;
-
-       case FSF_AUTHORIZATION_FAILURE:
-               ZFCP_LOG_NORMAL(
-                       "Adapter %s does not accept privileged commands\n",
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EACCES;
+       case FSF_HANDLE_MISMATCH:
+       case FSF_PORT_HANDLE_NOT_VALID:
+               zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
-       case FSF_CFDC_ERROR_DETECTED:
-               ZFCP_LOG_NORMAL(
-                       "Error at position %d in the CFDC, "
-                       "CFDC is discarded by the adapter %s\n",
-                       header->fsf_status_qual.word[0],
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EPERM;
+       case FSF_FCPLUN_NOT_VALID:
+       case FSF_LUN_HANDLE_NOT_VALID:
+               zfcp_erp_port_reopen(unit->port, 0, 113, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
-       case FSF_CONTROL_FILE_UPDATE_ERROR:
-               ZFCP_LOG_NORMAL(
-                       "Adapter %s cannot harden the control file, "
-                       "file is discarded\n",
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EIO;
+       case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+               zfcp_fsf_class_not_supp(req);
                break;
-
-       case FSF_CONTROL_FILE_TOO_LARGE:
-               ZFCP_LOG_NORMAL(
-                       "Control file is too large, file is discarded "
-                       "by the adapter %s\n",
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EIO;
+       case FSF_ACCESS_DENIED:
+               zfcp_fsf_access_denied_unit(req, unit);
                break;
-
-       case FSF_ACCESS_CONFLICT_DETECTED:
-               if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
-                       ZFCP_LOG_NORMAL(
-                               "CFDC has been discarded by the adapter %s, "
-                               "because activation would impact "
-                               "%d active connection(s)\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               header->fsf_status_qual.word[0]);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EIO;
+       case FSF_DIRECTION_INDICATOR_NOT_VALID:
+               dev_err(&req->adapter->ccw_device->dev,
+                       "Invalid data direction (%d) given for unit "
+                       "0x%016Lx on port 0x%016Lx, shutting down "
+                       "adapter.\n",
+                       req->qtcb->bottom.io.data_direction,
+                       unit->fcp_lun, unit->port->wwpn);
+               zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
-       case FSF_CONFLICTS_OVERRULED:
-               if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
-                       ZFCP_LOG_NORMAL(
-                               "CFDC has been activated on the adapter %s, "
-                               "but activation has impacted "
-                               "%d active connection(s)\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               header->fsf_status_qual.word[0]);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EIO;
+       case FSF_CMND_LENGTH_NOT_VALID:
+               dev_err(&req->adapter->ccw_device->dev,
+                       "An invalid control-data-block length field (%d) "
+                       "was found in a command for unit 0x%016Lx on port "
+                       "0x%016Lx. Shutting down adapter.\n",
+                       req->qtcb->bottom.io.fcp_cmnd_length,
+                       unit->fcp_lun, unit->port->wwpn);
+               zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
-
-       case FSF_UNKNOWN_OP_SUBTYPE:
-               ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, "
-                               "op_subtype=0x%x)\n",
-                               zfcp_get_busid_by_adapter(adapter),
-                               bottom->operation_subtype);
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EINVAL;
+       case FSF_PORT_BOXED:
+               zfcp_erp_port_boxed(unit->port, 53, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
-       case FSF_INVALID_COMMAND_OPTION:
-               ZFCP_LOG_NORMAL(
-                       "Invalid option 0x%x has been specified "
-                       "in QTCB bottom sent to the adapter %s\n",
-                       bottom->option,
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EINVAL;
+       case FSF_LUN_BOXED:
+               zfcp_erp_unit_boxed(unit, 54, req);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+                              ZFCP_STATUS_FSFREQ_RETRY;
                break;
-
-       default:
-               ZFCP_LOG_NORMAL(
-                       "bug: An unknown/unexpected FSF status 0x%08x "
-                       "was presented on the adapter %s\n",
-                       header->fsf_status,
-                       zfcp_get_busid_by_adapter(adapter));
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-               retval = -EINVAL;
+       case FSF_ADAPTER_STATUS_AVAILABLE:
+               if (header->fsf_status_qual.word[0] ==
+                   FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
+                       zfcp_test_link(unit->port);
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
        }
-
 skip_fsfstatus:
-       return retval;
-}
-
-static inline int
-zfcp_fsf_req_sbal_check(unsigned long *flags,
-                       struct zfcp_qdio_queue *queue, int needed)
-{
-       write_lock_irqsave(&queue->queue_lock, *flags);
-       if (likely(atomic_read(&queue->free_count) >= needed))
-               return 1;
-       write_unlock_irqrestore(&queue->queue_lock, *flags);
-       return 0;
-}
-
-/*
- * set qtcb pointer in fsf_req and initialize QTCB
- */
-static void
-zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
-{
-       if (likely(fsf_req->qtcb != NULL)) {
-               fsf_req->qtcb->prefix.req_seq_no =
-                       fsf_req->adapter->fsf_req_seq_no;
-               fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
-               fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
-               fsf_req->qtcb->prefix.qtcb_type =
-                       fsf_qtcb_type[fsf_req->fsf_command];
-               fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
-               fsf_req->qtcb->header.req_handle = fsf_req->req_id;
-               fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
+       if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+               zfcp_fsf_send_fcp_ctm_handler(req);
+       else {
+               zfcp_fsf_send_fcp_command_task_handler(req);
+               req->unit = NULL;
+               zfcp_unit_put(unit);
        }
 }
 
 /**
- * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue
- * @adapter: adapter for which request queue is examined
- * @req_flags: flags indicating whether to wait for needed SBAL or not
- * @lock_flags: lock_flags if queue_lock is taken
- * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS
- * Locks: lock adapter->request_queue->queue_lock on success
- */
-static int
-zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
-                     unsigned long *lock_flags)
-{
-        long ret;
-        struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
-
-        if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
-                ret = wait_event_interruptible_timeout(adapter->request_wq,
-                       zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
-                                                      ZFCP_SBAL_TIMEOUT);
-               if (ret < 0)
-                       return ret;
-               if (!ret)
-                       return -EIO;
-        } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
-                return -EIO;
-
-        return 0;
-}
-
-/*
- * function:    zfcp_fsf_req_create
- *
- * purpose:    create an FSF request at the specified adapter and
- *             setup common fields
- *
- * returns:    -ENOMEM if there was insufficient memory for a request
- *              -EIO if no qdio buffers could be allocate to the request
- *              -EINVAL/-EPERM on bug conditions in req_dequeue
- *              0 in success
- *
- * note:        The created request is returned by reference.
- *
- * locks:      lock of concerned request queue must not be held,
- *             but is held on completion (write, irqsave)
+ * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
+ * @adapter: adapter where scsi command is issued
+ * @unit: unit where command is sent to
+ * @scsi_cmnd: scsi command to be sent
+ * @use_timer: whether to start the request timer when the request is initiated
+ * @req_flags: flags for fsf_request
  */
-int
-zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
-                   mempool_t *pool, unsigned long *lock_flags,
-                   struct zfcp_fsf_req **fsf_req_p)
+int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
+                                  struct zfcp_unit *unit,
+                                  struct scsi_cmnd *scsi_cmnd,
+                                  int use_timer, int req_flags)
 {
-       volatile struct qdio_buffer_element *sbale;
-       struct zfcp_fsf_req *fsf_req = NULL;
-       int ret = 0;
-       struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
-
-       /* allocate new FSF request */
-       fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
-       if (unlikely(NULL == fsf_req)) {
-               ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
-                              "the outbound (send) queue.\n");
-               ret = -ENOMEM;
-               goto failed_fsf_req;
-       }
-
-       fsf_req->adapter = adapter;
-       fsf_req->fsf_command = fsf_cmd;
-       INIT_LIST_HEAD(&fsf_req->list);
-       init_timer(&fsf_req->timer);
+       struct zfcp_fsf_req *req;
+       struct fcp_cmnd_iu *fcp_cmnd_iu;
+       unsigned int sbtype;
+       int real_bytes, retval = -EIO;
 
-       /* initialize waitqueue which may be used to wait on
-          this request completion */
-       init_waitqueue_head(&fsf_req->completion_wq);
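+       /* refuse new commands while the unit is blocked by error recovery */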
+       if (unlikely(!(atomic_read(&unit->status) &
+                      ZFCP_STATUS_COMMON_UNBLOCKED)))
+               return -EBUSY;
 
-        ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
-        if (ret < 0)
-                goto failed_sbals;
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+                                 adapter->pool.fsf_req_scsi);
+       if (unlikely(IS_ERR(req))) {
+               retval = PTR_ERR(req);
+               goto out;
+       }
 
-       /* this is serialized (we are holding req_queue-lock of adapter) */
-       if (adapter->req_no == 0)
-               adapter->req_no++;
-       fsf_req->req_id = adapter->req_no++;
+       zfcp_unit_get(unit);
+       req->unit = unit;
+       req->data = scsi_cmnd;
+       req->handler = zfcp_fsf_send_fcp_command_handler;
+       req->qtcb->header.lun_handle = unit->handle;
+       req->qtcb->header.port_handle = unit->port->handle;
+       req->qtcb->bottom.io.service_class = FSF_CLASS_3;
 
-       zfcp_fsf_req_qtcb_init(fsf_req);
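+       /* remember the request id in the SCSI command for lookup on abort */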
+       scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
 
+       fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
+       fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
        /*
-        * We hold queue_lock here. Check if QDIOUP is set and let request fail
-        * if it is not set (see also *_open_qdio and *_close_qdio).
+        * set depending on data direction:
+        *      data direction bits in SBALE (SB Type)
+        *      data direction bits in QTCB
+        *      data direction bits in FCP_CMND IU
         */
-
-       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
-               write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
-               ret = -EIO;
-               goto failed_sbals;
+       switch (scsi_cmnd->sc_data_direction) {
+       case DMA_NONE:
+               req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+               sbtype = SBAL_FLAGS0_TYPE_READ;
+               break;
+       case DMA_FROM_DEVICE:
+               req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
+               sbtype = SBAL_FLAGS0_TYPE_READ;
+               fcp_cmnd_iu->rddata = 1;
+               break;
+       case DMA_TO_DEVICE:
+               req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
+               sbtype = SBAL_FLAGS0_TYPE_WRITE;
+               fcp_cmnd_iu->wddata = 1;
+               break;
+       case DMA_BIDIRECTIONAL:
+       default:
+               retval = -EIO;
+               goto failed_scsi_cmnd;
        }
 
-       if (fsf_req->qtcb) {
-               fsf_req->seq_no = adapter->fsf_req_seq_no;
-               fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
-       }
-       fsf_req->sbal_number = 1;
-       fsf_req->sbal_first = req_queue->free_index;
-       fsf_req->sbal_curr = req_queue->free_index;
-        fsf_req->sbale_curr = 1;
+       if (likely((scsi_cmnd->device->simple_tags) ||
+                  ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
+                   (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
+               fcp_cmnd_iu->task_attribute = SIMPLE_Q;
+       else
+               fcp_cmnd_iu->task_attribute = UNTAGGED;
 
-       if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) {
-               fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       }
+       if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
+               fcp_cmnd_iu->add_fcp_cdb_length =
+                       (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
+
+       memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
+               fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t);
 
-       /* setup common SBALE fields */
-       sbale[0].addr = (void *) fsf_req->req_id;
-       sbale[0].flags |= SBAL_FLAGS0_COMMAND;
-       if (likely(fsf_req->qtcb != NULL)) {
-               sbale[1].addr = (void *) fsf_req->qtcb;
-               sbale[1].length = sizeof(struct fsf_qtcb);
+       real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
+                                            scsi_sglist(scsi_cmnd),
+                                            FSF_MAX_SBALS_PER_REQ);
+       if (unlikely(real_bytes < 0)) {
+               if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
+                       retval = -EIO;
+               else {
+                       dev_err(&adapter->ccw_device->dev,
+                               "SCSI request too large. "
+                               "Shutting down unit 0x%016Lx on port "
+                               "0x%016Lx.\n", unit->fcp_lun,
+                               unit->port->wwpn);
+                       zfcp_erp_unit_shutdown(unit, 0, 131, req);
+                       retval = -EINVAL;
+               }
+               goto failed_scsi_cmnd;
        }
 
-       ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n",
-                       fsf_req->sbal_number, fsf_req->sbal_first);
+       zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
 
-       goto success;
+       if (use_timer)
+               zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 
- failed_sbals:
-/* dequeue new FSF request previously enqueued */
-       zfcp_fsf_req_free(fsf_req);
-       fsf_req = NULL;
+       retval = zfcp_fsf_req_send(req);
+       if (unlikely(retval))
+               goto failed_scsi_cmnd;
 
- failed_fsf_req:
-       write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
- success:
-       *fsf_req_p = fsf_req;
-       return ret;
+       goto out;
+
+failed_scsi_cmnd:
+       zfcp_unit_put(unit);
+       zfcp_fsf_req_free(req);
+       scsi_cmnd->host_scribble = NULL;
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return retval;
 }
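
The switch statement above fans the SCSI data direction out into three encodings at once: the QTCB data_direction field, the SBAL storage-block type, and the rddata/wddata bits of the FCP_CMND IU. A minimal stand-alone sketch of that mapping follows; the ex_* names are invented for illustration and are not driver symbols.

    /* Illustrative userspace sketch of the direction fan-out above.
     * The ex_* names are made up; only the three-way mapping mirrors
     * the driver logic. */
    #include <stdio.h>

    enum ex_dir { EX_NONE, EX_FROM_DEVICE, EX_TO_DEVICE };

    struct ex_setting {
        const char *qtcb_dir;   /* stands in for FSF_DATADIR_* */
        const char *sbal_type;  /* stands in for SBAL_FLAGS0_TYPE_* */
        int rddata, wddata;     /* FCP_CMND IU read/write bits */
    };

    static struct ex_setting ex_map(enum ex_dir d)
    {
        switch (d) {
        case EX_FROM_DEVICE:
            return (struct ex_setting){ "READ",  "READ",  1, 0 };
        case EX_TO_DEVICE:
            return (struct ex_setting){ "WRITE", "WRITE", 0, 1 };
        default:                /* EX_NONE: command only, no data */
            return (struct ex_setting){ "CMND",  "READ",  0, 0 };
        }
    }

    int main(void)
    {
        struct ex_setting s = ex_map(EX_FROM_DEVICE);
        printf("qtcb=%s sbal=%s rd=%d wr=%d\n",
               s.qtcb_dir, s.sbal_type, s.rddata, s.wddata);
        return 0;
    }
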
 
-/*
- * function:    zfcp_fsf_req_send
- *
- * purpose:    start transfer of FSF request via QDIO
- *
- * returns:    0 - request transfer succesfully started
- *             !0 - start of request transfer failed
+/**
+ * zfcp_fsf_send_fcp_ctm - send SCSI task management command
+ * @adapter: pointer to struct zfcp_adapter
+ * @unit: pointer to struct zfcp_unit
+ * @tm_flags: unsigned byte for task management flags
+ * @req_flags: int request flags
+ * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
  */
-static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
+struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
+                                          struct zfcp_unit *unit,
+                                          u8 tm_flags, int req_flags)
 {
-       struct zfcp_adapter *adapter;
-       struct zfcp_qdio_queue *req_queue;
        volatile struct qdio_buffer_element *sbale;
-       int inc_seq_no;
-       int new_distance_from_int;
-       int retval = 0;
+       struct zfcp_fsf_req *req = NULL;
+       struct fcp_cmnd_iu *fcp_cmnd_iu;
 
-       adapter = fsf_req->adapter;
-       req_queue = &adapter->request_queue,
+       if (unlikely(!(atomic_read(&unit->status) &
+                      ZFCP_STATUS_COMMON_UNBLOCKED)))
+               return NULL;
 
+       spin_lock(&adapter->req_q.lock);
+       if (!atomic_read(&adapter->req_q.count))
+               goto out;
+       req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+                                 adapter->pool.fsf_req_scsi);
+       if (unlikely(IS_ERR(req)))
+               goto out;
 
-       /* FIXME(debug): remove it later */
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0);
-       ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags);
-       ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
-       ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
-                     sbale[1].length);
+       req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
+       req->data = unit;
+       req->handler = zfcp_fsf_send_fcp_command_handler;
+       req->qtcb->header.lun_handle = unit->handle;
+       req->qtcb->header.port_handle = unit->port->handle;
+       req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+       req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+       req->qtcb->bottom.io.fcp_cmnd_length =  sizeof(struct fcp_cmnd_iu) +
+                                               sizeof(fcp_dl_t);
+
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
+       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-       /* put allocated FSF request into hash table */
-       spin_lock(&adapter->req_list_lock);
-       zfcp_reqlist_add(adapter, fsf_req);
-       spin_unlock(&adapter->req_list_lock);
+       fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
+       fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
+       fcp_cmnd_iu->task_management_flags = tm_flags;
 
-       inc_seq_no = (fsf_req->qtcb != NULL);
+       zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+       if (!zfcp_fsf_req_send(req))
+               goto out;
 
-       ZFCP_LOG_TRACE("request queue of adapter %s: "
-                      "next free SBAL is %i, %i free SBALs\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      req_queue->free_index,
-                      atomic_read(&req_queue->free_count));
+       zfcp_fsf_req_free(req);
+       req = NULL;
+out:
+       spin_unlock(&adapter->req_q.lock);
+       return req;
+}
 
-       ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, "
-                      "index_in_queue=%i, count=%i, buffers=%p\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      QDIO_FLAG_SYNC_OUTPUT,
-                      0, fsf_req->sbal_first, fsf_req->sbal_number,
-                      &req_queue->buffer[fsf_req->sbal_first]);
+static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
+{
+       if (req->qtcb->header.fsf_status != FSF_GOOD)
+               req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
 
-       /*
-        * adjust the number of free SBALs in request queue as well as
-        * position of first one
-        */
-       atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
-       ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count));
-       req_queue->free_index += fsf_req->sbal_number;    /* increase */
-       req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;  /* wrap if needed */
-       new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
+/**
+ * zfcp_fsf_control_file - control file upload/download
+ * @adapter: pointer to struct zfcp_adapter
+ * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
+ * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
+ */
+struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
+                                          struct zfcp_fsf_cfdc *fsf_cfdc)
+{
+       volatile struct qdio_buffer_element *sbale;
+       struct zfcp_fsf_req *req = NULL;
+       struct fsf_qtcb_bottom_support *bottom;
+       int direction, retval = -EIO, bytes;
 
-       fsf_req->issued = get_clock();
+       if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
+               return ERR_PTR(-EOPNOTSUPP);
 
-       retval = do_QDIO(adapter->ccw_device,
-                        QDIO_FLAG_SYNC_OUTPUT,
-                        0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
+       switch (fsf_cfdc->command) {
+       case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+               direction = SBAL_FLAGS0_TYPE_WRITE;
+               break;
+       case FSF_QTCB_UPLOAD_CONTROL_FILE:
+               direction = SBAL_FLAGS0_TYPE_READ;
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
 
-       if (unlikely(retval)) {
-               /* Queues are down..... */
-               retval = -EIO;
-               del_timer(&fsf_req->timer);
-               spin_lock(&adapter->req_list_lock);
-               zfcp_reqlist_remove(adapter, fsf_req);
-               spin_unlock(&adapter->req_list_lock);
-               /* undo changes in request queue made for this request */
-               zfcp_qdio_zero_sbals(req_queue->buffer,
-                                    fsf_req->sbal_first, fsf_req->sbal_number);
-               atomic_add(fsf_req->sbal_number, &req_queue->free_count);
-               req_queue->free_index -= fsf_req->sbal_number;
-               req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
-               req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
-               zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req);
-       } else {
-               req_queue->distance_from_int = new_distance_from_int;
-               /*
-                * increase FSF sequence counter -
-                * this must only be done for request successfully enqueued to
-                * QDIO this rejected requests may be cleaned up by calling
-                * routines  resulting in missing sequence counter values
-                * otherwise,
-                */
+       spin_lock(&adapter->req_q.lock);
+       if (zfcp_fsf_req_sbal_get(adapter))
+               goto out;
+
+       req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
+       if (unlikely(IS_ERR(req))) {
+               retval = -EPERM;
+               goto out;
+       }
+
+       req->handler = zfcp_fsf_control_file_handler;
+
+       sbale = zfcp_qdio_sbale_req(req);
+       sbale[0].flags |= direction;
 
-               /* Don't increase for unsolicited status */
-               if (inc_seq_no)
-                       adapter->fsf_req_seq_no++;
+       bottom = &req->qtcb->bottom.support;
+       bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
+       bottom->option = fsf_cfdc->option;
 
-               /* count FSF requests pending */
-               atomic_inc(&adapter->reqs_active);
+       bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
+                                       FSF_MAX_SBALS_PER_REQ);
+       if (bytes != ZFCP_CFDC_MAX_SIZE) {
+               retval = -ENOMEM;
+               zfcp_fsf_req_free(req);
+               goto out;
        }
-       return retval;
-}
 
-#undef ZFCP_LOG_AREA
+       zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+       retval = zfcp_fsf_req_send(req);
+out:
+       spin_unlock(&adapter->req_q.lock);
+
+       if (!retval) {
+               wait_event(req->completion_wq,
+                          req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+               return req;
+       }
+       return ERR_PTR(retval);
+}
index 099970b..bf94b4d 100644
@@ -1,27 +1,16 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Interface to the FSF support functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #ifndef FSF_H
 #define FSF_H
 
+#include <linux/pfn.h>
+
 #define FSF_QTCB_CURRENT_VERSION               0x00000001
 
 /* FSF commands */
 #define FSF_UNIT_ACCESS_EXCLUSIVE              0x02000000
 #define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER      0x10000000
 
+/* FSF interface for CFDC */
+#define ZFCP_CFDC_MAX_SIZE             127 * 1024
+#define ZFCP_CFDC_PAGES                PFN_UP(ZFCP_CFDC_MAX_SIZE)
+
+struct zfcp_fsf_cfdc {
+       struct scatterlist sg[ZFCP_CFDC_PAGES];
+       u32 command;
+       u32 option;
+};
+
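
ZFCP_CFDC_PAGES above simply rounds the maximum control-file size up to whole pages. A small worked example, assuming a 4 KiB page size and reimplementing the round-up locally (EX_* names are invented; the kernel uses PFN_UP):

    #include <stdio.h>

    #define EX_PAGE_SIZE     4096UL
    #define EX_PFN_UP(x)     (((x) + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE)
    #define EX_CFDC_MAX_SIZE (127 * 1024UL)

    int main(void)
    {
        /* 127 KiB = 130048 bytes -> 31.75 pages -> rounded up to 32 */
        printf("pages = %lu\n", EX_PFN_UP(EX_CFDC_MAX_SIZE));
        return 0;
    }
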
 struct fsf_queue_designator {
        u8  cssid;
        u8  chpid;
@@ -288,6 +287,18 @@ struct fsf_bit_error_payload {
        u32 current_transmit_b2b_credit;
 } __attribute__ ((packed));
 
+struct fsf_link_down_info {
+       u32 error_code;
+       u32 res1;
+       u8 res2[2];
+       u8 primary_status;
+       u8 ioerr_code;
+       u8 action_code;
+       u8 reason_code;
+       u8 explanation_code;
+       u8 vendor_specific_code;
+} __attribute__ ((packed));
+
 struct fsf_status_read_buffer {
        u32 status_type;
        u32 status_subtype;
@@ -298,7 +309,12 @@ struct fsf_status_read_buffer {
        u32 class;
        u64 fcp_lun;
        u8  res3[24];
-       u8  payload[FSF_STATUS_READ_PAYLOAD_SIZE];
+       union {
+               u8  data[FSF_STATUS_READ_PAYLOAD_SIZE];
+               u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
+               struct fsf_link_down_info link_down_info;
+               struct fsf_bit_error_payload bit_error;
+       } payload;
 } __attribute__ ((packed));
 
 struct fsf_qual_version_error {
@@ -311,23 +327,19 @@ struct fsf_qual_sequence_error {
        u32 res1[3];
 } __attribute__ ((packed));
 
-struct fsf_link_down_info {
-       u32 error_code;
-       u32 res1;
-       u8 res2[2];
-       u8 primary_status;
-       u8 ioerr_code;
-       u8 action_code;
-       u8 reason_code;
-       u8 explanation_code;
-       u8 vendor_specific_code;
+struct fsf_qual_latency_info {
+       u32 channel_lat;
+       u32 fabric_lat;
+       u8 res1[8];
 } __attribute__ ((packed));
 
 union fsf_prot_status_qual {
+       u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
        u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
        struct fsf_qual_version_error   version_error;
        struct fsf_qual_sequence_error  sequence_error;
        struct fsf_link_down_info link_down_info;
+       struct fsf_qual_latency_info latency_info;
 } __attribute__ ((packed));
 
 struct fsf_qtcb_prefix {
@@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config {
        u32 fc_link_speed;
        u32 adapter_type;
        u32 peer_d_id;
-       u8 res2[12];
+       u8 res1[2];
+       u16 timer_interval;
+       u8 res2[8];
        u32 s_id;
        struct fsf_nport_serv_param nport_serv_param;
        u8 reserved_nport_serv_param[16];
index 8ca5f07..72e3094 100644
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Setup and helper functions to access QDIO.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #include "zfcp_ext.h"
 
-static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
-       (struct zfcp_qdio_queue *, int, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
-       (struct zfcp_fsf_req *, int, int);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
-       (struct zfcp_fsf_req *, unsigned long);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
-       (struct zfcp_fsf_req *, unsigned long);
-static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
-static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
-static void zfcp_qdio_sbale_fill
-       (struct zfcp_fsf_req *, unsigned long, void *, int);
-static int zfcp_qdio_sbals_from_segment
-       (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
-
-static qdio_handler_t zfcp_qdio_request_handler;
-static qdio_handler_t zfcp_qdio_response_handler;
-static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
-       unsigned int, unsigned int, unsigned int, int, int);
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_QDIO
-
-/*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
- * in the adapter struct sbuf is the pointer array.
- *
- * locks:       must only be called with zfcp_data.config_sema taken
- */
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
-{
-       int pos;
-
-       for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
-               free_page((unsigned long) sbuf[pos]);
-}
+/* FIXME(tune): free space should be one max. SBAL chain plus what? */
+#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
+                               - (FSF_MAX_SBALS_PER_REQ + 4))
+#define QBUFF_PER_PAGE         (PAGE_SIZE / sizeof(struct qdio_buffer))
 
-/*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct.
- * Cur_buf is the pointer array
- *
- * returns:    zero on success else -ENOMEM
- * locks:       must only be called with zfcp_data.config_sema taken
- */
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
+static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 {
        int pos;
 
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
-               sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
-               if (!sbuf[pos]) {
-                       zfcp_qdio_buffers_dequeue(sbuf);
+               sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+               if (!sbal[pos])
                        return -ENOMEM;
-               }
        }
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
                if (pos % QBUFF_PER_PAGE)
-                       sbuf[pos] = sbuf[pos - 1] + 1;
+                       sbal[pos] = sbal[pos - 1] + 1;
        return 0;
 }
 
-/* locks:       must only be called with zfcp_data.config_sema taken */
-int
-zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
+static volatile struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
 {
-       int ret;
-
-       ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
-       if (ret)
-               return ret;
-       return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
+       return &q->sbal[sbal_idx]->element[sbale_idx];
 }
 
-/* locks:       must only be called with zfcp_data.config_sema taken */
-void
-zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
+/**
+ * zfcp_qdio_free - free memory used by request- and response queue
+ * @adapter: pointer to the zfcp_adapter structure
+ */
+void zfcp_qdio_free(struct zfcp_adapter *adapter)
 {
-       ZFCP_LOG_TRACE("freeing request_queue buffers\n");
-       zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
+       struct qdio_buffer **sbal_req, **sbal_resp;
+       int p;
 
-       ZFCP_LOG_TRACE("freeing response_queue buffers\n");
-       zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
-}
+       if (adapter->ccw_device)
+               qdio_free(adapter->ccw_device);
 
-int
-zfcp_qdio_allocate(struct zfcp_adapter *adapter)
-{
-       struct qdio_initialize *init_data;
+       sbal_req = adapter->req_q.sbal;
+       sbal_resp = adapter->resp_q.sbal;
 
-       init_data = &adapter->qdio_init_data;
+       for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+               free_page((unsigned long) sbal_req[p]);
+               free_page((unsigned long) sbal_resp[p]);
+       }
+}
 
-       init_data->cdev = adapter->ccw_device;
-       init_data->q_format = QDIO_SCSI_QFMT;
-       memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
-       ASCEBC(init_data->adapter_name, 8);
-       init_data->qib_param_field_format = 0;
-       init_data->qib_param_field = NULL;
-       init_data->input_slib_elements = NULL;
-       init_data->output_slib_elements = NULL;
-       init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
-       init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
-       init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
-       init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
-       init_data->no_input_qs = 1;
-       init_data->no_output_qs = 1;
-       init_data->input_handler = zfcp_qdio_response_handler;
-       init_data->output_handler = zfcp_qdio_request_handler;
-       init_data->int_parm = (unsigned long) adapter;
-       init_data->flags = QDIO_INBOUND_0COPY_SBALS |
-           QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
-       init_data->input_sbal_addr_array =
-           (void **) (adapter->response_queue.buffer);
-       init_data->output_sbal_addr_array =
-           (void **) (adapter->request_queue.buffer);
+static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
+{
+       dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
 
-       return qdio_allocate(init_data);
+       zfcp_erp_adapter_reopen(adapter,
+                               ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+                               ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
 }
 
-/*
- * function:           zfcp_qdio_handler_error_check
- *
- * purpose:     called by the response handler to determine error condition
- *
- * returns:    error flag
- *
- */
-static int
-zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
-                             unsigned int qdio_error, unsigned int siga_error,
-                             int first_element, int elements_processed)
+static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 {
-       int retval = 0;
+       int i, sbal_idx;
 
-       if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
-               retval = -EIO;
-
-               ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
-                             "qdio_error=0x%x, siga_error=0x%x)\n",
-                             status, qdio_error, siga_error);
-
-               zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
-                               first_element, elements_processed);
-               /*
-                       * Restarting IO on the failed adapter from scratch.
-                * Since we have been using this adapter, it is save to assume
-                * that it is not failed but recoverable. The card seems to
-                * report link-up events by self-initiated queue shutdown.
-                * That is why we need to clear the link-down flag
-                * which is set again in case we have missed by a mile.
-                */
-               zfcp_erp_adapter_reopen(adapter,
-                                       ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-                                       ZFCP_STATUS_COMMON_ERP_FAILED, 140,
-                                       NULL);
+       for (i = first; i < first + cnt; i++) {
+               sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
+               memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
        }
-       return retval;
 }
 
-/*
- * function:    zfcp_qdio_request_handler
- *
- * purpose:    is called by QDIO layer for completed SBALs in request queue
- *
- * returns:    (void)
- */
-static void
-zfcp_qdio_request_handler(struct ccw_device *ccw_device,
-                         unsigned int status,
-                         unsigned int qdio_error,
-                         unsigned int siga_error,
-                         unsigned int queue_number,
-                         int first_element,
-                         int elements_processed,
-                         unsigned long int_parm)
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
+                             unsigned int qdio_err, unsigned int siga_err,
+                             unsigned int queue_no, int first, int count,
+                             unsigned long parm)
 {
-       struct zfcp_adapter *adapter;
-       struct zfcp_qdio_queue *queue;
-
-       adapter = (struct zfcp_adapter *) int_parm;
-       queue = &adapter->request_queue;
-
-       ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      first_element, elements_processed);
+       struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+       struct zfcp_qdio_queue *queue = &adapter->req_q;
 
-       if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-                                                  siga_error, first_element,
-                                                  elements_processed)))
-               goto out;
-       /*
-        * we stored address of struct zfcp_adapter  data structure
-        * associated with irq in int_parm
-        */
+       if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
+               zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
+                                       first, count);
+               zfcp_qdio_handler_error(adapter, 140);
+               return;
+       }
 
        /* cleanup all SBALs being program-owned now */
-       zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);
+       zfcp_qdio_zero_sbals(queue->sbal, first, count);
 
-       /* increase free space in outbound queue */
-       atomic_add(elements_processed, &queue->free_count);
-       ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
+       atomic_add(count, &queue->count);
        wake_up(&adapter->request_wq);
-       ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
-                      elements_processed, atomic_read(&queue->free_count));
- out:
-       return;
 }
 
-/**
- * zfcp_qdio_reqid_check - checks for valid reqids.
- */
 static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
-                                 unsigned long req_id)
+                                 unsigned long req_id, int sbal_idx)
 {
        struct zfcp_fsf_req *fsf_req;
        unsigned long flags;
@@ -248,203 +110,117 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
                 * Unknown request means that we have potentially memory
                 * corruption and must stop the machine immediately.
                 */
-               panic("error: unknown request id (%ld) on adapter %s.\n",
+               panic("error: unknown request id (%lx) on adapter %s.\n",
                      req_id, zfcp_get_busid_by_adapter(adapter));
 
        zfcp_reqlist_remove(adapter, fsf_req);
-       atomic_dec(&adapter->reqs_active);
        spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-       /* finish the FSF request */
+       fsf_req->sbal_response = sbal_idx;
        zfcp_fsf_req_complete(fsf_req);
 }
 
-/*
- * function:           zfcp_qdio_response_handler
- *
- * purpose:    is called by QDIO layer for completed SBALs in response queue
- *
- * returns:    (void)
- */
-static void
-zfcp_qdio_response_handler(struct ccw_device *ccw_device,
-                          unsigned int status,
-                          unsigned int qdio_error,
-                          unsigned int siga_error,
-                          unsigned int queue_number,
-                          int first_element,
-                          int elements_processed,
-                          unsigned long int_parm)
+static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
 {
-       struct zfcp_adapter *adapter;
-       struct zfcp_qdio_queue *queue;
-       int buffer_index;
-       int i;
-       struct qdio_buffer *buffer;
-       int retval = 0;
-       u8 count;
-       u8 start;
-       volatile struct qdio_buffer_element *buffere = NULL;
-       int buffere_index;
-
-       adapter = (struct zfcp_adapter *) int_parm;
-       queue = &adapter->response_queue;
-
-       if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-                                                  siga_error, first_element,
-                                                  elements_processed)))
-               goto out;
+       struct zfcp_qdio_queue *queue = &adapter->resp_q;
+       struct ccw_device *cdev = adapter->ccw_device;
+       u8 count, start = queue->first;
+       unsigned int retval;
 
-       /*
-        * we stored address of struct zfcp_adapter  data structure
-        * associated with irq in int_parm
-        */
+       count = atomic_read(&queue->count) + processed;
+
+       retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
+                        0, start, count, NULL);
+
+       if (unlikely(retval)) {
+               atomic_set(&queue->count, count);
+               /* FIXME: Recover this with an adapter reopen? */
+       } else {
+               queue->first += count;
+               queue->first %= QDIO_MAX_BUFFERS_PER_Q;
+               atomic_set(&queue->count, 0);
+       }
+}
+
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
+                              unsigned int qdio_err, unsigned int siga_err,
+                              unsigned int queue_no, int first, int count,
+                              unsigned long parm)
+{
+       struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+       struct zfcp_qdio_queue *queue = &adapter->resp_q;
+       volatile struct qdio_buffer_element *sbale;
+       int sbal_idx, sbale_idx, sbal_no;
+
+       if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
+               zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
+                                       first, count);
+               zfcp_qdio_handler_error(adapter, 147);
+               return;
+       }
 
-       buffere = &(queue->buffer[first_element]->element[0]);
-       ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
        /*
         * go through all SBALs from input queue currently
         * returned by QDIO layer
         */
-
-       for (i = 0; i < elements_processed; i++) {
-
-               buffer_index = first_element + i;
-               buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
-               buffer = queue->buffer[buffer_index];
+       for (sbal_no = 0; sbal_no < count; sbal_no++) {
+               sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
 
                /* go through all SBALEs of SBAL */
-               for (buffere_index = 0;
-                    buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
-                    buffere_index++) {
-
-                       /* look for QDIO request identifiers in SB */
-                       buffere = &buffer->element[buffere_index];
+               for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
+                    sbale_idx++) {
+                       sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
                        zfcp_qdio_reqid_check(adapter,
-                                             (unsigned long) buffere->addr);
-
-                       /*
-                        * A single used SBALE per inbound SBALE has been
-                        * implemented by QDIO so far. Hope they will
-                        * do some optimisation. Will need to change to
-                        * unlikely() then.
-                        */
-                       if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
+                                             (unsigned long) sbale->addr,
+                                             sbal_idx);
+                       if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
                                break;
                };
 
-               if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
-                       ZFCP_LOG_NORMAL("bug: End of inbound data "
-                                       "not marked!\n");
-               }
+               if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
+                       dev_warn(&adapter->ccw_device->dev,
+                                "Protocol violation by adapter. "
+                                "Continuing operations.\n");
        }
 
        /*
         * put range of SBALs back to response queue
         * (including SBALs which have already been free before)
         */
-       count = atomic_read(&queue->free_count) + elements_processed;
-       start = queue->free_index;
-
-       ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
-                      "queue_no=%i, index_in_queue=%i, count=%i, "
-                      "buffers=0x%lx\n",
-                      zfcp_get_busid_by_adapter(adapter),
-                      QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-                      0, start, count, (unsigned long) &queue->buffer[start]);
-
-       retval = do_QDIO(ccw_device,
-                        QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-                        0, start, count, NULL);
-
-       if (unlikely(retval)) {
-               atomic_set(&queue->free_count, count);
-               ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
-                              "queues may be down "
-                              "(count=%d, start=%d, retval=%d)\n",
-                              count, start, retval);
-       } else {
-               queue->free_index += count;
-               queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
-               atomic_set(&queue->free_count, 0);
-               ZFCP_LOG_TRACE("%i buffers enqueued to response "
-                              "queue at position %i\n", count, start);
-       }
- out:
-       return;
-}
-
-/**
- * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
- * @queue: queue from which SBALE should be returned
- * @sbal: specifies number of SBAL in queue
- * @sbale: specifes number of SBALE in SBAL
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
-{
-       return &queue->buffer[sbal]->element[sbale];
+       zfcp_qdio_resp_put_back(adapter, count);
 }
 
 /**
- * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
- *     a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
+zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
 {
-       return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
-                                  sbal, sbale);
+       return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
 }
 
 /**
- * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
- *     a struct zfcp_fsf_req
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
-{
-       return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
-                                  sbal, sbale);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
- *     a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
+zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
 {
-       return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
-                                  fsf_req->sbale_curr);
+       return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
+                              req->sbale_curr);
 }
 
-/**
- * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
- *     on the request_queue for a struct zfcp_fsf_req
- * @fsf_req: the number of the last SBAL that can be used is stored herein
- * @max_sbals: used to pass an upper limit for the number of SBALs
- *
- * Note: We can assume at least one free SBAL in the request_queue when called.
- */
-static void
-zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
+static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
 {
-       int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
+       int count = atomic_read(&fsf_req->adapter->req_q.count);
        count = min(count, max_sbals);
-       fsf_req->sbal_last  = fsf_req->sbal_first;
-       fsf_req->sbal_last += (count - 1);
-       fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+       fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
+                                       % QDIO_MAX_BUFFERS_PER_Q;
 }
 
-/**
- * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
- *     request
- * @fsf_req: zfcp_fsf_req to be processed
- * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
- *
- * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
- */
 static volatile struct qdio_buffer_element *
 zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 {
@@ -455,16 +231,16 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
        sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
 
        /* don't exceed last allowed SBAL */
-       if (fsf_req->sbal_curr == fsf_req->sbal_last)
+       if (fsf_req->sbal_last == fsf_req->sbal_limit)
                return NULL;
 
        /* set chaining flag in first SBALE of current SBAL */
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       sbale = zfcp_qdio_sbale_req(fsf_req);
        sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
 
        /* calculate index of next SBAL */
-       fsf_req->sbal_curr++;
-       fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
+       fsf_req->sbal_last++;
+       fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
 
        /* keep this requests number of SBALs up-to-date */
        fsf_req->sbal_number++;
@@ -479,214 +255,255 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
        return sbale;
 }
 
-/**
- * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
- */
 static volatile struct qdio_buffer_element *
 zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 {
        if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
                return zfcp_qdio_sbal_chain(fsf_req, sbtype);
-
        fsf_req->sbale_curr++;
-
        return zfcp_qdio_sbale_curr(fsf_req);
 }
 
-/**
- * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
- *     with zero from
- */
-static int
-zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
-{
-       struct qdio_buffer **buf = queue->buffer;
-       int curr = first;
-       int count = 0;
-
-       for(;;) {
-               curr %= QDIO_MAX_BUFFERS_PER_Q;
-               count++;
-               memset(buf[curr], 0, sizeof(struct qdio_buffer));
-               if (curr == last)
-                       break;
-               curr++;
-       }
-       return count;
-}
-
-
-/**
- * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
- */
-static inline int
-zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
+static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
 {
-       return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
-                                   fsf_req->sbal_first, fsf_req->sbal_curr);
+       struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
+       int first = fsf_req->sbal_first;
+       int last = fsf_req->sbal_last;
+       int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
+               QDIO_MAX_BUFFERS_PER_Q + 1;
+       zfcp_qdio_zero_sbals(sbal, first, count);
 }
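
zfcp_qdio_undo_sbals computes the inclusive distance from sbal_first to sbal_last on a ring that wraps at QDIO_MAX_BUFFERS_PER_Q; the added modulus keeps the count positive when the request has wrapped past the end of the queue. A stand-alone sketch with a worked wrap-around case (the ring size 128 is an assumption matching QDIO_MAX_BUFFERS_PER_Q):

    #include <stdio.h>

    #define EX_RING_SIZE 128  /* assumed value of QDIO_MAX_BUFFERS_PER_Q */

    static int ex_inclusive_count(int first, int last)
    {
        return (last - first + EX_RING_SIZE) % EX_RING_SIZE + 1;
    }

    int main(void)
    {
        /* request spanning SBALs 126, 127, 0, 1 -> 4 buffers to wipe */
        printf("%d\n", ex_inclusive_count(126, 1));
        /* no wrap: SBALs 5..7 -> 3 buffers */
        printf("%d\n", ex_inclusive_count(5, 7));
        return 0;
    }
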
 
-
-/**
- * zfcp_qdio_sbale_fill - set address and length in current SBALE
- *     on request_queue
- */
-static void
-zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-                    void *addr, int length)
+static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
+                               unsigned int sbtype, void *start_addr,
+                               unsigned int total_length)
 {
        volatile struct qdio_buffer_element *sbale;
-
-       sbale = zfcp_qdio_sbale_curr(fsf_req);
-       sbale->addr = addr;
-       sbale->length = length;
-}
-
-/**
- * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @start_addr: address of memory segment
- * @total_length: length of memory segment
- *
- * Alignment and length of the segment determine how many SBALEs are needed
- * for the memory segment.
- */
-static int
-zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-                            void *start_addr, unsigned long total_length)
-{
        unsigned long remaining, length;
        void *addr;
 
-       /* split segment up heeding page boundaries */
+       /* split segment up */
        for (addr = start_addr, remaining = total_length; remaining > 0;
             addr += length, remaining -= length) {
-               /* get next free SBALE for new piece */
-               if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) {
-                       /* no SBALE left, clean up and leave */
-                       zfcp_qdio_sbals_wipe(fsf_req);
+               sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
+               if (!sbale) {
+                       zfcp_qdio_undo_sbals(fsf_req);
                        return -EINVAL;
                }
-               /* calculate length of new piece */
+
+               /* new piece must not exceed next page boundary */
                length = min(remaining,
-                            (PAGE_SIZE - ((unsigned long) addr &
+                            (PAGE_SIZE - ((unsigned long)addr &
                                           (PAGE_SIZE - 1))));
-               /* fill current SBALE with calculated piece */
-               zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
+               sbale->addr = addr;
+               sbale->length = length;
        }
-       return total_length;
+       return 0;
 }
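
zfcp_qdio_fill_sbals caps every piece at the next page boundary, since a single SBALE must not describe memory that crosses a page. A stand-alone sketch of that chunking, assuming 4 KiB pages (EX_* names invented for the example):

    #include <stdio.h>

    #define EX_PAGE_SIZE 4096UL  /* assumed page size */

    /* print the pieces a buffer would be split into, one per SBALE */
    static void ex_split(unsigned long addr, unsigned long total)
    {
        unsigned long remaining, length;

        for (remaining = total; remaining > 0;
             addr += length, remaining -= length) {
            length = EX_PAGE_SIZE - (addr & (EX_PAGE_SIZE - 1));
            if (length > remaining)
                length = remaining;
            printf("addr=0x%lx len=%lu\n", addr, length);
        }
    }

    int main(void)
    {
        /* 6000 bytes starting 100 bytes before a page boundary:
         * pieces of 100, 4096 and 1804 bytes */
        ex_split(0x10000 - 100, 6000);
        return 0;
    }
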
 
-
 /**
  * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
  * @fsf_req: request to be processed
  * @sbtype: SBALE flags
  * @sg: scatter-gather list
- * @sg_count: number of elements in scatter-gather list
  * @max_sbals: upper bound for number of SBALs to be used
+ * Returns: number of bytes, or error (negative)
  */
-int
-zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-                        struct scatterlist *sgl, int sg_count, int max_sbals)
+int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+                           struct scatterlist *sg, int max_sbals)
 {
-       int sg_index;
-       struct scatterlist *sg_segment;
-       int retval;
        volatile struct qdio_buffer_element *sbale;
-       int bytes = 0;
+       int retval, bytes = 0;
 
        /* figure out last allowed SBAL */
        zfcp_qdio_sbal_limit(fsf_req, max_sbals);
 
-       /* set storage-block type for current SBAL */
-       sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+       /* set storage-block type for this request */
+       sbale = zfcp_qdio_sbale_req(fsf_req);
        sbale->flags |= sbtype;
 
-       /* process all segements of scatter-gather list */
-       for_each_sg(sgl, sg_segment, sg_count, sg_index) {
-               retval = zfcp_qdio_sbals_from_segment(
-                               fsf_req,
-                               sbtype,
-                               zfcp_sg_to_address(sg_segment),
-                               sg_segment->length);
-               if (retval < 0) {
-                       bytes = retval;
-                       goto out;
-               } else
-                        bytes += retval;
+       for (; sg; sg = sg_next(sg)) {
+               retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
+                                             sg->length);
+               if (retval < 0)
+                       return retval;
+               bytes += sg->length;
        }
+
        /* assume that no other SBALEs are to follow in the same SBAL */
        sbale = zfcp_qdio_sbale_curr(fsf_req);
        sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
-out:
+
        return bytes;
 }
 
-
 /**
- * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
- *     to fill SBALs
+ * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
-                             unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
+int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
 {
-       return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd),
-                                      scsi_sg_count(scsi_cmnd),
-                                      ZFCP_MAX_SBALS_PER_REQ);
+       struct zfcp_adapter *adapter = fsf_req->adapter;
+       struct zfcp_qdio_queue *req_q = &adapter->req_q;
+       int first = fsf_req->sbal_first;
+       int count = fsf_req->sbal_number;
+       int retval, pci, pci_batch;
+       volatile struct qdio_buffer_element *sbale;
+
+       /* acknowledgements for transferred buffers */
+       pci_batch = req_q->pci_batch + count;
+       if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
+               pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
+               pci = first + count - (pci_batch + 1);
+               pci %= QDIO_MAX_BUFFERS_PER_Q;
+               sbale = zfcp_qdio_sbale(req_q, pci, 0);
+               sbale->flags |= SBAL_FLAGS0_PCI;
+       }
+
+       retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
+                        count, NULL);
+       if (unlikely(retval)) {
+               zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+               return retval;
+       }
+
+       /* account for transferred buffers */
+       atomic_sub(count, &req_q->count);
+       req_q->first += count;
+       req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+       req_q->pci_batch = pci_batch;
+       return 0;
 }
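
zfcp_qdio_send asks for a PCI (progress) interrupt roughly once per ZFCP_QDIO_PCI_INTERVAL outbound SBALs: the running pci_batch counter is advanced by the request size, and once it crosses the interval the PCI flag is set on the SBAL that lies pci_batch + 1 buffers before the end of the request. Below is a sketch of that index arithmetic; the concrete constants (ring size 128, interval 88) are assumptions derived from the macros above, not values confirmed here.

    #include <stdio.h>

    #define EX_RING_SIZE    128  /* assumed QDIO_MAX_BUFFERS_PER_Q */
    #define EX_PCI_INTERVAL 88   /* assumed: 128 - (36 + 4) */

    /* returns index of the SBAL that gets the PCI flag, or -1 if none */
    static int ex_pci_index(int first, int count, int *pci_batch)
    {
        int batch = *pci_batch + count;
        int pci;

        if (batch < EX_PCI_INTERVAL) {
            *pci_batch = batch;
            return -1;
        }
        batch %= EX_PCI_INTERVAL;
        pci = (first + count - (batch + 1)) % EX_RING_SIZE;
        *pci_batch = batch;
        return pci;
    }

    int main(void)
    {
        int batch = 85;
        /* 5 more SBALs starting at index 126 push the batch past 88:
         * the flag lands on SBAL 0, and the counter wraps to 2 */
        printf("pci SBAL = %d\n", ex_pci_index(126, 5, &batch));
        printf("new batch = %d\n", batch);
        return 0;
    }
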
 
 /**
- * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed
+ * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: -ENOMEM on memory allocation error or return value from
+ *          qdio_allocate
  */
-int
-zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
-                       struct zfcp_fsf_req *fsf_req)
+int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
 {
-       int new_distance_from_int;
-       int pci_pos;
-       volatile struct qdio_buffer_element *sbale;
+       struct qdio_initialize *init_data;
 
-       new_distance_from_int = req_queue->distance_from_int +
-                fsf_req->sbal_number;
-
-       if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) {
-               new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
-                pci_pos  = fsf_req->sbal_first;
-               pci_pos += fsf_req->sbal_number;
-               pci_pos -= new_distance_from_int;
-               pci_pos -= 1;
-               pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
-               sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
-               sbale->flags |= SBAL_FLAGS0_PCI;
-       }
-       return new_distance_from_int;
+       if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
+                  zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
+               return -ENOMEM;
+
+       init_data = &adapter->qdio_init_data;
+
+       init_data->cdev = adapter->ccw_device;
+       init_data->q_format = QDIO_ZFCP_QFMT;
+       memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
+       ASCEBC(init_data->adapter_name, 8);
+       init_data->qib_param_field_format = 0;
+       init_data->qib_param_field = NULL;
+       init_data->input_slib_elements = NULL;
+       init_data->output_slib_elements = NULL;
+       init_data->min_input_threshold = 1;
+       init_data->max_input_threshold = 5000;
+       init_data->min_output_threshold = 1;
+       init_data->max_output_threshold = 1000;
+       init_data->no_input_qs = 1;
+       init_data->no_output_qs = 1;
+       init_data->input_handler = zfcp_qdio_int_resp;
+       init_data->output_handler = zfcp_qdio_int_req;
+       init_data->int_parm = (unsigned long) adapter;
+       init_data->flags = QDIO_INBOUND_0COPY_SBALS |
+                       QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
+       init_data->input_sbal_addr_array =
+                       (void **) (adapter->resp_q.sbal);
+       init_data->output_sbal_addr_array =
+                       (void **) (adapter->req_q.sbal);
+
+       return qdio_allocate(init_data);
 }
 
-/*
- * function:   zfcp_zero_sbals
- *
- * purpose:    zeros specified range of SBALs
- *
- * returns:
+/**
+ * zfcp_qdio_close - close qdio queues for an adapter
  */
-void
-zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
+void zfcp_qdio_close(struct zfcp_adapter *adapter)
 {
-       int cur_pos;
-       int index;
-
-       for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) {
-               index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
-               memset(buf[index], 0, sizeof (struct qdio_buffer));
-               ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n",
-                              index, buf[index]);
+       struct zfcp_qdio_queue *req_q;
+       int first, count;
+
+       if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+               return;
+
+       /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+       req_q = &adapter->req_q;
+       spin_lock(&req_q->lock);
+       atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+       spin_unlock(&req_q->lock);
+
+       while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
+                       == -EINPROGRESS)
+               ssleep(1);
+
+       /* cleanup used outbound sbals */
+       count = atomic_read(&req_q->count);
+       if (count < QDIO_MAX_BUFFERS_PER_Q) {
+               first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+               count = QDIO_MAX_BUFFERS_PER_Q - count;
+               zfcp_qdio_zero_sbals(req_q->sbal, first, count);
        }
+       req_q->first = 0;
+       atomic_set(&req_q->count, 0);
+       req_q->pci_batch = 0;
+       adapter->resp_q.first = 0;
+       atomic_set(&adapter->resp_q.count, 0);
 }
 
-#undef ZFCP_LOG_AREA
+/**
+ * zfcp_qdio_open - prepare and initialize response queue
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: 0 on success, otherwise -EIO
+ */
+int zfcp_qdio_open(struct zfcp_adapter *adapter)
+{
+       volatile struct qdio_buffer_element *sbale;
+       int cc;
+
+       if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+               return -EIO;
+
+       if (qdio_establish(&adapter->qdio_init_data)) {
+               dev_err(&adapter->ccw_device->dev,
+                        "Establish of QDIO queues failed.\n");
+               return -EIO;
+       }
+
+       if (qdio_activate(adapter->ccw_device, 0)) {
+               dev_err(&adapter->ccw_device->dev,
+                        "Activate of QDIO queues failed.\n");
+               goto failed_qdio;
+       }
+
+       for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
+               sbale = &(adapter->resp_q.sbal[cc]->element[0]);
+               sbale->length = 0;
+               sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+               sbale->addr = NULL;
+       }
+
+       if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
+                    QDIO_MAX_BUFFERS_PER_Q, NULL)) {
+               dev_err(&adapter->ccw_device->dev,
+                        "Init of QDIO response queue failed.\n");
+               goto failed_qdio;
+       }
+
+       /* set index of first available SBAL / number of available SBALs */
+       adapter->req_q.first = 0;
+       atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+       adapter->req_q.pci_batch = 0;
+
+       return 0;
+
+failed_qdio:
+       while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
+                       == -EINPROGRESS)
+               ssleep(1);
+
+       return -EIO;
+}
index 0168755..aeae56b 100644
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Interface to Linux SCSI midlayer.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
-#define ZFCP_LOG_AREA                  ZFCP_LOG_AREA_SCSI
-
 #include "zfcp_ext.h"
 #include <asm/atomic.h>
 
-static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
-static int zfcp_scsi_slave_configure(struct scsi_device *sdp);
-static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
-                                 void (*done) (struct scsi_cmnd *));
-static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
-static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
-static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *);
-static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
-static int zfcp_task_management_function(struct zfcp_unit *, u8,
-                                        struct scsi_cmnd *);
-
-static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
-                                         unsigned int, unsigned int);
-
-static struct device_attribute *zfcp_sysfs_sdev_attrs[];
-static struct device_attribute *zfcp_a_stats_attrs[];
-
-struct zfcp_data zfcp_data = {
-       .scsi_host_template = {
-               .name                   = ZFCP_NAME,
-               .module                 = THIS_MODULE,
-               .proc_name              = "zfcp",
-               .slave_alloc            = zfcp_scsi_slave_alloc,
-               .slave_configure        = zfcp_scsi_slave_configure,
-               .slave_destroy          = zfcp_scsi_slave_destroy,
-               .queuecommand           = zfcp_scsi_queuecommand,
-               .eh_abort_handler       = zfcp_scsi_eh_abort_handler,
-               .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
-               .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
-               .eh_host_reset_handler  = zfcp_scsi_eh_host_reset_handler,
-               .can_queue              = 4096,
-               .this_id                = -1,
-               .sg_tablesize           = ZFCP_MAX_SBALES_PER_REQ,
-               .cmd_per_lun            = 1,
-               .use_clustering         = 1,
-               .sdev_attrs             = zfcp_sysfs_sdev_attrs,
-               .max_sectors            = ZFCP_MAX_SECTORS,
-               .shost_attrs            = zfcp_a_stats_attrs,
-       },
-       .driver_version = ZFCP_VERSION,
-};
-
-/* Find start of Response Information in FCP response unit*/
-char *
-zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
-{
-       char *fcp_rsp_info_ptr;
-
-       fcp_rsp_info_ptr =
-           (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
-
-       return fcp_rsp_info_ptr;
-}
-
 /* Find start of Sense Information in FCP response unit*/
-char *
-zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
+char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 {
        char *fcp_sns_info_ptr;
 
-       fcp_sns_info_ptr =
-           (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
+       fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
        if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
-               fcp_sns_info_ptr = (char *) fcp_sns_info_ptr +
-                   fcp_rsp_iu->fcp_rsp_len;
+               fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
 
        return fcp_sns_info_ptr;
 }
 
-static fcp_dl_t *
-zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
+void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
 {
-       int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
-       fcp_dl_t *fcp_dl_addr;
+       fcp_dl_t *fcp_dl_ptr;
 
-       fcp_dl_addr = (fcp_dl_t *)
-               ((unsigned char *) fcp_cmd +
-                sizeof (struct fcp_cmnd_iu) + additional_length);
        /*
         * fcp_dl_addr = start address of fcp_cmnd structure +
         * size of fixed part + size of dynamically sized add_dcp_cdb field
         * SEE FCP-2 documentation
         */
-       return fcp_dl_addr;
+       fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
+                                  (fcp_cmd->add_fcp_cdb_length << 2));
+       *fcp_dl_ptr = fcp_dl;
 }
 
-fcp_dl_t
-zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd)
-{
-       return *zfcp_get_fcp_dl_ptr(fcp_cmd);
-}
-
-void
-zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
-{
-       *zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
-}
-
-/*
- * note: it's a bit-or operation not an assignment
- * regarding the specified byte
- */
-static inline void
-set_byte(int *result, char status, char pos)
-{
-       *result |= status << (pos * 8);
-}
-
-void
-set_host_byte(int *result, char status)
-{
-       set_byte(result, status, 2);
-}
-
-void
-set_driver_byte(int *result, char status)
-{
-       set_byte(result, status, 3);
-}
-
-static int
-zfcp_scsi_slave_alloc(struct scsi_device *sdp)
-{
-       struct zfcp_adapter *adapter;
-       struct zfcp_unit *unit;
-       unsigned long flags;
-       int retval = -ENXIO;
-
-       adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
-       if (!adapter)
-               goto out;
-
-       read_lock_irqsave(&zfcp_data.config_lock, flags);
-       unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
-       if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
-                                    &unit->status)) {
-               sdp->hostdata = unit;
-               unit->device = sdp;
-               zfcp_unit_get(unit);
-               retval = 0;
-       }
-       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
- out:
-       return retval;
-}
-
-/**
- * zfcp_scsi_slave_destroy - called when scsi device is removed
- *
- * Remove reference to associated scsi device for an zfcp_unit.
- * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
- * or a scan for this device might have failed.
- */
 static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
        struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
-
+       WARN_ON(!unit);
        if (unit) {
                atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
                sdpnt->hostdata = NULL;
                unit->device = NULL;
                zfcp_erp_unit_failed(unit, 12, NULL);
                zfcp_unit_put(unit);
-       } else
-               ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
-                               "address %p\n", sdpnt);
+       }
 }
 
-/*
- * called from scsi midlayer to allow finetuning of a device.
- */
-static int
-zfcp_scsi_slave_configure(struct scsi_device *sdp)
+static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 {
        if (sdp->tagged_supported)
-               scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN);
+               scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
        else
                scsi_adjust_queue_depth(sdp, 0, 1);
        return 0;
 }
 
-/**
- * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function
- * @scpnt: pointer to struct scsi_cmnd where result is set
- * @result: result to be set in scpnt (e.g. DID_ERROR)
- */
-static void
-zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
+static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 {
-       set_host_byte(&scpnt->result, result);
+       set_host_byte(scpnt, result);
        if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
                zfcp_scsi_dbf_event_result("fail", 4,
                        (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
@@ -223,114 +68,13 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
        scpnt->scsi_done(scpnt);
 }
 
-/**
- * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and
- *     zfcp_scsi_command_sync
- * @adapter: adapter where scsi command is issued
- * @unit: unit to which scsi command is sent
- * @scpnt: scsi command to be sent
- * @timer: timer to be started if request is successfully initiated
- *
- * Note: In scsi_done function must be set in scpnt.
- */
-int
-zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
-                       struct scsi_cmnd *scpnt, int use_timer)
-{
-       int tmp;
-       int retval;
-
-       retval = 0;
-
-       BUG_ON((adapter == NULL) || (adapter != unit->port->adapter));
-       BUG_ON(scpnt->scsi_done == NULL);
-
-       if (unlikely(NULL == unit)) {
-               zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
-               goto out;
-       }
-
-       if (unlikely(
-             atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) ||
-            !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) {
-               ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port "
-                              "0x%016Lx on adapter %s\n",
-                              unit->fcp_lun, unit->port->wwpn,
-                              zfcp_get_busid_by_adapter(adapter));
-               zfcp_scsi_command_fail(scpnt, DID_ERROR);
-               goto out;
-       }
-
-       tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
-                                            ZFCP_REQ_AUTO_CLEANUP);
-       if (unlikely(tmp == -EBUSY)) {
-               ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
-                              "on port 0x%016Lx in recovery\n",
-                              zfcp_get_busid_by_unit(unit),
-                              unit->fcp_lun, unit->port->wwpn);
-               zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
-               goto out;
-       }
-
-       if (unlikely(tmp < 0)) {
-               ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
-               retval = SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-out:
-       return retval;
-}
-
-static void
-zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
-{
-       struct completion *wait = (struct completion *) scpnt->SCp.ptr;
-       complete(wait);
-}
-
-
-/**
- * zfcp_scsi_command_sync - send a SCSI command and wait for completion
- * @unit: unit where command is sent to
- * @scpnt: scsi command to be sent
- * @use_timer: indicates whether timer should be setup or not
- * Return: 0
- *
- * Errors are indicated in scpnt->result
- */
-int
-zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
-                      int use_timer)
-{
-       int ret;
-       DECLARE_COMPLETION_ONSTACK(wait);
-
-       scpnt->SCp.ptr = (void *) &wait;  /* silent re-use */
-       scpnt->scsi_done = zfcp_scsi_command_sync_handler;
-       ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
-                                     use_timer);
-       if (ret == 0)
-               wait_for_completion(&wait);
-
-       scpnt->SCp.ptr = NULL;
-
-       return 0;
-}
-
-/*
- * function:   zfcp_scsi_queuecommand
- *
- * purpose:    enqueues a SCSI command to the specified target device
- *
- * returns:    0 - success, SCSI command enqueued
- *             !0 - failure
- */
-static int
-zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
-                      void (*done) (struct scsi_cmnd *))
+static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
+                                 void (*done) (struct scsi_cmnd *))
 {
        struct zfcp_unit *unit;
        struct zfcp_adapter *adapter;
+       int    status;
+       int    ret;
 
        /* reset the status for this request */
        scpnt->result = 0;
@@ -342,44 +86,76 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
         * (stored there by zfcp_scsi_slave_alloc)
         */
        adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
-       unit = (struct zfcp_unit *) scpnt->device->hostdata;
+       unit = scpnt->device->hostdata;
+
+       BUG_ON(!adapter || (unit && adapter != unit->port->adapter));
+       BUG_ON(!scpnt->scsi_done);
 
-       return zfcp_scsi_command_async(adapter, unit, scpnt, 0);
+       if (unlikely(!unit)) {
+               zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
+               return 0;
+       }
+
+       status = atomic_read(&unit->status);
+       if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
+                    !(status & ZFCP_STATUS_COMMON_RUNNING))) {
+               zfcp_scsi_command_fail(scpnt, DID_ERROR);
+               return 0;
+       }
+
+       ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
+                                            ZFCP_REQ_AUTO_CLEANUP);
+       if (unlikely(ret == -EBUSY))
+               zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
+       else if (unlikely(ret < 0))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       return ret;
 }
 
-static struct zfcp_unit *
-zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
-                unsigned int lun)
+static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
+                                         int channel, unsigned int id,
+                                         unsigned int lun)
 {
        struct zfcp_port *port;
-       struct zfcp_unit *unit, *retval = NULL;
+       struct zfcp_unit *unit;
 
        list_for_each_entry(port, &adapter->port_list_head, list) {
                if (!port->rport || (id != port->rport->scsi_target_id))
                        continue;
                list_for_each_entry(unit, &port->unit_list_head, list)
-                       if (lun == unit->scsi_lun) {
-                               retval = unit;
-                               goto out;
-                       }
+                       if (lun == unit->scsi_lun)
+                               return unit;
        }
- out:
+
+       return NULL;
+}
+
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
+{
+       struct zfcp_adapter *adapter;
+       struct zfcp_unit *unit;
+       unsigned long flags;
+       int retval = -ENXIO;
+
+       adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
+       if (!adapter)
+               goto out;
+
+       read_lock_irqsave(&zfcp_data.config_lock, flags);
+       unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
+       if (unit &&
+           (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) {
+               sdp->hostdata = unit;
+               unit->device = sdp;
+               zfcp_unit_get(unit);
+               retval = 0;
+       }
+       read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+out:
        return retval;
 }
 
-/**
- * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
- * @scpnt: pointer to scsi_cmnd to be aborted
- * Return: SUCCESS - command has been aborted and cleaned up in internal
- *          bookkeeping, SCSI stack won't be called for aborted command
- *         FAILED - otherwise
- *
- * We do not need to care for a SCSI command which completes normally
- * but late during this abort routine runs.  We are allowed to return
- * late commands to the SCSI stack.  It tracks the state of commands and
- * will handle late commands.  (Usually, the normal completion of late
- * commands is ignored with respect to the running abort operation.)
- */
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 {
        struct Scsi_Host *scsi_host;
@@ -387,44 +163,37 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
        struct zfcp_unit *unit;
        struct zfcp_fsf_req *fsf_req;
        unsigned long flags;
-       unsigned long old_req_id;
+       unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
        int retval = SUCCESS;
 
        scsi_host = scpnt->device->host;
        adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
-       unit = (struct zfcp_unit *) scpnt->device->hostdata;
-
-       ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
-                     scpnt, zfcp_get_busid_by_adapter(adapter));
+       unit = scpnt->device->hostdata;
 
        /* avoid race condition between late normal completion and abort */
        write_lock_irqsave(&adapter->abort_lock, flags);
 
        /* Check whether corresponding fsf_req is still pending */
        spin_lock(&adapter->req_list_lock);
-       fsf_req = zfcp_reqlist_find(adapter,
-                                   (unsigned long) scpnt->host_scribble);
+       fsf_req = zfcp_reqlist_find(adapter, old_req_id);
        spin_unlock(&adapter->req_list_lock);
        if (!fsf_req) {
                write_unlock_irqrestore(&adapter->abort_lock, flags);
                zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
-               retval = SUCCESS;
-               goto out;
+               return retval;
        }
-       fsf_req->data = 0;
+       fsf_req->data = NULL;
        fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
-       old_req_id = fsf_req->req_id;
 
        /* don't access old fsf_req after releasing the abort_lock */
        write_unlock_irqrestore(&adapter->abort_lock, flags);
 
        fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
        if (!fsf_req) {
-               ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
                zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
                                          old_req_id);
                retval = FAILED;
-               goto out;
+               return retval;
        }
 
        __wait_event(fsf_req->completion_wq,
@@ -432,66 +201,29 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 
        if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
                zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
-               retval = SUCCESS;
        } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
                zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
-               retval = SUCCESS;
        } else {
                zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
                retval = FAILED;
        }
        zfcp_fsf_req_free(fsf_req);
- out:
-       return retval;
-}
-
-static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
-{
-       int retval;
-       struct zfcp_unit *unit = scpnt->device->hostdata;
 
-       if (!unit) {
-               WARN_ON(1);
-               return SUCCESS;
-       }
-       retval = zfcp_task_management_function(unit,
-                                              FCP_LOGICAL_UNIT_RESET,
-                                              scpnt);
-       return retval ? FAILED : SUCCESS;
-}
-
-static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
-{
-       int retval;
-       struct zfcp_unit *unit = scpnt->device->hostdata;
-
-       if (!unit) {
-               WARN_ON(1);
-               return SUCCESS;
-       }
-       retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
-       return retval ? FAILED : SUCCESS;
+       return retval;
 }
 
-static int
-zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
-                             struct scsi_cmnd *scpnt)
+static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
+                                        struct scsi_cmnd *scpnt)
 {
        struct zfcp_adapter *adapter = unit->port->adapter;
        struct zfcp_fsf_req *fsf_req;
-       int retval = 0;
+       int retval = SUCCESS;
 
        /* issue task management function */
-       fsf_req = zfcp_fsf_send_fcp_command_task_management
-               (adapter, unit, tm_flags, 0);
+       fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0);
        if (!fsf_req) {
-               ZFCP_LOG_INFO("error: creation of task management request "
-                             "failed for unit 0x%016Lx on port 0x%016Lx on  "
-                             "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
-                             zfcp_get_busid_by_adapter(adapter));
                zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
-               retval = -ENOMEM;
-               goto out;
+               return FAILED;
        }
 
        __wait_event(fsf_req->completion_wq,
@@ -502,87 +234,90 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
         */
        if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
                zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
-               retval = -EIO;
+               retval = FAILED;
        } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
                zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
-               retval = -ENOTSUPP;
+               retval = FAILED;
        } else
                zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
 
        zfcp_fsf_req_free(fsf_req);
- out:
+
        return retval;
 }
 
-/**
- * zfcp_scsi_eh_host_reset_handler - handler for host reset
- */
+static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
+{
+       struct zfcp_unit *unit = scpnt->device->hostdata;
+
+       if (!unit) {
+               WARN_ON(1);
+               return SUCCESS;
+       }
+       return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt);
+}
+
+static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
+{
+       struct zfcp_unit *unit = scpnt->device->hostdata;
+
+       if (!unit) {
+               WARN_ON(1);
+               return SUCCESS;
+       }
+       return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt);
+}
+
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 {
        struct zfcp_unit *unit;
        struct zfcp_adapter *adapter;
 
-       unit = (struct zfcp_unit*) scpnt->device->hostdata;
+       unit = scpnt->device->hostdata;
        adapter = unit->port->adapter;
-
-       ZFCP_LOG_NORMAL("host reset because of problems with "
-               "unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
-               unit->fcp_lun, unit->port->wwpn,
-               zfcp_get_busid_by_adapter(unit->port->adapter));
-
        zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
        zfcp_erp_wait(adapter);
 
        return SUCCESS;
 }
 
-int
-zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 {
-       int retval = 0;
-       static unsigned int unique_id = 0;
+       struct ccw_dev_id dev_id;
 
        if (adapter->scsi_host)
-               goto out;
+               return 0;
 
+       ccw_device_get_id(adapter->ccw_device, &dev_id);
        /* register adapter as SCSI host with mid layer of SCSI stack */
        adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
                                             sizeof (struct zfcp_adapter *));
        if (!adapter->scsi_host) {
-               ZFCP_LOG_NORMAL("error: registration with SCSI stack failed "
-                               "for adapter %s ",
-                               zfcp_get_busid_by_adapter(adapter));
-               retval = -EIO;
-               goto out;
+               dev_err(&adapter->ccw_device->dev,
+                       "registration with SCSI stack failed.\n");
+               return -EIO;
        }
-       ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host);
 
        /* tell the SCSI stack some characteristics of this adapter */
        adapter->scsi_host->max_id = 1;
        adapter->scsi_host->max_lun = 1;
        adapter->scsi_host->max_channel = 0;
-       adapter->scsi_host->unique_id = unique_id++;    /* FIXME */
-       adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
+       adapter->scsi_host->unique_id = dev_id.devno;
+       adapter->scsi_host->max_cmd_len = 255;
        adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
 
-       /*
-        * save a pointer to our own adapter data structure within
-        * hostdata field of SCSI host data structure
-        */
        adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
 
        if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
                scsi_host_put(adapter->scsi_host);
-               retval = -EIO;
-               goto out;
+               return -EIO;
        }
        atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
- out:
-       return retval;
+
+       return 0;
 }
 
-void
-zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
 {
        struct Scsi_Host *shost;
        struct zfcp_port *port;
@@ -590,10 +325,12 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
        shost = adapter->scsi_host;
        if (!shost)
                return;
+
        read_lock_irq(&zfcp_data.config_lock);
        list_for_each_entry(port, &adapter->port_list_head, list)
                if (port->rport)
                        port->rport = NULL;
+
        read_unlock_irq(&zfcp_data.config_lock);
        fc_remove_host(shost);
        scsi_remove_host(shost);
@@ -604,9 +341,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
        return;
 }
 
-/*
- * Support functions for FC transport class
- */
 static struct fc_host_statistics*
 zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
 {
@@ -622,13 +356,12 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
        return adapter->fc_stats;
 }
 
-static void
-zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
-                         struct fsf_qtcb_bottom_port *data,
-                         struct fsf_qtcb_bottom_port *old)
+static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
+                                     struct fsf_qtcb_bottom_port *data,
+                                     struct fsf_qtcb_bottom_port *old)
 {
-       fc_stats->seconds_since_last_reset = data->seconds_since_last_reset -
-               old->seconds_since_last_reset;
+       fc_stats->seconds_since_last_reset =
+               data->seconds_since_last_reset - old->seconds_since_last_reset;
        fc_stats->tx_frames = data->tx_frames - old->tx_frames;
        fc_stats->tx_words = data->tx_words - old->tx_words;
        fc_stats->rx_frames = data->rx_frames - old->rx_frames;
@@ -639,26 +372,25 @@ zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
        fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
        fc_stats->link_failure_count = data->link_failure - old->link_failure;
        fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
-       fc_stats->loss_of_signal_count = data->loss_of_signal -
-               old->loss_of_signal;
-       fc_stats->prim_seq_protocol_err_count = data->psp_error_counts -
-               old->psp_error_counts;
-       fc_stats->invalid_tx_word_count = data->invalid_tx_words -
-               old->invalid_tx_words;
+       fc_stats->loss_of_signal_count =
+               data->loss_of_signal - old->loss_of_signal;
+       fc_stats->prim_seq_protocol_err_count =
+               data->psp_error_counts - old->psp_error_counts;
+       fc_stats->invalid_tx_word_count =
+               data->invalid_tx_words - old->invalid_tx_words;
        fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
-       fc_stats->fcp_input_requests = data->input_requests -
-               old->input_requests;
-       fc_stats->fcp_output_requests = data->output_requests -
-               old->output_requests;
-       fc_stats->fcp_control_requests = data->control_requests -
-               old->control_requests;
+       fc_stats->fcp_input_requests =
+               data->input_requests - old->input_requests;
+       fc_stats->fcp_output_requests =
+               data->output_requests - old->output_requests;
+       fc_stats->fcp_control_requests =
+               data->control_requests - old->control_requests;
        fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
        fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
 }
 
-static void
-zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
-                      struct fsf_qtcb_bottom_port *data)
+static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
+                                  struct fsf_qtcb_bottom_port *data)
 {
        fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
        fc_stats->tx_frames = data->tx_frames;
@@ -682,22 +414,14 @@ zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
        fc_stats->fcp_output_megabytes = data->output_mb;
 }
 
-/**
- * zfcp_get_fc_host_stats - provide fc_host_statistics for scsi_transport_fc
- *
- * assumption: scsi_transport_fc synchronizes calls of
- *             get_fc_host_stats and reset_fc_host_stats
- *             (XXX to be checked otherwise introduce locking)
- */
-static struct fc_host_statistics *
-zfcp_get_fc_host_stats(struct Scsi_Host *shost)
+static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
 {
        struct zfcp_adapter *adapter;
        struct fc_host_statistics *fc_stats;
        struct fsf_qtcb_bottom_port *data;
        int ret;
 
-       adapter = (struct zfcp_adapter *)shost->hostdata[0];
+       adapter = (struct zfcp_adapter *)host->hostdata[0];
        fc_stats = zfcp_init_fc_host_stats(adapter);
        if (!fc_stats)
                return NULL;
@@ -709,26 +433,25 @@ zfcp_get_fc_host_stats(struct Scsi_Host *shost)
        ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
        if (ret) {
                kfree(data);
-               return NULL; /* XXX return zeroed fc_stats? */
+               return NULL;
        }
 
        if (adapter->stats_reset &&
            ((jiffies/HZ - adapter->stats_reset) <
-            data->seconds_since_last_reset)) {
+            data->seconds_since_last_reset))
                zfcp_adjust_fc_host_stats(fc_stats, data,
                                          adapter->stats_reset_data);
-       } else
+       else
                zfcp_set_fc_host_stats(fc_stats, data);
 
        kfree(data);
        return fc_stats;
 }
 
-static void
-zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
+static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
 {
        struct zfcp_adapter *adapter;
-       struct fsf_qtcb_bottom_port *data, *old_data;
+       struct fsf_qtcb_bottom_port *data;
        int ret;
 
        adapter = (struct zfcp_adapter *)shost->hostdata[0];
@@ -737,17 +460,33 @@ zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
                return;
 
        ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
-       if (ret) {
+       if (ret)
                kfree(data);
-       } else {
+       else {
                adapter->stats_reset = jiffies/HZ;
-               old_data = adapter->stats_reset_data;
+               kfree(adapter->stats_reset_data);
                adapter->stats_reset_data = data; /* finally freed in
-                                                    adater_dequeue */
-               kfree(old_data);
+                                                    adapter_dequeue */
        }
 }
 
+static void zfcp_get_host_port_state(struct Scsi_Host *shost)
+{
+       struct zfcp_adapter *adapter =
+               (struct zfcp_adapter *)shost->hostdata[0];
+       int status = atomic_read(&adapter->status);
+
+       if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+           !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
+               fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+       else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+       else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
+               fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+       else
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+}
+
 static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 {
        rport->dev_loss_tmo = timeout;
@@ -770,6 +509,8 @@ struct fc_function_template zfcp_transport_functions = {
        .get_fc_host_stats = zfcp_get_fc_host_stats,
        .reset_fc_host_stats = zfcp_reset_fc_host_stats,
        .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
+       .get_host_port_state = zfcp_get_host_port_state,
+       .show_host_port_state = 1,
        /* no functions registered for following dynamic attributes but
           directly set by LLDD */
        .show_host_port_type = 1,
@@ -778,149 +519,26 @@ struct fc_function_template zfcp_transport_functions = {
        .disable_target_scan = 1,
 };
 
-/**
- * ZFCP_DEFINE_SCSI_ATTR
- * @_name:   name of show attribute
- * @_format: format string
- * @_value:  value to print
- *
- * Generates attribute for a unit.
- */
-#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value)                    \
-static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, struct device_attribute *attr,        \
-                                              char *buf)                 \
-{                                                                        \
-        struct scsi_device *sdev;                                        \
-        struct zfcp_unit *unit;                                          \
-                                                                         \
-        sdev = to_scsi_device(dev);                                      \
-        unit = sdev->hostdata;                                           \
-        return sprintf(buf, _format, _value);                            \
-}                                                                        \
-                                                                         \
-static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
-
-ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit));
-ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
-
-static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
-       &dev_attr_fcp_lun,
-       &dev_attr_wwpn,
-       &dev_attr_hba_id,
-       NULL
-};
-
-static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
-                                           struct device_attribute *attr,
-                                           char *buf)
-{
-       struct Scsi_Host *scsi_host = dev_to_shost(dev);
-       struct fsf_qtcb_bottom_port *qtcb_port;
-       int retval;
-       struct zfcp_adapter *adapter;
-
-       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
-       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
-               return -EOPNOTSUPP;
-
-       qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
-       if (!qtcb_port)
-               return -ENOMEM;
-
-       retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
-       if (!retval)
-               retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
-                                qtcb_port->cb_util, qtcb_port->a_util);
-       kfree(qtcb_port);
-       return retval;
-}
-
-static int zfcp_sysfs_adapter_ex_config(struct device *dev,
-                                       struct fsf_statistics_info *stat_inf)
-{
-       int retval;
-       struct fsf_qtcb_bottom_config *qtcb_config;
-       struct Scsi_Host *scsi_host = dev_to_shost(dev);
-       struct zfcp_adapter *adapter;
-
-       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
-       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
-               return -EOPNOTSUPP;
-
-       qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
-                              GFP_KERNEL);
-       if (!qtcb_config)
-               return -ENOMEM;
-
-       retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
-       if (!retval)
-               *stat_inf = qtcb_config->stat_info;
-
-       kfree(qtcb_config);
-       return retval;
-}
-
-static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
-                                              struct device_attribute *attr,
-                                              char *buf)
-{
-       struct fsf_statistics_info stat_info;
-       int retval;
-
-       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
-       if (retval)
-               return retval;
-
-       return sprintf(buf, "%llu %llu %llu\n",
-                      (unsigned long long) stat_info.input_req,
-                      (unsigned long long) stat_info.output_req,
-                      (unsigned long long) stat_info.control_req);
-}
-
-static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
-                                         struct device_attribute *attr,
-                                         char *buf)
-{
-       struct fsf_statistics_info stat_info;
-       int retval;
-
-       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
-       if (retval)
-               return retval;
-
-       return sprintf(buf, "%llu %llu\n",
-                      (unsigned long long) stat_info.input_mb,
-                      (unsigned long long) stat_info.output_mb);
-}
-
-static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
-                                                 struct device_attribute *attr,
-                                                 char *buf)
-{
-       struct fsf_statistics_info stat_info;
-       int retval;
-
-       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
-       if (retval)
-               return retval;
-
-       return sprintf(buf, "%llu\n",
-                      (unsigned long long) stat_info.seconds_act);
-}
-
-static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
-static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
-static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
-static DEVICE_ATTR(seconds_active, S_IRUGO,
-                  zfcp_sysfs_adapter_sec_active_show, NULL);
-
-static struct device_attribute *zfcp_a_stats_attrs[] = {
-       &dev_attr_utilization,
-       &dev_attr_requests,
-       &dev_attr_megabytes,
-       &dev_attr_seconds_active,
-       NULL
+struct zfcp_data zfcp_data = {
+       .scsi_host_template = {
+               .name                    = "zfcp",
+               .module                  = THIS_MODULE,
+               .proc_name               = "zfcp",
+               .slave_alloc             = zfcp_scsi_slave_alloc,
+               .slave_configure         = zfcp_scsi_slave_configure,
+               .slave_destroy           = zfcp_scsi_slave_destroy,
+               .queuecommand            = zfcp_scsi_queuecommand,
+               .eh_abort_handler        = zfcp_scsi_eh_abort_handler,
+               .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+               .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+               .eh_host_reset_handler   = zfcp_scsi_eh_host_reset_handler,
+               .can_queue               = 4096,
+               .this_id                 = -1,
+               .sg_tablesize            = ZFCP_MAX_SBALES_PER_REQ,
+               .cmd_per_lun             = 1,
+               .use_clustering          = 1,
+               .sdev_attrs              = zfcp_sysfs_sdev_attrs,
+               .max_sectors             = (ZFCP_MAX_SBALES_PER_REQ * 8),
+               .shost_attrs             = zfcp_sysfs_shost_attrs,
+       },
 };
-
-#undef ZFCP_LOG_AREA
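
For orientation only (not part of the patch): a minimal sketch of how a struct fc_function_template such as zfcp_transport_functions above is attached to the FC transport class. The example_* names and the standalone module scaffolding are invented for illustration; fc_attach_transport()/fc_release_transport() are the real scsi_transport_fc entry points, and zfcp's actual registration lives in its own module init code.

#include <linux/module.h>
#include <scsi/scsi_transport_fc.h>

extern struct fc_function_template zfcp_transport_functions;

static struct scsi_transport_template *example_fc_template;

static int __init example_init(void)
{
	/* register the callback table with the FC transport class; the
	 * returned template is what the driver assigns to each Scsi_Host's
	 * ->transportt (zfcp keeps it in zfcp_data.scsi_transport_template
	 * and copies it into adapter->scsi_host->transportt, see above)
	 */
	example_fc_template = fc_attach_transport(&zfcp_transport_functions);
	if (!example_fc_template)
		return -ENODEV;
	return 0;
}

static void __exit example_exit(void)
{
	fc_release_transport(example_fc_template);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
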
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
new file mode 100644 (file)
index 0000000..2e85c6c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -0,0 +1,496 @@
+/*
+ * zfcp device driver
+ *
+ * sysfs attributes.
+ *
+ * Copyright IBM Corporation 2008
+ */
+
+#include "zfcp_ext.h"
+
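+/* helpers that generate sysfs show routines and dev_attr_<feat>_<name> objects */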
+#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
+                                                           _show, _store)
+#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value)            \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,        \
+                                                  struct device_attribute *at,\
+                                                  char *buf)                  \
+{                                                                             \
+       struct _feat_def *_feat = dev_get_drvdata(dev);                        \
+                                                                              \
+       return sprintf(buf, _format, _value);                                  \
+}                                                                             \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,                                   \
+                    zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
+                atomic_read(&adapter->status));
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
+                adapter->peer_wwnn);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
+                adapter->peer_wwpn);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
+                adapter->peer_d_id);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
+                adapter->hydra_version);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
+                adapter->fsf_lic_version);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
+                adapter->hardware_version);
+ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
+                (atomic_read(&adapter->status) &
+                 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
+                atomic_read(&port->status));
+ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
+                (atomic_read(&port->status) &
+                 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
+                (atomic_read(&port->status) &
+                 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
+                atomic_read(&unit->status));
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
+                (atomic_read(&unit->status) &
+                 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
+                (atomic_read(&unit->status) &
+                 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
+                (atomic_read(&unit->status) &
+                 ZFCP_STATUS_UNIT_SHARED) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
+                (atomic_read(&unit->status) &
+                 ZFCP_STATUS_UNIT_READONLY) != 0);
+
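+/* "failed" attribute: reads as 1 once error recovery has given up on the
+ * object; writing "0" marks it running again and restarts recovery */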
+#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id)     \
+static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev,           \
+                                               struct device_attribute *attr, \
+                                               char *buf)                     \
+{                                                                             \
+       struct _feat_def *_feat = dev_get_drvdata(dev);                        \
+                                                                              \
+       if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED)       \
+               return sprintf(buf, "1\n");                                    \
+       else                                                                   \
+               return sprintf(buf, "0\n");                                    \
+}                                                                             \
+static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev,          \
+                                                struct device_attribute *attr,\
+                                                const char *buf, size_t count)\
+{                                                                             \
+       struct _feat_def *_feat = dev_get_drvdata(dev);                        \
+       unsigned long val;                                                     \
+       int retval = 0;                                                        \
+                                                                              \
+       down(&zfcp_data.config_sema);                                          \
+       if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) {         \
+               retval = -EBUSY;                                               \
+               goto out;                                                      \
+       }                                                                      \
+                                                                              \
+       if (strict_strtoul(buf, 0, &val) || val != 0) {                        \
+               retval = -EINVAL;                                              \
+               goto out;                                                      \
+       }                                                                      \
+                                                                              \
+       zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL,                 \
+                                        ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
+       zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED,        \
+                                 _reopen_id, NULL);                           \
+       zfcp_erp_wait(_adapter);                                               \
+out:                                                                          \
+       up(&zfcp_data.config_sema);                                            \
+       return retval ? retval : (ssize_t) count;                              \
+}                                                                             \
+static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,                        \
+                    zfcp_sysfs_##_feat##_failed_show,                         \
+                    zfcp_sysfs_##_feat##_failed_store);
+
+ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93);
+ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96);
+ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97);
+
+static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
+               return -EBUSY;
+
+       ret = zfcp_scan_ports(adapter);
+       return ret ? ret : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+                    zfcp_sysfs_port_rescan_store);
+
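+/* writing a WWPN to port_remove detaches and dequeues the corresponding
+ * port, provided it is no longer in use */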
+static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+       struct zfcp_port *port;
+       wwn_t wwpn;
+       int retval = 0;
+
+       down(&zfcp_data.config_sema);
+       if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
+               retval = -EBUSY;
+               goto out;
+       }
+
+       if (strict_strtoull(buf, 0, &wwpn)) {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       write_lock_irq(&zfcp_data.config_lock);
+       port = zfcp_get_port_by_wwpn(adapter, wwpn);
+       if (port && (atomic_read(&port->refcount) == 0)) {
+               zfcp_port_get(port);
+               atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+               list_move(&port->list, &adapter->port_remove_lh);
+       } else
+               port = NULL;
+       write_unlock_irq(&zfcp_data.config_lock);
+
+       if (!port) {
+               retval = -ENXIO;
+               goto out;
+       }
+
+       zfcp_erp_port_shutdown(port, 0, 92, NULL);
+       zfcp_erp_wait(adapter);
+       zfcp_port_put(port);
+       zfcp_port_dequeue(port);
+ out:
+       up(&zfcp_data.config_sema);
+       return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
+                    zfcp_sysfs_port_remove_store);
+
+static struct attribute *zfcp_adapter_attrs[] = {
+       &dev_attr_adapter_failed.attr,
+       &dev_attr_adapter_in_recovery.attr,
+       &dev_attr_adapter_port_remove.attr,
+       &dev_attr_adapter_port_rescan.attr,
+       &dev_attr_adapter_peer_wwnn.attr,
+       &dev_attr_adapter_peer_wwpn.attr,
+       &dev_attr_adapter_peer_d_id.attr,
+       &dev_attr_adapter_card_version.attr,
+       &dev_attr_adapter_lic_version.attr,
+       &dev_attr_adapter_status.attr,
+       &dev_attr_adapter_hardware_version.attr,
+       NULL
+};
+
+struct attribute_group zfcp_sysfs_adapter_attrs = {
+       .attrs = zfcp_adapter_attrs,
+};
+
+static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t count)
+{
+       struct zfcp_port *port = dev_get_drvdata(dev);
+       struct zfcp_unit *unit;
+       fcp_lun_t fcp_lun;
+       int retval = -EINVAL;
+
+       down(&zfcp_data.config_sema);
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
+               retval = -EBUSY;
+               goto out;
+       }
+
+       if (strict_strtoull(buf, 0, &fcp_lun))
+               goto out;
+
+       unit = zfcp_unit_enqueue(port, fcp_lun);
+       if (IS_ERR(unit))
+               goto out;
+
+       retval = 0;
+
+       zfcp_erp_unit_reopen(unit, 0, 94, NULL);
+       zfcp_erp_wait(unit->port->adapter);
+       zfcp_unit_put(unit);
+out:
+       up(&zfcp_data.config_sema);
+       return retval ? retval : (ssize_t) count;
+}
+static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
+
+static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct zfcp_port *port = dev_get_drvdata(dev);
+       struct zfcp_unit *unit;
+       fcp_lun_t fcp_lun;
+       int retval = 0;
+
+       down(&zfcp_data.config_sema);
+       if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
+               retval = -EBUSY;
+               goto out;
+       }
+
+       if (strict_strtoull(buf, 0, &fcp_lun)) {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       write_lock_irq(&zfcp_data.config_lock);
+       unit = zfcp_get_unit_by_lun(port, fcp_lun);
+       if (unit && (atomic_read(&unit->refcount) == 0)) {
+               zfcp_unit_get(unit);
+               atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+               list_move(&unit->list, &port->unit_remove_lh);
+       } else
+               unit = NULL;
+
+       write_unlock_irq(&zfcp_data.config_lock);
+
+       if (!unit) {
+               retval = -ENXIO;
+               goto out;
+       }
+
+       zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
+       zfcp_erp_wait(unit->port->adapter);
+       zfcp_unit_put(unit);
+       zfcp_unit_dequeue(unit);
+out:
+       up(&zfcp_data.config_sema);
+       return retval ? retval : (ssize_t) count;
+}
+static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
+
+static struct attribute *zfcp_port_ns_attrs[] = {
+       &dev_attr_port_failed.attr,
+       &dev_attr_port_in_recovery.attr,
+       &dev_attr_port_status.attr,
+       &dev_attr_port_access_denied.attr,
+       NULL
+};
+
+/**
+ * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
+ */
+struct attribute_group zfcp_sysfs_ns_port_attrs = {
+       .attrs = zfcp_port_ns_attrs,
+};
+
+static struct attribute *zfcp_port_no_ns_attrs[] = {
+       &dev_attr_unit_add.attr,
+       &dev_attr_unit_remove.attr,
+       &dev_attr_port_failed.attr,
+       &dev_attr_port_in_recovery.attr,
+       &dev_attr_port_status.attr,
+       &dev_attr_port_access_denied.attr,
+       NULL
+};
+
+/**
+ * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
+ */
+struct attribute_group zfcp_sysfs_port_attrs = {
+       .attrs = zfcp_port_no_ns_attrs,
+};
+
+static struct attribute *zfcp_unit_attrs[] = {
+       &dev_attr_unit_failed.attr,
+       &dev_attr_unit_in_recovery.attr,
+       &dev_attr_unit_status.attr,
+       &dev_attr_unit_access_denied.attr,
+       &dev_attr_unit_access_shared.attr,
+       &dev_attr_unit_access_readonly.attr,
+       NULL
+};
+
+struct attribute_group zfcp_sysfs_unit_attrs = {
+       .attrs = zfcp_unit_attrs,
+};
+
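+/* per-unit latency statistics: the show routine reports fabric and channel
+ * min/max/sum plus a request counter; any write resets the values */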
+#define ZFCP_DEFINE_LATENCY_ATTR(_name)                                \
+static ssize_t                                                         \
+zfcp_sysfs_unit_##_name##_latency_show(struct device *dev,             \
+                                      struct device_attribute *attr,   \
+                                      char *buf) {                     \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+       struct zfcp_unit *unit = sdev->hostdata;                        \
+       struct zfcp_latencies *lat = &unit->latencies;                  \
+       struct zfcp_adapter *adapter = unit->port->adapter;             \
+       unsigned long flags;                                            \
+       unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc;      \
+                                                                       \
+       spin_lock_irqsave(&lat->lock, flags);                           \
+       fsum = lat->_name.fabric.sum * adapter->timer_ticks;            \
+       fmin = lat->_name.fabric.min * adapter->timer_ticks;            \
+       fmax = lat->_name.fabric.max * adapter->timer_ticks;            \
+       csum = lat->_name.channel.sum * adapter->timer_ticks;           \
+       cmin = lat->_name.channel.min * adapter->timer_ticks;           \
+       cmax = lat->_name.channel.max * adapter->timer_ticks;           \
+       cc  = lat->_name.counter;                                       \
+       spin_unlock_irqrestore(&lat->lock, flags);                      \
+                                                                       \
+       do_div(fsum, 1000);                                             \
+       do_div(fmin, 1000);                                             \
+       do_div(fmax, 1000);                                             \
+       do_div(csum, 1000);                                             \
+       do_div(cmin, 1000);                                             \
+       do_div(cmax, 1000);                                             \
+                                                                       \
+       return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n",     \
+                      fmin, fmax, fsum, cmin, cmax, csum, cc);         \
+}                                                                      \
+static ssize_t                                                         \
+zfcp_sysfs_unit_##_name##_latency_store(struct device *dev,            \
+                                       struct device_attribute *attr,  \
+                                       const char *buf, size_t count)  \
+{                                                                      \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+       struct zfcp_unit *unit = sdev->hostdata;                        \
+       struct zfcp_latencies *lat = &unit->latencies;                  \
+       unsigned long flags;                                            \
+                                                                       \
+       spin_lock_irqsave(&lat->lock, flags);                           \
+       lat->_name.fabric.sum = 0;                                      \
+       lat->_name.fabric.min = 0xFFFFFFFF;                             \
+       lat->_name.fabric.max = 0;                                      \
+       lat->_name.channel.sum = 0;                                     \
+       lat->_name.channel.min = 0xFFFFFFFF;                            \
+       lat->_name.channel.max = 0;                                     \
+       lat->_name.counter = 0;                                         \
+       spin_unlock_irqrestore(&lat->lock, flags);                      \
+                                                                       \
+       return (ssize_t) count;                                         \
+}                                                                      \
+static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO,                 \
+                  zfcp_sysfs_unit_##_name##_latency_show,              \
+                  zfcp_sysfs_unit_##_name##_latency_store);
+
+ZFCP_DEFINE_LATENCY_ATTR(read);
+ZFCP_DEFINE_LATENCY_ATTR(write);
+ZFCP_DEFINE_LATENCY_ATTR(cmd);
+
+#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value)                  \
+static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,      \
+                                             struct device_attribute *attr,\
+                                             char *buf)                 \
+{                                                                        \
+       struct scsi_device *sdev  = to_scsi_device(dev);                 \
+       struct zfcp_unit *unit = sdev->hostdata;                         \
+                                                                        \
+       return sprintf(buf, _format, _value);                            \
+}                                                                        \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
+
+ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
+       unit->port->adapter->ccw_device->dev.bus_id);
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
+ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
+
+struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
+       &dev_attr_fcp_lun,
+       &dev_attr_wwpn,
+       &dev_attr_hba_id,
+       &dev_attr_read_latency,
+       &dev_attr_write_latency,
+       &dev_attr_cmd_latency,
+       NULL
+};
+
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+                                           struct device_attribute *attr,
+                                           char *buf)
+{
+       struct Scsi_Host *scsi_host = dev_to_shost(dev);
+       struct fsf_qtcb_bottom_port *qtcb_port;
+       struct zfcp_adapter *adapter;
+       int retval;
+
+       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+               return -EOPNOTSUPP;
+
+       qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+       if (!qtcb_port)
+               return -ENOMEM;
+
+       retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+       if (!retval)
+               retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+                                qtcb_port->cb_util, qtcb_port->a_util);
+       kfree(qtcb_port);
+       return retval;
+}
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+                                       struct fsf_statistics_info *stat_inf)
+{
+       struct Scsi_Host *scsi_host = dev_to_shost(dev);
+       struct fsf_qtcb_bottom_config *qtcb_config;
+       struct zfcp_adapter *adapter;
+       int retval;
+
+       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+               return -EOPNOTSUPP;
+
+       qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+                             GFP_KERNEL);
+       if (!qtcb_config)
+               return -ENOMEM;
+
+       retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+       if (!retval)
+               *stat_inf = qtcb_config->stat_info;
+
+       kfree(qtcb_config);
+       return retval;
+}
+
+#define ZFCP_SHOST_ATTR(_name, _format, _arg...)                       \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,   \
+                                                struct device_attribute *attr,\
+                                                char *buf)             \
+{                                                                      \
+       struct fsf_statistics_info stat_info;                           \
+       int retval;                                                     \
+                                                                       \
+       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);         \
+       if (retval)                                                     \
+               return retval;                                          \
+                                                                       \
+       return sprintf(buf, _format, ## _arg);                          \
+}                                                                      \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
+               (unsigned long long) stat_info.input_req,
+               (unsigned long long) stat_info.output_req,
+               (unsigned long long) stat_info.control_req);
+
+ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
+               (unsigned long long) stat_info.input_mb,
+               (unsigned long long) stat_info.output_mb);
+
+ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
+               (unsigned long long) stat_info.seconds_act);
+
+struct device_attribute *zfcp_sysfs_shost_attrs[] = {
+       &dev_attr_utilization,
+       &dev_attr_requests,
+       &dev_attr_megabytes,
+       &dev_attr_seconds_active,
+       NULL
+};
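
The latency attributes defined above surface per-LUN fabric and channel latencies through the scsi_device sysfs directory; writing anything to one of them resets its counters, since the store function zeroes every field without parsing the input. As a minimal sketch, assuming a LUN at SCSI address 0:0:0:0 under the usual /sys/bus/scsi/devices layout (the path is an assumption for illustration, not something defined by this patch), the seven fields printed by the show function can be read back like this:

/* Sketch only: parse one of the *_latency attributes shown above.
 * The sysfs path and SCSI address are assumptions for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned long long fmin, fmax, fsum, cmin, cmax, csum, count;
        FILE *f = fopen("/sys/bus/scsi/devices/0:0:0:0/read_latency", "r");

        if (!f)
                return 1;
        /* Field order matches the sprintf() in the show macro:
         * fabric min/max/sum, channel min/max/sum, request counter. */
        if (fscanf(f, "%llu %llu %llu %llu %llu %llu %llu",
                   &fmin, &fmax, &fsum, &cmin, &cmax, &csum, &count) == 7)
                printf("fabric max %llu, %llu requests sampled\n",
                       fmax, count);
        fclose(f);
        return 0;
}
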
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
deleted file mode 100644 (file)
index ccbba4d..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "zfcp_ext.h"
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-/**
- * ZFCP_DEFINE_ADAPTER_ATTR
- * @_name:   name of show attribute
- * @_format: format string
- * @_value:  value to print
- *
- * Generates attributes for an adapter.
- */
-#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value)                      \
-static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct device_attribute *attr,          \
-                                                char *buf)                   \
-{                                                                             \
-       struct zfcp_adapter *adapter;                                         \
-                                                                              \
-       adapter = dev_get_drvdata(dev);                                       \
-       return sprintf(buf, _format, _value);                                 \
-}                                                                             \
-                                                                              \
-static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
-
-ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
-ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
-ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
-ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
-ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
-ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
-ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
-                        adapter->hardware_version);
-ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
-                        (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
-
-/**
- * zfcp_sysfs_port_add_store - add a port to sysfs tree
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "port_add" attribute of an adapter.
- */
-static ssize_t
-zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       wwn_t wwpn;
-       char *endp;
-       struct zfcp_adapter *adapter;
-       struct zfcp_port *port;
-       int retval = -EINVAL;
-
-       down(&zfcp_data.config_sema);
-
-       adapter = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       wwpn = simple_strtoull(buf, &endp, 0);
-       if ((endp + 1) < (buf + count))
-               goto out;
-
-       port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
-       if (!port)
-               goto out;
-
-       retval = 0;
-
-       zfcp_erp_port_reopen(port, 0, 91, NULL);
-       zfcp_erp_wait(port->adapter);
-       zfcp_port_put(port);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
-
-/**
- * zfcp_sysfs_port_remove_store - remove a port from sysfs tree
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "port_remove" attribute of an adapter.
- */
-static ssize_t
-zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct zfcp_adapter *adapter;
-       struct zfcp_port *port;
-       wwn_t wwpn;
-       char *endp;
-       int retval = 0;
-
-       down(&zfcp_data.config_sema);
-
-       adapter = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       wwpn = simple_strtoull(buf, &endp, 0);
-       if ((endp + 1) < (buf + count)) {
-               retval = -EINVAL;
-               goto out;
-       }
-
-       write_lock_irq(&zfcp_data.config_lock);
-       port = zfcp_get_port_by_wwpn(adapter, wwpn);
-       if (port && (atomic_read(&port->refcount) == 0)) {
-               zfcp_port_get(port);
-               atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
-               list_move(&port->list, &adapter->port_remove_lh);
-       }
-       else {
-               port = NULL;
-       }
-       write_unlock_irq(&zfcp_data.config_lock);
-
-       if (!port) {
-               retval = -ENXIO;
-               goto out;
-       }
-
-       zfcp_erp_port_shutdown(port, 0, 92, NULL);
-       zfcp_erp_wait(adapter);
-       zfcp_port_put(port);
-       zfcp_port_dequeue(port);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
-
-/**
- * zfcp_sysfs_adapter_failed_store - failed state of adapter
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "failed" attribute of an adapter.
- * If a "0" gets written to "failed", error recovery will be
- * started for the belonging adapter.
- */
-static ssize_t
-zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr,
-                               const char *buf, size_t count)
-{
-       struct zfcp_adapter *adapter;
-       unsigned int val;
-       char *endp;
-       int retval = 0;
-
-       down(&zfcp_data.config_sema);
-
-       adapter = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       val = simple_strtoul(buf, &endp, 0);
-       if (((endp + 1) < (buf + count)) || (val != 0)) {
-               retval = -EINVAL;
-               goto out;
-       }
-
-       zfcp_erp_modify_adapter_status(adapter, 44, NULL,
-                                      ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-       zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93,
-                               NULL);
-       zfcp_erp_wait(adapter);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-/**
- * zfcp_sysfs_adapter_failed_show - failed state of adapter
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- *
- * Show function of "failed" attribute of adapter. Will be
- * "0" if adapter is working, otherwise "1".
- */
-static ssize_t
-zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct zfcp_adapter *adapter;
-
-       adapter = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
-               return sprintf(buf, "1\n");
-       else
-               return sprintf(buf, "0\n");
-}
-
-static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
-                  zfcp_sysfs_adapter_failed_store);
-
-static struct attribute *zfcp_adapter_attrs[] = {
-       &dev_attr_failed.attr,
-       &dev_attr_in_recovery.attr,
-       &dev_attr_port_remove.attr,
-       &dev_attr_port_add.attr,
-       &dev_attr_peer_wwnn.attr,
-       &dev_attr_peer_wwpn.attr,
-       &dev_attr_peer_d_id.attr,
-       &dev_attr_card_version.attr,
-       &dev_attr_lic_version.attr,
-       &dev_attr_status.attr,
-       &dev_attr_hardware_version.attr,
-       NULL
-};
-
-static struct attribute_group zfcp_adapter_attr_group = {
-       .attrs = zfcp_adapter_attrs,
-};
-
-/**
- * zfcp_sysfs_create_adapter_files - create sysfs adapter files
- * @dev: pointer to belonging device
- *
- * Create all attributes of the sysfs representation of an adapter.
- */
-int
-zfcp_sysfs_adapter_create_files(struct device *dev)
-{
-       return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
-}
-
-/**
- * zfcp_sysfs_remove_adapter_files - remove sysfs adapter files
- * @dev: pointer to belonging device
- *
- * Remove all attributes of the sysfs representation of an adapter.
- */
-void
-zfcp_sysfs_adapter_remove_files(struct device *dev)
-{
-       sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
-}
-
-#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c
deleted file mode 100644 (file)
index 651edd5..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "zfcp_ext.h"
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-/**
- * ZFCP_DEFINE_DRIVER_ATTR - define for all loglevels sysfs attributes
- * @_name:       name of attribute
- * @_define:     name of ZFCP loglevel define
- *
- * Generates store function for a sysfs loglevel attribute of zfcp driver.
- */
-#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define)                               \
-static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
-                                                  const char *buf,           \
-                                                  size_t count)              \
-{                                                                             \
-       unsigned int loglevel;                                                \
-       unsigned int new_loglevel;                                            \
-       char *endp;                                                           \
-                                                                              \
-       new_loglevel = simple_strtoul(buf, &endp, 0);                         \
-       if ((endp + 1) < (buf + count))                                       \
-               return -EINVAL;                                               \
-       if (new_loglevel > 3)                                                 \
-               return -EINVAL;                                               \
-       down(&zfcp_data.config_sema);                                         \
-       loglevel = atomic_read(&zfcp_data.loglevel);                          \
-       loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2));  \
-       loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2);           \
-       atomic_set(&zfcp_data.loglevel, loglevel);                            \
-       up(&zfcp_data.config_sema);                                           \
-       return count;                                                         \
-}                                                                             \
-                                                                              \
-static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev,  \
-                                                 char *buf)                  \
-{                                                                             \
-       return sprintf(buf,"%d\n", (unsigned int)                             \
-                      ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define));          \
-}                                                                             \
-                                                                              \
-static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO,                       \
-                  zfcp_sysfs_loglevel_##_name##_show,                        \
-                  zfcp_sysfs_loglevel_##_name##_store);
-
-ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
-ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
-ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
-ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
-ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
-ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
-ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
-ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
-
-static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
-                                             char *buf)
-{
-       return sprintf(buf, "%s\n", zfcp_data.driver_version);
-}
-
-static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
-
-static struct attribute *zfcp_driver_attrs[] = {
-       &driver_attr_loglevel_other.attr,
-       &driver_attr_loglevel_scsi.attr,
-       &driver_attr_loglevel_fsf.attr,
-       &driver_attr_loglevel_config.attr,
-       &driver_attr_loglevel_cio.attr,
-       &driver_attr_loglevel_qdio.attr,
-       &driver_attr_loglevel_erp.attr,
-       &driver_attr_loglevel_fc.attr,
-       &driver_attr_version.attr,
-       NULL
-};
-
-static struct attribute_group zfcp_driver_attr_group = {
-       .attrs = zfcp_driver_attrs,
-};
-
-struct attribute_group *zfcp_driver_attr_groups[] = {
-       &zfcp_driver_attr_group,
-       NULL,
-};
-
-#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
deleted file mode 100644 (file)
index 703c1b5..0000000
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "zfcp_ext.h"
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-/**
- * zfcp_sysfs_port_release - gets called when a struct device port is released
- * @dev: pointer to belonging device
- */
-void
-zfcp_sysfs_port_release(struct device *dev)
-{
-       kfree(dev);
-}
-
-/**
- * ZFCP_DEFINE_PORT_ATTR
- * @_name:   name of show attribute
- * @_format: format string
- * @_value:  value to print
- *
- * Generates attributes for a port.
- */
-#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value)                    \
-static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, struct device_attribute *attr,        \
-                                              char *buf)                 \
-{                                                                        \
-        struct zfcp_port *port;                                          \
-                                                                         \
-        port = dev_get_drvdata(dev);                                     \
-        return sprintf(buf, _format, _value);                            \
-}                                                                        \
-                                                                         \
-static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);
-
-ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
-ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
-ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));
-
-/**
- * zfcp_sysfs_unit_add_store - add a unit to sysfs tree
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "unit_add" attribute of a port.
- */
-static ssize_t
-zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       fcp_lun_t fcp_lun;
-       char *endp;
-       struct zfcp_port *port;
-       struct zfcp_unit *unit;
-       int retval = -EINVAL;
-
-       down(&zfcp_data.config_sema);
-
-       port = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       fcp_lun = simple_strtoull(buf, &endp, 0);
-       if ((endp + 1) < (buf + count))
-               goto out;
-
-       unit = zfcp_unit_enqueue(port, fcp_lun);
-       if (!unit)
-               goto out;
-
-       retval = 0;
-
-       zfcp_erp_unit_reopen(unit, 0, 94, NULL);
-       zfcp_erp_wait(unit->port->adapter);
-       zfcp_unit_put(unit);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
-
-/**
- * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- */
-static ssize_t
-zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct zfcp_port *port;
-       struct zfcp_unit *unit;
-       fcp_lun_t fcp_lun;
-       char *endp;
-       int retval = 0;
-
-       down(&zfcp_data.config_sema);
-
-       port = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       fcp_lun = simple_strtoull(buf, &endp, 0);
-       if ((endp + 1) < (buf + count)) {
-               retval = -EINVAL;
-               goto out;
-       }
-
-       write_lock_irq(&zfcp_data.config_lock);
-       unit = zfcp_get_unit_by_lun(port, fcp_lun);
-       if (unit && (atomic_read(&unit->refcount) == 0)) {
-               zfcp_unit_get(unit);
-               atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
-               list_move(&unit->list, &port->unit_remove_lh);
-       }
-       else {
-               unit = NULL;
-       }
-       write_unlock_irq(&zfcp_data.config_lock);
-
-       if (!unit) {
-               retval = -ENXIO;
-               goto out;
-       }
-
-       zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
-       zfcp_erp_wait(unit->port->adapter);
-       zfcp_unit_put(unit);
-       zfcp_unit_dequeue(unit);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
-
-/**
- * zfcp_sysfs_port_failed_store - failed state of port
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "failed" attribute of a port.
- * If a "0" gets written to "failed", error recovery will be
- * started for the belonging port.
- */
-static ssize_t
-zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct zfcp_port *port;
-       unsigned int val;
-       char *endp;
-       int retval = 0;
-
-       down(&zfcp_data.config_sema);
-
-       port = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       val = simple_strtoul(buf, &endp, 0);
-       if (((endp + 1) < (buf + count)) || (val != 0)) {
-               retval = -EINVAL;
-               goto out;
-       }
-
-       zfcp_erp_modify_port_status(port, 45, NULL,
-                                   ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-       zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
-       zfcp_erp_wait(port->adapter);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-/**
- * zfcp_sysfs_port_failed_show - failed state of port
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- *
- * Show function of "failed" attribute of port. Will be
- * "0" if port is working, otherwise "1".
- */
-static ssize_t
-zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct zfcp_port *port;
-
-       port = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
-               return sprintf(buf, "1\n");
-       else
-               return sprintf(buf, "0\n");
-}
-
-static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
-                  zfcp_sysfs_port_failed_store);
-
-/**
- * zfcp_port_common_attrs
- * sysfs attributes that are common for all kind of fc ports.
- */
-static struct attribute *zfcp_port_common_attrs[] = {
-       &dev_attr_failed.attr,
-       &dev_attr_in_recovery.attr,
-       &dev_attr_status.attr,
-       &dev_attr_access_denied.attr,
-       NULL
-};
-
-static struct attribute_group zfcp_port_common_attr_group = {
-       .attrs = zfcp_port_common_attrs,
-};
-
-/**
- * zfcp_port_no_ns_attrs
- * sysfs attributes not to be used for nameserver ports.
- */
-static struct attribute *zfcp_port_no_ns_attrs[] = {
-       &dev_attr_unit_add.attr,
-       &dev_attr_unit_remove.attr,
-       NULL
-};
-
-static struct attribute_group zfcp_port_no_ns_attr_group = {
-       .attrs = zfcp_port_no_ns_attrs,
-};
-
-/**
- * zfcp_sysfs_port_create_files - create sysfs port files
- * @dev: pointer to belonging device
- *
- * Create all attributes of the sysfs representation of a port.
- */
-int
-zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
-{
-       int retval;
-
-       retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);
-
-       if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
-               return retval;
-
-       retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
-       if (retval)
-               sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
-
-       return retval;
-}
-
-/**
- * zfcp_sysfs_port_remove_files - remove sysfs port files
- * @dev: pointer to belonging device
- *
- * Remove all attributes of the sysfs representation of a port.
- */
-void
-zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
-{
-       sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
-       if (!(flags & ZFCP_STATUS_PORT_WKA))
-               sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
-}
-
-#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
deleted file mode 100644 (file)
index 80fb2c2..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "zfcp_ext.h"
-
-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-/**
- * zfcp_sysfs_unit_release - gets called when a struct device unit is released
- * @dev: pointer to belonging device
- */
-void
-zfcp_sysfs_unit_release(struct device *dev)
-{
-       kfree(dev);
-}
-
-/**
- * ZFCP_DEFINE_UNIT_ATTR
- * @_name:   name of show attribute
- * @_format: format string
- * @_value:  value to print
- *
- * Generates attribute for a unit.
- */
-#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value)                    \
-static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, struct device_attribute *attr,        \
-                                              char *buf)                 \
-{                                                                        \
-        struct zfcp_unit *unit;                                          \
-                                                                         \
-        unit = dev_get_drvdata(dev);                                     \
-        return sprintf(buf, _format, _value);                            \
-}                                                                        \
-                                                                         \
-static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);
-
-ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
-ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
-ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
-ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_UNIT_SHARED, &unit->status));
-ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
-                     (ZFCP_STATUS_UNIT_READONLY, &unit->status));
-
-/**
- * zfcp_sysfs_unit_failed_store - failed state of unit
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- * @count: number of bytes in buffer
- *
- * Store function of the "failed" attribute of a unit.
- * If a "0" gets written to "failed", error recovery will be
- * started for the belonging unit.
- */
-static ssize_t
-zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct zfcp_unit *unit;
-       unsigned int val;
-       char *endp;
-       int retval = 0;
-
-       down(&zfcp_data.config_sema);
-       unit = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       val = simple_strtoul(buf, &endp, 0);
-       if (((endp + 1) < (buf + count)) || (val != 0)) {
-               retval = -EINVAL;
-               goto out;
-       }
-
-       zfcp_erp_modify_unit_status(unit, 46, NULL,
-                                   ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-       zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL);
-       zfcp_erp_wait(unit->port->adapter);
- out:
-       up(&zfcp_data.config_sema);
-       return retval ? retval : (ssize_t) count;
-}
-
-/**
- * zfcp_sysfs_unit_failed_show - failed state of unit
- * @dev: pointer to belonging device
- * @buf: pointer to input buffer
- *
- * Show function of "failed" attribute of unit. Will be
- * "0" if unit is working, otherwise "1".
- */
-static ssize_t
-zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct zfcp_unit *unit;
-
-       unit = dev_get_drvdata(dev);
-       if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
-               return sprintf(buf, "1\n");
-       else
-               return sprintf(buf, "0\n");
-}
-
-static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
-                  zfcp_sysfs_unit_failed_store);
-
-static struct attribute *zfcp_unit_attrs[] = {
-       &dev_attr_failed.attr,
-       &dev_attr_in_recovery.attr,
-       &dev_attr_status.attr,
-       &dev_attr_access_denied.attr,
-       &dev_attr_access_shared.attr,
-       &dev_attr_access_readonly.attr,
-       NULL
-};
-
-static struct attribute_group zfcp_unit_attr_group = {
-       .attrs = zfcp_unit_attrs,
-};
-
-/**
- * zfcp_sysfs_create_unit_files - create sysfs unit files
- * @dev: pointer to belonging device
- *
- * Create all attributes of the sysfs representation of a unit.
- */
-int
-zfcp_sysfs_unit_create_files(struct device *dev)
-{
-       return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
-}
-
-/**
- * zfcp_sysfs_remove_unit_files - remove sysfs unit files
- * @dev: pointer to belonging device
- *
- * Remove all attributes of the sysfs representation of a unit.
- */
-void
-zfcp_sysfs_unit_remove_files(struct device *dev)
-{
-       sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
-}
-
-#undef ZFCP_LOG_AREA
index 81ccbd7..26be540 100644 (file)
@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
          To compile this driver as a module, choose M here: the
          module will be called ibmvstgt.
 
+config SCSI_IBMVFC
+       tristate "IBM Virtual FC support"
+       depends on PPC_PSERIES && SCSI
+       select SCSI_FC_ATTRS
+       help
+         This is the IBM POWER Virtual FC Client.
+
+         To compile this driver as a module, choose M here: the
+         module will be called ibmvfc.
+
+config SCSI_IBMVFC_TRACE
+       bool "enable driver internal trace"
+       depends on SCSI_IBMVFC
+       default y
+       help
+         If you say Y here, the driver will trace all commands issued
+         to the adapter. Performance impact is minimal. Trace can be
+         dumped using /sys/class/scsi_host/hostXX/trace.
+
 config SCSI_INITIO
        tristate "Initio 9100U(W) support"
        depends on PCI && SCSI
@@ -1738,10 +1757,12 @@ config SCSI_SUNESP
        select SCSI_SPI_ATTRS
        help
          This is the driver for the Sun ESP SCSI host adapter. The ESP
-         chipset is present in most SPARC SBUS-based computers.
+         chipset is present in most SPARC SBUS-based computers and
+         supports the Emulex family of ESP SCSI chips (esp100, esp100A,
+         esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
 
          To compile this driver as a module, choose M here: the
-         module will be called esp.
+         module will be called sun_esp.
 
 config ZFCP
        tristate "FCP host bus adapter driver for IBM eServer zSeries"
@@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/pcmcia/Kconfig"
 
+source "drivers/scsi/device_handler/Kconfig"
+
 endmenu
index 6c775e3..a814967 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS)        += scsi_transport_iscsi.o
 obj-$(CONFIG_SCSI_SAS_ATTRS)   += scsi_transport_sas.o
 obj-$(CONFIG_SCSI_SAS_LIBSAS)  += libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)   += scsi_transport_srp.o
+obj-$(CONFIG_SCSI_DH)          += device_handler/
 
 obj-$(CONFIG_ISCSI_TCP)        += libiscsi.o   iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)  += libiscsi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR)              += ipr.o
 obj-$(CONFIG_SCSI_SRP)         += libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)    += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVSCSIS)   += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVFC)      += ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)      += hptiop.o
 obj-$(CONFIG_SCSI_STEX)                += stex.o
 obj-$(CONFIG_SCSI_MVSAS)       += mvsas.o
index 5fd83de..a735526 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/kthread.h>
 #include <linux/semaphore.h>
 #include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
 
 #include "aacraid.h"
 
@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < upsg->count; i++) {
                                u64 addr;
                                void* p;
+                               if (upsg->sg[i].count >
+                                   ((dev->adapter_info.options &
+                                     AAC_OPT_NEW_COMM) ?
+                                     (dev->scsi_host_ptr->max_sectors << 9) :
+                                     65536)) {
+                                       rcode = -EINVAL;
+                                       goto cleanup;
+                               }
                                /* Does this really need to be GFP_DMA? */
                                p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
                                if(!p) {
@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < usg->count; i++) {
                                u64 addr;
                                void* p;
+                               if (usg->sg[i].count >
+                                   ((dev->adapter_info.options &
+                                     AAC_OPT_NEW_COMM) ?
+                                     (dev->scsi_host_ptr->max_sectors << 9) :
+                                     65536)) {
+                                       rcode = -EINVAL;
+                                       goto cleanup;
+                               }
                                /* Does this really need to be GFP_DMA? */
                                p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
                                if(!p) {
@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < upsg->count; i++) {
                                uintptr_t addr;
                                void* p;
+                               if (usg->sg[i].count >
+                                   ((dev->adapter_info.options &
+                                     AAC_OPT_NEW_COMM) ?
+                                     (dev->scsi_host_ptr->max_sectors << 9) :
+                                     65536)) {
+                                       rcode = -EINVAL;
+                                       goto cleanup;
+                               }
                                /* Does this really need to be GFP_DMA? */
                                p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
                                if(!p) {
@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < upsg->count; i++) {
                                dma_addr_t addr;
                                void* p;
+                               if (upsg->sg[i].count >
+                                   ((dev->adapter_info.options &
+                                     AAC_OPT_NEW_COMM) ?
+                                     (dev->scsi_host_ptr->max_sectors << 9) :
+                                     65536)) {
+                                       rcode = -EINVAL;
+                                       goto cleanup;
+                               }
                                p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
                                if (!p) {
                                        dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
index 68c140e..9aa301c 100644 (file)
@@ -865,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device,
        return len;
 }
 
-ssize_t aac_show_serial_number(struct device *device,
+static ssize_t aac_show_serial_number(struct device *device,
                               struct device_attribute *attr, char *buf)
 {
        struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644 (file)
index 0000000..2adc0f6
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# SCSI Device Handler configuration
+#
+
+menuconfig SCSI_DH
+       tristate "SCSI Device Handlers"
+       depends on SCSI
+       default n
+       help
+         SCSI Device Handlers provide device specific support for
+         devices utilized in multipath configurations. Say Y here to
+         select support for specific hardware.
+
+config SCSI_DH_RDAC
+       tristate "LSI RDAC Device Handler"
+       depends on SCSI_DH
+       help
+       If you have an LSI RDAC, select y. Otherwise, say N.
+
+config SCSI_DH_HP_SW
+       tristate "HP/COMPAQ MSA Device Handler"
+       depends on SCSI_DH
+       help
+       If you have an HP/COMPAQ MSA device that requires START_STOP to
+       be sent to start it and cannot upgrade the firmware, then select y.
+       Otherwise, say N.
+
+config SCSI_DH_EMC
+       tristate "EMC CLARiiON Device Handler"
+       depends on SCSI_DH
+       help
+       If you have an EMC CLARiiON, select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644 (file)
index 0000000..35272e9
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# SCSI Device Handler
+#
+obj-$(CONFIG_SCSI_DH)          += scsi_dh.o
+obj-$(CONFIG_SCSI_DH_RDAC)     += scsi_dh_rdac.o
+obj-$(CONFIG_SCSI_DH_HP_SW)    += scsi_dh_hp_sw.o
+obj-$(CONFIG_SCSI_DH_EMC)      += scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644 (file)
index 0000000..ab6c21c
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * SCSI device handler infrastructure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ *      Authors:
+ *               Chandra Seetharaman <sekharan@us.ibm.com>
+ *               Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <scsi/scsi_dh.h>
+#include "../scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+static struct scsi_device_handler *get_device_handler(const char *name)
+{
+       struct scsi_device_handler *tmp, *found = NULL;
+
+       spin_lock(&list_lock);
+       list_for_each_entry(tmp, &scsi_dh_list, list) {
+               if (!strcmp(tmp->name, name)) {
+                       found = tmp;
+                       break;
+               }
+       }
+       spin_unlock(&list_lock);
+       return found;
+}
+
+static int scsi_dh_notifier_add(struct device *dev, void *data)
+{
+       struct scsi_device_handler *scsi_dh = data;
+
+       scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
+       return 0;
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+       int ret = -EBUSY;
+       struct scsi_device_handler *tmp;
+
+       tmp = get_device_handler(scsi_dh->name);
+       if (tmp)
+               goto done;
+
+       ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+       bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+       spin_lock(&list_lock);
+       list_add(&scsi_dh->list, &scsi_dh_list);
+       spin_unlock(&list_lock);
+
+done:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+       struct scsi_device_handler *scsi_dh = data;
+
+       scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
+       return 0;
+}
+
+/*
+ * scsi_unregister_device_handler - unregister a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+       int ret = -ENODEV;
+       struct scsi_device_handler *tmp;
+
+       tmp = get_device_handler(scsi_dh->name);
+       if (!tmp)
+               goto done;
+
+       ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+       bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
+                                       scsi_dh_notifier_remove);
+       spin_lock(&list_lock);
+       list_del(&scsi_dh->list);
+       spin_unlock(&list_lock);
+
+done:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ *      corresponding to the given request queue.
+ * @q - Request queue that is associated with the scsi_device to be
+ *      activated.
+ */
+int scsi_dh_activate(struct request_queue *q)
+{
+       int err = 0;
+       unsigned long flags;
+       struct scsi_device *sdev;
+       struct scsi_device_handler *scsi_dh = NULL;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       sdev = q->queuedata;
+       if (sdev && sdev->scsi_dh_data)
+               scsi_dh = sdev->scsi_dh_data->scsi_dh;
+       if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+               err = SCSI_DH_NOSYS;
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       if (err)
+               return err;
+
+       if (scsi_dh->activate)
+               err = scsi_dh->activate(sdev);
+       put_device(&sdev->sdev_gendev);
+       return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
+
+/*
+ * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
+ *     the given name. FALSE(0) otherwise.
+ * @name - name of the device handler.
+ */
+int scsi_dh_handler_exist(const char *name)
+{
+       return (get_device_handler(name) != NULL);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+MODULE_DESCRIPTION("SCSI device handler");
+MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
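
To make the registration interface above concrete, here is a minimal sketch of a hypothetical handler module. Only the pieces visible in this file (the name, nb and activate members, scsi_register_device_handler(), scsi_unregister_device_handler(), SCSI_DH_OK) are taken from the patch; the module name and the empty callback bodies are invented for illustration, and the real handlers added in this series (emc, rdac, hp_sw) do substantially more work in their notifier and activate paths.

/* Hypothetical "example_dh" handler -- illustration only, not part of this patch. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>

static int example_dh_notifier(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        /* A real handler attaches/detaches its per-device state here on
         * BUS_NOTIFY_ADD_DEVICE / BUS_NOTIFY_DEL_DEVICE; omitted in this sketch. */
        return NOTIFY_DONE;
}

static int example_dh_activate(struct scsi_device *sdev)
{
        /* A real handler would send its array-specific failover command here. */
        return SCSI_DH_OK;
}

static struct scsi_device_handler example_dh = {
        .name             = "example_dh",
        .nb.notifier_call = example_dh_notifier,
        .activate         = example_dh_activate,
};

static int __init example_dh_init(void)
{
        return scsi_register_device_handler(&example_dh);
}

static void __exit example_dh_exit(void)
{
        scsi_unregister_device_handler(&example_dh);
}

module_init(example_dh_init);
module_exit(example_dh_exit);
MODULE_LICENSE("GPL");

With such a module loaded, a caller that holds a path's request queue (dm-mpath, for instance) drives failover through scsi_dh_activate(q), which resolves the queue back to its scsi_device and invokes the handler's activate method.
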
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644 (file)
index 0000000..ed53f14
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ * Target driver for EMC CLARiiON AX/CX-series hardware.
+ * Based on code from Lars Marowsky-Bree <lmb@suse.de>
+ * and Ed Goggin <egoggin@emc.com>.
+ *
+ * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <scsi/scsi_device.h>
+
+#define CLARIION_NAME                  "emc_clariion"
+
+#define CLARIION_TRESPASS_PAGE         0x22
+#define CLARIION_BUFFER_SIZE           0x80
+#define CLARIION_TIMEOUT               (60 * HZ)
+#define CLARIION_RETRIES               3
+#define CLARIION_UNBOUND_LU            -1
+
+static unsigned char long_trespass[] = {
+       0, 0, 0, 0,
+       CLARIION_TRESPASS_PAGE, /* Page code */
+       0x09,                   /* Page length - 2 */
+       0x81,                   /* Trespass code + Honor reservation bit */
+       0xff, 0xff,             /* Trespass target */
+       0, 0, 0, 0, 0, 0        /* Reserved bytes / unknown */
+};
+
+static unsigned char long_trespass_hr[] = {
+       0, 0, 0, 0,
+       CLARIION_TRESPASS_PAGE, /* Page code */
+       0x09,                   /* Page length - 2 */
+       0x01,                   /* Trespass code + Honor reservation bit */
+       0xff, 0xff,             /* Trespass target */
+       0, 0, 0, 0, 0, 0        /* Reserved bytes / unknown */
+};
+
+static unsigned char short_trespass[] = {
+       0, 0, 0, 0,
+       CLARIION_TRESPASS_PAGE, /* Page code */
+       0x02,                   /* Page length - 2 */
+       0x81,                   /* Trespass code + Honor reservation bit */
+       0xff,                   /* Trespass target */
+};
+
+static unsigned char short_trespass_hr[] = {
+       0, 0, 0, 0,
+       CLARIION_TRESPASS_PAGE, /* Page code */
+       0x02,                   /* Page length - 2 */
+       0x01,                   /* Trespass code + Honor reservation bit */
+       0xff,                   /* Trespass target */
+};
+
+struct clariion_dh_data {
+       /*
+        * Use short trespass command (FC-series) or the long version
+        * (default for AX/CX CLARiiON arrays).
+        */
+       unsigned short_trespass;
+       /*
+        * Whether or not (default) to honor SCSI reservations when
+        * initiating a switch-over.
+        */
+       unsigned hr;
+       /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+       char buffer[CLARIION_BUFFER_SIZE];
+       /*
+        * SCSI sense buffer for commands -- assumes serial issuance
+        * and completion sequence of all commands for same multipath.
+        */
+       unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+       /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+       int default_sp;
+       /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+       int current_sp;
+};
+
+static inline struct clariion_dh_data
+                       *get_clariion_data(struct scsi_device *sdev)
+{
+       struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+       BUG_ON(scsi_dh_data == NULL);
+       return ((struct clariion_dh_data *) scsi_dh_data->buf);
+}
+
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int trespass_endio(struct scsi_device *sdev, int result)
+{
+       int err = SCSI_DH_OK;
+       struct scsi_sense_hdr sshdr;
+       struct clariion_dh_data *csdev = get_clariion_data(sdev);
+       char *sense = csdev->sense;
+
+       if (status_byte(result) == CHECK_CONDITION &&
+           scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+               sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+                           "0x%2x, 0x%2x while sending CLARiiON trespass "
+                           "command.\n", sshdr.sense_key, sshdr.asc,
+                            sshdr.ascq);
+
+               if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
+                    (sshdr.ascq == 0x00)) {
+                       /*
+                        * Array based copy in progress -- do not send
+                        * mode_select or copy will be aborted mid-stream.
+                        */
+                       sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+                                   "progress while sending CLARiiON trespass "
+                                   "command.\n");
+                       err = SCSI_DH_DEV_TEMP_BUSY;
+               } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
+                           (sshdr.ascq == 0x03)) {
+                       /*
+                        * LUN Not Ready - Manual Intervention Required
+                        * indicates in-progress ucode upgrade (NDU).
+                        */
+                       sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+                                   "ucode upgrade NDU operation while sending "
+                                   "CLARiiON trespass command.\n");
+                       err = SCSI_DH_DEV_TEMP_BUSY;
+               } else
+                       err = SCSI_DH_DEV_FAILED;
+       } else if (result) {
+               sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
+                           "CLARiiON trespass command.\n", result);
+               err = SCSI_DH_IO;
+       }
+
+       return err;
+}
+
+static int parse_sp_info_reply(struct scsi_device *sdev, int result,
+               int *default_sp, int *current_sp, int *new_current_sp)
+{
+       int err = SCSI_DH_OK;
+       struct clariion_dh_data *csdev = get_clariion_data(sdev);
+
+       if (result == 0) {
+               /* check for in-progress ucode upgrade (NDU) */
+               if (csdev->buffer[48] != 0) {
+                       sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
+                              "ucode upgrade NDU operation while finding "
+                              "current active SP.");
+                       err = SCSI_DH_DEV_TEMP_BUSY;
+               } else {
+                       *default_sp = csdev->buffer[5];
+
+                       if (csdev->buffer[4] == 2)
+                               /* SP for path is current */
+                               *current_sp = csdev->buffer[8];
+                       else {
+                               if (csdev->buffer[4] == 1)
+                                       /* SP for this path is NOT current */
+                                       if (csdev->buffer[8] == 0)
+                                               *current_sp = 1;
+                                       else
+                                               *current_sp = 0;
+                               else
+                                       /* unbound LU or LUNZ */
+                                       *current_sp = CLARIION_UNBOUND_LU;
+                       }
+                       *new_current_sp =  csdev->buffer[8];
+               }
+       } else {
+               struct scsi_sense_hdr sshdr;
+
+               err = SCSI_DH_IO;
+
+               if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+                                                          &sshdr))
+                       sdev_printk(KERN_ERR, sdev, "Found valid sense data "
+                             "0x%2x, 0x%2x, 0x%2x while finding current "
+                             "active SP.", sshdr.sense_key, sshdr.asc,
+                             sshdr.ascq);
+               else
+                       sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
+                             "current active SP.", result);
+       }
+
+       return err;
+}
+
+static int sp_info_endio(struct scsi_device *sdev, int result,
+                                       int mode_select_sent, int *done)
+{
+       struct clariion_dh_data *csdev = get_clariion_data(sdev);
+       int err_flags, default_sp, current_sp, new_current_sp;
+
+       err_flags = parse_sp_info_reply(sdev, result, &default_sp,
+                                            &current_sp, &new_current_sp);
+
+       if (err_flags != SCSI_DH_OK)
+               goto done;
+
+       if (mode_select_sent) {
+               csdev->default_sp = default_sp;
+               csdev->current_sp = current_sp;
+       } else {
+               /*
+                        * Issue the actual mode select request IFF either
+                * (1) we do not know the identity of the current SP OR
+                * (2) what we think we know is actually correct.
+                */
+               if ((current_sp != CLARIION_UNBOUND_LU) &&
+                   (new_current_sp != current_sp)) {
+
+                       csdev->default_sp = default_sp;
+                       csdev->current_sp = current_sp;
+
+                       sdev_printk(KERN_INFO, sdev, "Ignoring path group "
+                              "switch-over command for CLARiiON SP%s since "
+                              "mapped device is already initialized.",
+                              current_sp ? "B" : "A");
+                       if (done)
+                               *done = 1; /* as good as doing it */
+               }
+       }
+done:
+       return err_flags;
+}
+
+/*
+ * Get block request for REQ_BLOCK_PC command issued to path.  Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct scsi_device *sdev, int cmd)
+{
+       struct clariion_dh_data *csdev = get_clariion_data(sdev);
+       struct request *rq;
+       unsigned char *page22;
+       int len = 0;
+
+       rq = blk_get_request(sdev->request_queue,
+                       (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+       if (!rq) {
+               sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
+               return NULL;
+       }
+
+       memset(&rq->cmd, 0, BLK_MAX_CDB);
+       rq->cmd[0] = cmd;
+       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+       switch (cmd) {
+       case MODE_SELECT:
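+               /*
+                * Pick the trespass mode page image to send: the short or
+                * long form, in the "hr" (honor reservation) variant when
+                * configured.
+                */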
+               if (csdev->short_trespass) {
+                       page22 = csdev->hr ? short_trespass_hr : short_trespass;
+                       len = sizeof(short_trespass);
+               } else {
+                       page22 = csdev->hr ? long_trespass_hr : long_trespass;
+                       len = sizeof(long_trespass);
+               }
+               /*
+                * Can't DMA from kernel BSS -- must copy selected trespass
+                * command mode page contents to context buffer which is
+                * allocated by kmalloc.
+                */
+               BUG_ON(len > CLARIION_BUFFER_SIZE);
+               memcpy(csdev->buffer, page22, len);
+               rq->cmd_flags |= REQ_RW;
+               rq->cmd[1] = 0x10;
+               break;
+       case INQUIRY:
+               rq->cmd[1] = 0x1;
+               rq->cmd[2] = 0xC0;
+               len = CLARIION_BUFFER_SIZE;
+               memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+               break;
+       default:
+               BUG_ON(1);
+               break;
+       }
+
+       rq->cmd[4] = len;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->cmd_flags |= REQ_FAILFAST;
+       rq->timeout = CLARIION_TIMEOUT;
+       rq->retries = CLARIION_RETRIES;
+
+       rq->sense = csdev->sense;
+       memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+       rq->sense_len = 0;
+
+       if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
+                                                       len, GFP_ATOMIC)) {
+               __blk_put_request(rq->q, rq);
+               return NULL;
+       }
+
+       return rq;
+}
+
+static int send_cmd(struct scsi_device *sdev, int cmd)
+{
+       struct request *rq = get_req(sdev, cmd);
+
+       if (!rq)
+               return SCSI_DH_RES_TEMP_UNAVAIL;
+
+       return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+       int result, done = 0;
+
+       result = send_cmd(sdev, INQUIRY);
+       result = sp_info_endio(sdev, result, 0, &done);
+       if (result || done)
+               goto done;
+
+       result = send_cmd(sdev, MODE_SELECT);
+       result = trespass_endio(sdev, result);
+       if (result)
+               goto done;
+
+       result = send_cmd(sdev, INQUIRY);
+       result = sp_info_endio(sdev, result, 1, NULL);
+done:
+       return result;
+}
+
+static int clariion_check_sense(struct scsi_device *sdev,
+                               struct scsi_sense_hdr *sense_hdr)
+{
+       switch (sense_hdr->sense_key) {
+       case NOT_READY:
+               if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
+                       /*
+                        * LUN Not Ready - Manual Intervention Required
+                        * indicates this is a passive path.
+                        *
+                        * FIXME: However, if this is seen and EVPD C0
+                        * indicates that this is due to a NDU in
+                        * progress, we should set FAIL_PATH too.
+                        * This indicates we might have to do a SCSI
+                        * inquiry in the end_io path. Ugh.
+                        *
+                        * Can return FAILED only when we want the error
+                        * recovery process to kick in.
+                        */
+                       return SUCCESS;
+               break;
+       case ILLEGAL_REQUEST:
+               if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
+                       /*
+                        * An array based copy is in progress. Do not
+                        * fail the path, do not bypass to another PG,
+                        * do not retry. Fail the IO immediately.
+                        * (Actually this is the same conclusion as in
+                        * the default handler, but let's make sure.)
+                        *
+                        * Can return FAILED only when we want the error
+                        * recovery process to kick in.
+                        */
+                       return SUCCESS;
+               break;
+       case UNIT_ATTENTION:
+               if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+                       /*
+                        * Unit Attention Code. This is the first IO
+                        * to the new path, so just retry.
+                        */
+                       return NEEDS_RETRY;
+               break;
+       }
+
+       /* success just means we do not care what scsi-ml does */
+       return SUCCESS;
+}
+
+static const struct {
+       char *vendor;
+       char *model;
+} clariion_dev_list[] = {
+       {"DGC", "RAID"},
+       {"DGC", "DISK"},
+       {NULL, NULL},
+};
+
+static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler clariion_dh = {
+       .name           = CLARIION_NAME,
+       .module         = THIS_MODULE,
+       .nb.notifier_call = clariion_bus_notify,
+       .check_sense    = clariion_check_sense,
+       .activate       = clariion_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int clariion_bus_notify(struct notifier_block *nb,
+                               unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct scsi_dh_data *scsi_dh_data;
+       struct clariion_dh_data *h;
+       int i, found = 0;
+       unsigned long flags;
+
+       if (action == BUS_NOTIFY_ADD_DEVICE) {
+               for (i = 0; clariion_dev_list[i].vendor; i++) {
+                       if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+                                    strlen(clariion_dev_list[i].vendor)) &&
+                           !strncmp(sdev->model, clariion_dev_list[i].model,
+                                    strlen(clariion_dev_list[i].model))) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found)
+                       goto out;
+
+               scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+                               + sizeof(*h) , GFP_KERNEL);
+               if (!scsi_dh_data) {
+                       sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+                                   CLARIION_NAME);
+                       goto out;
+               }
+
+               scsi_dh_data->scsi_dh = &clariion_dh;
+               h = (struct clariion_dh_data *) scsi_dh_data->buf;
+               h->default_sp = CLARIION_UNBOUND_LU;
+               h->current_sp = CLARIION_UNBOUND_LU;
+
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               sdev->scsi_dh_data = scsi_dh_data;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+               sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
+               try_module_get(THIS_MODULE);
+
+       } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+               if (sdev->scsi_dh_data == NULL ||
+                               sdev->scsi_dh_data->scsi_dh != &clariion_dh)
+                       goto out;
+
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               scsi_dh_data = sdev->scsi_dh_data;
+               sdev->scsi_dh_data = NULL;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+               sdev_printk(KERN_NOTICE, sdev, "Detached %s.\n",
+                           CLARIION_NAME);
+
+               kfree(scsi_dh_data);
+               module_put(THIS_MODULE);
+       }
+
+out:
+       return 0;
+}
+
+static int __init clariion_init(void)
+{
+       int r;
+
+       r = scsi_register_device_handler(&clariion_dh);
+       if (r != 0)
+               printk(KERN_ERR "Failed to register scsi device handler.");
+       return r;
+}
+
+static void __exit clariion_exit(void)
+{
+       scsi_unregister_device_handler(&clariion_dh);
+}
+
+module_init(clariion_init);
+module_exit(clariion_exit);
+
+MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644 (file)
index 0000000..12ceab7
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
+ * upgraded.
+ *
+ * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define HP_SW_NAME     "hp_sw"
+
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+struct hp_sw_dh_data {
+       unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+       int retries;
+};
+
+static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
+{
+       struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+       BUG_ON(scsi_dh_data == NULL);
+       return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
+}
+
+static int hp_sw_done(struct scsi_device *sdev)
+{
+       struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+       struct scsi_sense_hdr sshdr;
+       int rc;
+
+       sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+
+       rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+       if (!rc)
+               goto done;
+       switch (sshdr.sense_key) {
+       case NOT_READY:
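+               /* 04h/03h: LUN not ready, manual intervention required */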
+               if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+                       rc = SCSI_DH_RETRY;
+                       h->retries++;
+                       break;
+               }
+               /* fall through */
+       default:
+               h->retries++;
+               rc = SCSI_DH_IMM_RETRY;
+       }
+
+done:
+       if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
+               h->retries = 0;
+       else if (h->retries > HP_SW_RETRIES) {
+               h->retries = 0;
+               rc = SCSI_DH_IO;
+       }
+       return rc;
+}
+
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+       struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+       struct request *req;
+       int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+
+       req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+       if (!req)
+               goto done;
+
+       sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+
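+       /*
+        * A START STOP UNIT with the start bit set is what brings the
+        * MSA's standby path to the active state.
+        */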
+       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       req->cmd_flags |= REQ_FAILFAST;
+       req->cmd_len = COMMAND_SIZE(START_STOP);
+       memset(req->cmd, 0, MAX_COMMAND_SIZE);
+       req->cmd[0] = START_STOP;
+       req->cmd[4] = 1;        /* Start spin cycle */
+       req->timeout = HP_SW_TIMEOUT;
+       req->sense = h->sense;
+       memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+       req->sense_len = 0;
+
+       ret = blk_execute_rq(req->q, NULL, req, 1);
+       if (!ret) /* SUCCESS */
+               ret = hp_sw_done(sdev);
+       else
+               ret = SCSI_DH_IO;
+done:
+       return ret;
+}
+
+static const struct {
+       char *vendor;
+       char *model;
+} hp_sw_dh_data_list[] = {
+       {"COMPAQ", "MSA"},
+       {"HP", "HSV"},
+       {"DEC", "HSG80"},
+       {NULL, NULL},
+};
+
+static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler hp_sw_dh = {
+       .name           = HP_SW_NAME,
+       .module         = THIS_MODULE,
+       .nb.notifier_call = hp_sw_bus_notify,
+       .activate       = hp_sw_activate,
+};
+
+static int hp_sw_bus_notify(struct notifier_block *nb,
+                           unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct scsi_dh_data *scsi_dh_data;
+       int i, found = 0;
+       unsigned long flags;
+
+       if (action == BUS_NOTIFY_ADD_DEVICE) {
+               for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+                       if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+                                    strlen(hp_sw_dh_data_list[i].vendor)) &&
+                           !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+                                    strlen(hp_sw_dh_data_list[i].model))) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found)
+                       goto out;
+
+               scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+                               + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+               if (!scsi_dh_data) {
+                       sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+                                   HP_SW_NAME);
+                       goto out;
+               }
+
+               scsi_dh_data->scsi_dh = &hp_sw_dh;
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               sdev->scsi_dh_data = scsi_dh_data;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+               try_module_get(THIS_MODULE);
+
+               sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
+       } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+               if (sdev->scsi_dh_data == NULL ||
+                               sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
+                       goto out;
+
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               scsi_dh_data = sdev->scsi_dh_data;
+               sdev->scsi_dh_data = NULL;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+               module_put(THIS_MODULE);
+
+               sdev_printk(KERN_NOTICE, sdev, "Detached %s.\n", HP_SW_NAME);
+
+               kfree(scsi_dh_data);
+       }
+
+out:
+       return 0;
+}
+
+static int __init hp_sw_init(void)
+{
+       return scsi_register_device_handler(&hp_sw_dh);
+}
+
+static void __exit hp_sw_exit(void)
+{
+       scsi_unregister_device_handler(&hp_sw_dh);
+}
+
+module_init(hp_sw_init);
+module_exit(hp_sw_exit);
+
+MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644 (file)
index 0000000..6fff077
--- /dev/null
@@ -0,0 +1,691 @@
+/*
+ * Engenio/LSI RDAC SCSI Device Handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define RDAC_NAME "rdac"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS      0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_TIMEOUT   (60 * HZ)
+#define RDAC_RETRIES   3
+
+struct rdac_mode_6_hdr {
+       u8      data_len;
+       u8      medium_type;
+       u8      device_params;
+       u8      block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+       u16     data_len;
+       u8      medium_type;
+       u8      device_params;
+       u16     reserved;
+       u16     block_desc_len;
+};
+
+struct rdac_mode_common {
+       u8      controller_serial[16];
+       u8      alt_controller_serial[16];
+       u8      rdac_mode[2];
+       u8      alt_rdac_mode[2];
+       u8      quiescence_timeout;
+       u8      rdac_options;
+};
+
+struct rdac_pg_legacy {
+       struct rdac_mode_6_hdr hdr;
+       u8      page_code;
+       u8      page_len;
+       struct rdac_mode_common common;
+#define MODE6_MAX_LUN  32
+       u8      lun_table[MODE6_MAX_LUN];
+       u8      reserved2[32];
+       u8      reserved3;
+       u8      reserved4;
+};
+
+struct rdac_pg_expanded {
+       struct rdac_mode_10_hdr hdr;
+       u8      page_code;
+       u8      subpage_code;
+       u8      page_len[2];
+       struct rdac_mode_common common;
+       u8      lun_table[256];
+       u8      reserved3;
+       u8      reserved4;
+};
+
+struct c9_inquiry {
+       u8      peripheral_info;
+       u8      page_code;      /* 0xC9 */
+       u8      reserved1;
+       u8      page_len;
+       u8      page_id[4];     /* "vace" */
+       u8      avte_cvp;
+       u8      path_prio;
+       u8      reserved2[38];
+};
+
+#define SUBSYS_ID_LEN  16
+#define SLOT_ID_LEN    2
+
+struct c4_inquiry {
+       u8      peripheral_info;
+       u8      page_code;      /* 0xC4 */
+       u8      reserved1;
+       u8      page_len;
+       u8      page_id[4];     /* "subs" */
+       u8      subsys_id[SUBSYS_ID_LEN];
+       u8      revision[4];
+       u8      slot_id[SLOT_ID_LEN];
+       u8      reserved[2];
+};
+
+struct rdac_controller {
+       u8                      subsys_id[SUBSYS_ID_LEN];
+       u8                      slot_id[SLOT_ID_LEN];
+       int                     use_ms10;
+       struct kref             kref;
+       struct list_head        node; /* list of all controllers */
+       union                   {
+               struct rdac_pg_legacy legacy;
+               struct rdac_pg_expanded expanded;
+       } mode_select;
+};
+struct c8_inquiry {
+       u8      peripheral_info;
+       u8      page_code; /* 0xC8 */
+       u8      reserved1;
+       u8      page_len;
+       u8      page_id[4]; /* "edid" */
+       u8      reserved2[3];
+       u8      vol_uniq_id_len;
+       u8      vol_uniq_id[16];
+       u8      vol_user_label_len;
+       u8      vol_user_label[60];
+       u8      array_uniq_id_len;
+       u8      array_unique_id[16];
+       u8      array_user_label_len;
+       u8      array_user_label[60];
+       u8      lun[8];
+};
+
+struct c2_inquiry {
+       u8      peripheral_info;
+       u8      page_code;      /* 0xC2 */
+       u8      reserved1;
+       u8      page_len;
+       u8      page_id[4];     /* "swr4" */
+       u8      sw_version[3];
+       u8      sw_date[3];
+       u8      features_enabled;
+       u8      max_lun_supported;
+       u8      partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_dh_data {
+       struct rdac_controller  *ctlr;
+#define UNINITIALIZED_LUN      (1 << 8)
+       unsigned                lun;
+#define RDAC_STATE_ACTIVE      0
+#define RDAC_STATE_PASSIVE     1
+       unsigned char           state;
+       unsigned char           sense[SCSI_SENSE_BUFFERSIZE];
+       union                   {
+               struct c2_inquiry c2;
+               struct c4_inquiry c4;
+               struct c8_inquiry c8;
+               struct c9_inquiry c9;
+       } inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
+{
+       struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+       BUG_ON(scsi_dh_data == NULL);
+       return ((struct rdac_dh_data *) scsi_dh_data->buf);
+}
+
+static struct request *get_rdac_req(struct scsi_device *sdev,
+                       void *buffer, unsigned buflen, int rw)
+{
+       struct request *rq;
+       struct request_queue *q = sdev->request_queue;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
+       rq = blk_get_request(q, rw, GFP_KERNEL);
+
+       if (!rq) {
+               sdev_printk(KERN_INFO, sdev,
+                               "get_rdac_req: blk_get_request failed.\n");
+               return NULL;
+       }
+
+       if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+               blk_put_request(rq);
+               sdev_printk(KERN_INFO, sdev,
+                               "get_rdac_req: blk_rq_map_kern failed.\n");
+               return NULL;
+       }
+
+       memset(&rq->cmd, 0, BLK_MAX_CDB);
+       rq->sense = h->sense;
+       memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+       rq->sense_len = 0;
+
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+       rq->retries = RDAC_RETRIES;
+       rq->timeout = RDAC_TIMEOUT;
+
+       return rq;
+}
+
+static struct request *rdac_failover_get(struct scsi_device *sdev)
+{
+       struct request *rq;
+       struct rdac_mode_common *common;
+       unsigned data_size;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
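+       /*
+        * Build RDAC mode page 0x2c (redundant controller page).  The
+        * expanded form is sent with MODE SELECT(10) on arrays that
+        * report more than MODE6_MAX_LUN logical units.
+        */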
+       if (h->ctlr->use_ms10) {
+               struct rdac_pg_expanded *rdac_pg;
+
+               data_size = sizeof(struct rdac_pg_expanded);
+               rdac_pg = &h->ctlr->mode_select.expanded;
+               memset(rdac_pg, 0, data_size);
+               common = &rdac_pg->common;
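+               /* adding 0x40 sets the SPF bit, as a subpage code follows */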
+               rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+               rdac_pg->subpage_code = 0x1;
+               rdac_pg->page_len[0] = 0x01;
+               rdac_pg->page_len[1] = 0x28;
+               rdac_pg->lun_table[h->lun] = 0x81;
+       } else {
+               struct rdac_pg_legacy *rdac_pg;
+
+               data_size = sizeof(struct rdac_pg_legacy);
+               rdac_pg = &h->ctlr->mode_select.legacy;
+               memset(rdac_pg, 0, data_size);
+               common = &rdac_pg->common;
+               rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+               rdac_pg->page_len = 0x68;
+               rdac_pg->lun_table[h->lun] = 0x81;
+       }
+       common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+       common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+       common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+       /* get request for block layer packet command */
+       rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
+       if (!rq)
+               return NULL;
+
+       /* Prepare the command. */
+       if (h->ctlr->use_ms10) {
+               rq->cmd[0] = MODE_SELECT_10;
+               rq->cmd[7] = data_size >> 8;
+               rq->cmd[8] = data_size & 0xff;
+       } else {
+               rq->cmd[0] = MODE_SELECT;
+               rq->cmd[4] = data_size;
+       }
+       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+       return rq;
+}
+
+static void release_controller(struct kref *kref)
+{
+       struct rdac_controller *ctlr;
+       ctlr = container_of(kref, struct rdac_controller, kref);
+
+       spin_lock(&list_lock);
+       list_del(&ctlr->node);
+       spin_unlock(&list_lock);
+       kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+       struct rdac_controller *ctlr, *tmp;
+
+       spin_lock(&list_lock);
+
+       list_for_each_entry(tmp, &ctlr_list, node) {
+               if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+                         (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+                       kref_get(&tmp->kref);
+                       spin_unlock(&list_lock);
+                       return tmp;
+               }
+       }
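+       /* not found: allocate a new one (GFP_ATOMIC since list_lock is held) */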
+       ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+       if (!ctlr)
+               goto done;
+
+       /* initialize fields of controller */
+       memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+       memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+       kref_init(&ctlr->kref);
+       ctlr->use_ms10 = -1;
+       list_add(&ctlr->node, &ctlr_list);
+done:
+       spin_unlock(&list_lock);
+       return ctlr;
+}
+
+static int submit_inquiry(struct scsi_device *sdev, int page_code,
+               unsigned int len)
+{
+       struct request *rq;
+       struct request_queue *q = sdev->request_queue;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+       rq = get_rdac_req(sdev, &h->inq, len, READ);
+       if (!rq)
+               goto done;
+
+       /* Prepare the command. */
+       rq->cmd[0] = INQUIRY;
+       rq->cmd[1] = 1;
+       rq->cmd[2] = page_code;
+       rq->cmd[4] = len;
+       rq->cmd_len = COMMAND_SIZE(INQUIRY);
+       err = blk_execute_rq(q, NULL, rq, 1);
+       if (err == -EIO)
+               err = SCSI_DH_IO;
+done:
+       return err;
+}
+
+static int get_lun(struct scsi_device *sdev)
+{
+       int err;
+       struct c8_inquiry *inqp;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
+       err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+       if (err == SCSI_DH_OK) {
+               inqp = &h->inq.c8;
+               h->lun = inqp->lun[7]; /* currently it uses only one byte */
+       }
+       return err;
+}
+
+#define RDAC_OWNED     0
+#define RDAC_UNOWNED   1
+#define RDAC_FAILED    2
+static int check_ownership(struct scsi_device *sdev)
+{
+       int err;
+       struct c9_inquiry *inqp;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
+       err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+       if (err == SCSI_DH_OK) {
+               err = RDAC_UNOWNED;
+               inqp = &h->inq.c9;
+               /*
+                * If in AVT mode or if the path already owns the LUN,
+                * return RDAC_OWNED;
+                */
+               if (((inqp->avte_cvp >> 7) == 0x1) ||
+                                ((inqp->avte_cvp & 0x1) != 0))
+                       err = RDAC_OWNED;
+       } else
+               err = RDAC_FAILED;
+       return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev)
+{
+       int err;
+       struct c4_inquiry *inqp;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
+       err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+       if (err == SCSI_DH_OK) {
+               inqp = &h->inq.c4;
+               h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+               if (!h->ctlr)
+                       err = SCSI_DH_RES_TEMP_UNAVAIL;
+       }
+       return err;
+}
+
+static int set_mode_select(struct scsi_device *sdev)
+{
+       int err;
+       struct c2_inquiry *inqp;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+
+       err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+       if (err == SCSI_DH_OK) {
+               inqp = &h->inq.c2;
+               /*
+                * If more than MODE6_MAX_LUN luns are supported, use
+                * mode select 10
+                */
+               if (inqp->max_lun_supported >= MODE6_MAX_LUN)
+                       h->ctlr->use_ms10 = 1;
+               else
+                       h->ctlr->use_ms10 = 0;
+       }
+       return err;
+}
+
+static int mode_select_handle_sense(struct scsi_device *sdev)
+{
+       struct scsi_sense_hdr sense_hdr;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       int sense, err = SCSI_DH_IO, ret;
+
+       ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+       if (!ret)
+               goto done;
+
+       err = SCSI_DH_OK;
+       sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+                       sense_hdr.ascq;
+       /* If it is retryable failure, submit the c9 inquiry again */
+       if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+                           sense == 0x62900) {
+               /* 0x59136    - Command lock contention
+                * 0x[6b]8b02 - Quiescence in progress or achieved
+                * 0x62900    - Power On, Reset, or Bus Device Reset
+                */
+               err = SCSI_DH_RETRY;
+       }
+
+       if (sense)
+               sdev_printk(KERN_INFO, sdev,
+                       "MODE_SELECT failed with sense 0x%x.\n", sense);
+done:
+       return err;
+}
+
+static int send_mode_select(struct scsi_device *sdev)
+{
+       struct request *rq;
+       struct request_queue *q = sdev->request_queue;
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+       rq = rdac_failover_get(sdev);
+       if (!rq)
+               goto done;
+
+       sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+
+       err = blk_execute_rq(q, NULL, rq, 1);
+       if (err != SCSI_DH_OK)
+               err = mode_select_handle_sense(sdev);
+       if (err == SCSI_DH_OK)
+               h->state = RDAC_STATE_ACTIVE;
+done:
+       return err;
+}
+
+static int rdac_activate(struct scsi_device *sdev)
+{
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       int err = SCSI_DH_OK;
+
+       if (h->lun == UNINITIALIZED_LUN) {
+               err = get_lun(sdev);
+               if (err != SCSI_DH_OK)
+                       goto done;
+       }
+
+       err = check_ownership(sdev);
+       switch (err) {
+       case RDAC_UNOWNED:
+               break;
+       case RDAC_OWNED:
+               err = SCSI_DH_OK;
+               goto done;
+       case RDAC_FAILED:
+       default:
+               err = SCSI_DH_IO;
+               goto done;
+       }
+
+       if (!h->ctlr) {
+               err = initialize_controller(sdev);
+               if (err != SCSI_DH_OK)
+                       goto done;
+       }
+
+       if (h->ctlr->use_ms10 == -1) {
+               err = set_mode_select(sdev);
+               if (err != SCSI_DH_OK)
+                       goto done;
+       }
+
+       err = send_mode_select(sdev);
+done:
+       return err;
+}
+
+static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       int ret = BLKPREP_OK;
+
+       if (h->state != RDAC_STATE_ACTIVE) {
+               ret = BLKPREP_KILL;
+               req->cmd_flags |= REQ_QUIET;
+       }
+       return ret;
+
+}
+
+static int rdac_check_sense(struct scsi_device *sdev,
+                               struct scsi_sense_hdr *sense_hdr)
+{
+       struct rdac_dh_data *h = get_rdac_data(sdev);
+       switch (sense_hdr->sense_key) {
+       case NOT_READY:
+               if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
+                       /* LUN Not Ready - Storage firmware incompatible
+                        * Manual code synchronisation required.
+                        *
+                        * Nothing we can do here. Try to bypass the path.
+                        */
+                       return SUCCESS;
+               if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
+                       /* LUN Not Ready - Quiescence in progress
+                        *
+                        * Just retry and wait.
+                        */
+                       return NEEDS_RETRY;
+               break;
+       case ILLEGAL_REQUEST:
+               if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
+                       /* Invalid Request - Current Logical Unit Ownership.
+                        * Controller is not the current owner of the LUN.
+                        * Fail the path so that the other path can be used.
+                        */
+                       h->state = RDAC_STATE_PASSIVE;
+                       return SUCCESS;
+               }
+               break;
+       case UNIT_ATTENTION:
+               if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+                       /*
+                        * Power On, Reset, or Bus Device Reset, just retry.
+                        */
+                       return NEEDS_RETRY;
+               break;
+       }
+       /* not handled here - let scsi-ml apply its default handling */
+       return SCSI_RETURN_NOT_HANDLED;
+}
+
+static const struct {
+       char *vendor;
+       char *model;
+} rdac_dev_list[] = {
+       {"IBM", "1722"},
+       {"IBM", "1724"},
+       {"IBM", "1726"},
+       {"IBM", "1742"},
+       {"IBM", "1814"},
+       {"IBM", "1815"},
+       {"IBM", "1818"},
+       {"IBM", "3526"},
+       {"SGI", "TP9400"},
+       {"SGI", "TP9500"},
+       {"SGI", "IS"},
+       {"STK", "OPENstorage D280"},
+       {"SUN", "CSM200_R"},
+       {"SUN", "LCSM100_F"},
+       {NULL, NULL},
+};
+
+static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler rdac_dh = {
+       .name = RDAC_NAME,
+       .module = THIS_MODULE,
+       .nb.notifier_call = rdac_bus_notify,
+       .prep_fn = rdac_prep_fn,
+       .check_sense = rdac_check_sense,
+       .activate = rdac_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int rdac_bus_notify(struct notifier_block *nb,
+                           unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct scsi_dh_data *scsi_dh_data;
+       struct rdac_dh_data *h;
+       int i, found = 0;
+       unsigned long flags;
+
+       if (action == BUS_NOTIFY_ADD_DEVICE) {
+               for (i = 0; rdac_dev_list[i].vendor; i++) {
+                       if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+                                    strlen(rdac_dev_list[i].vendor)) &&
+                           !strncmp(sdev->model, rdac_dev_list[i].model,
+                                    strlen(rdac_dev_list[i].model))) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found)
+                       goto out;
+
+               scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+                               + sizeof(*h) , GFP_KERNEL);
+               if (!scsi_dh_data) {
+                       sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+                                   RDAC_NAME);
+                       goto out;
+               }
+
+               scsi_dh_data->scsi_dh = &rdac_dh;
+               h = (struct rdac_dh_data *) scsi_dh_data->buf;
+               h->lun = UNINITIALIZED_LUN;
+               h->state = RDAC_STATE_ACTIVE;
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               sdev->scsi_dh_data = scsi_dh_data;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+               try_module_get(THIS_MODULE);
+
+               sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
+
+       } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+               if (sdev->scsi_dh_data == NULL ||
+                               sdev->scsi_dh_data->scsi_dh != &rdac_dh)
+                       goto out;
+
+               spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+               scsi_dh_data = sdev->scsi_dh_data;
+               sdev->scsi_dh_data = NULL;
+               spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+               h = (struct rdac_dh_data *) scsi_dh_data->buf;
+               if (h->ctlr)
+                       kref_put(&h->ctlr->kref, release_controller);
+               kfree(scsi_dh_data);
+               module_put(THIS_MODULE);
+               sdev_printk(KERN_NOTICE, sdev, "Detached %s.\n", RDAC_NAME);
+       }
+
+out:
+       return 0;
+}
+
+static int __init rdac_init(void)
+{
+       int r;
+
+       r = scsi_register_device_handler(&rdac_dh);
+       if (r != 0)
+               printk(KERN_ERR "Failed to register scsi device handler.");
+       return r;
+}
+
+static void __exit rdac_exit(void)
+{
+       scsi_unregister_device_handler(&rdac_dh);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
index 59fbef0..62a4618 100644 (file)
@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
        /* Now reset the ESP chip */
        scsi_esp_cmd(esp, ESP_CMD_RC);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+       if (esp->rev == FAST)
+               esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 
-       /* Reload the configuration registers */
-       esp_write8(esp->cfact, ESP_CFACT);
-
-       esp->prev_stp = 0;
-       esp_write8(esp->prev_stp, ESP_STP);
-
-       esp->prev_soff = 0;
-       esp_write8(esp->prev_soff, ESP_SOFF);
-
-       esp_write8(esp->neg_defp, ESP_TIMEO);
-
        /* This is the only point at which it is reliable to read
         * the ID-code for a fast ESP chip variants.
         */
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
                break;
        }
 
+       /* Reload the configuration registers */
+       esp_write8(esp->cfact, ESP_CFACT);
+
+       esp->prev_stp = 0;
+       esp_write8(esp->prev_stp, ESP_STP);
+
+       esp->prev_soff = 0;
+       esp_write8(esp->prev_soff, ESP_SOFF);
+
+       esp_write8(esp->neg_defp, ESP_TIMEO);
+
        /* Eat any bitrot in the chip */
        esp_read8(ESP_INTRPT);
        udelay(100);
index c6457bf..35cd892 100644 (file)
@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
        kfree(shost);
 }
 
-struct device_type scsi_host_type = {
+static struct device_type scsi_host_type = {
        .name =         "scsi_host",
        .release =      scsi_host_dev_release,
 };
index 6ac0633..a423d96 100644 (file)
@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
 ibmvscsic-$(CONFIG_PPC_PSERIES)        += rpa_vscsi.o 
 
 obj-$(CONFIG_SCSI_IBMVSCSIS)   += ibmvstgt.o
+obj-$(CONFIG_SCSI_IBMVFC)      += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644 (file)
index 0000000..eb702b9
--- /dev/null
@@ -0,0 +1,3910 @@
+/*
+ * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <linux/stringify.h>
+#include <asm/firmware.h>
+#include <asm/irq.h>
+#include <asm/vio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include "ibmvfc.h"
+
+static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
+static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
+static unsigned int max_lun = IBMVFC_MAX_LUN;
+static unsigned int max_targets = IBMVFC_MAX_TARGETS;
+static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
+static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
+static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
+static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static LIST_HEAD(ibmvfc_head);
+static DEFINE_SPINLOCK(ibmvfc_driver_lock);
+static struct scsi_transport_template *ibmvfc_transport_template;
+
+MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
+MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+
+module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
+                "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
+module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_timeout,
+                "Default timeout in seconds for initialization and EH commands. "
+                "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
+module_param_named(max_requests, max_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
+                "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_lun, max_lun, uint, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
+                "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
+module_param_named(max_targets, max_targets, uint, S_IRUGO);
+MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
+                "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
+module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
+                "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
+module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable driver debug information. "
+                "[Default=" __stringify(IBMVFC_DEBUG) "]");
+module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
+                "transport should insulate the loss of a remote port. Once this "
+                "value is exceeded, the scsi target is removed. "
+                "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
+module_param_named(log_level, log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver. "
+                "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+
+static const struct {
+       u16 status;
+       u16 error;
+       u8 result;
+       u8 retry;
+       int log;
+       char *name;
+} cmd_status [] = {
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
+
+       { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+       { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
+
+       { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
+       { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
+       { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
+       { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
+       { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
+       { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
+       { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
+       { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
+       { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
+       { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
+       { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
+
+       { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+};
+
+static void ibmvfc_npiv_login(struct ibmvfc_host *);
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+
+static const char *unknown_error = "unknown error";
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_trc_start - Log a start trace entry
+ * @evt:               ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_start(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+       struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+       struct ibmvfc_trace_entry *entry;
+
+       entry = &vhost->trace[vhost->trace_index++];
+       entry->evt = evt;
+       entry->time = jiffies;
+       entry->fmt = evt->crq.format;
+       entry->type = IBMVFC_TRC_START;
+
+       switch (entry->fmt) {
+       case IBMVFC_CMD_FORMAT:
+               entry->op_code = vfc_cmd->iu.cdb[0];
+               entry->scsi_id = vfc_cmd->tgt_scsi_id;
+               entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+               entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+               entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
+               break;
+       case IBMVFC_MAD_FORMAT:
+               entry->op_code = mad->opcode;
+               break;
+       default:
+               break;
+       };
+}
+
+/**
+ * ibmvfc_trc_end - Log an end trace entry
+ * @evt:               ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_end(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+       struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
+       struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+
+       entry->evt = evt;
+       entry->time = jiffies;
+       entry->fmt = evt->crq.format;
+       entry->type = IBMVFC_TRC_END;
+
+       switch (entry->fmt) {
+       case IBMVFC_CMD_FORMAT:
+               entry->op_code = vfc_cmd->iu.cdb[0];
+               entry->scsi_id = vfc_cmd->tgt_scsi_id;
+               entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+               entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+               entry->u.end.status = vfc_cmd->status;
+               entry->u.end.error = vfc_cmd->error;
+               entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
+               entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
+               entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+               break;
+       case IBMVFC_MAD_FORMAT:
+               entry->op_code = mad->opcode;
+               entry->u.end.status = mad->status;
+               break;
+       default:
+               break;
+
+       };
+}
+
+#else
+#define ibmvfc_trc_start(evt) do { } while (0)
+#define ibmvfc_trc_end(evt) do { } while (0)
+#endif
+
+/**
+ * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
+ * @status:            status / error class
+ * @error:             error
+ *
+ * Return value:
+ *     index into cmd_status / -EINVAL on failure
+ **/
+static int ibmvfc_get_err_index(u16 status, u16 error)
+{
+       int i;
+
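+       /* status is matched as a bit mask; error must match exactly */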
+       for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
+               if ((cmd_status[i].status & status) == cmd_status[i].status &&
+                   cmd_status[i].error == error)
+                       return i;
+
+       return -EINVAL;
+}
+
+/**
+ * ibmvfc_get_cmd_error - Find the error description for the fcp response
+ * @status:            status / error class
+ * @error:             error
+ *
+ * Return value:
+ *     error description string
+ **/
+static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
+{
+       int rc = ibmvfc_get_err_index(status, error);
+       if (rc >= 0)
+               return cmd_status[rc].name;
+       return unknown_error;
+}
+
+/**
+ * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vfc_cmd:   ibmvfc command struct
+ *
+ * Return value:
+ *     SCSI result value to return for completed command
+ **/
+static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+{
+       int err;
+       struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+       int fc_rsp_len = rsp->fcp_rsp_len;
+
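+       /* a valid FCP_RSP_LEN is 0, 4 or 8 bytes; anything else is an error */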
+       if ((rsp->flags & FCP_RSP_LEN_VALID) &&
+           ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+            rsp->data.info.rsp_code))
+               return DID_ERROR << 16;
+
+       if (!vfc_cmd->status) {
+               if (rsp->flags & FCP_RESID_OVER)
+                       return rsp->scsi_status | (DID_ERROR << 16);
+               else
+                       return rsp->scsi_status | (DID_OK << 16);
+       }
+
+       err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+       if (err >= 0)
+               return rsp->scsi_status | (cmd_status[err].result << 16);
+       return rsp->scsi_status | (DID_ERROR << 16);
+}
+
+/**
+ * ibmvfc_retry_cmd - Determine if error status is retryable
+ * @status:            status / error class
+ * @error:             error
+ *
+ * Return value:
+ *     1 if error should be retried / 0 if it should not
+ **/
+static int ibmvfc_retry_cmd(u16 status, u16 error)
+{
+       int rc = ibmvfc_get_err_index(status, error);
+
+       if (rc >= 0)
+               return cmd_status[rc].retry;
+       return 1;
+}
+
+static const char *unknown_fc_explain = "unknown fc explain";
+
+static const struct {
+       u16 fc_explain;
+       char *name;
+} ls_explain [] = {
+       { 0x00, "no additional explanation" },
+       { 0x01, "service parameter error - options" },
+       { 0x03, "service parameter error - initiator control" },
+       { 0x05, "service parameter error - recipient control" },
+       { 0x07, "service parameter error - received data field size" },
+       { 0x09, "service parameter error - concurrent seq" },
+       { 0x0B, "service parameter error - credit" },
+       { 0x0D, "invalid N_Port/F_Port_Name" },
+       { 0x0E, "invalid node/Fabric Name" },
+       { 0x0F, "invalid common service parameters" },
+       { 0x11, "invalid association header" },
+       { 0x13, "association header required" },
+       { 0x15, "invalid originator S_ID" },
+       { 0x17, "invalid OX_ID-RX_ID combination" },
+       { 0x19, "command (request) already in progress" },
+       { 0x1E, "N_Port Login requested" },
+       { 0x1F, "Invalid N_Port_ID" },
+};
+
+static const struct {
+       u16 fc_explain;
+       char *name;
+} gs_explain [] = {
+       { 0x00, "no additional explanation" },
+       { 0x01, "port identifier not registered" },
+       { 0x02, "port name not registered" },
+       { 0x03, "node name not registered" },
+       { 0x04, "class of service not registered" },
+       { 0x06, "initial process associator not registered" },
+       { 0x07, "FC-4 TYPEs not registered" },
+       { 0x08, "symbolic port name not registered" },
+       { 0x09, "symbolic node name not registered" },
+       { 0x0A, "port type not registered" },
+       { 0xF0, "authorization exception" },
+       { 0xF1, "authentication exception" },
+       { 0xF2, "data base full" },
+       { 0xF3, "data base empty" },
+       { 0xF4, "processing request" },
+       { 0xF5, "unable to verify connection" },
+       { 0xF6, "devices not in a common zone" },
+};
+
+/**
+ * ibmvfc_get_ls_explain - Return the FC Explain description text
+ * @status:    FC Explain status
+ *
+ * Returns:
+ *     error string
+ **/
+static const char *ibmvfc_get_ls_explain(u16 status)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
+               if (ls_explain[i].fc_explain == status)
+                       return ls_explain[i].name;
+
+       return unknown_fc_explain;
+}
+
+/**
+ * ibmvfc_get_gs_explain - Return the FC Explain description text
+ * @status:    FC Explain status
+ *
+ * Returns:
+ *     error string
+ **/
+static const char *ibmvfc_get_gs_explain(u16 status)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
+               if (gs_explain[i].fc_explain == status)
+                       return gs_explain[i].name;
+
+       return unknown_fc_explain;
+}
+
+static const struct {
+       enum ibmvfc_fc_type fc_type;
+       char *name;
+} fc_type [] = {
+       { IBMVFC_FABRIC_REJECT, "fabric reject" },
+       { IBMVFC_PORT_REJECT, "port reject" },
+       { IBMVFC_LS_REJECT, "ELS reject" },
+       { IBMVFC_FABRIC_BUSY, "fabric busy" },
+       { IBMVFC_PORT_BUSY, "port busy" },
+       { IBMVFC_BASIC_REJECT, "basic reject" },
+};
+
+static const char *unknown_fc_type = "unknown fc type";
+
+/**
+ * ibmvfc_get_fc_type - Return the FC Type description text
+ * @status:    FC Type error status
+ *
+ * Returns:
+ *     error string
+ **/
+static const char *ibmvfc_get_fc_type(u16 status)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fc_type); i++)
+               if (fc_type[i].fc_type == status)
+                       return fc_type[i].name;
+
+       return unknown_fc_type;
+}
+
+/**
+ * ibmvfc_set_tgt_action - Set the next init action for the target
+ * @tgt:               ibmvfc target struct
+ * @action:            action to perform
+ *
+ **/
+static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+                                 enum ibmvfc_target_action action)
+{
+       switch (tgt->action) {
+       case IBMVFC_TGT_ACTION_DEL_RPORT:
+               break;
+       default:
+               tgt->action = action;
+               break;
+       }
+}
+
+/**
+ * ibmvfc_set_host_state - Set the state for the host
+ * @vhost:             ibmvfc host struct
+ * @state:             state to set host to
+ *
+ * Returns:
+ *     0 if state changed / non-zero if not changed
+ **/
+static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
+                                 enum ibmvfc_host_state state)
+{
+       int rc = 0;
+
+       switch (vhost->state) {
+       case IBMVFC_HOST_OFFLINE:
+               rc = -EINVAL;
+               break;
+       default:
+               vhost->state = state;
+               break;
+       }
+
+       return rc;
+}
+
+/**
+ * ibmvfc_set_host_action - Set the next init action for the host
+ * @vhost:             ibmvfc host struct
+ * @action:            action to perform
+ *
+ **/
+static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+                                  enum ibmvfc_host_action action)
+{
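+       /* QUERY, ALLOC_TGTS, INIT_WAIT and TGT_INIT are only accepted from
+        * specific current actions; all other actions are set unconditionally.
+        */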
+       switch (action) {
+       case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+               if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
+                       vhost->action = action;
+               break;
+       case IBMVFC_HOST_ACTION_INIT_WAIT:
+               if (vhost->action == IBMVFC_HOST_ACTION_INIT)
+                       vhost->action = action;
+               break;
+       case IBMVFC_HOST_ACTION_QUERY:
+               switch (vhost->action) {
+               case IBMVFC_HOST_ACTION_INIT_WAIT:
+               case IBMVFC_HOST_ACTION_NONE:
+               case IBMVFC_HOST_ACTION_TGT_ADD:
+                       vhost->action = action;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case IBMVFC_HOST_ACTION_TGT_INIT:
+               if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+                       vhost->action = action;
+               break;
+       case IBMVFC_HOST_ACTION_INIT:
+       case IBMVFC_HOST_ACTION_TGT_DEL:
+       case IBMVFC_HOST_ACTION_QUERY_TGTS:
+       case IBMVFC_HOST_ACTION_TGT_ADD:
+       case IBMVFC_HOST_ACTION_NONE:
+       default:
+               vhost->action = action;
+               break;
+       }
+}
+
+/**
+ * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
+ * @vhost:             ibmvfc host struct
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
+{
+       if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+               scsi_block_requests(vhost->host);
+               ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+       } else
+               vhost->reinit = 1;
+
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_link_down - Handle a link down event from the adapter
+ * @vhost:     ibmvfc host struct
+ * @state:     ibmvfc host state to enter
+ *
+ **/
+static void ibmvfc_link_down(struct ibmvfc_host *vhost,
+                            enum ibmvfc_host_state state)
+{
+       struct ibmvfc_target *tgt;
+
+       ENTER;
+       scsi_block_requests(vhost->host);
+       list_for_each_entry(tgt, &vhost->targets, queue)
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+       ibmvfc_set_host_state(vhost, state);
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+       vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
+       wake_up(&vhost->work_wait_q);
+       LEAVE;
+}
+
+/**
+ * ibmvfc_init_host - Start host initialization
+ * @vhost:             ibmvfc host struct
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_target *tgt;
+
+       if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+               if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+                       dev_err(vhost->dev,
+                               "Host initialization retries exceeded. Taking adapter offline\n");
+                       ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+                       return;
+               }
+       }
+
+       if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+               list_for_each_entry(tgt, &vhost->targets, queue)
+                       tgt->need_login = 1;
+               scsi_block_requests(vhost->host);
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+               vhost->job_step = ibmvfc_npiv_login;
+               wake_up(&vhost->work_wait_q);
+       }
+}
+
+/**
+ * ibmvfc_send_crq - Send a CRQ
+ * @vhost:     ibmvfc host struct
+ * @word1:     the first 64 bits of the data
+ * @word2:     the second 64 bits of the data
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
+{
+       struct vio_dev *vdev = to_vio_dev(vhost->dev);
+       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvfc_send_crq_init - Send a CRQ init message
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
+{
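+       /* 0xC0 in the first byte marks a CRQ initialization message; the
+        * second byte selects init (0x01) versus init complete (0x02).
+        */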
+       ibmvfc_dbg(vhost, "Sending CRQ init\n");
+       return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
+{
+       ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
+       return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
+ * @vhost:     ibmvfc host struct
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ **/
+static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
+{
+       long rc;
+       struct vio_dev *vdev = to_vio_dev(vhost->dev);
+       struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+       ibmvfc_dbg(vhost, "Releasing CRQ\n");
+       free_irq(vdev->irq, vhost);
+       do {
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       vhost->state = IBMVFC_NO_CRQ;
+       dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       free_page((unsigned long)crq->msgs);
+}
+
+/**
+ * ibmvfc_reenable_crq_queue - reenables the CRQ
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
+{
+       int rc;
+       struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+       /* Re-enable the CRQ */
+       do {
+               rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+       } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       if (rc)
+               dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvfc_reset_crq - resets a crq after a failure
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+{
+       int rc;
+       struct vio_dev *vdev = to_vio_dev(vhost->dev);
+       struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+       /* Close the CRQ */
+       do {
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       vhost->state = IBMVFC_NO_CRQ;
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+       /* Clean out the queue */
+       memset(crq->msgs, 0, PAGE_SIZE);
+       crq->cur = 0;
+
+       /* And re-open it again */
+       rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+                               crq->msg_token, PAGE_SIZE);
+
+       if (rc == H_CLOSED)
+               /* Adapter is good, but other end is not ready */
+               dev_warn(vhost->dev, "Partner adapter not ready\n");
+       else if (rc != 0)
+               dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvfc_valid_event - Determines if event is valid.
+ * @pool:      event_pool that contains the event
+ * @evt:       ibmvfc event to be checked for validity
+ *
+ * Return value:
+ *     1 if event is valid / 0 if event is not valid
+ **/
+static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
+                             struct ibmvfc_event *evt)
+{
+       int index = evt - pool->events;
+       if (index < 0 || index >= pool->size)   /* outside of bounds */
+               return 0;
+       if (evt != pool->events + index)        /* unaligned */
+               return 0;
+       return 1;
+}
+
+/**
+ * ibmvfc_free_event - Free the specified event
+ * @evt:       ibmvfc_event to be freed
+ *
+ **/
+static void ibmvfc_free_event(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_event_pool *pool = &vhost->pool;
+
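+       /* evt->free must go from 0 to 1 here; anything else is a double free */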
+       BUG_ON(!ibmvfc_valid_event(pool, evt));
+       BUG_ON(atomic_inc_return(&evt->free) != 1);
+       list_add_tail(&evt->queue, &vhost->free);
+}
+
+/**
+ * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
+ * @evt:       ibmvfc event struct
+ *
+ * This function does not setup any error status, that must be done
+ * before this function gets called.
+ **/
+static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
+{
+       struct scsi_cmnd *cmnd = evt->cmnd;
+
+       if (cmnd) {
+               scsi_dma_unmap(cmnd);
+               cmnd->scsi_done(cmnd);
+       }
+
+       ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_fail_request - Fail request with specified error code
+ * @evt:               ibmvfc event struct
+ * @error_code:        error code to fail request with
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
+{
+       if (evt->cmnd) {
+               evt->cmnd->result = (error_code << 16);
+               evt->done = ibmvfc_scsi_eh_done;
+       } else
+               evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
+
+       list_del(&evt->queue);
+       del_timer(&evt->timer);
+       ibmvfc_trc_end(evt);
+       evt->done(evt);
+}
+
+/**
+ * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
+ * @vhost:             ibmvfc host struct
+ * @error_code:        error code to fail requests with
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
+{
+       struct ibmvfc_event *evt, *pos;
+
+       ibmvfc_dbg(vhost, "Purging all requests\n");
+       list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+               ibmvfc_fail_request(evt, error_code);
+}
+
+/**
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * @vhost:     struct ibmvfc host to reset
+ **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+       int rc;
+
+       scsi_block_requests(vhost->host);
+       ibmvfc_purge_requests(vhost, DID_ERROR);
+       if ((rc = ibmvfc_reset_crq(vhost)) ||
+           (rc = ibmvfc_send_crq_init(vhost)) ||
+           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+               dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+       } else
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost:     struct ibmvfc host to reset
+ **/
+static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       __ibmvfc_reset_host(vhost);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_retry_host_init - Retry host initialization if allowed
+ * @vhost:     ibmvfc host struct
+ *
+ **/
+static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+{
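+       /* Retry the init, doing a full host reset on the final attempt and
+        * taking the adapter offline once the retry limit is exceeded.
+        */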
+       if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+               if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+                       dev_err(vhost->dev,
+                               "Host initialization retries exceeded. Taking adapter offline\n");
+                       ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+               } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+                       __ibmvfc_reset_host(vhost);
+               else
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+       }
+
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * __ibmvfc_find_target - Find the specified scsi_target (no locking)
+ * @starget:   scsi target struct
+ *
+ * Return value:
+ *     ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       struct ibmvfc_target *tgt;
+
+       list_for_each_entry(tgt, &vhost->targets, queue)
+               if (tgt->target_id == starget->id)
+                       return tgt;
+       return NULL;
+}
+
+/**
+ * ibmvfc_find_target - Find the specified scsi_target
+ * @starget:   scsi target struct
+ *
+ * Return value:
+ *     ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ibmvfc_target *tgt;
+       unsigned long flags;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       tgt = __ibmvfc_find_target(starget);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return tgt;
+}
+
+/**
+ * ibmvfc_get_host_speed - Get host port speed
+ * @shost:             scsi host struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
+{
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (vhost->state == IBMVFC_ACTIVE) {
+               switch (vhost->login_buf->resp.link_speed / 100) {
+               case 1:
+                       fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+                       break;
+               case 2:
+                       fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+                       break;
+               case 4:
+                       fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+                       break;
+               case 8:
+                       fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+                       break;
+               case 10:
+                       fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+                       break;
+               case 16:
+                       fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+                       break;
+               default:
+                       ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
+                                  vhost->login_buf->resp.link_speed / 100);
+                       fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+                       break;
+               }
+       } else
+               fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_get_host_port_state - Get host port state
+ * @shost:             scsi host struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
+{
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       switch (vhost->state) {
+       case IBMVFC_INITIALIZING:
+       case IBMVFC_ACTIVE:
+               fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+               break;
+       case IBMVFC_LINK_DOWN:
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case IBMVFC_LINK_DEAD:
+       case IBMVFC_HOST_OFFLINE:
+               fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+               break;
+       case IBMVFC_HALTED:
+               fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
+               break;
+       default:
+               ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+               break;
+       }
+       spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
+ * @rport:             rport struct
+ * @timeout:   timeout value
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+       if (timeout)
+               rport->dev_loss_tmo = timeout;
+       else
+               rport->dev_loss_tmo = 1;
+}
+
+/**
+ * ibmvfc_get_starget_node_name - Get SCSI target's node name
+ * @starget:   scsi target struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
+{
+       struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+       fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_name - Get SCSI target's port name
+ * @starget:   scsi target struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
+{
+       struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+       fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_id - Get SCSI target's port ID
+ * @starget:   scsi target struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
+{
+       struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+       fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+}
+
+/**
+ * ibmvfc_wait_while_resetting - Wait while the host resets
+ * @vhost:             ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
+{
+       long timeout = wait_event_timeout(vhost->init_wait_q,
+                                         (vhost->state == IBMVFC_ACTIVE ||
+                                          vhost->state == IBMVFC_HOST_OFFLINE ||
+                                          vhost->state == IBMVFC_LINK_DEAD),
+                                         (init_timeout * HZ));
+
+       return timeout ? 0 : -EIO;
+}
+
+/**
+ * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
+ * @shost:             scsi host struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+       struct ibmvfc_host *vhost = shost_priv(shost);
+
+       dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
+       ibmvfc_reset_host(vhost);
+       return ibmvfc_wait_while_resetting(vhost);
+}
+
+/**
+ * ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
+{
+       struct device_node *rootdn;
+       const char *name;
+       const unsigned int *num;
+
+       rootdn = of_find_node_by_path("/");
+       if (!rootdn)
+               return;
+
+       name = of_get_property(rootdn, "ibm,partition-name", NULL);
+       if (name)
+               strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+       num = of_get_property(rootdn, "ibm,partition-no", NULL);
+       if (num)
+               vhost->partition_number = *num;
+       of_node_put(rootdn);
+}
+
+/**
+ * ibmvfc_set_login_info - Setup info for NPIV login
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+       struct device_node *of_node = vhost->dev->archdata.of_node;
+       const char *location;
+
+       memset(login_info, 0, sizeof(*login_info));
+
+       login_info->ostype = IBMVFC_OS_LINUX;
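+       /* IBMVFC_MAX_SECTORS is in 512 byte sectors; shift converts to bytes */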
+       login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
+       login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
+       login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
+       login_info->partition_num = vhost->partition_number;
+       login_info->vfc_frame_version = 1;
+       login_info->fcp_version = 3;
+       if (vhost->client_migrated)
+               login_info->flags = IBMVFC_CLIENT_MIGRATED;
+
+       login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+       login_info->capabilities = IBMVFC_CAN_MIGRATE;
+       login_info->async.va = vhost->async_crq.msg_token;
+       login_info->async.len = vhost->async_crq.size;
+       strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
+       strncpy(login_info->device_name,
+               vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
+
+       location = of_get_property(of_node, "ibm,loc-code", NULL);
+       location = location ? location : vhost->dev->bus_id;
+       strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+}
+
+/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost:     ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+{
+       int i;
+       struct ibmvfc_event_pool *pool = &vhost->pool;
+
+       ENTER;
+       pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+       pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+       if (!pool->events)
+               return -ENOMEM;
+
+       pool->iu_storage = dma_alloc_coherent(vhost->dev,
+                                             pool->size * sizeof(*pool->iu_storage),
+                                             &pool->iu_token, 0);
+
+       if (!pool->iu_storage) {
+               kfree(pool->events);
+               return -ENOMEM;
+       }
+
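+       /* Each event owns one slot in the coherent IU area; crq.ioba is the
+        * DMA address of that slot and is sent to the server with the request.
+        */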
+       for (i = 0; i < pool->size; ++i) {
+               struct ibmvfc_event *evt = &pool->events[i];
+               atomic_set(&evt->free, 1);
+               evt->crq.valid = 0x80;
+               evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
+               evt->xfer_iu = pool->iu_storage + i;
+               evt->vhost = vhost;
+               evt->ext_list = NULL;
+               list_add_tail(&evt->queue, &vhost->free);
+       }
+
+       LEAVE;
+       return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost:     ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+{
+       int i;
+       struct ibmvfc_event_pool *pool = &vhost->pool;
+
+       ENTER;
+       for (i = 0; i < pool->size; ++i) {
+               list_del(&pool->events[i].queue);
+               BUG_ON(atomic_read(&pool->events[i].free) != 1);
+               if (pool->events[i].ext_list)
+                       dma_pool_free(vhost->sg_pool,
+                                     pool->events[i].ext_list,
+                                     pool->events[i].ext_list_token);
+       }
+
+       kfree(pool->events);
+       dma_free_coherent(vhost->dev,
+                         pool->size * sizeof(*pool->iu_storage),
+                         pool->iu_storage, pool->iu_token);
+       LEAVE;
+}
+
+/**
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @vhost:     ibmvfc host struct
+ *
+ * Returns a free event from the pool.
+ **/
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_event *evt;
+
+       BUG_ON(list_empty(&vhost->free));
+       evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
+       atomic_set(&evt->free, 0);
+       list_del(&evt->queue);
+       return evt;
+}
+
+/**
+ * ibmvfc_init_event - Initialize fields in an event struct that are always
+ *                             required.
+ * @evt:       The event
+ * @done:      Routine to call when the event is responded to
+ * @format:    SRP or MAD format
+ **/
+static void ibmvfc_init_event(struct ibmvfc_event *evt,
+                             void (*done) (struct ibmvfc_event *), u8 format)
+{
+       evt->cmnd = NULL;
+       evt->sync_iu = NULL;
+       evt->crq.format = format;
+       evt->done = done;
+}
+
+/**
+ * ibmvfc_map_sg_list - Initialize scatterlist
+ * @scmd:      scsi command struct
+ * @nseg:      number of scatterlist segments
+ * @md:        memory descriptor list to initialize
+ **/
+static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
+                              struct srp_direct_buf *md)
+{
+       int i;
+       struct scatterlist *sg;
+
+       scsi_for_each_sg(scmd, sg, nseg, i) {
+               md[i].va = sg_dma_address(sg);
+               md[i].len = sg_dma_len(sg);
+               md[i].key = 0;
+       }
+}
+
+/**
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
+ * @scmd:              Scsi_Cmnd with the scatterlist
+ * @evt:               ibmvfc event struct
+ * @vfc_cmd:   vfc_cmd that contains the memory descriptor
+ * @dev:               device for which to map dma memory
+ *
+ * Returns:
+ *     0 on success / non-zero on failure
+ **/
+static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
+                             struct ibmvfc_event *evt,
+                             struct ibmvfc_cmd *vfc_cmd, struct device *dev)
+{
+       int sg_mapped;
+       struct srp_direct_buf *data = &vfc_cmd->ioba;
+       struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+
+       sg_mapped = scsi_dma_map(scmd);
+       if (!sg_mapped) {
+               vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
+               return 0;
+       } else if (unlikely(sg_mapped < 0)) {
+               if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+                       scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
+               return sg_mapped;
+       }
+
+       if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+               vfc_cmd->flags |= IBMVFC_WRITE;
+               vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+       } else {
+               vfc_cmd->flags |= IBMVFC_READ;
+               vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+       }
+
+       if (sg_mapped == 1) {
+               ibmvfc_map_sg_list(scmd, sg_mapped, data);
+               return 0;
+       }
+
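+       /* Multiple segments: build an external descriptor list and point the
+        * command's memory descriptor at it.
+        */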
+       vfc_cmd->flags |= IBMVFC_SCATTERLIST;
+
+       if (!evt->ext_list) {
+               evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
+                                              &evt->ext_list_token);
+
+               if (!evt->ext_list) {
+                       scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
+                       return -ENOMEM;
+               }
+       }
+
+       ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
+
+       data->va = evt->ext_list_token;
+       data->len = sg_mapped * sizeof(struct srp_direct_buf);
+       data->key = 0;
+       return 0;
+}
+
+/**
+ * ibmvfc_timeout - Internal command timeout handler
+ * @evt:       struct ibmvfc_event that timed out
+ *
+ * Called when an internally generated command times out
+ **/
+static void ibmvfc_timeout(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
+       ibmvfc_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
+ * @evt:               event to be sent
+ * @vhost:             ibmvfc host struct
+ * @timeout:   timeout in seconds - 0 means do not time command
+ *
+ * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
+ **/
+static int ibmvfc_send_event(struct ibmvfc_event *evt,
+                            struct ibmvfc_host *vhost, unsigned long timeout)
+{
+       u64 *crq_as_u64 = (u64 *) &evt->crq;
+       int rc;
+
+       /* Copy the IU into the transfer area */
+       *evt->xfer_iu = evt->iu;
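+       /* Tag the IU with the event pointer so the response can be matched
+        * back to this event on completion.
+        */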
+       if (evt->crq.format == IBMVFC_CMD_FORMAT)
+               evt->xfer_iu->cmd.tag = (u64)evt;
+       else if (evt->crq.format == IBMVFC_MAD_FORMAT)
+               evt->xfer_iu->mad_common.tag = (u64)evt;
+       else
+               BUG();
+
+       list_add_tail(&evt->queue, &vhost->sent);
+       init_timer(&evt->timer);
+
+       if (timeout) {
+               evt->timer.data = (unsigned long) evt;
+               evt->timer.expires = jiffies + (timeout * HZ);
+               evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
+               add_timer(&evt->timer);
+       }
+
+       if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
+               list_del(&evt->queue);
+               del_timer(&evt->timer);
+
+               /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+                * Firmware will send a CRQ with a transport event (0xFF) to
+                * tell this client what has happened to the transport. This
+                * will be handled in ibmvfc_handle_crq()
+                */
+               if (rc == H_CLOSED) {
+                       if (printk_ratelimit())
+                               dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
+                       if (evt->cmnd)
+                               scsi_dma_unmap(evt->cmnd);
+                       ibmvfc_free_event(evt);
+                       return SCSI_MLQUEUE_HOST_BUSY;
+               }
+
+               dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
+               if (evt->cmnd) {
+                       evt->cmnd->result = DID_ERROR << 16;
+                       evt->done = ibmvfc_scsi_eh_done;
+               } else
+                       evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
+
+               evt->done(evt);
+       } else
+               ibmvfc_trc_start(evt);
+
+       return 0;
+}
+
+/**
+ * ibmvfc_log_error - Log an error for the failed command if appropriate
+ * @evt:       ibmvfc event to log
+ *
+ **/
+static void ibmvfc_log_error(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+       struct scsi_cmnd *cmnd = evt->cmnd;
+       const char *err = unknown_error;
+       int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+       int logerr = 0;
+       int rsp_code = 0;
+
+       if (index >= 0) {
+               logerr = cmd_status[index].log;
+               err = cmd_status[index].name;
+       }
+
+       if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
+               return;
+
+       if (rsp->flags & FCP_RSP_LEN_VALID)
+               rsp_code = rsp->data.info.rsp_code;
+
+       scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+                   "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
+                   cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+                   rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
+}
+
+/**
+ * ibmvfc_scsi_done - Handle responses from commands
+ * @evt:       ibmvfc event to be handled
+ *
+ * Used as a callback when sending scsi cmds.
+ **/
+static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+       struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+       struct scsi_cmnd *cmnd = evt->cmnd;
+       int rsp_len = 0;
+       int sense_len = rsp->fcp_sense_len;
+
+       if (cmnd) {
+               if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
+                       scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
+               else if (rsp->flags & FCP_RESID_UNDER)
+                       scsi_set_resid(cmnd, rsp->fcp_resid);
+               else
+                       scsi_set_resid(cmnd, 0);
+
+               if (vfc_cmd->status) {
+                       cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+
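+                       /* Sense data follows the FCP response info, hence
+                        * the rsp_len offset into the payload.
+                        */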
+                       if (rsp->flags & FCP_RSP_LEN_VALID)
+                               rsp_len = rsp->fcp_rsp_len;
+                       if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
+                               sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
+                       if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
+                               memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+
+                       ibmvfc_log_error(evt);
+               }
+
+               if (!cmnd->result &&
+                   (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
+                       cmnd->result = (DID_ERROR << 16);
+
+               scsi_dma_unmap(cmnd);
+               cmnd->scsi_done(cmnd);
+       }
+
+       ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_host_chkready - Check if the host can accept commands
+ * @vhost:      struct ibmvfc host
+ *
+ * Returns:
+ *     1 if host can accept command / 0 if not
+ **/
+static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
+{
+       int result = 0;
+
+       switch (vhost->state) {
+       case IBMVFC_LINK_DEAD:
+       case IBMVFC_HOST_OFFLINE:
+               result = DID_NO_CONNECT << 16;
+               break;
+       case IBMVFC_NO_CRQ:
+       case IBMVFC_INITIALIZING:
+       case IBMVFC_HALTED:
+       case IBMVFC_LINK_DOWN:
+               result = DID_REQUEUE << 16;
+               break;
+       case IBMVFC_ACTIVE:
+               result = 0;
+               break;
+       }
+
+       return result;
+}
+
+/**
+ * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @cmnd:      struct scsi_cmnd to be executed
+ * @done:      Callback function to be called when cmnd is completed
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
+                              void (*done) (struct scsi_cmnd *))
+{
+       struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+       struct ibmvfc_cmd *vfc_cmd;
+       struct ibmvfc_event *evt;
+       u8 tag[2];
+       int rc;
+
+       if (unlikely((rc = fc_remote_port_chkready(rport))) ||
+           unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+               cmnd->result = rc;
+               done(cmnd);
+               return 0;
+       }
+
+       cmnd->result = (DID_OK << 16);
+       evt = ibmvfc_get_event(vhost);
+       ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+       evt->cmnd = cmnd;
+       cmnd->scsi_done = done;
+       vfc_cmd = &evt->iu.cmd;
+       memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+       vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+       vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
+       vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
+       vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
+       vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
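+       /* cancel_key ties this command to its scsi_device so a later TMF
+        * cancel can target all outstanding I/O for the device.
+        */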
+       vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
+       vfc_cmd->tgt_scsi_id = rport->port_id;
+       if ((rport->supported_classes & FC_COS_CLASS3) &&
+           (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
+               vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
+       vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
+       int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
+       memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+       if (scsi_populate_tag_msg(cmnd, tag)) {
+               vfc_cmd->task_tag = tag[1];
+               switch (tag[0]) {
+               case MSG_SIMPLE_TAG:
+                       vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+                       break;
+               case MSG_HEAD_TAG:
+                       vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
+                       break;
+               case MSG_ORDERED_TAG:
+                       vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
+                       break;
+               }
+       }
+
+       if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
+               return ibmvfc_send_event(evt, vhost, 0);
+
+       ibmvfc_free_event(evt);
+       if (rc == -ENOMEM)
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+               scmd_printk(KERN_ERR, cmnd,
+                           "Failed to map DMA buffer for command. rc=%d\n", rc);
+
+       cmnd->result = DID_ERROR << 16;
+       done(cmnd);
+       return 0;
+}
+
+/**
+ * ibmvfc_sync_completion - Signal that a synchronous command has completed
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
+{
+       /* copy the response back */
+       if (evt->sync_iu)
+               *evt->sync_iu = *evt->xfer_iu;
+
+       complete(&evt->comp);
+}
+
+/**
+ * ibmvfc_reset_device - Reset the device with the specified reset type
+ * @sdev:      scsi device to reset
+ * @type:      reset type
+ * @desc:      reset type description for log messages
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+{
+       struct ibmvfc_host *vhost = shost_priv(sdev->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct ibmvfc_cmd *tmf;
+       struct ibmvfc_event *evt;
+       union ibmvfc_iu rsp_iu;
+       struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+       int rsp_rc = -EBUSY;
+       unsigned long flags;
+       int rsp_code = 0;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       if (vhost->state == IBMVFC_ACTIVE) {
+               evt = ibmvfc_get_event(vhost);
+               ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+               tmf = &evt->iu.cmd;
+               memset(tmf, 0, sizeof(*tmf));
+               tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+               tmf->resp.len = sizeof(tmf->rsp);
+               tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+               tmf->payload_len = sizeof(tmf->iu);
+               tmf->resp_len = sizeof(tmf->rsp);
+               tmf->cancel_key = (unsigned long)sdev->hostdata;
+               tmf->tgt_scsi_id = rport->port_id;
+               int_to_scsilun(sdev->lun, &tmf->iu.lun);
+               tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+               tmf->iu.tmf_flags = type;
+               evt->sync_iu = &rsp_iu;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+       }
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+       if (rsp_rc != 0) {
+               sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
+                           desc, rsp_rc);
+               return -EIO;
+       }
+
+       sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
+       wait_for_completion(&evt->comp);
+
+       if (rsp_iu.cmd.status) {
+               if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+                       rsp_code = fc_rsp->data.info.rsp_code;
+
+               sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
+                           "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+                           desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           fc_rsp->scsi_status);
+               rsp_rc = -EIO;
+       } else
+               sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       ibmvfc_free_event(evt);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       return rsp_rc;
+}
+
+/**
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev:      scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+{
+       struct ibmvfc_host *vhost = shost_priv(sdev->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct ibmvfc_cmd *tmf;
+       struct ibmvfc_event *evt, *found_evt;
+       union ibmvfc_iu rsp_iu;
+       struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+       int rsp_rc = -EBUSY;
+       unsigned long flags;
+       int rsp_code = 0;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       found_evt = NULL;
+       list_for_each_entry(evt, &vhost->sent, queue) {
+               if (evt->cmnd && evt->cmnd->device == sdev) {
+                       found_evt = evt;
+                       break;
+               }
+       }
+
+       if (!found_evt) {
+               if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+                       sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               return 0;
+       }
+
+       if (vhost->state == IBMVFC_ACTIVE) {
+               evt = ibmvfc_get_event(vhost);
+               ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+               tmf = &evt->iu.cmd;
+               memset(tmf, 0, sizeof(*tmf));
+               tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+               tmf->resp.len = sizeof(tmf->rsp);
+               tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+               tmf->payload_len = sizeof(tmf->iu);
+               tmf->resp_len = sizeof(tmf->rsp);
+               tmf->cancel_key = (unsigned long)sdev->hostdata;
+               tmf->tgt_scsi_id = rport->port_id;
+               int_to_scsilun(sdev->lun, &tmf->iu.lun);
+               tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+               tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+               evt->sync_iu = &rsp_iu;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+       }
+
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+       if (rsp_rc != 0) {
+               sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+               return -EIO;
+       }
+
+       sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+       wait_for_completion(&evt->comp);
+
+       if (rsp_iu.cmd.status) {
+               if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+                       rsp_code = fc_rsp->data.info.rsp_code;
+
+               sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+                           "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+                           ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           fc_rsp->scsi_status);
+               rsp_rc = -EIO;
+       } else
+               sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       ibmvfc_free_event(evt);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       return rsp_rc;
+}
+
+/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev:      scsi device to cancel commands
+ * @type:      type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+       struct ibmvfc_host *vhost = shost_priv(sdev->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct ibmvfc_tmf *tmf;
+       struct ibmvfc_event *evt, *found_evt;
+       union ibmvfc_iu rsp;
+       int rsp_rc = -EBUSY;
+       unsigned long flags;
+       u16 status;
+
+       ENTER;
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       found_evt = NULL;
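+       /* Only send a cancel if something is outstanding for this device */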
+       list_for_each_entry(evt, &vhost->sent, queue) {
+               if (evt->cmnd && evt->cmnd->device == sdev) {
+                       found_evt = evt;
+                       break;
+               }
+       }
+
+       if (!found_evt) {
+               if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+                       sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               return 0;
+       }
+
+       if (vhost->state == IBMVFC_ACTIVE) {
+               evt = ibmvfc_get_event(vhost);
+               ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+               tmf = &evt->iu.tmf;
+               memset(tmf, 0, sizeof(*tmf));
+               tmf->common.version = 1;
+               tmf->common.opcode = IBMVFC_TMF_MAD;
+               tmf->common.length = sizeof(*tmf);
+               tmf->scsi_id = rport->port_id;
+               int_to_scsilun(sdev->lun, &tmf->lun);
+               tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               tmf->cancel_key = (unsigned long)sdev->hostdata;
+               tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+
+               evt->sync_iu = &rsp;
+               init_completion(&evt->comp);
+               rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+       }
+
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+       if (rsp_rc != 0) {
+               sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
+               return -EIO;
+       }
+
+       sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+       wait_for_completion(&evt->comp);
+       status = rsp.mad_common.status;
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       ibmvfc_free_event(evt);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+       if (status != IBMVFC_MAD_SUCCESS) {
+               sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+               return -EIO;
+       }
+
+       sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+       return 0;
+}
+
+/**
+ * ibmvfc_eh_abort_handler - Abort a command
+ * @cmd:       scsi command to abort
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+       struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+       struct ibmvfc_event *evt, *pos;
+       int cancel_rc, abort_rc;
+       unsigned long flags;
+
+       ENTER;
+       ibmvfc_wait_while_resetting(vhost);
+       cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
+       abort_rc = ibmvfc_abort_task_set(cmd->device);
+
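+       /* Only complete the commands locally if both the VIOS-level cancel
+        * and the target-level abort task set succeeded.
+        */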
+       if (!cancel_rc && !abort_rc) {
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+                       if (evt->cmnd && evt->cmnd->device == cmd->device)
+                               ibmvfc_fail_request(evt, DID_ABORT);
+               }
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               LEAVE;
+               return SUCCESS;
+       }
+
+       LEAVE;
+       return FAILED;
+}
+
+/**
+ * ibmvfc_eh_device_reset_handler - Reset a single LUN
+ * @cmd:       scsi command struct
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+       struct ibmvfc_event *evt, *pos;
+       int cancel_rc, reset_rc;
+       unsigned long flags;
+
+       ENTER;
+       ibmvfc_wait_while_resetting(vhost);
+       cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
+       reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+
+       if (!cancel_rc && !reset_rc) {
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+                       if (evt->cmnd && evt->cmnd->device == cmd->device)
+                               ibmvfc_fail_request(evt, DID_ABORT);
+               }
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               LEAVE;
+               return SUCCESS;
+       }
+
+       LEAVE;
+       return FAILED;
+}
+
+/**
+ * ibmvfc_dev_cancel_all - Device iterated cancel all function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
+}
+
+/**
+ * ibmvfc_dev_abort_all - Device iterated abort task set function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_abort_task_set(sdev);
+}
+
+/**
+ * ibmvfc_eh_target_reset_handler - Reset the target
+ * @cmd:       scsi command struct
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+       struct scsi_target *starget = scsi_target(cmd->device);
+       struct ibmvfc_event *evt, *pos;
+       int reset_rc;
+       unsigned long cancel_rc = 0;
+       unsigned long flags;
+
+       ENTER;
+       ibmvfc_wait_while_resetting(vhost);
+       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+       reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+
+       if (!cancel_rc && !reset_rc) {
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+                       if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+                               ibmvfc_fail_request(evt, DID_ABORT);
+               }
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               LEAVE;
+               return SUCCESS;
+       }
+
+       LEAVE;
+       return FAILED;
+}
+
+/**
+ * ibmvfc_eh_host_reset_handler - Reset the connection to the server
+ * @cmd:       struct scsi_cmnd having problems
+ *
+ **/
+static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+       int rc;
+       struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+
+       dev_err(vhost->dev, "Resetting connection due to error recovery\n");
+       rc = ibmvfc_issue_fc_host_lip(vhost->host);
+       return rc ? FAILED : SUCCESS;
+}
+
+/**
+ * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
+ * @rport:             rport struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
+{
+       struct scsi_target *starget = to_scsi_target(&rport->dev);
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       struct ibmvfc_event *evt, *pos;
+       unsigned long cancel_rc = 0;
+       unsigned long abort_rc = 0;
+       unsigned long flags;
+
+       ENTER;
+       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+       starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
+
+       if (!cancel_rc && !abort_rc) {
+               spin_lock_irqsave(shost->host_lock, flags);
+               list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+                       if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+                               ibmvfc_fail_request(evt, DID_ABORT);
+               }
+               spin_unlock_irqrestore(shost->host_lock, flags);
+       } else
+               ibmvfc_issue_fc_host_lip(shost);
+
+       scsi_target_unblock(&rport->dev);
+       LEAVE;
+}
+
+static const struct {
+       enum ibmvfc_async_event ae;
+       const char *desc;
+} ae_desc [] = {
+       { IBMVFC_AE_ELS_PLOGI,          "PLOGI" },
+       { IBMVFC_AE_ELS_LOGO,           "LOGO" },
+       { IBMVFC_AE_ELS_PRLO,           "PRLO" },
+       { IBMVFC_AE_SCN_NPORT,          "N-Port SCN" },
+       { IBMVFC_AE_SCN_GROUP,          "Group SCN" },
+       { IBMVFC_AE_SCN_DOMAIN,         "Domain SCN" },
+       { IBMVFC_AE_SCN_FABRIC,         "Fabric SCN" },
+       { IBMVFC_AE_LINK_UP,            "Link Up" },
+       { IBMVFC_AE_LINK_DOWN,          "Link Down" },
+       { IBMVFC_AE_LINK_DEAD,          "Link Dead" },
+       { IBMVFC_AE_HALT,                       "Halt" },
+       { IBMVFC_AE_RESUME,             "Resume" },
+       { IBMVFC_AE_ADAPTER_FAILED,     "Adapter Failed" },
+};
+
+static const char *unknown_ae = "Unknown async";
+
+/**
+ * ibmvfc_get_ae_desc - Get text description for async event
+ * @ae:        async event
+ *
+ **/
+static const char *ibmvfc_get_ae_desc(u64 ae)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
+               if (ae_desc[i].ae == ae)
+                       return ae_desc[i].desc;
+
+       return unknown_ae;
+}
+
+/**
+ * ibmvfc_handle_async - Handle an async event from the adapter
+ * @crq:       crq to process
+ * @vhost:     ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
+                               struct ibmvfc_host *vhost)
+{
+       const char *desc = ibmvfc_get_ae_desc(crq->event);
+
+       ibmvfc_log(vhost, 2, "%s event received\n", desc);
+
+       switch (crq->event) {
+       case IBMVFC_AE_LINK_UP:
+       case IBMVFC_AE_RESUME:
+               vhost->events_to_log |= IBMVFC_AE_LINKUP;
+               ibmvfc_init_host(vhost);
+               break;
+       case IBMVFC_AE_SCN_FABRIC:
+               vhost->events_to_log |= IBMVFC_AE_RSCN;
+               ibmvfc_init_host(vhost);
+               break;
+       case IBMVFC_AE_SCN_NPORT:
+       case IBMVFC_AE_SCN_GROUP:
+       case IBMVFC_AE_SCN_DOMAIN:
+               vhost->events_to_log |= IBMVFC_AE_RSCN;
+               /* fall through */
+       case IBMVFC_AE_ELS_LOGO:
+       case IBMVFC_AE_ELS_PRLO:
+       case IBMVFC_AE_ELS_PLOGI:
+               ibmvfc_reinit_host(vhost);
+               break;
+       case IBMVFC_AE_LINK_DOWN:
+       case IBMVFC_AE_ADAPTER_FAILED:
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+               break;
+       case IBMVFC_AE_LINK_DEAD:
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               break;
+       case IBMVFC_AE_HALT:
+               ibmvfc_link_down(vhost, IBMVFC_HALTED);
+               break;
+       default:
+               dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
+               break;
+       }
+}
+
+/**
+ * ibmvfc_handle_crq - Handles and frees received events in the CRQ
+ * @crq:       Command/Response queue
+ * @vhost:     ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+{
+       long rc;
+       struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
+
+       switch (crq->valid) {
+       case IBMVFC_CRQ_INIT_RSP:
+               switch (crq->format) {
+               case IBMVFC_CRQ_INIT:
+                       dev_info(vhost->dev, "Partner initialized\n");
+                       /* Send back a response */
+                       rc = ibmvfc_send_crq_init_complete(vhost);
+                       if (rc == 0)
+                               ibmvfc_init_host(vhost);
+                       else
+                               dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
+                       break;
+               case IBMVFC_CRQ_INIT_COMPLETE:
+                       dev_info(vhost->dev, "Partner initialization complete\n");
+                       ibmvfc_init_host(vhost);
+                       break;
+               default:
+                       dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
+               }
+               return;
+       case IBMVFC_CRQ_XPORT_EVENT:
+               vhost->state = IBMVFC_NO_CRQ;
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+               if (crq->format == IBMVFC_PARTITION_MIGRATED) {
+                       /* We need to re-setup the interpartition connection */
+                       dev_info(vhost->dev, "Re-enabling adapter\n");
+                       vhost->client_migrated = 1;
+                       ibmvfc_purge_requests(vhost, DID_REQUEUE);
+                       if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
+                           (rc = ibmvfc_send_crq_init(vhost))) {
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+                               dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
+                       } else
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+               } else {
+                       dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+
+                       ibmvfc_purge_requests(vhost, DID_ERROR);
+                       if ((rc = ibmvfc_reset_crq(vhost)) ||
+                           (rc = ibmvfc_send_crq_init(vhost))) {
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+                               dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
+                       } else
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+               }
+               return;
+       case IBMVFC_CRQ_CMD_RSP:
+               break;
+       default:
+               dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
+               return;
+       }
+
+       if (crq->format == IBMVFC_ASYNC_EVENT)
+               return;
+
+       /* The only kind of payload CRQs we should get are responses to
+        * things we send. Make sure this response is to something we
+        * actually sent
+        */
+       if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+               dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
+                       crq->ioba);
+               return;
+       }
+
+       if (unlikely(atomic_read(&evt->free))) {
+               dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
+                       crq->ioba);
+               return;
+       }
+
+       del_timer(&evt->timer);
+       list_del(&evt->queue);
+       ibmvfc_trc_end(evt);
+       evt->done(evt);
+}
+
+/**
+ * ibmvfc_scan_finished - Check if the device scan is done.
+ * @shost:     scsi host struct
+ * @time:      current elapsed time
+ *
+ * Returns:
+ *     0 if scan is not done / 1 if scan is done
+ **/
+static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       unsigned long flags;
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       int done = 0;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (time >= (init_timeout * HZ)) {
+               dev_info(vhost->dev, "Scan taking longer than %d seconds, "
+                        "continuing initialization\n", init_timeout);
+               done = 1;
+       }
+
+       if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+               done = 1;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return done;
+}
+
+/**
+ * ibmvfc_slave_alloc - Setup the device's task set value
+ * @sdev:      struct scsi_device device to configure
+ *
+ * Set the device's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ *     0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+{
+       struct Scsi_Host *shost = sdev->host;
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags = 0;
+
+       if (!rport || fc_remote_port_chkready(rport))
+               return -ENXIO;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return 0;
+}
+
+/**
+ * ibmvfc_slave_configure - Configure the device
+ * @sdev:      struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also.
+ *
+ * Returns:
+ *     0
+ **/
+static int ibmvfc_slave_configure(struct scsi_device *sdev)
+{
+       struct Scsi_Host *shost = sdev->host;
+       struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (sdev->type == TYPE_DISK)
+               sdev->allow_restart = 1;
+
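+       /* Use simple-tag queuing at the current queue depth when the device
+        * supports tagged commands; otherwise run untagged.
+        */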
+       if (sdev->tagged_supported) {
+               scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+               scsi_activate_tcq(sdev, sdev->queue_depth);
+       } else
+               scsi_deactivate_tcq(sdev, sdev->queue_depth);
+
+       rport->dev_loss_tmo = dev_loss_tmo;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return 0;
+}
+
+/**
+ * ibmvfc_change_queue_depth - Change the device's queue depth
+ * @sdev:      scsi device struct
+ * @qdepth:    depth to set
+ *
+ * Return value:
+ *     actual depth set
+ **/
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+       if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
+               qdepth = IBMVFC_MAX_CMDS_PER_LUN;
+
+       scsi_adjust_queue_depth(sdev, 0, qdepth);
+       return sdev->queue_depth;
+}
+
+/**
+ * ibmvfc_change_queue_type - Change the device's queue type
+ * @sdev:              scsi device struct
+ * @tag_type:  type of tags to use
+ *
+ * Return value:
+ *     actual queue type set
+ **/
+static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+       if (sdev->tagged_supported) {
+               scsi_set_tag_type(sdev, tag_type);
+
+               if (tag_type)
+                       scsi_activate_tcq(sdev, sdev->queue_depth);
+               else
+                       scsi_deactivate_tcq(sdev, sdev->queue_depth);
+       } else
+               tag_type = 0;
+
+       return tag_type;
+}
+
+static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
+                                                struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       vhost->login_buf->resp.partition_name);
+}
+
+static struct device_attribute ibmvfc_host_partition_name = {
+       .attr = {
+               .name = "partition_name",
+               .mode = S_IRUGO,
+       },
+       .show = ibmvfc_show_host_partition_name,
+};
+
+static ssize_t ibmvfc_show_host_device_name(struct device *dev,
+                                           struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       vhost->login_buf->resp.device_name);
+}
+
+static struct device_attribute ibmvfc_host_device_name = {
+       .attr = {
+               .name = "device_name",
+               .mode = S_IRUGO,
+       },
+       .show = ibmvfc_show_host_device_name,
+};
+
+static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
+                                        struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       vhost->login_buf->resp.port_loc_code);
+}
+
+static struct device_attribute ibmvfc_host_loc_code = {
+       .attr = {
+               .name = "port_loc_code",
+               .mode = S_IRUGO,
+       },
+       .show = ibmvfc_show_host_loc_code,
+};
+
+static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
+                                        struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       vhost->login_buf->resp.drc_name);
+}
+
+static struct device_attribute ibmvfc_host_drc_name = {
+       .attr = {
+               .name = "drc_name",
+               .mode = S_IRUGO,
+       },
+       .show = ibmvfc_show_host_drc_name,
+};
+
+static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
+                                            struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+}
+
+static struct device_attribute ibmvfc_host_npiv_version = {
+       .attr = {
+               .name = "npiv_version",
+               .mode = S_IRUGO,
+       },
+       .show = ibmvfc_show_host_npiv_version,
+};
+
+/**
+ * ibmvfc_show_log_level - Show the adapter's error logging level
+ * @dev:       class device struct
+ * @attr:      device attribute struct
+ * @buf:       buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_show_log_level(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags = 0;
+       int len;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return len;
+}
+
+/**
+ * ibmvfc_store_log_level - Change the adapter's error logging level
+ * @dev:       class device struct
+ * @attr:      device attribute struct
+ * @buf:       buffer
+ * @count:     buffer size
+ *
+ * Return value:
+ *     number of bytes consumed from the buffer
+ **/
+static ssize_t ibmvfc_store_log_level(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       vhost->log_level = simple_strtoul(buf, NULL, 10);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return strlen(buf);
+}
+
+static struct device_attribute ibmvfc_log_level_attr = {
+       .attr = {
+               .name =         "log_level",
+               .mode =         S_IRUGO | S_IWUSR,
+       },
+       .show = ibmvfc_show_log_level,
+       .store = ibmvfc_store_log_level
+};
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_read_trace - Dump the adapter trace
+ * @kobj:              kobject struct
+ * @bin_attr:  bin_attribute struct
+ * @buf:               buffer
+ * @off:               offset
+ * @count:             buffer size
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+                                struct bin_attribute *bin_attr,
+                                char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       unsigned long flags = 0;
+       int size = IBMVFC_TRACE_SIZE;
+       char *src = (char *)vhost->trace;
+
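+       /* Clamp the requested offset and length to the trace buffer size. */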
+       if (off > size)
+               return 0;
+       if (off + count > size) {
+               size -= off;
+               count = size;
+       }
+
+       spin_lock_irqsave(shost->host_lock, flags);
+       memcpy(buf, &src[off], count);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return count;
+}
+
+static struct bin_attribute ibmvfc_trace_attr = {
+       .attr = {
+               .name = "trace",
+               .mode = S_IRUGO,
+       },
+       .size = 0,
+       .read = ibmvfc_read_trace,
+};
+#endif
+
+static struct device_attribute *ibmvfc_attrs[] = {
+       &ibmvfc_host_partition_name,
+       &ibmvfc_host_device_name,
+       &ibmvfc_host_loc_code,
+       &ibmvfc_host_drc_name,
+       &ibmvfc_host_npiv_version,
+       &ibmvfc_log_level_attr,
+       NULL
+};
+
+static struct scsi_host_template driver_template = {
+       .module = THIS_MODULE,
+       .name = "IBM POWER Virtual FC Adapter",
+       .proc_name = IBMVFC_NAME,
+       .queuecommand = ibmvfc_queuecommand,
+       .eh_abort_handler = ibmvfc_eh_abort_handler,
+       .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
+       .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
+       .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
+       .slave_alloc = ibmvfc_slave_alloc,
+       .slave_configure = ibmvfc_slave_configure,
+       .scan_finished = ibmvfc_scan_finished,
+       .change_queue_depth = ibmvfc_change_queue_depth,
+       .change_queue_type = ibmvfc_change_queue_type,
+       .cmd_per_lun = 16,
+       .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
+       .this_id = -1,
+       .sg_tablesize = SG_ALL,
+       .max_sectors = IBMVFC_MAX_SECTORS,
+       .use_clustering = ENABLE_CLUSTERING,
+       .shost_attrs = ibmvfc_attrs,
+};
+
+/**
+ * ibmvfc_next_async_crq - Returns the next entry in async queue
+ * @vhost:     ibmvfc host struct
+ *
+ * Returns:
+ *     Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+       struct ibmvfc_async_crq *crq;
+
+       crq = &async_crq->msgs[async_crq->cur];
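+       /* A set valid bit (0x80) marks an entry that has been delivered but
+        * not yet consumed: take it and advance the cursor, wrapping at the
+        * end of the queue. Otherwise the queue is empty.
+        */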
+       if (crq->valid & 0x80) {
+               if (++async_crq->cur == async_crq->size)
+                       async_crq->cur = 0;
+       } else
+               crq = NULL;
+
+       return crq;
+}
+
+/**
+ * ibmvfc_next_crq - Returns the next entry in message queue
+ * @vhost:     ibmvfc host struct
+ *
+ * Returns:
+ *     Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_crq_queue *queue = &vhost->crq;
+       struct ibmvfc_crq *crq;
+
+       crq = &queue->msgs[queue->cur];
+       if (crq->valid & 0x80) {
+               if (++queue->cur == queue->size)
+                       queue->cur = 0;
+       } else
+               crq = NULL;
+
+       return crq;
+}
+
+/**
+ * ibmvfc_interrupt - Interrupt handler
+ * @irq:               number of irq to handle, not used
+ * @dev_instance: ibmvfc_host that received interrupt
+ *
+ * Returns:
+ *     IRQ_HANDLED
+ **/
+static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
+{
+       struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
+       struct vio_dev *vdev = to_vio_dev(vhost->dev);
+       struct ibmvfc_crq *crq;
+       struct ibmvfc_async_crq *async;
+       unsigned long flags;
+       int done = 0;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       vio_disable_interrupts(to_vio_dev(vhost->dev));
+       while (!done) {
+               /* Pull all the valid messages off the CRQ */
+               while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+                       ibmvfc_handle_crq(crq, vhost);
+                       crq->valid = 0;
+               }
+
+               /* Pull all the valid messages off the async CRQ */
+               while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+                       ibmvfc_handle_async(async, vhost);
+                       async->valid = 0;
+               }
+
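+               /* Re-enable interrupts, then check both queues once more so
+                * that an entry arriving after the drain loops above, but
+                * before interrupts are back on, is not left unhandled.
+                */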
+               vio_enable_interrupts(vdev);
+               if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+                       vio_disable_interrupts(vdev);
+                       ibmvfc_handle_crq(crq, vhost);
+                       crq->valid = 0;
+               } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+                       vio_disable_interrupts(vdev);
+                       ibmvfc_handle_async(async, vhost);
+                       async->valid = 0;
+               } else
+                       done = 1;
+       }
+
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/**
+ * ibmvfc_init_tgt - Set the next init job step for the target
+ * @tgt:               ibmvfc target struct
+ * @job_step:  job step to perform
+ *
+ **/
+static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
+                           void (*job_step) (struct ibmvfc_target *))
+{
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
+       tgt->job_step = job_step;
+       wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
+ * @tgt:               ibmvfc target struct
+ * @job_step:  initialization job step
+ *
+ **/
+static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+                                 void (*job_step) (struct ibmvfc_target *))
+{
+       if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               wake_up(&tgt->vhost->work_wait_q);
+       } else
+               ibmvfc_init_tgt(tgt, job_step);
+}
+
+/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref:              kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+       struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+       kfree(tgt);
+}
+
+/**
+ * ibmvfc_tgt_prli_done - Completion handler for Process Login
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_target *tgt = evt->tgt;
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
+       u32 status = rsp->common.status;
+
+       vhost->discovery_threads--;
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       switch (status) {
+       case IBMVFC_MAD_SUCCESS:
+               tgt_dbg(tgt, "Process Login succeeded\n");
+               tgt->need_login = 0;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+               break;
+       case IBMVFC_MAD_DRIVER_FAILED:
+               break;
+       case IBMVFC_MAD_CRQ_ERROR:
+               ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+               break;
+       case IBMVFC_MAD_FAILED:
+       default:
+               tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error),
+                       rsp->status, rsp->error, status);
+               if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+               break;
+       }
+
+       kref_put(&tgt->kref, ibmvfc_release_tgt);
+       ibmvfc_free_event(evt);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_prli - Send a process login
+ * @tgt:       ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_process_login *prli;
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct ibmvfc_event *evt;
+
+       if (vhost->discovery_threads >= disc_threads)
+               return;
+
+       kref_get(&tgt->kref);
+       evt = ibmvfc_get_event(vhost);
+       vhost->discovery_threads++;
+       ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+       evt->tgt = tgt;
+       prli = &evt->iu.prli;
+       memset(prli, 0, sizeof(*prli));
+       prli->common.version = 1;
+       prli->common.opcode = IBMVFC_PROCESS_LOGIN;
+       prli->common.length = sizeof(*prli);
+       prli->scsi_id = tgt->scsi_id;
+
+       prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
+       prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
+       prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
+
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+       if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+               vhost->discovery_threads--;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+       } else
+               tgt_dbg(tgt, "Sent process login\n");
+}
+
+/**
+ * ibmvfc_tgt_plogi_done - Completion handler for Port Login
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_target *tgt = evt->tgt;
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
+       u32 status = rsp->common.status;
+
+       vhost->discovery_threads--;
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       switch (status) {
+       case IBMVFC_MAD_SUCCESS:
+               tgt_dbg(tgt, "Port Login succeeded\n");
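+               /* If the WWPN behind this SCSI ID has changed since the last
+                * login, request a full re-initialization rather than updating
+                * the existing remote port in place.
+                */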
+               if (tgt->ids.port_name &&
+                   tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
+                       vhost->reinit = 1;
+                       tgt_dbg(tgt, "Port re-init required\n");
+                       break;
+               }
+               tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+               tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+               tgt->ids.port_id = tgt->scsi_id;
+               tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
+               memcpy(&tgt->service_parms, &rsp->service_parms,
+                      sizeof(tgt->service_parms));
+               memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+                      sizeof(tgt->service_parms_change));
+               ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+               break;
+       case IBMVFC_MAD_DRIVER_FAILED:
+               break;
+       case IBMVFC_MAD_CRQ_ERROR:
+               ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+               break;
+       case IBMVFC_MAD_FAILED:
+       default:
+               tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+                       ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
+
+               if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+               break;
+       }
+
+       kref_put(&tgt->kref, ibmvfc_release_tgt);
+       ibmvfc_free_event(evt);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
+ * @tgt:       ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_port_login *plogi;
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct ibmvfc_event *evt;
+
+       if (vhost->discovery_threads >= disc_threads)
+               return;
+
+       kref_get(&tgt->kref);
+       evt = ibmvfc_get_event(vhost);
+       vhost->discovery_threads++;
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+       ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+       evt->tgt = tgt;
+       plogi = &evt->iu.plogi;
+       memset(plogi, 0, sizeof(*plogi));
+       plogi->common.version = 1;
+       plogi->common.opcode = IBMVFC_PORT_LOGIN;
+       plogi->common.length = sizeof(*plogi);
+       plogi->scsi_id = tgt->scsi_id;
+
+       if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+               vhost->discovery_threads--;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+       } else
+               tgt_dbg(tgt, "Sent port login\n");
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_target *tgt = evt->tgt;
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
+       u32 status = rsp->common.status;
+
+       vhost->discovery_threads--;
+       ibmvfc_free_event(evt);
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+       switch (status) {
+       case IBMVFC_MAD_SUCCESS:
+               tgt_dbg(tgt, "Implicit Logout succeeded\n");
+               break;
+       case IBMVFC_MAD_DRIVER_FAILED:
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               wake_up(&vhost->work_wait_q);
+               return;
+       case IBMVFC_MAD_FAILED:
+       default:
+               tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
+               break;
+       }
+
+       if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
+               ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
+       else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
+                tgt->scsi_id != tgt->new_scsi_id)
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+       kref_put(&tgt->kref, ibmvfc_release_tgt);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt:               ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_implicit_logout *mad;
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct ibmvfc_event *evt;
+
+       if (vhost->discovery_threads >= disc_threads)
+               return;
+
+       kref_get(&tgt->kref);
+       evt = ibmvfc_get_event(vhost);
+       vhost->discovery_threads++;
+       ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+       evt->tgt = tgt;
+       mad = &evt->iu.implicit_logout;
+       memset(mad, 0, sizeof(*mad));
+       mad->common.version = 1;
+       mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
+       mad->common.length = sizeof(*mad);
+       mad->old_scsi_id = tgt->scsi_id;
+
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+       if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+               vhost->discovery_threads--;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+       } else
+               tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_target *tgt = evt->tgt;
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
+       u32 status = rsp->common.status;
+
+       vhost->discovery_threads--;
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       switch (status) {
+       case IBMVFC_MAD_SUCCESS:
+               tgt_dbg(tgt, "Query Target succeeded\n");
+               tgt->new_scsi_id = rsp->scsi_id;
+               if (rsp->scsi_id != tgt->scsi_id)
+                       ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+               break;
+       case IBMVFC_MAD_DRIVER_FAILED:
+               break;
+       case IBMVFC_MAD_CRQ_ERROR:
+               ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+               break;
+       case IBMVFC_MAD_FAILED:
+       default:
+               tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+                       ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
+
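+               /* If the fabric reports the port name as no longer registered,
+                * schedule the rport for deletion; otherwise retry only errors
+                * the driver considers retryable.
+                */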
+               if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
+                   rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
+                   rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
+                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+               break;
+       }
+
+       kref_put(&tgt->kref, ibmvfc_release_tgt);
+       ibmvfc_free_event(evt);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
+ * @tgt:       ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_query_tgt *query_tgt;
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct ibmvfc_event *evt;
+
+       if (vhost->discovery_threads >= disc_threads)
+               return;
+
+       kref_get(&tgt->kref);
+       evt = ibmvfc_get_event(vhost);
+       vhost->discovery_threads++;
+       evt->tgt = tgt;
+       ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+       query_tgt = &evt->iu.query_tgt;
+       memset(query_tgt, 0, sizeof(*query_tgt));
+       query_tgt->common.version = 1;
+       query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
+       query_tgt->common.length = sizeof(*query_tgt);
+       query_tgt->wwpn = tgt->ids.port_name;
+
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+       if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+               vhost->discovery_threads--;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+       } else
+               tgt_dbg(tgt, "Sent Query Target\n");
+}
+
+/**
+ * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
+ * @vhost:             ibmvfc host struct
+ * @scsi_id:   SCSI ID to allocate target for
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+{
+       struct ibmvfc_target *tgt;
+       unsigned long flags;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       list_for_each_entry(tgt, &vhost->targets, queue) {
+               if (tgt->scsi_id == scsi_id) {
+                       if (tgt->need_login)
+                               ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+                       goto unlock_out;
+               }
+       }
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
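+       /* Drop the host lock for the potentially sleeping GFP_KERNEL
+        * allocation; it is re-taken below before adding the new target to
+        * the list.
+        */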
+
+       tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+       if (!tgt) {
+               dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
+                       scsi_id);
+               return -ENOMEM;
+       }
+
+       tgt->scsi_id = scsi_id;
+       tgt->new_scsi_id = scsi_id;
+       tgt->vhost = vhost;
+       tgt->need_login = 1;
+       kref_init(&tgt->kref);
+       ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       list_add_tail(&tgt->queue, &vhost->targets);
+
+unlock_out:
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       return 0;
+}
+
+/**
+ * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
+ * @vhost:             ibmvfc host struct
+ *
+ * Returns:
+ *     0 on success / other on failure
+ **/
+static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
+{
+       int i, rc;
+
+       for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
+               rc = ibmvfc_alloc_target(vhost,
+                                        vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
+
+       return rc;
+}
+
+/**
+ * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
+       u32 mad_status = rsp->common.status;
+
+       switch (mad_status) {
+       case IBMVFC_MAD_SUCCESS:
+               ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
+               vhost->num_targets = rsp->num_written;
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
+               break;
+       case IBMVFC_MAD_FAILED:
+               dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+               ibmvfc_retry_host_init(vhost);
+               break;
+       case IBMVFC_MAD_DRIVER_FAILED:
+               break;
+       default:
+               dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               break;
+       }
+
+       ibmvfc_free_event(evt);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_discover_targets - Send Discover Targets MAD
+ * @vhost:     ibmvfc host struct
+ *
+ **/
+static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_discover_targets *mad;
+       struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+       ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+       mad = &evt->iu.discover_targets;
+       memset(mad, 0, sizeof(*mad));
+       mad->common.version = 1;
+       mad->common.opcode = IBMVFC_DISC_TARGETS;
+       mad->common.length = sizeof(*mad);
+       mad->bufflen = vhost->disc_buf_sz;
+       mad->buffer.va = vhost->disc_buf_dma;
+       mad->buffer.len = vhost->disc_buf_sz;
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+       if (!ibmvfc_send_event(evt, vhost, default_timeout))
+               ibmvfc_dbg(vhost, "Sent discover targets\n");
+       else
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_npiv_login_done - Completion handler for NPIV Login
+ * @evt:       ibmvfc event struct
+ *
+ **/
+static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       u32 mad_status = evt->xfer_iu->npiv_login.common.status;
+       struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
+       unsigned int npiv_max_sectors;
+
+       switch (mad_status) {
+       case IBMVFC_MAD_SUCCESS:
+               ibmvfc_free_event(evt);
+               break;
+       case IBMVFC_MAD_FAILED:
+               dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+               if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+                       ibmvfc_retry_host_init(vhost);
+               else
+                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               ibmvfc_free_event(evt);
+               return;
+       case IBMVFC_MAD_CRQ_ERROR:
+               ibmvfc_retry_host_init(vhost);
+               /* fall through */
+       case IBMVFC_MAD_DRIVER_FAILED:
+               ibmvfc_free_event(evt);
+               return;
+       default:
+               dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               ibmvfc_free_event(evt);
+               return;
+       }
+
+       vhost->client_migrated = 0;
+
+       if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
+               dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
+                       rsp->flags);
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               wake_up(&vhost->work_wait_q);
+               return;
+       }
+
+       if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
+               dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
+                       rsp->max_cmds);
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               wake_up(&vhost->work_wait_q);
+               return;
+       }
+
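+       /* max_dma_len is in bytes; convert it to 512-byte sectors and cap the
+        * result at the driver maximum.
+        */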
+       npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
+       dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
+                rsp->partition_name, rsp->device_name, rsp->port_loc_code,
+                rsp->drc_name, npiv_max_sectors);
+
+       fc_host_fabric_name(vhost->host) = rsp->node_name;
+       fc_host_node_name(vhost->host) = rsp->node_name;
+       fc_host_port_name(vhost->host) = rsp->port_name;
+       fc_host_port_id(vhost->host) = rsp->scsi_id;
+       fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
+       fc_host_supported_classes(vhost->host) = 0;
+       if (rsp->service_parms.class1_parms[0] & 0x80000000)
+               fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
+       if (rsp->service_parms.class2_parms[0] & 0x80000000)
+               fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
+       if (rsp->service_parms.class3_parms[0] & 0x80000000)
+               fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
+       fc_host_maxframe_size(vhost->host) =
+               rsp->service_parms.common.bb_rcv_sz & 0x0fff;
+
+       vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
+       vhost->host->max_sectors = npiv_max_sectors;
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_npiv_login - Sends NPIV login
+ * @vhost:     ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_npiv_login_mad *mad;
+       struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+       ibmvfc_gather_partition_info(vhost);
+       ibmvfc_set_login_info(vhost);
+       ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+
+       memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
+       mad = &evt->iu.npiv_login;
+       memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
+       mad->common.version = 1;
+       mad->common.opcode = IBMVFC_NPIV_LOGIN;
+       mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
+       mad->buffer.va = vhost->login_buf_dma;
+       mad->buffer.len = sizeof(*vhost->login_buf);
+
+       memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+       vhost->async_crq.cur = 0;
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+       if (!ibmvfc_send_event(evt, vhost, default_timeout))
+               ibmvfc_dbg(vhost, "Sent NPIV login\n");
+       else
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_dev_init_to_do - Is there target initialization work to do?
+ * @vhost:             ibmvfc host struct
+ *
+ * Returns:
+ *     1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_target *tgt;
+
+       list_for_each_entry(tgt, &vhost->targets, queue) {
+               if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
+                   tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+                       return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
+ * @vhost:             ibmvfc host struct
+ *
+ * Returns:
+ *     1 if work to do / 0 if not
+ **/
+static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_target *tgt;
+
+       if (kthread_should_stop())
+               return 1;
+       switch (vhost->action) {
+       case IBMVFC_HOST_ACTION_NONE:
+       case IBMVFC_HOST_ACTION_INIT_WAIT:
+               return 0;
+       case IBMVFC_HOST_ACTION_TGT_INIT:
+       case IBMVFC_HOST_ACTION_QUERY_TGTS:
+               if (vhost->discovery_threads == disc_threads)
+                       return 0;
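+               /* There is work if any target still needs its next init step
+                * started; if the only remaining targets are waiting on
+                * responses, there is nothing for the work thread to do yet.
+                */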
+               list_for_each_entry(tgt, &vhost->targets, queue)
+                       if (tgt->action == IBMVFC_TGT_ACTION_INIT)
+                               return 1;
+               list_for_each_entry(tgt, &vhost->targets, queue)
+                       if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+                               return 0;
+               return 1;
+       case IBMVFC_HOST_ACTION_INIT:
+       case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+       case IBMVFC_HOST_ACTION_TGT_ADD:
+       case IBMVFC_HOST_ACTION_TGT_DEL:
+       case IBMVFC_HOST_ACTION_QUERY:
+       default:
+               break;
+       }
+
+       return 1;
+}
+
+/**
+ * ibmvfc_work_to_do - Is there task level work to do?
+ * @vhost:             ibmvfc host struct
+ *
+ * Returns:
+ *     1 if work to do / 0 if not
+ **/
+static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       rc = __ibmvfc_work_to_do(vhost);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       return rc;
+}
+
+/**
+ * ibmvfc_log_ae - Log async events if necessary
+ * @vhost:             ibmvfc host struct
+ * @events:            events to log
+ *
+ **/
+static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
+{
+       if (events & IBMVFC_AE_RSCN)
+               fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
+       if ((events & IBMVFC_AE_LINKDOWN) &&
+           vhost->state >= IBMVFC_HALTED)
+               fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+       if ((events & IBMVFC_AE_LINKUP) &&
+           vhost->state == IBMVFC_INITIALIZING)
+               fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+}
+
+/**
+ * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
+ * @tgt:               ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct fc_rport *rport;
+       unsigned long flags;
+
+       tgt_dbg(tgt, "Adding rport\n");
+       rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       tgt->rport = rport;
+       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       if (rport) {
+               tgt_dbg(tgt, "rport add succeeded\n");
+               rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
+               rport->supported_classes = 0;
+               if (tgt->service_parms.class1_parms[0] & 0x80000000)
+                       rport->supported_classes |= FC_COS_CLASS1;
+               if (tgt->service_parms.class2_parms[0] & 0x80000000)
+                       rport->supported_classes |= FC_COS_CLASS2;
+               if (tgt->service_parms.class3_parms[0] & 0x80000000)
+                       rport->supported_classes |= FC_COS_CLASS3;
+       } else
+               tgt_dbg(tgt, "rport add failed\n");
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_do_work - Do task level work
+ * @vhost:             ibmvfc host struct
+ *
+ **/
+static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_target *tgt;
+       unsigned long flags;
+       struct fc_rport *rport;
+
+       ibmvfc_log_ae(vhost, vhost->events_to_log);
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       vhost->events_to_log = 0;
+       switch (vhost->action) {
+       case IBMVFC_HOST_ACTION_NONE:
+       case IBMVFC_HOST_ACTION_INIT_WAIT:
+               break;
+       case IBMVFC_HOST_ACTION_INIT:
+               BUG_ON(vhost->state != IBMVFC_INITIALIZING);
+               vhost->job_step(vhost);
+               break;
+       case IBMVFC_HOST_ACTION_QUERY:
+               list_for_each_entry(tgt, &vhost->targets, queue)
+                       ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
+               break;
+       case IBMVFC_HOST_ACTION_QUERY_TGTS:
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+                               tgt->job_step(tgt);
+                               break;
+                       }
+               }
+
+               if (!ibmvfc_dev_init_to_do(vhost))
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+               break;
+       case IBMVFC_HOST_ACTION_TGT_DEL:
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+                               tgt_dbg(tgt, "Deleting rport\n");
+                               rport = tgt->rport;
+                               tgt->rport = NULL;
+                               list_del(&tgt->queue);
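+                               /* Drop the host lock before calling into the
+                                * FC transport, then return so the work thread
+                                * re-evaluates the host state from the top.
+                                */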
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               if (rport)
+                                       fc_remote_port_delete(rport);
+                               kref_put(&tgt->kref, ibmvfc_release_tgt);
+                               return;
+                       }
+               }
+
+               if (vhost->state == IBMVFC_INITIALIZING) {
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+                       vhost->job_step = ibmvfc_discover_targets;
+               } else {
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                       scsi_unblock_requests(vhost->host);
+                       wake_up(&vhost->init_wait_q);
+                       return;
+               }
+               break;
+       case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               ibmvfc_alloc_targets(vhost);
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               break;
+       case IBMVFC_HOST_ACTION_TGT_INIT:
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+                               tgt->job_step(tgt);
+                               break;
+                       }
+               }
+
+               if (!ibmvfc_dev_init_to_do(vhost)) {
+                       ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+                       vhost->init_retries = 0;
+                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                       scsi_unblock_requests(vhost->host);
+                       return;
+               }
+               break;
+       case IBMVFC_HOST_ACTION_TGT_ADD:
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               ibmvfc_tgt_add_rport(tgt);
+                               return;
+                       } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+                               tgt_dbg(tgt, "Deleting rport\n");
+                               rport = tgt->rport;
+                               tgt->rport = NULL;
+                               list_del(&tgt->queue);
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               if (rport)
+                                       fc_remote_port_delete(rport);
+                               kref_put(&tgt->kref, ibmvfc_release_tgt);
+                               return;
+                       }
+               }
+
+               if (vhost->reinit) {
+                       vhost->reinit = 0;
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+               } else {
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+                       wake_up(&vhost->init_wait_q);
+               }
+               break;
+       default:
+               break;
+       }
+
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_work - Do task level work
+ * @data:              ibmvfc host struct
+ *
+ * Returns:
+ *     zero
+ **/
+static int ibmvfc_work(void *data)
+{
+       struct ibmvfc_host *vhost = data;
+       int rc;
+
+       set_user_nice(current, -20);
+
+       while (1) {
+               rc = wait_event_interruptible(vhost->work_wait_q,
+                                             ibmvfc_work_to_do(vhost));
+
+               BUG_ON(rc);
+
+               if (kthread_should_stop())
+                       break;
+
+               ibmvfc_do_work(vhost);
+       }
+
+       ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
+       return 0;
+}
+
+/**
+ * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
+ * @vhost:     ibmvfc host struct
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ *
+ * Return value:
+ *     zero on success / other on failure
+ **/
+static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
+{
+       int rc, retrc = -ENOMEM;
+       struct device *dev = vhost->dev;
+       struct vio_dev *vdev = to_vio_dev(dev);
+       struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+       ENTER;
+       crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
+
+       if (!crq->msgs)
+               return -ENOMEM;
+
+       crq->size = PAGE_SIZE / sizeof(*crq->msgs);
+       crq->msg_token = dma_map_single(dev, crq->msgs,
+                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(crq->msg_token))
+               goto map_failed;
+
+       retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+                                       crq->msg_token, PAGE_SIZE);
+
+       if (rc == H_RESOURCE)
+               /* maybe kexecing and resource is busy. try a reset */
+               retrc = rc = ibmvfc_reset_crq(vhost);
+
+       if (rc == H_CLOSED)
+               dev_warn(dev, "Partner adapter not ready\n");
+       else if (rc) {
+               dev_warn(dev, "Error %d opening adapter\n", rc);
+               goto reg_crq_failed;
+       }
+
+       retrc = 0;
+
+       if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
+               dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
+               retrc = rc;
+               goto req_irq_failed;
+       }
+
+       if ((rc = vio_enable_interrupts(vdev))) {
+               dev_err(dev, "Error %d enabling interrupts\n", rc);
+               retrc = rc;
+               goto req_irq_failed;
+       }
+
+       crq->cur = 0;
+       LEAVE;
+       return retrc;
+
+req_irq_failed:
+       do {
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_crq_failed:
+       dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+map_failed:
+       free_page((unsigned long)crq->msgs);
+       return retrc;
+}
+
+/**
+ * ibmvfc_free_mem - Free memory for vhost
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+
+       ENTER;
+       mempool_destroy(vhost->tgt_pool);
+       kfree(vhost->trace);
+       dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
+                         vhost->disc_buf_dma);
+       dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
+                         vhost->login_buf, vhost->login_buf_dma);
+       dma_pool_destroy(vhost->sg_pool);
+       dma_unmap_single(vhost->dev, async_q->msg_token,
+                        async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+       free_page((unsigned long)async_q->msgs);
+       LEAVE;
+}
+
+/**
+ * ibmvfc_alloc_mem - Allocate memory for vhost
+ * @vhost:     ibmvfc host struct
+ *
+ * Return value:
+ *     0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+       struct device *dev = vhost->dev;
+
+       ENTER;
+       async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
+       if (!async_q->msgs) {
+               dev_err(dev, "Couldn't allocate async queue.\n");
+               goto nomem;
+       }
+
+       async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
+       async_q->msg_token = dma_map_single(dev, async_q->msgs,
+                                           async_q->size * sizeof(*async_q->msgs),
+                                           DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(async_q->msg_token)) {
+               dev_err(dev, "Failed to map async queue\n");
+               goto free_async_crq;
+       }
+
+       vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
+                                        SG_ALL * sizeof(struct srp_direct_buf),
+                                        sizeof(struct srp_direct_buf), 0);
+
+       if (!vhost->sg_pool) {
+               dev_err(dev, "Failed to allocate sg pool\n");
+               goto unmap_async_crq;
+       }
+
+       vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
+                                             &vhost->login_buf_dma, GFP_KERNEL);
+
+       if (!vhost->login_buf) {
+               dev_err(dev, "Couldn't allocate NPIV login buffer\n");
+               goto free_sg_pool;
+       }
+
+       vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+       vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
+                                            &vhost->disc_buf_dma, GFP_KERNEL);
+
+       if (!vhost->disc_buf) {
+               dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+               goto free_login_buffer;
+       }
+
+       vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
+                              sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+
+       if (!vhost->trace)
+               goto free_disc_buffer;
+
+       vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
+                                                     sizeof(struct ibmvfc_target));
+
+       if (!vhost->tgt_pool) {
+               dev_err(dev, "Couldn't allocate target memory pool\n");
+               goto free_trace;
+       }
+
+       LEAVE;
+       return 0;
+
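+/* Failure path: release everything allocated so far, in reverse order */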
+free_trace:
+       kfree(vhost->trace);
+free_disc_buffer:
+       dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
+                         vhost->disc_buf_dma);
+free_login_buffer:
+       dma_free_coherent(dev, sizeof(*vhost->login_buf),
+                         vhost->login_buf, vhost->login_buf_dma);
+free_sg_pool:
+       dma_pool_destroy(vhost->sg_pool);
+unmap_async_crq:
+       dma_unmap_single(dev, async_q->msg_token,
+                        async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+free_async_crq:
+       free_page((unsigned long)async_q->msgs);
+nomem:
+       LEAVE;
+       return -ENOMEM;
+}
+
+/**
+ * ibmvfc_probe - Adapter hot plug add entry point
+ * @vdev:      vio device struct
+ * @id:        vio device id struct
+ *
+ * Return value:
+ *     0 on success / non-zero on failure
+ **/
+static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+       struct ibmvfc_host *vhost;
+       struct Scsi_Host *shost;
+       struct device *dev = &vdev->dev;
+       int rc = -ENOMEM;
+
+       ENTER;
+       shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
+       if (!shost) {
+               dev_err(dev, "Couldn't allocate host data\n");
+               goto out;
+       }
+
+       shost->transportt = ibmvfc_transport_template;
+       shost->can_queue = max_requests;
+       shost->max_lun = max_lun;
+       shost->max_id = max_targets;
+       shost->max_sectors = IBMVFC_MAX_SECTORS;
+       shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
+       shost->unique_id = shost->host_no;
+
+       vhost = shost_priv(shost);
+       INIT_LIST_HEAD(&vhost->sent);
+       INIT_LIST_HEAD(&vhost->free);
+       INIT_LIST_HEAD(&vhost->targets);
+       sprintf(vhost->name, IBMVFC_NAME);
+       vhost->host = shost;
+       vhost->dev = dev;
+       vhost->partition_number = -1;
+       vhost->log_level = log_level;
+       strcpy(vhost->partition_name, "UNKNOWN");
+       init_waitqueue_head(&vhost->work_wait_q);
+       init_waitqueue_head(&vhost->init_wait_q);
+
+       if ((rc = ibmvfc_alloc_mem(vhost)))
+               goto free_scsi_host;
+
+       vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
+                                        shost->host_no);
+
+       if (IS_ERR(vhost->work_thread)) {
+               dev_err(dev, "Couldn't create kernel thread: %ld\n",
+                       PTR_ERR(vhost->work_thread));
+               goto free_host_mem;
+       }
+
+       if ((rc = ibmvfc_init_crq(vhost))) {
+               dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+               goto kill_kthread;
+       }
+
+       if ((rc = ibmvfc_init_event_pool(vhost))) {
+               dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
+               goto release_crq;
+       }
+
+       if ((rc = scsi_add_host(shost, dev)))
+               goto release_event_pool;
+
+       if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
+                                          &ibmvfc_trace_attr))) {
+               dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
+               goto remove_shost;
+       }
+
+       dev_set_drvdata(dev, vhost);
+       spin_lock(&ibmvfc_driver_lock);
+       list_add_tail(&vhost->queue, &ibmvfc_head);
+       spin_unlock(&ibmvfc_driver_lock);
+
+       ibmvfc_send_crq_init(vhost);
+       scsi_scan_host(shost);
+       return 0;
+
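+/* Unwind the probe steps in reverse order of initialization */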
+remove_shost:
+       scsi_remove_host(shost);
+release_event_pool:
+       ibmvfc_free_event_pool(vhost);
+release_crq:
+       ibmvfc_release_crq_queue(vhost);
+kill_kthread:
+       kthread_stop(vhost->work_thread);
+free_host_mem:
+       ibmvfc_free_mem(vhost);
+free_scsi_host:
+       scsi_host_put(shost);
+out:
+       LEAVE;
+       return rc;
+}
+
+/**
+ * ibmvfc_remove - Adapter hot plug remove entry point
+ * @vdev:      vio device struct
+ *
+ * Return value:
+ *     0
+ **/
+static int ibmvfc_remove(struct vio_dev *vdev)
+{
+       struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+       unsigned long flags;
+
+       ENTER;
+       ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+       kthread_stop(vhost->work_thread);
+       fc_remove_host(vhost->host);
+       scsi_remove_host(vhost->host);
+       ibmvfc_release_crq_queue(vhost);
+
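+       /* Fail outstanding commands and free the event pool under the host lock */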
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       ibmvfc_purge_requests(vhost, DID_ERROR);
+       ibmvfc_free_event_pool(vhost);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+       ibmvfc_free_mem(vhost);
+       spin_lock(&ibmvfc_driver_lock);
+       list_del(&vhost->queue);
+       spin_unlock(&ibmvfc_driver_lock);
+       scsi_host_put(vhost->host);
+       LEAVE;
+       return 0;
+}
+
+static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+       {"fcp", "IBM,vfc-client"},
+       { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+
+static struct vio_driver ibmvfc_driver = {
+       .id_table = ibmvfc_device_table,
+       .probe = ibmvfc_probe,
+       .remove = ibmvfc_remove,
+       .driver = {
+               .name = IBMVFC_NAME,
+               .owner = THIS_MODULE,
+       }
+};
+
+static struct fc_function_template ibmvfc_transport_functions = {
+       .show_host_fabric_name = 1,
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+       .show_host_supported_classes = 1,
+       .show_host_port_type = 1,
+       .show_host_port_id = 1,
+
+       .get_host_port_state = ibmvfc_get_host_port_state,
+       .show_host_port_state = 1,
+
+       .get_host_speed = ibmvfc_get_host_speed,
+       .show_host_speed = 1,
+
+       .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
+       .terminate_rport_io = ibmvfc_terminate_rport_io,
+
+       .show_rport_maxframe_size = 1,
+       .show_rport_supported_classes = 1,
+
+       .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
+       .show_rport_dev_loss_tmo = 1,
+
+       .get_starget_node_name = ibmvfc_get_starget_node_name,
+       .show_starget_node_name = 1,
+
+       .get_starget_port_name = ibmvfc_get_starget_port_name,
+       .show_starget_port_name = 1,
+
+       .get_starget_port_id = ibmvfc_get_starget_port_id,
+       .show_starget_port_id = 1,
+};
+
+/**
+ * ibmvfc_module_init - Initialize the ibmvfc module
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int __init ibmvfc_module_init(void)
+{
+       int rc;
+
+       if (!firmware_has_feature(FW_FEATURE_VIO))
+               return -ENODEV;
+
+       printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
+              IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+
+       ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
+       if (!ibmvfc_transport_template)
+               return -ENOMEM;
+
+       rc = vio_register_driver(&ibmvfc_driver);
+       if (rc)
+               fc_release_transport(ibmvfc_transport_template);
+       return rc;
+}
+
+/**
+ * ibmvfc_module_exit - Teardown the ibmvfc module
+ *
+ * Return value:
+ *     nothing
+ **/
+static void __exit ibmvfc_module_exit(void)
+{
+       vio_unregister_driver(&ibmvfc_driver);
+       fc_release_transport(ibmvfc_transport_template);
+}
+
+module_init(ibmvfc_module_init);
+module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644 (file)
index 0000000..057f3c0
--- /dev/null
@@ -0,0 +1,682 @@
+/*
+ * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _IBMVFC_H
+#define _IBMVFC_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "viosrp.h"
+
+#define IBMVFC_NAME    "ibmvfc"
+#define IBMVFC_DRIVER_VERSION          "1.0.0"
+#define IBMVFC_DRIVER_DATE             "(July 1, 2008)"
+
+#define IBMVFC_DEFAULT_TIMEOUT 15
+#define IBMVFC_INIT_TIMEOUT            30
+#define IBMVFC_MAX_REQUESTS_DEFAULT    100
+
+#define IBMVFC_DEBUG                   0
+#define IBMVFC_MAX_TARGETS             1024
+#define IBMVFC_MAX_LUN                 0xffffffff
+#define IBMVFC_MAX_SECTORS             0xffffu
+#define IBMVFC_MAX_DISC_THREADS        4
+#define IBMVFC_TGT_MEMPOOL_SZ          64
+#define IBMVFC_MAX_CMDS_PER_LUN        64
+#define IBMVFC_MAX_INIT_RETRIES        3
+#define IBMVFC_DEV_LOSS_TMO            (5 * 60)
+#define IBMVFC_DEFAULT_LOG_LEVEL       2
+#define IBMVFC_MAX_CDB_LEN             16
+
+/*
+ * Ensure we have resources for ERP and initialization:
+ * 1 for ERP
+ * 1 for initialization
+ * 1 for each discovery thread
+ */
+#define IBMVFC_NUM_INTERNAL_REQ        (1 + 1 + disc_threads)
+
+#define IBMVFC_MAD_SUCCESS             0x00
+#define IBMVFC_MAD_NOT_SUPPORTED       0xF1
+#define IBMVFC_MAD_FAILED              0xF7
+#define IBMVFC_MAD_DRIVER_FAILED       0xEE
+#define IBMVFC_MAD_CRQ_ERROR           0xEF
+
+enum ibmvfc_crq_valid {
+       IBMVFC_CRQ_CMD_RSP              = 0x80,
+       IBMVFC_CRQ_INIT_RSP             = 0xC0,
+       IBMVFC_CRQ_XPORT_EVENT          = 0xFF,
+};
+
+enum ibmvfc_crq_format {
+       IBMVFC_CRQ_INIT                 = 0x01,
+       IBMVFC_CRQ_INIT_COMPLETE        = 0x02,
+       IBMVFC_PARTITION_MIGRATED       = 0x06,
+};
+
+enum ibmvfc_cmd_status_flags {
+       IBMVFC_FABRIC_MAPPED            = 0x0001,
+       IBMVFC_VIOS_FAILURE             = 0x0002,
+       IBMVFC_FC_FAILURE                       = 0x0004,
+       IBMVFC_FC_SCSI_ERROR            = 0x0008,
+       IBMVFC_HW_EVENT_LOGGED          = 0x0010,
+       IBMVFC_VIOS_LOGGED              = 0x0020,
+};
+
+enum ibmvfc_fabric_mapped_errors {
+       IBMVFC_UNABLE_TO_ESTABLISH      = 0x0001,
+       IBMVFC_XPORT_FAULT              = 0x0002,
+       IBMVFC_CMD_TIMEOUT              = 0x0003,
+       IBMVFC_ENETDOWN                 = 0x0004,
+       IBMVFC_HW_FAILURE                       = 0x0005,
+       IBMVFC_LINK_DOWN_ERR            = 0x0006,
+       IBMVFC_LINK_DEAD_ERR            = 0x0007,
+       IBMVFC_UNABLE_TO_REGISTER       = 0x0008,
+       IBMVFC_XPORT_BUSY                       = 0x000A,
+       IBMVFC_XPORT_DEAD                       = 0x000B,
+       IBMVFC_CONFIG_ERROR             = 0x000C,
+       IBMVFC_NAME_SERVER_FAIL         = 0x000D,
+       IBMVFC_LINK_HALTED              = 0x000E,
+       IBMVFC_XPORT_GENERAL            = 0x8000,
+};
+
+enum ibmvfc_vios_errors {
+       IBMVFC_CRQ_FAILURE                      = 0x0001,
+       IBMVFC_SW_FAILURE                               = 0x0002,
+       IBMVFC_INVALID_PARAMETER                = 0x0003,
+       IBMVFC_MISSING_PARAMETER                = 0x0004,
+       IBMVFC_HOST_IO_BUS                      = 0x0005,
+       IBMVFC_TRANS_CANCELLED                  = 0x0006,
+       IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
+       IBMVFC_INSUFFICIENT_RESOURCE            = 0x0008,
+       IBMVFC_COMMAND_FAILED                   = 0x8000,
+};
+
+enum ibmvfc_mad_types {
+       IBMVFC_NPIV_LOGIN               = 0x0001,
+       IBMVFC_DISC_TARGETS     = 0x0002,
+       IBMVFC_PORT_LOGIN               = 0x0004,
+       IBMVFC_PROCESS_LOGIN    = 0x0008,
+       IBMVFC_QUERY_TARGET     = 0x0010,
+       IBMVFC_IMPLICIT_LOGOUT  = 0x0040,
+       IBMVFC_TMF_MAD          = 0x0100,
+};
+
+struct ibmvfc_mad_common {
+       u32 version;
+       u32 reserved;
+       u32 opcode;
+       u16 status;
+       u16 length;
+       u64 tag;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_npiv_login_mad {
+       struct ibmvfc_mad_common common;
+       struct srp_direct_buf buffer;
+}__attribute__((packed, aligned (8)));
+
+#define IBMVFC_MAX_NAME 256
+
+struct ibmvfc_npiv_login {
+       u32 ostype;
+#define IBMVFC_OS_LINUX        0x02
+       u32 pad;
+       u64 max_dma_len;
+       u32 max_payload;
+       u32 max_response;
+       u32 partition_num;
+       u32 vfc_frame_version;
+       u16 fcp_version;
+       u16 flags;
+#define IBMVFC_CLIENT_MIGRATED 0x01
+#define IBMVFC_FLUSH_ON_HALT           0x02
+       u32 max_cmds;
+       u64 capabilities;
+#define IBMVFC_CAN_MIGRATE             0x01
+       u64 node_name;
+       struct srp_direct_buf async;
+       u8 partition_name[IBMVFC_MAX_NAME];
+       u8 device_name[IBMVFC_MAX_NAME];
+       u8 drc_name[IBMVFC_MAX_NAME];
+       u64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_common_svc_parms {
+       u16 fcph_version;
+       u16 b2b_credit;
+       u16 features;
+       u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
+       u32 ratov;
+       u32 edtov;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_service_parms {
+       struct ibmvfc_common_svc_parms common;
+       u8 port_name[8];
+       u8 node_name[8];
+       u32 class1_parms[4];
+       u32 class2_parms[4];
+       u32 class3_parms[4];
+       u32 obsolete[4];
+       u32 vendor_version[4];
+       u32 services_avail[2];
+       u32 ext_len;
+       u32 reserved[30];
+       u32 clk_sync_qos[2];
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_npiv_login_resp {
+       u32 version;
+       u16 status;
+       u16 error;
+       u32 flags;
+#define IBMVFC_NATIVE_FC               0x01
+#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
+       u32 reserved;
+       u64 capabilites;
+       u32 max_cmds;
+       u32 scsi_id_sz;
+       u64 max_dma_len;
+       u64 scsi_id;
+       u64 port_name;
+       u64 node_name;
+       u64 link_speed;
+       u8 partition_name[IBMVFC_MAX_NAME];
+       u8 device_name[IBMVFC_MAX_NAME];
+       u8 port_loc_code[IBMVFC_MAX_NAME];
+       u8 drc_name[IBMVFC_MAX_NAME];
+       struct ibmvfc_service_parms service_parms;
+       u64 reserved2;
+}__attribute__((packed, aligned (8)));
+
+union ibmvfc_npiv_login_data {
+       struct ibmvfc_npiv_login login;
+       struct ibmvfc_npiv_login_resp resp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_discover_targets_buf {
+       u32 scsi_id[1];
+#define IBMVFC_DISC_TGT_SCSI_ID_MASK   0x00ffffff
+};
+
+struct ibmvfc_discover_targets {
+       struct ibmvfc_mad_common common;
+       struct srp_direct_buf buffer;
+       u32 flags;
+       u16 status;
+       u16 error;
+       u32 bufflen;
+       u32 num_avail;
+       u32 num_written;
+       u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fc_reason {
+       IBMVFC_INVALID_ELS_CMD_CODE     = 0x01,
+       IBMVFC_INVALID_VERSION          = 0x02,
+       IBMVFC_LOGICAL_ERROR            = 0x03,
+       IBMVFC_INVALID_CT_IU_SIZE       = 0x04,
+       IBMVFC_LOGICAL_BUSY             = 0x05,
+       IBMVFC_PROTOCOL_ERROR           = 0x07,
+       IBMVFC_UNABLE_TO_PERFORM_REQ    = 0x09,
+       IBMVFC_CMD_NOT_SUPPORTED        = 0x0B,
+       IBMVFC_SERVER_NOT_AVAIL         = 0x0D,
+       IBMVFC_CMD_IN_PROGRESS          = 0x0E,
+       IBMVFC_VENDOR_SPECIFIC          = 0xFF,
+};
+
+enum ibmvfc_fc_type {
+       IBMVFC_FABRIC_REJECT    = 0x01,
+       IBMVFC_PORT_REJECT      = 0x02,
+       IBMVFC_LS_REJECT                = 0x03,
+       IBMVFC_FABRIC_BUSY      = 0x04,
+       IBMVFC_PORT_BUSY                = 0x05,
+       IBMVFC_BASIC_REJECT     = 0x06,
+};
+
+enum ibmvfc_gs_explain {
+       IBMVFC_PORT_NAME_NOT_REG        = 0x02,
+};
+
+struct ibmvfc_port_login {
+       struct ibmvfc_mad_common common;
+       u64 scsi_id;
+       u16 reserved;
+       u16 fc_service_class;
+       u32 blksz;
+       u32 hdr_per_blk;
+       u16 status;
+       u16 error;              /* also fc_reason */
+       u16 fc_explain;
+       u16 fc_type;
+       u32 reserved2;
+       struct ibmvfc_service_parms service_parms;
+       struct ibmvfc_service_parms service_parms_change;
+       u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_prli_svc_parms {
+       u8 type;
+#define IBMVFC_SCSI_FCP_TYPE           0x08
+       u8 type_ext;
+       u16 flags;
+#define IBMVFC_PRLI_ORIG_PA_VALID                      0x8000
+#define IBMVFC_PRLI_RESP_PA_VALID                      0x4000
+#define IBMVFC_PRLI_EST_IMG_PAIR                       0x2000
+       u32 orig_pa;
+       u32 resp_pa;
+       u32 service_parms;
+#define IBMVFC_PRLI_TASK_RETRY                 0x00000200
+#define IBMVFC_PRLI_RETRY                              0x00000100
+#define IBMVFC_PRLI_DATA_OVERLAY                       0x00000040
+#define IBMVFC_PRLI_INITIATOR_FUNC                     0x00000020
+#define IBMVFC_PRLI_TARGET_FUNC                        0x00000010
+#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
+#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED   0x00000001
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_process_login {
+       struct ibmvfc_mad_common common;
+       u64 scsi_id;
+       struct ibmvfc_prli_svc_parms parms;
+       u8 reserved[48];
+       u16 status;
+       u16 error;                      /* also fc_reason */
+       u32 reserved2;
+       u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_query_tgt {
+       struct ibmvfc_mad_common common;
+       u64 wwpn;
+       u64 scsi_id;
+       u16 status;
+       u16 error;
+       u16 fc_explain;
+       u16 fc_type;
+       u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_implicit_logout {
+       struct ibmvfc_mad_common common;
+       u64 old_scsi_id;
+       u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_tmf {
+       struct ibmvfc_mad_common common;
+       u64 scsi_id;
+       struct scsi_lun lun;
+       u32 flags;
+#define IBMVFC_TMF_ABORT_TASK          0x02
+#define IBMVFC_TMF_ABORT_TASK_SET      0x04
+#define IBMVFC_TMF_LUN_RESET           0x10
+#define IBMVFC_TMF_TGT_RESET           0x20
+#define IBMVFC_TMF_LUA_VALID           0x40
+       u32 cancel_key;
+       u32 my_cancel_key;
+#define IBMVFC_TMF_CANCEL_KEY          0x80000000
+       u32 pad;
+       u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fcp_rsp_info_codes {
+       RSP_NO_FAILURE          = 0x00,
+       RSP_TMF_REJECTED                = 0x04,
+       RSP_TMF_FAILED          = 0x05,
+       RSP_TMF_INVALID_LUN     = 0x09,
+};
+
+struct ibmvfc_fcp_rsp_info {
+       u16 reserved;
+       u8 rsp_code;
+       u8 reserved2[4];
+}__attribute__((packed, aligned (2)));
+
+enum ibmvfc_fcp_rsp_flags {
+       FCP_BIDI_RSP                    = 0x80,
+       FCP_BIDI_READ_RESID_UNDER       = 0x40,
+       FCP_BIDI_READ_RESID_OVER        = 0x20,
+       FCP_CONF_REQ                    = 0x10,
+       FCP_RESID_UNDER                 = 0x08,
+       FCP_RESID_OVER                  = 0x04,
+       FCP_SNS_LEN_VALID                       = 0x02,
+       FCP_RSP_LEN_VALID                       = 0x01,
+};
+
+union ibmvfc_fcp_rsp_data {
+       struct ibmvfc_fcp_rsp_info info;
+       u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_fcp_rsp {
+       u64 reserved;
+       u16 retry_delay_timer;
+       u8 flags;
+       u8 scsi_status;
+       u32 fcp_resid;
+       u32 fcp_sense_len;
+       u32 fcp_rsp_len;
+       union ibmvfc_fcp_rsp_data data;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_cmd_flags {
+       IBMVFC_SCATTERLIST      = 0x0001,
+       IBMVFC_NO_MEM_DESC      = 0x0002,
+       IBMVFC_READ                     = 0x0004,
+       IBMVFC_WRITE            = 0x0008,
+       IBMVFC_TMF                      = 0x0080,
+       IBMVFC_CLASS_3_ERR      = 0x0100,
+};
+
+enum ibmvfc_fc_task_attr {
+       IBMVFC_SIMPLE_TASK      = 0x00,
+       IBMVFC_HEAD_OF_QUEUE    = 0x01,
+       IBMVFC_ORDERED_TASK     = 0x02,
+       IBMVFC_ACA_TASK         = 0x04,
+};
+
+enum ibmvfc_fc_tmf_flags {
+       IBMVFC_ABORT_TASK_SET   = 0x02,
+       IBMVFC_LUN_RESET                = 0x10,
+       IBMVFC_TARGET_RESET     = 0x20,
+};
+
+struct ibmvfc_fcp_cmd_iu {
+       struct scsi_lun lun;
+       u8 crn;
+       u8 pri_task_attr;
+       u8 tmf_flags;
+       u8 add_cdb_len;
+#define IBMVFC_RDDATA          0x02
+#define IBMVFC_WRDATA          0x01
+       u8 cdb[IBMVFC_MAX_CDB_LEN];
+       u32 xfer_len;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_cmd {
+       u64 task_tag;
+       u32 frame_type;
+       u32 payload_len;
+       u32 resp_len;
+       u32 adapter_resid;
+       u16 status;
+       u16 error;
+       u16 flags;
+       u16 response_flags;
+#define IBMVFC_ADAPTER_RESID_VALID     0x01
+       u32 cancel_key;
+       u32 exchange_id;
+       struct srp_direct_buf ext_func;
+       struct srp_direct_buf ioba;
+       struct srp_direct_buf resp;
+       u64 correlation;
+       u64 tgt_scsi_id;
+       u64 tag;
+       u64 reserved3[2];
+       struct ibmvfc_fcp_cmd_iu iu;
+       struct ibmvfc_fcp_rsp rsp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_trace_start_entry {
+       u32 xfer_len;
+}__attribute__((packed));
+
+struct ibmvfc_trace_end_entry {
+       u16 status;
+       u16 error;
+       u8 fcp_rsp_flags;
+       u8 rsp_code;
+       u8 scsi_status;
+       u8 reserved;
+}__attribute__((packed));
+
+struct ibmvfc_trace_entry {
+       struct ibmvfc_event *evt;
+       u32 time;
+       u32 scsi_id;
+       u32 lun;
+       u8 fmt;
+       u8 op_code;
+       u8 tmf_flags;
+       u8 type;
+#define IBMVFC_TRC_START       0x00
+#define IBMVFC_TRC_END         0xff
+       union {
+               struct ibmvfc_trace_start_entry start;
+               struct ibmvfc_trace_end_entry end;
+       } u;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_crq_formats {
+       IBMVFC_CMD_FORMAT               = 0x01,
+       IBMVFC_ASYNC_EVENT      = 0x02,
+       IBMVFC_MAD_FORMAT               = 0x04,
+};
+
+enum ibmvfc_async_event {
+       IBMVFC_AE_ELS_PLOGI             = 0x0001,
+       IBMVFC_AE_ELS_LOGO              = 0x0002,
+       IBMVFC_AE_ELS_PRLO              = 0x0004,
+       IBMVFC_AE_SCN_NPORT             = 0x0008,
+       IBMVFC_AE_SCN_GROUP             = 0x0010,
+       IBMVFC_AE_SCN_DOMAIN            = 0x0020,
+       IBMVFC_AE_SCN_FABRIC            = 0x0040,
+       IBMVFC_AE_LINK_UP                       = 0x0080,
+       IBMVFC_AE_LINK_DOWN             = 0x0100,
+       IBMVFC_AE_LINK_DEAD             = 0x0200,
+       IBMVFC_AE_HALT                  = 0x0400,
+       IBMVFC_AE_RESUME                        = 0x0800,
+       IBMVFC_AE_ADAPTER_FAILED        = 0x1000,
+};
+
+struct ibmvfc_crq {
+       u8 valid;
+       u8 format;
+       u8 reserved[6];
+       u64 ioba;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_crq_queue {
+       struct ibmvfc_crq *msgs;
+       int size, cur;
+       dma_addr_t msg_token;
+};
+
+struct ibmvfc_async_crq {
+       u8 valid;
+       u8 pad[3];
+       u32 pad2;
+       u64 event;
+       u64 scsi_id;
+       u64 wwpn;
+       u64 node_name;
+       u64 reserved;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_async_crq_queue {
+       struct ibmvfc_async_crq *msgs;
+       int size, cur;
+       dma_addr_t msg_token;
+};
+
+union ibmvfc_iu {
+       struct ibmvfc_mad_common mad_common;
+       struct ibmvfc_npiv_login_mad npiv_login;
+       struct ibmvfc_discover_targets discover_targets;
+       struct ibmvfc_port_login plogi;
+       struct ibmvfc_process_login prli;
+       struct ibmvfc_query_tgt query_tgt;
+       struct ibmvfc_implicit_logout implicit_logout;
+       struct ibmvfc_tmf tmf;
+       struct ibmvfc_cmd cmd;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_target_action {
+       IBMVFC_TGT_ACTION_NONE = 0,
+       IBMVFC_TGT_ACTION_INIT,
+       IBMVFC_TGT_ACTION_INIT_WAIT,
+       IBMVFC_TGT_ACTION_ADD_RPORT,
+       IBMVFC_TGT_ACTION_DEL_RPORT,
+};
+
+struct ibmvfc_target {
+       struct list_head queue;
+       struct ibmvfc_host *vhost;
+       u64 scsi_id;
+       u64 new_scsi_id;
+       struct fc_rport *rport;
+       int target_id;
+       enum ibmvfc_target_action action;
+       int need_login;
+       int init_retries;
+       struct ibmvfc_service_parms service_parms;
+       struct ibmvfc_service_parms service_parms_change;
+       struct fc_rport_identifiers ids;
+       void (*job_step) (struct ibmvfc_target *);
+       struct kref kref;
+};
+
+/* a unit of work for the hosting partition */
+struct ibmvfc_event {
+       struct list_head queue;
+       struct ibmvfc_host *vhost;
+       struct ibmvfc_target *tgt;
+       struct scsi_cmnd *cmnd;
+       atomic_t free;
+       union ibmvfc_iu *xfer_iu;
+       void (*done) (struct ibmvfc_event *);
+       struct ibmvfc_crq crq;
+       union ibmvfc_iu iu;
+       union ibmvfc_iu *sync_iu;
+       struct srp_direct_buf *ext_list;
+       dma_addr_t ext_list_token;
+       struct completion comp;
+       struct timer_list timer;
+};
+
+/* a pool of event structs for use */
+struct ibmvfc_event_pool {
+       struct ibmvfc_event *events;
+       u32 size;
+       union ibmvfc_iu *iu_storage;
+       dma_addr_t iu_token;
+};
+
+enum ibmvfc_host_action {
+       IBMVFC_HOST_ACTION_NONE = 0,
+       IBMVFC_HOST_ACTION_INIT,
+       IBMVFC_HOST_ACTION_INIT_WAIT,
+       IBMVFC_HOST_ACTION_QUERY,
+       IBMVFC_HOST_ACTION_QUERY_TGTS,
+       IBMVFC_HOST_ACTION_TGT_DEL,
+       IBMVFC_HOST_ACTION_ALLOC_TGTS,
+       IBMVFC_HOST_ACTION_TGT_INIT,
+       IBMVFC_HOST_ACTION_TGT_ADD,
+};
+
+enum ibmvfc_host_state {
+       IBMVFC_NO_CRQ = 0,
+       IBMVFC_INITIALIZING,
+       IBMVFC_ACTIVE,
+       IBMVFC_HALTED,
+       IBMVFC_LINK_DOWN,
+       IBMVFC_LINK_DEAD,
+       IBMVFC_HOST_OFFLINE,
+};
+
+struct ibmvfc_host {
+       char name[8];
+       struct list_head queue;
+       struct Scsi_Host *host;
+       enum ibmvfc_host_state state;
+       enum ibmvfc_host_action action;
+#define IBMVFC_NUM_TRACE_INDEX_BITS            8
+#define IBMVFC_NUM_TRACE_ENTRIES               (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_SIZE      (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
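+       /* trace_index is a bit-field, so it wraps at IBMVFC_NUM_TRACE_ENTRIES */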
+       struct ibmvfc_trace_entry *trace;
+       u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+       int num_targets;
+       struct list_head targets;
+       struct list_head sent;
+       struct list_head free;
+       struct device *dev;
+       struct ibmvfc_event_pool pool;
+       struct dma_pool *sg_pool;
+       mempool_t *tgt_pool;
+       struct ibmvfc_crq_queue crq;
+       struct ibmvfc_async_crq_queue async_crq;
+       struct ibmvfc_npiv_login login_info;
+       union ibmvfc_npiv_login_data *login_buf;
+       dma_addr_t login_buf_dma;
+       int disc_buf_sz;
+       int log_level;
+       struct ibmvfc_discover_targets_buf *disc_buf;
+       int task_set;
+       int init_retries;
+       int discovery_threads;
+       int client_migrated;
+       int reinit;
+       int events_to_log;
+#define IBMVFC_AE_LINKUP       0x0001
+#define IBMVFC_AE_LINKDOWN     0x0002
+#define IBMVFC_AE_RSCN         0x0004
+       dma_addr_t disc_buf_dma;
+       unsigned int partition_number;
+       char partition_name[97];
+       void (*job_step) (struct ibmvfc_host *);
+       struct task_struct *work_thread;
+       wait_queue_head_t init_wait_q;
+       wait_queue_head_t work_wait_q;
+};
+
+#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
+
+#define tgt_dbg(t, fmt, ...)                   \
+       DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+
+#define tgt_err(t, fmt, ...)           \
+       dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
+#define ibmvfc_dbg(vhost, ...) \
+       DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
+
+#define ibmvfc_log(vhost, level, ...) \
+       do { \
+               if (level >= (vhost)->log_level) \
+                       dev_err((vhost)->dev, ##__VA_ARGS__); \
+       } while (0)
+
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
+
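+/* The sysfs trace file is only created when CONFIG_SCSI_IBMVFC_TRACE is set */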
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ibmvfc_create_trace_file(kobj, attr) 0
+#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
+#endif
+
+#endif
index 72b9b2a..2a2f009 100644 (file)
@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
 #define BUG_ON(expr)
 #endif
 
+static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sht;
+static struct iscsi_transport iscsi_tcp_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
@@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  * must be called with session lock
  */
 static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+       struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct iscsi_r2t_info *r2t;
 
-       /* flush ctask's r2t queues */
-       while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
-               __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+       /* nothing to do for mgmt tasks */
+       if (!task->sc)
+               return;
+
+       /* flush task's r2t queues */
+       while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+               __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
                            sizeof(void*));
-               debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+               debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
        }
 
-       r2t = tcp_ctask->r2t;
+       r2t = tcp_task->r2t;
        if (r2t != NULL) {
-               __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+               __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
                            sizeof(void*));
-               tcp_ctask->r2t = NULL;
+               tcp_task->r2t = NULL;
        }
 }
 
 /**
  * iscsi_data_rsp - SCSI Data-In Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  **/
 static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+       struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
        struct iscsi_session *session = conn->session;
-       struct scsi_cmnd *sc = ctask->sc;
+       struct scsi_cmnd *sc = task->sc;
        int datasn = be32_to_cpu(rhdr->datasn);
        unsigned total_in_length = scsi_in(sc)->length;
 
@@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        if (tcp_conn->in.datalen == 0)
                return 0;
 
-       if (tcp_ctask->exp_datasn != datasn) {
-               debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
-                         __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+       if (tcp_task->exp_datasn != datasn) {
+               debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+                         __func__, tcp_task->exp_datasn, datasn);
                return ISCSI_ERR_DATASN;
        }
 
-       tcp_ctask->exp_datasn++;
+       tcp_task->exp_datasn++;
 
-       tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
-       if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
+       tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+       if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
                debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
-                         __FUNCTION__, tcp_ctask->data_offset,
+                         __func__, tcp_task->data_offset,
                          tcp_conn->in.datalen, total_in_length);
                return ISCSI_ERR_DATA_OFFSET;
        }
@@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 /**
  * iscsi_solicit_data_init - initialize first Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @r2t: R2T info
  *
  * Notes:
@@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  *     This function is called with connection lock taken.
  **/
 static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
                        struct iscsi_r2t_info *r2t)
 {
        struct iscsi_data *hdr;
@@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
        hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
        r2t->solicit_datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-       memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-       hdr->itt = ctask->hdr->itt;
+       memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+       hdr->itt = task->hdr->itt;
        hdr->exp_statsn = r2t->exp_statsn;
        hdr->offset = cpu_to_be32(r2t->data_offset);
        if (r2t->data_length > conn->max_xmit_dlength) {
@@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 /**
  * iscsi_r2t_rsp - iSCSI R2T Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  **/
 static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
        struct iscsi_r2t_info *r2t;
        struct iscsi_session *session = conn->session;
-       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+       struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
        int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
                return ISCSI_ERR_DATALEN;
        }
 
-       if (tcp_ctask->exp_datasn != r2tsn){
-               debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
-                         __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+       if (tcp_task->exp_datasn != r2tsn){
+               debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+                         __func__, tcp_task->exp_datasn, r2tsn);
                return ISCSI_ERR_R2TSN;
        }
 
        /* fill-in new R2T associated with the task */
        iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 
-       if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+       if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
                iscsi_conn_printk(KERN_INFO, conn,
                                  "dropping R2T itt %d in recovery.\n",
-                                 ctask->itt);
+                                 task->itt);
                return 0;
        }
 
-       rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+       rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
        BUG_ON(!rc);
 
        r2t->exp_statsn = rhdr->statsn;
@@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        if (r2t->data_length == 0) {
                iscsi_conn_printk(KERN_ERR, conn,
                                  "invalid R2T with zero data len\n");
-               __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+               __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
                            sizeof(void*));
                return ISCSI_ERR_DATALEN;
        }
@@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
                            r2t->data_length, session->max_burst);
 
        r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-       if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
+       if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
                iscsi_conn_printk(KERN_ERR, conn,
                                  "invalid R2T with data len %u at offset %u "
                                  "and total length %d\n", r2t->data_length,
-                                 r2t->data_offset, scsi_out(ctask->sc)->length);
-               __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+                                 r2t->data_offset, scsi_out(task->sc)->length);
+               __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
                            sizeof(void*));
                return ISCSI_ERR_DATALEN;
        }
@@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        r2t->ttt = rhdr->ttt; /* no flip */
        r2t->solicit_datasn = 0;
 
-       iscsi_solicit_data_init(conn, ctask, r2t);
+       iscsi_solicit_data_init(conn, task, r2t);
 
-       tcp_ctask->exp_datasn = r2tsn + 1;
-       __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+       tcp_task->exp_datasn = r2tsn + 1;
+       __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
        conn->r2t_pdus_cnt++;
 
-       iscsi_requeue_ctask(ctask);
+       iscsi_requeue_task(task);
        return 0;
 }
 
@@ -733,10 +741,8 @@ static int
 iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
        int rc = 0, opcode, ahslen;
-       struct iscsi_session *session = conn->session;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-       struct iscsi_cmd_task *ctask;
-       uint32_t itt;
+       struct iscsi_task *task;
 
        /* verify PDU length */
        tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
        opcode = hdr->opcode & ISCSI_OPCODE_MASK;
        /* verify itt (itt encoding: age+cid+itt) */
-       rc = iscsi_verify_itt(conn, hdr, &itt);
+       rc = iscsi_verify_itt(conn, hdr->itt);
        if (rc)
                return rc;
 
@@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
        switch(opcode) {
        case ISCSI_OP_SCSI_DATA_IN:
-               ctask = session->cmds[itt];
                spin_lock(&conn->session->lock);
-               rc = iscsi_data_rsp(conn, ctask);
-               spin_unlock(&conn->session->lock);
-               if (rc)
-                       return rc;
+               task = iscsi_itt_to_ctask(conn, hdr->itt);
+               if (!task)
+                       rc = ISCSI_ERR_BAD_ITT;
+               else
+                       rc = iscsi_data_rsp(conn, task);
+               if (rc) {
+                       spin_unlock(&conn->session->lock);
+                       break;
+               }
+
                if (tcp_conn->in.datalen) {
-                       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+                       struct iscsi_tcp_task *tcp_task = task->dd_data;
                        struct hash_desc *rx_hash = NULL;
-                       struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
+                       struct scsi_data_buffer *sdb = scsi_in(task->sc);
 
                        /*
                         * Setup copy of Data-In into the Scsi_Cmnd
@@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
                        debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
                                  "datalen=%d)\n", tcp_conn,
-                                 tcp_ctask->data_offset,
+                                 tcp_task->data_offset,
                                  tcp_conn->in.datalen);
-                       return iscsi_segment_seek_sg(&tcp_conn->in.segment,
-                                                    sdb->table.sgl,
-                                                    sdb->table.nents,
-                                                    tcp_ctask->data_offset,
-                                                    tcp_conn->in.datalen,
-                                                    iscsi_tcp_process_data_in,
-                                                    rx_hash);
+                       rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+                                                  sdb->table.sgl,
+                                                  sdb->table.nents,
+                                                  tcp_task->data_offset,
+                                                  tcp_conn->in.datalen,
+                                                  iscsi_tcp_process_data_in,
+                                                  rx_hash);
+                       spin_unlock(&conn->session->lock);
+                       return rc;
                }
-               /* fall through */
+               rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+               spin_unlock(&conn->session->lock);
+               break;
        case ISCSI_OP_SCSI_CMD_RSP:
                if (tcp_conn->in.datalen) {
                        iscsi_tcp_data_recv_prep(tcp_conn);
@@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
                rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
                break;
        case ISCSI_OP_R2T:
-               ctask = session->cmds[itt];
-               if (ahslen)
+               spin_lock(&conn->session->lock);
+               task = iscsi_itt_to_ctask(conn, hdr->itt);
+               if (!task)
+                       rc = ISCSI_ERR_BAD_ITT;
+               else if (ahslen)
                        rc = ISCSI_ERR_AHSLEN;
-               else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-                       spin_lock(&session->lock);
-                       rc = iscsi_r2t_rsp(conn, ctask);
-                       spin_unlock(&session->lock);
-               } else
+               else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+                       rc = iscsi_r2t_rsp(conn, task);
+               else
                        rc = ISCSI_ERR_PROTO;
+               spin_unlock(&conn->session->lock);
                break;
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_TEXT_RSP:
@@ -1176,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 
-       debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+       debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
                        conn->hdrdgst_en? ", digest enabled" : "");
 
        /* Clear the data segment - needs to be filled in by the
@@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 
        /* If header digest is enabled, compute the CRC and
         * place the digest into the same buffer. We make
-        * sure that both iscsi_tcp_ctask and mtask have
+        * sure that both iscsi_tcp_task and mtask have
         * sufficient room.
         */
        if (conn->hdrdgst_en) {
@@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
        struct hash_desc *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
-       debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+       debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
                        tcp_conn, offset, len,
                        conn->datadgst_en? ", digest enabled" : "");
 
@@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
        struct hash_desc *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
-       debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+       debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
                  conn->datadgst_en? ", digest enabled" : "");
 
        /* Make sure the datalen matches what the caller
@@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 /**
  * iscsi_solicit_data_cont - initialize next Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @r2t: R2T info
  * @left: bytes left to transfer
  *
@@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  *     Called under connection lock.
  **/
 static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
                        struct iscsi_r2t_info *r2t)
 {
        struct iscsi_data *hdr;
@@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
        hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
        r2t->solicit_datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-       memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-       hdr->itt = ctask->hdr->itt;
+       memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+       hdr->itt = task->hdr->itt;
        hdr->exp_statsn = r2t->exp_statsn;
        new_offset = r2t->data_offset + r2t->sent;
        hdr->offset = cpu_to_be32(new_offset);
@@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 }
 
 /**
- * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @sc: scsi command
  **/
 static int
-iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_init(struct iscsi_task *task)
 {
-       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-       struct iscsi_conn *conn = ctask->conn;
-       struct scsi_cmnd *sc = ctask->sc;
+       struct iscsi_tcp_task *tcp_task = task->dd_data;
+       struct iscsi_conn *conn = task->conn;
+       struct scsi_cmnd *sc = task->sc;
        int err;
 
-       BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-       tcp_ctask->sent = 0;
-       tcp_ctask->exp_datasn = 0;
+       if (!sc) {
+               /*
+                * mgmt tasks do not have a scatterlist since they come
+                * in from the iscsi interface.
+                */
+               debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+                          task->itt);
+
+               /* Prepare PDU, optionally w/ immediate data */
+               iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+
+               /* If we have immediate data, attach a payload */
+               if (task->data_count)
+                       iscsi_tcp_send_linear_data_prepare(conn, task->data,
+                                                          task->data_count);
+               return 0;
+       }
+
+       BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+       tcp_task->sent = 0;
+       tcp_task->exp_datasn = 0;
 
        /* Prepare PDU, optionally w/ immediate data */
-       debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
-                   conn->id, ctask->itt, ctask->imm_count,
-                   ctask->unsol_count);
-       iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+       debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+                   conn->id, task->itt, task->imm_count,
+                   task->unsol_count);
+       iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
 
-       if (!ctask->imm_count)
+       if (!task->imm_count)
                return 0;
 
        /* If we have immediate data, attach a payload */
        err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
                                       scsi_out(sc)->table.nents,
-                                      0, ctask->imm_count);
+                                      0, task->imm_count);
        if (err)
                return err;
-       tcp_ctask->sent += ctask->imm_count;
-       ctask->imm_count = 0;
-       return 0;
-}
-
-/**
- * iscsi_tcp_mtask_xmit - xmit management(immediate) task
- * @conn: iscsi connection
- * @mtask: task management task
- *
- * Notes:
- *     The function can return -EAGAIN in which case caller must
- *     call it again later, or recover. '0' return code means successful
- *     xmit.
- **/
-static int
-iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-       int rc;
-
-       /* Flush any pending data first. */
-       rc = iscsi_tcp_flush(conn);
-       if (rc < 0)
-               return rc;
-
-       if (mtask->hdr->itt == RESERVED_ITT) {
-               struct iscsi_session *session = conn->session;
-
-               spin_lock_bh(&session->lock);
-               iscsi_free_mgmt_task(conn, mtask);
-               spin_unlock_bh(&session->lock);
-       }
-
+       tcp_task->sent += task->imm_count;
+       task->imm_count = 0;
        return 0;
 }
 
 /*
- * iscsi_tcp_ctask_xmit - xmit normal PDU task
- * @conn: iscsi connection
- * @ctask: iscsi command task
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
  *
  * We're expected to return 0 when everything was transmitted successfully,
  * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  * of error.
  */
 static int
-iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_xmit(struct iscsi_task *task)
 {
-       struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-       struct scsi_cmnd *sc = ctask->sc;
-       struct scsi_data_buffer *sdb = scsi_out(sc);
+       struct iscsi_conn *conn = task->conn;
+       struct iscsi_tcp_task *tcp_task = task->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+       struct scsi_data_buffer *sdb;
        int rc = 0;
 
 flush:
@@ -1398,31 +1402,39 @@ flush:
        if (rc < 0)
                return rc;
 
+       /* mgmt command */
+       if (!sc) {
+               if (task->hdr->itt == RESERVED_ITT)
+                       iscsi_put_task(task);
+               return 0;
+       }
+
        /* Are we done already? */
        if (sc->sc_data_direction != DMA_TO_DEVICE)
                return 0;
 
-       if (ctask->unsol_count != 0) {
-               struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+       sdb = scsi_out(sc);
+       if (task->unsol_count != 0) {
+               struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
 
                /* Prepare a header for the unsolicited PDU.
                 * The amount of data we want to send will be
-                * in ctask->data_count.
+                * in task->data_count.
                 * FIXME: return the data count instead.
                 */
-               iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+               iscsi_prep_unsolicit_data_pdu(task, hdr);
 
                debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
-                               ctask->itt, tcp_ctask->sent, ctask->data_count);
+                               task->itt, tcp_task->sent, task->data_count);
 
                iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
                rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
-                                             sdb->table.nents, tcp_ctask->sent,
-                                             ctask->data_count);
+                                             sdb->table.nents, tcp_task->sent,
+                                             task->data_count);
                if (rc)
                        goto fail;
-               tcp_ctask->sent += ctask->data_count;
-               ctask->unsol_count -= ctask->data_count;
+               tcp_task->sent += task->data_count;
+               task->unsol_count -= task->data_count;
                goto flush;
        } else {
                struct iscsi_session *session = conn->session;
@@ -1431,22 +1443,22 @@ flush:
                /* All unsolicited PDUs sent. Check for solicited PDUs.
                 */
                spin_lock_bh(&session->lock);
-               r2t = tcp_ctask->r2t;
+               r2t = tcp_task->r2t;
                if (r2t != NULL) {
                        /* Continue with this R2T? */
-                       if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+                       if (!iscsi_solicit_data_cont(conn, task, r2t)) {
                                debug_scsi("  done with r2t %p\n", r2t);
 
-                               __kfifo_put(tcp_ctask->r2tpool.queue,
+                               __kfifo_put(tcp_task->r2tpool.queue,
                                            (void*)&r2t, sizeof(void*));
-                               tcp_ctask->r2t = r2t = NULL;
+                               tcp_task->r2t = r2t = NULL;
                        }
                }
 
                if (r2t == NULL) {
-                       __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+                       __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
                                    sizeof(void*));
-                       r2t = tcp_ctask->r2t;
+                       r2t = tcp_task->r2t;
                }
                spin_unlock_bh(&session->lock);
 
@@ -1457,7 +1469,7 @@ flush:
                }
 
                debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
-                       r2t, r2t->solicit_datasn - 1, ctask->itt,
+                       r2t, r2t->solicit_datasn - 1, task->itt,
                        r2t->data_offset + r2t->sent, r2t->data_count);
 
                iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1469,7 +1481,7 @@ flush:
                                              r2t->data_count);
                if (rc)
                        goto fail;
-               tcp_ctask->sent += r2t->data_count;
+               tcp_task->sent += r2t->data_count;
                r2t->sent += r2t->data_count;
                goto flush;
        }
@@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_tcp_conn *tcp_conn;
 
-       cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+       cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
@@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
         */
        conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
 
-       tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
-       if (!tcp_conn)
-               goto tcp_conn_alloc_fail;
-
-       conn->dd_data = tcp_conn;
+       tcp_conn = conn->dd_data;
        tcp_conn->iscsi_conn = conn;
 
        tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
                                                  CRYPTO_ALG_ASYNC);
        tcp_conn->tx_hash.flags = 0;
        if (IS_ERR(tcp_conn->tx_hash.tfm))
-               goto free_tcp_conn;
+               goto free_conn;
 
        tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
                                                  CRYPTO_ALG_ASYNC);
@@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 
 free_tx_tfm:
        crypto_free_hash(tcp_conn->tx_hash.tfm);
-free_tcp_conn:
+free_conn:
        iscsi_conn_printk(KERN_ERR, conn,
                          "Could not create connection due to crc32c "
                          "loading error. Make sure the crc32c "
                          "module is built as a module or into the "
                          "kernel\n");
-       kfree(tcp_conn);
-tcp_conn_alloc_fail:
        iscsi_conn_teardown(cls_conn);
        return NULL;
 }
@@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
 
        spin_lock_bh(&session->lock);
        tcp_conn->sock = NULL;
-       conn->recv_lock = NULL;
        spin_unlock_bh(&session->lock);
        sockfd_put(sock);
 }
@@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 
        iscsi_tcp_release_conn(conn);
-       iscsi_conn_teardown(cls_conn);
 
        if (tcp_conn->tx_hash.tfm)
                crypto_free_hash(tcp_conn->tx_hash.tfm);
        if (tcp_conn->rx_hash.tfm)
                crypto_free_hash(tcp_conn->rx_hash.tfm);
 
-       kfree(tcp_conn);
+       iscsi_conn_teardown(cls_conn);
 }
 
 static void
 iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
+       struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+       /* userspace may have goofed up and not bound us */
+       if (!tcp_conn->sock)
+               return;
+       /*
+        * Make sure our recv side is stopped.
+        * Older tools called conn stop before ep_disconnect
+        * so IO could still be coming in.
+        */
+       write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+       write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
 
        iscsi_conn_stop(cls_conn, flag);
        iscsi_tcp_release_conn(conn);
@@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
                    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
                    int is_leading)
 {
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct iscsi_host *ihost = shost_priv(shost);
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct sock *sk;
@@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
        if (err)
                goto free_socket;
 
-       err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
-                               &conn->local_port, kernel_getsockname);
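+       /* the local address and port are now tracked per host in the iscsi_host */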
+       err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+                               &ihost->local_port, kernel_getsockname);
        if (err)
                goto free_socket;
 
@@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
        sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
        sk->sk_allocation = GFP_ATOMIC;
 
-       /* FIXME: disable Nagle's algorithm */
-
-       /*
-        * Intercept TCP callbacks for sendfile like receive
-        * processing.
-        */
-       conn->recv_lock = &sk->sk_callback_lock;
        iscsi_conn_set_callbacks(conn);
        tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
        /*
@@ -1684,21 +1696,6 @@ free_socket:
        return err;
 }
 
-/* called with host lock */
-static void
-iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-       debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
-
-       /* Prepare PDU, optionally w/ immediate data */
-       iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
-
-       /* If we have immediate data, attach a payload */
-       if (mtask->data_count)
-               iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
-                                                  mtask->data_count);
-}
-
 static int
 iscsi_r2tpool_alloc(struct iscsi_session *session)
 {
@@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
         * initialize per-task: R2T pool and xmit queue
         */
        for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-               struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-               struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+               struct iscsi_task *task = session->cmds[cmd_i];
+               struct iscsi_tcp_task *tcp_task = task->dd_data;
 
                /*
                 * pre-allocated x4 as much r2ts to handle race when
@@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
                 */
 
                /* R2T pool */
-               if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+               if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
                                    sizeof(struct iscsi_r2t_info))) {
                        goto r2t_alloc_fail;
                }
 
                /* R2T xmit queue */
-               tcp_ctask->r2tqueue = kfifo_alloc(
+               tcp_task->r2tqueue = kfifo_alloc(
                      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
-               if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
-                       iscsi_pool_free(&tcp_ctask->r2tpool);
+               if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+                       iscsi_pool_free(&tcp_task->r2tpool);
                        goto r2t_alloc_fail;
                }
        }
@@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 
 r2t_alloc_fail:
        for (i = 0; i < cmd_i; i++) {
-               struct iscsi_cmd_task *ctask = session->cmds[i];
-               struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+               struct iscsi_task *task = session->cmds[i];
+               struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-               kfifo_free(tcp_ctask->r2tqueue);
-               iscsi_pool_free(&tcp_ctask->r2tpool);
+               kfifo_free(tcp_task->r2tqueue);
+               iscsi_pool_free(&tcp_task->r2tpool);
        }
        return -ENOMEM;
 }
@@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
        int i;
 
        for (i = 0; i < session->cmds_max; i++) {
-               struct iscsi_cmd_task *ctask = session->cmds[i];
-               struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+               struct iscsi_task *task = session->cmds[i];
+               struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-               kfifo_free(tcp_ctask->r2tqueue);
-               iscsi_pool_free(&tcp_ctask->r2tpool);
+               kfifo_free(tcp_task->r2tqueue);
+               iscsi_pool_free(&tcp_task->r2tpool);
        }
 }
 
@@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
        return len;
 }
 
-static int
-iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
-                        char *buf)
-{
-        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
-       int len;
-
-       switch (param) {
-       case ISCSI_HOST_PARAM_IPADDRESS:
-               spin_lock_bh(&session->lock);
-               if (!session->leadconn)
-                       len = -ENODEV;
-               else
-                       len = sprintf(buf, "%s\n",
-                                    session->leadconn->local_address);
-               spin_unlock_bh(&session->lock);
-               break;
-       default:
-               return iscsi_host_get_param(shost, param, buf);
-       }
-       return len;
-}
-
 static void
 iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 {
@@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 }
 
 static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_transport *iscsit,
-                        struct scsi_transport_template *scsit,
-                        uint16_t cmds_max, uint16_t qdepth,
-                        uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+                        uint16_t qdepth, uint32_t initial_cmdsn,
+                        uint32_t *hostno)
 {
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *session;
-       uint32_t hn;
+       struct Scsi_Host *shost;
        int cmd_i;
 
-       cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
-                                        sizeof(struct iscsi_tcp_cmd_task),
-                                        sizeof(struct iscsi_tcp_mgmt_task),
-                                        initial_cmdsn, &hn);
-       if (!cls_session)
+       if (ep) {
+               printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
                return NULL;
-       *hostno = hn;
-
-       session = class_to_transport_session(cls_session);
-       for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-               struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-               struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-
-               ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
-               ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
        }
 
-       for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-               struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-               struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+       shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+       if (!shost)
+               return NULL;
+       shost->transportt = iscsi_tcp_scsi_transport;
+       shost->max_lun = iscsi_max_lun;
+       shost->max_id = 0;
+       shost->max_channel = 0;
+       shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+
+       if (iscsi_host_add(shost, NULL))
+               goto free_host;
+       *hostno = shost->host_no;
+
+       cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+                                         sizeof(struct iscsi_tcp_task),
+                                         initial_cmdsn, 0);
+       if (!cls_session)
+               goto remove_host;
+       session = cls_session->dd_data;
+
+       shost->can_queue = session->scsi_cmds_max;
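+       /* point each task's header at the buffer preallocated in its tcp_task */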
+       for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+               struct iscsi_task *task = session->cmds[cmd_i];
+               struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-               mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+               task->hdr = &tcp_task->hdr.cmd_hdr;
+               task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
        }
 
-       if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
-               goto r2tpool_alloc_fail;
-
+       if (iscsi_r2tpool_alloc(session))
+               goto remove_session;
        return cls_session;
 
-r2tpool_alloc_fail:
+remove_session:
        iscsi_session_teardown(cls_session);
+remove_host:
+       iscsi_host_remove(shost);
+free_host:
+       iscsi_host_free(shost);
        return NULL;
 }
 
 static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 {
-       iscsi_r2tpool_free(class_to_transport_session(cls_session));
-       iscsi_session_teardown(cls_session);
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+       iscsi_r2tpool_free(cls_session->dd_data);
+
+       iscsi_host_remove(shost);
+       iscsi_host_free(shost);
 }
 
 static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
                                  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
                                  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
                                  ISCSI_LU_RESET_TMO |
-                                 ISCSI_PING_TMO | ISCSI_RECV_TMO,
+                                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
+                                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
        .host_param_mask        = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
                                  ISCSI_HOST_INITIATOR_NAME |
                                  ISCSI_HOST_NETDEV_NAME,
-       .host_template          = &iscsi_sht,
-       .conndata_size          = sizeof(struct iscsi_conn),
-       .max_conn               = 1,
-       .max_cmd_len            = 16,
        /* session management */
        .create_session         = iscsi_tcp_session_create,
        .destroy_session        = iscsi_tcp_session_destroy,
@@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_tcp_conn_stop,
        /* iscsi host params */
-       .get_host_param         = iscsi_tcp_host_get_param,
+       .get_host_param         = iscsi_host_get_param,
        .set_host_param         = iscsi_host_set_param,
        /* IO */
        .send_pdu               = iscsi_conn_send_pdu,
        .get_stats              = iscsi_conn_get_stats,
-       .init_cmd_task          = iscsi_tcp_ctask_init,
-       .init_mgmt_task         = iscsi_tcp_mtask_init,
-       .xmit_cmd_task          = iscsi_tcp_ctask_xmit,
-       .xmit_mgmt_task         = iscsi_tcp_mtask_xmit,
-       .cleanup_cmd_task       = iscsi_tcp_cleanup_ctask,
+       .init_task              = iscsi_tcp_task_init,
+       .xmit_task              = iscsi_tcp_task_xmit,
+       .cleanup_task           = iscsi_tcp_cleanup_task,
        /* recovery */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
 };
@@ -2014,9 +1999,10 @@ iscsi_tcp_init(void)
                       iscsi_max_lun);
                return -EINVAL;
        }
-       iscsi_tcp_transport.max_lun = iscsi_max_lun;
 
-       if (!iscsi_register_transport(&iscsi_tcp_transport))
+       iscsi_tcp_scsi_transport = iscsi_register_transport(
+                                                       &iscsi_tcp_transport);
+       if (!iscsi_tcp_scsi_transport)
                return -ENODEV;
 
        return 0;
index ed0b991..498d8ca 100644 (file)
@@ -103,11 +103,6 @@ struct iscsi_data_task {
        char                    hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
 };
 
-struct iscsi_tcp_mgmt_task {
-       struct iscsi_hdr        hdr;
-       char                    hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
-};
-
 struct iscsi_r2t_info {
        __be32                  ttt;            /* copied from R2T */
        __be32                  exp_statsn;     /* copied from R2T */
@@ -119,7 +114,7 @@ struct iscsi_r2t_info {
        struct iscsi_data_task  dtask;          /* Data-Out header buf */
 };
 
-struct iscsi_tcp_cmd_task {
+struct iscsi_tcp_task {
        struct iscsi_hdr_buff {
                struct iscsi_cmd        cmd_hdr;
                char                    hdrextbuf[ISCSI_MAX_AHS_SIZE +
index b43bf1d..299e075 100644 (file)
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/libiscsi.h>
 
-struct iscsi_session *
-class_to_transport_session(struct iscsi_cls_session *cls_session)
-{
-       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-       return iscsi_hostdata(shost->hostdata);
-}
-EXPORT_SYMBOL_GPL(class_to_transport_session);
-
 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
 #define SNA32_CHECK 2147483648UL
 
@@ -87,68 +79,70 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
                 * xmit thread
                 */
                if (!list_empty(&session->leadconn->xmitqueue) ||
-                   !list_empty(&session->leadconn->mgmtqueue))
-                       scsi_queue_work(session->host,
-                                       &session->leadconn->xmitwork);
+                   !list_empty(&session->leadconn->mgmtqueue)) {
+                       if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+                               scsi_queue_work(session->host,
+                                               &session->leadconn->xmitwork);
+               }
        }
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
 
-void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
                                   struct iscsi_data *hdr)
 {
-       struct iscsi_conn *conn = ctask->conn;
+       struct iscsi_conn *conn = task->conn;
 
        memset(hdr, 0, sizeof(struct iscsi_data));
        hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
-       hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
-       ctask->unsol_datasn++;
+       hdr->datasn = cpu_to_be32(task->unsol_datasn);
+       task->unsol_datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-       memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+       memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
 
-       hdr->itt = ctask->hdr->itt;
+       hdr->itt = task->hdr->itt;
        hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-       hdr->offset = cpu_to_be32(ctask->unsol_offset);
+       hdr->offset = cpu_to_be32(task->unsol_offset);
 
-       if (ctask->unsol_count > conn->max_xmit_dlength) {
+       if (task->unsol_count > conn->max_xmit_dlength) {
                hton24(hdr->dlength, conn->max_xmit_dlength);
-               ctask->data_count = conn->max_xmit_dlength;
-               ctask->unsol_offset += ctask->data_count;
+               task->data_count = conn->max_xmit_dlength;
+               task->unsol_offset += task->data_count;
                hdr->flags = 0;
        } else {
-               hton24(hdr->dlength, ctask->unsol_count);
-               ctask->data_count = ctask->unsol_count;
+               hton24(hdr->dlength, task->unsol_count);
+               task->data_count = task->unsol_count;
                hdr->flags = ISCSI_FLAG_CMD_FINAL;
        }
 }
 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
 
-static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
 {
-       unsigned exp_len = ctask->hdr_len + len;
+       unsigned exp_len = task->hdr_len + len;
 
-       if (exp_len > ctask->hdr_max) {
+       if (exp_len > task->hdr_max) {
                WARN_ON(1);
                return -EINVAL;
        }
 
        WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
-       ctask->hdr_len = exp_len;
+       task->hdr_len = exp_len;
        return 0;
 }
 
 /*
  * make an extended cdb AHS
  */
-static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
 {
-       struct scsi_cmnd *cmd = ctask->sc;
+       struct scsi_cmnd *cmd = task->sc;
        unsigned rlen, pad_len;
        unsigned short ahslength;
        struct iscsi_ecdb_ahdr *ecdb_ahdr;
        int rc;
 
-       ecdb_ahdr = iscsi_next_hdr(ctask);
+       ecdb_ahdr = iscsi_next_hdr(task);
        rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
 
        BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
@@ -156,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
 
        pad_len = iscsi_padding(rlen);
 
-       rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
+       rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
                           sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
        if (rc)
                return rc;
@@ -171,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
 
        debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
                   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
-                  cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
+                  cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
 
        return 0;
 }
 
-static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
 {
-       struct scsi_cmnd *sc = ctask->sc;
+       struct scsi_cmnd *sc = task->sc;
        struct iscsi_rlength_ahdr *rlen_ahdr;
        int rc;
 
-       rlen_ahdr = iscsi_next_hdr(ctask);
-       rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
+       rlen_ahdr = iscsi_next_hdr(task);
+       rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
        if (rc)
                return rc;
 
@@ -203,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
 
 /**
  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
- * @ctask: iscsi cmd task
+ * @task: iscsi task
  *
  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
  * fields like dlength or final based on how much data it sends
  */
-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 {
-       struct iscsi_conn *conn = ctask->conn;
+       struct iscsi_conn *conn = task->conn;
        struct iscsi_session *session = conn->session;
-       struct iscsi_cmd *hdr = ctask->hdr;
-       struct scsi_cmnd *sc = ctask->sc;
+       struct iscsi_cmd *hdr = task->hdr;
+       struct scsi_cmnd *sc = task->sc;
        unsigned hdrlength, cmd_len;
        int rc;
 
-       ctask->hdr_len = 0;
-       rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+       task->hdr_len = 0;
+       rc = iscsi_add_hdr(task, sizeof(*hdr));
        if (rc)
                return rc;
        hdr->opcode = ISCSI_OP_SCSI_CMD;
        hdr->flags = ISCSI_ATTR_SIMPLE;
        int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
-       hdr->itt = build_itt(ctask->itt, session->age);
+       hdr->itt = build_itt(task->itt, session->age);
        hdr->cmdsn = cpu_to_be32(session->cmdsn);
        session->cmdsn++;
        hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
@@ -232,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
        if (cmd_len < ISCSI_CDB_SIZE)
                memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
        else if (cmd_len > ISCSI_CDB_SIZE) {
-               rc = iscsi_prep_ecdb_ahs(ctask);
+               rc = iscsi_prep_ecdb_ahs(task);
                if (rc)
                        return rc;
                cmd_len = ISCSI_CDB_SIZE;
        }
        memcpy(hdr->cdb, sc->cmnd, cmd_len);
 
-       ctask->imm_count = 0;
+       task->imm_count = 0;
        if (scsi_bidi_cmnd(sc)) {
                hdr->flags |= ISCSI_FLAG_CMD_READ;
-               rc = iscsi_prep_bidi_ahs(ctask);
+               rc = iscsi_prep_bidi_ahs(task);
                if (rc)
                        return rc;
        }
@@ -264,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
                 *
                 *      pad_count       bytes to be sent as zero-padding
                 */
-               ctask->unsol_count = 0;
-               ctask->unsol_offset = 0;
-               ctask->unsol_datasn = 0;
+               task->unsol_count = 0;
+               task->unsol_offset = 0;
+               task->unsol_datasn = 0;
 
                if (session->imm_data_en) {
                        if (out_len >= session->first_burst)
-                               ctask->imm_count = min(session->first_burst,
+                               task->imm_count = min(session->first_burst,
                                                        conn->max_xmit_dlength);
                        else
-                               ctask->imm_count = min(out_len,
+                               task->imm_count = min(out_len,
                                                        conn->max_xmit_dlength);
-                       hton24(hdr->dlength, ctask->imm_count);
+                       hton24(hdr->dlength, task->imm_count);
                } else
                        zero_data(hdr->dlength);
 
                if (!session->initial_r2t_en) {
-                       ctask->unsol_count = min(session->first_burst, out_len)
-                                                            - ctask->imm_count;
-                       ctask->unsol_offset = ctask->imm_count;
+                       task->unsol_count = min(session->first_burst, out_len)
+                                                            - task->imm_count;
+                       task->unsol_offset = task->imm_count;
                }
 
-               if (!ctask->unsol_count)
+               if (!task->unsol_count)
                        /* No unsolicited Data-Outs */
                        hdr->flags |= ISCSI_FLAG_CMD_FINAL;
        } else {
@@ -298,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
        }
 
        /* calculate size of additional header segments (AHSs) */
-       hdrlength = ctask->hdr_len - sizeof(*hdr);
+       hdrlength = task->hdr_len - sizeof(*hdr);
 
        WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
        hdrlength /= ISCSI_PAD_LEN;
@@ -306,76 +300,115 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
        WARN_ON(hdrlength >= 256);
        hdr->hlength = hdrlength & 0xFF;
 
-       if (conn->session->tt->init_cmd_task(conn->ctask))
-               return EIO;
+       if (conn->session->tt->init_task &&
+           conn->session->tt->init_task(task))
+               return -EIO;
+
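+       /* prep succeeded; move the task onto the connection's running list */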
+       task->state = ISCSI_TASK_RUNNING;
+       list_move_tail(&task->running, &conn->run_list);
 
        conn->scsicmd_pdus_cnt++;
-       debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
-               "len %d bidi_len %d cmdsn %d win %d]\n",
-               scsi_bidi_cmnd(sc) ? "bidirectional" :
-                    sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-               conn->id, sc, sc->cmnd[0], ctask->itt,
-               scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
-               session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+       debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+                  "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+                  "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+                  "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+                  scsi_bufflen(sc),
+                  scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+                  session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
        return 0;
 }
 
 /**
- * iscsi_complete_command - return command back to scsi-ml
- * @ctask: iscsi cmd task
+ * iscsi_complete_command - finish a task
+ * @task: iscsi cmd task
  *
  * Must be called with session lock.
- * This function returns the scsi command to scsi-ml and returns
- * the cmd task to the pool of available cmd tasks.
+ * This function returns the scsi command to scsi-ml or cleans
+ * up mgmt tasks, then returns the task to the pool.
  */
-static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+static void iscsi_complete_command(struct iscsi_task *task)
 {
-       struct iscsi_conn *conn = ctask->conn;
+       struct iscsi_conn *conn = task->conn;
        struct iscsi_session *session = conn->session;
-       struct scsi_cmnd *sc = ctask->sc;
+       struct scsi_cmnd *sc = task->sc;
 
-       ctask->state = ISCSI_TASK_COMPLETED;
-       ctask->sc = NULL;
-       /* SCSI eh reuses commands to verify us */
-       sc->SCp.ptr = NULL;
-       if (conn->ctask == ctask)
-               conn->ctask = NULL;
-       list_del_init(&ctask->running);
-       __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
-       sc->scsi_done(sc);
+       list_del_init(&task->running);
+       task->state = ISCSI_TASK_COMPLETED;
+       task->sc = NULL;
+
+       if (conn->task == task)
+               conn->task = NULL;
+       /*
+        * login task is preallocated so do not free it
+        */
+       if (conn->login_task == task)
+               return;
+
+       __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+
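+       /* if this task was our outstanding nop-out, stop tracking it */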
+       if (conn->ping_task == task)
+               conn->ping_task = NULL;
+
+       if (sc) {
+               task->sc = NULL;
+               /* SCSI eh reuses commands to verify us */
+               sc->SCp.ptr = NULL;
+               /*
+                * queuecommand may call this to free the task, but
+                * not have set up the sc callback
+                */
+               if (sc->scsi_done)
+                       sc->scsi_done(sc);
+       }
+}
+
+void __iscsi_get_task(struct iscsi_task *task)
+{
+       atomic_inc(&task->refcount);
 }
+EXPORT_SYMBOL_GPL(__iscsi_get_task);
 
-static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+static void __iscsi_put_task(struct iscsi_task *task)
 {
-       atomic_inc(&ctask->refcount);
+       if (atomic_dec_and_test(&task->refcount))
+               iscsi_complete_command(task);
 }
 
-static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_put_task(struct iscsi_task *task)
 {
-       if (atomic_dec_and_test(&ctask->refcount))
-               iscsi_complete_command(ctask);
+       struct iscsi_session *session = task->conn->session;
+
+       spin_lock_bh(&session->lock);
+       __iscsi_put_task(task);
+       spin_unlock_bh(&session->lock);
 }
+EXPORT_SYMBOL_GPL(iscsi_put_task);
 
 /*
  * session lock must be held
  */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
                         int err)
 {
        struct scsi_cmnd *sc;
 
-       sc = ctask->sc;
+       sc = task->sc;
        if (!sc)
                return;
 
-       if (ctask->state == ISCSI_TASK_PENDING)
+       if (task->state == ISCSI_TASK_PENDING)
                /*
                 * cmd never made it to the xmit thread, so we should not count
                 * the cmd in the sequencing
                 */
                conn->session->queued_cmdsn--;
        else
-               conn->session->tt->cleanup_cmd_task(conn, ctask);
+               conn->session->tt->cleanup_task(conn, task);
+       /*
+        * Check if cleanup_task dropped the lock and the command completed.
+        */
+       if (!task->sc)
+               return;
 
        sc->result = err;
        if (!scsi_bidi_cmnd(sc))
@@ -384,39 +417,63 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                scsi_out(sc)->resid = scsi_out(sc)->length;
                scsi_in(sc)->resid = scsi_in(sc)->length;
        }
-       if (conn->ctask == ctask)
-               conn->ctask = NULL;
+
+       if (conn->task == task)
+               conn->task = NULL;
        /* release ref from queuecommand */
-       __iscsi_put_ctask(ctask);
+       __iscsi_put_task(task);
 }
 
-/**
- * iscsi_free_mgmt_task - return mgmt task back to pool
- * @conn: iscsi connection
- * @mtask: mtask
- *
- * Must be called with session lock.
- */
-void iscsi_free_mgmt_task(struct iscsi_conn *conn,
-                         struct iscsi_mgmt_task *mtask)
+static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+                               struct iscsi_task *task)
 {
-       list_del_init(&mtask->running);
-       if (conn->login_mtask == mtask)
-               return;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+       struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+       if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+               return -ENOTCONN;
+
+       if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+           hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+               nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+       /*
+        * pre-format CmdSN for outgoing PDU.
+        */
+       nop->cmdsn = cpu_to_be32(session->cmdsn);
+       if (hdr->itt != RESERVED_ITT) {
+               hdr->itt = build_itt(task->itt, session->age);
+               /*
+                * TODO: We always use immediate, so we never hit this.
+                * If we start to send tmfs or nops as non-immediate then
+                * we should start checking the cmdsn numbers for mgmt tasks.
+                */
+               if (conn->c_stage == ISCSI_CONN_STARTED &&
+                   !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+                       session->queued_cmdsn++;
+                       session->cmdsn++;
+               }
+       }
 
-       if (conn->ping_mtask == mtask)
-               conn->ping_mtask = NULL;
-       __kfifo_put(conn->session->mgmtpool.queue,
-                   (void*)&mtask, sizeof(void*));
+       if (session->tt->init_task)
+               session->tt->init_task(task);
+
+       if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+               session->state = ISCSI_STATE_LOGGING_OUT;
+
+       list_move_tail(&task->running, &conn->mgmt_run_list);
+       debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+                  hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+                  task->data_count);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
 
-static struct iscsi_mgmt_task *
+static struct iscsi_task *
 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                      char *data, uint32_t data_size)
 {
        struct iscsi_session *session = conn->session;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
 
        if (session->state == ISCSI_STATE_TERMINATE)
                return NULL;
@@ -426,29 +483,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                /*
                 * Login and Text are sent serially, in
                 * request-followed-by-response sequence.
-                * Same mtask can be used. Same ITT must be used.
-                * Note that login_mtask is preallocated at conn_create().
+                * Same task can be used. Same ITT must be used.
+                * Note that login_task is preallocated at conn_create().
                 */
-               mtask = conn->login_mtask;
+               task = conn->login_task;
        else {
                BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
                BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
-               if (!__kfifo_get(session->mgmtpool.queue,
-                                (void*)&mtask, sizeof(void*)))
+               if (!__kfifo_get(session->cmdpool.queue,
+                                (void*)&task, sizeof(void*)))
                        return NULL;
+
+               if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+                    hdr->ttt == RESERVED_ITT) {
+                       conn->ping_task = task;
+                       conn->last_ping = jiffies;
+               }
        }
+       /*
+        * The reference is released in the complete pdu path for tasks we
+        * expect a response for, and by the LLD once it has transmitted
+        * the task for PDUs we do not expect a response for.
+        */
+       atomic_set(&task->refcount, 1);
+       task->conn = conn;
+       task->sc = NULL;
 
        if (data_size) {
-               memcpy(mtask->data, data, data_size);
-               mtask->data_count = data_size;
+               memcpy(task->data, data, data_size);
+               task->data_count = data_size;
+       } else
+               task->data_count = 0;
+
+       memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+       INIT_LIST_HEAD(&task->running);
+       list_add_tail(&task->running, &conn->mgmtqueue);
+
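+       /* offload transports send the PDU here; others defer to the xmit work */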
+       if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+               if (iscsi_prep_mgmt_task(conn, task)) {
+                       __iscsi_put_task(task);
+                       return NULL;
+               }
+
+               if (session->tt->xmit_task(task))
+                       task = NULL;
+
        } else
-               mtask->data_count = 0;
+               scsi_queue_work(conn->session->host, &conn->xmitwork);
 
-       memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
-       INIT_LIST_HEAD(&mtask->running);
-       list_add_tail(&mtask->running, &conn->mgmtqueue);
-       return mtask;
+       return task;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -462,7 +546,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
        if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
                err = -EPERM;
        spin_unlock_bh(&session->lock);
-       scsi_queue_work(session->host, &conn->xmitwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -471,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
  * iscsi_cmd_rsp - SCSI Command Response processing
  * @conn: iscsi connection
  * @hdr: iscsi header
- * @ctask: scsi command task
+ * @task: scsi command task
  * @data: cmd data buffer
  * @datalen: len of buffer
  *
@@ -479,12 +562,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
  * then completes the command and task.
  **/
 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-                              struct iscsi_cmd_task *ctask, char *data,
+                              struct iscsi_task *task, char *data,
                               int datalen)
 {
        struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
        struct iscsi_session *session = conn->session;
-       struct scsi_cmnd *sc = ctask->sc;
+       struct scsi_cmnd *sc = task->sc;
 
        iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
        conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
@@ -508,7 +591,7 @@ invalid_datalen:
                        goto out;
                }
 
-               senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+               senselen = get_unaligned_be16(data);
                if (datalen < senselen)
                        goto invalid_datalen;
 
@@ -544,10 +627,10 @@ invalid_datalen:
        }
 out:
        debug_scsi("done [sc %lx res %d itt 0x%x]\n",
-                  (long)sc, sc->result, ctask->itt);
+                  (long)sc, sc->result, task->itt);
        conn->scsirsp_pdus_cnt++;
 
-       __iscsi_put_ctask(ctask);
+       __iscsi_put_task(task);
 }
 
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -572,9 +655,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
 
-       if (!rhdr && conn->ping_mtask)
+       if (!rhdr && conn->ping_task)
                return;
 
        memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -588,18 +671,9 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
        } else
                hdr.ttt = RESERVED_ITT;
 
-       mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-       if (!mtask) {
+       task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+       if (!task)
                iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
-               return;
-       }
-
-       /* only track our nops */
-       if (!rhdr) {
-               conn->ping_mtask = mtask;
-               conn->last_ping = jiffies;
-       }
-       scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -627,6 +701,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        return 0;
 }
 
+/**
+ * iscsi_itt_to_task - look up task by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for mgmt tasks like login and nops, or if
+ * the LLD's itt space does not include the session age.
+ *
+ * The session lock must be held.
+ */
+static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+{
+       struct iscsi_session *session = conn->session;
+       uint32_t i;
+
+       if (itt == RESERVED_ITT)
+               return NULL;
+
+       i = get_itt(itt);
+       if (i >= session->cmds_max)
+               return NULL;
+
+       return session->cmds[i];
+}
+
 /**
  * __iscsi_complete_pdu - complete pdu
  * @conn: iscsi conn
@@ -638,108 +737,28 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
  * queuecommand or send generic. session lock must be held and verify
  * itt must have been called.
  */
-static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-                               char *data, int datalen)
+int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+                        char *data, int datalen)
 {
        struct iscsi_session *session = conn->session;
        int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
-       struct iscsi_cmd_task *ctask;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
        uint32_t itt;
 
        conn->last_recv = jiffies;
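+       /* reject PDUs whose itt has a stale session age or an out-of-range index */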
+       rc = iscsi_verify_itt(conn, hdr->itt);
+       if (rc)
+               return rc;
+
        if (hdr->itt != RESERVED_ITT)
                itt = get_itt(hdr->itt);
        else
                itt = ~0U;
 
-       if (itt < session->cmds_max) {
-               ctask = session->cmds[itt];
-
-               debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
-                          opcode, conn->id, ctask->itt, datalen);
-
-               switch(opcode) {
-               case ISCSI_OP_SCSI_CMD_RSP:
-                       BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-                       iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
-                                          datalen);
-                       break;
-               case ISCSI_OP_SCSI_DATA_IN:
-                       BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-                       if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-                               conn->scsirsp_pdus_cnt++;
-                               __iscsi_put_ctask(ctask);
-                       }
-                       break;
-               case ISCSI_OP_R2T:
-                       /* LLD handles this for now */
-                       break;
-               default:
-                       rc = ISCSI_ERR_BAD_OPCODE;
-                       break;
-               }
-       } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
-                  itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
-               mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
-
-               debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
-                          opcode, conn->id, mtask->itt, datalen);
+       debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+                  opcode, conn->id, itt, datalen);
 
-               iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
-               switch(opcode) {
-               case ISCSI_OP_LOGOUT_RSP:
-                       if (datalen) {
-                               rc = ISCSI_ERR_PROTO;
-                               break;
-                       }
-                       conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-                       /* fall through */
-               case ISCSI_OP_LOGIN_RSP:
-               case ISCSI_OP_TEXT_RSP:
-                       /*
-                        * login related PDU's exp_statsn is handled in
-                        * userspace
-                        */
-                       if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
-                               rc = ISCSI_ERR_CONN_FAILED;
-                       iscsi_free_mgmt_task(conn, mtask);
-                       break;
-               case ISCSI_OP_SCSI_TMFUNC_RSP:
-                       if (datalen) {
-                               rc = ISCSI_ERR_PROTO;
-                               break;
-                       }
-
-                       iscsi_tmf_rsp(conn, hdr);
-                       iscsi_free_mgmt_task(conn, mtask);
-                       break;
-               case ISCSI_OP_NOOP_IN:
-                       if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
-                           datalen) {
-                               rc = ISCSI_ERR_PROTO;
-                               break;
-                       }
-                       conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-
-                       if (conn->ping_mtask != mtask) {
-                               /*
-                                * If this is not in response to one of our
-                                * nops then it must be from userspace.
-                                */
-                               if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
-                                                  datalen))
-                                       rc = ISCSI_ERR_CONN_FAILED;
-                       } else
-                               mod_timer(&conn->transport_timer,
-                                         jiffies + conn->recv_timeout);
-                       iscsi_free_mgmt_task(conn, mtask);
-                       break;
-               default:
-                       rc = ISCSI_ERR_BAD_OPCODE;
-                       break;
-               }
-       } else if (itt == ~0U) {
+       if (itt == ~0U) {
                iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
 
                switch(opcode) {
@@ -766,11 +785,104 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        rc = ISCSI_ERR_BAD_OPCODE;
                        break;
                }
-       } else
-               rc = ISCSI_ERR_BAD_ITT;
+               goto out;
+       }
 
+       switch(opcode) {
+       case ISCSI_OP_SCSI_CMD_RSP:
+       case ISCSI_OP_SCSI_DATA_IN:
+               task = iscsi_itt_to_ctask(conn, hdr->itt);
+               if (!task)
+                       return ISCSI_ERR_BAD_ITT;
+               break;
+       case ISCSI_OP_R2T:
+               /*
+                * LLD handles R2Ts if they need to.
+                */
+               return 0;
+       case ISCSI_OP_LOGOUT_RSP:
+       case ISCSI_OP_LOGIN_RSP:
+       case ISCSI_OP_TEXT_RSP:
+       case ISCSI_OP_SCSI_TMFUNC_RSP:
+       case ISCSI_OP_NOOP_IN:
+               task = iscsi_itt_to_task(conn, hdr->itt);
+               if (!task)
+                       return ISCSI_ERR_BAD_ITT;
+               break;
+       default:
+               return ISCSI_ERR_BAD_OPCODE;
+       }
+
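+       /* the itt resolved to a task; now handle the response */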
+       switch(opcode) {
+       case ISCSI_OP_SCSI_CMD_RSP:
+               iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+               break;
+       case ISCSI_OP_SCSI_DATA_IN:
+               if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+                       conn->scsirsp_pdus_cnt++;
+                       iscsi_update_cmdsn(session,
+                                          (struct iscsi_nopin*) hdr);
+                       __iscsi_put_task(task);
+               }
+               break;
+       case ISCSI_OP_LOGOUT_RSP:
+               iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+               if (datalen) {
+                       rc = ISCSI_ERR_PROTO;
+                       break;
+               }
+               conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+               goto recv_pdu;
+       case ISCSI_OP_LOGIN_RSP:
+       case ISCSI_OP_TEXT_RSP:
+               iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+               /*
+                * login related PDU's exp_statsn is handled in
+                * userspace
+                */
+               goto recv_pdu;
+       case ISCSI_OP_SCSI_TMFUNC_RSP:
+               iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+               if (datalen) {
+                       rc = ISCSI_ERR_PROTO;
+                       break;
+               }
+
+               iscsi_tmf_rsp(conn, hdr);
+               __iscsi_put_task(task);
+               break;
+       case ISCSI_OP_NOOP_IN:
+               iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+               if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+                       rc = ISCSI_ERR_PROTO;
+                       break;
+               }
+               conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+
+               if (conn->ping_task != task)
+                       /*
+                        * If this is not in response to one of our
+                        * nops then it must be from userspace.
+                        */
+                       goto recv_pdu;
+
+               mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+               __iscsi_put_task(task);
+               break;
+       default:
+               rc = ISCSI_ERR_BAD_OPCODE;
+               break;
+       }
+
+out:
+       return rc;
+recv_pdu:
+       if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+               rc = ISCSI_ERR_CONN_FAILED;
+       __iscsi_put_task(task);
        return rc;
 }
+EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
 
 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                       char *data, int datalen)
@@ -784,51 +896,63 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 }
 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
 
-/* verify itt (itt encoding: age+cid+itt) */
-int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-                    uint32_t *ret_itt)
+int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
        struct iscsi_session *session = conn->session;
-       struct iscsi_cmd_task *ctask;
-       uint32_t itt;
+       uint32_t i;
 
-       if (hdr->itt != RESERVED_ITT) {
-               if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
-                   (session->age << ISCSI_AGE_SHIFT)) {
-                       iscsi_conn_printk(KERN_ERR, conn,
-                                         "received itt %x expected session "
-                                         "age (%x)\n", (__force u32)hdr->itt,
-                                         session->age & ISCSI_AGE_MASK);
-                       return ISCSI_ERR_BAD_ITT;
-               }
+       if (itt == RESERVED_ITT)
+               return 0;
 
-               itt = get_itt(hdr->itt);
-       } else
-               itt = ~0U;
+       if (((__force u32)itt & ISCSI_AGE_MASK) !=
+           (session->age << ISCSI_AGE_SHIFT)) {
+               iscsi_conn_printk(KERN_ERR, conn,
+                                 "received itt %x expected session age (%x)\n",
+                                 (__force u32)itt, session->age);
+               return ISCSI_ERR_BAD_ITT;
+       }
 
-       if (itt < session->cmds_max) {
-               ctask = session->cmds[itt];
+       i = get_itt(itt);
+       if (i >= session->cmds_max) {
+               iscsi_conn_printk(KERN_ERR, conn,
+                                 "received invalid itt index %u (max cmds "
+                                 "%u).\n", i, session->cmds_max);
+               return ISCSI_ERR_BAD_ITT;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_verify_itt);
 
-               if (!ctask->sc) {
-                       iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
-                                         "with itt 0x%x\n", ctask->itt);
-                       /* force drop */
-                       return ISCSI_ERR_NO_SCSI_CMD;
-               }
+/**
+ * iscsi_itt_to_ctask - look up ctask by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for cmd tasks.
+ *
+ * The session lock must be held.
+ */
+struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+{
+       struct iscsi_task *task;
 
-               if (ctask->sc->SCp.phase != session->age) {
-                       iscsi_conn_printk(KERN_ERR, conn,
-                                         "iscsi: ctask's session age %d, "
-                                         "expected %d\n", ctask->sc->SCp.phase,
-                                         session->age);
-                       return ISCSI_ERR_SESSION_FAILED;
-               }
+       if (iscsi_verify_itt(conn, itt))
+               return NULL;
+
+       task = iscsi_itt_to_task(conn, itt);
+       if (!task || !task->sc)
+               return NULL;
+
+       if (task->sc->SCp.phase != conn->session->age) {
+               iscsi_session_printk(KERN_ERR, conn->session,
+                                 "task's session age %d, expected %d\n",
+                                 task->sc->SCp.phase, conn->session->age);
+               return NULL;
        }
 
-       *ret_itt = itt;
-       return 0;
+       return task;
 }
-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
 
 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 {
@@ -850,61 +974,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
-static void iscsi_prep_mtask(struct iscsi_conn *conn,
-                            struct iscsi_mgmt_task *mtask)
-{
-       struct iscsi_session *session = conn->session;
-       struct iscsi_hdr *hdr = mtask->hdr;
-       struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
-
-       if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
-           hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
-               nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
-       /*
-        * pre-format CmdSN for outgoing PDU.
-        */
-       nop->cmdsn = cpu_to_be32(session->cmdsn);
-       if (hdr->itt != RESERVED_ITT) {
-               hdr->itt = build_itt(mtask->itt, session->age);
-               /*
-                * TODO: We always use immediate, so we never hit this.
-                * If we start to send tmfs or nops as non-immediate then
-                * we should start checking the cmdsn numbers for mgmt tasks.
-                */
-               if (conn->c_stage == ISCSI_CONN_STARTED &&
-                   !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
-                       session->queued_cmdsn++;
-                       session->cmdsn++;
-               }
-       }
-
-       if (session->tt->init_mgmt_task)
-               session->tt->init_mgmt_task(conn, mtask);
-
-       debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
-                  hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
-                  mtask->data_count);
-}
-
-static int iscsi_xmit_mtask(struct iscsi_conn *conn)
-{
-       struct iscsi_hdr *hdr = conn->mtask->hdr;
-       int rc;
-
-       if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
-               conn->session->state = ISCSI_STATE_LOGGING_OUT;
-       spin_unlock_bh(&conn->session->lock);
-
-       rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
-       spin_lock_bh(&conn->session->lock);
-       if (rc)
-               return rc;
-
-       /* done with this in-progress mtask */
-       conn->mtask = NULL;
-       return 0;
-}
-
 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 {
        struct iscsi_session *session = conn->session;
@@ -922,37 +991,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
        return 0;
 }
 
-static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn)
 {
-       struct iscsi_cmd_task *ctask = conn->ctask;
+       struct iscsi_task *task = conn->task;
        int rc;
 
-       __iscsi_get_ctask(ctask);
+       __iscsi_get_task(task);
        spin_unlock_bh(&conn->session->lock);
-       rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+       rc = conn->session->tt->xmit_task(task);
        spin_lock_bh(&conn->session->lock);
-       __iscsi_put_ctask(ctask);
+       __iscsi_put_task(task);
        if (!rc)
-               /* done with this ctask */
-               conn->ctask = NULL;
+               /* done with this task */
+               conn->task = NULL;
        return rc;
 }
 
 /**
- * iscsi_requeue_ctask - requeue ctask to run from session workqueue
- * @ctask: ctask to requeue
+ * iscsi_requeue_task - requeue task to run from session workqueue
+ * @task: task to requeue
  *
- * LLDs that need to run a ctask from the session workqueue should call
- * this. The session lock must be held.
+ * LLDs that need to run a task from the session workqueue should call
+ * this. The session lock must be held. This should only be called
+ * by software drivers.
  */
-void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_requeue_task(struct iscsi_task *task)
 {
-       struct iscsi_conn *conn = ctask->conn;
+       struct iscsi_conn *conn = task->conn;
 
-       list_move_tail(&ctask->running, &conn->requeue);
+       list_move_tail(&task->running, &conn->requeue);
        scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
-EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+EXPORT_SYMBOL_GPL(iscsi_requeue_task);
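
Usage note for the kernel-doc above, a minimal hedged sketch and not part of this patch: how a software LLD might hand a task back to the session workqueue. Only iscsi_requeue_task() and its locking rule come from the patch; the example_lld_* name and the R2T trigger are assumptions.

/* Hedged sketch: example_lld_* is hypothetical. */
static void example_lld_queue_r2t_data(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;

	spin_lock_bh(&conn->session->lock);
	/* session lock must be held; software transports only */
	iscsi_requeue_task(task);
	spin_unlock_bh(&conn->session->lock);
}
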
 
 /**
  * iscsi_data_xmit - xmit any command into the scheduled connection
@@ -974,14 +1044,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
                return -ENODATA;
        }
 
-       if (conn->ctask) {
-               rc = iscsi_xmit_ctask(conn);
-               if (rc)
-                       goto again;
-       }
-
-       if (conn->mtask) {
-               rc = iscsi_xmit_mtask(conn);
+       if (conn->task) {
+               rc = iscsi_xmit_task(conn);
                if (rc)
                        goto again;
        }
@@ -993,17 +1057,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
         */
 check_mgmt:
        while (!list_empty(&conn->mgmtqueue)) {
-               conn->mtask = list_entry(conn->mgmtqueue.next,
-                                        struct iscsi_mgmt_task, running);
-               if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-                       iscsi_free_mgmt_task(conn, conn->mtask);
-                       conn->mtask = NULL;
+               conn->task = list_entry(conn->mgmtqueue.next,
+                                        struct iscsi_task, running);
+               if (iscsi_prep_mgmt_task(conn, conn->task)) {
+                       __iscsi_put_task(conn->task);
+                       conn->task = NULL;
                        continue;
                }
-
-               iscsi_prep_mtask(conn, conn->mtask);
-               list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
-               rc = iscsi_xmit_mtask(conn);
+               rc = iscsi_xmit_task(conn);
                if (rc)
                        goto again;
        }
@@ -1013,24 +1074,21 @@ check_mgmt:
                if (conn->tmf_state == TMF_QUEUED)
                        break;
 
-               conn->ctask = list_entry(conn->xmitqueue.next,
-                                        struct iscsi_cmd_task, running);
+               conn->task = list_entry(conn->xmitqueue.next,
+                                        struct iscsi_task, running);
                if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-                       fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+                       fail_command(conn, conn->task, DID_IMM_RETRY << 16);
                        continue;
                }
-               if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
-                       fail_command(conn, conn->ctask, DID_ABORT << 16);
+               if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+                       fail_command(conn, conn->task, DID_ABORT << 16);
                        continue;
                }
-
-               conn->ctask->state = ISCSI_TASK_RUNNING;
-               list_move_tail(conn->xmitqueue.next, &conn->run_list);
-               rc = iscsi_xmit_ctask(conn);
+               rc = iscsi_xmit_task(conn);
                if (rc)
                        goto again;
                /*
-                * we could continuously get new ctask requests so
+                * we could continuously get new task requests so
                 * we need to check the mgmt queue for nops that need to
                 * be sent to avoid starvation
                 */
@@ -1048,11 +1106,11 @@ check_mgmt:
                if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
                        break;
 
-               conn->ctask = list_entry(conn->requeue.next,
-                                        struct iscsi_cmd_task, running);
-               conn->ctask->state = ISCSI_TASK_RUNNING;
+               conn->task = list_entry(conn->requeue.next,
+                                        struct iscsi_task, running);
+               conn->task->state = ISCSI_TASK_RUNNING;
                list_move_tail(conn->requeue.next, &conn->run_list);
-               rc = iscsi_xmit_ctask(conn);
+               rc = iscsi_xmit_task(conn);
                if (rc)
                        goto again;
                if (!list_empty(&conn->mgmtqueue))
@@ -1096,11 +1154,12 @@ enum {
 
 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
+       struct iscsi_cls_session *cls_session;
        struct Scsi_Host *host;
        int reason = 0;
        struct iscsi_session *session;
        struct iscsi_conn *conn;
-       struct iscsi_cmd_task *ctask = NULL;
+       struct iscsi_task *task = NULL;
 
        sc->scsi_done = done;
        sc->result = 0;
@@ -1109,10 +1168,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
        host = sc->device->host;
        spin_unlock(host->host_lock);
 
-       session = iscsi_hostdata(host->hostdata);
+       cls_session = starget_to_session(scsi_target(sc->device));
+       session = cls_session->dd_data;
        spin_lock(&session->lock);
 
-       reason = iscsi_session_chkready(session_to_cls(session));
+       reason = iscsi_session_chkready(cls_session);
        if (reason) {
                sc->result = reason;
                goto fault;
@@ -1167,26 +1227,39 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                goto reject;
        }
 
-       if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+       if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
                         sizeof(void*))) {
                reason = FAILURE_OOM;
                goto reject;
        }
-       session->queued_cmdsn++;
-
        sc->SCp.phase = session->age;
-       sc->SCp.ptr = (char *)ctask;
-
-       atomic_set(&ctask->refcount, 1);
-       ctask->state = ISCSI_TASK_PENDING;
-       ctask->conn = conn;
-       ctask->sc = sc;
-       INIT_LIST_HEAD(&ctask->running);
+       sc->SCp.ptr = (char *)task;
+
+       atomic_set(&task->refcount, 1);
+       task->state = ISCSI_TASK_PENDING;
+       task->conn = conn;
+       task->sc = sc;
+       INIT_LIST_HEAD(&task->running);
+       list_add_tail(&task->running, &conn->xmitqueue);
+
+       if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+               if (iscsi_prep_scsi_cmd_pdu(task)) {
+                       sc->result = DID_ABORT << 16;
+                       sc->scsi_done = NULL;
+                       iscsi_complete_command(task);
+                       goto fault;
+               }
+               if (session->tt->xmit_task(task)) {
+                       sc->scsi_done = NULL;
+                       iscsi_complete_command(task);
+                       reason = FAILURE_SESSION_NOT_READY;
+                       goto reject;
+               }
+       } else
+               scsi_queue_work(session->host, &conn->xmitwork);
 
-       list_add_tail(&ctask->running, &conn->xmitqueue);
+       session->queued_cmdsn++;
        spin_unlock(&session->lock);
-
-       scsi_queue_work(host, &conn->xmitwork);
        spin_lock(host->host_lock);
        return 0;
 
@@ -1205,7 +1278,7 @@ fault:
                scsi_out(sc)->resid = scsi_out(sc)->length;
                scsi_in(sc)->resid = scsi_in(sc)->length;
        }
-       sc->scsi_done(sc);
+       done(sc);
        spin_lock(host->host_lock);
        return 0;
 }
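
As a hedged sketch (not part of this patch) of how a transport would opt into the direct-submission branch of iscsi_queuecommand() above: the example_lld_* names are hypothetical; CAP_DATA_PATH_OFFLOAD and the xmit_task() hook are what the patch switches the data path to.

/* Hedged sketch: example_lld_* is hypothetical. */
static int example_lld_xmit_task(struct iscsi_task *task)
{
	/* hand the prepared PDU to the hardware; details are LLD specific */
	return 0;
}

static struct iscsi_transport example_lld_transport = {
	.owner		= THIS_MODULE,
	.name		= "example_lld",
	.caps		= CAP_DATA_PATH_OFFLOAD,	/* skip the xmitwork workqueue */
	.xmit_task	= example_lld_xmit_task,
};
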
@@ -1222,7 +1295,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
 
 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
 {
-       struct iscsi_session *session = class_to_transport_session(cls_session);
+       struct iscsi_session *session = cls_session->dd_data;
 
        spin_lock_bh(&session->lock);
        if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1236,9 +1309,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
 {
-       struct Scsi_Host *host = sc->device->host;
-       struct iscsi_session *session = iscsi_hostdata(host->hostdata);
-       struct iscsi_conn *conn = session->leadconn;
+       struct iscsi_cls_session *cls_session;
+       struct iscsi_session *session;
+       struct iscsi_conn *conn;
+
+       cls_session = starget_to_session(scsi_target(sc->device));
+       session = cls_session->dd_data;
+       conn = session->leadconn;
 
        mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->lock);
@@ -1300,11 +1377,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
                                   int timeout)
 {
        struct iscsi_session *session = conn->session;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
 
-       mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+       task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
                                      NULL, 0);
-       if (!mtask) {
+       if (!task) {
                spin_unlock_bh(&session->lock);
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
                spin_lock_bh(&session->lock);
@@ -1320,7 +1397,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
        spin_unlock_bh(&session->lock);
        mutex_unlock(&session->eh_mutex);
-       scsi_queue_work(session->host, &conn->xmitwork);
 
        /*
         * block eh thread until:
@@ -1339,7 +1415,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
        mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->lock);
-       /* if the session drops it will clean up the mtask */
+       /* if the session drops it will clean up the task */
        if (age != session->age ||
            session->state != ISCSI_STATE_LOGGED_IN)
                return -ENOTCONN;
@@ -1353,48 +1429,51 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
                              int error)
 {
-       struct iscsi_cmd_task *ctask, *tmp;
+       struct iscsi_task *task, *tmp;
 
-       if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
-               conn->ctask = NULL;
+       if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+               conn->task = NULL;
 
        /* flush pending */
-       list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
-               if (lun == ctask->sc->device->lun || lun == -1) {
+       list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+               if (lun == task->sc->device->lun || lun == -1) {
                        debug_scsi("failing pending sc %p itt 0x%x\n",
-                                  ctask->sc, ctask->itt);
-                       fail_command(conn, ctask, error << 16);
+                                  task->sc, task->itt);
+                       fail_command(conn, task, error << 16);
                }
        }
 
-       list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
-               if (lun == ctask->sc->device->lun || lun == -1) {
+       list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+               if (lun == task->sc->device->lun || lun == -1) {
                        debug_scsi("failing requeued sc %p itt 0x%x\n",
-                                  ctask->sc, ctask->itt);
-                       fail_command(conn, ctask, error << 16);
+                                  task->sc, task->itt);
+                       fail_command(conn, task, error << 16);
                }
        }
 
        /* fail all other running */
-       list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
-               if (lun == ctask->sc->device->lun || lun == -1) {
+       list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+               if (lun == task->sc->device->lun || lun == -1) {
                        debug_scsi("failing in progress sc %p itt 0x%x\n",
-                                  ctask->sc, ctask->itt);
-                       fail_command(conn, ctask, DID_BUS_BUSY << 16);
+                                  task->sc, task->itt);
+                       fail_command(conn, task, DID_BUS_BUSY << 16);
                }
        }
 }
 
-static void iscsi_suspend_tx(struct iscsi_conn *conn)
+void iscsi_suspend_tx(struct iscsi_conn *conn)
 {
        set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       scsi_flush_work(conn->session->host);
+       if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+               scsi_flush_work(conn->session->host);
 }
+EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
        clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       scsi_queue_work(conn->session->host, &conn->xmitwork);
+       if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+               scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
 static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1405,7 +1484,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
        enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
 
        cls_session = starget_to_session(scsi_target(scmd->device));
-       session = class_to_transport_session(cls_session);
+       session = cls_session->dd_data;
 
        debug_scsi("scsi cmd %p timedout\n", scmd);
 
@@ -1443,7 +1522,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
                           jiffies))
                rc = EH_RESET_TIMER;
        /* if in the middle of checking the transport then give us more time */
-       if (conn->ping_mtask)
+       if (conn->ping_task)
                rc = EH_RESET_TIMER;
 done:
        spin_unlock(&session->lock);
@@ -1467,7 +1546,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
        recv_timeout *= HZ;
        last_recv = conn->last_recv;
-       if (conn->ping_mtask &&
+       if (conn->ping_task &&
            time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
                           jiffies)) {
                iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1493,27 +1572,30 @@ done:
        spin_unlock(&session->lock);
 }
 
-static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
                                      struct iscsi_tm *hdr)
 {
        memset(hdr, 0, sizeof(*hdr));
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
        hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
        hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-       memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-       hdr->rtt = ctask->hdr->itt;
-       hdr->refcmdsn = ctask->hdr->cmdsn;
+       memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+       hdr->rtt = task->hdr->itt;
+       hdr->refcmdsn = task->hdr->cmdsn;
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
 {
-       struct Scsi_Host *host = sc->device->host;
-       struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+       struct iscsi_cls_session *cls_session;
+       struct iscsi_session *session;
        struct iscsi_conn *conn;
-       struct iscsi_cmd_task *ctask;
+       struct iscsi_task *task;
        struct iscsi_tm *hdr;
        int rc, age;
 
+       cls_session = starget_to_session(scsi_target(sc->device));
+       session = cls_session->dd_data;
+
        mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->lock);
        /*
@@ -1542,17 +1624,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        conn->eh_abort_cnt++;
        age = session->age;
 
-       ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
-       debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+       task = (struct iscsi_task *)sc->SCp.ptr;
+       debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
 
-       /* ctask completed before time out */
-       if (!ctask->sc) {
+       /* task completed before time out */
+       if (!task->sc) {
                debug_scsi("sc completed while abort in progress\n");
                goto success;
        }
 
-       if (ctask->state == ISCSI_TASK_PENDING) {
-               fail_command(conn, ctask, DID_ABORT << 16);
+       if (task->state == ISCSI_TASK_PENDING) {
+               fail_command(conn, task, DID_ABORT << 16);
                goto success;
        }
 
@@ -1562,7 +1644,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        conn->tmf_state = TMF_QUEUED;
 
        hdr = &conn->tmhdr;
-       iscsi_prep_abort_task_pdu(ctask, hdr);
+       iscsi_prep_abort_task_pdu(task, hdr);
 
        if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
                rc = FAILED;
@@ -1572,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        switch (conn->tmf_state) {
        case TMF_SUCCESS:
                spin_unlock_bh(&session->lock);
+               /*
+                * stop tx side in case the target had sent an abort rsp but
+                * the initiator was still writing out data.
+                */
                iscsi_suspend_tx(conn);
                /*
-                * clean up task if aborted. grab the recv lock as a writer
+                * we do not stop the recv side because targets have been
+                * well behaved: none has ever sent a successful tmf
+                * response and then sent more data for the cmd.
                 */
-               write_lock_bh(conn->recv_lock);
                spin_lock(&session->lock);
-               fail_command(conn, ctask, DID_ABORT << 16);
+               fail_command(conn, task, DID_ABORT << 16);
                conn->tmf_state = TMF_INITIAL;
                spin_unlock(&session->lock);
-               write_unlock_bh(conn->recv_lock);
                iscsi_start_tx(conn);
                goto success_unlocked;
        case TMF_TIMEDOUT:
@@ -1591,7 +1677,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        case TMF_NOT_FOUND:
                if (!sc->SCp.ptr) {
                        conn->tmf_state = TMF_INITIAL;
-                       /* ctask completed before tmf abort response */
+                       /* task completed before tmf abort response */
                        debug_scsi("sc completed while abort in progress\n");
                        goto success;
                }
@@ -1604,7 +1690,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 success:
        spin_unlock_bh(&session->lock);
 success_unlocked:
-       debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+       debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
        mutex_unlock(&session->eh_mutex);
        return SUCCESS;
 
@@ -1612,7 +1698,7 @@ failed:
        spin_unlock_bh(&session->lock);
 failed_unlocked:
        debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
-                   ctask ? ctask->itt : 0);
+                   task ? task->itt : 0);
        mutex_unlock(&session->eh_mutex);
        return FAILED;
 }
@@ -1630,12 +1716,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
 
 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 {
-       struct Scsi_Host *host = sc->device->host;
-       struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+       struct iscsi_cls_session *cls_session;
+       struct iscsi_session *session;
        struct iscsi_conn *conn;
        struct iscsi_tm *hdr;
        int rc = FAILED;
 
+       cls_session = starget_to_session(scsi_target(sc->device));
+       session = cls_session->dd_data;
+
        debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
 
        mutex_lock(&session->eh_mutex);
@@ -1678,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
        spin_unlock_bh(&session->lock);
 
        iscsi_suspend_tx(conn);
-       /* need to grab the recv lock then session lock */
-       write_lock_bh(conn->recv_lock);
+
        spin_lock(&session->lock);
        fail_all_commands(conn, sc->device->lun, DID_ERROR);
        conn->tmf_state = TMF_INITIAL;
        spin_unlock(&session->lock);
-       write_unlock_bh(conn->recv_lock);
 
        iscsi_start_tx(conn);
        goto done;
@@ -1760,177 +1847,203 @@ void iscsi_pool_free(struct iscsi_pool *q)
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
 
-/*
- * iSCSI Session's hostdata organization:
+/**
+ * iscsi_host_add - add host to system
+ * @shost: scsi host
+ * @pdev: parent device
+ *
+ * This should be called by partial offload and software iscsi drivers
+ * to add a host to the system.
+ */
+int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+{
+       if (!shost->can_queue)
+               shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+
+       return scsi_add_host(shost, pdev);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_add);
+
+/**
+ * iscsi_host_alloc - allocate a host and driver data
+ * @sht: scsi host template
+ * @dd_data_size: driver host data size
+ * @qdepth: default device queue depth
+ *
+ * This should be called by partial offload and software iscsi drivers.
+ * To access the driver-specific memory, use the iscsi_host_priv() macro.
+ */
+struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+                                  int dd_data_size, uint16_t qdepth)
+{
+       struct Scsi_Host *shost;
+
+       shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+       if (!shost)
+               return NULL;
+       shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+
+       if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+               if (qdepth != 0)
+                       printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+                              "Queue depth must be between 1 and %d.\n",
+                              qdepth, ISCSI_MAX_CMD_PER_LUN);
+               qdepth = ISCSI_DEF_CMD_PER_LUN;
+       }
+       shost->cmd_per_lun = qdepth;
+       return shost;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_alloc);
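
A hedged probe-path sketch (not part of this patch) for the iscsi_host_alloc()/iscsi_host_add() pair documented above: example_lld_sht and example_lld_host are hypothetical; the calls, ISCSI_DEF_CMD_PER_LUN, and iscsi_host_priv() are the ones this patch introduces or references.

/* Hedged sketch: example_lld_* is hypothetical. */
static struct scsi_host_template example_lld_sht;	/* fields omitted */

struct example_lld_host {				/* per-host private data */
	int dummy;
};

static int example_lld_probe(struct device *pdev)
{
	struct Scsi_Host *shost;
	struct example_lld_host *lld_host;

	/* dd_data_size bytes are reserved behind struct iscsi_host */
	shost = iscsi_host_alloc(&example_lld_sht, sizeof(*lld_host),
				 ISCSI_DEF_CMD_PER_LUN);
	if (!shost)
		return -ENOMEM;
	lld_host = iscsi_host_priv(shost);

	if (iscsi_host_add(shost, pdev)) {
		iscsi_host_free(shost);
		return -ENODEV;
	}
	return 0;
}
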
+
+/**
+ * iscsi_host_remove - remove host and sessions
+ * @shost: scsi host
  *
- *    *------------------* <== hostdata_session(host->hostdata)
- *    | ptr to class sess|
- *    |------------------| <== iscsi_hostdata(host->hostdata)
- *    | iscsi_session    |
- *    *------------------*
+ * This also removes any sessions attached to the host. If userspace is
+ * managing a session at the same time, this will break. TODO: add
+ * refcounting to the netlink iscsi interface so an rmmod or host hot unplug
+ * does not remove the memory from under us.
  */
+void iscsi_host_remove(struct Scsi_Host *shost)
+{
+       iscsi_host_for_each_session(shost, iscsi_session_teardown);
+       scsi_remove_host(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_remove);
 
-#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
-                                _sz % sizeof(unsigned long))
+void iscsi_host_free(struct Scsi_Host *shost)
+{
+       struct iscsi_host *ihost = shost_priv(shost);
 
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+       kfree(ihost->netdev);
+       kfree(ihost->hwaddress);
+       kfree(ihost->initiatorname);
+       scsi_host_put(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_free);
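
The teardown counterpart, again only a hedged sketch: the ordering of iscsi_host_remove() before iscsi_host_free() follows from the two helpers above; the example_lld_remove() wrapper is hypothetical.

/* Hedged sketch: example_lld_remove is hypothetical. */
static void example_lld_remove(struct Scsi_Host *shost)
{
	/* tears down any attached sessions, then removes the scsi host */
	iscsi_host_remove(shost);
	/* frees the iscsi_host strings and drops the final host reference */
	iscsi_host_free(shost);
}
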
 
 /**
  * iscsi_session_setup - create iscsi cls session and host and session
- * @scsit: scsi transport template
  * @iscsit: iscsi transport template
- * @cmds_max: scsi host can queue
- * @qdepth: scsi host cmds per lun
- * @cmd_task_size: LLD ctask private data size
- * @mgmt_task_size: LLD mtask private data size
+ * @shost: scsi host
+ * @cmds_max: session can queue
+ * @cmd_task_size: LLD task private data size
  * @initial_cmdsn: initial CmdSN
- * @hostno: host no allocated
  *
  * This can be used by software iscsi_transports that allocate
  * a session per scsi host.
- **/
+ *
+ * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+ * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+ * for nop handling and login/logout requests.
+ */
 struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *iscsit,
-                   struct scsi_transport_template *scsit,
-                   uint16_t cmds_max, uint16_t qdepth,
-                   int cmd_task_size, int mgmt_task_size,
-                   uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+                   uint16_t cmds_max, int cmd_task_size,
+                   uint32_t initial_cmdsn, unsigned int id)
 {
-       struct Scsi_Host *shost;
        struct iscsi_session *session;
        struct iscsi_cls_session *cls_session;
-       int cmd_i;
+       int cmd_i, scsi_cmds, total_cmds = cmds_max;
 
-       if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
-               if (qdepth != 0)
-                       printk(KERN_ERR "iscsi: invalid queue depth of %d. "
-                             "Queue depth must be between 1 and %d.\n",
-                             qdepth, ISCSI_MAX_CMD_PER_LUN);
-               qdepth = ISCSI_DEF_CMD_PER_LUN;
+       if (!total_cmds)
+               total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+       /*
+        * The iscsi layer needs some tasks for nop handling and tmfs,
+        * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus one
+        * command for scsi IO.
+        */
+       if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+               printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+                      "must be a power of two that is at least %d.\n",
+                      total_cmds, ISCSI_TOTAL_CMDS_MIN);
+               return NULL;
        }
 
-       if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
-           cmds_max < 2) {
-               if (cmds_max != 0)
-                       printk(KERN_ERR "iscsi: invalid can_queue of %d. "
-                              "can_queue must be a power of 2 and between "
-                              "2 and %d - setting to %d.\n", cmds_max,
-                              ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
-               cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+       if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+               printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+                      "must be a power of 2 less than or equal to %d.\n",
+                      cmds_max, ISCSI_TOTAL_CMDS_MAX);
+               total_cmds = ISCSI_TOTAL_CMDS_MAX;
        }
 
-       shost = scsi_host_alloc(iscsit->host_template,
-                               hostdata_privsize(sizeof(*session)));
-       if (!shost)
-               return NULL;
-
-       /* the iscsi layer takes one task for reserve */
-       shost->can_queue = cmds_max - 1;
-       shost->cmd_per_lun = qdepth;
-       shost->max_id = 1;
-       shost->max_channel = 0;
-       shost->max_lun = iscsit->max_lun;
-       shost->max_cmd_len = iscsit->max_cmd_len;
-       shost->transportt = scsit;
-       shost->transportt->create_work_queue = 1;
-       shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
-       *hostno = shost->host_no;
+       if (!is_power_of_2(total_cmds)) {
+               printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+                      "must be a power of 2.\n", total_cmds);
+               total_cmds = rounddown_pow_of_two(total_cmds);
+               if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+                       return NULL;
+               printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+                      total_cmds);
+       }
+       scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
 
-       session = iscsi_hostdata(shost->hostdata);
-       memset(session, 0, sizeof(struct iscsi_session));
+       cls_session = iscsi_alloc_session(shost, iscsit,
+                                         sizeof(struct iscsi_session));
+       if (!cls_session)
+               return NULL;
+       session = cls_session->dd_data;
+       session->cls_session = cls_session;
        session->host = shost;
        session->state = ISCSI_STATE_FREE;
        session->fast_abort = 1;
        session->lu_reset_timeout = 15;
        session->abort_timeout = 10;
-       session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
-       session->cmds_max = cmds_max;
+       session->scsi_cmds_max = scsi_cmds;
+       session->cmds_max = total_cmds;
        session->queued_cmdsn = session->cmdsn = initial_cmdsn;
        session->exp_cmdsn = initial_cmdsn + 1;
        session->max_cmdsn = initial_cmdsn + 1;
        session->max_r2t = 1;
        session->tt = iscsit;
        mutex_init(&session->eh_mutex);
+       spin_lock_init(&session->lock);
 
        /* initialize SCSI PDU commands pool */
        if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
                            (void***)&session->cmds,
-                           cmd_task_size + sizeof(struct iscsi_cmd_task)))
+                           cmd_task_size + sizeof(struct iscsi_task)))
                goto cmdpool_alloc_fail;
 
        /* pre-format cmds pool with ITT */
        for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-               struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+               struct iscsi_task *task = session->cmds[cmd_i];
 
                if (cmd_task_size)
-                       ctask->dd_data = &ctask[1];
-               ctask->itt = cmd_i;
-               INIT_LIST_HEAD(&ctask->running);
-       }
-
-       spin_lock_init(&session->lock);
-
-       /* initialize immediate command pool */
-       if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
-                          (void***)&session->mgmt_cmds,
-                          mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
-               goto mgmtpool_alloc_fail;
-
-
-       /* pre-format immediate cmds pool with ITT */
-       for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-               struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-
-               if (mgmt_task_size)
-                       mtask->dd_data = &mtask[1];
-               mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
-               INIT_LIST_HEAD(&mtask->running);
+                       task->dd_data = &task[1];
+               task->itt = cmd_i;
+               INIT_LIST_HEAD(&task->running);
        }
 
-       if (scsi_add_host(shost, NULL))
-               goto add_host_fail;
-
        if (!try_module_get(iscsit->owner))
-               goto cls_session_fail;
-
-       cls_session = iscsi_create_session(shost, iscsit, 0);
-       if (!cls_session)
-               goto module_put;
-       *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
+               goto module_get_fail;
 
+       if (iscsi_add_session(cls_session, id))
+               goto cls_session_fail;
        return cls_session;
 
-module_put:
-       module_put(iscsit->owner);
 cls_session_fail:
-       scsi_remove_host(shost);
-add_host_fail:
-       iscsi_pool_free(&session->mgmtpool);
-mgmtpool_alloc_fail:
+       module_put(iscsit->owner);
+module_get_fail:
        iscsi_pool_free(&session->cmdpool);
 cmdpool_alloc_fail:
-       scsi_host_put(shost);
+       iscsi_free_session(cls_session);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
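
A hedged sketch of the new iscsi_session_setup() call under its reworked signature: the shost is pre-allocated by the LLD, cmds_max is the total (scsi + mgmt) task pool, and a non power-of-two value is rounded down as the code above shows. example_lld_transport and example_lld_task are hypothetical names.

/* Hedged sketch: example_lld_* is hypothetical. */
struct example_lld_task {				/* per-task LLD data */
	int dummy;
};

static struct iscsi_cls_session *
example_lld_create_session(struct Scsi_Host *shost, uint16_t cmds_max,
			   uint32_t initial_cmdsn, unsigned int id)
{
	/*
	 * cmds_max covers scsi and mgmt tasks; libiscsi reserves
	 * ISCSI_MGMT_CMDS_MAX of it and rounds non power-of-two values
	 * down, e.g. 250 becomes 128.
	 */
	return iscsi_session_setup(&example_lld_transport, shost, cmds_max,
				   sizeof(struct example_lld_task),
				   initial_cmdsn, id);
}
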
 
 /**
  * iscsi_session_teardown - destroy session, host, and cls_session
- * shost: scsi host
+ * @cls_session: iscsi session
  *
- * This can be used by software iscsi_transports that allocate
- * a session per scsi host.
- **/
+ * The driver must have called iscsi_remove_session before
+ * calling this.
+ */
 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
-       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-       struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+       struct iscsi_session *session = cls_session->dd_data;
        struct module *owner = cls_session->transport->owner;
 
-       iscsi_remove_session(cls_session);
-       scsi_remove_host(shost);
-
-       iscsi_pool_free(&session->mgmtpool);
        iscsi_pool_free(&session->cmdpool);
 
        kfree(session->password);
@@ -1938,12 +2051,10 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
        kfree(session->username);
        kfree(session->username_in);
        kfree(session->targetname);
-       kfree(session->netdev);
-       kfree(session->hwaddress);
        kfree(session->initiatorname);
+       kfree(session->ifacename);
 
-       iscsi_free_session(cls_session);
-       scsi_host_put(shost);
+       iscsi_destroy_session(cls_session);
        module_put(owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1951,22 +2062,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 /**
  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
  * @cls_session: iscsi_cls_session
+ * @dd_size: private driver data size
  * @conn_idx: cid
- **/
+ */
 struct iscsi_cls_conn *
-iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+                uint32_t conn_idx)
 {
-       struct iscsi_session *session = class_to_transport_session(cls_session);
+       struct iscsi_session *session = cls_session->dd_data;
        struct iscsi_conn *conn;
        struct iscsi_cls_conn *cls_conn;
        char *data;
 
-       cls_conn = iscsi_create_conn(cls_session, conn_idx);
+       cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+                                    conn_idx);
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
-       memset(conn, 0, sizeof(*conn));
+       memset(conn, 0, sizeof(*conn) + dd_size);
 
+       conn->dd_data = cls_conn->dd_data + sizeof(*conn);
        conn->session = session;
        conn->cls_conn = cls_conn;
        conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
@@ -1985,30 +2100,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
        INIT_LIST_HEAD(&conn->requeue);
        INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
-       /* allocate login_mtask used for the login/text sequences */
+       /* allocate login_task used for the login/text sequences */
        spin_lock_bh(&session->lock);
-       if (!__kfifo_get(session->mgmtpool.queue,
-                         (void*)&conn->login_mtask,
+       if (!__kfifo_get(session->cmdpool.queue,
+                         (void*)&conn->login_task,
                         sizeof(void*))) {
                spin_unlock_bh(&session->lock);
-               goto login_mtask_alloc_fail;
+               goto login_task_alloc_fail;
        }
        spin_unlock_bh(&session->lock);
 
        data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
        if (!data)
-               goto login_mtask_data_alloc_fail;
-       conn->login_mtask->data = conn->data = data;
+               goto login_task_data_alloc_fail;
+       conn->login_task->data = conn->data = data;
 
        init_timer(&conn->tmf_timer);
        init_waitqueue_head(&conn->ehwait);
 
        return cls_conn;
 
-login_mtask_data_alloc_fail:
-       __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+login_task_data_alloc_fail:
+       __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
                    sizeof(void*));
-login_mtask_alloc_fail:
+login_task_alloc_fail:
        iscsi_destroy_conn(cls_conn);
        return NULL;
 }
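
The new dd_size parameter gives the LLD private memory behind struct iscsi_conn, reachable through conn->dd_data as set up above. A hedged sketch (hypothetical example_lld_* names) of a connection-create hook using it:

/* Hedged sketch: example_lld_* is hypothetical. */
struct example_lld_conn {				/* per-conn private data */
	struct iscsi_conn *iscsi_conn;
};

static struct iscsi_cls_conn *
example_lld_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct example_lld_conn *lld_conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*lld_conn), cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	/* iscsi_conn_setup points conn->dd_data just past struct iscsi_conn */
	lld_conn = conn->dd_data;
	lld_conn->iscsi_conn = conn;
	return cls_conn;
}
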
@@ -2068,7 +2183,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        spin_lock_bh(&session->lock);
        kfree(conn->data);
        kfree(conn->persistent_address);
-       __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+       __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
                    sizeof(void*));
        if (session->leadconn == conn)
                session->leadconn = NULL;
@@ -2140,7 +2255,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->lock);
 
-       iscsi_unblock_session(session_to_cls(session));
+       iscsi_unblock_session(session->cls_session);
        wake_up(&conn->ehwait);
        return 0;
 }
@@ -2149,21 +2264,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
 static void
 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-       struct iscsi_mgmt_task *mtask, *tmp;
+       struct iscsi_task *task, *tmp;
 
        /* handle pending */
-       list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
-               debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
-               iscsi_free_mgmt_task(conn, mtask);
+       list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+               debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+               /* release ref from prep task */
+               __iscsi_put_task(task);
        }
 
        /* handle running */
-       list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
-               debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
-               iscsi_free_mgmt_task(conn, mtask);
+       list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+               debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+               /* release ref from prep task */
+               __iscsi_put_task(task);
        }
 
-       conn->mtask = NULL;
+       conn->task = NULL;
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2181,17 +2298,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
                return;
        }
 
-       /*
-        * The LLD either freed/unset the lock on us, or userspace called
-        * stop but did not create a proper connection (connection was never
-        * bound or it was unbound then stop was called).
-        */
-       if (!conn->recv_lock) {
-               spin_unlock_bh(&session->lock);
-               mutex_unlock(&session->eh_mutex);
-               return;
-       }
-
        /*
         * When this is called for the in_login state, we only want to clean
         * up the login task and connection. We do not need to block and set
@@ -2208,11 +2314,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
        spin_unlock_bh(&session->lock);
 
        iscsi_suspend_tx(conn);
-
-       write_lock_bh(conn->recv_lock);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-       write_unlock_bh(conn->recv_lock);
-
        /*
         * for connection level recovery we should not calculate
         * header digest. conn->hdr_size used for optimization
@@ -2225,7 +2326,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
                if (session->state == ISCSI_STATE_IN_RECOVERY &&
                    old_stop_stage != STOP_CONN_RECOVER) {
                        debug_scsi("blocking session\n");
-                       iscsi_block_session(session_to_cls(session));
+                       iscsi_block_session(session->cls_session);
                }
        }
 
@@ -2260,7 +2361,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
                    struct iscsi_cls_conn *cls_conn, int is_leading)
 {
-       struct iscsi_session *session = class_to_transport_session(cls_session);
+       struct iscsi_session *session = cls_session->dd_data;
        struct iscsi_conn *conn = cls_conn->dd_data;
 
        spin_lock_bh(&session->lock);
@@ -2399,6 +2500,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
                if (!conn->persistent_address)
                        return -ENOMEM;
                break;
+       case ISCSI_PARAM_IFACE_NAME:
+               if (!session->ifacename)
+                       session->ifacename = kstrdup(buf, GFP_KERNEL);
+               break;
+       case ISCSI_PARAM_INITIATOR_NAME:
+               if (!session->initiatorname)
+                       session->initiatorname = kstrdup(buf, GFP_KERNEL);
+               break;
        default:
                return -ENOSYS;
        }
@@ -2410,8 +2519,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
                            enum iscsi_param param, char *buf)
 {
-       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-       struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+       struct iscsi_session *session = cls_session->dd_data;
        int len;
 
        switch(param) {
@@ -2466,6 +2574,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
        case ISCSI_PARAM_PASSWORD_IN:
                len = sprintf(buf, "%s\n", session->password_in);
                break;
+       case ISCSI_PARAM_IFACE_NAME:
+               len = sprintf(buf, "%s\n", session->ifacename);
+               break;
+       case ISCSI_PARAM_INITIATOR_NAME:
+               if (!session->initiatorname)
+                       len = sprintf(buf, "%s\n", "unknown");
+               else
+                       len = sprintf(buf, "%s\n", session->initiatorname);
+               break;
        default:
                return -ENOSYS;
        }
@@ -2525,29 +2642,35 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
                         char *buf)
 {
-       struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+       struct iscsi_host *ihost = shost_priv(shost);
        int len;
 
        switch (param) {
        case ISCSI_HOST_PARAM_NETDEV_NAME:
-               if (!session->netdev)
+               if (!ihost->netdev)
                        len = sprintf(buf, "%s\n", "default");
                else
-                       len = sprintf(buf, "%s\n", session->netdev);
+                       len = sprintf(buf, "%s\n", ihost->netdev);
                break;
        case ISCSI_HOST_PARAM_HWADDRESS:
-               if (!session->hwaddress)
+               if (!ihost->hwaddress)
                        len = sprintf(buf, "%s\n", "default");
                else
-                       len = sprintf(buf, "%s\n", session->hwaddress);
+                       len = sprintf(buf, "%s\n", ihost->hwaddress);
                break;
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
-               if (!session->initiatorname)
+               if (!ihost->initiatorname)
                        len = sprintf(buf, "%s\n", "unknown");
                else
-                       len = sprintf(buf, "%s\n", session->initiatorname);
+                       len = sprintf(buf, "%s\n", ihost->initiatorname);
+               break;
+       case ISCSI_HOST_PARAM_IPADDRESS:
+               if (!strlen(ihost->local_address))
+                       len = sprintf(buf, "%s\n", "unknown");
+               else
+                       len = sprintf(buf, "%s\n",
+                                     ihost->local_address);
                break;
-
        default:
                return -ENOSYS;
        }
@@ -2559,20 +2682,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
                         char *buf, int buflen)
 {
-       struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+       struct iscsi_host *ihost = shost_priv(shost);
 
        switch (param) {
        case ISCSI_HOST_PARAM_NETDEV_NAME:
-               if (!session->netdev)
-                       session->netdev = kstrdup(buf, GFP_KERNEL);
+               if (!ihost->netdev)
+                       ihost->netdev = kstrdup(buf, GFP_KERNEL);
                break;
        case ISCSI_HOST_PARAM_HWADDRESS:
-               if (!session->hwaddress)
-                       session->hwaddress = kstrdup(buf, GFP_KERNEL);
+               if (!ihost->hwaddress)
+                       ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
                break;
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
-               if (!session->initiatorname)
-                       session->initiatorname = kstrdup(buf, GFP_KERNEL);
+               if (!ihost->initiatorname)
+                       ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
                break;
        default:
                return -ENOSYS;
index ec0b0f6..e0e018d 100644 (file)
@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT    256     /* sg element count per scsi cmnd */
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN         100     /* vport symbolic name length */
 
 /*
  * Following time intervals are used of adjusting SCSI device
@@ -59,6 +60,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT     32
 
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY                (1<<0)
+
 enum lpfc_polling_flags {
        ENABLE_FCP_RING_POLLING = 0x1,
        DISABLE_FCP_RING_INT    = 0x2
@@ -425,9 +429,6 @@ struct lpfc_hba {
 
        uint16_t pci_cfg_value;
 
-       uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION  4
-
        uint8_t fc_linkspeed;   /* Link speed after last READ_LA */
 
        uint32_t fc_eventTag;   /* event tag for link attention */
@@ -489,8 +490,9 @@ struct lpfc_hba {
        uint32_t              work_hs;      /* HS stored in case of ERRAT */
        uint32_t              work_status[2]; /* Extra status from SLIM */
 
-       wait_queue_head_t    *work_wait;
+       wait_queue_head_t    work_waitq;
        struct task_struct   *worker_thread;
+       long data_flags;
 
        uint32_t hbq_in_use;            /* HBQs in use flag */
        struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
                phba->link_state == LPFC_HBA_READY;
 }
 
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+       /* Set the lpfc data pending flag */
+       set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+       /* Wake up worker thread */
+       wake_up(&phba->work_waitq);
+       return;
+}
+
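
The lpfc_worker_wake_up() inline above replaces the open-coded "check phba->work_wait under hbalock" pattern. A hedged sketch of the timer-handler shape the lpfc_fdmi_tmo and lpfc_els_timeout hunks below converge on; WORKER_EXAMPLE_TMO is a hypothetical flag name standing in for the real WORKER_* event bits.

/* Hedged sketch: WORKER_EXAMPLE_TMO and example_lpfc_tmo are hypothetical. */
static void example_lpfc_tmo(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_EXAMPLE_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_EXAMPLE_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);	/* sets LPFC_DATA_READY, wakes work_waitq */
}
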
 #define FC_REG_DUMP_EVENT              0x10    /* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT       0x20    /* Register for temperature
                                                   event */
index 960baaf..37bfa0b 100644 (file)
@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                /* Don't allow mailbox commands to be sent when blocked
                 * or when in the middle of discovery
                 */
-               if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
-                   vport->fc_flag & FC_NDISC_ACTIVE) {
+               if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
                        sysfs_mbox_idle(phba);
                        spin_unlock_irq(&phba->hbalock);
                        return  -EAGAIN;
index 7c9f831..1b82452 100644 (file)
@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
 int lpfc_hba_down_prep(struct lpfc_hba *);
 int lpfc_hba_down_post(struct lpfc_hba *);
 void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
 void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
 int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;
 
 int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *,        size_t);
 void lpfc_terminate_rport_io(struct fc_rport *);
 void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
 
index 153afae..7fc74cf 100644 (file)
@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                /* Not enough posted buffers; Try posting more buffers */
                phba->fc_stat.NoRcvBuf++;
                if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-                       lpfc_post_buffer(phba, pring, 2, 1);
+                       lpfc_post_buffer(phba, pring, 2);
                return;
        }
 
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        }
                        list_del(&iocbq->list);
                        lpfc_sli_release_iocbq(phba, iocbq);
-                       lpfc_post_buffer(phba, pring, i, 1);
+                       lpfc_post_buffer(phba, pring, i);
                }
        }
 }
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        return;
 }
 
-static int
+int
 lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
        size_t size)
 {
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long iflag;
 
        spin_lock_irqsave(&vport->work_port_lock, iflag);
-       if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+       tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+       if (!tmo_posted)
                vport->work_port_events |= WORKER_FDMI_TMO;
-               spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+       spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-               spin_lock_irqsave(&phba->hbalock, iflag);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, iflag);
-       }
-       else
-               spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
+       return;
 }
 
 void
index 886c5f1..f54e0f7 100644 (file)
@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_work_evt *evtp;
 
+       if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+               return;
        spin_lock_irq(shost->host_lock);
        nlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
        del_timer_sync(&nlp->nlp_delayfunc);
        nlp->nlp_last_elscmd = 0;
-
        if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
                list_del_init(&nlp->els_retry_evt.evt_listp);
                /* Decrement nlp reference count held for the delayed retry */
                evtp = &nlp->els_retry_evt;
                lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
        }
-
        if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
                spin_lock_irq(shost->host_lock);
                nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                if (vport->num_disc_nodes) {
-                       /* Check to see if there are more
-                        * PLOGIs to be sent
-                        */
-                       lpfc_more_plogi(vport);
-
+                       if (vport->port_state < LPFC_VPORT_READY) {
+                               /* Check if there are more ADISCs to be sent */
+                               lpfc_more_adisc(vport);
+                               if ((vport->num_disc_nodes == 0) &&
+                                   (vport->fc_npr_cnt))
+                                       lpfc_els_disc_plogi(vport);
+                       } else {
+                               /* Check if there are more PLOGIs to be sent */
+                               lpfc_more_plogi(vport);
+                       }
                        if (vport->num_disc_nodes == 0) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
        unsigned long flags;
        struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 
-       ndlp = (struct lpfc_nodelist *) ptr;
-       phba = ndlp->vport->phba;
-       evtp = &ndlp->els_retry_evt;
-
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!list_empty(&evtp->evt_listp)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
         * count until the queued work is done
         */
        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       evtp->evt       = LPFC_EVT_ELS_RETRY;
-       list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
+       if (evtp->evt_arg1) {
+               evtp->evt = LPFC_EVT_ELS_RETRY;
+               list_add_tail(&evtp->evt_listp, &phba->work_list);
                lpfc_worker_wake_up(phba);
-
+       }
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return;
 }
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        npr = (PRLI *) pcmd;
        vpd = &phba->vpd;
        /*
-        * If our firmware version is 3.20 or later,
-        * set the following bits for FC-TAPE support.
+        * If the remote port is a target and our firmware version is 3.20 or
+        * later, set the following bits for FC-TAPE support.
         */
-       if (vpd->rev.feaLevelHigh >= 0x02) {
+       if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+           (vpd->rev.feaLevelHigh >= 0x02)) {
                npr->ConfmComplAllowed = 1;
                npr->Retry = 1;
                npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 {
        struct lpfc_nodelist *ndlp = NULL;
 
-       /* Look at all nodes effected by pending RSCNs and move
-        * them to NPR state.
-        */
-
+       /* Move all nodes affected by pending RSCNs to NPR state. */
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp) ||
-                   ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
-                   lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
+                   (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+                   !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
                        continue;
-
                lpfc_disc_state_machine(vport, ndlp, NULL,
-                                               NLP_EVT_DEVICE_RECOVERY);
-
-               /*
-                * Make sure NLP_DELAY_TMO is NOT running after a device
-                * recovery event.
-                */
-               if (ndlp->nlp_flag & NLP_DELAY_TMO)
-                       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+                                       NLP_EVT_DEVICE_RECOVERY);
+               lpfc_cancel_retry_delay_tmo(vport, ndlp);
        }
-
        return 0;
 }
 
@@ -3781,91 +3772,27 @@ static int
 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                 struct lpfc_nodelist *fan_ndlp)
 {
-       struct lpfc_dmabuf *pcmd;
+       struct lpfc_hba *phba = vport->phba;
        uint32_t *lp;
-       IOCB_t *icmd;
-       uint32_t cmd, did;
        FAN *fp;
-       struct lpfc_nodelist *ndlp, *next_ndlp;
-       struct lpfc_hba *phba = vport->phba;
-
-       /* FAN received */
-       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0265 FAN received\n");
-       icmd = &cmdiocb->iocb;
-       did = icmd->un.elsreq64.remoteID;
-       pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
-       lp = (uint32_t *)pcmd->virt;
-
-       cmd = *lp++;
-       fp = (FAN *) lp;
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+       lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+       fp = (FAN *) ++lp;
        /* FAN received; Fan does not have a reply sequence */
-
-       if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
+       if ((vport == phba->pport) &&
+           (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
                if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
-                       sizeof(struct lpfc_name)) != 0) ||
+                           sizeof(struct lpfc_name))) ||
                    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
-                       sizeof(struct lpfc_name)) != 0)) {
-                       /*
-                        * This node has switched fabrics.  FLOGI is required
-                        * Clean up the old rpi's
-                        */
-
-                       list_for_each_entry_safe(ndlp, next_ndlp,
-                                                &vport->fc_nodes, nlp_listp) {
-                               if (!NLP_CHK_NODE_ACT(ndlp))
-                                       continue;
-                               if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-                                       continue;
-                               if (ndlp->nlp_type & NLP_FABRIC) {
-                                       /*
-                                        * Clean up old Fabric, Nameserver and
-                                        * other NLP_FABRIC logins
-                                        */
-                                       lpfc_drop_node(vport, ndlp);
-
-                               } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-                                       /* Fail outstanding I/O now since this
-                                        * device is marked for PLOGI
-                                        */
-                                       lpfc_unreg_rpi(vport, ndlp);
-                               }
-                       }
-
+                           sizeof(struct lpfc_name)))) {
+                       /* This port has switched fabrics. FLOGI is required */
                        lpfc_initial_flogi(vport);
-                       return 0;
-               }
-               /* Discovery not needed,
-                * move the nodes to their original state.
-                */
-               list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
-                                        nlp_listp) {
-                       if (!NLP_CHK_NODE_ACT(ndlp))
-                               continue;
-                       if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-                               continue;
-
-                       switch (ndlp->nlp_prev_state) {
-                       case NLP_STE_UNMAPPED_NODE:
-                               ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-                               lpfc_nlp_set_state(vport, ndlp,
-                                                  NLP_STE_UNMAPPED_NODE);
-                               break;
-
-                       case NLP_STE_MAPPED_NODE:
-                               ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-                               lpfc_nlp_set_state(vport, ndlp,
-                                                  NLP_STE_MAPPED_NODE);
-                               break;
-
-                       default:
-                               break;
-                       }
+               } else {
+                       /* FAN verified - skip FLOGI */
+                       vport->fc_myDID = vport->fc_prevDID;
+                       lpfc_issue_fabric_reglogin(vport);
                }
-
-               /* Start discovery - this should just do CLEAR_LA */
-               lpfc_disc_start(vport);
        }
        return 0;
 }
@@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long iflag;
 
        spin_lock_irqsave(&vport->work_port_lock, iflag);
-       if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+       tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+       if (!tmo_posted)
                vport->work_port_events |= WORKER_ELS_TMO;
-               spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+       spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-               spin_lock_irqsave(&phba->hbalock, iflag);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, iflag);
-       }
-       else
-               spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
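
For reference, the timer rework above (and the matching changes to lpfc_fabric_block_timeout, lpfc_disc_timeout, lpfc_hb_timeout and the queue-depth ramp handlers later in this series) all follow one shape: test and set the WORKER_*_TMO bit while holding work_port_lock, drop the lock, and wake the worker only if this call is the one that posted the event. Below is a minimal userland sketch of that idea, with a pthread mutex standing in for the spinlock; the type and helper names are invented for illustration and are not part of the driver.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define WORKER_TMO 0x1                  /* illustrative event bit */

struct port {
        pthread_mutex_t lock;           /* stands in for work_port_lock */
        uint32_t events;                /* stands in for work_port_events */
};

/* Post the timeout event; return nonzero only when this call set the bit,
 * i.e. only when the caller should wake the worker thread. */
static int post_timeout_event(struct port *p)
{
        uint32_t tmo_posted;

        pthread_mutex_lock(&p->lock);
        tmo_posted = p->events & WORKER_TMO;
        if (!tmo_posted)
                p->events |= WORKER_TMO;
        pthread_mutex_unlock(&p->lock);

        return !tmo_posted;
}

int main(void)
{
        struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER, .events = 0 };

        printf("first post wakes worker:  %d\n", post_timeout_event(&p));
        printf("second post wakes worker: %d\n", post_timeout_event(&p));
        return 0;
}

Run as-is this prints 1 then 0: a timer that fires again before the worker has consumed the first event does not trigger a second wakeup.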
 
@@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
                    els_command == ELS_CMD_FDISC)
                        continue;
 
-               if (vport != piocb->vport)
-                       continue;
-
                if (piocb->drvrTimeout > 0) {
                        if (piocb->drvrTimeout >= timeout)
                                piocb->drvrTimeout -= timeout;
@@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
        cmd = *payload;
        if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
-               lpfc_post_buffer(phba, pring, 1, 1);
+               lpfc_post_buffer(phba, pring, 1);
 
        did = icmd->un.rcvels.remoteID;
        if (icmd->ulpStatus) {
@@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                phba->fc_stat.NoRcvBuf++;
                /* Not enough posted buffers; Try posting more buffers */
                if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-                       lpfc_post_buffer(phba, pring, 0, 1);
+                       lpfc_post_buffer(phba, pring, 0);
                return;
        }
 
@@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
        struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
        unsigned long iflags;
        uint32_t tmo_posted;
+
        spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
        tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
 
-       if (!tmo_posted) {
-               spin_lock_irqsave(&phba->hbalock, iflags);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-       }
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
+       return;
 }
 
 static void
index 7cb68fe..a98d11b 100644 (file)
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
         * count until this queued work is done
         */
        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       evtp->evt       = LPFC_EVT_DEV_LOSS;
-       list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-
+       if (evtp->evt_arg1) {
+               evtp->evt = LPFC_EVT_DEV_LOSS;
+               list_add_tail(&evtp->evt_listp, &phba->work_list);
+               lpfc_worker_wake_up(phba);
+       }
        spin_unlock_irq(&phba->hbalock);
 
        return;
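
The callback change above queues the dev-loss event only when lpfc_nlp_get() actually returned a node reference, so the worker can never dequeue an event whose argument has already gone away. A rough sketch of that take-a-reference-then-queue guard follows; the node type and helpers are invented for the example, and the locking the real driver holds around this is omitted.

#include <stdbool.h>
#include <stdio.h>

struct node {
        int refcount;                   /* 0 means the node is being torn down */
};

/* Take a reference only if the node is still live, loosely mirroring
 * lpfc_nlp_get(); return NULL otherwise. */
static struct node *node_get(struct node *n)
{
        if (!n || n->refcount == 0)
                return NULL;
        n->refcount++;
        return n;
}

/* Queue the event only when a reference was obtained, as the hunk above
 * does with evtp->evt_arg1.  Returns true if the event was queued. */
static bool post_dev_loss_event(struct node *n)
{
        struct node *ref = node_get(n);

        if (!ref)
                return false;           /* node going away: drop the event */
        printf("queued dev-loss work, node refcount now %d\n", ref->refcount);
        return true;
}

int main(void)
{
        struct node live = { .refcount = 1 };
        struct node dying = { .refcount = 0 };

        post_dev_loss_event(&live);     /* queued */
        post_dev_loss_event(&dying);    /* silently skipped */
        return 0;
}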
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-       wake_up(phba->work_wait);
-       return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
        lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-       struct lpfc_vport *vport;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-       int rc = 0;
-
-       spin_lock_irq(&phba->hbalock);
-       list_for_each_entry(vport, &phba->port_list, listentry) {
-               if (vport->work_port_events) {
-                       rc = 1;
-                       break;
-               }
-       }
-       if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-           kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-               rc = 1;
-               phba->work_found++;
-       } else
-               phba->work_found = 0;
-       spin_unlock_irq(&phba->hbalock);
-       return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
        struct lpfc_hba *phba = p;
        int rc;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
        set_user_nice(current, -20);
-       phba->work_wait = &work_waitq;
-       phba->work_found = 0;
+       phba->data_flags = 0;
 
        while (1) {
-
-               rc = wait_event_interruptible(work_waitq,
-                                             check_work_wait_done(phba));
-
+               /* wait for and check worker queue activity */
+               rc = wait_event_interruptible(phba->work_waitq,
+                                       (test_and_clear_bit(LPFC_DATA_READY,
+                                                           &phba->data_flags)
+                                        || kthread_should_stop()));
                BUG_ON(rc);
 
                if (kthread_should_stop())
                        break;
 
+               /* Attend to pending lpfc data processing */
                lpfc_work_done(phba);
-
-               /* If there is alot of slow ring work, like during link up
-                * check_work_wait_done() may cause this thread to not give
-                * up the CPU for very long periods of time. This may cause
-                * soft lockups or other problems. To avoid these situations
-                * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-                * consecutive iterations.
-                */
-               if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-                       phba->work_found = 0;
-                       schedule();
-               }
        }
-       spin_lock_irq(&phba->hbalock);
-       phba->work_wait = NULL;
-       spin_unlock_irq(&phba->hbalock);
        return 0;
 }
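
The lpfc_do_work() rewrite above replaces the on-stack wait queue and the check_work_wait_done() scan with a single LPFC_DATA_READY bit: producers set the bit and wake phba->work_waitq, and the worker sleeps until it can test-and-clear the bit or is asked to stop. A small userland analogue of that handshake, with a condition variable standing in for the kernel wait queue; all names below are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER; /* ~ work_waitq */
static int data_ready;                                  /* ~ LPFC_DATA_READY */
static int stop;                                        /* ~ kthread_should_stop() */

/* Producer side: set the data pending flag, then wake the worker. */
static void worker_wake_up(void)
{
        pthread_mutex_lock(&lock);
        data_ready = 1;
        pthread_cond_signal(&waitq);
        pthread_mutex_unlock(&lock);
}

/* Worker side: sleep until work is pending or a stop is requested,
 * test-and-clear the flag, then process everything queued so far. */
static void *do_work(void *unused)
{
        (void)unused;
        for (;;) {
                pthread_mutex_lock(&lock);
                while (!data_ready && !stop)
                        pthread_cond_wait(&waitq, &lock);
                if (data_ready) {
                        data_ready = 0;
                        pthread_mutex_unlock(&lock);
                        printf("worker: processing pending work\n");
                        continue;
                }
                pthread_mutex_unlock(&lock);    /* stop requested, nothing left */
                break;
        }
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, do_work, NULL);
        worker_wake_up();                       /* a producer posts work */

        pthread_mutex_lock(&lock);              /* then ask the worker to stop */
        stop = 1;
        pthread_cond_signal(&waitq);
        pthread_mutex_unlock(&lock);
        pthread_join(thr, NULL);
        return 0;
}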
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
+       lpfc_worker_wake_up(phba);
+
        return 1;
 }
 
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
        if (phba->fc_topology == TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+               if (phba->cfg_enable_npiv)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1309 Link Up Event npiv not supported in loop "
+                               "topology\n");
                                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+       /* Unblock ELS traffic */
+       phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                ndlp->nlp_DID, old_state, state);
 
        if (old_state == NLP_STE_NPR_NODE &&
-           (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-       if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);
 
-       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-               list_del_init(&ndlp->els_retry_evt.evt_listp);
-       if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-               list_del_init(&ndlp->dev_loss_evt.evt_listp);
+       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
        lpfc_unreg_rpi(vport, ndlp);
 
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        LPFC_MBOXQ_t *mbox;
        int rc;
 
-       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       }
-
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
-                       if (ndlp->nlp_flag & NLP_DELAY_TMO)
-                               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+                       lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long flags = 0;
 
        if (unlikely(!phba))
                return;
 
-       if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-               spin_lock_irqsave(&vport->work_port_lock, flags);
+       spin_lock_irqsave(&vport->work_port_lock, flags);
+       tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+       if (!tmo_posted)
                vport->work_port_events |= WORKER_DISC_TMO;
-               spin_unlock_irqrestore(&vport->work_port_lock, flags);
+       spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-               spin_lock_irqsave(&phba->hbalock, flags);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, flags);
-       }
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
 
index fa757b2..5b6e539 100644 (file)
@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                return -ERESTART;
        }
 
-       if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+       if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+               mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
+       }
 
        /* Save information as VPD data */
        vp->rev.rBit = 1;
@@ -551,18 +553,18 @@ static void
 lpfc_hb_timeout(unsigned long ptr)
 {
        struct lpfc_hba *phba;
+       uint32_t tmo_posted;
        unsigned long iflag;
 
        phba = (struct lpfc_hba *)ptr;
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-       if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+       tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+       if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-       spin_lock_irqsave(&phba->hbalock, iflag);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-       spin_unlock_irqrestore(&phba->hbalock, iflag);
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
 
@@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
        lpfc_read_la(phba, pmb, mp);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
        pmb->vport = vport;
+       /* Block ELS IOCBs until we have processed this mbox command */
+       phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
        rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = 4;
@@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
        return;
 
 lpfc_handle_latt_free_mbuf:
+       phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
        kfree(mp);
@@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 /*   Returns the number of buffers NOT posted.    */
 /**************************************************/
 int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
-                int type)
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
 {
        IOCB_t *icmd;
        struct lpfc_iocbq *iocb;
@@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
 
        /* Ring 0, ELS / CT buffers */
-       lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+       lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
        /* Ring 2 - FCP no buffers needed */
 
        return 0;
@@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
 
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                             NLP_EVT_DEVICE_RM);
+
+               /* nlp_type zero and nlp_flag zero are not defined and an
+                * nlp_state of zero is unused; such an ndlp was created
+                * when an initiator logged into us, so clean up this
+                * ndlp here.
+                */
+               if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
+                       (ndlp->nlp_state == 0))
+                       lpfc_nlp_put(ndlp);
        }
 
        /* At this point, ALL ndlp's should be gone
@@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
        phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
        phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
 
+       /* Initialize the wait queue head for the kernel thread */
+       init_waitqueue_head(&phba->work_waitq);
+
        /* Startup the kernel thread for this host adapter. */
        phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                       "lpfc_worker_%d", phba->brd_no);
index d08c4c8..6688a86 100644 (file)
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                }
        }
-
-       /* If we are delaying issuing an ELS command, cancel it */
-       if (ndlp->nlp_flag & NLP_DELAY_TMO)
-               lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+       lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
        return 0;
 }
 
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba    *phba = vport->phba;
        struct lpfc_dmabuf *pcmd;
-       struct lpfc_work_evt *evtp;
        uint32_t *lp;
        IOCB_t *icmd;
        struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        ndlp, mbox);
                return 1;
        }
-
-       /* If the remote NPort logs into us, before we can initiate
-        * discovery to them, cleanup the NPort from discovery accordingly.
-        */
-       if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-               spin_lock_irq(shost->host_lock);
-               ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-               spin_unlock_irq(shost->host_lock);
-               del_timer_sync(&ndlp->nlp_delayfunc);
-               ndlp->nlp_last_elscmd = 0;
-
-               if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
-                       list_del_init(&ndlp->els_retry_evt.evt_listp);
-                       /* Decrement ndlp reference count held for the
-                        * delayed retry
-                        */
-                       evtp = &ndlp->els_retry_evt;
-                       lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
-               }
-
-               if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-                       spin_lock_irq(shost->host_lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       spin_unlock_irq(shost->host_lock);
-
-                       if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-                           (vport->num_disc_nodes)) {
-                               /* Check to see if there are more
-                                * ADISCs to be sent
-                                */
-                               lpfc_more_adisc(vport);
-
-                               if ((vport->num_disc_nodes == 0) &&
-                                       (vport->fc_npr_cnt))
-                                       lpfc_els_disc_plogi(vport);
-
-                               if (vport->num_disc_nodes == 0) {
-                                       spin_lock_irq(shost->host_lock);
-                                       vport->fc_flag &= ~FC_NDISC_ACTIVE;
-                                       spin_unlock_irq(shost->host_lock);
-                                       lpfc_can_disctmo(vport);
-                                       lpfc_end_rscn(vport);
-                               }
-                       }
-               }
-       } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
-                  (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-                  (vport->num_disc_nodes)) {
-               spin_lock_irq(shost->host_lock);
-               ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-               spin_unlock_irq(shost->host_lock);
-               /* Check to see if there are more
-                * PLOGIs to be sent
-                */
-               lpfc_more_plogi(vport);
-               if (vport->num_disc_nodes == 0) {
-                       spin_lock_irq(shost->host_lock);
-                       vport->fc_flag &= ~FC_NDISC_ACTIVE;
-                       spin_unlock_irq(shost->host_lock);
-                       lpfc_can_disctmo(vport);
-                       lpfc_end_rscn(vport);
-               }
-       }
-
        lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
        return 1;
-
 out:
        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
        stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        else
                lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
-       if (!(ndlp->nlp_type & NLP_FABRIC) ||
+       if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+            ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+             !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
            (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
                mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           void *arg, uint32_t evt)
 {
+       struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_iocbq *cmdiocb = arg;
        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
                        NULL);
        } else {
-               lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+               if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+                   (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+                   (vport->num_disc_nodes)) {
+                       spin_lock_irq(shost->host_lock);
+                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+                       spin_unlock_irq(shost->host_lock);
+                       /* Check if there are more PLOGIs to be sent */
+                       lpfc_more_plogi(vport);
+                       if (vport->num_disc_nodes == 0) {
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_NDISC_ACTIVE;
+                               spin_unlock_irq(shost->host_lock);
+                               lpfc_can_disctmo(vport);
+                               lpfc_end_rscn(vport);
+                       }
+               }
        } /* If our portname was less */
 
        return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           void *arg, uint32_t evt)
 {
+       struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_iocbq *cmdiocb;
 
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        cmdiocb = (struct lpfc_iocbq *) arg;
 
-       if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
-               return ndlp->nlp_state;
+       if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+               if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+                       spin_lock_irq(shost->host_lock);
+                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+                       spin_unlock_irq(shost->host_lock);
 
+                       if (vport->num_disc_nodes) {
+                               lpfc_more_adisc(vport);
+                               if ((vport->num_disc_nodes == 0) &&
+                                   (vport->fc_npr_cnt))
+                                       lpfc_els_disc_plogi(vport);
+                               if (vport->num_disc_nodes == 0) {
+                                       spin_lock_irq(shost->host_lock);
+                                       vport->fc_flag &= ~FC_NDISC_ACTIVE;
+                                       spin_unlock_irq(shost->host_lock);
+                                       lpfc_can_disctmo(vport);
+                                       lpfc_end_rscn(vport);
+                               }
+                       }
+               }
+               return ndlp->nlp_state;
+       }
        ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
 
        /* Ignore PLOGI if we have an outstanding LOGO */
-       if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
+       if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
                return ndlp->nlp_state;
-       }
-
        if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+               lpfc_cancel_retry_delay_tmo(vport, ndlp);
                spin_lock_irq(shost->host_lock);
-               ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+               ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
                spin_unlock_irq(shost->host_lock);
-               return ndlp->nlp_state;
-       }
-
-       /* send PLOGI immediately, move to PLOGI issue state */
-       if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-               ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-               lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
-               lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+       } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+               /* send PLOGI immediately, move to PLOGI issue state */
+               if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                       ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+                       lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+               }
        }
-
        return ndlp->nlp_state;
 }
 
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
 
        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
-
        /*
         * Do not start discovery if discovery is about to start
         * or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
        spin_unlock_irq(shost->host_lock);
-       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       }
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        return ndlp->nlp_state;
 }
 
index 0910a9a..c94da4f 100644 (file)
@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
        unsigned long flags;
+       uint32_t evt_posted;
 
        spin_lock_irqsave(&phba->hbalock, flags);
        atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-       if ((phba->pport->work_port_events &
-               WORKER_RAMP_DOWN_QUEUE) == 0) {
+       evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+       if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-       }
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-       spin_lock_irqsave(&phba->hbalock, flags);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-       spin_unlock_irqrestore(&phba->hbalock, flags);
-
+       if (!evt_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
 
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 {
        unsigned long flags;
        struct lpfc_hba *phba = vport->phba;
+       uint32_t evt_posted;
        atomic_inc(&phba->num_cmd_success);
 
        if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-       if ((phba->pport->work_port_events &
-               WORKER_RAMP_UP_QUEUE) == 0) {
+       evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+       if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-       }
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-       spin_lock_irqsave(&phba->hbalock, flags);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       if (!evt_posted)
+               lpfc_worker_wake_up(phba);
+       return;
 }
 
 void
@@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        result = cmd->result;
        sdev = cmd->device;
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
-       spin_lock_irqsave(sdev->host->host_lock, flags);
-       lpfc_cmd->pCmd = NULL;  /* This must be done before scsi_done */
-       spin_unlock_irqrestore(sdev->host->host_lock, flags);
        cmd->scsi_done(cmd);
 
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                 * wake up the thread.
                 */
                spin_lock_irqsave(sdev->host->host_lock, flags);
+               lpfc_cmd->pCmd = NULL;
                if (lpfc_cmd->waitq)
                        wake_up(lpfc_cmd->waitq);
                spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         * wake up the thread.
         */
        spin_lock_irqsave(sdev->host->host_lock, flags);
+       lpfc_cmd->pCmd = NULL;
        if (lpfc_cmd->waitq)
                wake_up(lpfc_cmd->waitq);
        spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;
+       int status;
 
        if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
                return FAILED;
 
        lpfc_cmd->rdata = rdata;
-       ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+       status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
                                           FCP_TARGET_RESET);
-       if (!ret)
+       if (!status)
                return FAILED;
 
        iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
                         tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-       ret = lpfc_sli_issue_iocb_wait(phba,
+       status = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
-       if (ret != IOCB_SUCCESS) {
-               if (ret == IOCB_TIMEDOUT)
+       if (status != IOCB_SUCCESS) {
+               if (status == IOCB_TIMEDOUT) {
                        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+                       ret = TIMEOUT_ERROR;
+               } else
+                       ret = FAILED;
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        } else {
                ret = SUCCESS;
@@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
-       uint32_t cmd_result = 0, cmd_status = 0;
-       int ret = FAILED;
-       int iocb_status = IOCB_SUCCESS;
-       int cnt, loopcnt;
+       unsigned long later;
+       int ret = SUCCESS;
+       int status;
+       int cnt;
 
        lpfc_block_error_handler(cmnd);
-       loopcnt = 0;
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
-       while (1) {
+       later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+       while (time_after(later, jiffies)) {
                if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-                       goto out;
-
-               if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-                       schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-                       loopcnt++;
-                       rdata = cmnd->device->hostdata;
-                       if (!rdata ||
-                               (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
-                               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-                                                "0721 LUN Reset rport "
-                                                "failure: cnt x%x rdata x%p\n",
-                                                loopcnt, rdata);
-                               goto out;
-                       }
-                       pnode = rdata->pnode;
-                       if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-                               goto out;
-               }
+                       return FAILED;
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
+               schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+               rdata = cmnd->device->hostdata;
+               if (!rdata)
+                       break;
+               pnode = rdata->pnode;
+       }
+       if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                "0721 LUN Reset rport "
+                                "failure: msec x%x rdata x%p\n",
+                                jiffies_to_msecs(jiffies - later), rdata);
+               return FAILED;
        }
-
        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
-               goto out;
-
+               return FAILED;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->rdata = rdata;
 
-       ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
-                                          FCP_TARGET_RESET);
-       if (!ret)
-               goto out_free_scsi_buf;
-
+       status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
+                                             cmnd->device->lun,
+                                             FCP_TARGET_RESET);
+       if (!status) {
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
+               return FAILED;
+       }
        iocbq = &lpfc_cmd->cur_iocbq;
 
        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
-       if (iocbqrsp == NULL)
-               goto out_free_scsi_buf;
-
+       if (iocbqrsp == NULL) {
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
+               return FAILED;
+       }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0703 Issue target reset to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n", cmnd->device->id,
                         cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-       iocb_status = lpfc_sli_issue_iocb_wait(phba,
-                                      &phba->sli.ring[phba->sli.fcp_ring],
-                                      iocbq, iocbqrsp, lpfc_cmd->timeout);
-
-       if (iocb_status == IOCB_TIMEDOUT)
+       status = lpfc_sli_issue_iocb_wait(phba,
+                                         &phba->sli.ring[phba->sli.fcp_ring],
+                                         iocbq, iocbqrsp, lpfc_cmd->timeout);
+       if (status == IOCB_TIMEDOUT) {
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-
-       if (iocb_status == IOCB_SUCCESS)
-               ret = SUCCESS;
-       else
-               ret = iocb_status;
-
-       cmd_result = iocbqrsp->iocb.un.ulpWord[4];
-       cmd_status = iocbqrsp->iocb.ulpStatus;
-
+               ret = TIMEOUT_ERROR;
+       } else {
+               if (status != IOCB_SUCCESS)
+                       ret = FAILED;
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
+       }
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                        "0713 SCSI layer issued device reset (%d, %d) "
+                        "return x%x status x%x result x%x\n",
+                        cmnd->device->id, cmnd->device->lun, ret,
+                        iocbqrsp->iocb.ulpStatus,
+                        iocbqrsp->iocb.un.ulpWord[4]);
        lpfc_sli_release_iocbq(phba, iocbqrsp);
-
-       /*
-        * All outstanding txcmplq I/Os should have been aborted by the device.
-        * Unfortunately, some targets do not abide by this forcing the driver
-        * to double check.
-        */
        cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
-                               LPFC_CTX_LUN);
+                               LPFC_CTX_TGT);
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    cmnd->device->id, cmnd->device->lun,
-                                   LPFC_CTX_LUN);
-       loopcnt = 0;
-       while(cnt) {
-               schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-               if (++loopcnt
-                   > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-                       break;
-
+                                   LPFC_CTX_TGT);
+       later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+       while (time_after(later, jiffies) && cnt) {
+               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
                cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
-                                       cmnd->device->lun, LPFC_CTX_LUN);
+                                       cmnd->device->lun, LPFC_CTX_TGT);
        }
-
        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0719 device reset I/O flush failure: "
                                 "cnt x%x\n", cnt);
                ret = FAILED;
        }
-
-out_free_scsi_buf:
-       if (iocb_status != IOCB_TIMEDOUT) {
-               lpfc_release_scsi_buf(phba, lpfc_cmd);
-       }
-       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-                        "0713 SCSI layer issued device reset (%d, %d) "
-                        "return x%x status x%x result x%x\n",
-                        cmnd->device->id, cmnd->device->lun, ret,
-                        cmd_status, cmd_result);
-out:
        return ret;
 }
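
The reset-handler rework above also replaces the loopcnt counters with a deadline computed once up front ("later") and checked with time_after(), the usual jiffies-safe way to bound a polling loop. A rough userland equivalent of that loop shape, using a monotonic clock in place of jiffies; the helper names are made up for the sketch.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Monotonic milliseconds; stands in for jiffies in this sketch. */
static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll check() every poll_ms until it succeeds or timeout_ms elapses,
 * mirroring:  later = msecs_to_jiffies(...) + jiffies;
 *             while (time_after(later, jiffies)) { ... } */
static bool poll_until(bool (*check)(void), long timeout_ms, long poll_ms)
{
        long later = now_ms() + timeout_ms;     /* deadline fixed up front */
        struct timespec delay = {
                .tv_sec = poll_ms / 1000,
                .tv_nsec = (poll_ms % 1000) * 1000000L,
        };

        while (later > now_ms()) {
                if (check())
                        return true;
                nanosleep(&delay, NULL);        /* ~ schedule_timeout_uninterruptible() */
        }
        return check();                         /* one last look after the deadline */
}

static bool never_ready(void) { return false; }

int main(void)
{
        /* Give up after roughly 100 ms of polling every 20 ms. */
        printf("condition met: %d\n", poll_until(never_ready, 100, 20));
        return 0;
}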
 
@@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
-       int ret = FAILED, i, err_count = 0;
-       int cnt, loopcnt;
+       int ret = SUCCESS, status, i;
+       int cnt;
        struct lpfc_scsi_buf * lpfc_cmd;
+       unsigned long later;
 
        lpfc_block_error_handler(cmnd);
-
-       lpfc_cmd = lpfc_get_scsi_buf(phba);
-       if (lpfc_cmd == NULL)
-               goto out;
-
-       /* The lpfc_cmd storage is reused.  Set all loop invariants. */
-       lpfc_cmd->timeout = 60;
-
        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
@@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
                        if (!NLP_CHK_NODE_ACT(ndlp))
                                continue;
                        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
-                           i == ndlp->nlp_sid &&
+                           ndlp->nlp_sid == i &&
                            ndlp->rport) {
                                match = 1;
                                break;
@@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
                spin_unlock_irq(shost->host_lock);
                if (!match)
                        continue;
-
-               ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
-                                         cmnd->device->lun,
-                                         ndlp->rport->dd_data);
-               if (ret != SUCCESS) {
+               lpfc_cmd = lpfc_get_scsi_buf(phba);
+               if (lpfc_cmd) {
+                       lpfc_cmd->timeout = 60;
+                       status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+                                                    cmnd->device->lun,
+                                                    ndlp->rport->dd_data);
+                       if (status != TIMEOUT_ERROR)
+                               lpfc_release_scsi_buf(phba, lpfc_cmd);
+               }
+               if (!lpfc_cmd || status != SUCCESS) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0700 Bus Reset on target %d failed\n",
                                         i);
-                       err_count++;
-                       break;
+                       ret = FAILED;
                }
        }
-
-       if (ret != IOCB_TIMEDOUT)
-               lpfc_release_scsi_buf(phba, lpfc_cmd);
-
-       if (err_count == 0)
-               ret = SUCCESS;
-       else
-               ret = FAILED;
-
        /*
         * All outstanding txcmplq I/Os should have been aborted by
         * the targets.  Unfortunately, some targets do not abide by
@@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    0, 0, LPFC_CTX_HOST);
-       loopcnt = 0;
-       while(cnt) {
-               schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-               if (++loopcnt
-                   > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-                       break;
-
+       later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+       while (time_after(later, jiffies) && cnt) {
+               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
                cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
        }
-
        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0715 Bus Reset I/O flush failure: "
                                 "cnt x%x left x%x\n", cnt, i);
                ret = FAILED;
        }
-
        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
-out:
        return ret;
 }
 
index 70a0a9e..f40aa7b 100644 (file)
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;
 
-                       /* hbalock should already be held */
-                       if (phba->work_wait)
-                               lpfc_worker_wake_up(phba);
+                       lpfc_worker_wake_up(phba);
 
                        return NULL;
                }
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        phba->work_ha |= HA_ERATT;
        phba->work_hs = HS_FFER3;
 
-       /* hbalock should already be held */
-       if (phba->work_wait)
-               lpfc_worker_wake_up(phba);
+       lpfc_worker_wake_up(phba);
 
        return;
 }
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
                phba->pport->work_port_events |= WORKER_MBOX_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-       if (!tmo_posted) {
-               spin_lock_irqsave(&phba->hbalock, iflag);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, iflag);
-       }
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
+       return;
 }
 
 void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                prev_pring_flag = pring->flag;
-               if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+               /* Only slow rings */
+               if (pring->ringno == LPFC_ELS_RING) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
+               }
                /*
                 * Error everything on the txq since these iocbs have not been
                 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
        spin_lock_irqsave(&phba->hbalock, flags);
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
-               if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+               /* Only slow rings */
+               if (pring->ringno == LPFC_ELS_RING) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
+               }
 
                /*
                 * Error everything on the txq since these iocbs have not been
@@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
                           lpfc_ctx_cmd ctx_cmd)
 {
        struct lpfc_scsi_buf *lpfc_cmd;
-       struct scsi_cmnd *cmnd;
        int rc = 1;
 
        if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
@@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
                return rc;
 
        lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
-       cmnd = lpfc_cmd->pCmd;
 
-       if (cmnd == NULL)
+       if (lpfc_cmd->pCmd == NULL)
                return rc;
 
        switch (ctx_cmd) {
        case LPFC_CTX_LUN:
-               if ((cmnd->device->id == tgt_id) &&
-                   (cmnd->device->lun == lun_id))
+               if ((lpfc_cmd->rdata->pnode) &&
+                   (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+                   (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
                        rc = 0;
                break;
        case LPFC_CTX_TGT:
-               if (cmnd->device->id == tgt_id)
+               if ((lpfc_cmd->rdata->pnode) &&
+                   (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
                        rc = 0;
                break;
        case LPFC_CTX_HOST:
@@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
        if (pmboxq->context1)
                return MBX_NOT_FINISHED;
 
+       pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
        /* setup wake call as IOCB callback */
        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
        /* setup context field to pass wait_queue pointer to wake function  */
@@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id)
                                                "pwork:x%x hawork:x%x wait:x%x",
                                                phba->work_ha, work_ha_copy,
                                                (uint32_t)((unsigned long)
-                                               phba->work_wait));
+                                               &phba->work_waitq));
 
                                        control &=
                                            ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id)
                                                "x%x hawork:x%x wait:x%x",
                                                phba->work_ha, work_ha_copy,
                                                (uint32_t)((unsigned long)
-                                               phba->work_wait));
+                                               &phba->work_waitq));
                                }
                                spin_unlock(&phba->hbalock);
                        }
@@ -4297,9 +4299,8 @@ send_current_mbox:
 
                spin_lock(&phba->hbalock);
                phba->work_ha |= work_ha_copy;
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
                spin_unlock(&phba->hbalock);
+               lpfc_worker_wake_up(phba);
        }
 
        ha_copy &= ~(phba->work_ha_mask);
index b22b893..ad24cac 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.2.6"
+#define LPFC_DRIVER_VERSION "8.2.7"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
index 6feaf59..109f89d 100644 (file)
@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
        int vpi;
        int rc = VPORT_ERROR;
        int status;
+       int size;
 
        if ((phba->sli_rev < 3) ||
                !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 
        memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
        memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-
+       size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
+       if (size) {
+               vport->vname = kzalloc(size+1, GFP_KERNEL);
+               if (!vport->vname) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+                                        "1814 Create VPORT failed. "
+                                        "vname allocation failed.\n");
+                       rc = VPORT_ERROR;
+                       lpfc_free_vpi(phba, vpi);
+                       destroy_port(vport);
+                       goto error_out;
+               }
+               memcpy(vport->vname, fc_vport->symbolic_name, size+1);
+       }
        if (fc_vport->node_name != 0)
                u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
        if (fc_vport->port_name != 0)
index fd63b06..11aa917 100644 (file)
@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
        default:
                return 0;
        }
-       if (mesg.event == mdev->ofdev.dev.power.power_state.event)
+       if (ms->phase == sleeping)
                return 0;
 
        scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
        disable_irq(ms->meshintr);
        set_mesh_power(ms, 0);
 
-       mdev->ofdev.dev.power.power_state = mesg;
-
        return 0;
 }
 
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
        struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
        unsigned long flags;
 
-       if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
+       if (ms->phase != sleeping)
                return 0;
 
        set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
        enable_irq(ms->meshintr);
        scsi_unblock_requests(ms->host);
 
-       mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
-
        return 0;
 }
 
index 0c78694..5822dd5 100644 (file)
@@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
        .host_param_mask        = ISCSI_HOST_HWADDRESS |
                                  ISCSI_HOST_IPADDRESS |
                                  ISCSI_HOST_INITIATOR_NAME,
-       .sessiondata_size       = sizeof(struct ddb_entry),
-       .host_template          = &qla4xxx_driver_template,
-
        .tgt_dscvr              = qla4xxx_tgt_dscvr,
        .get_conn_param         = qla4xxx_conn_get_param,
        .get_session_param      = qla4xxx_sess_get_param,
@@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
                return err;
        }
 
-       ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
+       ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
        if (!ddb_entry->conn) {
                iscsi_remove_session(ddb_entry->sess);
                DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
        struct ddb_entry *ddb_entry;
        struct iscsi_cls_session *sess;
 
-       sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
+       sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
+                                  sizeof(struct ddb_entry));
        if (!sess)
                return NULL;
 
index 110e776..36c92f9 100644 (file)
@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 
        good_bytes = scsi_bufflen(cmd);
         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+               int old_good_bytes = good_bytes;
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
                        good_bytes = drv->done(cmd);
+               /*
+                * USB may not give sense identifying bad sector and
+                * simply return a residue instead, so subtract off the
+                * residue if drv->done() error processing indicates no
+                * change to the completion length.
+                */
+               if (good_bytes == old_good_bytes)
+                       good_bytes -= scsi_get_resid(cmd);
        }
        scsi_io_completion(cmd, good_bytes);
 }
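
The comment in the scsi_finish_command() hunk above describes the residue fallback: if the upper-level driver's done() callback leaves the completion length untouched, the residue reported by the device is subtracted instead. A minimal userspace sketch of that decision follows; fake_cmd and fake_done() are invented names standing in for the real SCSI structures.

/* Sketch of the residue adjustment above; struct fake_cmd and
 * fake_done() are hypothetical and only mirror the shape of the
 * in-kernel logic. */
#include <stdio.h>

struct fake_cmd {
	int bufflen;	/* bytes requested */
	int resid;	/* bytes the device did not transfer */
};

/* stands in for drv->done(); returns the completion length it computed,
 * or the value it was given if it has nothing to add */
static int fake_done(struct fake_cmd *cmd, int good_bytes)
{
	(void)cmd;
	return good_bytes;	/* the "no change" case */
}

static int finish(struct fake_cmd *cmd)
{
	int good_bytes = cmd->bufflen;
	int old_good_bytes = good_bytes;

	good_bytes = fake_done(cmd, good_bytes);
	/* done() did not adjust the length, so trust the residue instead */
	if (good_bytes == old_good_bytes)
		good_bytes -= cmd->resid;
	return good_bytes;
}

int main(void)
{
	struct fake_cmd cmd = { .bufflen = 4096, .resid = 512 };

	printf("completed %d of %d bytes\n", finish(&cmd), cmd.bufflen);
	return 0;
}
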
index f6600bf..01d11a0 100644 (file)
@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
 #define DEF_VIRTUAL_GB   0
 #define DEF_FAKE_RW    0
 #define DEF_VPD_USE_HOSTNO 1
+#define DEF_SECTOR_SIZE 512
 
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE   1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 static int scsi_debug_fake_rw = DEF_FAKE_RW;
 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -157,11 +159,6 @@ static int sdebug_heads;           /* heads per disk */
 static int sdebug_cylinders_per;       /* cylinders per surface */
 static int sdebug_sectors_per;         /* sectors per cylinder */
 
-/* default sector size is 512 bytes, 2**9 bytes */
-#define POW2_SECT_SIZE 9
-#define SECT_SIZE (1 << POW2_SECT_SIZE)
-#define SECT_SIZE_PER(TGT) SECT_SIZE
-
 #define SDEBUG_MAX_PARTS 4
 
 #define SDEBUG_SENSE_LEN 32
@@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
        return sizeof(vpdb0_data);
 }
 
+static int inquiry_evpd_b1(unsigned char *arr)
+{
+       memset(arr, 0, 0x3c);
+       arr[0] = 0;
+       arr[1] = 1;
+
+       return 0x3c;
+}
 
 #define SDEBUG_LONG_INQ_SZ 96
 #define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
                        arr[n++] = 0x88;  /* SCSI ports */
                        arr[n++] = 0x89;  /* ATA information */
                        arr[n++] = 0xb0;  /* Block limits (SBC) */
+                       arr[n++] = 0xb1;  /* Block characteristics (SBC) */
                        arr[3] = n - 4;   /* number of supported VPD pages */
                } else if (0x80 == cmd[2]) { /* unit serial number */
                        arr[1] = cmd[2];        /*sanity */
@@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
                } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_evpd_b0(&arr[4]);
+               } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+                       arr[1] = cmd[2];        /*sanity */
+                       arr[3] = inquiry_evpd_b1(&arr[4]);
                } else {
                        /* Illegal request, invalid field in cdb */
                        mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
                arr[2] = 0xff;
                arr[3] = 0xff;
        }
-       arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-       arr[7] = SECT_SIZE_PER(target) & 0xff;
+       arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
+       arr[7] = scsi_debug_sector_size & 0xff;
        return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
 }
 
@@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
        capac = sdebug_capacity - 1;
        for (k = 0; k < 8; ++k, capac >>= 8)
                arr[7 - k] = capac & 0xff;
-       arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-       arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-       arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-       arr[11] = SECT_SIZE_PER(target) & 0xff;
+       arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
+       arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
+       arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
+       arr[11] = scsi_debug_sector_size & 0xff;
        return fill_from_dev_buffer(scp, arr,
                                    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
 }
@@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
 
 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
 {       /* Format device page for mode_sense */
-        unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
-                                     0, 0, 0, 0, 0, 0, 0, 0,
-                                     0, 0, 0, 0, 0x40, 0, 0, 0};
-
-        memcpy(p, format_pg, sizeof(format_pg));
-        p[10] = (sdebug_sectors_per >> 8) & 0xff;
-        p[11] = sdebug_sectors_per & 0xff;
-        p[12] = (SECT_SIZE >> 8) & 0xff;
-        p[13] = SECT_SIZE & 0xff;
-        if (DEV_REMOVEABLE(target))
-                p[20] |= 0x20; /* should agree with INQUIRY */
-        if (1 == pcontrol)
-                memset(p + 2, 0, sizeof(format_pg) - 2);
-        return sizeof(format_pg);
+       unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
+                                    0, 0, 0, 0, 0, 0, 0, 0,
+                                    0, 0, 0, 0, 0x40, 0, 0, 0};
+
+       memcpy(p, format_pg, sizeof(format_pg));
+       p[10] = (sdebug_sectors_per >> 8) & 0xff;
+       p[11] = sdebug_sectors_per & 0xff;
+       p[12] = (scsi_debug_sector_size >> 8) & 0xff;
+       p[13] = scsi_debug_sector_size & 0xff;
+       if (DEV_REMOVEABLE(target))
+               p[20] |= 0x20; /* should agree with INQUIRY */
+       if (1 == pcontrol)
+               memset(p + 2, 0, sizeof(format_pg) - 2);
+       return sizeof(format_pg);
 }
 
 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
                        ap[2] = (sdebug_capacity >> 8) & 0xff;
                        ap[3] = sdebug_capacity & 0xff;
                }
-               ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-               ap[7] = SECT_SIZE_PER(target) & 0xff;
+               ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
+               ap[7] = scsi_debug_sector_size & 0xff;
                offset += bd_len;
                ap = arr + offset;
        } else if (16 == bd_len) {
@@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
 
                for (k = 0; k < 8; ++k, capac >>= 8)
                        ap[7 - k] = capac & 0xff;
-               ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-               ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-               ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-               ap[15] = SECT_SIZE_PER(target) & 0xff;
+               ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
+               ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
+               ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
+               ap[15] = scsi_debug_sector_size & 0xff;
                offset += bd_len;
                ap = arr + offset;
        }
@@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
        if (block + num > sdebug_store_sectors)
                rest = block + num - sdebug_store_sectors;
 
-       ret = func(scmd, fake_storep + (block * SECT_SIZE),
-                  (num - rest) * SECT_SIZE);
+       ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
+                  (num - rest) * scsi_debug_sector_size);
        if (!ret && rest)
-               ret = func(scmd, fake_storep, rest * SECT_SIZE);
+               ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
 
        return ret;
 }
@@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
        write_unlock_irqrestore(&atomic_rw, iflags);
        if (-1 == ret)
                return (DID_ERROR << 16);
-       else if ((ret < (num * SECT_SIZE)) &&
+       else if ((ret < (num * scsi_debug_sector_size)) &&
                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
                printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
-                      " IO sent=%d bytes\n", num * SECT_SIZE, ret);
+                      " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
        return 0;
 }
 
@@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
                   S_IRUGO | S_IWUSR);
+module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
 
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
 
 
 static char sdebug_info[256];
@@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
            scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
            scsi_debug_cmnd_count, scsi_debug_delay,
            scsi_debug_max_luns, scsi_debug_scsi_level,
-           SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
-           num_aborts, num_dev_resets, num_bus_resets, num_host_resets);
+           scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+           sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
+           num_host_resets);
        if (pos < offset) {
                len = 0;
                begin = pos;
@@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
            sdebug_vpd_use_hostno_store);
 
+static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+}
+DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
+
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
    files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void)
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
+       ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
        return ret;
 }
 
 static void do_remove_driverfs_files(void)
 {
+       driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void)
        int k;
        int ret;
 
+       switch (scsi_debug_sector_size) {
+       case  512:
+       case 1024:
+       case 2048:
+       case 4096:
+               break;
+       default:
+               printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
+                      scsi_debug_sector_size);
+               return -EINVAL;
+       }
+
        if (scsi_debug_dev_size_mb < 1)
                scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
        sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
-       sdebug_store_sectors = sz / SECT_SIZE;
+       sdebug_store_sectors = sz / scsi_debug_sector_size;
        sdebug_capacity = get_sdebug_capacity();
 
        /* play around with geometry, don't waste too much on track 0 */
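
The scsi_debug hunks above turn the fixed 512-byte SECT_SIZE into a sector_size module parameter, restrict it to 512/1024/2048/4096, and feed it big-endian into the READ CAPACITY responses. Below is a standalone sketch of that validation and byte encoding; the arr[] offsets mirror the READ CAPACITY(16) layout used by the driver, but the program itself is only illustrative.

/* Userspace sketch of the sector-size validation and the big-endian
 * block-length encoding shown above; illustrative only. */
#include <stdio.h>

static int validate_sector_size(unsigned int s)
{
	switch (s) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	unsigned int sector_size = 4096;
	unsigned char arr[12] = { 0 };
	int k;

	if (validate_sector_size(sector_size)) {
		fprintf(stderr, "invalid sector_size %u\n", sector_size);
		return 1;
	}

	/* READ CAPACITY(16) style: bytes 8..11 carry the block length,
	 * most significant byte first */
	for (k = 0; k < 4; k++)
		arr[8 + k] = (sector_size >> (8 * (3 - k))) & 0xff;

	printf("block length bytes: %02x %02x %02x %02x\n",
	       arr[8], arr[9], arr[10], arr[11]);
	return 0;
}
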
index eaf5a8a..006a959 100644 (file)
@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
  */
 static int scsi_check_sense(struct scsi_cmnd *scmd)
 {
+       struct scsi_device *sdev = scmd->device;
        struct scsi_sense_hdr sshdr;
 
        if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
        if (scsi_sense_is_deferred(&sshdr))
                return NEEDS_RETRY;
 
+       if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
+                       sdev->scsi_dh_data->scsi_dh->check_sense) {
+               int rc;
+
+               rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+               if (rc != SCSI_RETURN_NOT_HANDLED)
+                       return rc;
+               /* handler does not care. Drop down to default handling */
+       }
+
        /*
         * Previous logic looked for FILEMARK, EOM or ILI which are
         * mainly associated with tapes and returned SUCCESS.
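
scsi_check_sense() above (and scsi_setup_fs_cmnd() in the scsi_lib.c hunk further down) now gives an attached device handler first refusal: if the handler returns anything other than SCSI_RETURN_NOT_HANDLED, its verdict is used, otherwise the generic path runs. A small sketch of that optional-hook pattern, with made-up names and return codes:

/* Sketch of the optional-hook dispatch used above: a handler may claim
 * the event, or return NOT_HANDLED to fall through to the defaults.
 * All names here are hypothetical. */
#include <stdio.h>

enum { NOT_HANDLED = -1, RETRY = 0, FAIL = 1 };

struct handler {
	int (*check)(int sense_key);	/* may be NULL */
};

static int default_check(int sense_key)
{
	return sense_key ? FAIL : RETRY;
}

static int check(const struct handler *h, int sense_key)
{
	if (h && h->check) {
		int rc = h->check(sense_key);

		if (rc != NOT_HANDLED)
			return rc;	/* handler claimed it */
		/* handler does not care, drop down to default handling */
	}
	return default_check(sense_key);
}

static int always_retry(int sense_key) { (void)sense_key; return RETRY; }

int main(void)
{
	struct handler h = { .check = always_retry };

	printf("with handler: %d, without: %d\n",
	       check(&h, 1), check(NULL, 1));
	return 0;
}
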
index cbf55d5..88d1b5f 100644 (file)
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-static struct kmem_cache *scsi_bidi_sdb_cache;
+static struct kmem_cache *scsi_sdb_cache;
 
 static void scsi_run_queue(struct request_queue *q);
 
@@ -784,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
                struct scsi_data_buffer *bidi_sdb =
                        cmd->request->next_rq->special;
                scsi_free_sgtable(bidi_sdb);
-               kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+               kmem_cache_free(scsi_sdb_cache, bidi_sdb);
                cmd->request->next_rq->special = NULL;
        }
 }
@@ -1059,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
        if (blk_bidi_rq(cmd->request)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
-                       scsi_bidi_sdb_cache, GFP_ATOMIC);
+                       scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
@@ -1169,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 
        if (ret != BLKPREP_OK)
                return ret;
+
+       if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+                        && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+               ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+               if (ret != BLKPREP_OK)
+                       return ret;
+       }
+
        /*
         * Filesystem requests must transfer data.
         */
@@ -1329,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
-                       blk_plug_device(q);
                        return 0;
                }
        }
@@ -1693,11 +1700,11 @@ int __init scsi_init_queue(void)
                return -ENOMEM;
        }
 
-       scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
-                                       sizeof(struct scsi_data_buffer),
-                                       0, 0, NULL);
-       if (!scsi_bidi_sdb_cache) {
-               printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+       scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+                                          sizeof(struct scsi_data_buffer),
+                                          0, 0, NULL);
+       if (!scsi_sdb_cache) {
+               printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
                goto cleanup_io_context;
        }
 
@@ -1710,7 +1717,7 @@ int __init scsi_init_queue(void)
                if (!sgp->slab) {
                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
                                        sgp->name);
-                       goto cleanup_bidi_sdb;
+                       goto cleanup_sdb;
                }
 
                sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1718,13 +1725,13 @@ int __init scsi_init_queue(void)
                if (!sgp->pool) {
                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
                                        sgp->name);
-                       goto cleanup_bidi_sdb;
+                       goto cleanup_sdb;
                }
        }
 
        return 0;
 
-cleanup_bidi_sdb:
+cleanup_sdb:
        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                if (sgp->pool)
@@ -1732,7 +1739,7 @@ cleanup_bidi_sdb:
                if (sgp->slab)
                        kmem_cache_destroy(sgp->slab);
        }
-       kmem_cache_destroy(scsi_bidi_sdb_cache);
+       kmem_cache_destroy(scsi_sdb_cache);
 cleanup_io_context:
        kmem_cache_destroy(scsi_io_context_cache);
 
@@ -1744,7 +1751,7 @@ void scsi_exit_queue(void)
        int i;
 
        kmem_cache_destroy(scsi_io_context_cache);
-       kmem_cache_destroy(scsi_bidi_sdb_cache);
+       kmem_cache_destroy(scsi_sdb_cache);
 
        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
index a00eee6..196fe3a 100644 (file)
@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
        put_device(parent);
 }
 
-struct device_type scsi_target_type = {
+static struct device_type scsi_target_type = {
        .name =         "scsi_target",
        .release =      scsi_target_dev_release,
 };
index 93d2b67..b6e5610 100644 (file)
@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
        .resume         = scsi_bus_resume,
        .remove         = scsi_bus_remove,
 };
+EXPORT_SYMBOL_GPL(scsi_bus_type);
 
 int scsi_sysfs_register(void)
 {
index 65d1737..3af7cbc 100644 (file)
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/iscsi_if.h>
 
-#define ISCSI_SESSION_ATTRS 19
+#define ISCSI_SESSION_ATTRS 21
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
-#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+#define ISCSI_TRANSPORT_VERSION "2.0-870"
 
 struct iscsi_internal {
        int daemon_pid;
@@ -101,16 +102,10 @@ show_transport_##name(struct device *dev,                                 \
 static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
 
 show_transport_attr(caps, "0x%x");
-show_transport_attr(max_lun, "%d");
-show_transport_attr(max_conn, "%d");
-show_transport_attr(max_cmd_len, "%d");
 
 static struct attribute *iscsi_transport_attrs[] = {
        &dev_attr_handle.attr,
        &dev_attr_caps.attr,
-       &dev_attr_max_lun.attr,
-       &dev_attr_max_conn.attr,
-       &dev_attr_max_cmd_len.attr,
        NULL,
 };
 
@@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = {
        .attrs = iscsi_transport_attrs,
 };
 
+/*
+ * iSCSI endpoint attrs
+ */
+#define iscsi_dev_to_endpoint(_dev) \
+       container_of(_dev, struct iscsi_endpoint, dev)
+
+#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store)   \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+        __ATTR(_name,_mode,_show,_store)
+
+static void iscsi_endpoint_release(struct device *dev)
+{
+       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+       kfree(ep);
+}
+
+static struct class iscsi_endpoint_class = {
+       .name = "iscsi_endpoint",
+       .dev_release = iscsi_endpoint_release,
+};
+
+static ssize_t
+show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+       return sprintf(buf, "%u\n", ep->id);
+}
+static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+
+static struct attribute *iscsi_endpoint_attrs[] = {
+       &dev_attr_ep_handle.attr,
+       NULL,
+};
+
+static struct attribute_group iscsi_endpoint_group = {
+       .attrs = iscsi_endpoint_attrs,
+};
 
+#define ISCSI_MAX_EPID -1
+
+static int iscsi_match_epid(struct device *dev, void *data)
+{
+       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+       unsigned int *epid = (unsigned int *) data;
+
+       return *epid == ep->id;
+}
+
+struct iscsi_endpoint *
+iscsi_create_endpoint(int dd_size)
+{
+       struct device *dev;
+       struct iscsi_endpoint *ep;
+       unsigned int id;
+       int err;
+
+       for (id = 1; id < ISCSI_MAX_EPID; id++) {
+               dev = class_find_device(&iscsi_endpoint_class, &id,
+                                       iscsi_match_epid);
+               if (!dev)
+                       break;
+       }
+       if (id == ISCSI_MAX_EPID) {
+               printk(KERN_ERR "Too many connections. Max supported %u\n",
+                      ISCSI_MAX_EPID - 1);
+               return NULL;
+       }
+
+       ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+       if (!ep)
+               return NULL;
+
+       ep->id = id;
+       ep->dev.class = &iscsi_endpoint_class;
+       snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+       err = device_register(&ep->dev);
+       if (err)
+               goto free_ep;
+
+       err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+       if (err)
+               goto unregister_dev;
+
+       if (dd_size)
+               ep->dd_data = &ep[1];
+       return ep;
+
+unregister_dev:
+       device_unregister(&ep->dev);
+       return NULL;
+
+free_ep:
+       kfree(ep);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+
+void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+{
+       sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+       device_unregister(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+
+struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+{
+       struct iscsi_endpoint *ep;
+       struct device *dev;
+
+       dev = class_find_device(&iscsi_endpoint_class, &handle,
+                               iscsi_match_epid);
+       if (!dev)
+               return NULL;
+
+       ep = iscsi_dev_to_endpoint(dev);
+       /*
+        * we can drop this now because the interface will prevent
+        * removals and lookups from racing.
+        */
+       put_device(dev);
+       return ep;
+}
+EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
 
 static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
                            struct device *cdev)
 {
        struct Scsi_Host *shost = dev_to_shost(dev);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
 
        memset(ihost, 0, sizeof(*ihost));
-       INIT_LIST_HEAD(&ihost->sessions);
-       mutex_init(&ihost->mutex);
        atomic_set(&ihost->nr_scans, 0);
+       mutex_init(&ihost->mutex);
 
        snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
                shost->host_no);
@@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
                             struct device *cdev)
 {
        struct Scsi_Host *shost = dev_to_shost(dev);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
 
        destroy_workqueue(ihost->scan_workq);
        return 0;
@@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev)
        return dev->release == iscsi_session_release;
 }
 
+static int iscsi_iter_session_fn(struct device *dev, void *data)
+{
+       void (* fn) (struct iscsi_cls_session *) = data;
+
+       if (!iscsi_is_session_dev(dev))
+               return 0;
+       fn(iscsi_dev_to_session(dev));
+       return 0;
+}
+
+void iscsi_host_for_each_session(struct Scsi_Host *shost,
+                                void (*fn)(struct iscsi_cls_session *))
+{
+       device_for_each_child(&shost->shost_gendev, fn,
+                             iscsi_iter_session_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+
 /**
  * iscsi_scan_finished - helper to report when running scans are done
  * @shost: scsi host
@@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev)
  */
 int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
        /*
         * qla4xxx will have kicked off some session unblocks before calling
         * scsi_scan_host, so just wait for them to complete.
@@ -306,42 +440,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(iscsi_scan_finished);
 
-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
-                          uint id, uint lun)
+struct iscsi_scan_data {
+       unsigned int channel;
+       unsigned int id;
+       unsigned int lun;
+};
+
+static int iscsi_user_scan_session(struct device *dev, void *data)
 {
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_scan_data *scan_data = data;
        struct iscsi_cls_session *session;
+       struct Scsi_Host *shost;
+       struct iscsi_cls_host *ihost;
+       unsigned long flags;
+       unsigned int id;
+
+       if (!iscsi_is_session_dev(dev))
+               return 0;
+
+       session = iscsi_dev_to_session(dev);
+       shost = iscsi_session_to_shost(session);
+       ihost = shost->shost_data;
 
        mutex_lock(&ihost->mutex);
-       list_for_each_entry(session, &ihost->sessions, host_list) {
-               if ((channel == SCAN_WILD_CARD || channel == 0) &&
-                   (id == SCAN_WILD_CARD || id == session->target_id))
-                       scsi_scan_target(&session->dev, 0,
-                                        session->target_id, lun, 1);
+       spin_lock_irqsave(&session->lock, flags);
+       if (session->state != ISCSI_SESSION_LOGGED_IN) {
+               spin_unlock_irqrestore(&session->lock, flags);
+               mutex_unlock(&ihost->mutex);
+               return 0;
        }
-       mutex_unlock(&ihost->mutex);
+       id = session->target_id;
+       spin_unlock_irqrestore(&session->lock, flags);
 
+       if (id != ISCSI_MAX_TARGET) {
+               if ((scan_data->channel == SCAN_WILD_CARD ||
+                    scan_data->channel == 0) &&
+                   (scan_data->id == SCAN_WILD_CARD ||
+                    scan_data->id == id))
+                       scsi_scan_target(&session->dev, 0, id,
+                                        scan_data->lun, 1);
+       }
+       mutex_unlock(&ihost->mutex);
        return 0;
 }
 
+static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+                          uint id, uint lun)
+{
+       struct iscsi_scan_data scan_data;
+
+       scan_data.channel = channel;
+       scan_data.id = id;
+       scan_data.lun = lun;
+
+       return device_for_each_child(&shost->shost_gendev, &scan_data,
+                                    iscsi_user_scan_session);
+}
+
 static void iscsi_scan_session(struct work_struct *work)
 {
        struct iscsi_cls_session *session =
                        container_of(work, struct iscsi_cls_session, scan_work);
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost = shost->shost_data;
-       unsigned long flags;
+       struct iscsi_cls_host *ihost = shost->shost_data;
+       struct iscsi_scan_data scan_data;
 
-       spin_lock_irqsave(&session->lock, flags);
-       if (session->state != ISCSI_SESSION_LOGGED_IN) {
-               spin_unlock_irqrestore(&session->lock, flags);
-               goto done;
-       }
-       spin_unlock_irqrestore(&session->lock, flags);
+       scan_data.channel = 0;
+       scan_data.id = SCAN_WILD_CARD;
+       scan_data.lun = SCAN_WILD_CARD;
 
-       scsi_scan_target(&session->dev, 0, session->target_id,
-                        SCAN_WILD_CARD, 1);
-done:
+       iscsi_user_scan_session(&session->dev, &scan_data);
        atomic_dec(&ihost->nr_scans);
 }
 
@@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
                        container_of(work, struct iscsi_cls_session,
                                     unblock_work);
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
        unsigned long flags;
 
        /*
@@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work)
                        container_of(work, struct iscsi_cls_session,
                                     unbind_work);
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
+       unsigned long flags;
 
        /* Prevent new scans and make sure scanning is not in progress */
        mutex_lock(&ihost->mutex);
-       if (list_empty(&session->host_list)) {
+       spin_lock_irqsave(&session->lock, flags);
+       if (session->target_id == ISCSI_MAX_TARGET) {
+               spin_unlock_irqrestore(&session->lock, flags);
                mutex_unlock(&ihost->mutex);
                return;
        }
-       list_del_init(&session->host_list);
+       session->target_id = ISCSI_MAX_TARGET;
+       spin_unlock_irqrestore(&session->lock, flags);
        mutex_unlock(&ihost->mutex);
 
        scsi_remove_target(&session->dev);
@@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
 static int iscsi_unbind_session(struct iscsi_cls_session *session)
 {
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
 
        return queue_work(ihost->scan_workq, &session->unbind_work);
 }
 
 struct iscsi_cls_session *
-iscsi_alloc_session(struct Scsi_Host *shost,
-                   struct iscsi_transport *transport)
+iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+                   int dd_size)
 {
        struct iscsi_cls_session *session;
 
-       session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+       session = kzalloc(sizeof(*session) + dd_size,
                          GFP_KERNEL);
        if (!session)
                return NULL;
@@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost,
        session->recovery_tmo = 120;
        session->state = ISCSI_SESSION_FREE;
        INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
-       INIT_LIST_HEAD(&session->host_list);
        INIT_LIST_HEAD(&session->sess_list);
        INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
        INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost,
        session->dev.parent = &shost->shost_gendev;
        session->dev.release = iscsi_session_release;
        device_initialize(&session->dev);
-       if (transport->sessiondata_size)
+       if (dd_size)
                session->dd_data = &session[1];
        return session;
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);
 
+static int iscsi_get_next_target_id(struct device *dev, void *data)
+{
+       struct iscsi_cls_session *session;
+       unsigned long flags;
+       int err = 0;
+
+       if (!iscsi_is_session_dev(dev))
+               return 0;
+
+       session = iscsi_dev_to_session(dev);
+       spin_lock_irqsave(&session->lock, flags);
+       if (*((unsigned int *) data) == session->target_id)
+               err = -EEXIST;
+       spin_unlock_irqrestore(&session->lock, flags);
+       return err;
+}
+
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost;
+       struct iscsi_cls_host *ihost;
        unsigned long flags;
+       unsigned int id = target_id;
        int err;
 
        ihost = shost->shost_data;
        session->sid = atomic_add_return(1, &iscsi_session_nr);
-       session->target_id = target_id;
+
+       if (id == ISCSI_MAX_TARGET) {
+               for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+                       err = device_for_each_child(&shost->shost_gendev, &id,
+                                                   iscsi_get_next_target_id);
+                       if (!err)
+                               break;
+               }
+
+               if (id == ISCSI_MAX_TARGET) {
+                       iscsi_cls_session_printk(KERN_ERR, session,
+                                                "Too many iscsi targets. Max "
+                                                "number of targets is %d.\n",
+                                                ISCSI_MAX_TARGET - 1);
+                       goto release_host;
+               }
+       }
+       session->target_id = id;
 
        snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
                 session->sid);
@@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
        list_add(&session->sess_list, &sesslist);
        spin_unlock_irqrestore(&sesslock, flags);
 
-       mutex_lock(&ihost->mutex);
-       list_add(&session->host_list, &ihost->sessions);
-       mutex_unlock(&ihost->mutex);
-
        iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
        return 0;
 
@@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
  * iscsi_create_session - create iscsi class session
  * @shost: scsi host
  * @transport: iscsi transport
+ * @dd_size: private driver data size
  * @target_id: which target
  *
  * This can be called from a LLD or iscsi_transport.
  */
 struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost,
-                    struct iscsi_transport *transport,
-                    unsigned int target_id)
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+                    int dd_size, unsigned int target_id)
 {
        struct iscsi_cls_session *session;
 
-       session = iscsi_alloc_session(shost, transport);
+       session = iscsi_alloc_session(shost, transport, dd_size);
        if (!session)
                return NULL;
 
@@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
 void iscsi_remove_session(struct iscsi_cls_session *session)
 {
        struct Scsi_Host *shost = iscsi_session_to_shost(session);
-       struct iscsi_host *ihost = shost->shost_data;
+       struct iscsi_cls_host *ihost = shost->shost_data;
        unsigned long flags;
        int err;
 
@@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
+ * @dd_size: private driver data size
  * @cid: connection id
  *
  * This can be called from a LLD or iscsi_transport. The connection
@@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
  * non-zero.
  */
 struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 {
        struct iscsi_transport *transport = session->transport;
        struct iscsi_cls_conn *conn;
        unsigned long flags;
        int err;
 
-       conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+       conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
        if (!conn)
                return NULL;
-
-       if (transport->conndata_size)
+       if (dd_size)
                conn->dd_data = &conn[1];
 
        INIT_LIST_HEAD(&conn->conn_list);
@@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+                       struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+                       uint16_t cmds_max, uint16_t queue_depth)
 {
        struct iscsi_transport *transport = priv->iscsi_transport;
        struct iscsi_cls_session *session;
-       uint32_t hostno;
+       uint32_t host_no;
 
-       session = transport->create_session(transport, &priv->t,
-                                           ev->u.c_session.cmds_max,
-                                           ev->u.c_session.queue_depth,
-                                           ev->u.c_session.initial_cmdsn,
-                                           &hostno);
+       session = transport->create_session(ep, cmds_max, queue_depth,
+                                           initial_cmdsn, &host_no);
        if (!session)
                return -ENOMEM;
 
-       ev->r.c_session_ret.host_no = hostno;
+       ev->r.c_session_ret.host_no = host_no;
        ev->r.c_session_ret.sid = session->sid;
        return 0;
 }
@@ -1106,6 +1307,7 @@ static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
                      struct iscsi_uevent *ev, int msg_type)
 {
+       struct iscsi_endpoint *ep;
        struct sockaddr *dst_addr;
        int rc = 0;
 
@@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
                        return -EINVAL;
 
                dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
-               rc = transport->ep_connect(dst_addr,
-                                          ev->u.ep_connect.non_blocking,
-                                          &ev->r.ep_connect_ret.handle);
+               ep = transport->ep_connect(dst_addr,
+                                          ev->u.ep_connect.non_blocking);
+               if (IS_ERR(ep))
+                       return PTR_ERR(ep);
+
+               ev->r.ep_connect_ret.handle = ep->id;
                break;
        case ISCSI_UEVENT_TRANSPORT_EP_POLL:
                if (!transport->ep_poll)
                        return -EINVAL;
 
-               ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+               ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+               if (!ep)
+                       return -EINVAL;
+
+               ev->r.retcode = transport->ep_poll(ep,
                                                   ev->u.ep_poll.timeout_ms);
                break;
        case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
                if (!transport->ep_disconnect)
                        return -EINVAL;
 
-               transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+               ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+               if (!ep)
+                       return -EINVAL;
+
+               transport->ep_disconnect(ep);
                break;
        }
        return rc;
@@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct iscsi_internal *priv;
        struct iscsi_cls_session *session;
        struct iscsi_cls_conn *conn;
+       struct iscsi_endpoint *ep = NULL;
 
        priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
        if (!priv)
@@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
-               err = iscsi_if_create_session(priv, ev);
+               err = iscsi_if_create_session(priv, ep, ev,
+                                             ev->u.c_session.initial_cmdsn,
+                                             ev->u.c_session.cmds_max,
+                                             ev->u.c_session.queue_depth);
+               break;
+       case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+               ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+               if (!ep) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               err = iscsi_if_create_session(priv, ep, ev,
+                                       ev->u.c_bound_session.initial_cmdsn,
+                                       ev->u.c_bound_session.cmds_max,
+                                       ev->u.c_bound_session.queue_depth);
                break;
        case ISCSI_UEVENT_DESTROY_SESSION:
                session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
 
 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
        priv->daemon_pid = -1;
        priv->iscsi_transport = tt;
        priv->t.user_scan = iscsi_user_scan;
+       if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+               priv->t.create_work_queue = 1;
 
        priv->dev.class = &iscsi_transport_class;
        snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
@@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
        priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
        priv->t.host_attrs.ac.class = &iscsi_host_class.class;
        priv->t.host_attrs.ac.match = iscsi_host_match;
-       priv->t.host_size = sizeof(struct iscsi_host);
+       priv->t.host_size = sizeof(struct iscsi_cls_host);
        transport_container_register(&priv->t.host_attrs);
 
        SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
@@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
        SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
        SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
        SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+       SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+       SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
        SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
        SETUP_PRIV_SESSION_RD_ATTR(state);
 
@@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 
 unregister_dev:
        device_unregister(&priv->dev);
+       return NULL;
 free_priv:
        kfree(priv);
        return NULL;
@@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void)
        if (err)
                return err;
 
-       err = transport_class_register(&iscsi_host_class);
+       err = class_register(&iscsi_endpoint_class);
        if (err)
                goto unregister_transport_class;
 
+       err = transport_class_register(&iscsi_host_class);
+       if (err)
+               goto unregister_endpoint_class;
+
        err = transport_class_register(&iscsi_connection_class);
        if (err)
                goto unregister_host_class;
@@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void)
        if (err)
                goto unregister_conn_class;
 
-       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
-                       THIS_MODULE);
+       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+                                   NULL, THIS_MODULE);
        if (!nls) {
                err = -ENOBUFS;
                goto unregister_session_class;
@@ -1748,6 +1988,8 @@ unregister_conn_class:
        transport_class_unregister(&iscsi_connection_class);
 unregister_host_class:
        transport_class_unregister(&iscsi_host_class);
+unregister_endpoint_class:
+       class_unregister(&iscsi_endpoint_class);
 unregister_transport_class:
        class_unregister(&iscsi_transport_class);
        return err;
@@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void)
        transport_class_unregister(&iscsi_connection_class);
        transport_class_unregister(&iscsi_session_class);
        transport_class_unregister(&iscsi_host_class);
+       class_unregister(&iscsi_endpoint_class);
        class_unregister(&iscsi_transport_class);
 }
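
iscsi_create_endpoint(), iscsi_alloc_session() and iscsi_create_conn() above all use the same allocation trick: the object and the LLD's private area are obtained in a single kzalloc() of sizeof(*obj) + dd_size, and dd_data is pointed just past the structure. A userspace sketch of that layout, with an invented struct endpoint standing in for the real types:

/* Sketch of the single-allocation private-data layout used by the
 * endpoint/session/conn constructors above; struct endpoint and
 * endpoint_create() are invented for illustration. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct endpoint {
	unsigned int id;
	void *dd_data;		/* LLD private area, lives after the struct */
};

static struct endpoint *endpoint_create(size_t dd_size)
{
	struct endpoint *ep;

	/* one allocation holds the structure plus dd_size private bytes */
	ep = calloc(1, sizeof(*ep) + dd_size);
	if (!ep)
		return NULL;
	if (dd_size)
		ep->dd_data = &ep[1];	/* first byte after the struct */
	return ep;
}

int main(void)
{
	struct endpoint *ep = endpoint_create(64);

	if (!ep)
		return 1;
	memset(ep->dd_data, 0xaa, 64);	/* private area is ready to use */
	printf("struct at %p, dd_data at %p\n", (void *)ep, ep->dd_data);
	free(ep);
	return 0;
}
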
 
index d53312c..0c63947 100644 (file)
@@ -58,8 +58,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
-#include <scsi/sd.h>
 
+#include "sd.h"
 #include "scsi_logging.h"
 
 MODULE_AUTHOR("Eric Youngdale");
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
        }
 }
 
-static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
-{
-       return container_of(disk->private_data, struct scsi_disk, driver);
-}
-
 static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
 {
        struct scsi_disk *sdkp = NULL;
similarity index 91%
rename from include/scsi/sd.h
rename to drivers/scsi/sd.h
index 4f032d4..03a3d45 100644 (file)
@@ -48,6 +48,11 @@ struct scsi_disk {
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+       return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
 #define sd_printk(prefix, sdsk, fmt, a...)                             \
         (sdsk)->disk ?                                                 \
        sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,                \
index fccd2e8..d3b8ebb 100644 (file)
@@ -1036,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
                case SG_SCSI_RESET_DEVICE:
                        val = SCSI_TRY_RESET_DEVICE;
                        break;
+               case SG_SCSI_RESET_TARGET:
+                       val = SCSI_TRY_RESET_TARGET;
+                       break;
                case SG_SCSI_RESET_BUS:
                        val = SCSI_TRY_RESET_BUS;
                        break;
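
With the sg hunk above, the SG_SCSI_RESET ioctl also understands SG_SCSI_RESET_TARGET. A hedged userspace usage sketch, assuming /dev/sg0 exists, the caller has sufficient privileges, and the installed <scsi/sg.h> is new enough to define the constant:

/* Ask sg to attempt a target reset via SG_SCSI_RESET; illustrative
 * only, and dependent on the assumptions stated above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);
	int val = SG_SCSI_RESET_TARGET;

	if (fd < 0) {
		perror("open /dev/sg0");
		return 1;
	}
	if (ioctl(fd, SG_SCSI_RESET, &val) < 0)
		perror("SG_SCSI_RESET");
	close(fd);
	return 0;
}
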
index 0433d5d..4305371 100644 (file)
@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
        }
 }
 
-#define sym_que_entry(ptr, type, member) \
-       ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
-
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define sym_insque(new, pos)           __sym_que_add(new, pos, (pos)->flink)
 
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
new file mode 100644 (file)
index 0000000..a9c96d8
--- /dev/null
+++ b/include/linux/crc-t10dif.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC_T10DIF_H
+#define _LINUX_CRC_T10DIF_H
+
+#include <linux/types.h>
+
+__u16 crc_t10dif(unsigned char const *, size_t);
+
+#endif
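
The new header only declares crc_t10dif(); the implementation itself is not shown in this hunk. As a readable reference, here is a bit-by-bit sketch assuming the commonly documented T10 DIF CRC parameters (polynomial 0x8BB7, initial value 0, no input or output reflection); it is not the kernel's table-driven code.

/* Bit-by-bit reference for the CRC declared above, under the stated
 * parameter assumptions; for readability, not speed. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t crc_t10dif_ref(const unsigned char *buf, size_t len)
{
	uint16_t crc = 0;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;	/* feed the next byte, MSB first */
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const unsigned char data[] = "123456789";

	printf("crc = 0x%04x\n", crc_t10dif_ref(data, 9));
	return 0;
}
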
index e19e584..16be12f 100644 (file)
@@ -50,6 +50,7 @@ enum iscsi_uevent_e {
        ISCSI_UEVENT_TGT_DSCVR          = UEVENT_BASE + 15,
        ISCSI_UEVENT_SET_HOST_PARAM     = UEVENT_BASE + 16,
        ISCSI_UEVENT_UNBIND_SESSION     = UEVENT_BASE + 17,
+       ISCSI_UEVENT_CREATE_BOUND_SESSION       = UEVENT_BASE + 18,
 
        /* up events */
        ISCSI_KEVENT_RECV_PDU           = KEVENT_BASE + 1,
@@ -78,6 +79,12 @@ struct iscsi_uevent {
                        uint16_t        cmds_max;
                        uint16_t        queue_depth;
                } c_session;
+               struct msg_create_bound_session {
+                       uint64_t        ep_handle;
+                       uint32_t        initial_cmdsn;
+                       uint16_t        cmds_max;
+                       uint16_t        queue_depth;
+               } c_bound_session;
                struct msg_destroy_session {
                        uint32_t        sid;
                } d_session;
@@ -250,42 +257,49 @@ enum iscsi_param {
 
        ISCSI_PARAM_PING_TMO,
        ISCSI_PARAM_RECV_TMO,
+
+       ISCSI_PARAM_IFACE_NAME,
+       ISCSI_PARAM_ISID,
+       ISCSI_PARAM_INITIATOR_NAME,
        /* must always be last */
        ISCSI_PARAM_MAX,
 };
 
-#define ISCSI_MAX_RECV_DLENGTH         (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
-#define ISCSI_MAX_XMIT_DLENGTH         (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
-#define ISCSI_HDRDGST_EN               (1 << ISCSI_PARAM_HDRDGST_EN)
-#define ISCSI_DATADGST_EN              (1 << ISCSI_PARAM_DATADGST_EN)
-#define ISCSI_INITIAL_R2T_EN           (1 << ISCSI_PARAM_INITIAL_R2T_EN)
-#define ISCSI_MAX_R2T                  (1 << ISCSI_PARAM_MAX_R2T)
-#define ISCSI_IMM_DATA_EN              (1 << ISCSI_PARAM_IMM_DATA_EN)
-#define ISCSI_FIRST_BURST              (1 << ISCSI_PARAM_FIRST_BURST)
-#define ISCSI_MAX_BURST                        (1 << ISCSI_PARAM_MAX_BURST)
-#define ISCSI_PDU_INORDER_EN           (1 << ISCSI_PARAM_PDU_INORDER_EN)
-#define ISCSI_DATASEQ_INORDER_EN       (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
-#define ISCSI_ERL                      (1 << ISCSI_PARAM_ERL)
-#define ISCSI_IFMARKER_EN              (1 << ISCSI_PARAM_IFMARKER_EN)
-#define ISCSI_OFMARKER_EN              (1 << ISCSI_PARAM_OFMARKER_EN)
-#define ISCSI_EXP_STATSN               (1 << ISCSI_PARAM_EXP_STATSN)
-#define ISCSI_TARGET_NAME              (1 << ISCSI_PARAM_TARGET_NAME)
-#define ISCSI_TPGT                     (1 << ISCSI_PARAM_TPGT)
-#define ISCSI_PERSISTENT_ADDRESS       (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
-#define ISCSI_PERSISTENT_PORT          (1 << ISCSI_PARAM_PERSISTENT_PORT)
-#define ISCSI_SESS_RECOVERY_TMO                (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
-#define ISCSI_CONN_PORT                        (1 << ISCSI_PARAM_CONN_PORT)
-#define ISCSI_CONN_ADDRESS             (1 << ISCSI_PARAM_CONN_ADDRESS)
-#define ISCSI_USERNAME                 (1 << ISCSI_PARAM_USERNAME)
-#define ISCSI_USERNAME_IN              (1 << ISCSI_PARAM_USERNAME_IN)
-#define ISCSI_PASSWORD                 (1 << ISCSI_PARAM_PASSWORD)
-#define ISCSI_PASSWORD_IN              (1 << ISCSI_PARAM_PASSWORD_IN)
-#define ISCSI_FAST_ABORT               (1 << ISCSI_PARAM_FAST_ABORT)
-#define ISCSI_ABORT_TMO                        (1 << ISCSI_PARAM_ABORT_TMO)
-#define ISCSI_LU_RESET_TMO             (1 << ISCSI_PARAM_LU_RESET_TMO)
-#define ISCSI_HOST_RESET_TMO           (1 << ISCSI_PARAM_HOST_RESET_TMO)
-#define ISCSI_PING_TMO                 (1 << ISCSI_PARAM_PING_TMO)
-#define ISCSI_RECV_TMO                 (1 << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_MAX_RECV_DLENGTH         (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+#define ISCSI_MAX_XMIT_DLENGTH         (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+#define ISCSI_HDRDGST_EN               (1ULL << ISCSI_PARAM_HDRDGST_EN)
+#define ISCSI_DATADGST_EN              (1ULL << ISCSI_PARAM_DATADGST_EN)
+#define ISCSI_INITIAL_R2T_EN           (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+#define ISCSI_MAX_R2T                  (1ULL << ISCSI_PARAM_MAX_R2T)
+#define ISCSI_IMM_DATA_EN              (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+#define ISCSI_FIRST_BURST              (1ULL << ISCSI_PARAM_FIRST_BURST)
+#define ISCSI_MAX_BURST                        (1ULL << ISCSI_PARAM_MAX_BURST)
+#define ISCSI_PDU_INORDER_EN           (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+#define ISCSI_DATASEQ_INORDER_EN       (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+#define ISCSI_ERL                      (1ULL << ISCSI_PARAM_ERL)
+#define ISCSI_IFMARKER_EN              (1ULL << ISCSI_PARAM_IFMARKER_EN)
+#define ISCSI_OFMARKER_EN              (1ULL << ISCSI_PARAM_OFMARKER_EN)
+#define ISCSI_EXP_STATSN               (1ULL << ISCSI_PARAM_EXP_STATSN)
+#define ISCSI_TARGET_NAME              (1ULL << ISCSI_PARAM_TARGET_NAME)
+#define ISCSI_TPGT                     (1ULL << ISCSI_PARAM_TPGT)
+#define ISCSI_PERSISTENT_ADDRESS       (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+#define ISCSI_PERSISTENT_PORT          (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+#define ISCSI_SESS_RECOVERY_TMO                (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+#define ISCSI_CONN_PORT                        (1ULL << ISCSI_PARAM_CONN_PORT)
+#define ISCSI_CONN_ADDRESS             (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+#define ISCSI_USERNAME                 (1ULL << ISCSI_PARAM_USERNAME)
+#define ISCSI_USERNAME_IN              (1ULL << ISCSI_PARAM_USERNAME_IN)
+#define ISCSI_PASSWORD                 (1ULL << ISCSI_PARAM_PASSWORD)
+#define ISCSI_PASSWORD_IN              (1ULL << ISCSI_PARAM_PASSWORD_IN)
+#define ISCSI_FAST_ABORT               (1ULL << ISCSI_PARAM_FAST_ABORT)
+#define ISCSI_ABORT_TMO                        (1ULL << ISCSI_PARAM_ABORT_TMO)
+#define ISCSI_LU_RESET_TMO             (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+#define ISCSI_HOST_RESET_TMO           (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+#define ISCSI_PING_TMO                 (1ULL << ISCSI_PARAM_PING_TMO)
+#define ISCSI_RECV_TMO                 (1ULL << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_IFACE_NAME               (1ULL << ISCSI_PARAM_IFACE_NAME)
+#define ISCSI_ISID                     (1ULL << ISCSI_PARAM_ISID)
+#define ISCSI_INITIATOR_NAME           (1ULL << ISCSI_PARAM_INITIATOR_NAME)
 
 /* iSCSI HBA params */
 enum iscsi_host_param {
@@ -296,20 +310,13 @@ enum iscsi_host_param {
        ISCSI_HOST_PARAM_MAX,
 };
 
-#define ISCSI_HOST_HWADDRESS           (1 << ISCSI_HOST_PARAM_HWADDRESS)
-#define ISCSI_HOST_INITIATOR_NAME      (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
-#define ISCSI_HOST_NETDEV_NAME         (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
-#define ISCSI_HOST_IPADDRESS           (1 << ISCSI_HOST_PARAM_IPADDRESS)
+#define ISCSI_HOST_HWADDRESS           (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+#define ISCSI_HOST_INITIATOR_NAME      (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+#define ISCSI_HOST_NETDEV_NAME         (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+#define ISCSI_HOST_IPADDRESS           (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
 
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
-
-/**
- * iscsi_hostdata - get LLD hostdata from scsi_host
- * @_hostdata: pointer to scsi host's hostdata
- **/
-#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
 
 /*
  * These flags presents iSCSI Data-Path capabilities.
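
The iscsi_if.h masks above move from 1 << to 1ULL << because the parameter enum now reaches past bit 31 and the masks are intended to fit a 64-bit parameter mask; shifting a plain 32-bit int that far overflows or is undefined. A short demonstration:

/* Why the masks above need a 64-bit constant: once a flag's bit
 * position reaches 31 or more, "1 << pos" on a 32-bit int overflows
 * (or is undefined), while "1ULL << pos" stays well defined. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int pos = 32;			/* e.g. a newly added enum value */
	uint64_t ok = 1ULL << pos;	/* well defined: bit 32 set */

	/* (1 << pos) would shift a 32-bit int by its full width, which is
	 * undefined behaviour in C, so it is deliberately not shown. */
	printf("1ULL << %d = 0x%016llx\n", pos, (unsigned long long)ok);
	return 0;
}
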
index e0593bf..f2a2c11 100644 (file)
@@ -22,6 +22,7 @@
 #define ISCSI_PROTO_H
 
 #include <linux/types.h>
+#include <scsi/scsi.h>
 
 #define ISCSI_DRAFT20_VERSION  0x00
 
@@ -156,7 +157,7 @@ struct iscsi_ecdb_ahdr {
        uint8_t ahstype;
        uint8_t reserved;
        /* 4-byte aligned extended CDB spillover */
-       uint8_t ecdb[260 - ISCSI_CDB_SIZE];
+       uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
 };
 
 /* SCSI Response Header */
index cd3ca63..5e75bb7 100644 (file)
@@ -24,6 +24,7 @@
 #define LIBISCSI_H
 
 #include <linux/types.h>
+#include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
@@ -31,6 +32,7 @@
 #include <scsi/iscsi_if.h>
 
 struct scsi_transport_template;
+struct scsi_host_template;
 struct scsi_device;
 struct Scsi_Host;
 struct scsi_cmnd;
@@ -40,6 +42,7 @@ struct iscsi_cls_session;
 struct iscsi_cls_conn;
 struct iscsi_session;
 struct iscsi_nopin;
+struct device;
 
 /* #define DEBUG_SCSI */
 #ifdef DEBUG_SCSI
@@ -49,9 +52,7 @@ struct iscsi_nopin;
 #endif
 
 #define ISCSI_DEF_XMIT_CMDS_MAX        128     /* must be power of 2 */
-#define ISCSI_MGMT_CMDS_MAX    16      /* must be power of 2 */
-
-#define ISCSI_MGMT_ITT_OFFSET  0xa00
+#define ISCSI_MGMT_CMDS_MAX    15
 
 #define ISCSI_DEF_CMD_PER_LUN          32
 #define ISCSI_MAX_CMD_PER_LUN          128
@@ -69,7 +70,10 @@ enum {
 /* Connection suspend "bit" */
 #define ISCSI_SUSPEND_BIT              1
 
-#define ISCSI_ITT_MASK                 (0xfff)
+#define ISCSI_ITT_MASK                 (0x1fff)
+#define ISCSI_TOTAL_CMDS_MAX           4096
+/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+#define ISCSI_TOTAL_CMDS_MIN           16
 #define ISCSI_AGE_SHIFT                        28
 #define ISCSI_AGE_MASK                 (0xf << ISCSI_AGE_SHIFT)
 
@@ -82,18 +86,6 @@ enum {
        ISCSI_DIGEST_SIZE = sizeof(__u32),
 };
 
-struct iscsi_mgmt_task {
-       /*
-        * Becuae LLDs allocate their hdr differently, this is a pointer to
-        * that storage. It must be setup at session creation time.
-        */
-       struct iscsi_hdr        *hdr;
-       char                    *data;          /* mgmt payload */
-       unsigned                data_count;     /* counts data to be sent */
-       uint32_t                itt;            /* this ITT */
-       void                    *dd_data;       /* driver/transport data */
-       struct list_head        running;
-};
 
 enum {
        ISCSI_TASK_COMPLETED,
@@ -101,7 +93,7 @@ enum {
        ISCSI_TASK_RUNNING,
 };
 
-struct iscsi_cmd_task {
+struct iscsi_task {
        /*
         * Because LLDs allocate their hdr differently, this is a pointer
         * and length to that storage. It must be setup at session
@@ -118,6 +110,7 @@ struct iscsi_cmd_task {
        /* offset in unsolicited stream (bytes); */
        unsigned                unsol_offset;
        unsigned                data_count;     /* remaining Data-Out */
+       char                    *data;          /* mgmt payload */
        struct scsi_cmnd        *sc;            /* associated SCSI cmd*/
        struct iscsi_conn       *conn;          /* used connection    */
 
@@ -128,9 +121,9 @@ struct iscsi_cmd_task {
        void                    *dd_data;       /* driver/transport data */
 };
 
-static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+static inline void* iscsi_next_hdr(struct iscsi_task *task)
 {
-       return (void*)ctask->hdr + ctask->hdr_len;
+       return (void*)task->hdr + task->hdr_len;
 }
 
 /* Connection's states */
@@ -145,11 +138,6 @@ struct iscsi_conn {
        struct iscsi_cls_conn   *cls_conn;      /* ptr to class connection */
        void                    *dd_data;       /* iscsi_transport data */
        struct iscsi_session    *session;       /* parent session */
-       /*
-        * LLDs should set this lock. It protects the transport recv
-        * code
-        */
-       rwlock_t                *recv_lock;
        /*
         * conn_stop() flag: stop to recover, stop to terminate
         */
@@ -159,7 +147,7 @@ struct iscsi_conn {
        unsigned long           last_ping;
        int                     ping_timeout;
        int                     recv_timeout;
-       struct iscsi_mgmt_task  *ping_mtask;
+       struct iscsi_task       *ping_task;
 
        /* iSCSI connection-wide sequencing */
        uint32_t                exp_statsn;
@@ -175,9 +163,8 @@ struct iscsi_conn {
         * should always fit in this buffer
         */
        char                    *data;
-       struct iscsi_mgmt_task  *login_mtask;   /* mtask used for login/text */
-       struct iscsi_mgmt_task  *mtask;         /* xmit mtask in progress */
-       struct iscsi_cmd_task   *ctask;         /* xmit ctask in progress */
+       struct iscsi_task       *login_task;    /* mtask used for login/text */
+       struct iscsi_task       *task;          /* xmit task in progress */
 
        /* xmit */
        struct list_head        mgmtqueue;      /* mgmt (control) xmit queue */
@@ -208,9 +195,6 @@ struct iscsi_conn {
        /* remote portal currently connected to */
        int                     portal_port;
        char                    portal_address[ISCSI_ADDRESS_BUF_LEN];
-       /* local address */
-       int                     local_port;
-       char                    local_address[ISCSI_ADDRESS_BUF_LEN];
 
        /* MIB-statistics */
        uint64_t                txdata_octets;
@@ -246,6 +230,7 @@ enum {
 };
 
 struct iscsi_session {
+       struct iscsi_cls_session *cls_session;
        /*
         * Syncs up the scsi eh thread with the iscsi eh thread when sending
         * task management functions. This must be taken before the session
@@ -281,10 +266,8 @@ struct iscsi_session {
        char                    *password;
        char                    *password_in;
        char                    *targetname;
+       char                    *ifacename;
        char                    *initiatorname;
-       /* hw address or netdev iscsi connection is bound to */
-       char                    *hwaddress;
-       char                    *netdev;
        /* control data */
        struct iscsi_transport  *tt;
        struct Scsi_Host        *host;
@@ -298,12 +281,20 @@ struct iscsi_session {
        int                     state;          /* session state           */
        int                     age;            /* counts session re-opens */
 
+       int                     scsi_cmds_max;  /* max scsi commands */
        int                     cmds_max;       /* size of cmds array */
-       struct iscsi_cmd_task   **cmds;         /* Original Cmds arr */
+       struct iscsi_task       **cmds;         /* Original Cmds arr */
        struct iscsi_pool       cmdpool;        /* PDU's pool */
-       int                     mgmtpool_max;   /* size of mgmt array */
-       struct iscsi_mgmt_task  **mgmt_cmds;    /* Original mgmt arr */
-       struct iscsi_pool       mgmtpool;       /* Mgmt PDU's pool */
+};
+
+struct iscsi_host {
+       char                    *initiatorname;
+       /* hw address or netdev iscsi connection is bound to */
+       char                    *hwaddress;
+       char                    *netdev;
+       /* local address */
+       int                     local_port;
+       char                    local_address[ISCSI_ADDRESS_BUF_LEN];
 };
 
 /*
@@ -316,42 +307,44 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
 extern int iscsi_queuecommand(struct scsi_cmnd *sc,
                              void (*done)(struct scsi_cmnd *));
 
-
 /*
  * iSCSI host helpers.
  */
+#define iscsi_host_priv(_shost) \
+       (shost_priv(_shost) + sizeof(struct iscsi_host))
+
 extern int iscsi_host_set_param(struct Scsi_Host *shost,
                                enum iscsi_host_param param, char *buf,
                                int buflen);
 extern int iscsi_host_get_param(struct Scsi_Host *shost,
                                enum iscsi_host_param param, char *buf);
+extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+                                         int dd_data_size, uint16_t qdepth);
+extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_free(struct Scsi_Host *shost);
 
 /*
  * session management
  */
 extern struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
-                   uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+                   uint16_t, int, uint32_t, unsigned int);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
-extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
                           enum iscsi_param param, char *buf, int buflen);
 extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
                                   enum iscsi_param param, char *buf);
 
-#define session_to_cls(_sess) \
-       hostdata_session(_sess->host->hostdata)
-
 #define iscsi_session_printk(prefix, _sess, fmt, a...) \
-       iscsi_cls_session_printk(prefix,                \
-               (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+       iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
 
 /*
  * connection management
  */
 extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
-                                              uint32_t);
+                                              int, uint32_t);
 extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
 extern int iscsi_conn_start(struct iscsi_cls_conn *);
 extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
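
A hedged sketch of the allocation order these new host helpers imply for a software iSCSI LLD; the template, the transport, and every scalar argument are placeholders, since this hunk only shows the prototypes:

#include <scsi/libiscsi.h>
#include <scsi/scsi_host.h>

extern struct scsi_host_template example_sht;           /* assumed to exist */
extern struct iscsi_transport example_transport;        /* assumed to exist */

static struct iscsi_cls_session *example_create_session(void)
{
        struct Scsi_Host *shost;
        struct iscsi_cls_session *cls_session;

        shost = iscsi_host_alloc(&example_sht, 0 /* dd_data_size */, 16 /* qdepth */);
        if (!shost)
                return NULL;
        if (iscsi_host_add(shost, NULL /* no parent device */))
                goto free_host;

        /* the four scalar arguments are placeholders here */
        cls_session = iscsi_session_setup(&example_transport, shost,
                                          128, 0, 0, 0);
        if (!cls_session)
                goto remove_host;
        return cls_session;

remove_host:
        iscsi_host_remove(shost);
free_host:
        iscsi_host_free(shost);
        return NULL;
}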
@@ -360,25 +353,29 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
 extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
 extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
                                enum iscsi_param param, char *buf);
+extern void iscsi_suspend_tx(struct iscsi_conn *conn);
 
 #define iscsi_conn_printk(prefix, _c, fmt, a...) \
-       iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+       iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+                             fmt, ##a)
 
 /*
  * pdu and task processing
  */
 extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
                                        struct iscsi_data *hdr);
 extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
                                char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
                              char *, int);
-extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
-                           uint32_t *);
-extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
-extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
-                                struct iscsi_mgmt_task *mtask);
+extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+                               char *, int);
+extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+extern void iscsi_requeue_task(struct iscsi_task *task);
+extern void iscsi_put_task(struct iscsi_task *task);
+extern void __iscsi_get_task(struct iscsi_task *task);
 
 /*
  * generic helpers
index 32742c4..00137a7 100644 (file)
@@ -9,6 +9,7 @@
 #define _SCSI_SCSI_H
 
 #include <linux/types.h>
+#include <scsi/scsi_cmnd.h>
 
 /*
  * The maximum number of SG segments that we will put inside a
@@ -400,6 +401,7 @@ struct scsi_lun {
 #define SOFT_ERROR      0x2005
 #define ADD_TO_MLQUEUE  0x2006
 #define TIMEOUT_ERROR   0x2007
+#define SCSI_RETURN_NOT_HANDLED   0x2008
 
 /*
  * Midlevel queue return values.
@@ -424,6 +426,22 @@ struct scsi_lun {
 #define driver_byte(result) (((result) >> 24) & 0xff)
 #define suggestion(result)  (driver_byte(result) & SUGGEST_MASK)
 
+static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+{
+       cmd->result |= status << 8;
+}
+
+static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
+{
+       cmd->result |= status << 16;
+}
+
+static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+{
+       cmd->result |= status << 24;
+}
+
+
 #define sense_class(sense)  (((sense) >> 4) & 0x7)
 #define sense_error(sense)  ((sense) & 0xf)
 #define sense_valid(sense)  ((sense) & 0x80);
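
The three new helpers simply OR a value into the existing byte lanes of scsi_cmnd->result (message byte at bit 8, host byte at bit 16, driver byte at bit 24). A minimal caller sketch, using constants that already exist in scsi.h; the function itself is illustrative:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* illustrative only: fail a command with a host error and sense data */
static void example_fail_cmd(struct scsi_cmnd *cmd)
{
        cmd->result = 0;
        set_host_byte(cmd, DID_ERROR);          /* result |= DID_ERROR << 16 */
        set_driver_byte(cmd, DRIVER_SENSE);     /* result |= DRIVER_SENSE << 24 */
}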
index 3e46dfa..66c9448 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
-#include <linux/blkdev.h>
 
 struct Scsi_Host;
 struct scsi_device;
index 00b7876..6467f78 100644 (file)
@@ -162,9 +162,29 @@ struct scsi_device {
 
        struct execute_work     ew; /* used to get process context on put */
 
+       struct scsi_dh_data     *scsi_dh_data;
        enum scsi_device_state sdev_state;
        unsigned long           sdev_data[0];
 } __attribute__((aligned(sizeof(unsigned long))));
+
+struct scsi_device_handler {
+       /* Used by the infrastructure */
+       struct list_head list; /* list of scsi_device_handlers */
+       struct notifier_block nb;
+
+       /* Filled by the hardware handler */
+       struct module *module;
+       const char *name;
+       int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+       int (*activate)(struct scsi_device *);
+       int (*prep_fn)(struct scsi_device *, struct request *);
+};
+
+struct scsi_dh_data {
+       struct scsi_device_handler *scsi_dh;
+       char buf[0];
+};
+
 #define        to_scsi_device(d)       \
        container_of(d, struct scsi_device, sdev_gendev)
 #define        class_to_sdev(d)        \
@@ -231,7 +251,9 @@ extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
                uint, uint, uint, void *hostdata);
 extern int scsi_add_device(struct Scsi_Host *host, uint channel,
                           uint target, uint lun);
+extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
 extern void scsi_remove_device(struct scsi_device *);
+extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
 
 extern int scsi_device_get(struct scsi_device *);
 extern void scsi_device_put(struct scsi_device *);
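
A hypothetical skeleton of a hardware handler built on the scsi_device_handler interface added above; only the hooks and registration calls visible in this hunk are used, and everything named example_* is made up:

#include <linux/module.h>
#include <scsi/scsi_device.h>

static int example_dh_activate(struct scsi_device *sdev)
{
        /* issue the array-specific "make this path active" command here */
        return 0;
}

static struct scsi_device_handler example_dh = {
        .name           = "example_dh",
        .module         = THIS_MODULE,
        .activate       = example_dh_activate,
};

static int __init example_dh_init(void)
{
        return scsi_register_device_handler(&example_dh);
}

static void __exit example_dh_exit(void)
{
        scsi_unregister_device_handler(&example_dh);
}

module_init(example_dh_init);
module_exit(example_dh_exit);
MODULE_LICENSE("GPL");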
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
new file mode 100644 (file)
index 0000000..3ad2303
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Header file for SCSI device handler infrastructure.
+ *
+ * Modified version of patches posted by Mike Christie <michaelc@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ *      Authors:
+ *               Chandra Seetharaman <sekharan@us.ibm.com>
+ *               Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <scsi/scsi_device.h>
+
+enum {
+       SCSI_DH_OK = 0,
+       /*
+        * device errors
+        */
+       SCSI_DH_DEV_FAILED,     /* generic device error */
+       SCSI_DH_DEV_TEMP_BUSY,
+       SCSI_DH_DEVICE_MAX,     /* max device blkerr definition */
+
+       /*
+        * transport errors
+        */
+       SCSI_DH_NOTCONN = SCSI_DH_DEVICE_MAX + 1,
+       SCSI_DH_CONN_FAILURE,
+       SCSI_DH_TRANSPORT_MAX,  /* max transport blkerr definition */
+
+       /*
+        * driver and generic errors
+        */
+       SCSI_DH_IO = SCSI_DH_TRANSPORT_MAX + 1, /* generic error */
+       SCSI_DH_INVALID_IO,
+       SCSI_DH_RETRY,          /* retry the req, but not immediately */
+       SCSI_DH_IMM_RETRY,      /* immediately retry the req */
+       SCSI_DH_TIMED_OUT,
+       SCSI_DH_RES_TEMP_UNAVAIL,
+       SCSI_DH_DEV_OFFLINED,
+       SCSI_DH_NOSYS,
+       SCSI_DH_DRIVER_MAX,
+};
+#if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE)
+extern int scsi_dh_activate(struct request_queue *);
+extern int scsi_dh_handler_exist(const char *);
+#else
+static inline int scsi_dh_activate(struct request_queue *req)
+{
+       return 0;
+}
+static inline int scsi_dh_handler_exist(const char *name)
+{
+       return 0;
+}
+#endif
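
A sketch of how a multipath user might call the two entry points; the function and its arguments are illustrative, and when CONFIG_SCSI_DH is not set the inline stubs above turn both calls into constants:

#include <linux/errno.h>
#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

static int example_pg_init(struct request_queue *q, const char *hw_handler)
{
        /* refuse a setup that names a handler that is not loaded */
        if (hw_handler && !scsi_dh_handler_exist(hw_handler))
                return -EINVAL;

        /* ask the attached handler (if any) to activate this path */
        return scsi_dh_activate(q);
}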
index aab1eae..f5444e0 100644 (file)
 
 struct scsi_transport_template;
 struct iscsi_transport;
+struct iscsi_endpoint;
 struct Scsi_Host;
 struct iscsi_cls_conn;
 struct iscsi_conn;
-struct iscsi_cmd_task;
-struct iscsi_mgmt_task;
+struct iscsi_task;
 struct sockaddr;
 
 /**
@@ -58,19 +58,22 @@ struct sockaddr;
  * @stop_conn:         suspend/recover/terminate connection
  * @send_pdu:          send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
  * @session_recovery_timedout: notify LLD a block during recovery timed out
- * @init_cmd_task:     Initialize a iscsi_cmd_task and any internal structs.
- *                     Called from queuecommand with session lock held.
- * @init_mgmt_task:    Initialize a iscsi_mgmt_task and any internal structs.
- *                     Called from iscsi_conn_send_generic with xmitmutex.
- * @xmit_cmd_task:     Requests LLD to transfer cmd task. Returns 0 or the
+ * @init_task:         Initialize an iscsi_task and any internal structs.
+ *                     When offloading the data path, this is called from
+ *                     queuecommand with the session lock, or from the
+ *                     iscsi_conn_send_pdu context with the session lock.
+ *                     When not offloading the data path, this is called
+ *                     from the scsi work queue without the session lock.
+ * @xmit_task:         Requests LLD to transfer cmd task. Returns 0 or
  *                     the number of bytes transferred on success, and -Exyz
- *                     value on error.
- * @xmit_mgmt_task:    Requests LLD to transfer mgmt task. Returns 0 or the
- *                     the number of bytes transferred on success, and -Exyz
- *                     value on error.
- * @cleanup_cmd_task:  requests LLD to fail cmd task. Called with xmitmutex
- *                     and session->lock after the connection has been
- *                     suspended and terminated during recovery. If called
+ *                     value on error. When offloading the data path, this
+ *                     is called from queuecommand with the session lock, or
+ *                     from the iscsi_conn_send_pdu context with the session
+ *                     lock. When not offloading the data path, this is called
+ *                     from the scsi work queue without the session lock.
+ * @cleanup_task:      requests LLD to fail task. Called with session lock
+ *                     and after the connection has been suspended and
+ *                     terminated during recovery. If called
  *                     from abort task then connection is not suspended
  *                     or terminated but sk_callback_lock is held
  *
@@ -83,17 +86,9 @@ struct iscsi_transport {
        /* LLD sets this to indicate what values it can export to sysfs */
        uint64_t param_mask;
        uint64_t host_param_mask;
-       struct scsi_host_template *host_template;
-       /* LLD connection data size */
-       int conndata_size;
-       /* LLD session data size */
-       int sessiondata_size;
-       int max_lun;
-       unsigned int max_conn;
-       unsigned int max_cmd_len;
-       struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
-               struct scsi_transport_template *t, uint16_t, uint16_t,
-               uint32_t sn, uint32_t *hn);
+       struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+                                       uint16_t cmds_max, uint16_t qdepth,
+                                       uint32_t sn, uint32_t *hn);
        void (*destroy_session) (struct iscsi_cls_session *session);
        struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
                                uint32_t cid);
@@ -118,20 +113,15 @@ struct iscsi_transport {
                         char *data, uint32_t data_size);
        void (*get_stats) (struct iscsi_cls_conn *conn,
                           struct iscsi_stats *stats);
-       int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
-       void (*init_mgmt_task) (struct iscsi_conn *conn,
-                               struct iscsi_mgmt_task *mtask);
-       int (*xmit_cmd_task) (struct iscsi_conn *conn,
-                             struct iscsi_cmd_task *ctask);
-       void (*cleanup_cmd_task) (struct iscsi_conn *conn,
-                                 struct iscsi_cmd_task *ctask);
-       int (*xmit_mgmt_task) (struct iscsi_conn *conn,
-                              struct iscsi_mgmt_task *mtask);
+       int (*init_task) (struct iscsi_task *task);
+       int (*xmit_task) (struct iscsi_task *task);
+       void (*cleanup_task) (struct iscsi_conn *conn,
+                                 struct iscsi_task *task);
        void (*session_recovery_timedout) (struct iscsi_cls_session *session);
-       int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
-                          uint64_t *ep_handle);
-       int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
-       void (*ep_disconnect) (uint64_t ep_handle);
+       struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+                                             int non_blocking);
+       int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+       void (*ep_disconnect) (struct iscsi_endpoint *ep);
        int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
                          uint32_t enable, struct sockaddr *dst_addr);
 };
@@ -172,9 +162,10 @@ enum {
        ISCSI_SESSION_FREE,
 };
 
+#define ISCSI_MAX_TARGET -1
+
 struct iscsi_cls_session {
        struct list_head sess_list;             /* item in session_list */
-       struct list_head host_list;
        struct iscsi_transport *transport;
        spinlock_t lock;
        struct work_struct block_work;
@@ -186,7 +177,7 @@ struct iscsi_cls_session {
        int recovery_tmo;
        struct delayed_work recovery_work;
 
-       int target_id;
+       unsigned int target_id;
 
        int state;
        int sid;                                /* session id */
@@ -203,14 +194,22 @@ struct iscsi_cls_session {
 #define starget_to_session(_stgt) \
        iscsi_dev_to_session(_stgt->dev.parent)
 
-struct iscsi_host {
-       struct list_head sessions;
+struct iscsi_cls_host {
        atomic_t nr_scans;
        struct mutex mutex;
        struct workqueue_struct *scan_workq;
        char scan_workq_name[KOBJ_NAME_LEN];
 };
 
+extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+                               void (*fn)(struct iscsi_cls_session *));
+
+struct iscsi_endpoint {
+       void *dd_data;                  /* LLD private data */
+       struct device dev;
+       unsigned int id;
+};
+
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
  */
@@ -222,22 +221,26 @@ struct iscsi_host {
 
 extern int iscsi_session_chkready(struct iscsi_cls_session *session);
 extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
-                                       struct iscsi_transport *transport);
+                               struct iscsi_transport *transport, int dd_size);
 extern int iscsi_add_session(struct iscsi_cls_session *session,
                             unsigned int target_id);
 extern int iscsi_session_event(struct iscsi_cls_session *session,
                               enum iscsi_uevent_e event);
 extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
                                                struct iscsi_transport *t,
+                                               int dd_size,
                                                unsigned int target_id);
 extern void iscsi_remove_session(struct iscsi_cls_session *session);
 extern void iscsi_free_session(struct iscsi_cls_session *session);
 extern int iscsi_destroy_session(struct iscsi_cls_session *session);
 extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
-                                           uint32_t cid);
+                                               int dd_size, uint32_t cid);
 extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
 extern void iscsi_unblock_session(struct iscsi_cls_session *session);
 extern void iscsi_block_session(struct iscsi_cls_session *session);
 extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
 
 #endif
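
A hedged sketch of the new endpoint object as an offload driver might use it from its ep_connect/ep_disconnect hooks; struct example_conn and the error-handling convention are assumptions, not part of this patch:

#include <linux/err.h>
#include <scsi/scsi_transport_iscsi.h>

struct example_conn {
        struct sockaddr *dst;
        /* ... driver-private connection state ... */
};

static struct iscsi_endpoint *example_ep_connect(struct sockaddr *dst_addr,
                                                 int non_blocking)
{
        struct iscsi_endpoint *ep;
        struct example_conn *conn;

        ep = iscsi_create_endpoint(sizeof(struct example_conn));
        if (!ep)
                return ERR_PTR(-ENOMEM);

        /* driver-private data hangs off ep->dd_data */
        conn = ep->dd_data;
        conn->dst = dst_addr;
        /* start connecting here (possibly asynchronously) */
        return ep;
}

static void example_ep_disconnect(struct iscsi_endpoint *ep)
{
        /* tear the connection down, then release the endpoint */
        iscsi_destroy_endpoint(ep);
}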
index 519c49a..934ae38 100644 (file)
@@ -206,6 +206,7 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
 #define                SG_SCSI_RESET_DEVICE    1
 #define                SG_SCSI_RESET_BUS       2
 #define                SG_SCSI_RESET_HOST      3
+#define                SG_SCSI_RESET_TARGET    4
 
 /* synchronous SCSI command ioctl, (only in version 3 interface) */
 #define SG_IO 0x2285   /* similar effect as write() followed by read() */
index 8cc8e87..c7ad7a5 100644 (file)
@@ -29,6 +29,13 @@ config CRC16
          the kernel tree does. Such modules that use library CRC16
          functions require M here.
 
+config CRC_T10DIF
+       tristate "CRC calculation for the T10 Data Integrity Field"
+       help
+         This option is only needed if a module that's not in the
+         kernel tree needs to calculate CRC checks for use with the
+         SCSI data integrity subsystem.
+
 config CRC_ITU_T
        tristate "CRC ITU-T V.41 functions"
        help
index 4b836a5..2c62a9c 100644 (file)
@@ -54,6 +54,7 @@ endif
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_CRC_CCITT)        += crc-ccitt.o
 obj-$(CONFIG_CRC16)    += crc16.o
+obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T)        += crc-itu-t.o
 obj-$(CONFIG_CRC32)    += crc32.o
 obj-$(CONFIG_CRC7)     += crc7.o
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
new file mode 100644 (file)
index 0000000..fbbd66e
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * T10 Data Integrity Field CRC16 calculation
+ *
+ * Copyright (c) 2007 Oracle Corporation.  All rights reserved.
+ * Written by Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+
+/* Table generated using the following polynomial:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const __u16 t10_dif_crc_table[256] = {
+       0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+       0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+       0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+       0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+       0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+       0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+       0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+       0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+       0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+       0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+       0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+       0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+       0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+       0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+       0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+       0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+       0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+       0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+       0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+       0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+       0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+       0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+       0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+       0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+       0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+       0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+       0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+       0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+       0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+       0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+       0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+       0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
+};
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+       __u16 crc = 0;
+       unsigned int i;
+
+       for (i = 0 ; i < len ; i++)
+               crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+
+       return crc;
+}
+EXPORT_SYMBOL(crc_t10dif);
+
+MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_LICENSE("GPL");
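
A minimal caller sketch for the new helper: computing the 16-bit CRC of one 512-byte sector. The sector size and the use of the result as a DIF guard tag are illustrative:

#include <linux/types.h>
#include <linux/crc-t10dif.h>

/* illustrative: the CRC of the data block becomes the tuple's guard tag */
static __u16 example_guard_tag(const unsigned char *sector)
{
        return crc_t10dif(sector, 512);
}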