/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UIC_POWER_MODE |\
                                 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT  100 /* msecs */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
        ({                                                              \
                int _ret;                                               \
                if (_on)                                                \
                        _ret = ufshcd_enable_vreg(_dev, _vreg);         \
                else                                                    \
                        _ret = ufshcd_disable_vreg(_dev, _vreg);        \
                _ret;                                                   \
        })

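/*
 * Illustrative sketch (not part of the original source): because the macro
 * above is a statement expression, it evaluates to the return code of the
 * enable/disable helper and can be used directly in error handling, e.g.
 * (assuming a valid "vreg" pointer):
 *
 *      ret = ufshcd_toggle_vreg(hba->dev, vreg, true);
 *      if (ret)
 *              dev_err(hba->dev, "failed to enable vreg: %d\n", ret);
 */
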
enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
        UFSHCD_MAX_LUNS         = 8,
        UFSHCD_CMD_PER_LUN      = 32,
        UFSHCD_CAN_QUEUE        = 32,
};

/* UFSHCD states */
enum {
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
        UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
        UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
        UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
        UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
        UFSHCD_INT_DISABLE,
        UFSHCD_INT_ENABLE,
        UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
        (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
        (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
                                        struct scsi_device *sdev);

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
        int err = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        /* ignore bits that we don't intend to wait on */
        val = val & mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                /* wakeup within 50us of expiry */
                usleep_range(interval_us, interval_us + 50);

                if (time_after(jiffies, timeout)) {
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}

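/*
 * Illustrative sketch (not part of the original source): a typical caller
 * polls a doorbell register until a bit drops, e.g. waiting up to 1 second,
 * in 1000us steps, for a transfer request slot to clear, which is exactly
 * what ufshcd_clear_cmd() below does:
 *
 *      u32 mask = 1 << tag;
 *
 *      err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                     mask, ~mask, 1000, 1000);
 */
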
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
        if (hba->ufs_version == UFSHCI_VERSION_10)
                return INTERRUPT_MASK_ALL_VER_10;
        else
                return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
                                                DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
        return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
        int tag;
        bool ret = false;

        if (!free_slot)
                goto out;

        do {
                tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
                if (tag >= hba->nutmrs)
                        goto out;
        } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

        *free_slot = tag;
        ret = true;
out:
        return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
        clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
        /*
         * The mask 0xFF is for the following HCS register bits
         * Bit          Description
         *  0           Device Present
         *  1           UTRLRDY
         *  2           UTMRLRDY
         *  3           UCRDY
         *  4           HEI
         *  5           DEI
         * 6-7          reserved
         */
        return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

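/*
 * Worked example (added for clarity, not in the original source): with
 * Device Present, UTRLRDY, UTMRLRDY and UCRDY all set, reg = 0x0F, so
 * ((0x0F & 0xFF) >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0 (success). If UCRDY is
 * still clear, reg = 0x07 gives 0x03 ^ 0x07 = 0x04, a non-zero failure.
 */
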
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
               MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3
 * Returns the value of the UIC command argument3 register
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *                              from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                        MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE |
                      INT_AGGR_COUNTER_AND_TIMER_RESET,
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                      INT_AGGR_COUNTER_THLD_VAL(cnt) |
                      INT_AGGR_TIMEOUT_VAL(tmout),
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

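/*
 * Illustrative sketch (not part of the original source; the actual call
 * site is outside this excerpt): during controller initialization the
 * driver typically enables aggregation across all transfer request slots
 * with the default 80us (2 * 40us) timeout:
 *
 *      ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */
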
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 *                      Setting the run-stop registers to 1 indicates to the
 *                      host controller that it can start processing requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
        ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
        int len;
        if (lrbp->sense_buffer &&
            ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
                        min_t(int, len, SCSI_SENSE_BUFFERSIZE));
        }
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

        memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

        /* Get the descriptor */
        if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
                u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
                                GENERAL_UPIU_REQUEST_SIZE;
                u16 resp_len;
                u16 buf_len;

                /* data segment length */
                resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
                                                MASK_QUERY_DATA_SEG_LEN;
                buf_len = be16_to_cpu(
                                hba->dev_cmd.query.request.upiu_req.length);
                if (likely(buf_len >= resp_len)) {
                        memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
                } else {
                        dev_warn(hba->dev,
                                "%s: Response size is bigger than buffer",
                                __func__);
                        return -EINVAL;
                }
        }

        return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
        hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

        /* nutrs and nutmrs are 0 based values */
        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
        hba->nutmrs =
        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
        if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
                return true;
        else
                return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
        WARN_ON(hba->active_uic_cmd);

        hba->active_uic_cmd = uic_cmd;

        /* Write Args */
        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
        ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

        /* Write UIC Cmd */
        ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
                      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
        int ret;
        unsigned long flags;

        if (wait_for_completion_timeout(&uic_cmd->done,
                                        msecs_to_jiffies(UIC_CMD_TIMEOUT)))
                ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
        else
                ret = -ETIMEDOUT;

        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller must already
 * hold the mutex.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
        int ret;
        unsigned long flags;

        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                        "Controller not ready to accept UIC commands\n");
                return -EIO;
        }

        init_completion(&uic_cmd->done);

        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

        return ret;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
        int ret;

        mutex_lock(&hba->uic_cmd_mutex);
        ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
        mutex_unlock(&hba->uic_cmd_mutex);

        return ret;
}

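/*
 * Illustrative sketch (not part of the original source): callers zero a
 * uic_command on the stack, fill in the opcode and arguments, and hand it
 * to ufshcd_send_uic_cmd(); see ufshcd_dme_link_startup() and
 * ufshcd_dme_set_attr() further down for real callers:
 *
 *      struct uic_command uic_cmd = {0};
 *
 *      uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 *      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 */
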
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
        struct ufshcd_sg_entry *prd_table;
        struct scatterlist *sg;
        struct scsi_cmnd *cmd;
        int sg_segments;
        int i;

        cmd = lrbp->cmd;
        sg_segments = scsi_dma_map(cmd);
        if (sg_segments < 0)
                return sg_segments;

        if (sg_segments) {
                lrbp->utr_descriptor_ptr->prd_table_length =
                                        cpu_to_le16((u16) (sg_segments));

                prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

                scsi_for_each_sg(cmd, sg, sg_segments, i) {
                        prd_table[i].size  =
                                cpu_to_le32(((u32) sg_dma_len(sg))-1);
                        prd_table[i].base_addr =
                                cpu_to_le32(lower_32_bits(sg->dma_address));
                        prd_table[i].upper_addr =
                                cpu_to_le32(upper_32_bits(sg->dma_address));
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
        }

        return 0;
}

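/*
 * Note on the PRDT encoding above (added for clarity, not in the original
 * source): the size field holds the byte count minus one, so a 4096-byte
 * scatter-gather segment is stored as 0xFFF, and a 64-bit DMA address is
 * split across the base_addr/upper_addr fields.
 */
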
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

        if (hba->ufs_version == UFSHCI_VERSION_10) {
                u32 rw;
                rw = set & INTERRUPT_MASK_RW_VER_10;
                set = rw | ((set ^ intrs) & intrs);
        } else {
                set |= intrs;
        }

        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

        if (hba->ufs_version == UFSHCI_VERSION_10) {
                u32 rw;
                rw = (set & INTERRUPT_MASK_RW_VER_10) &
                        ~(intrs & INTERRUPT_MASK_RW_VER_10);
                set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

        } else {
                set &= ~intrs;
        }

        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

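/*
 * Illustrative sketch (not part of the original source; the actual call
 * site is outside this excerpt): once the host controller is brought up,
 * the driver enables its working interrupt set using the mask defined at
 * the top of this file:
 *
 *      ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 */
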
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
                u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
        u32 dword_0;

        if (cmd_dir == DMA_FROM_DEVICE) {
                data_direction = UTP_DEVICE_TO_HOST;
                *upiu_flags = UPIU_CMD_FLAGS_READ;
        } else if (cmd_dir == DMA_TO_DEVICE) {
                data_direction = UTP_HOST_TO_DEVICE;
                *upiu_flags = UPIU_CMD_FLAGS_WRITE;
        } else {
                data_direction = UTP_NO_DATA_TRANSFER;
                *upiu_flags = UPIU_CMD_FLAGS_NONE;
        }

        dword_0 = data_direction | (lrbp->command_type
                                << UPIU_COMMAND_TYPE_OFFSET);
        if (lrbp->intr_cmd)
                dword_0 |= UTP_REQ_DESC_INT_CMD;

        /* Transfer request descriptor header fields */
        req_desc->header.dword_0 = cpu_to_le32(dword_0);

        /*
         * assigning invalid value for command status. Controller
         * updates OCS on command completion, with the command
         * status
         */
        req_desc->header.dword_2 =
                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
                                UPIU_TRANSACTION_COMMAND, upiu_flags,
                                lrbp->lun, lrbp->task_tag);
        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
                                UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

        /* Total EHS length and Data segment length will be zero */
        ucd_req_ptr->header.dword_2 = 0;

        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);

        memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
                (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
                                struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
        struct ufs_query *query = &hba->dev_cmd.query;
        u16 len = be16_to_cpu(query->request.upiu_req.length);
        u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

        /* Query request header */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
                        lrbp->lun, lrbp->task_tag);
        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
                        0, query->request.query_func, 0, 0);

        /* Data segment length */
        ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
                        0, 0, len >> 8, (u8)len);

        /* Copy the Query Request buffer as is */
        memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
                        QUERY_OSF_SIZE);

        /* Copy the Descriptor */
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
                memcpy(descp, query->descriptor, len);

}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

        memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 =
                UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU)
 * @hba - per adapter instance
 * @lrbp - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
        u32 upiu_flags;
        int ret = 0;

        switch (lrbp->command_type) {
        case UTP_CMD_TYPE_SCSI:
                if (likely(lrbp->cmd)) {
                        ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
                                        lrbp->cmd->sc_data_direction);
                        ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
                } else {
                        ret = -EINVAL;
                }
                break;
        case UTP_CMD_TYPE_DEV_MANAGE:
                ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
                if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                        ufshcd_prepare_utp_query_req_upiu(
                                        hba, lrbp, upiu_flags);
                else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
                        ufshcd_prepare_utp_nop_upiu(lrbp);
                else
                        ret = -EINVAL;
                break;
        case UTP_CMD_TYPE_UFS:
                /* For UFS native command implementation */
                ret = -ENOTSUPP;
                dev_err(hba->dev, "%s: UFS native commands are not supported\n",
                        __func__);
                break;
        default:
                ret = -ENOTSUPP;
                dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
                                __func__, lrbp->command_type);
                break;
        } /* end of switch */

        return ret;
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        struct ufshcd_lrb *lrbp;
        struct ufs_hba *hba;
        unsigned long flags;
        int tag;
        int err = 0;

        hba = shost_priv(host);

        tag = cmd->request->tag;

        spin_lock_irqsave(hba->host->host_lock, flags);
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
        case UFSHCD_STATE_RESET:
                err = SCSI_MLQUEUE_HOST_BUSY;
                goto out_unlock;
        case UFSHCD_STATE_ERROR:
                set_host_byte(cmd, DID_ERROR);
                cmd->scsi_done(cmd);
                goto out_unlock;
        default:
                dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
                                __func__, hba->ufshcd_state);
                set_host_byte(cmd, DID_BAD_TARGET);
                cmd->scsi_done(cmd);
                goto out_unlock;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
                 * Dev manage command in progress, requeue the command.
                 * Requeuing the command helps in cases where the request *may*
                 * find a different tag instead of waiting for dev manage
                 * command completion.
                 */
                err = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        lrbp = &hba->lrb[tag];

        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
        lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = cmd->device->lun;
        lrbp->intr_cmd = false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;

        /* form UPIU before issuing the command */
        ufshcd_compose_upiu(hba, lrbp);
        err = ufshcd_map_sg(lrbp);
        if (err) {
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }

        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_send_command(hba, tag);
out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
        return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
                struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
        lrbp->cmd = NULL;
        lrbp->sense_bufflen = 0;
        lrbp->sense_buffer = NULL;
        lrbp->task_tag = tag;
        lrbp->lun = 0; /* device management cmd is not specific to any LUN */
        lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
        lrbp->intr_cmd = true; /* No interrupt aggregation */
        hba->dev_cmd.type = cmd_type;

        return ufshcd_compose_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
        int err = 0;
        unsigned long flags;
        u32 mask = 1 << tag;

        /* clear outstanding transaction before retry */
        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_utrl_clear(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        /*
         * wait for h/w to clear corresponding bit in door-bell.
         * max. wait is 1 sec.
         */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
                        mask, ~mask, 1000, 1000);

        return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

        /* Get the UPIU response */
        query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
                                UPIU_RSP_CODE_OFFSET;
        return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
        int resp;
        int err = 0;

        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

        switch (resp) {
        case UPIU_TRANSACTION_NOP_IN:
                if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
                        err = -EINVAL;
                        dev_err(hba->dev, "%s: unexpected response %x\n",
                                        __func__, resp);
                }
                break;
        case UPIU_TRANSACTION_QUERY_RSP:
                err = ufshcd_check_query_response(hba, lrbp);
                if (!err)
                        err = ufshcd_copy_query_response(hba, lrbp);
                break;
        case UPIU_TRANSACTION_REJECT_UPIU:
                /* TODO: handle Reject UPIU Response */
                err = -EPERM;
                dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
                                __func__);
                break;
        default:
                err = -EINVAL;
                dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
                                __func__, resp);
                break;
        }

        return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
                struct ufshcd_lrb *lrbp, int max_timeout)
{
        int err = 0;
        unsigned long time_left;
        unsigned long flags;

        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
                        msecs_to_jiffies(max_timeout));

        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->dev_cmd.complete = NULL;
        if (likely(time_left)) {
                err = ufshcd_get_tr_ocs(lrbp);
                if (!err)
                        err = ufshcd_dev_cmd_completion(hba, lrbp);
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (!time_left) {
                err = -ETIMEDOUT;
                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
                        /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
        }

        return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
        int tag;
        bool ret = false;
        unsigned long tmp;

        if (!tag_out)
                goto out;

        do {
                tmp = ~hba->lrb_in_use;
                tag = find_last_bit(&tmp, hba->nutrs);
                if (tag >= hba->nutrs)
                        goto out;
        } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

        *tag_out = tag;
        ret = true;
out:
        return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
        clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                enum dev_cmd_type cmd_type, int timeout)
{
        struct ufshcd_lrb *lrbp;
        int err;
        int tag;
        struct completion wait;
        unsigned long flags;

        /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
         */
        wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

        init_completion(&wait);
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
        if (unlikely(err))
                goto out_put_tag;

        hba->dev_cmd.complete = &wait;

        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
        return err;
}

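/*
 * Illustrative sketch (not part of the original source): issuing a NOP OUT
 * to verify the device initialization path; the dev_cmd lock serializes
 * access to the single device management slot:
 *
 *      mutex_lock(&hba->dev_cmd.lock);
 *      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *      mutex_unlock(&hba->dev_cmd.lock);
 */
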
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
                struct ufs_query_req **request, struct ufs_query_res **response,
                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
        *request = &hba->dev_cmd.query.request;
        *response = &hba->dev_cmd.query.response;
        memset(*request, 0, sizeof(struct ufs_query_req));
        memset(*response, 0, sizeof(struct ufs_query_res));
        (*request)->upiu_req.opcode = opcode;
        (*request)->upiu_req.idn = idn;
        (*request)->upiu_req.index = index;
        (*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                        enum flag_idn idn, bool *flag_res)
{
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err, index = 0, selector = 0;

        BUG_ON(!hba);

        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);

        switch (opcode) {
        case UPIU_QUERY_OPCODE_SET_FLAG:
        case UPIU_QUERY_OPCODE_CLEAR_FLAG:
        case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
                break;
        case UPIU_QUERY_OPCODE_READ_FLAG:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
                if (!flag_res) {
                        /* No dummy reads */
                        dev_err(hba->dev, "%s: Invalid argument for read request\n",
                                        __func__);
                        err = -EINVAL;
                        goto out_unlock;
                }
                break;
        default:
                dev_err(hba->dev,
                        "%s: Expected query flag opcode but got = %d\n",
                        __func__, opcode);
                err = -EINVAL;
                goto out_unlock;
        }

        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

        if (err) {
                dev_err(hba->dev,
                        "%s: Sending flag query for idn %d failed, err = %d\n",
                        __func__, idn, err);
                goto out_unlock;
        }

        if (flag_res)
                *flag_res = (be32_to_cpu(response->upiu_res.value) &
                                MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
        return err;
}

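/*
 * Illustrative sketch (not part of the original source; assumes the
 * fDeviceInit flag IDN defined in ufs.h): reading a device flag, the
 * pattern used while polling device initialization:
 *
 *      bool flag_res;
 *
 *      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *                              QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */
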
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err;

        BUG_ON(!hba);

        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
                err = -EINVAL;
                goto out;
        }

        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);

        switch (opcode) {
        case UPIU_QUERY_OPCODE_WRITE_ATTR:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
                request->upiu_req.value = cpu_to_be32(*attr_val);
                break;
        case UPIU_QUERY_OPCODE_READ_ATTR:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
                break;
        default:
                dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
                                __func__, opcode);
                err = -EINVAL;
                goto out_unlock;
        }

        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

        if (err) {
                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
                                __func__, opcode, idn, err);
                goto out_unlock;
        }

        *attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
out:
        return err;
}

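/*
 * Illustrative sketch (not part of the original source; assumes the
 * exception event status attribute IDN defined in ufs.h): reading a
 * device attribute:
 *
 *      u32 status;
 *
 *      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *                              QUERY_ATTR_IDN_EE_STATUS, 0, 0, &status);
 */
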
/**
 * ufshcd_query_descriptor - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
static int ufshcd_query_descriptor(struct ufs_hba *hba,
                        enum query_opcode opcode, enum desc_idn idn, u8 index,
                        u8 selector, u8 *desc_buf, int *buf_len)
{
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err;

        BUG_ON(!hba);

        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
                err = -EINVAL;
                goto out;
        }

        if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
                dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
                                __func__, *buf_len);
                err = -EINVAL;
                goto out;
        }

        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
        hba->dev_cmd.query.descriptor = desc_buf;
        request->upiu_req.length = cpu_to_be16(*buf_len);

        switch (opcode) {
        case UPIU_QUERY_OPCODE_WRITE_DESC:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
                break;
        case UPIU_QUERY_OPCODE_READ_DESC:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
                break;
        default:
                dev_err(hba->dev,
                                "%s: Expected query descriptor opcode but got = 0x%.2x\n",
                                __func__, opcode);
                err = -EINVAL;
                goto out_unlock;
        }

        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

        if (err) {
                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
                                __func__, opcode, idn, err);
                goto out_unlock;
        }

        hba->dev_cmd.query.descriptor = NULL;
        *buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
out:
        return err;
}

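/*
 * Illustrative sketch (not part of the original source; assumes the unit
 * descriptor IDN defined in ufs.h): @buf_len is an in/out parameter, it
 * carries the buffer size in and the length reported by the device out.
 * ufshcd_read_sdev_qdepth() uses this call in a similar way:
 *
 *      u8 buf[QUERY_DESC_MAX_SIZE];
 *      int buf_len = sizeof(buf);
 *
 *      err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *                                    QUERY_DESC_IDN_UNIT, lun, 0,
 *                                    buf, &buf_len);
 */
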
1395 /**
1396  * ufshcd_memory_alloc - allocate memory for host memory space data structures
1397  * @hba: per adapter instance
1398  *
1399  * 1. Allocate DMA memory for Command Descriptor array
1400  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
1401  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
1402  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
1403  *      (UTMRDL)
1404  * 4. Allocate memory for local reference block(lrb).
1405  *
1406  * Returns 0 for success, non-zero in case of failure
1407  */
1408 static int ufshcd_memory_alloc(struct ufs_hba *hba)
1409 {
1410         size_t utmrdl_size, utrdl_size, ucdl_size;
1411
1412         /* Allocate memory for UTP command descriptors */
1413         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
1414         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
1415                                                   ucdl_size,
1416                                                   &hba->ucdl_dma_addr,
1417                                                   GFP_KERNEL);
1418
1419         /*
1420          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
1421          * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
1422          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
1423          * be aligned to 128 bytes as well
1424          */
1425         if (!hba->ucdl_base_addr ||
1426             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
1427                 dev_err(hba->dev,
1428                         "Command Descriptor Memory allocation failed\n");
1429                 goto out;
1430         }
1431
1432         /*
1433          * Allocate memory for UTP Transfer descriptors
1434          * UFSHCI requires 1024 byte alignment of UTRD
1435          */
1436         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
1437         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
1438                                                    utrdl_size,
1439                                                    &hba->utrdl_dma_addr,
1440                                                    GFP_KERNEL);
1441         if (!hba->utrdl_base_addr ||
1442             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
1443                 dev_err(hba->dev,
1444                         "Transfer Descriptor Memory allocation failed\n");
1445                 goto out;
1446         }
1447
1448         /*
1449          * Allocate memory for UTP Task Management descriptors
1450          * UFSHCI requires 1024 byte alignment of UTMRD
1451          */
1452         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
1453         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
1454                                                     utmrdl_size,
1455                                                     &hba->utmrdl_dma_addr,
1456                                                     GFP_KERNEL);
1457         if (!hba->utmrdl_base_addr ||
1458             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
1459                 dev_err(hba->dev,
1460                 "Task Management Descriptor Memory allocation failed\n");
1461                 goto out;
1462         }
1463
1464         /* Allocate memory for local reference block */
1465         hba->lrb = devm_kzalloc(hba->dev,
1466                                 hba->nutrs * sizeof(struct ufshcd_lrb),
1467                                 GFP_KERNEL);
1468         if (!hba->lrb) {
1469                 dev_err(hba->dev, "LRB Memory allocation failed\n");
1470                 goto out;
1471         }
1472         return 0;
1473 out:
1474         return -ENOMEM;
1475 }
1476
1477 /**
1478  * ufshcd_host_memory_configure - configure local reference block with
1479  *                              memory offsets
1480  * @hba: per adapter instance
1481  *
1482  * Configure Host memory space
1483  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
1484  * address.
1485  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
1486  * and PRDT offset.
1487  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
1488  * into local reference block.
1489  */
1490 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
1491 {
1492         struct utp_transfer_cmd_desc *cmd_descp;
1493         struct utp_transfer_req_desc *utrdlp;
1494         dma_addr_t cmd_desc_dma_addr;
1495         dma_addr_t cmd_desc_element_addr;
1496         u16 response_offset;
1497         u16 prdt_offset;
1498         int cmd_desc_size;
1499         int i;
1500
1501         utrdlp = hba->utrdl_base_addr;
1502         cmd_descp = hba->ucdl_base_addr;
1503
1504         response_offset =
1505                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
1506         prdt_offset =
1507                 offsetof(struct utp_transfer_cmd_desc, prd_table);
1508
1509         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
1510         cmd_desc_dma_addr = hba->ucdl_dma_addr;
1511
1512         for (i = 0; i < hba->nutrs; i++) {
1513                 /* Configure UTRD with command descriptor base address */
1514                 cmd_desc_element_addr =
1515                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
1516                 utrdlp[i].command_desc_base_addr_lo =
1517                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
1518                 utrdlp[i].command_desc_base_addr_hi =
1519                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
1520
1521                 /* Response upiu and prdt offset should be in double words */
1522                 utrdlp[i].response_upiu_offset =
1523                                 cpu_to_le16((response_offset >> 2));
1524                 utrdlp[i].prd_table_offset =
1525                                 cpu_to_le16((prdt_offset >> 2));
1526                 utrdlp[i].response_upiu_length =
1527                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
1528
1529                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
1530                 hba->lrb[i].ucd_req_ptr =
1531                         (struct utp_upiu_req *)(cmd_descp + i);
1532                 hba->lrb[i].ucd_rsp_ptr =
1533                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
1534                 hba->lrb[i].ucd_prdt_ptr =
1535                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
1536         }
1537 }
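
/*
 * Worked example (illustrative, with a made-up offset): if response_upiu
 * sat 256 bytes into struct utp_transfer_cmd_desc, the UTRD would store
 * 256 >> 2 = 64 double words, and a reader would recover the byte offset
 * with
 *
 *	offset = le16_to_cpu(utrdlp[i].response_upiu_offset) << 2;
 */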
1538
1539 /**
1540  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
1541  * @hba: per adapter instance
1542  *
1543  * The UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro
1544  * layer in order to initiate the link startup procedure.
1545  * Once the Unipro link is up, the device connected to the controller
1546  * is detected.
1547  *
1548  * Returns 0 on success, non-zero value on failure
1549  */
1550 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
1551 {
1552         struct uic_command uic_cmd = {0};
1553         int ret;
1554
1555         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
1556
1557         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1558         if (ret)
1559                 dev_err(hba->dev,
1560                         "dme-link-startup: error code %d\n", ret);
1561         return ret;
1562 }
1563
1564 /**
1565  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
1566  * @hba: per adapter instance
1567  * @attr_sel: uic command argument1
1568  * @attr_set: attribute set type as uic command argument2
1569  * @mib_val: setting value as uic command argument3
1570  * @peer: indicate whether peer or local
1571  *
1572  * Returns 0 on success, non-zero value on failure
1573  */
1574 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
1575                         u8 attr_set, u32 mib_val, u8 peer)
1576 {
1577         struct uic_command uic_cmd = {0};
1578         static const char *const action[] = {
1579                 "dme-set",
1580                 "dme-peer-set"
1581         };
1582         const char *set = action[!!peer];
1583         int ret;
1584
1585         uic_cmd.command = peer ?
1586                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
1587         uic_cmd.argument1 = attr_sel;
1588         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
1589         uic_cmd.argument3 = mib_val;
1590
1591         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1592         if (ret)
1593                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
1594                         set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
1595
1596         return ret;
1597 }
1598 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
1599
1600 /**
1601  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
1602  * @hba: per adapter instance
1603  * @attr_sel: uic command argument1
1604  * @mib_val: the value of the attribute as returned by the UIC command
1605  * @peer: indicate whether peer or local
1606  *
1607  * Returns 0 on success, non-zero value on failure
1608  */
1609 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
1610                         u32 *mib_val, u8 peer)
1611 {
1612         struct uic_command uic_cmd = {0};
1613         static const char *const action[] = {
1614                 "dme-get",
1615                 "dme-peer-get"
1616         };
1617         const char *get = action[!!peer];
1618         int ret;
1619
1620         uic_cmd.command = peer ?
1621                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
1622         uic_cmd.argument1 = attr_sel;
1623
1624         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1625         if (ret) {
1626                 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
1627                         get, UIC_GET_ATTR_ID(attr_sel), ret);
1628                 goto out;
1629         }
1630
1631         if (mib_val)
1632                 *mib_val = uic_cmd.argument3;
1633 out:
1634         return ret;
1635 }
1636 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
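
/*
 * Usage sketch (illustrative only): callers normally go through the
 * ufshcd_dme_set()/ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers used
 * elsewhere in this file, e.g. reading the local and peer HS gear
 * capabilities:
 *
 *	u32 local_gear = 0, peer_gear = 0;
 *	int err;
 *
 *	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &local_gear);
 *	if (!err)
 *		err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
 *					  &peer_gear);
 */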
1637
1638 /**
1639  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1640  *                              using DME_SET primitives.
1641  * @hba: per adapter instance
1642  * @mode: power mode value
1643  *
1644  * Returns 0 on success, non-zero value on failure
1645  */
1646 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1647 {
1648         struct uic_command uic_cmd = {0};
1649         struct completion pwr_done;
1650         unsigned long flags;
1651         u8 status;
1652         int ret;
1653
1654         uic_cmd.command = UIC_CMD_DME_SET;
1655         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1656         uic_cmd.argument3 = mode;
1657         init_completion(&pwr_done);
1658
1659         mutex_lock(&hba->uic_cmd_mutex);
1660
1661         spin_lock_irqsave(hba->host->host_lock, flags);
1662         hba->pwr_done = &pwr_done;
1663         spin_unlock_irqrestore(hba->host->host_lock, flags);
1664         ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
1665         if (ret) {
1666                 dev_err(hba->dev,
1667                         "pwr mode change with mode 0x%x uic error %d\n",
1668                         mode, ret);
1669                 goto out;
1670         }
1671
1672         if (!wait_for_completion_timeout(hba->pwr_done,
1673                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
1674                 dev_err(hba->dev,
1675                         "pwr mode change with mode 0x%x completion timeout\n",
1676                         mode);
1677                 ret = -ETIMEDOUT;
1678                 goto out;
1679         }
1680
1681         status = ufshcd_get_upmcrs(hba);
1682         if (status != PWR_LOCAL) {
1683                 dev_err(hba->dev,
1684                         "pwr mode change failed, host upmcrs:0x%x\n",
1685                         status);
1686                 ret = (status != PWR_OK) ? status : -1;
1687         }
1688 out:
1689         spin_lock_irqsave(hba->host->host_lock, flags);
1690         hba->pwr_done = NULL;
1691         spin_unlock_irqrestore(hba->host->host_lock, flags);
1692         mutex_unlock(&hba->uic_cmd_mutex);
1693         return ret;
1694 }
1695
1696 /**
1697  * ufshcd_config_max_pwr_mode - Set & Change power mode with
1698  *      maximum capability attribute information.
1699  * @hba: per adapter instance
1700  *
1701  * Returns 0 on success, non-zero value on failure
1702  */
1703 static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
1704 {
1705         enum {RX = 0, TX = 1};
1706         u32 lanes[] = {1, 1};
1707         u32 gear[] = {1, 1};
1708         u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
1709         int ret;
1710
1711         /* Get the connected lane count */
1712         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
1713         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
1714
1715         /*
1716          * First, get the maximum HS gear. A zero value means there is no
1717          * HS gear capability, so fall back to the maximum PWM gear and
1718          * use a SLOWAUTO power mode instead.
1719          */
1720         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
1721         if (!gear[RX]) {
1722                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
1723                 pwr[RX] = SLOWAUTO_MODE;
1724         }
1725
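        /* the peer's max RX gear bounds what the local side may transmit */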
1726         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
1727         if (!gear[TX]) {
1728                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1729                                     &gear[TX]);
1730                 pwr[TX] = SLOWAUTO_MODE;
1731         }
1732
1733         /*
1734          * Configure attributes for power mode change with below.
1735          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
1736          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
1737          * - PA_HSSERIES
1738          */
1739         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
1740         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
1741         if (pwr[RX] == FASTAUTO_MODE)
1742                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
1743
1744         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
1745         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
1746         if (pwr[TX] == FASTAUTO_MODE)
1747                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
1748
1749         if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
1750                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
1751
1752         ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
1753         if (ret)
1754                 dev_err(hba->dev,
1755                         "pwr_mode: power mode change failed %d\n", ret);
1756
1757         return ret;
1758 }
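
/*
 * Note on the mode byte (illustrative): the upper nibble carries the RX
 * mode and the lower nibble the TX mode, so with FASTAUTO_MODE = 4 on
 * both directions the PA_PWRMODE argument above evaluates to
 *
 *	(FASTAUTO_MODE << 4) | FASTAUTO_MODE == 0x44
 */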
1759
1760 /**
1761  * ufshcd_complete_dev_init() - checks device readiness
1762  * @hba: per-adapter instance
1763  *
1764  * Set the fDeviceInit flag and poll until the device toggles it.
1765  */
1766 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1767 {
1768         int i, retries, err = 0;
1769         bool flag_res = true;
1770
1771         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1772                 /* Set the fDeviceInit flag */
1773                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1774                                         QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1775                 if (!err || err == -ETIMEDOUT)
1776                         break;
1777                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1778         }
1779         if (err) {
1780                 dev_err(hba->dev,
1781                         "%s setting fDeviceInit flag failed with error %d\n",
1782                         __func__, err);
1783                 goto out;
1784         }
1785
1786         /* poll for max. 100 iterations for fDeviceInit flag to clear */
1787         for (i = 0; i < 100 && !err && flag_res; i++) {
1788                 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1789                         err = ufshcd_query_flag(hba,
1790                                         UPIU_QUERY_OPCODE_READ_FLAG,
1791                                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
1792                         if (!err || err == -ETIMEDOUT)
1793                                 break;
1794                         dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
1795                                         err);
1796                 }
1797         }
1798         if (err)
1799                 dev_err(hba->dev,
1800                         "%s reading fDeviceInit flag failed with error %d\n",
1801                         __func__, err);
1802         else if (flag_res)
1803                 dev_err(hba->dev,
1804                         "%s fDeviceInit was not cleared by the device\n",
1805                         __func__);
1806
1807 out:
1808         return err;
1809 }
1810
1811 /**
1812  * ufshcd_make_hba_operational - Make UFS controller operational
1813  * @hba: per adapter instance
1814  *
1815  * To bring UFS host controller to operational state,
1816  * 1. Enable required interrupts
1817  * 2. Configure interrupt aggregation
1818  * 3. Program the UTRL and UTMRL base addresses
1819  * 4. Configure run-stop-registers
1820  *
1821  * Returns 0 on success, non-zero value on failure
1822  */
1823 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
1824 {
1825         int err = 0;
1826         u32 reg;
1827
1828         /* Enable required interrupts */
1829         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
1830
1831         /* Configure interrupt aggregation */
1832         ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
1833
1834         /* Configure UTRL and UTMRL base address registers */
1835         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
1836                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
1837         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
1838                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
1839         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
1840                         REG_UTP_TASK_REQ_LIST_BASE_L);
1841         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
1842                         REG_UTP_TASK_REQ_LIST_BASE_H);
1843
1844         /*
1845          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
1846          * DEI, HEI bits must be 0
1847          */
1848         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
1849         if (!(ufshcd_get_lists_status(reg))) {
1850                 ufshcd_enable_run_stop_reg(hba);
1851         } else {
1852                 dev_err(hba->dev,
1853                         "Host controller not ready to process requests");
1854                 err = -EIO;
1855                 goto out;
1856         }
1857
1858 out:
1859         return err;
1860 }
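
/*
 * Worked example (illustrative): for a 64-bit list base address such as
 * 0x0000000123456000, the register writes above split it as
 *
 *	lower_32_bits() -> 0x23456000 -> REG_UTP_TRANSFER_REQ_LIST_BASE_L
 *	upper_32_bits() -> 0x00000001 -> REG_UTP_TRANSFER_REQ_LIST_BASE_H
 */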
1861
1862 /**
1863  * ufshcd_hba_enable - initialize the controller
1864  * @hba: per adapter instance
1865  *
1866  * The controller resets itself and controller firmware initialization
1867  * sequence kicks off. When controller is ready it will set
1868  * the Host Controller Enable bit to 1.
1869  *
1870  * Returns 0 on success, non-zero value on failure
1871  */
1872 static int ufshcd_hba_enable(struct ufs_hba *hba)
1873 {
1874         int retry;
1875
1876         /*
1877          * msleep of 1 and 5 used in this function might result in msleep(20),
1878          * but it was necessary to send the UFS FPGA to reset mode during
1879          * development and testing of this driver. msleep can be changed to
1880          * mdelay and retry count can be reduced based on the controller.
1881          */
1882         if (!ufshcd_is_hba_active(hba)) {
1883
1884                 /* change controller state to "reset state" */
1885                 ufshcd_hba_stop(hba);
1886
1887                 /*
1888                  * This delay is based on the testing done with UFS host
1889                  * controller FPGA. The delay can be changed based on the
1890                  * host controller used.
1891                  */
1892                 msleep(5);
1893         }
1894
1895         if (hba->vops && hba->vops->hce_enable_notify)
1896                 hba->vops->hce_enable_notify(hba, PRE_CHANGE);
1897
1898         /* start controller initialization sequence */
1899         ufshcd_hba_start(hba);
1900
1901         /*
1902          * To initialize a UFS host controller the HCE bit must be set to 1.
1903          * During initialization the HCE bit value changes from 1->0->1.
1904          * When the host controller completes the initialization sequence
1905          * it sets HCE back to 1, and the same bit is read back to check
1906          * whether initialization has completed. Without this delay, the
1907          * HCE = 1 written above might be read back before the controller
1908          * has cleared it, falsely indicating completion.
1909          * This delay can be changed based on the controller.
1910          */
1911         msleep(1);
1912
1913         /* wait for the host controller to complete initialization */
1914         retry = 10;
1915         while (ufshcd_is_hba_active(hba)) {
1916                 if (retry) {
1917                         retry--;
1918                 } else {
1919                         dev_err(hba->dev,
1920                                 "Controller enable failed\n");
1921                         return -EIO;
1922                 }
1923                 msleep(5);
1924         }
1925
1926         if (hba->vops && hba->vops->hce_enable_notify)
1927                 hba->vops->hce_enable_notify(hba, POST_CHANGE);
1928
1929         return 0;
1930 }
1931
1932 /**
1933  * ufshcd_link_startup - Initialize unipro link startup
1934  * @hba: per adapter instance
1935  *
1936  * Returns 0 for success, non-zero in case of failure
1937  */
1938 static int ufshcd_link_startup(struct ufs_hba *hba)
1939 {
1940         int ret;
1941
1942         /* enable UIC related interrupts */
1943         ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
1944
1945         if (hba->vops && hba->vops->link_startup_notify)
1946                 hba->vops->link_startup_notify(hba, PRE_CHANGE);
1947
1948         ret = ufshcd_dme_link_startup(hba);
1949         if (ret)
1950                 goto out;
1951
1952         /* check if device is detected by inter-connect layer */
1953         if (!ufshcd_is_device_present(hba)) {
1954                 dev_err(hba->dev, "%s: Device not present\n", __func__);
1955                 ret = -ENXIO;
1956                 goto out;
1957         }
1958
1959         /* Include any host controller configuration via UIC commands */
1960         if (hba->vops && hba->vops->link_startup_notify) {
1961                 ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
1962                 if (ret)
1963                         goto out;
1964         }
1965
1966         ret = ufshcd_make_hba_operational(hba);
1967 out:
1968         if (ret)
1969                 dev_err(hba->dev, "link startup failed %d\n", ret);
1970         return ret;
1971 }
1972
1973 /**
1974  * ufshcd_verify_dev_init() - Verify device initialization
1975  * @hba: per-adapter instance
1976  *
1977  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
1978  * device Transport Protocol (UTP) layer is ready after a reset.
1979  * If the UTP layer at the device side is not initialized, it may
1980  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
1981  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
1982  */
1983 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1984 {
1985         int err = 0;
1986         int retries;
1987
1988         mutex_lock(&hba->dev_cmd.lock);
1989         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1990                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1991                                                NOP_OUT_TIMEOUT);
1992
1993                 if (!err || err == -ETIMEDOUT)
1994                         break;
1995
1996                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1997         }
1998         mutex_unlock(&hba->dev_cmd.lock);
1999
2000         if (err)
2001                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
2002         return err;
2003 }
2004
2005 /**
2006  * ufshcd_slave_alloc - handle initial SCSI device configurations
2007  * @sdev: pointer to SCSI device
2008  *
2009  * Always returns zero (success).
2010  */
2011 static int ufshcd_slave_alloc(struct scsi_device *sdev)
2012 {
2013         struct ufs_hba *hba;
2014         int lun_qdepth;
2015
2016         hba = shost_priv(sdev->host);
2017         sdev->tagged_supported = 1;
2018
2019         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
2020         sdev->use_10_for_ms = 1;
2021         scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2022
2023         /* allow SCSI layer to restart the device in case of errors */
2024         sdev->allow_restart = 1;
2025
2026         /* REPORT SUPPORTED OPERATION CODES is not supported */
2027         sdev->no_report_opcodes = 1;
2028
2029         lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
2030         if (lun_qdepth <= 0)
2031                 /* eventually, we can figure out the real queue depth */
2032                 lun_qdepth = hba->nutrs;
2033         else
2034                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2035
2036         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2037                         __func__, lun_qdepth);
2038         scsi_activate_tcq(sdev, lun_qdepth);
2039
2040         return 0;
2041 }
2042
2043 /**
2044  * ufshcd_change_queue_depth - change queue depth
2045  * @sdev: pointer to SCSI device
2046  * @depth: required depth to set
2047  * @reason: reason for changing the depth
2048  *
2049  * Change the queue depth according to the reason and make sure
2050  * the maximum limit is not exceeded.
2051  */
2052 static int ufshcd_change_queue_depth(struct scsi_device *sdev,
2053                 int depth, int reason)
2054 {
2055         struct ufs_hba *hba = shost_priv(sdev->host);
2056
2057         if (depth > hba->nutrs)
2058                 depth = hba->nutrs;
2059
2060         switch (reason) {
2061         case SCSI_QDEPTH_DEFAULT:
2062         case SCSI_QDEPTH_RAMP_UP:
2063                 if (!sdev->tagged_supported)
2064                         depth = 1;
2065                 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2066                 break;
2067         case SCSI_QDEPTH_QFULL:
2068                 scsi_track_queue_full(sdev, depth);
2069                 break;
2070         default:
2071                 return -EOPNOTSUPP;
2072         }
2073
2074         return depth;
2075 }
2076
2077 /**
2078  * ufshcd_slave_configure - adjust SCSI device configurations
2079  * @sdev: pointer to SCSI device
2080  */
2081 static int ufshcd_slave_configure(struct scsi_device *sdev)
2082 {
2083         struct request_queue *q = sdev->request_queue;
2084
2085         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2086         blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
2087
2088         return 0;
2089 }
2090
2091 /**
2092  * ufshcd_slave_destroy - remove SCSI device configurations
2093  * @sdev: pointer to SCSI device
2094  */
2095 static void ufshcd_slave_destroy(struct scsi_device *sdev)
2096 {
2097         struct ufs_hba *hba;
2098
2099         hba = shost_priv(sdev->host);
2100         scsi_deactivate_tcq(sdev, hba->nutrs);
2101 }
2102
2103 /**
2104  * ufshcd_task_req_compl - handle task management request completion
2105  * @hba: per adapter instance
2106  * @index: index of the completed request
2107  * @resp: task management service response
2108  *
2109  * Returns non-zero value on error, zero on success
2110  */
2111 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
2112 {
2113         struct utp_task_req_desc *task_req_descp;
2114         struct utp_upiu_task_rsp *task_rsp_upiup;
2115         unsigned long flags;
2116         int ocs_value;
2117         int task_result;
2118
2119         spin_lock_irqsave(hba->host->host_lock, flags);
2120
2121         /* Clear completed tasks from outstanding_tasks */
2122         __clear_bit(index, &hba->outstanding_tasks);
2123
2124         task_req_descp = hba->utmrdl_base_addr;
2125         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
2126
2127         if (ocs_value == OCS_SUCCESS) {
2128                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
2129                                 task_req_descp[index].task_rsp_upiu;
2130                 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
2131                 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
2132                 if (resp)
2133                         *resp = (u8)task_result;
2134         } else {
2135                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
2136                                 __func__, ocs_value);
2137         }
2138         spin_unlock_irqrestore(hba->host->host_lock, flags);
2139
2140         return ocs_value;
2141 }
2142
2143 /**
2144  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
2145  * @lrb: pointer to local reference block of completed command
2146  * @scsi_status: SCSI command status
2147  *
2148  * Returns value based on SCSI command status.
2149  */
2150 static inline int
2151 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
2152 {
2153         int result = 0;
2154
2155         switch (scsi_status) {
2156         case SAM_STAT_CHECK_CONDITION:
2157                 ufshcd_copy_sense_data(lrbp);
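                /* fall through - CHECK CONDITION is still completed with DID_OK */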
2158         case SAM_STAT_GOOD:
2159                 result |= DID_OK << 16 |
2160                           COMMAND_COMPLETE << 8 |
2161                           scsi_status;
2162                 break;
2163         case SAM_STAT_TASK_SET_FULL:
2164         case SAM_STAT_BUSY:
2165         case SAM_STAT_TASK_ABORTED:
2166                 ufshcd_copy_sense_data(lrbp);
2167                 result |= scsi_status;
2168                 break;
2169         default:
2170                 result |= DID_ERROR << 16;
2171                 break;
2172         } /* end of switch */
2173
2174         return result;
2175 }
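
/*
 * Layout note (illustrative): the result word assembled above follows the
 * standard SCSI packing - status in the low byte, message byte at bit 8,
 * host byte at bit 16 - so a CHECK CONDITION completion reads as
 *
 *	DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION
 */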
2176
2177 /**
2178  * ufshcd_transfer_rsp_status - Get overall status of the response
2179  * @hba: per adapter instance
2180  * @lrb: pointer to local reference block of completed command
2181  *
2182  * Returns result of the command to notify SCSI midlayer
2183  */
2184 static inline int
2185 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2186 {
2187         int result = 0;
2188         int scsi_status;
2189         int ocs;
2190
2191         /* overall command status of utrd */
2192         ocs = ufshcd_get_tr_ocs(lrbp);
2193
2194         switch (ocs) {
2195         case OCS_SUCCESS:
2196                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2197
2198                 switch (result) {
2199                 case UPIU_TRANSACTION_RESPONSE:
2200                         /*
2201                          * get the response UPIU result to extract
2202                          * the SCSI command status
2203                          */
2204                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
2205
2206                         /*
2207                          * get the result based on SCSI status response
2208                          * to notify the SCSI midlayer of the command status
2209                          */
2210                         scsi_status = result & MASK_SCSI_STATUS;
2211                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
2212
2213                         if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
2214                                 schedule_work(&hba->eeh_work);
2215                         break;
2216                 case UPIU_TRANSACTION_REJECT_UPIU:
2217                         /* TODO: handle Reject UPIU Response */
2218                         result = DID_ERROR << 16;
2219                         dev_err(hba->dev,
2220                                 "Reject UPIU not fully implemented\n");
2221                         break;
2222                 default:
2223                         dev_err(hba->dev,
2224                                 "Unexpected request response code = %x\n",
2225                                 result);
2226                         result = DID_ERROR << 16;
2227                         break;
2228                 }
2229                 break;
2230         case OCS_ABORTED:
2231                 result |= DID_ABORT << 16;
2232                 break;
2233         case OCS_INVALID_COMMAND_STATUS:
2234                 result |= DID_REQUEUE << 16;
2235                 break;
2236         case OCS_INVALID_CMD_TABLE_ATTR:
2237         case OCS_INVALID_PRDT_ATTR:
2238         case OCS_MISMATCH_DATA_BUF_SIZE:
2239         case OCS_MISMATCH_RESP_UPIU_SIZE:
2240         case OCS_PEER_COMM_FAILURE:
2241         case OCS_FATAL_ERROR:
2242         default:
2243                 result |= DID_ERROR << 16;
2244                 dev_err(hba->dev,
2245                 "OCS error from controller = %x\n", ocs);
2246                 break;
2247         } /* end of switch */
2248
2249         return result;
2250 }
2251
2252 /**
2253  * ufshcd_uic_cmd_compl - handle completion of uic command
2254  * @hba: per adapter instance
2255  * @intr_status: interrupt status generated by the controller
2256  */
2257 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
2258 {
2259         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
2260                 hba->active_uic_cmd->argument2 |=
2261                         ufshcd_get_uic_cmd_result(hba);
2262                 hba->active_uic_cmd->argument3 =
2263                         ufshcd_get_dme_attr_val(hba);
2264                 complete(&hba->active_uic_cmd->done);
2265         }
2266
2267         if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
2268                 complete(hba->pwr_done);
2269 }
2270
2271 /**
2272  * ufshcd_transfer_req_compl - handle SCSI and query command completion
2273  * @hba: per adapter instance
2274  */
2275 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
2276 {
2277         struct ufshcd_lrb *lrbp;
2278         struct scsi_cmnd *cmd;
2279         unsigned long completed_reqs;
2280         u32 tr_doorbell;
2281         int result;
2282         int index;
2283
2284         /* Resetting the interrupt aggregation counters first and reading the
2285          * doorbell afterward allows us to handle all the completed requests.
2286          * To avoid starving other interrupts, the doorbell is read only once
2287          * after the reset. The downside is the possibility of a false
2288          * interrupt if the device completes another request after the
2289          * aggregation is reset but before the doorbell is read.
2290          */
2291         ufshcd_reset_intr_aggr(hba);
2292
2293         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2294         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
2295
2296         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
2297                 lrbp = &hba->lrb[index];
2298                 cmd = lrbp->cmd;
2299                 if (cmd) {
2300                         result = ufshcd_transfer_rsp_status(hba, lrbp);
2301                         scsi_dma_unmap(cmd);
2302                         cmd->result = result;
2303                         /* Mark completed command as NULL in LRB */
2304                         lrbp->cmd = NULL;
2305                         clear_bit_unlock(index, &hba->lrb_in_use);
2306                         /* Do not touch lrbp after scsi done */
2307                         cmd->scsi_done(cmd);
2308                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
2309                         if (hba->dev_cmd.complete)
2310                                 complete(hba->dev_cmd.complete);
2311                 }
2312         }
2313
2314         /* clear corresponding bits of completed commands */
2315         hba->outstanding_reqs ^= completed_reqs;
2316
2317         /* we might have free'd some tags above */
2318         wake_up(&hba->dev_cmd.tag_wq);
2319 }
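
/*
 * Bookkeeping sketch (illustrative): completed requests are the bits that
 * were outstanding but have been cleared from the doorbell, e.g.
 *
 *	outstanding_reqs = 0b1011, tr_doorbell = 0b0010
 *	completed_reqs   = 0b1011 ^ 0b0010 = 0b1001	(tags 0 and 3 done)
 *
 * and the final XOR above clears exactly those bits from outstanding_reqs.
 */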
2320
2321 /**
2322  * ufshcd_disable_ee - disable exception event
2323  * @hba: per-adapter instance
2324  * @mask: exception event to disable
2325  *
2326  * Disables exception event in the device so that the EVENT_ALERT
2327  * bit is not set.
2328  *
2329  * Returns zero on success, non-zero error value on failure.
2330  */
2331 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
2332 {
2333         int err = 0;
2334         u32 val;
2335
2336         if (!(hba->ee_ctrl_mask & mask))
2337                 goto out;
2338
2339         val = hba->ee_ctrl_mask & ~mask;
2340         val &= 0xFFFF; /* 2 bytes */
2341         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
2342                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
2343         if (!err)
2344                 hba->ee_ctrl_mask &= ~mask;
2345 out:
2346         return err;
2347 }
2348
2349 /**
2350  * ufshcd_enable_ee - enable exception event
2351  * @hba: per-adapter instance
2352  * @mask: exception event to enable
2353  *
2354  * Enable corresponding exception event in the device to allow
2355  * device to alert host in critical scenarios.
2356  *
2357  * Returns zero on success, non-zero error value on failure.
2358  */
2359 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
2360 {
2361         int err = 0;
2362         u32 val;
2363
2364         if (hba->ee_ctrl_mask & mask)
2365                 goto out;
2366
2367         val = hba->ee_ctrl_mask | mask;
2368         val &= 0xFFFF; /* 2 bytes */
2369         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
2370                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
2371         if (!err)
2372                 hba->ee_ctrl_mask |= mask;
2373 out:
2374         return err;
2375 }
2376
2377 /**
2378  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
2379  * @hba: per-adapter instance
2380  *
2381  * Allow device to manage background operations on its own. Enabling
2382  * this might lead to inconsistent latencies during normal data transfers
2383  * as the device is allowed to manage its own way of handling background
2384  * operations.
2385  *
2386  * Returns zero on success, non-zero on failure.
2387  */
2388 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
2389 {
2390         int err = 0;
2391
2392         if (hba->auto_bkops_enabled)
2393                 goto out;
2394
2395         err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2396                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
2397         if (err) {
2398                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
2399                                 __func__, err);
2400                 goto out;
2401         }
2402
2403         hba->auto_bkops_enabled = true;
2404
2405         /* No need of URGENT_BKOPS exception from the device */
2406         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2407         if (err)
2408                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
2409                                 __func__, err);
2410 out:
2411         return err;
2412 }
2413
2414 /**
2415  * ufshcd_disable_auto_bkops - block device in doing background operations
2416  * @hba: per-adapter instance
2417  *
2418  * Disabling background operations improves command response latency but
2419  * has the drawback that the device may move into a critical state where
2420  * it is not operable. Make sure to call ufshcd_enable_auto_bkops()
2421  * whenever the host is idle so that BKOPS are managed effectively
2422  * without any negative impact.
2423  *
2424  * Returns zero on success, non-zero on failure.
2425  */
2426 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
2427 {
2428         int err = 0;
2429
2430         if (!hba->auto_bkops_enabled)
2431                 goto out;
2432
2433         /*
2434          * If host-assisted BKOPS is to be used, make sure the
2435          * urgent bkops exception is allowed.
2436          */
2437         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
2438         if (err) {
2439                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
2440                                 __func__, err);
2441                 goto out;
2442         }
2443
2444         err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
2445                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
2446         if (err) {
2447                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
2448                                 __func__, err);
2449                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2450                 goto out;
2451         }
2452
2453         hba->auto_bkops_enabled = false;
2454 out:
2455         return err;
2456 }
2457
2458 /**
2459  * ufshcd_force_reset_auto_bkops - force enable of auto bkops
2460  * @hba: per adapter instance
2461  *
2462  * After a device reset the device may toggle the BKOPS_EN flag
2463  * to default value. The s/w tracking variables should be updated
2464  * as well. Do this by forcing enable of auto bkops.
2465  */
2466 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
2467 {
2468         hba->auto_bkops_enabled = false;
2469         hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
2470         ufshcd_enable_auto_bkops(hba);
2471 }
2472
2473 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
2474 {
2475         return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2476                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
2477 }
2478
2479 /**
2480  * ufshcd_urgent_bkops - handle urgent bkops exception event
2481  * @hba: per-adapter instance
2482  *
2483  * Enable fBackgroundOpsEn flag in the device to permit background
2484  * operations.
2485  */
2486 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
2487 {
2488         int err;
2489         u32 status = 0;
2490
2491         err = ufshcd_get_bkops_status(hba, &status);
2492         if (err) {
2493                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
2494                                 __func__, err);
2495                 goto out;
2496         }
2497
2498         status = status & 0xF;
2499
2500         /* handle only if status indicates performance impact or critical */
2501         if (status >= BKOPS_STATUS_PERF_IMPACT)
2502                 err = ufshcd_enable_auto_bkops(hba);
2503 out:
2504         return err;
2505 }
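
/*
 * Status sketch (illustrative): bBackgroundOpsStatus ranges from 0x0 (no
 * operations needed) to 0x3 (critically needed), so the check against
 * BKOPS_STATUS_PERF_IMPACT above enables auto-bkops only for the
 * performance-impact (0x2) and critical (0x3) levels.
 */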
2506
2507 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
2508 {
2509         return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2510                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
2511 }
2512
2513 /**
2514  * ufshcd_exception_event_handler - handle exceptions raised by device
2515  * @work: pointer to work data
2516  *
2517  * Read bExceptionEventStatus attribute from the device and handle the
2518  * exception event accordingly.
2519  */
2520 static void ufshcd_exception_event_handler(struct work_struct *work)
2521 {
2522         struct ufs_hba *hba;
2523         int err;
2524         u32 status = 0;
2525         hba = container_of(work, struct ufs_hba, eeh_work);
2526
2527         pm_runtime_get_sync(hba->dev);
2528         err = ufshcd_get_ee_status(hba, &status);
2529         if (err) {
2530                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
2531                                 __func__, err);
2532                 goto out;
2533         }
2534
2535         status &= hba->ee_ctrl_mask;
2536         if (status & MASK_EE_URGENT_BKOPS) {
2537                 err = ufshcd_urgent_bkops(hba);
2538                 if (err)
2539                         dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
2540                                         __func__, err);
2541         }
2542 out:
2543         pm_runtime_put_sync(hba->dev);
2544         return;
2545 }
2546
2547 /**
2548  * ufshcd_err_handler - handle UFS errors that require s/w attention
2549  * @work: pointer to work structure
2550  */
2551 static void ufshcd_err_handler(struct work_struct *work)
2552 {
2553         struct ufs_hba *hba;
2554         unsigned long flags;
2555         u32 err_xfer = 0;
2556         u32 err_tm = 0;
2557         int err = 0;
2558         int tag;
2559
2560         hba = container_of(work, struct ufs_hba, eh_work);
2561
2562         pm_runtime_get_sync(hba->dev);
2563
2564         spin_lock_irqsave(hba->host->host_lock, flags);
2565         if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
2566                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2567                 goto out;
2568         }
2569
2570         hba->ufshcd_state = UFSHCD_STATE_RESET;
2571         ufshcd_set_eh_in_progress(hba);
2572
2573         /* Complete requests that have door-bell cleared by h/w */
2574         ufshcd_transfer_req_compl(hba);
2575         ufshcd_tmc_handler(hba);
2576         spin_unlock_irqrestore(hba->host->host_lock, flags);
2577
2578         /* Clear pending transfer requests */
2579         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
2580                 if (ufshcd_clear_cmd(hba, tag))
2581                         err_xfer |= 1 << tag;
2582
2583         /* Clear pending task management requests */
2584         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
2585                 if (ufshcd_clear_tm_cmd(hba, tag))
2586                         err_tm |= 1 << tag;
2587
2588         /* Complete the requests that are cleared by s/w */
2589         spin_lock_irqsave(hba->host->host_lock, flags);
2590         ufshcd_transfer_req_compl(hba);
2591         ufshcd_tmc_handler(hba);
2592         spin_unlock_irqrestore(hba->host->host_lock, flags);
2593
2594         /* Fatal errors need reset */
2595         if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
2596                         ((hba->saved_err & UIC_ERROR) &&
2597                          (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
2598                 err = ufshcd_reset_and_restore(hba);
2599                 if (err) {
2600                         dev_err(hba->dev, "%s: reset and restore failed\n",
2601                                         __func__);
2602                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
2603                 }
2604                 /*
2605                  * Inform scsi mid-layer that we did reset and allow to handle
2606                  * Unit Attention properly.
2607                  */
2608                 scsi_report_bus_reset(hba->host, 0);
2609                 hba->saved_err = 0;
2610                 hba->saved_uic_err = 0;
2611         }
2612         ufshcd_clear_eh_in_progress(hba);
2613
2614 out:
2615         scsi_unblock_requests(hba->host);
2616         pm_runtime_put_sync(hba->dev);
2617 }
2618
2619 /**
2620  * ufshcd_update_uic_error - check and set fatal UIC error flags.
2621  * @hba: per-adapter instance
2622  */
2623 static void ufshcd_update_uic_error(struct ufs_hba *hba)
2624 {
2625         u32 reg;
2626
2627         /* PA_INIT_ERROR is fatal and needs UIC reset */
2628         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
2629         if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
2630                 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
2631
2632         /* UIC NL/TL/DME errors needs software retry */
2633         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
2634         if (reg)
2635                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
2636
2637         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
2638         if (reg)
2639                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
2640
2641         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
2642         if (reg)
2643                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
2644
2645         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
2646                         __func__, hba->uic_error);
2647 }
2648
2649 /**
2650  * ufshcd_check_errors - Check for errors that need s/w attention
2651  * @hba: per-adapter instance
2652  */
2653 static void ufshcd_check_errors(struct ufs_hba *hba)
2654 {
2655         bool queue_eh_work = false;
2656
2657         if (hba->errors & INT_FATAL_ERRORS)
2658                 queue_eh_work = true;
2659
2660         if (hba->errors & UIC_ERROR) {
2661                 hba->uic_error = 0;
2662                 ufshcd_update_uic_error(hba);
2663                 if (hba->uic_error)
2664                         queue_eh_work = true;
2665         }
2666
2667         if (queue_eh_work) {
2668                 /* handle fatal errors only when link is functional */
2669                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
2670                         /* block commands from scsi mid-layer */
2671                         scsi_block_requests(hba->host);
2672
2673                         /* transfer error masks to sticky bits */
2674                         hba->saved_err |= hba->errors;
2675                         hba->saved_uic_err |= hba->uic_error;
2676
2677                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
2678                         schedule_work(&hba->eh_work);
2679                 }
2680         }
2681         /*
2682          * if (!queue_eh_work) -
2683          * Other errors are either non-fatal where host recovers
2684          * itself without s/w intervention or errors that will be
2685          * handled by the SCSI core layer.
2686          */
2687 }
2688
2689 /**
2690  * ufshcd_tmc_handler - handle task management function completion
2691  * @hba: per adapter instance
2692  */
2693 static void ufshcd_tmc_handler(struct ufs_hba *hba)
2694 {
2695         u32 tm_doorbell;
2696
2697         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
2698         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
2699         wake_up(&hba->tm_wq);
2700 }
2701
2702 /**
2703  * ufshcd_sl_intr - Interrupt service routine
2704  * @hba: per adapter instance
2705  * @intr_status: contains interrupts generated by the controller
2706  */
2707 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
2708 {
2709         hba->errors = UFSHCD_ERROR_MASK & intr_status;
2710         if (hba->errors)
2711                 ufshcd_check_errors(hba);
2712
2713         if (intr_status & UFSHCD_UIC_MASK)
2714                 ufshcd_uic_cmd_compl(hba, intr_status);
2715
2716         if (intr_status & UTP_TASK_REQ_COMPL)
2717                 ufshcd_tmc_handler(hba);
2718
2719         if (intr_status & UTP_TRANSFER_REQ_COMPL)
2720                 ufshcd_transfer_req_compl(hba);
2721 }
2722
2723 /**
2724  * ufshcd_intr - Main interrupt service routine
2725  * @irq: irq number
2726  * @__hba: pointer to adapter instance
2727  *
2728  * Returns IRQ_HANDLED if the interrupt is valid,
2729  *         IRQ_NONE if it is not.
2730  */
2731 static irqreturn_t ufshcd_intr(int irq, void *__hba)
2732 {
2733         u32 intr_status;
2734         irqreturn_t retval = IRQ_NONE;
2735         struct ufs_hba *hba = __hba;
2736
2737         spin_lock(hba->host->host_lock);
2738         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
2739
2740         if (intr_status) {
2741                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
2742                 ufshcd_sl_intr(hba, intr_status);
2743                 retval = IRQ_HANDLED;
2744         }
2745         spin_unlock(hba->host->host_lock);
2746         return retval;
2747 }
2748
2749 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
2750 {
2751         int err = 0;
2752         u32 mask = 1 << tag;
2753         unsigned long flags;
2754
2755         if (!test_bit(tag, &hba->outstanding_tasks))
2756                 goto out;
2757
2758         spin_lock_irqsave(hba->host->host_lock, flags);
2759         ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
2760         spin_unlock_irqrestore(hba->host->host_lock, flags);
2761
2762         /* poll for max. 1 sec to clear door bell register by h/w */
2763         err = ufshcd_wait_for_register(hba,
2764                         REG_UTP_TASK_REQ_DOOR_BELL,
2765                         mask, 0, 1000, 1000);
2766 out:
2767         return err;
2768 }
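
/*
 * Register semantics note: UTMRLCLR is "write 0 to clear", so for
 * tag = 3 the ~(1 << tag) write above is 0xfffffff7, clearing only
 * doorbell bit 3 and leaving the other slots untouched.
 */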
2769
2770 /**
2771  * ufshcd_issue_tm_cmd - issues task management commands to controller
2772  * @hba: per adapter instance
2773  * @lun_id: LUN ID to which TM command is sent
2774  * @task_id: task ID to which the TM command is applicable
2775  * @tm_function: task management function opcode
2776  * @tm_response: task management service response return value
2777  *
2778  * Returns non-zero value on error, zero on success.
2779  */
2780 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
2781                 u8 tm_function, u8 *tm_response)
2782 {
2783         struct utp_task_req_desc *task_req_descp;
2784         struct utp_upiu_task_req *task_req_upiup;
2785         struct Scsi_Host *host;
2786         unsigned long flags;
2787         int free_slot;
2788         int err;
2789         int task_tag;
2790
2791         host = hba->host;
2792
2793         /*
2794          * Get a free slot; sleep if none is available. Even though
2795          * wait_event() can sleep indefinitely, each outstanding TM command
2796          * completes or times out within %TM_CMD_TIMEOUT, bounding the wait.
2797          */
2798         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
2799
2800         spin_lock_irqsave(host->host_lock, flags);
2801         task_req_descp = hba->utmrdl_base_addr;
2802         task_req_descp += free_slot;
2803
2804         /* Configure task request descriptor */
2805         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
2806         task_req_descp->header.dword_2 =
2807                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2808
2809         /* Configure task request UPIU */
2810         task_req_upiup =
2811                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
2812         task_tag = hba->nutrs + free_slot;
2813         task_req_upiup->header.dword_0 =
2814                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
2815                                               lun_id, task_tag);
2816         task_req_upiup->header.dword_1 =
2817                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
2818
2819         task_req_upiup->input_param1 = cpu_to_be32(lun_id);
2820         task_req_upiup->input_param2 = cpu_to_be32(task_id);
2821
2822         /* send command to the controller */
2823         __set_bit(free_slot, &hba->outstanding_tasks);
2824         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
2825
2826         spin_unlock_irqrestore(host->host_lock, flags);
2827
2828         /* wait until the task management command is completed */
2829         err = wait_event_timeout(hba->tm_wq,
2830                         test_bit(free_slot, &hba->tm_condition),
2831                         msecs_to_jiffies(TM_CMD_TIMEOUT));
2832         if (!err) {
2833                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
2834                                 __func__, tm_function);
2835                 if (ufshcd_clear_tm_cmd(hba, free_slot))
2836                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
2837                                         __func__, free_slot);
2838                 err = -ETIMEDOUT;
2839         } else {
2840                 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
2841         }
2842
2843         clear_bit(free_slot, &hba->tm_condition);
2844         ufshcd_put_tm_slot(hba, free_slot);
2845         wake_up(&hba->tm_tag_wq);
2846
2847         return err;
2848 }
2849
2850 /**
2851  * ufshcd_eh_device_reset_handler - device reset handler registered to
2852  *                                    scsi layer.
2853  * @cmd: SCSI command pointer
2854  *
2855  * Returns SUCCESS/FAILED
2856  */
2857 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
2858 {
2859         struct Scsi_Host *host;
2860         struct ufs_hba *hba;
2861         unsigned int tag;
2862         u32 pos;
2863         int err;
2864         u8 resp = 0xF;
2865         struct ufshcd_lrb *lrbp;
2866         unsigned long flags;
2867
2868         host = cmd->device->host;
2869         hba = shost_priv(host);
2870         tag = cmd->request->tag;
2871
2872         lrbp = &hba->lrb[tag];
2873         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
2874         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
2875                 if (!err)
2876                         err = resp;
2877                 goto out;
2878         }
2879
2880         /* clear the commands that were pending for corresponding LUN */
2881         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
2882                 if (hba->lrb[pos].lun == lrbp->lun) {
2883                         err = ufshcd_clear_cmd(hba, pos);
2884                         if (err)
2885                                 break;
2886                 }
2887         }
2888         spin_lock_irqsave(host->host_lock, flags);
2889         ufshcd_transfer_req_compl(hba);
2890         spin_unlock_irqrestore(host->host_lock, flags);
2891 out:
2892         if (!err) {
2893                 err = SUCCESS;
2894         } else {
2895                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
2896                 err = FAILED;
2897         }
2898         return err;
2899 }
2900
2901 /**
2902  * ufshcd_abort - abort a specific command
2903  * @cmd: SCSI command pointer
2904  *
2905  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
2906  * management command, and in the host controller by clearing the door-bell
2907  * register. There can be a race between the controller sending the command to
2908  * the device and the abort being issued; to avoid it, first issue
2909  * UFS_QUERY_TASK to check if the command was really issued, then abort it.
2910  *
2911  * Returns SUCCESS/FAILED
2912  */
2913 static int ufshcd_abort(struct scsi_cmnd *cmd)
2914 {
2915         struct Scsi_Host *host;
2916         struct ufs_hba *hba;
2917         unsigned long flags;
2918         unsigned int tag;
2919         int err = 0;
2920         int poll_cnt;
2921         u8 resp = 0xF;
2922         struct ufshcd_lrb *lrbp;
2923         u32 reg;
2924
2925         host = cmd->device->host;
2926         hba = shost_priv(host);
2927         tag = cmd->request->tag;
2928
2929         /* If command is already aborted/completed, return SUCCESS */
2930         if (!(test_bit(tag, &hba->outstanding_reqs)))
2931                 goto out;
2932
2933         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2934         if (!(reg & (1 << tag))) {
2935                 dev_err(hba->dev,
2936                 "%s: cmd was completed without a completion interrupt, tag = %d\n",
2937                 __func__, tag);
2938         }
2939
2940         lrbp = &hba->lrb[tag];
2941         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
2942                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
2943                                 UFS_QUERY_TASK, &resp);
2944                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
2945                         /* cmd pending in the device */
2946                         break;
2947                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
2948                         /*
2949                          * cmd not pending in the device, check if it is
2950                          * in transition.
2951                          */
2952                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2953                         if (reg & (1 << tag)) {
2954                                 /* sleep for max. 200us to stabilize */
2955                                 usleep_range(100, 200);
2956                                 continue;
2957                         }
2958                         /* command completed already */
2959                         goto out;
2960                 } else {
2961                         if (!err)
2962                                 err = resp; /* service response error */
2963                         goto out;
2964                 }
2965         }
2966
2967         if (!poll_cnt) {
2968                 err = -EBUSY;
2969                 goto out;
2970         }
2971
2972         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
2973                         UFS_ABORT_TASK, &resp);
2974         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
2975                 if (!err)
2976                         err = resp; /* service response error */
2977                 goto out;
2978         }
2979
2980         err = ufshcd_clear_cmd(hba, tag);
2981         if (err)
2982                 goto out;
2983
2984         scsi_dma_unmap(cmd);
2985
2986         spin_lock_irqsave(host->host_lock, flags);
2987         __clear_bit(tag, &hba->outstanding_reqs);
2988         hba->lrb[tag].cmd = NULL;
2989         spin_unlock_irqrestore(host->host_lock, flags);
2990
2991         clear_bit_unlock(tag, &hba->lrb_in_use);
2992         wake_up(&hba->dev_cmd.tag_wq);
2993 out:
2994         if (!err) {
2995                 err = SUCCESS;
2996         } else {
2997                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
2998                 err = FAILED;
2999         }
3000
3001         return err;
3002 }
3003
3004 /**
3005  * ufshcd_host_reset_and_restore - reset and restore host controller
3006  * @hba: per-adapter instance
3007  *
3008  * Note that host controller reset may issue DME_RESET to
3009  * local and remote (device) Uni-Pro stack and the attributes
3010  * are reset to default state.
3011  *
3012  * Returns zero on success, non-zero on failure
3013  */
3014 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
3015 {
3016         int err;
3017         async_cookie_t cookie;
3018         unsigned long flags;
3019
3020         /* Reset the host controller */
3021         spin_lock_irqsave(hba->host->host_lock, flags);
3022         ufshcd_hba_stop(hba);
3023         spin_unlock_irqrestore(hba->host->host_lock, flags);
3024
3025         err = ufshcd_hba_enable(hba);
3026         if (err)
3027                 goto out;
3028
3029         /* Establish the link again and restore the device */
3030         cookie = async_schedule(ufshcd_async_scan, hba);
        /*
         * Wait for the async scan to complete. async_synchronize_cookie()
         * waits for all async work scheduled *before* the given cookie,
         * hence cookie + 1 to cover the scan scheduled just above.
         */
3032         async_synchronize_cookie(++cookie);
3033         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
3034                 err = -EIO;
3035 out:
3036         if (err)
3037                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
3038
3039         return err;
3040 }
3041
3042 /**
3043  * ufshcd_reset_and_restore - reset and re-initialize host/device
3044  * @hba: per-adapter instance
3045  *
 * Reset and recover the device and host, and re-establish the link.
 * This helps recover communication in fatal error conditions.
3048  *
3049  * Returns zero on success, non-zero on failure
3050  */
3051 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
3052 {
3053         int err = 0;
3054         unsigned long flags;
3055
3056         err = ufshcd_host_reset_and_restore(hba);
3057
3058         /*
3059          * After reset the door-bell might be cleared, complete
3060          * outstanding requests in s/w here.
3061          */
3062         spin_lock_irqsave(hba->host->host_lock, flags);
3063         ufshcd_transfer_req_compl(hba);
3064         ufshcd_tmc_handler(hba);
3065         spin_unlock_irqrestore(hba->host->host_lock, flags);
3066
3067         return err;
3068 }
3069
3070 /**
3071  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
3073  *
3074  * Returns SUCCESS/FAILED
3075  */
3076 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
3077 {
3078         int err;
3079         unsigned long flags;
3080         struct ufs_hba *hba;
3081
3082         hba = shost_priv(cmd->device->host);
3083
        /*
         * Check for a race with fatal error handling and, if one is in
         * progress, wait for it to complete. Even though the fatal error
         * handler may itself reset and restore in some cases, don't
         * assume anything about its outcome; we only want to avoid the
         * race here.
         */
3090         do {
3091                 spin_lock_irqsave(hba->host->host_lock, flags);
3092                 if (!(work_pending(&hba->eh_work) ||
3093                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
3094                         break;
3095                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3096                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
3097                 flush_work(&hba->eh_work);
3098         } while (1);
3099
3100         hba->ufshcd_state = UFSHCD_STATE_RESET;
3101         ufshcd_set_eh_in_progress(hba);
3102         spin_unlock_irqrestore(hba->host->host_lock, flags);
3103
3104         err = ufshcd_reset_and_restore(hba);
3105
3106         spin_lock_irqsave(hba->host->host_lock, flags);
3107         if (!err) {
3108                 err = SUCCESS;
3109                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3110         } else {
3111                 err = FAILED;
3112                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3113         }
3114         ufshcd_clear_eh_in_progress(hba);
3115         spin_unlock_irqrestore(hba->host->host_lock, flags);
3116
3117         return err;
3118 }
3119
3120 /**
3121  * ufshcd_read_sdev_qdepth - read the lun command queue depth
3122  * @hba: Pointer to adapter instance
3123  * @sdev: pointer to SCSI device
3124  *
 * Returns the LUN's queue depth on success, or a negative error code otherwise.
3126  */
3127 static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
3128                                 struct scsi_device *sdev)
3129 {
3130         int ret;
3131         int buff_len = UNIT_DESC_MAX_SIZE;
3132         u8 desc_buf[UNIT_DESC_MAX_SIZE];
3133
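        /* Fetch the LUN's unit descriptor and read out its queue depth */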
3134         ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3135                         QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);
3136
        if (ret || buff_len <= UNIT_DESC_PARAM_LU_Q_DEPTH) {
                dev_err(hba->dev,
                        "%s: Failed reading unit descriptor. len = %d ret = %d\n",
                        __func__, buff_len, ret);
3141                 if (!ret)
3142                         ret = -EINVAL;
3143
3144                 goto out;
3145         }
3146
3147         ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
3148 out:
3149         return ret;
3150 }
3151
3152 /**
3153  * ufshcd_async_scan - asynchronous execution for link startup
3154  * @data: data pointer to pass to this function
3155  * @cookie: cookie data
3156  */
3157 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
3158 {
3159         struct ufs_hba *hba = (struct ufs_hba *)data;
3160         int ret;
3161
3162         ret = ufshcd_link_startup(hba);
3163         if (ret)
3164                 goto out;
3165
3166         ufshcd_config_max_pwr_mode(hba);
3167
3168         ret = ufshcd_verify_dev_init(hba);
3169         if (ret)
3170                 goto out;
3171
3172         ret = ufshcd_complete_dev_init(hba);
3173         if (ret)
3174                 goto out;
3175
3176         ufshcd_force_reset_auto_bkops(hba);
3177         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3178
3179         /* If we are in error handling context no need to scan the host */
3180         if (!ufshcd_eh_in_progress(hba)) {
3181                 scsi_scan_host(hba->host);
3182                 pm_runtime_put_sync(hba->dev);
3183         }
3184 out:
3185         return;
3186 }
3187
3188 static struct scsi_host_template ufshcd_driver_template = {
3189         .module                 = THIS_MODULE,
3190         .name                   = UFSHCD,
3191         .proc_name              = UFSHCD,
3192         .queuecommand           = ufshcd_queuecommand,
3193         .slave_alloc            = ufshcd_slave_alloc,
3194         .slave_configure        = ufshcd_slave_configure,
3195         .slave_destroy          = ufshcd_slave_destroy,
3196         .change_queue_depth     = ufshcd_change_queue_depth,
3197         .eh_abort_handler       = ufshcd_abort,
3198         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
3199         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
3200         .this_id                = -1,
3201         .sg_tablesize           = SG_ALL,
3202         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
3203         .can_queue              = UFSHCD_CAN_QUEUE,
3204 };
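
/*
 * The SCSI midlayer escalates error handling through the handlers
 * above in order: eh_abort_handler first, then eh_device_reset_handler,
 * and finally eh_host_reset_handler, moving to the next step only when
 * the previous one fails to recover the command.
 */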
3205
3206 static int ufshcd_config_vreg(struct device *dev,
3207                 struct ufs_vreg *vreg, bool on)
3208 {
3209         int ret = 0;
        struct regulator *reg;
        const char *name;
        int min_uV, uA_load;

        /* Don't touch vreg members before the NULL check */
        BUG_ON(!vreg);

        reg = vreg->reg;
        name = vreg->name;
3215
3216         if (regulator_count_voltages(reg) > 0) {
3217                 min_uV = on ? vreg->min_uV : 0;
3218                 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
3219                 if (ret) {
3220                         dev_err(dev, "%s: %s set voltage failed, err=%d\n",
3221                                         __func__, name, ret);
3222                         goto out;
3223                 }
3224
3225                 uA_load = on ? vreg->max_uA : 0;
3226                 ret = regulator_set_optimum_mode(reg, uA_load);
3227                 if (ret >= 0) {
3228                         /*
3229                          * regulator_set_optimum_mode() returns new regulator
3230                          * mode upon success.
3231                          */
3232                         ret = 0;
3233                 } else {
3234                         dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
3235                                         __func__, name, uA_load, ret);
3236                         goto out;
3237                 }
3238         }
3239 out:
3240         return ret;
3241 }
3242
3243 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
3244 {
3245         int ret = 0;
3246
3247         if (!vreg || vreg->enabled)
3248                 goto out;
3249
3250         ret = ufshcd_config_vreg(dev, vreg, true);
3251         if (!ret)
3252                 ret = regulator_enable(vreg->reg);
3253
3254         if (!ret)
3255                 vreg->enabled = true;
3256         else
3257                 dev_err(dev, "%s: %s enable failed, err=%d\n",
3258                                 __func__, vreg->name, ret);
3259 out:
3260         return ret;
3261 }
3262
3263 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
3264 {
3265         int ret = 0;
3266
3267         if (!vreg || !vreg->enabled)
3268                 goto out;
3269
3270         ret = regulator_disable(vreg->reg);
3271
3272         if (!ret) {
3273                 /* ignore errors on applying disable config */
3274                 ufshcd_config_vreg(dev, vreg, false);
3275                 vreg->enabled = false;
3276         } else {
3277                 dev_err(dev, "%s: %s disable failed, err=%d\n",
3278                                 __func__, vreg->name, ret);
3279         }
3280 out:
3281         return ret;
3282 }
3283
3284 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
3285 {
3286         int ret = 0;
3287         struct device *dev = hba->dev;
3288         struct ufs_vreg_info *info = &hba->vreg_info;
3289
3290         if (!info)
3291                 goto out;
3292
3293         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
3294         if (ret)
3295                 goto out;
3296
3297         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
3298         if (ret)
3299                 goto out;
3300
3301         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
3302         if (ret)
3303                 goto out;
3304
3305 out:
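        /* On any failure, roll back rails that may already have been enabled */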
3306         if (ret) {
3307                 ufshcd_toggle_vreg(dev, info->vccq2, false);
3308                 ufshcd_toggle_vreg(dev, info->vccq, false);
3309                 ufshcd_toggle_vreg(dev, info->vcc, false);
3310         }
3311         return ret;
3312 }
3313
3314 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
3315 {
3316         struct ufs_vreg_info *info = &hba->vreg_info;
3317
3318         if (info)
3319                 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
3320
3321         return 0;
3322 }
3323
3324 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
3325 {
3326         int ret = 0;
3327
3328         if (!vreg)
3329                 goto out;
3330
3331         vreg->reg = devm_regulator_get(dev, vreg->name);
3332         if (IS_ERR(vreg->reg)) {
3333                 ret = PTR_ERR(vreg->reg);
3334                 dev_err(dev, "%s: %s get failed, err=%d\n",
3335                                 __func__, vreg->name, ret);
3336         }
3337 out:
3338         return ret;
3339 }
3340
3341 static int ufshcd_init_vreg(struct ufs_hba *hba)
3342 {
3343         int ret = 0;
3344         struct device *dev = hba->dev;
3345         struct ufs_vreg_info *info = &hba->vreg_info;
3346
3347         if (!info)
3348                 goto out;
3349
3350         ret = ufshcd_get_vreg(dev, info->vcc);
3351         if (ret)
3352                 goto out;
3353
3354         ret = ufshcd_get_vreg(dev, info->vccq);
3355         if (ret)
3356                 goto out;
3357
3358         ret = ufshcd_get_vreg(dev, info->vccq2);
3359 out:
3360         return ret;
3361 }
3362
3363 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
3364 {
3365         struct ufs_vreg_info *info = &hba->vreg_info;
3366
3367         if (info)
3368                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
3369
3370         return 0;
3371 }
3372
3373 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
3374 {
3375         int ret = 0;
3376         struct ufs_clk_info *clki;
3377         struct list_head *head = &hba->clk_list_head;
3378
3379         if (!head || list_empty(head))
3380                 goto out;
3381
3382         list_for_each_entry(clki, head, list) {
3383                 if (!IS_ERR_OR_NULL(clki->clk)) {
3384                         if (on && !clki->enabled) {
3385                                 ret = clk_prepare_enable(clki->clk);
3386                                 if (ret) {
3387                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
3388                                                 __func__, clki->name, ret);
3389                                         goto out;
3390                                 }
3391                         } else if (!on && clki->enabled) {
3392                                 clk_disable_unprepare(clki->clk);
3393                         }
3394                         clki->enabled = on;
3395                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
3396                                         clki->name, on ? "en" : "dis");
3397                 }
3398         }
3399 out:
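        /* On failure, unwind any clocks already prepared and enabled */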
3400         if (ret) {
3401                 list_for_each_entry(clki, head, list) {
3402                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
3403                                 clk_disable_unprepare(clki->clk);
3404                 }
3405         }
3406         return ret;
3407 }
3408
3409 static int ufshcd_init_clocks(struct ufs_hba *hba)
3410 {
3411         int ret = 0;
3412         struct ufs_clk_info *clki;
3413         struct device *dev = hba->dev;
3414         struct list_head *head = &hba->clk_list_head;
3415
3416         if (!head || list_empty(head))
3417                 goto out;
3418
3419         list_for_each_entry(clki, head, list) {
3420                 if (!clki->name)
3421                         continue;
3422
3423                 clki->clk = devm_clk_get(dev, clki->name);
3424                 if (IS_ERR(clki->clk)) {
3425                         ret = PTR_ERR(clki->clk);
3426                         dev_err(dev, "%s: %s clk get failed, %d\n",
3427                                         __func__, clki->name, ret);
3428                         goto out;
3429                 }
3430
3431                 if (clki->max_freq) {
3432                         ret = clk_set_rate(clki->clk, clki->max_freq);
3433                         if (ret) {
3434                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
3435                                         __func__, clki->name,
3436                                         clki->max_freq, ret);
3437                                 goto out;
3438                         }
3439                 }
3440                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
3441                                 clki->name, clk_get_rate(clki->clk));
3442         }
3443 out:
3444         return ret;
3445 }
3446
3447 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
3448 {
3449         int err = 0;
3450
3451         if (!hba->vops)
3452                 goto out;
3453
3454         if (hba->vops->init) {
3455                 err = hba->vops->init(hba);
3456                 if (err)
3457                         goto out;
3458         }
3459
3460         if (hba->vops->setup_clocks) {
3461                 err = hba->vops->setup_clocks(hba, true);
3462                 if (err)
3463                         goto out_exit;
3464         }
3465
3466         if (hba->vops->setup_regulators) {
3467                 err = hba->vops->setup_regulators(hba, true);
3468                 if (err)
3469                         goto out_clks;
3470         }
3471
3472         goto out;
3473
3474 out_clks:
3475         if (hba->vops->setup_clocks)
3476                 hba->vops->setup_clocks(hba, false);
3477 out_exit:
3478         if (hba->vops->exit)
3479                 hba->vops->exit(hba);
3480 out:
3481         if (err)
3482                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
3483                         __func__, hba->vops ? hba->vops->name : "", err);
3484         return err;
3485 }
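
/*
 * Compiled-out sketch: roughly what a SoC glue driver's variant ops
 * could look like, inferred from the hba->vops callbacks used above.
 * The "foo" identifiers are hypothetical placeholders, not part of
 * this driver.
 */
#if 0
static int foo_ufs_hba_init(struct ufs_hba *hba)
{
        /* Claim SoC-specific resources (PHY, resets, ...) here */
        return 0;
}

static void foo_ufs_hba_exit(struct ufs_hba *hba)
{
        /* Release whatever foo_ufs_hba_init() acquired */
}

static const struct ufs_hba_variant_ops foo_ufs_hba_vops = {
        .name   = "foo-ufs",
        .init   = foo_ufs_hba_init,
        .exit   = foo_ufs_hba_exit,
};
#endif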
3486
3487 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
3488 {
3489         if (!hba->vops)
3490                 return;
3491
3492         if (hba->vops->setup_clocks)
3493                 hba->vops->setup_clocks(hba, false);
3494
3495         if (hba->vops->setup_regulators)
3496                 hba->vops->setup_regulators(hba, false);
3497
3498         if (hba->vops->exit)
3499                 hba->vops->exit(hba);
3500 }
3501
3502 static int ufshcd_hba_init(struct ufs_hba *hba)
3503 {
3504         int err;
3505
        /*
         * Handle host controller power separately from the UFS device power
         * rails: host controller power collapse is controlled independently
         * of UFS device power collapse, and keeping the rails separate makes
         * that easy. Also, enable host controller power before going ahead
         * with the rest of the initialization here.
         */
3513         err = ufshcd_init_hba_vreg(hba);
3514         if (err)
3515                 goto out;
3516
3517         err = ufshcd_setup_hba_vreg(hba, true);
3518         if (err)
3519                 goto out;
3520
3521         err = ufshcd_init_clocks(hba);
3522         if (err)
3523                 goto out_disable_hba_vreg;
3524
3525         err = ufshcd_setup_clocks(hba, true);
3526         if (err)
3527                 goto out_disable_hba_vreg;
3528
3529         err = ufshcd_init_vreg(hba);
3530         if (err)
3531                 goto out_disable_clks;
3532
3533         err = ufshcd_setup_vreg(hba, true);
3534         if (err)
3535                 goto out_disable_clks;
3536
3537         err = ufshcd_variant_hba_init(hba);
3538         if (err)
3539                 goto out_disable_vreg;
3540
3541         goto out;
3542
3543 out_disable_vreg:
3544         ufshcd_setup_vreg(hba, false);
3545 out_disable_clks:
3546         ufshcd_setup_clocks(hba, false);
3547 out_disable_hba_vreg:
3548         ufshcd_setup_hba_vreg(hba, false);
3549 out:
3550         return err;
3551 }
3552
3553 static void ufshcd_hba_exit(struct ufs_hba *hba)
3554 {
3555         ufshcd_variant_hba_exit(hba);
3556         ufshcd_setup_vreg(hba, false);
3557         ufshcd_setup_clocks(hba, false);
3558         ufshcd_setup_hba_vreg(hba, false);
3559 }
3560
3561 /**
3562  * ufshcd_suspend - suspend power management function
3563  * @hba: per adapter instance
3564  * @state: power state
3565  *
3566  * Returns -ENOSYS
3567  */
3568 int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
3569 {
3570         /*
3571          * TODO:
3572          * 1. Block SCSI requests from SCSI midlayer
3573          * 2. Change the internal driver state to non operational
3574          * 3. Set UTRLRSR and UTMRLRSR bits to zero
3575          * 4. Wait until outstanding commands are completed
3576          * 5. Set HCE to zero to send the UFS host controller to reset state
3577          */
3578
3579         return -ENOSYS;
3580 }
3581 EXPORT_SYMBOL_GPL(ufshcd_suspend);
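
/*
 * Compiled-out sketch of the TODO sequence above, assuming that
 * scsi_block_requests() and ufshcd_hba_stop() are the right building
 * blocks; this is an illustration, not a working implementation.
 */
#if 0
static int ufshcd_suspend_sketch(struct ufs_hba *hba)
{
        unsigned long flags;

        /* 1. Block SCSI requests from the SCSI midlayer */
        scsi_block_requests(hba->host);

        spin_lock_irqsave(hba->host->host_lock, flags);
        /* 2. Mark the driver state non-operational */
        hba->ufshcd_state = UFSHCD_STATE_RESET;
        /*
         * 3-5. Stop the host controller (HCE = 0); waiting for
         * outstanding commands (step 4) is elided in this sketch.
         */
        ufshcd_hba_stop(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return 0;
}
#endif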
3582
3583 /**
3584  * ufshcd_resume - resume power management function
3585  * @hba: per adapter instance
3586  *
3587  * Returns -ENOSYS
3588  */
3589 int ufshcd_resume(struct ufs_hba *hba)
3590 {
3591         /*
3592          * TODO:
3593          * 1. Set HCE to 1, to start the UFS host controller
3594          * initialization process
3595          * 2. Set UTRLRSR and UTMRLRSR bits to 1
3596          * 3. Change the internal driver state to operational
3597          * 4. Unblock SCSI requests from SCSI midlayer
3598          */
3599
3600         return -ENOSYS;
3601 }
3602 EXPORT_SYMBOL_GPL(ufshcd_resume);
3603
3604 int ufshcd_runtime_suspend(struct ufs_hba *hba)
3605 {
3606         if (!hba)
3607                 return 0;
3608
3609         /*
3610          * The device is idle with no requests in the queue,
3611          * allow background operations.
3612          */
3613         return ufshcd_enable_auto_bkops(hba);
3614 }
3615 EXPORT_SYMBOL(ufshcd_runtime_suspend);
3616
3617 int ufshcd_runtime_resume(struct ufs_hba *hba)
3618 {
3619         if (!hba)
3620                 return 0;
3621
3622         return ufshcd_disable_auto_bkops(hba);
3623 }
3624 EXPORT_SYMBOL(ufshcd_runtime_resume);
3625
3626 int ufshcd_runtime_idle(struct ufs_hba *hba)
3627 {
3628         return 0;
3629 }
3630 EXPORT_SYMBOL(ufshcd_runtime_idle);
3631
3632 /**
 * ufshcd_remove - de-allocate SCSI host and host memory space data structures
 * @hba: per adapter instance
3636  */
3637 void ufshcd_remove(struct ufs_hba *hba)
3638 {
3639         scsi_remove_host(hba->host);
3640         /* disable interrupts */
3641         ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);

        /*
         * Release hba resources before dropping the last host reference:
         * scsi_host_put() may free the Scsi_Host, and the embedded
         * ufs_hba with it, once the refcount hits zero.
         */
        ufshcd_hba_exit(hba);

        scsi_host_put(hba->host);
3647 }
3648 EXPORT_SYMBOL_GPL(ufshcd_remove);
3649
3650 /**
3651  * ufshcd_set_dma_mask - Set dma mask based on the controller
3652  *                       addressing capability
3653  * @hba: per adapter instance
3654  *
3655  * Returns 0 for success, non-zero for failure
3656  */
3657 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
3658 {
3659         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
3660                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
3661                         return 0;
3662         }
3663         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
3664 }
3665
3666 /**
3667  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3668  * @dev: pointer to device handle
3669  * @hba_handle: driver private handle
3670  * Returns 0 on success, non-zero value on failure
3671  */
3672 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
3673 {
3674         struct Scsi_Host *host;
3675         struct ufs_hba *hba;
3676         int err = 0;
3677
3678         if (!dev) {
                dev_err(dev, "Invalid device handle, dev is NULL\n");
3681                 err = -ENODEV;
3682                 goto out_error;
3683         }
3684
3685         host = scsi_host_alloc(&ufshcd_driver_template,
3686                                 sizeof(struct ufs_hba));
3687         if (!host) {
3688                 dev_err(dev, "scsi_host_alloc failed\n");
3689                 err = -ENOMEM;
3690                 goto out_error;
3691         }
3692         hba = shost_priv(host);
3693         hba->host = host;
3694         hba->dev = dev;
3695         *hba_handle = hba;
3696
3697 out_error:
3698         return err;
3699 }
3700 EXPORT_SYMBOL(ufshcd_alloc_host);
3701
3702 /**
3703  * ufshcd_init - Driver initialization routine
3704  * @hba: per-adapter instance
3705  * @mmio_base: base register address
3706  * @irq: Interrupt line of device
3707  * Returns 0 on success, non-zero value on failure
3708  */
3709 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
3710 {
3711         int err;
3712         struct Scsi_Host *host = hba->host;
3713         struct device *dev = hba->dev;
3714
3715         if (!mmio_base) {
                dev_err(hba->dev,
                        "Invalid memory reference, mmio_base is NULL\n");
3718                 err = -ENODEV;
3719                 goto out_error;
3720         }
3721
3722         hba->mmio_base = mmio_base;
3723         hba->irq = irq;
3724
3725         err = ufshcd_hba_init(hba);
3726         if (err)
3727                 goto out_error;
3728
3729         /* Read capabilities registers */
3730         ufshcd_hba_capabilities(hba);
3731
3732         /* Get UFS version supported by the controller */
3733         hba->ufs_version = ufshcd_get_ufs_version(hba);
3734
3735         /* Get Interrupt bit mask per version */
3736         hba->intr_mask = ufshcd_get_intr_mask(hba);
3737
3738         err = ufshcd_set_dma_mask(hba);
3739         if (err) {
3740                 dev_err(hba->dev, "set dma mask failed\n");
3741                 goto out_disable;
3742         }
3743
3744         /* Allocate memory for host memory space */
3745         err = ufshcd_memory_alloc(hba);
3746         if (err) {
3747                 dev_err(hba->dev, "Memory allocation failed\n");
3748                 goto out_disable;
3749         }
3750
3751         /* Configure LRB */
3752         ufshcd_host_memory_configure(hba);
3753
3754         host->can_queue = hba->nutrs;
3755         host->cmd_per_lun = hba->nutrs;
3756         host->max_id = UFSHCD_MAX_ID;
3757         host->max_lun = UFSHCD_MAX_LUNS;
3758         host->max_channel = UFSHCD_MAX_CHANNEL;
3759         host->unique_id = host->host_no;
3760         host->max_cmd_len = MAX_CDB_SIZE;
3761
        /* Initialize wait queue for task management */
3763         init_waitqueue_head(&hba->tm_wq);
3764         init_waitqueue_head(&hba->tm_tag_wq);
3765
3766         /* Initialize work queues */
3767         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
3768         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
3769
3770         /* Initialize UIC command mutex */
3771         mutex_init(&hba->uic_cmd_mutex);
3772
3773         /* Initialize mutex for device management commands */
3774         mutex_init(&hba->dev_cmd.lock);
3775
3776         /* Initialize device management tag acquire wait queue */
3777         init_waitqueue_head(&hba->dev_cmd.tag_wq);
3778
3779         /* IRQ registration */
3780         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
3781         if (err) {
3782                 dev_err(hba->dev, "request irq failed\n");
3783                 goto out_disable;
3784         }
3785
3786         /* Enable SCSI tag mapping */
3787         err = scsi_init_shared_tag_map(host, host->can_queue);
3788         if (err) {
3789                 dev_err(hba->dev, "init shared queue failed\n");
3790                 goto out_disable;
3791         }
3792
3793         err = scsi_add_host(host, hba->dev);
3794         if (err) {
3795                 dev_err(hba->dev, "scsi_add_host failed\n");
3796                 goto out_disable;
3797         }
3798
3799         /* Host controller enable */
3800         err = ufshcd_hba_enable(hba);
3801         if (err) {
3802                 dev_err(hba->dev, "Host controller enable failed\n");
3803                 goto out_remove_scsi_host;
3804         }
3805
3806         /* Hold auto suspend until async scan completes */
3807         pm_runtime_get_sync(dev);
3808
3809         async_schedule(ufshcd_async_scan, hba);
3810
3811         return 0;
3812
3813 out_remove_scsi_host:
3814         scsi_remove_host(hba->host);
out_disable:
        /* Tear down hba resources before scsi_host_put() can free them */
        ufshcd_hba_exit(hba);
        scsi_host_put(host);
3818 out_error:
3819         return err;
3820 }
3821 EXPORT_SYMBOL_GPL(ufshcd_init);
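
/*
 * Compiled-out sketch of how a bus glue driver might chain
 * ufshcd_alloc_host() and ufshcd_init(). The foo_* names and the way
 * the MMIO region is obtained are hypothetical placeholders.
 */
#if 0
static int foo_ufs_probe(struct platform_device *pdev)
{
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int irq, err;

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err)
                return err;

        mmio_base = foo_map_registers(pdev);    /* hypothetical helper */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto out_put;
        }

        err = ufshcd_init(hba, mmio_base, irq);
        if (err)
                goto out_put;

        return 0;

out_put:
        scsi_host_put(hba->host);
        return err;
}
#endif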
3822
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
3824 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
3825 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
3826 MODULE_LICENSE("GPL");
3827 MODULE_VERSION(UFSHCD_DRIVER_VERSION);