#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
static inline u8
_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
{
- return ioc->cpu_msix_table[smp_processor_id()];
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
}
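smp_processor_id() requires the caller to be non-preemptible (CONFIG_DEBUG_PREEMPT splats otherwise), while the value here only steers a reply toward an MSI-X vector, so a possibly stale CPU id is harmless. A minimal sketch of the two safe kernel patterns (illustration only, not part of the patch):

	unsigned int cpu;

	/* Pattern 1: pin the task so the id stays valid while it is used. */
	cpu = get_cpu();                /* disables preemption */
	/* ... cpu is guaranteed to be the executing CPU here ... */
	put_cpu();                      /* re-enables preemption */

	/* Pattern 2: accept a possibly stale id for best-effort hints,
	 * as _base_get_msix_index() now does.
	 */
	cpu = raw_smp_processor_id();   /* no debug check, no pinning */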
/**
}
if (ioc->chain_dma_pool)
pci_pool_destroy(ioc->chain_dma_pool);
- }
- if (ioc->chain_lookup) {
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
ioc->chain_lookup = NULL;
}
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
struct mpt2sas_facts *facts;
- u32 queue_size, queue_diff;
u16 max_sge_elements;
- u16 num_of_reply_frames;
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
}
/* command line tunables for max controller queue depth */
- if (max_queue_depth != -1)
- max_request_credit = (max_queue_depth < facts->RequestCredit)
- ? max_queue_depth : facts->RequestCredit;
- else
- max_request_credit = facts->RequestCredit;
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
ioc->hba_queue_depth = max_request_credit;
ioc->hi_priority_depth = facts->HighPriorityCredit;
}
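The clamp above can be exercised in isolation, e.g. after loading with `modprobe mpt2sas max_queue_depth=500` (-1, the default, keeps the controller-reported credit). A userspace sketch with made-up numbers; the credit and depth values are illustrative, not from any real controller, and min_t is a stand-in for the kernel helper:

	#include <stdio.h>

	#define MAX_HBA_QUEUE_DEPTH 30000
	/* userspace stand-in for the kernel's min_t() */
	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		int max_queue_depth = 500;             /* module parameter */
		unsigned short RequestCredit = 10240;  /* reported by the IOC */
		unsigned short hi_priority_depth = 8, internal_depth = 8;
		unsigned short max_request_credit;

		if (max_queue_depth != -1 && max_queue_depth != 0) {
			max_request_credit = min_t(unsigned short, max_queue_depth +
			    hi_priority_depth + internal_depth, RequestCredit);
			if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
				max_request_credit = MAX_HBA_QUEUE_DEPTH;
		} else
			max_request_credit = min_t(unsigned short, RequestCredit,
			    MAX_HBA_QUEUE_DEPTH);

		/* prints 516: the user's 500 plus both internal pools */
		printf("hba_queue_depth = %u\n", (unsigned)max_request_credit);
		return 0;
	}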
ioc->chains_needed_per_io = chains_needed_per_io;
- /* reply free queue sizing - taking into account for events */
- num_of_reply_frames = ioc->hba_queue_depth + 32;
-
- /* number of replies frames can't be a multiple of 16 */
- /* decrease number of reply frames by 1 */
- if (!(num_of_reply_frames % 16))
- num_of_reply_frames--;
-
- /* calculate number of reply free queue entries
- * (must be multiple of 16)
- */
-
- /* (we know reply_free_queue_depth is not a multiple of 16) */
- queue_size = num_of_reply_frames;
- queue_size += 16 - (queue_size % 16);
- ioc->reply_free_queue_depth = queue_size;
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- /* reply descriptor post queue sizing */
- /* this size should be the number of request frames + number of reply
- * frames
- */
-
- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
- /* round up to 16 byte boundary */
- if (queue_size % 16)
- queue_size += 16 - (queue_size % 16);
-
- /* check against IOC maximum reply post queue depth */
- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
- queue_diff = queue_size -
- facts->MaxReplyDescriptorPostQueueDepth;
-
- /* round queue_diff up to multiple of 16 */
- if (queue_diff % 16)
- queue_diff += 16 - (queue_diff % 16);
-
- /* adjust hba_queue_depth, reply_free_queue_depth,
- * and queue_size
- */
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ /* align the reply post queue on the next 16 count boundary */
+ if (!(ioc->reply_free_queue_depth % 16))
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+ else
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+ 32 - (ioc->reply_free_queue_depth % 16);
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth = min_t(u16,
+ (facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
}
- ioc->reply_post_queue_depth = queue_size;
+
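The free/post queue arithmetic above is easy to sanity-check: the free queue adds 64 slots for firmware events, and the post queue lands on a 16-entry boundary at least 16 above it. A userspace sketch with an illustrative depth:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hba_queue_depth = 1000;   /* illustrative */
		unsigned int free_depth, post_depth;

		free_depth = hba_queue_depth + 64;     /* + 64 FW event replies */

		/* multiple of 16 strictly above free_depth */
		if (!(free_depth % 16))
			post_depth = free_depth + 16;
		else
			post_depth = free_depth + 32 - (free_depth % 16);

		printf("free=%u post=%u\n", free_depth, post_depth);
		/* prints free=1064 post=1088; 1088 % 16 == 0 */
		return 0;
	}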
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
"sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
/* set the scsi host can_queue depth equal to the scsiio depth;
 * internal and high-priority commands draw from their own pools
 */
- ioc->shost->can_queue = ioc->scsiio_depth - (2);
+ ioc->shost->can_queue = ioc->scsiio_depth;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
"can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
"depth(%d)\n", ioc->name, ioc->request,
ioc->scsiio_depth));
- /* loop till the allocation succeeds */
- do {
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
- if (ioc->chain_lookup == NULL)
- ioc->chain_depth -= 100;
- } while (ioc->chain_lookup == NULL);
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+     GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+     printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
+         "sz(%d)\n", ioc->name, (int)sz);
+     goto out;
+ }
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
}
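chain_lookup is now a single bounded allocation rather than a shrink-and-retry loop; get_order() converts the byte size into a power-of-two page order. A userspace sketch of the sizing, assuming 4 KiB pages and a hypothetical 16-byte struct chain_tracker:

	#include <stdio.h>

	#define PAGE_SIZE 4096ul
	#define MAX_CHAIN_DEPTH 100000

	/* userspace stand-in for the kernel's get_order() */
	static int get_order(unsigned long sz)
	{
		unsigned long pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE;
		int order = 0;

		while ((1ul << order) < pages)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long chain_depth = 150000;  /* illustrative, gets capped */
		unsigned long sz;

		if (chain_depth > MAX_CHAIN_DEPTH)
			chain_depth = MAX_CHAIN_DEPTH;
		sz = chain_depth * 16;               /* assumed tracker size */
		printf("depth=%lu sz=%lu order=%d\n",
		    chain_depth, sz, get_order(sz));
		/* depth=100000 sz=1600000 order=9 (512 pages of 4 KiB) */
		return 0;
	}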
pfacts = &ioc->pfacts[port];
- memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
+ memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
pfacts->PortNumber = mpi_reply.PortNumber;
pfacts->VP_ID = mpi_reply.VP_ID;
pfacts->VF_ID = mpi_reply.VF_ID;
}
facts = &ioc->facts;
- memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
+ memset(facts, 0, sizeof(struct mpt2sas_facts));
facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
facts->VP_ID = mpi_reply.VP_ID;
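Both memset fixes above are instances of the same rule: the zeroed length must follow the destination type (the driver's cached struct), not the wire-reply type, since the two can differ in size. A minimal sketch with hypothetical types:

	#include <string.h>

	struct wire_reply { unsigned short msg_version, header_version; };
	struct cached_facts {                   /* driver-side copy, larger */
		unsigned short msg_version, header_version;
		unsigned int extra_state[8];
	};

	static void store_facts(struct cached_facts *facts)
	{
		/* Wrong: sizeof(struct wire_reply) leaves extra_state[] stale. */
		/* Right: the size tracks the destination even as it evolves. */
		memset(facts, 0, sizeof(*facts));
	}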
ioc->reply_free[i] = cpu_to_le32(reply_address);
/* initialize reply queues */
- _base_assign_reply_queues(ioc);
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
reply_post_free = (long)ioc->reply_post_free;
if (ioc->is_driver_loading) {
-
-
-
- ioc->wait_for_discovery_to_complete =
- _base_determine_wait_on_discovery(ioc);
- return r; /* scan_start and scan_finished support */
- }
-
-
- if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
- if (ioc->manu_pg10.OEMIdentifier == 0x80) {
+ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+ == 0x80) {
hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
MFG_PAGE10_HIDE_SSDS_MASK);
if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
ioc->mfg_pg10_hide_flag = hide_flag;
}
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
}
-
r = _base_send_port_enable(ioc, sleep_flag);
if (r)
return r;
goto out_free_resources;
ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
- sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
+ sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
if (!ioc->pfacts) {
r = -ENOMEM;
goto out_free_resources;