isci: refactor initialization for S3/S4

Based on an original implementation by Ed Nadolski and Artur Wojcik

In preparation for S3/S4 support refactor initialization so that
driver-load and resume-from-suspend can share the common init path of
isci_host_init().  Organize the initialization into objects that are
self-contained to the driver (initialized by isci_host_init) versus
those that have some upward registration (initialized at allocation
time: asd_sas_phy, asd_sas_port, dma allocations).  The largest change
is moving the validation of the oem and module parameters from
isci_host_init() to isci_host_alloc().
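
Reduced to a standalone sketch (invented names, plain C, none of the real
driver types), the split looks roughly like this: allocation performs the
one-time, externally visible setup, while the init routine only rebuilds
state it owns, guarding anything it keeps across re-init the way
sci_controller_dma_alloc() below guards on ihost->completion_queue.

#include <stdlib.h>

/* toy model of the alloc/init split; not driver code */
struct host {
	void *sas_ha;	/* stands in for the upward (libsas) registration */
	void *dma_buf;	/* long-lived buffer, allocated on first init only */
	int hw_state;	/* private controller state, rebuilt on every init */
};

static struct host *host_alloc(void)
{
	struct host *h = calloc(1, sizeof(*h));

	if (h)
		h->sas_ha = h;	/* placeholder for the one-time registration */
	return h;
}

static int host_init(struct host *h)
{
	/* detect re-initialization: buffers from a previous init survive
	 * suspend/resume and are simply reused
	 */
	if (!h->dma_buf) {
		h->dma_buf = calloc(1, 4096);
		if (!h->dma_buf)
			return -1;
	}
	h->hw_state = 0;	/* hardware-facing state is rebuilt every time */
	return 0;
}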

The S3/S4 approach being taken is that libsas will be tasked with
remembering the state of the domain and the lldd is free to be
forgetful.  In the case of isci we'll just re-init using a subset of the
normal driver load path.
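
For illustration only, the PM hooks this enables could eventually take a
shape like the sketch below.  This is not part of the patch: isci_suspend /
isci_resume are hypothetical names, for_each_isci_host() is assumed from the
existing driver, and the controller restart/rescan plumbing plus the libsas
side of remembering the domain are omitted.

#include <linux/pci.h>
#include <linux/pm.h>
#include "isci.h"	/* assumed: for_each_isci_host(), struct isci_host */
#include "host.h"	/* isci_host_init(), isci_host_deinit() */

static int isci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct isci_host *ihost;
	int i;

	/* tear down only the private, re-buildable state; allocation-time
	 * objects (sas_ha, dma buffers) stay in place
	 */
	for_each_isci_host(i, ihost, pdev)
		isci_host_deinit(ihost);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int isci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct isci_host *ihost;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* re-run the shared init path; libsas restores the domain state it
	 * remembered across the suspend
	 */
	for_each_isci_host(i, ihost, pdev) {
		rc = isci_host_init(ihost);
		if (rc)
			return rc;
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);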

[clean up some unused / mis-indented function definitions in host.h]

Signed-off-by: Ed Nadolski <edmund.nadolski@intel.com>
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Dan Williams 2012-02-15 13:58:42 -08:00
parent ae904d15cf
commit abec912d71
8 changed files with 278 additions and 361 deletions


@@ -1074,7 +1074,7 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
* @data: This parameter specifies the ISCI host object
*
*/
static void isci_host_completion_routine(unsigned long data)
void isci_host_completion_routine(unsigned long data)
{
struct isci_host *ihost = (struct isci_host *)data;
struct list_head completed_request_list;
@@ -1317,29 +1317,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
static void isci_user_parameters_get(struct sci_user_parameters *u)
{
int i;
for (i = 0; i < SCI_MAX_PHYS; i++) {
struct sci_phy_user_params *u_phy = &u->phys[i];
u_phy->max_speed_generation = phy_gen;
/* we are not exporting these for now */
u_phy->align_insertion_frequency = 0x7f;
u_phy->in_connection_align_insertion_frequency = 0xff;
u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
}
u->stp_inactivity_timeout = stp_inactive_to;
u->ssp_inactivity_timeout = ssp_inactive_to;
u->stp_max_occupancy_timeout = stp_max_occ_to;
u->ssp_max_occupancy_timeout = ssp_max_occ_to;
u->no_outbound_task_timeout = no_outbound_task_to;
u->max_concurr_spinup = max_concurr_spinup;
}
static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1648,55 +1625,6 @@ static const struct sci_base_state sci_controller_state_table[] = {
[SCIC_FAILED] = {}
};
static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
/* these defaults are overridden by the platform / firmware */
u16 index;
/* Default to APC mode. */
ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
/* Default to APC mode. */
ihost->oem_parameters.controller.max_concurr_spin_up = 1;
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
/* Default to short cables on all phys. */
ihost->oem_parameters.controller.cable_selection_mask = 0;
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
ihost->oem_parameters.ports[index].phy_mask = 0;
}
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
/* Default to 3G (i.e. Gen 2). */
ihost->user_parameters.phys[index].max_speed_generation =
SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
/*
* Previous Vitesse based expanders had a arbitration issue that
* is worked around by having the upper 32-bits of SAS address
* with a value greater then the Vitesse company identifier.
* Hence, usage of 0x5FCFFFFF. */
ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
}
ihost->user_parameters.stp_inactivity_timeout = 5;
ihost->user_parameters.ssp_inactivity_timeout = 5;
ihost->user_parameters.stp_max_occupancy_timeout = 5;
ihost->user_parameters.ssp_max_occupancy_timeout = 20;
ihost->user_parameters.no_outbound_task_timeout = 2;
}
static void controller_timeout(unsigned long data)
{
struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1753,9 +1681,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
sci_init_timer(&ihost->timer, controller_timeout);
/* Initialize the User and OEM parameters to default values. */
sci_controller_set_default_config_parameters(ihost);
return sci_controller_reset(ihost);
}
@@ -1835,27 +1760,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
return 0;
}
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
ISCI_ROM_VER_1_0;
if (sci_oem_parameters_validate(&ihost->oem_parameters,
oem_version))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_STATE;
}
static u8 max_spin_up(struct isci_host *ihost)
{
if (ihost->user_parameters.max_concurr_spinup)
@@ -2372,96 +2276,77 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
return result;
}
static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
struct sci_user_parameters *sci_parms)
static int sci_controller_dma_alloc(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
struct device *dev = &ihost->pdev->dev;
size_t size;
int i;
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
u16 index;
/* detect re-initialization */
if (ihost->completion_queue)
return 0;
/*
* Validate the user parameters. If they are not legal, then
* return a failure.
*/
for (index = 0; index < SCI_MAX_PHYS; index++) {
struct sci_phy_user_params *user_phy;
size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
GFP_KERNEL);
if (!ihost->completion_queue)
return -ENOMEM;
user_phy = &sci_parms->phys[index];
size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
GFP_KERNEL);
if (!((user_phy->max_speed_generation <=
SCIC_SDS_PARM_MAX_SPEED) &&
(user_phy->max_speed_generation >
SCIC_SDS_PARM_NO_SPEED)))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
if (!ihost->remote_node_context_table)
return -ENOMEM;
if (user_phy->in_connection_align_insertion_frequency <
3)
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
size = ihost->task_context_entries * sizeof(struct scu_task_context),
ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
GFP_KERNEL);
if (!ihost->task_context_table)
return -ENOMEM;
if ((user_phy->in_connection_align_insertion_frequency <
3) ||
(user_phy->align_insertion_frequency == 0) ||
(user_phy->
notify_enable_spin_up_insertion_frequency ==
0))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
size = SCI_UFI_TOTAL_SIZE;
ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
if (!ihost->ufi_buf)
return -ENOMEM;
for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
struct isci_request *ireq;
dma_addr_t dma;
ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
if (!ireq)
return -ENOMEM;
ireq->tc = &ihost->task_context_table[i];
ireq->owning_controller = ihost;
spin_lock_init(&ireq->state_lock);
ireq->request_daddr = dma;
ireq->isci_host = ihost;
ihost->reqs[i] = ireq;
}
if ((sci_parms->stp_inactivity_timeout == 0) ||
(sci_parms->ssp_inactivity_timeout == 0) ||
(sci_parms->stp_max_occupancy_timeout == 0) ||
(sci_parms->ssp_max_occupancy_timeout == 0) ||
(sci_parms->no_outbound_task_timeout == 0))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_STATE;
return 0;
}
static int sci_controller_mem_init(struct isci_host *ihost)
{
struct device *dev = &ihost->pdev->dev;
dma_addr_t dma;
size_t size;
int err;
int err = sci_controller_dma_alloc(ihost);
size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
if (!ihost->completion_queue)
return -ENOMEM;
writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
GFP_KERNEL);
if (!ihost->remote_node_context_table)
return -ENOMEM;
writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
size = ihost->task_context_entries * sizeof(struct scu_task_context),
ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
if (!ihost->task_context_table)
return -ENOMEM;
ihost->task_context_dma = dma;
writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
err = sci_unsolicited_frame_control_construct(ihost);
if (err)
return err;
writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
sci_unsolicited_frame_control_construct(ihost);
/*
* Inform the silicon as to the location of the UF headers and
* address table.
@@ -2479,19 +2364,20 @@ static int sci_controller_mem_init(struct isci_host *ihost)
return 0;
}
/**
* isci_host_init - (re-)initialize hardware and internal (private) state
* @ihost: host to init
*
* Any public facing objects (like asd_sas_port, and asd_sas_phys), or
* one-time initialization objects like locks and waitqueues, are
* not touched (they are initialized in isci_host_alloc)
*/
int isci_host_init(struct isci_host *ihost)
{
int err = 0, i;
int i, err;
enum sci_status status;
struct sci_user_parameters sci_user_params;
struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
spin_lock_init(&ihost->scic_lock);
init_waitqueue_head(&ihost->eventq);
status = sci_controller_construct(ihost, scu_base(ihost),
smu_base(ihost));
status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: sci_controller_construct failed - status = %x\n",
@@ -2500,48 +2386,6 @@ int isci_host_init(struct isci_host *ihost)
return -ENODEV;
}
ihost->sas_ha.dev = &ihost->pdev->dev;
ihost->sas_ha.lldd_ha = ihost;
/*
* grab initial values stored in the controller object for OEM and USER
* parameters
*/
isci_user_parameters_get(&sci_user_params);
status = sci_user_parameters_set(ihost, &sci_user_params);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
"%s: sci_user_parameters_set failed\n",
__func__);
return -ENODEV;
}
/* grab any OEM parameters specified in orom */
if (pci_info->orom) {
status = isci_parse_oem_parameters(&ihost->oem_parameters,
pci_info->orom,
ihost->id);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
"parsing firmware oem parameters failed\n");
return -EINVAL;
}
}
status = sci_oem_parameters_set(ihost);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
"%s: sci_oem_parameters_set failed\n",
__func__);
return -ENODEV;
}
tasklet_init(&ihost->completion_tasklet,
isci_host_completion_routine, (unsigned long)ihost);
INIT_LIST_HEAD(&ihost->requests_to_complete);
INIT_LIST_HEAD(&ihost->requests_to_errorback);
spin_lock_irq(&ihost->scic_lock);
status = sci_controller_initialize(ihost);
spin_unlock_irq(&ihost->scic_lock);
@@ -2557,47 +2401,12 @@ int isci_host_init(struct isci_host *ihost)
if (err)
return err;
for (i = 0; i < SCI_MAX_PORTS; i++) {
struct isci_port *iport = &ihost->ports[i];
INIT_LIST_HEAD(&iport->remote_dev_list);
iport->isci_host = ihost;
}
for (i = 0; i < SCI_MAX_PHYS; i++)
isci_phy_init(&ihost->phys[i], ihost, i);
/* enable sgpio */
writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
struct isci_remote_device *idev = &ihost->devices[i];
INIT_LIST_HEAD(&idev->reqs_in_process);
INIT_LIST_HEAD(&idev->node);
}
for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
struct isci_request *ireq;
dma_addr_t dma;
ireq = dmam_alloc_coherent(&ihost->pdev->dev,
sizeof(struct isci_request), &dma,
GFP_KERNEL);
if (!ireq)
return -ENOMEM;
ireq->tc = &ihost->task_context_table[i];
ireq->owning_controller = ihost;
spin_lock_init(&ireq->state_lock);
ireq->request_daddr = dma;
ireq->isci_host = ihost;
ihost->reqs[i] = ireq;
}
return 0;
}


@@ -158,13 +158,17 @@ struct isci_host {
struct sci_power_control power_control;
u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
struct scu_task_context *task_context_table;
dma_addr_t task_context_dma;
dma_addr_t tc_dma;
union scu_remote_node_context *remote_node_context_table;
dma_addr_t rnc_dma;
u32 *completion_queue;
dma_addr_t cq_dma;
u32 completion_queue_get;
u32 logical_port_entries;
u32 remote_node_entries;
u32 task_context_entries;
void *ufi_buf;
dma_addr_t ufi_dma;
struct sci_unsolicited_frame_control uf_control;
/* phy startup */
@@ -452,37 +456,18 @@ void sci_controller_free_remote_node_context(
struct isci_remote_device *idev,
u16 node_id);
struct isci_request *sci_request_by_tag(struct isci_host *ihost,
u16 io_tag);
void sci_controller_power_control_queue_insert(
struct isci_host *ihost,
struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
void sci_controller_power_control_queue_insert(struct isci_host *ihost,
struct isci_phy *iphy);
void sci_controller_power_control_queue_remove(
struct isci_host *ihost,
void sci_controller_power_control_queue_remove(struct isci_host *ihost,
struct isci_phy *iphy);
void sci_controller_link_up(
struct isci_host *ihost,
struct isci_port *iport,
void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
void sci_controller_link_down(
struct isci_host *ihost,
struct isci_port *iport,
void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
void sci_controller_remote_device_stopped(
struct isci_host *ihost,
void sci_controller_remote_device_stopped(struct isci_host *ihost,
struct isci_remote_device *idev);
void sci_controller_copy_task_context(
struct isci_host *ihost,
struct isci_request *ireq);
void sci_controller_register_setup(struct isci_host *ihost);
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
@@ -491,27 +476,9 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
int isci_host_init(struct isci_host *);
void isci_host_init_controller_names(
struct isci_host *isci_host,
unsigned int controller_idx);
void isci_host_deinit(
struct isci_host *);
void isci_host_port_link_up(
struct isci_host *,
struct isci_port *,
struct isci_phy *);
int isci_host_dev_found(struct domain_device *);
void isci_host_remote_device_start_complete(
struct isci_host *,
struct isci_remote_device *,
enum sci_status);
void sci_controller_disable_interrupts(
struct isci_host *ihost);
void isci_host_completion_routine(unsigned long data);
void isci_host_deinit(struct isci_host *);
void sci_controller_disable_interrupts(struct isci_host *ihost);
enum sci_status sci_controller_start_io(
struct isci_host *ihost,


@@ -397,38 +397,203 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
return err;
}
static void isci_user_parameters_get(struct sci_user_parameters *u)
{
int i;
for (i = 0; i < SCI_MAX_PHYS; i++) {
struct sci_phy_user_params *u_phy = &u->phys[i];
u_phy->max_speed_generation = phy_gen;
/* we are not exporting these for now */
u_phy->align_insertion_frequency = 0x7f;
u_phy->in_connection_align_insertion_frequency = 0xff;
u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
}
u->stp_inactivity_timeout = stp_inactive_to;
u->ssp_inactivity_timeout = ssp_inactive_to;
u->stp_max_occupancy_timeout = stp_max_occ_to;
u->ssp_max_occupancy_timeout = ssp_max_occ_to;
u->no_outbound_task_timeout = no_outbound_task_to;
u->max_concurr_spinup = max_concurr_spinup;
}
static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
struct sci_user_parameters *sci_parms)
{
u16 index;
/*
* Validate the user parameters. If they are not legal, then
* return a failure.
*/
for (index = 0; index < SCI_MAX_PHYS; index++) {
struct sci_phy_user_params *u;
u = &sci_parms->phys[index];
if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
(u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
if (u->in_connection_align_insertion_frequency < 3)
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
if ((u->in_connection_align_insertion_frequency < 3) ||
(u->align_insertion_frequency == 0) ||
(u->notify_enable_spin_up_insertion_frequency == 0))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
if ((sci_parms->stp_inactivity_timeout == 0) ||
(sci_parms->ssp_inactivity_timeout == 0) ||
(sci_parms->stp_max_occupancy_timeout == 0) ||
(sci_parms->ssp_max_occupancy_timeout == 0) ||
(sci_parms->no_outbound_task_timeout == 0))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
return SCI_SUCCESS;
}
static void sci_oem_defaults(struct isci_host *ihost)
{
/* these defaults are overridden by the platform / firmware */
struct sci_user_parameters *user = &ihost->user_parameters;
struct sci_oem_params *oem = &ihost->oem_parameters;
int i;
/* Default to APC mode. */
oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
/* Default to APC mode. */
oem->controller.max_concurr_spin_up = 1;
/* Default to no SSC operation. */
oem->controller.do_enable_ssc = false;
/* Default to short cables on all phys. */
oem->controller.cable_selection_mask = 0;
/* Initialize all of the port parameter information to narrow ports. */
for (i = 0; i < SCI_MAX_PORTS; i++)
oem->ports[i].phy_mask = 0;
/* Initialize all of the phy parameter information. */
for (i = 0; i < SCI_MAX_PHYS; i++) {
/* Default to 3G (i.e. Gen 2). */
user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
user->phys[i].align_insertion_frequency = 0x7f;
user->phys[i].in_connection_align_insertion_frequency = 0xff;
user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
/* Previous Vitesse based expanders had a arbitration issue that
* is worked around by having the upper 32-bits of SAS address
* with a value greater then the Vitesse company identifier.
* Hence, usage of 0x5FCFFFFF.
*/
oem->phys[i].sas_address.low = 0x1 + ihost->id;
oem->phys[i].sas_address.high = 0x5FCFFFFF;
}
user->stp_inactivity_timeout = 5;
user->ssp_inactivity_timeout = 5;
user->stp_max_occupancy_timeout = 5;
user->ssp_max_occupancy_timeout = 20;
user->no_outbound_task_timeout = 2;
}
static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
{
struct isci_host *isci_host;
struct isci_orom *orom = to_pci_info(pdev)->orom;
struct sci_user_parameters sci_user_params;
u8 oem_version = ISCI_ROM_VER_1_0;
struct isci_host *ihost;
struct Scsi_Host *shost;
int err;
int err, i;
isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
if (!isci_host)
ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
if (!ihost)
return NULL;
isci_host->pdev = pdev;
isci_host->id = id;
ihost->pdev = pdev;
ihost->id = id;
spin_lock_init(&ihost->scic_lock);
init_waitqueue_head(&ihost->eventq);
ihost->sas_ha.dev = &ihost->pdev->dev;
ihost->sas_ha.lldd_ha = ihost;
tasklet_init(&ihost->completion_tasklet,
isci_host_completion_routine, (unsigned long)ihost);
/* validate module parameters */
/* TODO: kill struct sci_user_parameters and reference directly */
sci_oem_defaults(ihost);
isci_user_parameters_get(&sci_user_params);
if (sci_user_parameters_set(ihost, &sci_user_params)) {
dev_warn(&pdev->dev,
"%s: sci_user_parameters_set failed\n", __func__);
return NULL;
}
/* sanity check platform (or 'firmware') oem parameters */
if (orom) {
if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
return NULL;
}
ihost->oem_parameters = orom->ctrl[id];
oem_version = orom->hdr.version;
}
/* validate oem parameters (platform, firmware, or built-in defaults) */
if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
dev_warn(&pdev->dev, "oem parameter validation failed\n");
return NULL;
}
INIT_LIST_HEAD(&ihost->requests_to_complete);
INIT_LIST_HEAD(&ihost->requests_to_errorback);
for (i = 0; i < SCI_MAX_PORTS; i++) {
struct isci_port *iport = &ihost->ports[i];
INIT_LIST_HEAD(&iport->remote_dev_list);
iport->isci_host = ihost;
}
for (i = 0; i < SCI_MAX_PHYS; i++)
isci_phy_init(&ihost->phys[i], ihost, i);
for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
struct isci_remote_device *idev = &ihost->devices[i];
INIT_LIST_HEAD(&idev->reqs_in_process);
INIT_LIST_HEAD(&idev->node);
}
shost = scsi_host_alloc(&isci_sht, sizeof(void *));
if (!shost)
return NULL;
isci_host->shost = shost;
ihost->shost = shost;
dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
"{%s, %s, %s, %s}\n",
(is_cable_select_overridden() ? "* " : ""), isci_host->id,
lookup_cable_names(decode_cable_selection(isci_host, 3)),
lookup_cable_names(decode_cable_selection(isci_host, 2)),
lookup_cable_names(decode_cable_selection(isci_host, 1)),
lookup_cable_names(decode_cable_selection(isci_host, 0)));
(is_cable_select_overridden() ? "* " : ""), ihost->id,
lookup_cable_names(decode_cable_selection(ihost, 3)),
lookup_cable_names(decode_cable_selection(ihost, 2)),
lookup_cable_names(decode_cable_selection(ihost, 1)),
lookup_cable_names(decode_cable_selection(ihost, 0)));
err = isci_host_init(isci_host);
err = isci_host_init(ihost);
if (err)
goto err_shost;
SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
isci_host->sas_ha.core.shost = shost;
SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
ihost->sas_ha.core.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -439,11 +604,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
if (err)
goto err_shost;
err = isci_register_sas_ha(isci_host);
err = isci_register_sas_ha(ihost);
if (err)
goto err_shost_remove;
return isci_host;
return ihost;
err_shost_remove:
scsi_remove_host(shost);


@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
return rom;
}
enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index)
{
/* check for valid inputs */
if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
scu_index > orom->hdr.num_elements || !oem)
return -EINVAL;
*oem = orom->ctrl[scu_index];
return 0;
}
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
{
struct isci_orom *orom = NULL, *data;


@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index);
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);


@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
if (idx == 0) {
offset = (void *) &ireq->tc->sgl_pair_ab -
(void *) &ihost->task_context_table[0];
return ihost->task_context_dma + offset;
return ihost->tc_dma + offset;
} else if (idx == 1) {
offset = (void *) &ireq->tc->sgl_pair_cd -
(void *) &ihost->task_context_table[0];
return ihost->task_context_dma + offset;
return ihost->tc_dma + offset;
}
return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);


@@ -57,31 +57,19 @@
#include "unsolicited_frame_control.h"
#include "registers.h"
int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct sci_unsolicited_frame *uf;
u32 buf_len, header_len, i;
dma_addr_t dma;
size_t size;
void *virt;
/*
* Prepare all of the memory sizes for the UF headers, UF address
* table, and UF buffers themselves.
*/
buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
dma_addr_t dma = ihost->ufi_dma;
void *virt = ihost->ufi_buf;
int i;
/*
* The Unsolicited Frame buffers are set at the start of the UF
* memory descriptor entry. The headers and address table will be
* placed after the buffers.
*/
virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
if (!virt)
return -ENOMEM;
/*
* Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* headers, since we program the UF address table pointers to
* NULL.
*/
uf_control->headers.physical_address = dma + buf_len;
uf_control->headers.array = virt + buf_len;
uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
/*
* Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* byte boundary already due to above programming headers being on a
* 64-bit boundary and headers are on a 64-bytes in size.
*/
uf_control->address_table.physical_address = dma + buf_len + header_len;
uf_control->address_table.array = virt + buf_len + header_len;
uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
uf_control->get = 0;
/*
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
}
return 0;
}
enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,


@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control {
};
#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
struct isci_host;
int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
enum sci_status sci_unsolicited_frame_control_get_header(
struct sci_unsolicited_frame_control *uf_control,