/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
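
/* Note (illustrative commentary, not from the original source): the 126
 * entries above are the valid FC-AL arbitrated loop physical addresses
 * (AL_PAs) an NL_Port can be assigned, so a port's index in this table
 * gives a stable value for the scan-down bind method to use as its scsi id.
 */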

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			" to terminate I/O Data x%x\n",
			rport->port_id);
		return;
	}

	phba  = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
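
/* A note on context (an assumption, not stated in this file):
 * lpfc_terminate_rport_io is not called from within this file; it is the
 * driver's .terminate_rport_io hook for the FC transport class, registered
 * with the transport template elsewhere in the driver, and the transport
 * invokes it when outstanding I/O to a remote port must be aborted, e.g.
 * when fast_io_fail_tmo fires.
 */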

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct lpfc_work_evt *evtp;
	int  put_node;
	int  put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {

		/* If the WWPN of the rport and ndlp don't match, ignore it */
		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
			put_device(&rport->dev);
			return;
		}
	}

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	evtp->evt_arg1  = lpfc_nlp_get(ndlp);

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
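
/* The callback above shows the driver's usual deferral pattern: the
 * transport invokes it in a context where discovery work cannot be done
 * directly, so the real handling is queued as an LPFC_EVT_DEV_LOSS entry
 * on phba->work_list, with lpfc_nlp_get()/lpfc_nlp_put() holding the node
 * across the handoff; lpfc_work_list_done() below dequeues the event and
 * calls lpfc_dev_loss_tmo_handler().
 */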

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise, it
 * returns 0 when no remote node is using the FCF by the time the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int  put_node;
	int  put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba  = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been using the FCF, when this
 * routine is invoked, it is guaranteed that none of the remote nodes are
 * still using the FCF. When the devloss timeout fires for the last remote
 * node using the FCF, if the FIP engine is neither in the FCF table scan
 * process nor the roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in the FCF discovery process, the
 * devloss timeout state shall be set for either the FCF table scan or the
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event, and also tracks the number of pending events
 * to prevent an event storm when there are too many.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
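
/* Illustrative usage only (a sketch, not code from this file): a caller in
 * interrupt context allocates the event, fills in the union and vport, then
 * queues the embedded work_evt and wakes the worker thread, roughly:
 *
 *	struct lpfc_fast_path_event *evt = lpfc_alloc_fast_evt(phba);
 *	unsigned long flags;
 *
 *	if (evt) {
 *		evt->vport = vport;
 *		...fill in evt->un before posting...
 *		spin_lock_irqsave(&phba->hbalock, flags);
 *		list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
 *		spin_unlock_irqrestore(&phba->hbalock, flags);
 *		lpfc_worker_wake_up(phba);
 *	}
 *
 * lpfc_work_list_done() then routes the event to lpfc_send_fastpath_evt(),
 * which frees it via lpfc_free_fast_evt().
 */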

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event
 * to fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size =  sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				        ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
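
/* For reference (a sketch of an assumption about code outside this file):
 * the producer side of the handshake above is lpfc_worker_wake_up(), a
 * static inline in lpfc.h, which roughly does:
 *
 *	set_bit(LPFC_DATA_READY, &phba->data_flags);
 *	wake_up(&phba->work_waitq);
 *
 * so the test_and_clear_bit() in the wait condition consumes exactly one
 * batch of pending work per wakeup.
 */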

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
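
/* Illustrative caller pattern (a sketch based on how lpfc_work_list_done()
 * above services LPFC_EVT_ONLINE): evt_arg1 receives a status word and
 * evt_arg2 is a completion the poster waits on, roughly:
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	if (lpfc_workq_post_event(phba, &status, &online_compl,
 *				  LPFC_EVT_ONLINE))
 *		wait_for_completion(&online_compl);
 */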

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int  rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t          *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}


static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
			/* Need to wait for FAN - use discovery timer
			 * for timeout.  port_state is identically
			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
			 */
			lpfc_set_disctmo(vport);
			return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2017 REG_FCFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric names match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch names match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac addresses match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[