From: <vl...@us...> - 2007-09-25 09:46:41
Revision: 198
          http://scst.svn.sourceforge.net/scst/?rev=198&view=rev
Author:   vlnb
Date:     2007-09-25 02:46:36 -0700 (Tue, 25 Sep 2007)

Log Message:
-----------
 - Fixes a command leak (e.g., when QUEUE FULL status is returned) caused by an uninitialised cmd->sn
 - HEAD OF QUEUE command processing simplified to fix known problems
 - Cosmetics

Modified Paths:
--------------
    trunk/scst/include/scsi_tgt.h
    trunk/scst/src/dev_handlers/scst_user.c
    trunk/scst/src/dev_handlers/scst_vdisk.c
    trunk/scst/src/scst_lib.c
    trunk/scst/src/scst_priv.h
    trunk/scst/src/scst_targ.c
    trunk/usr/fileio/common.c

Modified: trunk/scst/include/scsi_tgt.h
===================================================================
--- trunk/scst/include/scsi_tgt.h	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/include/scsi_tgt.h	2007-09-25 09:46:36 UTC (rev 198)
@@ -341,9 +341,6 @@
 /* Set if tgt_dev is RESERVED by another session */
 #define SCST_TGT_DEV_RESERVED 1

-/* Set HEAD OF QUEUE cmd is being executed */
-#define SCST_TGT_DEV_HQ_ACTIVE 2
-
 /* Set if the corresponding context is atomic */
 #define SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC 5
 #define SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC 6
@@ -658,7 +655,7 @@
     /* Set, if no /proc files should be automatically created by SCST */
     unsigned no_proc:1;

-    /* Set if expected_sn in cmd->scst_cmd_done() */
+    /* Set if increment expected_sn in cmd->scst_cmd_done() */
     unsigned inc_expected_sn_on_done:1;

     /*
@@ -1027,11 +1024,14 @@
      */
     unsigned int preprocessing_only:1;

+    /* Set if scst_cmd_set_sn() was called */
+    unsigned int sn_set:1;
+
     /*
-     * Set if scst_cmd_init_stage1_done() called and the target want
-     * that the SN for the cmd isn't assigned until scst_restart_cmd()
+     * Set if scst_cmd_init_stage1_done() called and the target wants
+     * that the SN for the cmd won't be assigned until scst_restart_cmd()
      */
-    unsigned int no_sn:1;
+    unsigned int set_sn_on_restart_cmd:1;

     /* Set if the cmd's must not use sgv cache for data buffer */
     unsigned int no_sgv:1;
@@ -1053,8 +1053,8 @@
     unsigned int skip_parse:1;

     /*
-     * Set if inc expected_sn in cmd->scst_cmd_done() (to
-     * save extra dereferences)
+     * Set if increment expected_sn in cmd->scst_cmd_done() (to save
+     * extra dereferences)
      */
     unsigned int inc_expected_sn_on_done:1;

@@ -1366,10 +1366,11 @@
      */
     int def_cmd_count;
     spinlock_t sn_lock;
-    unsigned long expected_sn, curr_sn;
+    unsigned long expected_sn;
+    unsigned long curr_sn;
+    int hq_cmd_count;
     struct list_head deferred_cmd_list;
     struct list_head skipped_sn_list;
-    struct list_head hq_cmd_list;

     /*
      * Set if the prev cmd was ORDERED. Size must allow unprotected
@@ -1508,10 +1509,9 @@
  * Registers and returns target adapter
  * Returns new target structure on success or NULL otherwise.
  *
- * If parameter "target_name" isn't NULL, then new security group with name
- * "Default_##target_name" will be created and all sessions, which don't
- * belong to any defined security groups, will be assigned to it instead of
- * the "Default" one.
+ * If parameter "target_name" isn't NULL, then security group with name
+ * "Default_##target_name", if created, will be used as the default
+ * instead of "Default" one for all initiators not assigned to any other group.
  */
 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
     const char *target_name);
@@ -1630,9 +1630,10 @@
  *
  * !!IMPORTANT!!
  *
- * If cmd->no_sn not set, this function, as well as scst_cmd_init_stage1_done()
- * and scst_restart_cmd() must not be called simultaneously for the same session
- * (more precisely, for the same session/LUN, i.e. tgt_dev), i.e. they must be
+ * If cmd->set_sn_on_restart_cmd not set, this function, as well as
+ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
+ * called simultaneously for the same session (more precisely,
+ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
 * somehow externally serialized. This is needed to have lock free fast path in
 * scst_cmd_set_sn(). For majority of targets those functions are naturally
 * serialized by the single source of commands. Only iSCSI immediate commands
@@ -1654,7 +1655,7 @@
     int pref_context, int set_sn)
 {
     cmd->preprocessing_only = 1;
-    cmd->no_sn = !set_sn;
+    cmd->set_sn_on_restart_cmd = !set_sn;
     scst_cmd_init_done(cmd, pref_context);
 }

Modified: trunk/scst/src/dev_handlers/scst_user.c
===================================================================
--- trunk/scst/src/dev_handlers/scst_user.c	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/src/dev_handlers/scst_user.c	2007-09-25 09:46:36 UTC (rev 198)
@@ -2092,6 +2092,7 @@
     int res, rc;
     struct dev_user_cmd *ucmd;
     struct scst_user_dev *dev = (struct scst_user_dev*)tgt_dev->dev->dh_priv;
+    struct dev_user_cmd *ucmd_to_abort = NULL;

     TRACE_ENTRY();

@@ -2107,15 +2108,15 @@
     ucmd->user_cmd.tm_cmd.fn = mcmd->fn;

     if (mcmd->cmd_to_abort != NULL) {
-        struct dev_user_cmd *ucmd_to_abort =
-            (struct dev_user_cmd*)mcmd->cmd_to_abort->dh_priv;
+        ucmd_to_abort = (struct dev_user_cmd*)mcmd->cmd_to_abort->dh_priv;
         if (ucmd_to_abort != NULL)
             ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
     }

     TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
-        "ucmd_to_abort %d)", ucmd, ucmd->h, mcmd->fn,
-        mcmd->cmd_to_abort, ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
+        "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
+        mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
+        ucmd->user_cmd.tm_cmd.cmd_h_to_abort);

     ucmd->state = UCMD_STATE_TM_EXECING;

@@ -2158,6 +2159,7 @@
     sBUG_ON(irqs_disabled());

     spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
+
 out_locked_free:
     kfree(ucmd->cmpl);
     ucmd->cmpl = NULL;

Modified: trunk/scst/src/dev_handlers/scst_vdisk.c
===================================================================
--- trunk/scst/src/dev_handlers/scst_vdisk.c	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/src/dev_handlers/scst_vdisk.c	2007-09-25 09:46:36 UTC (rev 198)
@@ -651,6 +651,17 @@

     TRACE_ENTRY();

+    switch(cmd->queue_type) {
+    case SCST_CMD_QUEUE_ORDERED:
+        TRACE(TRACE_ORDER, "ORDERED cmd %p", cmd);
+        break;
+    case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
+        TRACE(TRACE_ORDER, "HQ cmd %p", cmd);
+        break;
+    default:
+        break;
+    }
+
     rc = scst_check_local_events(cmd);
     if (unlikely(rc != 0))
         goto out_done;

Modified: trunk/scst/src/scst_lib.c
===================================================================
--- trunk/scst/src/scst_lib.c	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/src/scst_lib.c	2007-09-25 09:46:36 UTC (rev 198)
@@ -404,7 +404,6 @@
     spin_lock_init(&tgt_dev->sn_lock);
     INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
     INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
-    INIT_LIST_HEAD(&tgt_dev->hq_cmd_list);
     tgt_dev->expected_sn = 1;
     tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots);
     tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
@@ -2427,47 +2426,6 @@
     return;
 }

-/* sn_lock supposed to be held and IRQ off */
-static inline int __scst_check_hq_cmd(struct scst_cmd *cmd)
-{
-    struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-    struct scst_cmd *hq;
-    int res;
-
-    TRACE_ENTRY();
-
-    /* According to SAM, the latest HQ cmd shall pass first */
-    hq = list_entry(tgt_dev->hq_cmd_list.next, typeof(*hq),
-        sn_cmd_list_entry);
-    if ((cmd == hq) && !test_bit(SCST_TGT_DEV_HQ_ACTIVE,
-            &tgt_dev->tgt_dev_flags)) {
-        TRACE_SN("Passing HQ cmd %p", cmd);
-        res = 1;
-        list_del(&cmd->sn_cmd_list_entry);
-        set_bit(SCST_TGT_DEV_HQ_ACTIVE, &tgt_dev->tgt_dev_flags);
-    } else {
-        TRACE_SN("Defer HQ cmd %p", cmd);
-        res = 0;
-        cmd->hq_deferred = 1;
-        tgt_dev->def_cmd_count++;
-    }
-
-    TRACE_EXIT_RES(res);
-    return res;
-}
-
-int scst_check_hq_cmd(struct scst_cmd *cmd)
-{
-    struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-    int res;
-
-    spin_lock_irq(&tgt_dev->sn_lock);
-    res = __scst_check_hq_cmd(cmd);
-    spin_unlock_irq(&tgt_dev->sn_lock);
-
-    return res;
-}
-
 /* No locks */
 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
 {
@@ -2476,38 +2434,14 @@

     spin_lock_irq(&tgt_dev->sn_lock);

-    if (unlikely(test_bit(SCST_TGT_DEV_HQ_ACTIVE, &tgt_dev->tgt_dev_flags))) {
-        if (!list_empty(&tgt_dev->hq_cmd_list)) {
-            int rc;
-            cmd = list_entry(tgt_dev->hq_cmd_list.next,
-                typeof(*cmd), sn_cmd_list_entry);
-            if (cmd->hq_deferred) {
-                TRACE_SN("Releasing deferred HQ cmd %p", cmd);
-                tgt_dev->def_cmd_count--;
-                cmd->hq_deferred = 0;
-                res = cmd;
-                /*
-                 * Since __scst_check_hq_cmd() is inline, a lot
-                 * of code should be optimized out
-                 */
-                clear_bit(SCST_TGT_DEV_HQ_ACTIVE,
-                    &tgt_dev->tgt_dev_flags);
-                rc = __scst_check_hq_cmd(res);
-                EXTRACHECKS_BUG_ON(rc != 1);
-                goto out_unlock;
-            }
-        }
-        TRACE_SN("Turning OFF hq_cmd_active (tgt_dev %p)",
-            tgt_dev);
-        clear_bit(SCST_TGT_DEV_HQ_ACTIVE, &tgt_dev->tgt_dev_flags);
-    }
-
 restart:
     list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
             sn_cmd_list_entry) {
+        EXTRACHECKS_BUG_ON(cmd->queue_type ==
+            SCST_CMD_QUEUE_HEAD_OF_QUEUE);
         if (cmd->sn == expected_sn) {
-            TRACE_SN("Deferred command %p (sn %ld) found",
-                cmd, cmd->sn);
+            TRACE_SN("Deferred command %p (sn %ld, set %d) found",
+                cmd, cmd->sn, cmd->sn_set);
             tgt_dev->def_cmd_count--;
             list_del(&cmd->sn_cmd_list_entry);
             if (res == NULL)
@@ -2528,6 +2462,8 @@

     list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
             sn_cmd_list_entry) {
+        EXTRACHECKS_BUG_ON(cmd->queue_type ==
+            SCST_CMD_QUEUE_HEAD_OF_QUEUE);
         if (cmd->sn == expected_sn) {
             atomic_t *slot = cmd->sn_slot;
             /*
@@ -2540,8 +2476,6 @@
             tgt_dev->def_cmd_count--;
             list_del(&cmd->sn_cmd_list_entry);
             spin_unlock_irq(&tgt_dev->sn_lock);
-            EXTRACHECKS_BUG_ON(cmd->queue_type ==
-                SCST_CMD_QUEUE_HEAD_OF_QUEUE);
             if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                     &cmd->cmd_flags)) {
                 scst_destroy_put_cmd(cmd);
@@ -2833,13 +2767,9 @@
 {
     struct scst_cmd *res = NULL;

-    if (out_of_sn_cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE) {
-        TRACE_SN("HQ out_of_sn_cmd %p", out_of_sn_cmd);
-        spin_lock_irq(&out_of_sn_cmd->tgt_dev->sn_lock);
-        list_del(&out_of_sn_cmd->sn_cmd_list_entry);
-        spin_unlock_irq(&out_of_sn_cmd->tgt_dev->sn_lock);
-        res = scst_check_deferred_commands(tgt_dev);
-    } else if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
+    EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
+
+    if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
         scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
         res = scst_check_deferred_commands(tgt_dev);
     } else {
@@ -2864,8 +2794,8 @@

     TRACE_ENTRY();

-    if (out_of_sn_cmd->no_sn) {
-        TRACE_SN("cmd %p with no_sn", out_of_sn_cmd);
+    if (!out_of_sn_cmd->sn_set) {
+        TRACE_SN("cmd %p without sn", out_of_sn_cmd);
         goto out;
     }

Modified: trunk/scst/src/scst_priv.h
===================================================================
--- trunk/scst/src/scst_priv.h	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/src/scst_priv.h	2007-09-25 09:46:36 UTC (rev 198)
@@ -90,9 +90,6 @@
 /* Set if a TM command is being performed */
 #define SCST_FLAG_TM_ACTIVE 2

-/* Set if scst_cmd_mem_work is scheduled */
-#define SCST_FLAG_CMD_MEM_WORK_SCHEDULED 3
-
 /**
 ** Return codes for cmd state process functions
 **/
@@ -222,8 +219,7 @@
 static inline struct scst_cmd *scst_check_deferred_commands(
     struct scst_tgt_dev *tgt_dev)
 {
-    if ((tgt_dev->def_cmd_count == 0) &&
-        likely(!test_bit(SCST_TGT_DEV_HQ_ACTIVE, &tgt_dev->tgt_dev_flags)))
+    if (tgt_dev->def_cmd_count == 0)
         return NULL;
     else
         return __scst_check_deferred_commands(tgt_dev);

Modified: trunk/scst/src/scst_targ.c
===================================================================
--- trunk/scst/src/scst_targ.c	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/scst/src/scst_targ.c	2007-09-25 09:46:36 UTC (rev 198)
@@ -634,7 +634,7 @@
         cmd->state = SCST_CMD_STATE_PRE_EXEC;
         break;
     }
-    if (cmd->no_sn)
+    if (cmd->set_sn_on_restart_cmd)
         scst_cmd_set_sn(cmd);
     /* Small context optimization */
     if ((pref_context == SCST_CONTEXT_TASKLET) ||
@@ -955,8 +955,9 @@
 {
     struct scst_cmd *c;

-    if (likely(cmd->queue_type != SCST_CMD_QUEUE_HEAD_OF_QUEUE))
+    if (likely(cmd->sn_set))
         scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
+
     c = scst_check_deferred_commands(cmd->tgt_dev);
     if (c != NULL) {
         unsigned long flags;
@@ -1813,42 +1814,27 @@
                 "thread context, rescheduling");
             res = SCST_CMD_STATE_RES_NEED_THREAD;
             scst_dec_on_dev_cmd(cmd);
-            goto out_dec_cmd_count;
+            goto out_put;
         } else {
             sBUG_ON(rc != SCST_EXEC_COMPLETED);
             goto out_unplug;
         }
     }

-    EXTRACHECKS_BUG_ON(cmd->no_sn);
+    if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
+        goto exec;

-    if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
-        /*
-         * W/o get() there will be a race, when cmd is executed and
-         * destroyed before "goto out_unplug"
-         */
-        scst_cmd_get(cmd);
-        if (scst_check_hq_cmd(cmd)) {
-            scst_cmd_put(cmd);
-            goto exec;
-        } else {
-            scst_dec_on_dev_cmd(cmd);
-            scst_cmd_put(cmd);
-            goto out_unplug;
-        }
-    }
+    sBUG_ON(!cmd->sn_set);

     expected_sn = tgt_dev->expected_sn;
     /* Optimized for lockless fast path */
-    if ((cmd->sn != expected_sn) || unlikely(test_bit(SCST_TGT_DEV_HQ_ACTIVE,
-            &tgt_dev->tgt_dev_flags))) {
+    if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
         spin_lock_irq(&tgt_dev->sn_lock);
         tgt_dev->def_cmd_count++;
         smp_mb();
-        barrier(); /* to reread expected_sn & hq_cmd_active */
+        barrier(); /* to reread expected_sn & hq_cmd_count */
         expected_sn = tgt_dev->expected_sn;
-        if ((cmd->sn != expected_sn) || test_bit(SCST_TGT_DEV_HQ_ACTIVE,
-                &tgt_dev->tgt_dev_flags)) {
+        if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
             /* We are under IRQ lock, but dev->dev_lock is BH one */
             int cmd_blocking = scst_pre_dec_on_dev_cmd(cmd);
             if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
@@ -1859,18 +1845,16 @@
                 cmd->state = SCST_CMD_STATE_DEV_DONE;
                 res = SCST_CMD_STATE_RES_CONT_SAME;
             } else {
-                TRACE_SN("Deferring cmd %p (sn=%ld, "
-                    "expected_sn=%ld, hq_cmd_active=%d)", cmd,
-                    cmd->sn, expected_sn,
-                    test_bit(SCST_TGT_DEV_HQ_ACTIVE,
-                        &tgt_dev->tgt_dev_flags));
+                TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
+                    "expected_sn=%ld)", cmd, cmd->sn,
+                    cmd->sn_set, expected_sn);
                 list_add_tail(&cmd->sn_cmd_list_entry,
                     &tgt_dev->deferred_cmd_list);
             }
             spin_unlock_irq(&tgt_dev->sn_lock);
             /* !! At this point cmd can be already freed !! */
             __scst_dec_on_dev_cmd(dev, cmd_blocking);
-            goto out_dec_cmd_count;
+            goto out_put;
         } else {
             TRACE_SN("Somebody incremented expected_sn %ld, "
                 "continuing", expected_sn);
@@ -1883,32 +1867,23 @@
     count = 0;
     while(1) {
         atomic_t *slot = cmd->sn_slot;
-        int hq = (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-        int inc_expected_sn_on_done = cmd->inc_expected_sn_on_done;
+        int inc_expected_sn = !cmd->inc_expected_sn_on_done &&
+            cmd->sn_set;
         rc = scst_do_send_to_midlev(cmd);
         if (rc == SCST_EXEC_NEED_THREAD) {
             TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                 "thread context, rescheduling");
             res = SCST_CMD_STATE_RES_NEED_THREAD;
-            if (unlikely(hq)) {
-                TRACE_SN("Rescheduling HQ cmd %p", cmd);
-                spin_lock_irq(&tgt_dev->sn_lock);
-                clear_bit(SCST_TGT_DEV_HQ_ACTIVE,
-                    &tgt_dev->tgt_dev_flags);
-                list_add(&cmd->sn_cmd_list_entry,
-                    &tgt_dev->hq_cmd_list);
-                spin_unlock_irq(&tgt_dev->sn_lock);
-            }
             scst_dec_on_dev_cmd(cmd);
             if (count != 0)
                 goto out_unplug;
             else
-                goto out_dec_cmd_count;
+                goto out_put;
         }
         sBUG_ON(rc != SCST_EXEC_COMPLETED);
         /* !! At this point cmd can be already freed !! */
         count++;
-        if ( !inc_expected_sn_on_done && likely(!hq))
+        if (inc_expected_sn)
             scst_inc_expected_sn(tgt_dev, slot);
         cmd = scst_check_deferred_commands(tgt_dev);
         if (cmd == NULL)
@@ -1921,7 +1896,7 @@
     if (dev->scsi_dev != NULL)
         generic_unplug_device(dev->scsi_dev->request_queue);

-out_dec_cmd_count:
+out_put:
     __scst_put();
     /* !! At this point sess, dev and tgt_dev can be already freed !! */

@@ -2293,11 +2268,41 @@
      * Check here also in order to avoid unnecessary delays of other
      * commands.
      */
-    if (unlikely(!cmd->sent_to_midlev) && (cmd->tgt_dev != NULL)) {
-        TRACE_SN("cmd %p was not sent to mid-lev (sn %ld)",
-            cmd, cmd->sn);
-        scst_unblock_deferred(cmd->tgt_dev, cmd);
-        cmd->sent_to_midlev = 1;
+    if (cmd->tgt_dev != NULL) {
+        if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
+            struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+
+            spin_lock_irq(&tgt_dev->sn_lock);
+            tgt_dev->hq_cmd_count--;
+            spin_unlock_irq(&tgt_dev->sn_lock);
+
+            EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
+
+            /*
+             * There is no problem in checking hq_cmd_count in the
+             * non-locked state. In the worst case we will only have
+             * unneeded run of the deferred commands.
+             */
+            if (tgt_dev->hq_cmd_count == 0) {
+                struct scst_cmd *c =
+                    scst_check_deferred_commands(tgt_dev);
+                if (c != NULL) {
+                    spin_lock_irq(&c->cmd_lists->cmd_list_lock);
+                    TRACE_SN("Adding cmd %p to active cmd list", c);
+                    list_add_tail(&c->cmd_list_entry,
+                        &c->cmd_lists->active_cmd_list);
+                    wake_up(&c->cmd_lists->cmd_list_waitQ);
+                    spin_unlock_irq(&c->cmd_lists->cmd_list_lock);
+                }
+            }
+        }
+
+        if (unlikely(!cmd->sent_to_midlev)) {
+            TRACE_SN("cmd %p was not sent to mid-lev (sn %ld, set %d)",
+                cmd, cmd->sn, cmd->sn_set);
+            scst_unblock_deferred(cmd->tgt_dev, cmd);
+            cmd->sent_to_midlev = 1;
+        }
     }

     if (atomic && !cmd->tgtt->xmit_response_atomic) {
@@ -2501,6 +2506,7 @@
         }
         cmd->sn_slot = tgt_dev->cur_sn_slot;
         cmd->sn = tgt_dev->curr_sn;
+        tgt_dev->prev_cmd_ordered = 0;
     } else {
         TRACE(TRACE_MINOR, "%s", "Not enough SN slots");
@@ -2540,10 +2546,9 @@
         TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
             "(op %x)", cmd, cmd->cdb[0]);
         spin_lock_irqsave(&tgt_dev->sn_lock, flags);
-        /* Add in the head as required by SAM */
-        list_add(&cmd->sn_cmd_list_entry, &tgt_dev->hq_cmd_list);
+        tgt_dev->hq_cmd_count++;
        spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
-        break;
+        goto out;

     default:
         PRINT_ERROR_PR("Unsupported queue type %d, treating it as "
@@ -2559,7 +2564,8 @@
         tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
         tgt_dev->cur_sn_slot-tgt_dev->sn_slots);

-    cmd->no_sn = 0;
+    cmd->sn_set = 1;
+out:
     return;
 }
@@ -2652,7 +2658,7 @@
                 "Anonymous" : cmd->sess->initiator_name);
             goto out_busy;
         }
-        if (!cmd->no_sn)
+        if (!cmd->set_sn_on_restart_cmd)
             scst_cmd_set_sn(cmd);
     } else if (res < 0) {
         TRACE_DBG("Finishing cmd %p", cmd);
@@ -3388,8 +3394,9 @@
         }
         scst_cmd_get(cmd);
         spin_unlock_irq(&sess->sess_list_lock);
-        TRACE(TRACE_MGMT, "Cmd %p for tag %llu (sn %ld) found, "
-            "aborting it", cmd, mcmd->tag, cmd->sn);
+        TRACE(TRACE_MGMT, "Cmd %p for tag %llu (sn %ld, set %d, "
+            "queue_type %x) found, aborting it", cmd, mcmd->tag,
+            cmd->sn, cmd->sn_set, cmd->queue_type);
         mcmd->cmd_to_abort = cmd;
         if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
             PRINT_ERROR_PR("ABORT TASK: LUN mismatch: mcmd LUN %Lx, "
@@ -3988,7 +3995,7 @@
     if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
         switch(sess->init_phase) {
         case SCST_SESS_IPH_INITING:
-            TRACE_DBG("Moving mcmd %p to init deferred mcmd list",
+            TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
                 mcmd);
             list_add_tail(&mcmd->mgmt_cmd_list_entry,
                 &sess->init_deferred_mcmd_list);

Modified: trunk/usr/fileio/common.c
===================================================================
--- trunk/usr/fileio/common.c	2007-09-25 09:39:04 UTC (rev 197)
+++ trunk/usr/fileio/common.c	2007-09-25 09:46:36 UTC (rev 198)
@@ -184,6 +184,17 @@

     TRACE_ENTRY();

+    switch(cmd->queue_type) {
+    case SCST_CMD_QUEUE_ORDERED:
+        TRACE(TRACE_ORDER, "ORDERED cmd_h %d", vcmd->cmd->cmd_h);
+        break;
+    case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
+        TRACE(TRACE_ORDER, "HQ cmd_h %d", vcmd->cmd->cmd_h);
+        break;
+    default:
+        break;
+    }
+
     memset(reply, 0, sizeof(*reply));
     vcmd->reply->cmd_h = vcmd->cmd->cmd_h;
     vcmd->reply->subcode = vcmd->cmd->subcode;
@@ -1601,8 +1612,8 @@
     }

     if (err < 0) {
-        PRINT_ERROR_PR("write() returned %Ld from %zd (errno %d)",
-            (uint64_t)err, length, errno);
+        PRINT_ERROR_PR("write() returned %Ld from %zd (errno %d, cmd_h "
+            "%x)", (uint64_t)err, length, errno, vcmd->cmd->cmd_h);
         if (err == -EAGAIN)
             set_busy(reply);
         else {
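
To make the new ordering scheme easier to follow: after this commit, HEAD OF QUEUE commands are no longer kept on a per-tgt_dev hq_cmd_list guarded by the SCST_TGT_DEV_HQ_ACTIVE flag. They simply bump tgt_dev->hq_cmd_count and go straight to execution, while SN-ordered commands are deferred whenever their SN is not the expected one or any HQ command is still in flight; when an HQ command completes and the count drops back to zero, the deferred list is re-checked. The sketch below is a simplified, stand-alone model of that accounting, not SCST code: the names (toy_tgt_dev, toy_cmd, etc.) are invented for illustration and all locking is omitted.

/*
 * Toy model of SN/HQ command accounting after this commit.
 * Not SCST code; for illustration only.
 */
#include <stdio.h>

struct toy_tgt_dev {
    unsigned long expected_sn;  /* next SN allowed to execute */
    int hq_cmd_count;           /* HEAD OF QUEUE commands currently in flight */
};

struct toy_cmd {
    unsigned long sn;           /* valid only when sn_set != 0 */
    int sn_set;                 /* HQ commands never get an SN */
    int head_of_queue;
};

/* Returns 1 if the command may execute now, 0 if it must be deferred. */
static int toy_can_execute(struct toy_tgt_dev *t, struct toy_cmd *c)
{
    if (c->head_of_queue) {
        t->hq_cmd_count++;      /* HQ commands bypass SN ordering, only counted */
        return 1;
    }
    return (c->sn == t->expected_sn) && (t->hq_cmd_count == 0);
}

/* Called on command completion; re-enables deferred commands. */
static void toy_cmd_done(struct toy_tgt_dev *t, struct toy_cmd *c)
{
    if (c->head_of_queue)
        t->hq_cmd_count--;      /* at zero, deferred commands are re-checked */
    else if (c->sn_set)
        t->expected_sn++;       /* advance the expected SN */
}

int main(void)
{
    struct toy_tgt_dev t = { .expected_sn = 1, .hq_cmd_count = 0 };
    struct toy_cmd ordered = { .sn = 2, .sn_set = 1 };
    struct toy_cmd hq = { .head_of_queue = 1 };

    printf("HQ runs now: %d\n", toy_can_execute(&t, &hq));        /* 1 */
    printf("sn=2 runs now: %d\n", toy_can_execute(&t, &ordered)); /* 0: wrong SN, HQ active */
    toy_cmd_done(&t, &hq);
    t.expected_sn = 2;          /* pretend the sn=1 command completed */
    printf("sn=2 runs now: %d\n", toy_can_execute(&t, &ordered)); /* 1 */
    return 0;
}

In the actual patch the counter is manipulated under tgt_dev->sn_lock: the increment happens in scst_cmd_set_sn() for HEAD OF QUEUE commands, and the decrement plus the re-run of scst_check_deferred_commands() happen on the response path once hq_cmd_count reaches zero.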