From: <vl...@us...> - 2007-05-31 17:12:00
Revision: 121 http://svn.sourceforge.net/scst/?rev=121&view=rev Author: vlnb Date: 2007-05-31 10:11:57 -0700 (Thu, 31 May 2007) Log Message: ----------- - Module scst_user and user space utility to test it added - Support for per-target default security groups added - FILEIO made multithreaded - BLOCKIO made async - Other improvements, fixes and cleanups Modified Paths: -------------- trunk/Makefile trunk/scst/ChangeLog trunk/scst/README trunk/scst/include/scsi_tgt.h trunk/scst/kernel/in-tree/Kconfig.scsi_tgt trunk/scst/kernel/in-tree/Makefile.scsi_tgt trunk/scst/src/Makefile trunk/scst/src/dev_handlers/Makefile trunk/scst/src/dev_handlers/scst_vdisk.c trunk/scst/src/scst.c trunk/scst/src/scst_lib.c trunk/scst/src/scst_priv.h trunk/scst/src/scst_proc.c trunk/scst/src/scst_targ.c Added Paths: ----------- trunk/scst/include/scst_user.h trunk/scst/src/dev_handlers/scst_user.c trunk/usr/ trunk/usr/fileio/ trunk/usr/fileio/Makefile trunk/usr/fileio/common.c trunk/usr/fileio/common.h trunk/usr/fileio/debug.c trunk/usr/fileio/debug.h trunk/usr/fileio/fileio.c Modified: trunk/Makefile =================================================================== --- trunk/Makefile 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/Makefile 2007-05-31 17:11:57 UTC (rev 121) @@ -22,33 +22,39 @@ QLA_INI_DIR=qla2x00t QLA_DIR=qla2x00t/qla2x00-target LSI_DIR=mpt +USR_DIR=usr/fileio all: cd $(SCST_DIR) && $(MAKE) $@ @if [ -d $(QLA_DIR) ]; then cd $(QLA_DIR) && $(MAKE) $@; fi # @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi + @if [ -d $(USR_DIR) ]; then cd $(USR_DIR) && $(MAKE) $@; fi install: cd $(SCST_DIR) && $(MAKE) $@ @if [ -d $(QLA_DIR) ]; then cd $(QLA_DIR) && $(MAKE) $@; fi - @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi +# @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi + @if [ -d $(USR_DIR) ]; then cd $(USR_DIR) && $(MAKE) $@; fi uninstall: cd $(SCST_DIR) && $(MAKE) $@ @if [ -d $(QLA_DIR) ]; then cd $(QLA_DIR) && $(MAKE) $@; fi @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi + @if [ -d $(USR_DIR) ]; then cd $(USR_DIR) && $(MAKE) $@; fi clean: cd $(SCST_DIR) && $(MAKE) $@ @if [ -d $(QLA_INI_DIR) ]; then cd $(QLA_INI_DIR) && $(MAKE) $@; fi @if [ -d $(QLA_DIR) ]; then cd $(QLA_DIR) && $(MAKE) $@; fi @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi + @if [ -d $(USR_DIR) ]; then cd $(USR_DIR) && $(MAKE) $@; fi extraclean: cd $(SCST_DIR) && $(MAKE) $@ @if [ -d $(QLA_INI_DIR) ]; then cd $(QLA_INI_DIR) && $(MAKE) $@; fi @if [ -d $(QLA_DIR) ]; then cd $(QLA_DIR) && $(MAKE) $@; fi @if [ -d $(LSI_DIR) ]; then cd $(LSI_DIR) && $(MAKE) $@; fi + @if [ -d $(USR_DIR) ]; then cd $(USR_DIR) && $(MAKE) $@; fi scst: cd $(SCST_DIR) && $(MAKE) @@ -97,6 +103,21 @@ lsi_extraclean: cd $(LSI_DIR) && $(MAKE) extraclean +usr: + cd $(USR_DIR) && $(MAKE) + +usr_install: + cd $(USR_DIR) && $(MAKE) install + +usr_uninstall: + cd $(USR_DIR) && $(MAKE) uninstall + +usr_clean: + cd $(USR_DIR) && $(MAKE) clean + +usr_extraclean: + cd $(USR_DIR) && $(MAKE) extraclean + help: @echo " all (the default) : make all" @echo " clean : clean files" @@ -121,6 +142,12 @@ @echo " lsi_extraclean : lsi target: clean + clean dependencies" @echo " lsi_install : lsi target: install" @echo " lsi_uninstall : lsi target: uninstall" + @echo "" + @echo " usr : make usr target" + @echo " usr_clean : usr target: clean " + @echo " usr_extraclean : usr target: clean + clean dependencies" + @echo " usr_install : usr target: install" + @echo " usr_uninstall : usr target: uninstall" @echo " Notes :" @echo " - install 
and uninstall must be made as root" @@ -128,3 +155,4 @@ qla qla_install qla_uninstall qla_clean qla_extraclean \ lsi lsi_install lsi_uninstall lsi_clean lsi_extraclean \ scst scst_install scst_uninstall scst_clean scst_extraclean + usr usr_install usr_uninstall usr_clean usr_extraclean Modified: trunk/scst/ChangeLog =================================================================== --- trunk/scst/ChangeLog 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/ChangeLog 2007-05-31 17:11:57 UTC (rev 121) @@ -4,12 +4,14 @@ - FILEIO was renamed to VDISK. BLOCKIO added to it, thanks to Ross S. W. Walker and Vu Pham. - - Internal locking and execution context were reimplemnted. Particularly, - implemented full support for SCSI task attributes (SIMPLE, ORDERED, - etc.). + - Updated to work on 2.6.20.x, no update for 2.6.21.x isn't needed - - Updated to work on 2.6.20.x, no update for 2.6.21.x isn't needed. + - Internal locking and execution context were reimplemnted. As some of + the results now FILEIO has >1 IO threads and implemented full support + for SCSI task attributes (SIMPLE, ORDERED, etc.). + - Ability to have per-target default security groups added. + - Updated to work on 2.6.19.x, thanks to Ming Zhang. - Internal threads management reimplemented based on kthread*() API, Modified: trunk/scst/README =================================================================== --- trunk/scst/README 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/README 2007-05-31 17:11:57 UTC (rev 121) @@ -106,7 +106,7 @@ or ever disk partition, where there is no file systems overhead. Using block devices comparing to sending SCSI commands directly to SCSI mid-level via scsi_do_req()/scsi_execute_async() has advantage that data -are transfered via system cache, so it is possible to fully benefit from +are transferred via system cache, so it is possible to fully benefit from caching and read ahead performed by Linux's VM subsystem. The only disadvantage here that in the FILEIO mode there is superfluous data copying between the cache and SCST's buffers. This issue is going to be @@ -145,12 +145,12 @@ - EXTRACHECKS - adds extra validity checks in the various places. - DEBUG_TM - turns on task management functions debugging, when on - LUN 0 in the "Default" group some of the commands will be delayed for - about 60 sec., so making the remote initiator send TM functions, eg - ABORT TASK and TARGET RESET. Also set TM_DBG_GO_OFFLINE symbol in the - Makefile to 1 if you want that the device eventually become - completely unresponsive, or to 0 otherwise to circle around ABORTs - and RESETs code. Needs DEBUG turned on. + LUN 0 in the default access control group some of the commands will + be delayed for about 60 sec., so making the remote initiator send TM + functions, eg ABORT TASK and TARGET RESET. Also set TM_DBG_GO_OFFLINE + symbol in the Makefile to 1 if you want that the device eventually + become completely unresponsive, or to 0 otherwise to circle around + ABORTs and RESETs code. Needs DEBUG turned on. - STRICT_SERIALIZING - makes SCST send all commands to underlying SCSI device synchronously, one after one. This makes task management more @@ -235,19 +235,21 @@ Access and devices visibility management allows for an initiator or group of initiators to have different limited set of LUs/LUNs (security group) each with appropriate access permissions. Initiator is -represented as a SCST session. 
Session is binded to security group on -its registration time by character "name" parameter of the registration +represented as a SCST session. Session is bound to security group on its +registration time by character "name" parameter of the registration function, which provided by target driver, based on its internal -authentication. For example, for FC "name" could be WWN or just loop -ID. For iSCSI this could be iSCSI login credentials or iSCSI initiator -name. Each security group has set of names assigned to it by system -administrator. Session is binded to security group with provided name. -If no such groups found, the session binded to "Default" group. +authentication. For example, for FC "name" could be WWN or just loop ID. +For iSCSI this could be iSCSI login credentials or iSCSI initiator name. +Each security group has set of names assigned to it by system +administrator. Session is bound to security group with provided name. If +no such groups found, the session bound to either "Default_target_name", +or "Default" group, depending from either "Default_target_name" exists +or not. In "Default_target_name" target name means name of the target. In /proc/scsi_tgt each group represented as "groups/GROUP_NAME/" subdirectory. In it there are files "devices" and "users". File "devices" lists all devices and their LUNs in the group, file "users" -lists all names that should be binded to this group. +lists all names that should be bound to this group. To configure access and devices visibility management SCST provides the following files and directories under /proc/scsi_tgt: @@ -310,7 +312,7 @@ device "NAME" with block size "BLOCK_SIZE" bytes with flags "FLAGS". "PATH" could be empty only for VDISK CDROM. "BLOCK_SIZE" and "FLAGS" are valid only for disk VDISK. The block size must be - power of 2 and >= 512 bytes Default is 512. Possible flags: + power of 2 and >= 512 bytes. Default is 512. Possible flags: - WRITE_THROUGH - write back caching disabled @@ -479,9 +481,16 @@ - Disable in Makefile TRACING, DEBUG -IMPORTANT: Some of those options enabled by default, i.e. SCST is optimized -========= currently rather for development, not for performance. + - If your initiator(s) use dedicated exported from the target virtual + SCSI devices and have more or equal amount of memory, than the + target, it is recommended to use O_DIRECT option (currently it is + available only with fileio_tgt user space program) or BLOCKIO. With + them you could have up to 100% increase in throughput. +IMPORTANT: Some of the compilation options enabled by default, i.e. SCST +========= is optimized currently rather for development and bug hunting, + not for performance. + 4. For kernel: - Don't enable debug/hacking features, i.e. use them as they are by @@ -496,7 +505,11 @@ parameters in /sys/block/device directory, they also affect the performance. If you find the best values, please share them with us. - - Also it is recommended to turn the kernel preemption off, i.e. set + - Use on the target deadline IO scheduler with read_expire and + write_expire increased on all exported devices to 5000 and 20000 + correspondingly. + + - It is recommended to turn the kernel preemption off, i.e. set the kernel preemption model to "No Forced Preemption (Server)". 5. For hardware. 
Modified: trunk/scst/include/scsi_tgt.h =================================================================== --- trunk/scst/include/scsi_tgt.h 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/include/scsi_tgt.h 2007-05-31 17:11:57 UTC (rev 121) @@ -39,7 +39,7 @@ /* Version numbers, the same as for the kernel */ #define SCST_VERSION_CODE 0x000906 #define SCST_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -#define SCST_VERSION_STRING "0.9.6-pre1" +#define SCST_VERSION_STRING "0.9.6-pre2" /************************************************************* ** States of command processing state machine @@ -481,8 +481,15 @@ void (*on_free_cmd) (struct scst_cmd *cmd); /* - * This function allows the target driver to handle data buffer + * This function allows target driver to handle data buffer * allocations on its own. + * + * Target driver doesn't have to always allocate buffer in this + * function, but if it decide to do it, it must check that + * scst_cmd_get_data_buff_alloced() returns 0, otherwise to avoid + * double buffer allocation and memory leaks alloc_data_buf() shall + * fail. + * * Shall return 0 in case of success or < 0 (preferrably -ENOMEM) * in case of error, or > 0 if the regular SCST allocation should be * done. In case of returning successfully, scst_cmd->data_buf_alloced @@ -492,9 +499,9 @@ * desired or fails and consequently < 0 is returned, this function * will be re-called in thread context. * - * Please note that the driver will have to handle all relevant details - * such as scatterlist setup, highmem, freeing the allocated memory, ... - * itself. + * Please note that the driver will have to handle itself all relevant + * details such as scatterlist setup, highmem, freeing the allocated + * memory, etc. * * OPTIONAL. */ @@ -593,6 +600,14 @@ */ const char name[50]; + /* + * Number of additional threads to the pool of dedicated threads. + * Used if xmit_response() or rdy_to_xfer() is blocking. + * It is the target driver's duty to ensure that not more, than that + * number of threads, are blocked in those functions at any time. + */ + int threads_num; + /* Private, must be inited to 0 by memset() */ /* List of targets per template, protected by scst_mutex */ @@ -620,7 +635,6 @@ unsigned parse_atomic:1; unsigned exec_atomic:1; unsigned dev_done_atomic:1; - unsigned dedicated_thread:1; /* Set, if no /proc files should be automatically created by SCST */ unsigned no_proc:1; @@ -748,6 +762,12 @@ /* SCSI type of the supported device. MUST HAVE */ int type; + /* + * Number of dedicated threads. If 0 - no dedicated threads will + * be created, if <0 - creation of dedicated threads is prohibited. + */ + int threads_num; + struct module *module; /* private: */ @@ -795,6 +815,9 @@ /* Used for storage of target driver private stuff */ void *tgt_priv; + + /* Name on the default security group ("Default_target_name") */ + char *default_group_name; }; /* Hash size and hash fn for hash based lun translation */ @@ -1314,8 +1337,8 @@ /* internal tmp list entry */ struct list_head extra_tgt_dev_list_entry; - /* Dedicated thread. Doesn't need any protection. */ - struct task_struct *thread; + /* List of dedicated threads. Doesn't need any protection. */ + struct list_head threads_list; }; /* @@ -1419,9 +1442,15 @@ /* * Registers and returns target adapter - * Returns new target structure on success or NULL otherwise + * Returns new target structure on success or NULL otherwise. 
+ * + * If parameter "target_name" isn't NULL, then new security group with name + * "Default_##target_name" will be created and all sessions, which don't + * belong to any defined security groups, will be assigned to it instead of + * the "Default" one. */ -struct scst_tgt *scst_register(struct scst_tgt_template *vtt); +struct scst_tgt *scst_register(struct scst_tgt_template *vtt, + const char *target_name); /* * Unregisters target adapter Added: trunk/scst/include/scst_user.h =================================================================== --- trunk/scst/include/scst_user.h (rev 0) +++ trunk/scst/include/scst_user.h 2007-05-31 17:11:57 UTC (rev 121) @@ -0,0 +1,257 @@ +/* + * include/scst_user.h + * + * Copyright (C) 2007 Vladislav Bolkhovitin <vs...@vl...> + * + * Contains macroses for execution tracing and error reporting + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2 + * of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SCST_USER_H +#define __SCST_USER_H + +#include <scst_const.h> + +#define DEV_USER_NAME "scst_user" +#define DEV_USER_PATH "/dev/" +#define DEV_USER_VERSION 96 + +/* + * Chosen so sizeof(scst_user_sess) <= sizeof(scst_user_scsi_cmd_exec) + * (the largest one) + */ +#define SCST_MAX_NAME 45 + +#define SCST_USER_PARSE_STANDARD 0 +#define SCST_USER_PARSE_CALL 1 +#define SCST_USER_PARSE_EXCEPTION 2 +#define SCST_USER_MAX_PARSE_OPT SCST_USER_PARSE_EXCEPTION + +#define SCST_USER_ON_FREE_CMD_CALL 0 +#define SCST_USER_ON_FREE_CMD_IGNORE 1 +#define SCST_USER_MAX_ON_FREE_CMD_OPT SCST_USER_ON_FREE_CMD_IGNORE + +#define SCST_USER_MEM_NO_REUSE 0 +#define SCST_USER_MEM_REUSE_READ 1 +#define SCST_USER_MEM_REUSE_WRITE 2 +#define SCST_USER_MEM_REUSE_ALL 3 +#define SCST_USER_MAX_MEM_REUSE_OPT SCST_USER_MEM_REUSE_ALL + +#define SCST_USER_PRIO_QUEUE_SINGLE 0 +#define SCST_USER_PRIO_QUEUE_SEPARATE 1 +#define SCST_USER_MAX_PRIO_QUEUE_OPT SCST_USER_PRIO_QUEUE_SEPARATE + +#define SCST_USER_PARTIAL_TRANSFERS_NOT_SUPPORTED 0 +#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED_ORDERED 1 +#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED 2 +#define SCST_USER_MAX_PARTIAL_TRANSFERS_OPT SCST_USER_PARTIAL_TRANSFERS_SUPPORTED + +#ifndef aligned_u64 +#define aligned_u64 uint64_t __attribute__((aligned(8))) +#endif + +/************************************************************* + ** Private ucmd states + *************************************************************/ +#define UCMD_STATE_NEW 0 +#define UCMD_STATE_PARSING 1 +#define UCMD_STATE_BUF_ALLOCING 2 +#define UCMD_STATE_EXECING 3 +#define UCMD_STATE_ON_FREEING 4 +#define UCMD_STATE_ON_FREE_SKIPPED 5 +#define UCMD_STATE_ON_CACHE_FREEING 6 +#define UCMD_STATE_TM_EXECING 7 + +#define UCMD_STATE_ATTACH_SESS 0x20 +#define UCMD_STATE_DETACH_SESS 0x21 + +/* Must be changed under cmd_lists.cmd_list_lock */ +#define UCMD_STATE_SENT_MASK 0x10000 +#define UCMD_STATE_RECV_MASK 0x20000 +#define UCMD_STATE_JAMMED_MASK 0x40000 + +#define UCMD_STATE_MASK (UCMD_STATE_SENT_MASK | \ + UCMD_STATE_RECV_MASK | \ + UCMD_STATE_JAMMED_MASK) + +struct scst_user_opt +{ + uint8_t parse_type; + uint8_t on_free_cmd_type; + uint8_t memory_reuse_type; + uint8_t prio_queue_type; + uint8_t 
partial_transfers_type; + int32_t partial_len; +}; + +struct scst_user_dev_desc +{ + uint8_t version; + uint8_t type; + struct scst_user_opt opt; + uint32_t block_size; + char name[SCST_MAX_NAME]; +}; + +struct scst_user_sess +{ + aligned_u64 sess_h; + aligned_u64 lun; + uint8_t rd_only; + char initiator_name[SCST_MAX_NAME]; +}; + +struct scst_user_scsi_cmd_parse +{ + aligned_u64 sess_h; + + uint8_t cdb[SCST_MAX_CDB_SIZE]; + int32_t cdb_len; + + uint32_t timeout; + int32_t bufflen; + + uint8_t queue_type; + uint8_t data_direction; + + uint8_t expected_values_set; + uint8_t expected_data_direction; + int32_t expected_transfer_len; +}; + +struct scst_user_scsi_cmd_alloc_mem +{ + aligned_u64 sess_h; + + uint8_t cdb[SCST_MAX_CDB_SIZE]; + int32_t cdb_len; + + int32_t alloc_len; + + uint8_t queue_type; + uint8_t data_direction; +}; + +struct scst_user_scsi_cmd_exec +{ + aligned_u64 sess_h; + + uint8_t cdb[SCST_MAX_CDB_SIZE]; + int32_t cdb_len; + + int32_t data_len; + int32_t bufflen; + int32_t alloc_len; + aligned_u64 pbuf; + uint8_t queue_type; + uint8_t data_direction; + uint8_t partial; + uint32_t timeout; + + uint32_t parent_cmd_h; + int32_t parent_cmd_data_len; + uint32_t partial_offset; +}; + +struct scst_user_scsi_on_free_cmd +{ + aligned_u64 pbuf; + int32_t resp_data_len; + uint8_t buffer_cached; + uint8_t status; +}; + +struct scst_user_on_cached_mem_free +{ + aligned_u64 pbuf; +}; + +struct scst_user_tm +{ + aligned_u64 sess_h; + uint32_t fn; + uint32_t cmd_h_to_abort; +}; + +struct scst_user_get_cmd +{ + aligned_u64 preply; + uint32_t cmd_h; + uint32_t subcode; + union { + struct scst_user_sess sess; + struct scst_user_scsi_cmd_parse parse_cmd; + struct scst_user_scsi_cmd_alloc_mem alloc_cmd; + struct scst_user_scsi_cmd_exec exec_cmd; + struct scst_user_scsi_on_free_cmd on_free_cmd; + struct scst_user_on_cached_mem_free on_cached_mem_free; + struct scst_user_tm tm_cmd; + }; +}; + +struct scst_user_scsi_cmd_reply_parse +{ + uint8_t queue_type; + uint8_t data_direction; + int32_t data_len; + int32_t bufflen; +}; + +struct scst_user_scsi_cmd_reply_alloc_mem +{ + aligned_u64 pbuf; +}; + +struct scst_user_scsi_cmd_reply_exec +{ + int32_t resp_data_len; + aligned_u64 pbuf; + +#define SCST_EXEC_REPLY_BACKGROUND 0 +#define SCST_EXEC_REPLY_COMPLETED 1 + uint8_t reply_type; + + uint8_t status; + uint8_t sense_len; + aligned_u64 psense_buffer; +}; + +struct scst_user_reply_cmd +{ + uint32_t cmd_h; + uint32_t subcode; + union { + int32_t result; + struct scst_user_scsi_cmd_reply_parse parse_reply; + struct scst_user_scsi_cmd_reply_alloc_mem alloc_reply; + struct scst_user_scsi_cmd_reply_exec exec_reply; + }; +}; + +#define SCST_USER_REGISTER_DEVICE _IOW('u', 1, struct scst_user_dev_desc) +#define SCST_USER_SET_OPTIONS _IOW('u', 3, struct scst_user_opt) +#define SCST_USER_GET_OPTIONS _IOR('u', 4, struct scst_user_opt) +#define SCST_USER_REPLY_AND_GET_CMD _IOWR('u', 5, struct scst_user_get_cmd) +#define SCST_USER_REPLY_AND_GET_PRIO_CMD _IOWR('u', 6, struct scst_user_get_cmd) +#define SCST_USER_REPLY_CMD _IOW('u', 7, struct scst_user_reply_cmd) + +/* Values for scst_user_get_cmd.subcode */ +#define SCST_USER_ATTACH_SESS _IOR('s', UCMD_STATE_ATTACH_SESS, struct scst_user_sess) +#define SCST_USER_DETACH_SESS _IOR('s', UCMD_STATE_DETACH_SESS, struct scst_user_sess) +#define SCST_USER_PARSE _IOWR('s', UCMD_STATE_PARSING, struct scst_user_scsi_cmd_parse) +#define SCST_USER_ALLOC_MEM _IOWR('s', UCMD_STATE_BUF_ALLOCING, struct scst_user_scsi_cmd_alloc_mem) +#define SCST_USER_EXEC _IOWR('s', 
UCMD_STATE_EXECING, struct scst_user_scsi_cmd_exec) +#define SCST_USER_ON_FREE_CMD _IOR('s', UCMD_STATE_ON_FREEING, struct scst_user_scsi_on_free_cmd) +#define SCST_USER_ON_CACHED_MEM_FREE _IOR('s', UCMD_STATE_ON_CACHE_FREEING, struct scst_user_on_cached_mem_free) +#define SCST_USER_TASK_MGMT _IOWR('s', UCMD_STATE_TM_EXECING, struct scst_user_tm) + +#endif /* __SCST_USER_H */ Modified: trunk/scst/kernel/in-tree/Kconfig.scsi_tgt =================================================================== --- trunk/scst/kernel/in-tree/Kconfig.scsi_tgt 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/kernel/in-tree/Kconfig.scsi_tgt 2007-05-31 17:11:57 UTC (rev 121) @@ -63,6 +63,13 @@ ---help--- SCSI TARGET handler for virtual disk and/or cdrom device. +config SCSI_TARGET_USER + tristate "SCSI user space virtual target devices support" + default SCSI_TARGET + depends on SCSI && PROC_FS && SCSI_TARGET + ---help--- + SCSI TARGET handler for virtual user space device. + config SCSI_TARGET_EXTRACHECKS bool "Extrachecks support" ---help--- Modified: trunk/scst/kernel/in-tree/Makefile.scsi_tgt =================================================================== --- trunk/scst/kernel/in-tree/Makefile.scsi_tgt 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/kernel/in-tree/Makefile.scsi_tgt 2007-05-31 17:11:57 UTC (rev 121) @@ -15,4 +15,5 @@ obj-$(CONFIG_SCSI_TARGET_CHANGER) += scst_changer.o obj-$(CONFIG_SCSI_TARGET_RAID) += scst_raid.o obj-$(CONFIG_SCSI_TARGET_PROCESSOR) += scst_processor.o -obj-$(CONFIG_SCSI_TARGET_VDISK ) += scst_vdisk.o +obj-$(CONFIG_SCSI_TARGET_VDISK) += scst_vdisk.o +obj-$(CONFIG_SCSI_TARGET_USER) += scst_user.o Modified: trunk/scst/src/Makefile =================================================================== --- trunk/scst/src/Makefile 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/src/Makefile 2007-05-31 17:11:57 UTC (rev 121) @@ -70,6 +70,7 @@ install -d $(INSTALL_DIR_H) install -m 644 ../include/scsi_tgt.h $(INSTALL_DIR_H) install -m 644 ../include/scst_debug.h $(INSTALL_DIR_H) + install -m 644 ../include/scst_user.h $(INSTALL_DIR_H) install -m 644 ../include/scst_const.h $(INSTALL_DIR_H) ifneq ($(MODS_VERS),) rm -f $(INSTALL_DIR_H)/Module.symvers Modified: trunk/scst/src/dev_handlers/Makefile =================================================================== --- trunk/scst/src/dev_handlers/Makefile 2007-05-31 17:10:55 UTC (rev 120) +++ trunk/scst/src/dev_handlers/Makefile 2007-05-31 17:11:57 UTC (rev 121) @@ -30,7 +30,7 @@ SCST_INC_DIR := $(SUBDIRS)/../include obj-m := scst_cdrom.o scst_changer.o scst_disk.o scst_modisk.o scst_tape.o \ - scst_vdisk.o scst_raid.o scst_processor.o + scst_vdisk.o scst_raid.o scst_processor.o scst_user.o obj-$(CONFIG_SCSI_TARGET_DISK) += scst_disk.o obj-$(CONFIG_SCSI_TARGET_TAPE) += scst_tape.o @@ -40,6 +40,7 @@ obj-$(CONFIG_SCSI_TARGET_RAID) += scst_raid.o obj-$(CONFIG_SCSI_TARGET_PROCESSOR) += scst_processor.o obj-$(CONFIG_SCSI_TARGET_VDISK) += scst_vdisk.o +obj-$(CONFIG_SCSI_TARGET_USER) += scst_user.o else ifeq ($(KDIR),) Added: trunk/scst/src/dev_handlers/scst_user.c =================================================================== --- trunk/scst/src/dev_handlers/scst_user.c (rev 0) +++ trunk/scst/src/dev_handlers/scst_user.c 2007-05-31 17:11:57 UTC (rev 121) @@ -0,0 +1,3039 @@ +/* + * scst_user.c + * + * Copyright (C) 2007 Vladislav Bolkhovitin <vs...@vl...> + * + * SCSI virtual user space device handler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * 
as published by the Free Software Foundation, version 2 + * of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kthread.h> +#include <linux/delay.h> +#include <linux/poll.h> + +#define LOG_PREFIX DEV_USER_NAME + +#include "scsi_tgt.h" +#include "scst_user.h" +#include "scst_dev_handler.h" + +#ifndef CONFIG_NOHIGHMEM +#warning HIGHMEM kernel configurations are not supported. Consider \ + changing VMSPLIT option or using 64-bit configuration. +#endif + +#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB) +#define DEV_USER_SLAB_FLAGS ( SLAB_RED_ZONE | SLAB_POISON ) +#else +#define DEV_USER_SLAB_FLAGS 0L +#endif + +#define DEV_USER_MAJOR 237 +#define DEV_USER_CMD_HASH_ORDER 6 +#define DEV_USER_TM_TIMEOUT (10*HZ) +#define DEV_USER_ATTACH_TIMEOUT (5*HZ) +#define DEV_USER_DETACH_TIMEOUT (5*HZ) +#define DEV_USER_PRE_UNREG_POLL_TIME (HZ/10) + +struct scst_user_dev +{ + struct rw_semaphore dev_rwsem; + + struct scst_cmd_lists cmd_lists; + /* All 3 protected by cmd_lists.cmd_list_lock */ + struct list_head ready_cmd_list; + struct list_head prio_ready_cmd_list; + wait_queue_head_t prio_cmd_list_waitQ; + + /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */ + unsigned short blocking:1; + unsigned short cleaning:1; + unsigned short cleanup_done:1; + unsigned short attach_cmd_active:1; + unsigned short tm_cmd_active:1; + unsigned short internal_reset_active:1; + unsigned short pre_unreg_sess_active:1; /* just a small optimization */ + + unsigned short detach_cmd_count; + + int (*generic_parse)(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb, + int (*get_block)(struct scst_cmd *cmd)); + + int block; + int def_block; + + struct sgv_pool *pool; + + uint8_t parse_type; + uint8_t on_free_cmd_type; + uint8_t memory_reuse_type; + uint8_t prio_queue_type; + uint8_t partial_transfers_type; + uint32_t partial_len; + + struct scst_dev_type devtype; + + /* Both protected by cmd_lists.cmd_list_lock */ + unsigned int handle_counter; + struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER]; + + int virt_id; + struct list_head dev_list_entry; + char name[SCST_MAX_NAME]; + + /* Protected by cmd_lists.cmd_list_lock */ + struct list_head pre_unreg_sess_list; + + struct list_head cleanup_list_entry; + struct completion cleanup_cmpl; +}; + +struct dev_user_pre_unreg_sess_obj +{ + struct scst_tgt_dev *tgt_dev; + unsigned int active:1; + unsigned int exit:1; + struct list_head pre_unreg_sess_list_entry; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct work_struct pre_unreg_sess_work; +#else + struct delayed_work pre_unreg_sess_work; +#endif +}; + +/* Most fields are unprotected, since only one thread at time can access them */ +struct dev_user_cmd +{ + struct scst_cmd *cmd; + struct scst_user_dev *dev; + + atomic_t ucmd_ref; + + unsigned int buff_cached:1; + unsigned int buf_dirty:1; + unsigned int background_exec:1; + unsigned int internal_reset_tm:1; + + struct dev_user_cmd *buf_ucmd; + + int cur_data_page; + int num_data_pages; + int first_page_offset; + unsigned long ubuff; + struct page **data_pages; + struct sgv_pool_obj *sgv; + + unsigned int state; + + struct list_head ready_cmd_list_entry; + + unsigned int h; + struct list_head hash_list_entry; + + struct scst_user_get_cmd user_cmd; + + struct completion *cmpl; + int result; +}; + +static struct 
dev_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev, + int gfp_mask); +static void dev_user_free_ucmd(struct dev_user_cmd *ucmd); + +static int dev_user_parse(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb); +static int dev_user_exec(struct scst_cmd *cmd); +static void dev_user_on_free_cmd(struct scst_cmd *cmd); +static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd, + struct scst_tgt_dev *tgt_dev); + +static int dev_user_disk_done(struct scst_cmd *cmd); +static int dev_user_tape_done(struct scst_cmd *cmd); + +static struct page *dev_user_alloc_pages(struct scatterlist *sg, + gfp_t gfp_mask, void *priv); +static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count, + void *priv); + +static void dev_user_add_to_ready(struct dev_user_cmd *ucmd); + +static void dev_user_unjam_cmd(struct dev_user_cmd *ucmd, int busy, + unsigned long *flags); +static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm, + struct scst_tgt_dev *tgt_dev); + +static int dev_user_process_reply_tm_exec(struct dev_user_cmd *ucmd, + int status); +static int dev_user_process_reply_sess(struct dev_user_cmd *ucmd, int status); +static int dev_user_register_dev(struct file *file, + const struct scst_user_dev_desc *dev_desc); +static int __dev_user_set_opt(struct scst_user_dev *dev, + const struct scst_user_opt *opt); +static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt); +static int dev_user_get_opt(struct file *file, void *arg); + +static unsigned int dev_user_poll(struct file *filp, poll_table *wait); +static long dev_user_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int dev_user_release(struct inode *inode, struct file *file); + +/** Data **/ + +static struct kmem_cache *user_cmd_cachep; + +static DEFINE_MUTEX(dev_user_mutex); + +static struct file_operations dev_user_fops = { + .poll = dev_user_poll, + .unlocked_ioctl = dev_user_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = dev_user_ioctl, +#endif + .release = dev_user_release, +}; + +static struct class *dev_user_sysfs_class; + +static LIST_HEAD(dev_list); + +static spinlock_t cleanup_lock = SPIN_LOCK_UNLOCKED; +static LIST_HEAD(cleanup_list); +static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ); +static struct task_struct *cleanup_thread; + +static inline void ucmd_get(struct dev_user_cmd *ucmd, int barrier) +{ + TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref)); + atomic_inc(&ucmd->ucmd_ref); + if (barrier) + smp_mb__after_atomic_inc(); +} + +static inline void ucmd_put(struct dev_user_cmd *ucmd) +{ + TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref)); + if (atomic_dec_and_test(&ucmd->ucmd_ref)) + dev_user_free_ucmd(ucmd); +} + +static inline int calc_num_pg(unsigned long buf, int len) +{ + len += buf & ~PAGE_MASK; + return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0); +} + +static inline int is_need_offs_page(unsigned long buf, int len) +{ + return ((buf & ~PAGE_MASK) != 0) && + ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK)); +} + +static void __dev_user_not_reg(void) +{ + PRINT_ERROR_PR("%s", "Device not registered"); + return; +} + +static inline int dev_user_check_reg(struct scst_user_dev *dev) +{ + if (dev == NULL) { + __dev_user_not_reg(); + return -EINVAL; + } + return 0; +} + +static inline int dev_user_cmd_hashfn(int h) +{ + return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1); +} + +static inline struct dev_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev, + unsigned int h) +{ + struct list_head *head; + struct 
dev_user_cmd *ucmd; + + head = &dev->ucmd_hash[dev_user_cmd_hashfn(h)]; + list_for_each_entry(ucmd, head, hash_list_entry) { + if (ucmd->h == h) { + TRACE_DBG("Found ucmd %p", ucmd); + return ucmd; + } + } + return NULL; +} + +static void cmnd_insert_hash(struct dev_user_cmd *ucmd) +{ + struct list_head *head; + struct scst_user_dev *dev = ucmd->dev; + struct dev_user_cmd *u; + unsigned long flags; + + spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags); + do { + ucmd->h = dev->handle_counter++; + u = __ucmd_find_hash(dev, ucmd->h); + } while(u != NULL); + head = &dev->ucmd_hash[dev_user_cmd_hashfn(ucmd->h)]; + list_add_tail(&ucmd->hash_list_entry, head); + spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags); + + TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h); + return; +} + +static inline void cmnd_remove_hash(struct dev_user_cmd *ucmd) +{ + unsigned long flags; + spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags); + list_del(&ucmd->hash_list_entry); + spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags); + + TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h); + return; +} + +static void dev_user_free_ucmd(struct dev_user_cmd *ucmd) +{ + TRACE_ENTRY(); + + TRACE_MEM("Freeing ucmd %p", ucmd); + + cmnd_remove_hash(ucmd); + EXTRACHECKS_BUG_ON(ucmd->cmd != NULL); + + kmem_cache_free(user_cmd_cachep, ucmd); + + TRACE_EXIT(); + return; +} + +static struct page *dev_user_alloc_pages(struct scatterlist *sg, + gfp_t gfp_mask, void *priv) +{ + struct dev_user_cmd *ucmd = (struct dev_user_cmd*)priv; + + TRACE_ENTRY(); + + /* *sg supposed to be zeroed */ + + TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd, + ucmd->ubuff, ucmd->cur_data_page); + + if (ucmd->cur_data_page == 0) { + TRACE_MEM("ucmd->first_page_offset %d", + ucmd->first_page_offset); + sg->offset = ucmd->first_page_offset; + ucmd_get(ucmd, 0); + } + + if (ucmd->cur_data_page >= ucmd->num_data_pages) + goto out; + + sg->page = ucmd->data_pages[ucmd->cur_data_page]; + sg->length = PAGE_SIZE - sg->offset; + + ucmd->cur_data_page++; + + TRACE_MEM("page=%p, length=%d", sg->page, sg->length); + TRACE_BUFFER("Page data", page_address(sg->page), sg->length); + +out: + TRACE_EXIT(); + return sg->page; +} + +static void dev_user_on_cached_mem_free(struct dev_user_cmd *ucmd) +{ + TRACE_ENTRY(); + + TRACE_DBG("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)", + ucmd, ucmd->h, ucmd->ubuff); + + ucmd->user_cmd.cmd_h = ucmd->h; + ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE; + ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff; + + ucmd->state = UCMD_STATE_ON_CACHE_FREEING; + + dev_user_add_to_ready(ucmd); + + TRACE_EXIT(); + return; +} + +static void dev_user_unmap_buf(struct dev_user_cmd *ucmd) +{ + int i; + + TRACE_ENTRY(); + + TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd, + ucmd->ubuff, ucmd->num_data_pages); + + for(i = 0; i < ucmd->num_data_pages; i++) { + struct page *page = ucmd->data_pages[i]; + + if (ucmd->buf_dirty) + SetPageDirty(page); + + page_cache_release(page); + } + kfree(ucmd->data_pages); + ucmd->data_pages = NULL; + + TRACE_EXIT(); + return; +} + +static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count, + void *priv) +{ + struct dev_user_cmd *ucmd = (struct dev_user_cmd*)priv; + + TRACE_ENTRY(); + + sBUG_ON(ucmd->data_pages == NULL); + + TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, sg=%p, sg_count=%d, " + "buff_cached=%d)", ucmd, ucmd->ubuff, sg, sg_count, + ucmd->buff_cached); + + dev_user_unmap_buf(ucmd); + + if 
(ucmd->buff_cached) + dev_user_on_cached_mem_free(ucmd); + else + ucmd_put(ucmd); + + TRACE_EXIT(); + return; +} + +static inline int is_buff_cached(struct dev_user_cmd *ucmd) +{ + int mem_reuse_type = ucmd->dev->memory_reuse_type; + + if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) || + ((ucmd->cmd->data_direction == SCST_DATA_READ) && + (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) || + ((ucmd->cmd->data_direction == SCST_DATA_WRITE) && + (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE))) { + return 1; + } else + return 0; +} + +/* + * Returns 0 for success, <0 for fatal failure, >0 - need pages. + * Unmaps the buffer, if needed in case of error + */ +static int dev_user_alloc_sg(struct dev_user_cmd *ucmd, int cached_buff) +{ + int res = 0; + struct scst_cmd *cmd = ucmd->cmd; + struct scst_user_dev *dev = ucmd->dev; + int gfp_mask, flags = 0; + int bufflen = cmd->bufflen; + int last_len = 0; + + TRACE_ENTRY(); + + gfp_mask = __GFP_NOWARN; + gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL); + + if (cached_buff) { + flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL; + if (ucmd->ubuff == 0) + flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS; + } else { + TRACE_MEM("%s", "Not cached buff"); + flags |= SCST_POOL_ALLOC_NO_CACHED; + if (ucmd->ubuff == 0) { + res = 1; + goto out; + } + bufflen += ucmd->first_page_offset; + if (is_need_offs_page(ucmd->ubuff, cmd->bufflen)) + last_len = bufflen & ~PAGE_MASK; + else + last_len = cmd->bufflen & ~PAGE_MASK; + if (last_len == 0) + last_len = PAGE_SIZE; + } + ucmd->buff_cached = cached_buff; + + cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags, + &cmd->sg_cnt, &ucmd->sgv, ucmd); + if (cmd->sg != NULL) { + struct dev_user_cmd *buf_ucmd = + (struct dev_user_cmd*)sgv_get_priv(ucmd->sgv); + + TRACE_MEM("Buf ucmd %p", buf_ucmd); + + ucmd->ubuff = buf_ucmd->ubuff; + ucmd->buf_ucmd = buf_ucmd; + + TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, " + "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff, + last_len, cmd->sg[cmd->sg_cnt-1].length); + + EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) && + (ucmd != buf_ucmd)); + + if (last_len != 0) { + /* We don't use clustering, so the assignment is safe */ + cmd->sg[cmd->sg_cnt-1].length = last_len; + } + + if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) { + static int ll; + if (ll < 10) { + PRINT_INFO("Unable to complete command due to " + "SG IO count limitation (requested %d, " + "available %d, tgt lim %d)", cmd->sg_cnt, + cmd->tgt_dev->max_sg_cnt, + cmd->tgt->sg_tablesize); + ll++; + } + sgv_pool_free(ucmd->sgv); + ucmd->sgv = NULL; + cmd->sg = NULL; + res = -1; + } + } else { + TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached, %d, " + "sg_cnt %d, ubuff %lx, sgv %p", ucmd, ucmd->h, + ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv); + if (unlikely(cmd->sg_cnt == 0)) { + res = -1; + if (ucmd->data_pages != NULL) + dev_user_unmap_buf(ucmd); + } else { + switch(ucmd->state & ~UCMD_STATE_MASK) { + case UCMD_STATE_BUF_ALLOCING: + res = 1; + break; + case UCMD_STATE_EXECING: + res = -1; + if (ucmd->data_pages != NULL) + dev_user_unmap_buf(ucmd); + break; + default: + sBUG(); + break; + } + } + } + +out: + TRACE_EXIT_RES(res); + return res; +} + +static int dev_user_alloc_space(struct dev_user_cmd *ucmd) +{ + int rc, res = SCST_CMD_STATE_DEFAULT; + struct scst_cmd *cmd = ucmd->cmd; + + TRACE_ENTRY(); + + ucmd->state = UCMD_STATE_BUF_ALLOCING; + cmd->data_buf_alloced = 1; + + rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd)); + if (rc == 0) + goto out; + else if (rc < 0) { + 
scst_set_busy(cmd); + res = SCST_CMD_STATE_XMIT_RESP; + goto out; + } + + if ((cmd->data_direction != SCST_DATA_WRITE) && + !scst_is_cmd_local(cmd)) { + TRACE_DBG("Delayed alloc, ucmd %p", ucmd); + goto out; + } + + ucmd->user_cmd.cmd_h = ucmd->h; + ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM; + ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev; + memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb, + min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb))); + ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len; + ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ? + (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen; + ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type; + ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction; + + dev_user_add_to_ready(ucmd); + + res = SCST_CMD_STATE_STOP; + +out: + TRACE_EXIT_RES(res); + return res; +} + +static struct dev_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev, + int gfp_mask) +{ + struct dev_user_cmd *ucmd = NULL; + + TRACE_ENTRY(); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) + ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask); + if (ucmd != NULL) + memset(ucmd, 0, sizeof(*ucmd)); +#else + ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask); +#endif + if (unlikely(ucmd == NULL)) { + TRACE(TRACE_OUT_OF_MEM, "Unable to allocate " + "user cmd (gfp_mask %x)", gfp_mask); + goto out; + } + ucmd->dev = dev; + atomic_set(&ucmd->ucmd_ref, 1); + + cmnd_insert_hash(ucmd); + + TRACE_MEM("ucmd %p allocated", ucmd); + +out: + TRACE_EXIT_HRES((unsigned long)ucmd); + return ucmd; +} + +static int dev_user_get_block(struct scst_cmd *cmd) +{ + struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv; + /* + * No need for locks here, since *_detach() can not be + * called, when there are existing commands. + */ + TRACE_EXIT_RES(dev->block); + return dev->block; +} + +static int dev_user_parse(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb) +{ + int rc, res = SCST_CMD_STATE_DEFAULT; + struct dev_user_cmd *ucmd; + int atomic = scst_cmd_atomic(cmd); + struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv; + int gfp_mask = atomic ? 
GFP_ATOMIC : GFP_KERNEL; + + TRACE_ENTRY(); + + if (cmd->dh_priv == NULL) { + ucmd = dev_user_alloc_ucmd(dev, gfp_mask); + if (unlikely(ucmd == NULL)) { + if (atomic) { + res = SCST_CMD_STATE_NEED_THREAD_CTX; + goto out; + } else { + scst_set_busy(cmd); + goto out_error; + } + } + ucmd->cmd = cmd; + cmd->dh_priv = ucmd; + } else { + ucmd = (struct dev_user_cmd*)cmd->dh_priv; + TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state); + } + + TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state); + + if (ucmd->state != UCMD_STATE_NEW) + goto alloc; + + switch(dev->parse_type) { + case SCST_USER_PARSE_STANDARD: + TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd); + rc = dev->generic_parse(cmd, info_cdb, dev_user_get_block); + if ((rc != 0) || (info_cdb->flags & SCST_INFO_INVALID)) + goto out_invalid; + ucmd->cmd->skip_parse = 1; + break; + + case SCST_USER_PARSE_EXCEPTION: + TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd); + rc = dev->generic_parse(cmd, info_cdb, dev_user_get_block); + if ((rc == 0) && (!(info_cdb->flags & SCST_INFO_INVALID))) { + ucmd->cmd->skip_parse = 1; + break; + } else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) { + TRACE_MEM("Restarting PARSE to thread context " + "(ucmd %p)", ucmd); + res = SCST_CMD_STATE_NEED_THREAD_CTX; + goto out; + } + /* else go through */ + + case SCST_USER_PARSE_CALL: + TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, " + "bufflen %d)", ucmd, ucmd->h, cmd->bufflen); + ucmd->cmd->skip_parse = 1; + ucmd->user_cmd.cmd_h = ucmd->h; + ucmd->user_cmd.subcode = SCST_USER_PARSE; + ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev; + memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb, + min(sizeof(ucmd->user_cmd.parse_cmd.cdb), + sizeof(cmd->cdb))); + ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len; + ucmd->user_cmd.parse_cmd.timeout = cmd->timeout; + ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen; + ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type; + ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction; + ucmd->user_cmd.parse_cmd.expected_values_set = + cmd->expected_values_set; + ucmd->user_cmd.parse_cmd.expected_data_direction = + cmd->expected_data_direction; + ucmd->user_cmd.parse_cmd.expected_transfer_len = + cmd->expected_transfer_len; + ucmd->state = UCMD_STATE_PARSING; + dev_user_add_to_ready(ucmd); + res = SCST_CMD_STATE_STOP; + goto out; + + default: + sBUG(); + goto out; + } + +alloc: + if (cmd->data_direction != SCST_DATA_NONE) + res = dev_user_alloc_space(ucmd); + +out: + TRACE_EXIT_RES(res); + return res; + +out_invalid: + PRINT_ERROR_PR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc, + info_cdb->flags & SCST_INFO_INVALID); + scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode)); + +out_error: + res = SCST_CMD_STATE_XMIT_RESP; + goto out; +} + +static void dev_user_flush_dcache(struct dev_user_cmd *ucmd) +{ + struct dev_user_cmd *buf_ucmd = ucmd->buf_ucmd; + unsigned long start = buf_ucmd->ubuff; + int i; + + TRACE_ENTRY(); + + if (start == 0) + goto out; + + for(i = 0; i < buf_ucmd->num_data_pages; i++) { + struct page *page; + page = buf_ucmd->data_pages[i]; +#ifdef ARCH_HAS_FLUSH_ANON_PAGE + struct vm_area_struct *vma = find_vma(current->mm, start); + if (vma != NULL) + flush_anon_page(vma, page, start); +#endif + flush_dcache_page(page); + start += PAGE_SIZE; + } + +out: + TRACE_EXIT(); + return; +} + +static int dev_user_exec(struct scst_cmd *cmd) +{ + struct dev_user_cmd *ucmd = (struct dev_user_cmd*)cmd->dh_priv; + + TRACE_ENTRY(); + + TRACE_DBG("Preparing EXEC for user space (ucmd=%p, 
h=%d, " + "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h, + cmd->bufflen, cmd->data_len, ucmd->ubuff); + + if (cmd->data_direction == SCST_DATA_WRITE) + dev_user_flush_dcache(ucmd); + + ucmd->user_cmd.cmd_h = ucmd->h; + ucmd->user_cmd.subcode = SCST_USER_EXEC; + ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev; + memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb, + min(sizeof(ucmd->user_cmd.exec_cmd.cdb), + sizeof(cmd->cdb))); + ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len; + ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen; + ucmd->user_cmd.exec_cmd.data_len = cmd->data_len; + ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff; + if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) { + ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ? + (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen; + } + ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type; + ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction; + ucmd->user_cmd.exec_cmd.partial = 0; + ucmd->user_cmd.exec_cmd.timeout = cmd->timeout; + + ucmd->state = UCMD_STATE_EXECING; + + dev_user_add_to_ready(ucmd); + + TRACE_EXIT(); + return SCST_EXEC_COMPLETED; +} + +static void dev_user_free_sgv(struct dev_user_cmd *ucmd) +{ + if (ucmd->sgv != NULL) { + sgv_pool_free(ucmd->sgv); + ucmd->sgv = NULL; + } +} + +static void dev_user_on_free_cmd(struct scst_cmd *cmd) +{ + struct dev_user_cmd *ucmd = (struct dev_user_cmd*)cmd->dh_priv; + + TRACE_ENTRY(); + + if (unlikely(ucmd == NULL)) + goto out; + + TRACE_DBG("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd, + ucmd->buff_cached, ucmd->ubuff); + + ucmd->cmd = NULL; + if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL)) + ucmd->buf_ucmd->buf_dirty = 1; + + if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) { + ucmd->state = UCMD_STATE_ON_FREE_SKIPPED; + /* The state assignment must be before freeing sgv! */ + dev_user_free_sgv(ucmd); + ucmd_put(ucmd); + goto out; + } + + ucmd->user_cmd.cmd_h = ucmd->h; + ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD; + + ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff; + ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len; + ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached; + ucmd->user_cmd.on_free_cmd.status = cmd->status; + + ucmd->state = UCMD_STATE_ON_FREEING; + + dev_user_add_to_ready(ucmd); + +out: + TRACE_EXIT(); + return; +} + +static void dev_user_set_block(struct scst_cmd *cmd, int block) +{ + struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv; + /* + * No need for locks here, since *_detach() can not be + * called, when there are existing commands. 
+ */ + TRACE_DBG("dev %p, new block %d", dev, block); + if (block != 0) + dev->block = block; + else + dev->block = dev->def_block; + return; +} + +static int dev_user_disk_done(struct scst_cmd *cmd) +{ + int res = SCST_CMD_STATE_DEFAULT; + + TRACE_ENTRY(); + + res = scst_block_generic_dev_done(cmd, dev_user_set_block); + + TRACE_EXIT_RES(res); + return res; +} + +static int dev_user_tape_done(struct scst_cmd *cmd) +{ + int res = SCST_CMD_STATE_DEFAULT; + + TRACE_ENTRY(); + + res = scst_tape_generic_dev_done(cmd, dev_user_set_block); + + TRACE_EXIT_RES(res); + return res; +} + +static void dev_user_add_to_ready(struct dev_user_cmd *ucmd) +{ + struct scst_user_dev *dev = ucmd->dev; + unsigned long flags; + int do_wake; + + TRACE_ENTRY(); + + do_wake = (in_interrupt() || + (ucmd->state == UCMD_STATE_ON_CACHE_FREEING)); + if (ucmd->cmd) + do_wake |= ucmd->cmd->preprocessing_only; + + EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK); + + spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags); + + /* Hopefully, compiler will make it as a single test/jmp */ + if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active || + dev->internal_reset_active || dev->pre_unreg_sess_active || + (dev->detach_cmd_count != 0))) { + switch(ucmd->state) { + case UCMD_STATE_PARSING: + case UCMD_STATE_BUF_ALLOCING: + case UCMD_STATE_EXECING: + if (dev->pre_unreg_sess_active && + !(dev->attach_cmd_active || dev->tm_cmd_active || + dev->internal_reset_active || + (dev->detach_cmd_count != 0))) { + struct dev_user_pre_unreg_sess_obj *p, *found = NULL; + list_for_each_entry(p, &dev->pre_unreg_sess_list, + pre_unreg_sess_list_entry) { + if (p->tgt_dev == ucmd->cmd->tgt_dev) { + if (p->active) + found = p; + break; + } + } + if (found == NULL) { + TRACE_MGMT_DBG("No pre unreg sess " + "active (ucmd %p)", ucmd); + break; + } else { + TRACE(TRACE_MGMT, "Pre unreg sess %p " + "active (ucmd %p)", found, ucmd); + } + } + TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for " + "ucmd %p", ucmd); + dev_user_unjam_cmd(ucmd, 1, &flags); + spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags); + goto out; + } + } + + if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) || + unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) || + unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) { + if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) { + TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd " + "list", ucmd); + list_add_tail(&ucmd->ready_cmd_list_entry, + &dev->prio_ready_cmd_list); + wake_up(&dev->prio_cmd_list_waitQ); + do_wake = 0; + } else { + TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd " + "list", ucmd); + list_add_tail(&ucmd->ready_cmd_list_entry, + &dev->ready_cmd_list); + do_wake = 1; + } + } else if ((ucmd->cmd != NULL) && + unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) { + TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd); + list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list); + } else { + TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd); + list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list); + } + + if (do_wake) { + TRACE_DBG("Waking up dev %p", dev); + wake_up(&dev->cmd_lists.cmd_list_waitQ); + } + + spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags); + +out: + TRACE_EXIT(); + return; +} + +static int dev_user_map_buf(struct dev_user_cmd *ucmd, unsigned long ubuff, + int num_pg) +{ + int res = 0, rc; + int i; + + TRACE_ENTRY(); + + if (unlikely(ubuff == 0)) + goto out_nomem; + + ucmd->num_data_pages = num_pg; + + ucmd->data_pages = 
kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages, + GFP_KERNEL); + if (ucmd->data_pages == NULL) { + TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array " + "(num_data_pages=%d)", ucmd->num_data_pages); + res = -ENOMEM; + goto out_nomem; + } + + TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, " + "first_page_offset %d, len %d)", ucmd, ubuff, + ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK), + ucmd->cmd->bufflen); + + down_read(¤t->mm->mmap_sem); + rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages, + 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL); + up_read(¤t->mm->mmap_sem); + + /* get_user_pages() flushes dcache */ + + if (rc < ucmd->num_data_pages) + goto out_unmap; + + ucmd->ubuff = ubuff; + ucmd->first_page_offset = (ubuff & ~PAGE_MASK); + +out: + TRACE_EXIT_RES(res); + return res; + +out_nomem: + scst_set_busy(ucmd->cmd); + /* go through */ + +out_err: + ucmd->cmd->state = SCST_CMD_STATE_XMIT_RESP; + goto out; + +out_unmap: + PRINT_ERROR_PR("Failed to get %d user pages (rc %d)", + ucmd->num_data_pages, rc); + if (rc > 0) { + for(i = 0; i < rc; i++) + page_cache_release(ucmd->data_pages[i]); + } + kfree(ucmd->data_pages); + ucmd->data_pages = NULL; + res = -EFAULT; + scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error)); + goto out_err; +} + +static int dev_user_process_reply_alloc(struct dev_user_cmd *ucmd, + struct scst_user_reply_cmd *reply) +{ + int res = 0; + struct scst_cmd *cmd = ucmd->cmd; + + TRACE_ENTRY(); + + TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf); + + if (likely(reply->alloc_reply.pbuf != 0)) { + int pages; + if (ucmd->buff_cached) { + if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) { + PRINT_ERROR_PR("Supplied pbuf %Lx isn't " + "page aligned", reply->alloc_reply.pbuf); + goto out_hwerr; + } + pages = cmd->sg_cnt; + } else + pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen); + res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages); + } else { + scst_set_busy(ucmd->cmd); + ucmd->cmd->state = SCST_CMD_STATE_XMIT_RESP; + } + +out_process: + scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT); + + TRACE_EXIT_RES(res); + return res; + +out_hwerr: + scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error)); + res = -EINVAL; + goto out_process; +} + +static int dev_user_process_reply_parse(struct dev_user_cmd *ucmd, + struct scst_user_reply_cmd *reply) +{ + int res = 0; + struct scst_user_scsi_cmd_reply_parse *preply = + &reply->parse_reply; + struct scst_cmd *cmd = ucmd->cmd; + + TRACE_ENTRY(); + + if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA)) + goto out_inval; + + if (unlikely((preply->data_direction != SCST_DATA_WRITE) && + (preply->data_direction != SCST_DATA_READ) && + (preply->data_direction != SCST_DATA_NONE))) + goto out_inval; + + if (unlikely((preply->data_direction != SCST_DATA_NONE) && + (preply->bufflen == 0))) + goto out_inval; + + if (unlikely((preply->bufflen < 0) || (preply->data_len < 0))) + goto out_inval; + + TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, " + "data_len %d, pbuf %Lx", ucmd, preply->queue_type, + preply->data_direction, preply->bufflen, preply->data_len, + reply->alloc_reply.pbuf); + + cmd->queue_type = preply->queue_type; + cmd->data_direction = preply->data_direction; + cmd->bufflen = preply->bufflen; + cmd->data_len = preply->data_len; + +out_process: + scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT); + + TRACE_EXIT_RES(res); + return res; + +out_inval: + 
PRINT_ERROR_PR("%s", "Invalid parse_reply parameter(s)"); + scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error)); + res = -EINVAL; + goto out_process; +} + +static int dev_user_process_reply_on_free(struct dev_user_cmd *ucmd) +{ + int res = 0; + + TRACE_ENTRY(); + + TRACE_DBG("ON FREE ucmd %p", ucmd); + + dev_user_free_sgv(ucmd); + ucmd_put(ucmd); + + TRACE_EXIT_RES(res); + return res; +} + +static int dev_user_process_reply_on_cache_free(struct dev_user_cmd *ucmd) +{ + int res = 0; + + TRACE_ENTRY(); + + TRACE_DBG("ON CACHE FREE ucmd %p", ucmd); + + ucmd_put(ucmd); + + TRACE_EXIT_RES(res); + return res; +} + +static int dev_user_process_reply_exec(struct dev_user_cmd *ucmd, + struct scst_user_reply_cmd *reply) +{ + int res = 0; + struct scst_user_scsi_cmd_reply_exec *ereply = + &reply->exec_reply; + struct scst_cmd *cmd = ucmd->cmd; + + TRACE_ENTRY(); + + if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) { + if (ucmd->background_exec) { + TRACE_DBG("Background ucmd %p finished", ucmd); + ucmd_put(ucmd); + goto out; + } + if (unlikely(ereply->resp_data_len > cmd->bufflen)) + goto out_inval; + if (unlikely((cmd->data_direction != SCST_DATA_READ) && + (ereply->resp_data_len != 0))) + goto out_inval; + } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) { + if (unlikely(ucmd->background_exec)) + goto out_inval; + if (unlikely((cmd->data_direction == SCST_DATA_READ) || + (cmd->resp_data_len != 0))) + goto out_inval; + ucmd_get(ucmd, 1); + ucmd->background_exec = 1; + TRACE_DBG("Background ucmd %p", ucmd); + goto out_compl; + } else + goto out_inval; + + TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd, + ereply->status, ereply->resp_data_len); + + if (ereply->resp_data_len != 0) { + if (ucmd->ubuff == 0) { + int pages, rc; + if (unlikely(ereply->pbuf == 0)) + goto out_busy; + if (ucmd->buff_cached) { + if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) { + PRINT_ERROR_PR("Supplied pbuf %Lx isn't " + "page aligned", ereply->pbuf); + goto out_hwerr; + } + pages = cmd->sg_cnt; + } else + pages = calc_num_pg(ereply->pbuf, cmd->bufflen); + rc = dev_user_map_buf(ucmd, ereply->pbuf, pages); + if ((rc != 0) || (ucmd->ubuff == 0)) + goto out_compl; + + rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached); + if (unlikely(rc != 0)) + goto out_busy; + } else + dev_user_flush_dcache(ucmd); + cmd->may_need_dma_sync = 1; + scst_set_resp_data_len(cmd, ereply->resp_data_len); + } else if (cmd->resp_data_len != ereply->resp_data_len) { + if (ucmd->ubuff == 0) + cmd->resp_data_len = ereply->resp_data_len; + else + scst_set_resp_data_len(cmd, ereply->resp_data_len); + } + + cmd->status = ereply->status; + if (ereply->sense_len != 0) { + res = copy_from_user(cmd->sense_buffer, + (void*)(unsigned long)ereply->psense_buffer, + min(sizeof(cmd->sense_buffer), + (unsigned int)ereply->sense_len)); + if (res < 0) { + PRINT_ERROR_PR("%s", "Unable to get sense data"); + goto out_hwerr_res_set; + } + } + +out_compl: + cmd->completed = 1; + cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT); + +out: + TRACE_EXIT_RES(res); + return res; + +out_inval: + PRINT_ERROR_PR("%s", "Invalid exec_reply parameter(s)"); + +out_hwerr: + res = -EINVAL; + +out_hwerr_res_set: + if (ucmd->background_exec) { + ucmd_put(ucmd); + goto out; + } else { + scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error)); + goto out_compl; + } + +out_busy: + scst_set_busy(cmd); + goto out_compl; +} + +static int dev_user_process_reply(struct scst_user_dev *dev, + struct scst_user_reply_cmd *reply) +{ + int res = 0; + struct 
dev_user_cmd *ucmd; + int state; + + TRACE_ENTRY(); + + spin_lock_irq(&dev->cmd_lists.cmd_list_lock); + + ucmd = __ucmd_find_hash(dev, reply->cmd_h); + if (ucmd == NULL) { + TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h); + res = -ESRCH; + goto out_unlock; + } + + if (ucmd->background_exec) { + state = UCMD_STATE_EXECING; + goto unlock_process; + } + + if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) { + if (ucmd->state & UCMD_STATE_JAMMED_MASK) { + TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring", + ucmd); + } else { + TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user " + "state %x", ucmd, ucmd->state); + res = -EBUSY; + } + goto out_unlock; + } + + if (unlikely(reply->subcode != ucmd->user_cmd.subcode)) + goto out_wrong_state; + + if (unlikely(_IOC_NR(reply->subcode) != + (ucmd->state & ~UCMD_STATE_SENT_MASK))) + goto out_wrong_state; + + ucmd->state &= ~UCMD_STATE_SENT_MASK; + state = ucmd->state; + ucmd->state |= UCMD_STATE_RECV_MASK; + +unlock_process: + spin_unlock_irq(&dev->cmd_lists.cmd_list_lock); + + switch(state) { + case UCMD_STATE_PARSING: + res = dev_user_process_reply_parse(ucmd, reply); + break; + + case UCMD_STATE_BUF_ALLOCING: + res = dev_user_process_reply_alloc(ucmd, reply); + break; + + case UCMD_STATE_EXECING: + res = dev_user_process_reply_exec(ucmd, reply); + break; + + case UCMD_STATE_ON_FREEING: + res = dev_user_process_reply_on_free(ucmd); + break; + + case UCMD_STATE_ON_CACHE_FREEING: + res = dev_user_process_reply_on_cache_free(ucmd); + break; + + case UCMD_STATE_TM_EXECING: + res = dev_user_process_reply_tm_exec(uc... [truncated message content]
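For reference, a minimal sketch of a user space handler built against the scst_user.h interface added above (the authoritative example is the fileio_tgt program in trunk/usr/fileio). The device name "disk0" is hypothetical, and treating get.preply == 0 as "no reply to deliver yet" is an assumption made for illustration; PARSE, ALLOC_MEM, EXEC and ON_FREE_CMD processing is elided:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scst_user.h"

int main(void)
{
	struct scst_user_dev_desc desc;
	struct scst_user_get_cmd get;
	struct scst_user_reply_cmd reply;
	int fd, res;

	fd = open(DEV_USER_PATH DEV_USER_NAME, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Register a virtual disk-type device with default options */
	memset(&desc, 0, sizeof(desc));
	desc.version = DEV_USER_VERSION;
	desc.type = 0;			/* TYPE_DISK */
	desc.block_size = 512;
	desc.opt.parse_type = SCST_USER_PARSE_STANDARD;
	desc.opt.on_free_cmd_type = SCST_USER_ON_FREE_CMD_IGNORE;
	desc.opt.memory_reuse_type = SCST_USER_MEM_REUSE_ALL;
	strncpy(desc.name, "disk0", sizeof(desc.name) - 1); /* hypothetical */

	if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0) {
		perror("SCST_USER_REGISTER_DEVICE");
		return 1;
	}

	/* Main loop: deliver the previous reply and fetch the next command */
	memset(&get, 0, sizeof(get));
	get.preply = 0;			/* nothing to reply yet (assumed) */
	for (;;) {
		res = ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get);
		if (res != 0)
			break;
		memset(&reply, 0, sizeof(reply));
		reply.cmd_h = get.cmd_h;
		reply.subcode = get.subcode;
		switch (get.subcode) {
		case SCST_USER_ATTACH_SESS:
		case SCST_USER_DETACH_SESS:
			reply.result = 0;	/* accept the session */
			break;
		/*
		 * SCST_USER_PARSE, SCST_USER_ALLOC_MEM, SCST_USER_EXEC,
		 * SCST_USER_ON_FREE_CMD and SCST_USER_TASK_MGMT would be
		 * handled here by filling the matching reply union member.
		 */
		}
		get.preply = (unsigned long)&reply;
	}
	close(fd);
	return 0;
}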