[Netnice-commitlog] netnice : Linux/drivers/infiniband/core Makefile,NONE,1.1.4.2 agent.c,NONE,1.1.4.2
Status: Alpha
Brought to you by: taost6
Update of /cvsroot/netnice/Linux/drivers/infiniband/core In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv13340/core Added Files: Tag: netnice2612b Makefile agent.c agent.h agent_priv.h cache.c core_priv.h device.c fmr_pool.c mad.c mad_priv.h packer.c sa_query.c smi.c smi.h sysfs.c ud_header.c user_mad.c verbs.c Log Message: Missed this on initial upload (seems that core directories are ignored on import) --- NEW FILE: verbs.c --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $Id: verbs.c,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #include <linux/errno.h> #include <linux/err.h> #include <ib_verbs.h> /* Protection domains */ struct ib_pd *ib_alloc_pd(struct ib_device *device) { struct ib_pd *pd; pd = device->alloc_pd(device); if (!IS_ERR(pd)) { pd->device = device; atomic_set(&pd->usecnt, 0); } return pd; } EXPORT_SYMBOL(ib_alloc_pd); int ib_dealloc_pd(struct ib_pd *pd) { if (atomic_read(&pd->usecnt)) return -EBUSY; return pd->device->dealloc_pd(pd); } EXPORT_SYMBOL(ib_dealloc_pd); /* Address handles */ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ib_ah *ah; ah = pd->device->create_ah(pd, ah_attr); if (!IS_ERR(ah)) { ah->device = pd->device; ah->pd = pd; atomic_inc(&pd->usecnt); } return ah; } EXPORT_SYMBOL(ib_create_ah); int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->modify_ah ? ah->device->modify_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_modify_ah); int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->query_ah ? 
ah->device->query_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_ah); int ib_destroy_ah(struct ib_ah *ah) { struct ib_pd *pd; int ret; pd = ah->pd; ret = ah->device->destroy_ah(ah); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_destroy_ah); /* Queue pairs */ struct ib_qp *ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *qp; qp = pd->device->create_qp(pd, qp_init_attr); if (!IS_ERR(qp)) { qp->device = pd->device; qp->pd = pd; qp->send_cq = qp_init_attr->send_cq; qp->recv_cq = qp_init_attr->recv_cq; qp->srq = qp_init_attr->srq; qp->event_handler = qp_init_attr->event_handler; qp->qp_context = qp_init_attr->qp_context; qp->qp_type = qp_init_attr->qp_type; atomic_inc(&pd->usecnt); atomic_inc(&qp_init_attr->send_cq->usecnt); atomic_inc(&qp_init_attr->recv_cq->usecnt); if (qp_init_attr->srq) atomic_inc(&qp_init_attr->srq->usecnt); } return qp; } EXPORT_SYMBOL(ib_create_qp); int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); } EXPORT_SYMBOL(ib_modify_qp); int ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { return qp->device->query_qp ? 
qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_qp); int ib_destroy_qp(struct ib_qp *qp) { struct ib_pd *pd; struct ib_cq *scq, *rcq; struct ib_srq *srq; int ret; pd = qp->pd; scq = qp->send_cq; rcq = qp->recv_cq; srq = qp->srq; ret = qp->device->destroy_qp(qp); if (!ret) { atomic_dec(&pd->usecnt); atomic_dec(&scq->usecnt); atomic_dec(&rcq->usecnt); if (srq) atomic_dec(&srq->usecnt); } return ret; } EXPORT_SYMBOL(ib_destroy_qp); /* Completion queues */ struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), void *cq_context, int cqe) { struct ib_cq *cq; cq = device->create_cq(device, cqe); if (!IS_ERR(cq)) { cq->device = device; cq->comp_handler = comp_handler; cq->event_handler = event_handler; cq->cq_context = cq_context; atomic_set(&cq->usecnt, 0); } return cq; } EXPORT_SYMBOL(ib_create_cq); int ib_destroy_cq(struct ib_cq *cq) { if (atomic_read(&cq->usecnt)) return -EBUSY; return cq->device->destroy_cq(cq); } EXPORT_SYMBOL(ib_destroy_cq); int ib_resize_cq(struct ib_cq *cq, int cqe) { int ret; if (!cq->device->resize_cq) return -ENOSYS; ret = cq->device->resize_cq(cq, &cqe); if (!ret) cq->cqe = cqe; return ret; } EXPORT_SYMBOL(ib_resize_cq); /* Memory regions */ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *mr; mr = pd->device->get_dma_mr(pd, mr_access_flags); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } EXPORT_SYMBOL(ib_get_dma_mr); struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_mr *mr; mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } 
EXPORT_SYMBOL(ib_reg_phys_mr); int ib_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_pd *old_pd; int ret; if (!mr->device->rereg_phys_mr) return -ENOSYS; if (atomic_read(&mr->usecnt)) return -EBUSY; old_pd = mr->pd; ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { atomic_dec(&old_pd->usecnt); atomic_inc(&pd->usecnt); } return ret; } EXPORT_SYMBOL(ib_rereg_phys_mr); int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) { return mr->device->query_mr ? mr->device->query_mr(mr, mr_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_mr); int ib_dereg_mr(struct ib_mr *mr) { struct ib_pd *pd; int ret; if (atomic_read(&mr->usecnt)) return -EBUSY; pd = mr->pd; ret = mr->device->dereg_mr(mr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dereg_mr); /* Memory windows */ struct ib_mw *ib_alloc_mw(struct ib_pd *pd) { struct ib_mw *mw; if (!pd->device->alloc_mw) return ERR_PTR(-ENOSYS); mw = pd->device->alloc_mw(pd); if (!IS_ERR(mw)) { mw->device = pd->device; mw->pd = pd; atomic_inc(&pd->usecnt); } return mw; } EXPORT_SYMBOL(ib_alloc_mw); int ib_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd; int ret; pd = mw->pd; ret = mw->device->dealloc_mw(mw); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_mw); /* "Fast" memory regions */ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct ib_fmr *fmr; if (!pd->device->alloc_fmr) return ERR_PTR(-ENOSYS); fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); if (!IS_ERR(fmr)) { fmr->device = pd->device; fmr->pd = pd; atomic_inc(&pd->usecnt); } return fmr; } EXPORT_SYMBOL(ib_alloc_fmr); int ib_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; if (list_empty(fmr_list)) return 0; fmr = 
list_entry(fmr_list->next, struct ib_fmr, list); return fmr->device->unmap_fmr(fmr_list); } EXPORT_SYMBOL(ib_unmap_fmr); int ib_dealloc_fmr(struct ib_fmr *fmr) { struct ib_pd *pd; int ret; pd = fmr->pd; ret = fmr->device->dealloc_fmr(fmr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_fmr); /* Multicast groups */ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { return qp->device->attach_mcast ? qp->device->attach_mcast(qp, gid, lid) : -ENOSYS; } EXPORT_SYMBOL(ib_attach_mcast); int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { return qp->device->detach_mcast ? qp->device->detach_mcast(qp, gid, lid) : -ENOSYS; } EXPORT_SYMBOL(ib_detach_mcast); --- NEW FILE: mad.c --- /* * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following [...2672 lines suppressed...] return 0; error2: kmem_cache_destroy(ib_mad_cache); error1: return ret; } static void __exit ib_mad_cleanup_module(void) { ib_unregister_client(&mad_client); if (kmem_cache_destroy(ib_mad_cache)) { printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n"); } } module_init(ib_mad_init_module); module_exit(ib_mad_cleanup_module); --- NEW FILE: packer.c --- /* * Copyright (c) 2004 Topspin Corporation. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: packer.c,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #include <ib_pack.h> static u64 value_read(int offset, int size, void *structure) { switch (size) { case 1: return *(u8 *) (structure + offset); case 2: return be16_to_cpup((__be16 *) (structure + offset)); case 4: return be32_to_cpup((__be32 *) (structure + offset)); case 8: return be64_to_cpup((__be64 *) (structure + offset)); default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); return 0; } } /** * ib_pack - Pack a structure into a buffer * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @structure:Structure to pack from * @buf:Buffer to pack into * * ib_pack() packs a list of structure fields into a buffer, * controlled by the array of fields in @desc. */ void ib_pack(const struct ib_field *desc, int desc_len, void *structure, void *buf) { int i; for (i = 0; i < desc_len; ++i) { if (desc[i].size_bits <= 32) { int shift; u32 val; __be32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be32 *) buf + desc[i].offset_words; *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask); } else if (desc[i].size_bits <= 64) { int shift; u64 val; __be64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be64(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } if 
(desc[i].struct_size_bytes) memcpy(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, structure + desc[i].struct_offset_bytes, desc[i].size_bits / 8); else memset(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, 0, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_pack); static void value_write(int offset, int size, u64 val, void *structure) { switch (size * 8) { case 8: *( u8 *) (structure + offset) = val; break; case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break; case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break; case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break; default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); } } /** * ib_unpack - Unpack a buffer into a structure * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @buf:Buffer to unpack from * @structure:Structure to unpack into * * ib_pack() unpacks a list of structure fields from a buffer, * controlled by the array of fields in @desc. 
*/ void ib_unpack(const struct ib_field *desc, int desc_len, void *buf, void *structure) { int i; for (i = 0; i < desc_len; ++i) { if (!desc[i].struct_size_bytes) continue; if (desc[i].size_bits <= 32) { int shift; u32 val; u32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be32 *) buf + desc[i].offset_words; val = (be32_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else if (desc[i].size_bits <= 64) { int shift; u64 val; u64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be64 *) buf + desc[i].offset_words; val = (be64_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } memcpy(structure + desc[i].struct_offset_bytes, buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_unpack); --- NEW FILE: sysfs.c --- /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. 
* * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $Id: sysfs.c,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #include "core_priv.h" #include <ib_mad.h> struct ib_port { struct kobject kobj; struct ib_device *ibdev; struct attribute_group gid_group; struct attribute **gid_attr; struct attribute_group pkey_group; struct attribute **pkey_attr; u8 port_num; }; struct port_attribute { struct attribute attr; ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf); ssize_t (*store)(struct ib_port *, struct port_attribute *, const char *buf, size_t count); }; #define PORT_ATTR(_name, _mode, _show, _store) \ struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store) #define PORT_ATTR_RO(_name) \ struct port_attribute port_attr_##_name = __ATTR_RO(_name) struct port_table_attribute { struct port_attribute attr; int index; }; static ssize_t port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct port_attribute *port_attr = container_of(attr, struct port_attribute, attr); struct ib_port *p = container_of(kobj, struct ib_port, kobj); if (!port_attr->show) return 0; return port_attr->show(p, port_attr, buf); } static struct sysfs_ops port_sysfs_ops = { .show = port_attr_show }; static ssize_t state_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr 
attr; ssize_t ret; static const char *state_name[] = { [IB_PORT_NOP] = "NOP", [IB_PORT_DOWN] = "DOWN", [IB_PORT_INIT] = "INIT", [IB_PORT_ARMED] = "ARMED", [IB_PORT_ACTIVE] = "ACTIVE", [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER" }; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d: %s\n", attr.state, attr.state >= 0 && attr.state <= ARRAY_SIZE(state_name) ? state_name[attr.state] : "UNKNOWN"); } static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%x\n", attr.lid); } static ssize_t lid_mask_count_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d\n", attr.lmc); } static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%x\n", attr.sm_lid); } static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d\n", attr.sm_sl); } static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%08x\n", attr.port_cap_flags); } static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; char *speed = ""; int rate; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; switch (attr.active_speed) { case 2: speed = " DDR"; break; case 4: speed = " QDR"; break; } 
rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed; if (rate < 0) return -EINVAL; return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", rate / 10, rate % 10 ? ".5" : "", ib_width_enum_to_int(attr.active_width), speed); } static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; switch (attr.phys_state) { case 1: return sprintf(buf, "1: Sleep\n"); case 2: return sprintf(buf, "2: Polling\n"); case 3: return sprintf(buf, "3: Disabled\n"); case 4: return sprintf(buf, "4: PortConfigurationTraining\n"); case 5: return sprintf(buf, "5: LinkUp\n"); case 6: return sprintf(buf, "6: LinkErrorRecovery\n"); case 7: return sprintf(buf, "7: Phy Test\n"); default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state); } } static PORT_ATTR_RO(state); static PORT_ATTR_RO(lid); static PORT_ATTR_RO(lid_mask_count); static PORT_ATTR_RO(sm_lid); static PORT_ATTR_RO(sm_sl); static PORT_ATTR_RO(cap_mask); static PORT_ATTR_RO(rate); static PORT_ATTR_RO(phys_state); static struct attribute *port_default_attrs[] = { &port_attr_state.attr, &port_attr_lid.attr, &port_attr_lid_mask_count.attr, &port_attr_sm_lid.attr, &port_attr_sm_sl.attr, &port_attr_cap_mask.attr, &port_attr_rate.attr, &port_attr_phys_state.attr, NULL }; static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); union ib_gid gid; ssize_t ret; ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", be16_to_cpu(((u16 *) gid.raw)[0]), be16_to_cpu(((u16 *) gid.raw)[1]), be16_to_cpu(((u16 *) gid.raw)[2]), be16_to_cpu(((u16 *) gid.raw)[3]), be16_to_cpu(((u16 *) gid.raw)[4]), be16_to_cpu(((u16 *) gid.raw)[5]), be16_to_cpu(((u16 *) gid.raw)[6]), 
be16_to_cpu(((u16 *) gid.raw)[7])); } static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); u16 pkey; ssize_t ret; ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey); if (ret) return ret; return sprintf(buf, "0x%04x\n", pkey); } #define PORT_PMA_ATTR(_name, _counter, _width, _offset) \ struct port_table_attribute port_pma_attr_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16) | ((_counter) << 24) \ } static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); int offset = tab_attr->index & 0xffff; int width = (tab_attr->index >> 16) & 0xff; struct ib_mad *in_mad = NULL; struct ib_mad *out_mad = NULL; ssize_t ret; if (!p->ibdev->process_mad) return sprintf(buf, "N/A (no PMA)\n"); in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); if (!in_mad || !out_mad) { ret = -ENOMEM; goto out; } memset(in_mad, 0, sizeof *in_mad); in_mad->mad_hdr.base_version = 1; in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT; in_mad->mad_hdr.class_version = 1; in_mad->mad_hdr.method = IB_MGMT_METHOD_GET; in_mad->mad_hdr.attr_id = cpu_to_be16(0x12); /* PortCounters */ in_mad->data[41] = p->port_num; /* PortSelect field */ if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY, p->port_num, NULL, NULL, in_mad, out_mad) & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) { ret = -EINVAL; goto out; } switch (width) { case 4: ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> (offset % 4)) & 0xf); break; case 8: ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); break; case 16: ret = sprintf(buf, "%u\n", be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 
8))); break; case 32: ret = sprintf(buf, "%u\n", be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8))); break; default: ret = 0; } out: kfree(in_mad); kfree(out_mad); return ret; } static PORT_PMA_ATTR(symbol_error , 0, 16, 32); static PORT_PMA_ATTR(link_error_recovery , 1, 8, 48); static PORT_PMA_ATTR(link_downed , 2, 8, 56); static PORT_PMA_ATTR(port_rcv_errors , 3, 16, 64); static PORT_PMA_ATTR(port_rcv_remote_physical_errors, 4, 16, 80); static PORT_PMA_ATTR(port_rcv_switch_relay_errors , 5, 16, 96); static PORT_PMA_ATTR(port_xmit_discards , 6, 16, 112); static PORT_PMA_ATTR(port_xmit_constraint_errors , 7, 8, 128); static PORT_PMA_ATTR(port_rcv_constraint_errors , 8, 8, 136); static PORT_PMA_ATTR(local_link_integrity_errors , 9, 4, 152); static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10, 4, 156); static PORT_PMA_ATTR(VL15_dropped , 11, 16, 176); static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192); static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224); static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256); static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288); static struct attribute *pma_attrs[] = { &port_pma_attr_symbol_error.attr.attr, &port_pma_attr_link_error_recovery.attr.attr, &port_pma_attr_link_downed.attr.attr, &port_pma_attr_port_rcv_errors.attr.attr, &port_pma_attr_port_rcv_remote_physical_errors.attr.attr, &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, &port_pma_attr_port_xmit_discards.attr.attr, &port_pma_attr_port_xmit_constraint_errors.attr.attr, &port_pma_attr_port_rcv_constraint_errors.attr.attr, &port_pma_attr_local_link_integrity_errors.attr.attr, &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, &port_pma_attr_VL15_dropped.attr.attr, &port_pma_attr_port_xmit_data.attr.attr, &port_pma_attr_port_rcv_data.attr.attr, &port_pma_attr_port_xmit_packets.attr.attr, &port_pma_attr_port_rcv_packets.attr.attr, NULL }; static struct attribute_group pma_group = { .name = "counters", .attrs = pma_attrs }; static void 
ib_port_release(struct kobject *kobj) { struct ib_port *p = container_of(kobj, struct ib_port, kobj); struct attribute *a; int i; for (i = 0; (a = p->gid_attr[i]); ++i) { kfree(a->name); kfree(a); } for (i = 0; (a = p->pkey_attr[i]); ++i) { kfree(a->name); kfree(a); } kfree(p->gid_attr); kfree(p); } static struct kobj_type port_type = { .release = ib_port_release, .sysfs_ops = &port_sysfs_ops, .default_attrs = port_default_attrs }; static void ib_device_release(struct class_device *cdev) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); kfree(dev); } static int ib_device_hotplug(struct class_device *cdev, char **envp, int num_envp, char *buf, int size) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); int i = 0, len = 0; if (add_hotplug_env_var(envp, num_envp, &i, buf, size, &len, "NAME=%s", dev->name)) return -ENOMEM; /* * It might be nice to pass the node GUID to hotplug, but * right now the only way to get it is to query the device * provider, and this can crash during device removal because * we are will be running after driver removal has started. * We could add a node_guid field to struct ib_device, or we * could just let the hotplug script read the node GUID from * sysfs when devices are added. 
*/ envp[i] = NULL; return 0; } static int alloc_group(struct attribute ***attr, ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf), int len) { struct port_table_attribute ***tab_attr = (struct port_table_attribute ***) attr; int i; int ret; *tab_attr = kmalloc((1 + len) * sizeof *tab_attr, GFP_KERNEL); if (!*tab_attr) return -ENOMEM; memset(*tab_attr, 0, (1 + len) * sizeof *tab_attr); for (i = 0; i < len; ++i) { (*tab_attr)[i] = kmalloc(sizeof *(*tab_attr)[i], GFP_KERNEL); if (!(*tab_attr)[i]) { ret = -ENOMEM; goto err; } memset((*tab_attr)[i], 0, sizeof *(*tab_attr)[i]); (*tab_attr)[i]->attr.attr.name = kmalloc(8, GFP_KERNEL); if (!(*tab_attr)[i]->attr.attr.name) { ret = -ENOMEM; goto err; } if (snprintf((*tab_attr)[i]->attr.attr.name, 8, "%d", i) >= 8) { ret = -ENOMEM; goto err; } (*tab_attr)[i]->attr.attr.mode = S_IRUGO; (*tab_attr)[i]->attr.attr.owner = THIS_MODULE; (*tab_attr)[i]->attr.show = show; (*tab_attr)[i]->index = i; } return 0; err: for (i = 0; i < len; ++i) { if ((*tab_attr)[i]) kfree((*tab_attr)[i]->attr.attr.name); kfree((*tab_attr)[i]); } kfree(*tab_attr); return ret; } static int add_port(struct ib_device *device, int port_num) { struct ib_port *p; struct ib_port_attr attr; int i; int ret; ret = ib_query_port(device, port_num, &attr); if (ret) return ret; p = kmalloc(sizeof *p, GFP_KERNEL); if (!p) return -ENOMEM; memset(p, 0, sizeof *p); p->ibdev = device; p->port_num = port_num; p->kobj.ktype = &port_type; p->kobj.parent = kobject_get(&device->ports_parent); if (!p->kobj.parent) { ret = -EBUSY; goto err; } ret = kobject_set_name(&p->kobj, "%d", port_num); if (ret) goto err_put; ret = kobject_register(&p->kobj); if (ret) goto err_put; ret = sysfs_create_group(&p->kobj, &pma_group); if (ret) goto err_put; ret = alloc_group(&p->gid_attr, show_port_gid, attr.gid_tbl_len); if (ret) goto err_remove_pma; p->gid_group.name = "gids"; p->gid_group.attrs = p->gid_attr; ret = sysfs_create_group(&p->kobj, &p->gid_group); if (ret) goto 
err_free_gid; ret = alloc_group(&p->pkey_attr, show_port_pkey, attr.pkey_tbl_len); if (ret) goto err_remove_gid; p->pkey_group.name = "pkeys"; p->pkey_group.attrs = p->pkey_attr; ret = sysfs_create_group(&p->kobj, &p->pkey_group); if (ret) goto err_free_pkey; list_add_tail(&p->kobj.entry, &device->port_list); return 0; err_free_pkey: for (i = 0; i < attr.pkey_tbl_len; ++i) { kfree(p->pkey_attr[i]->name); kfree(p->pkey_attr[i]); } kfree(p->pkey_attr); err_remove_gid: sysfs_remove_group(&p->kobj, &p->gid_group); err_free_gid: for (i = 0; i < attr.gid_tbl_len; ++i) { kfree(p->gid_attr[i]->name); kfree(p->gid_attr[i]); } kfree(p->gid_attr); err_remove_pma: sysfs_remove_group(&p->kobj, &pma_group); err_put: kobject_put(&device->ports_parent); err: kfree(p); return ret; } static ssize_t show_node_type(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); switch (dev->node_type) { case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type); case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); } } static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); struct ib_device_attr attr; ssize_t ret; ret = ib_query_device(dev, &attr); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[3])); } static ssize_t show_node_guid(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); struct ib_device_attr attr; ssize_t ret; ret = ib_query_device(dev, &attr); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", 
be16_to_cpu(((u16 *) &attr.node_guid)[0]), be16_to_cpu(((u16 *) &attr.node_guid)[1]), be16_to_cpu(((u16 *) &attr.node_guid)[2]), be16_to_cpu(((u16 *) &attr.node_guid)[3])); } static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); static CLASS_DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL); static CLASS_DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL); static struct class_device_attribute *ib_class_attributes[] = { &class_device_attr_node_type, &class_device_attr_sys_image_guid, &class_device_attr_node_guid }; static struct class ib_class = { .name = "infiniband", .release = ib_device_release, .hotplug = ib_device_hotplug, }; int ib_device_register_sysfs(struct ib_device *device) { struct class_device *class_dev = &device->class_dev; int ret; int i; class_dev->class = &ib_class; class_dev->class_data = device; strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE); INIT_LIST_HEAD(&device->port_list); ret = class_device_register(class_dev); if (ret) goto err; for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) { ret = class_device_create_file(class_dev, ib_class_attributes[i]); if (ret) goto err_unregister; } device->ports_parent.parent = kobject_get(&class_dev->kobj); if (!device->ports_parent.parent) { ret = -EBUSY; goto err_unregister; } ret = kobject_set_name(&device->ports_parent, "ports"); if (ret) goto err_put; ret = kobject_register(&device->ports_parent); if (ret) goto err_put; if (device->node_type == IB_NODE_SWITCH) { ret = add_port(device, 0); if (ret) goto err_put; } else { int i; for (i = 1; i <= device->phys_port_cnt; ++i) { ret = add_port(device, i); if (ret) goto err_put; } } return 0; err_put: { struct kobject *p, *t; struct ib_port *port; list_for_each_entry_safe(p, t, &device->port_list, entry) { list_del(&p->entry); port = container_of(p, struct ib_port, kobj); sysfs_remove_group(p, &pma_group); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); kobject_unregister(p); } } 
kobject_put(&class_dev->kobj); err_unregister: class_device_unregister(class_dev); err: return ret; } void ib_device_unregister_sysfs(struct ib_device *device) { struct kobject *p, *t; struct ib_port *port; list_for_each_entry_safe(p, t, &device->port_list, entry) { list_del(&p->entry); port = container_of(p, struct ib_port, kobj); sysfs_remove_group(p, &pma_group); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); kobject_unregister(p); } kobject_unregister(&device->ports_parent); class_device_unregister(&device->class_dev); } int ib_sysfs_setup(void) { return class_register(&ib_class); } void ib_sysfs_cleanup(void) { class_unregister(&ib_class); } --- NEW FILE: agent.c --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $Id: agent.c,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #include <linux/dma-mapping.h> #include <asm/bug.h> #include <ib_smi.h> #include "smi.h" #include "agent_priv.h" #include "mad_priv.h" #include "agent.h" spinlock_t ib_agent_port_list_lock; static LIST_HEAD(ib_agent_port_list); /* * Caller must hold ib_agent_port_list_lock */ static inline struct ib_agent_port_private * __ib_get_agent_port(struct ib_device *device, int port_num, struct ib_mad_agent *mad_agent) { struct ib_agent_port_private *entry; BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */ if (device) { list_for_each_entry(entry, &ib_agent_port_list, port_list) { if (entry->smp_agent->device == device && entry->port_num == port_num) return entry; } } else { list_for_each_entry(entry, &ib_agent_port_list, port_list) { if ((entry->smp_agent == mad_agent) || (entry->perf_mgmt_agent == mad_agent)) return entry; } } return NULL; } static inline struct ib_agent_port_private * ib_get_agent_port(struct ib_device *device, int port_num, struct ib_mad_agent *mad_agent) { struct ib_agent_port_private *entry; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); entry = __ib_get_agent_port(device, port_num, mad_agent); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return entry; } int smi_check_local_dr_smp(struct ib_smp *smp, struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) return 1; port_priv = 
ib_get_agent_port(device, port_num, NULL); if (!port_priv) { printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d " "not open\n", device->name, port_num); return 1; } return smi_check_local_smp(port_priv->smp_agent, smp); } static int agent_mad_send(struct ib_mad_agent *mad_agent, struct ib_agent_port_private *port_priv, struct ib_mad_private *mad_priv, struct ib_grh *grh, struct ib_wc *wc) { struct ib_agent_send_wr *agent_send_wr; struct ib_sge gather_list; struct ib_send_wr send_wr; struct ib_send_wr *bad_send_wr; struct ib_ah_attr ah_attr; unsigned long flags; int ret = 1; agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL); if (!agent_send_wr) goto out; agent_send_wr->mad = mad_priv; gather_list.addr = dma_map_single(mad_agent->device->dma_device, &mad_priv->mad, sizeof(mad_priv->mad), DMA_TO_DEVICE); gather_list.length = sizeof(mad_priv->mad); gather_list.lkey = (*port_priv->mr).lkey; send_wr.next = NULL; send_wr.opcode = IB_WR_SEND; send_wr.sg_list = &gather_list; send_wr.num_sge = 1; send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */ send_wr.wr.ud.timeout_ms = 0; send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; ah_attr.dlid = wc->slid; ah_attr.port_num = mad_agent->port_num; ah_attr.src_path_bits = wc->dlid_path_bits; ah_attr.sl = wc->sl; ah_attr.static_rate = 0; ah_attr.ah_flags = 0; /* No GRH */ if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { if (wc->wc_flags & IB_WC_GRH) { ah_attr.ah_flags = IB_AH_GRH; /* Should sgid be looked up ? 
*/ ah_attr.grh.sgid_index = 0; ah_attr.grh.hop_limit = grh->hop_limit; ah_attr.grh.flow_label = be32_to_cpup( &grh->version_tclass_flow) & 0xfffff; ah_attr.grh.traffic_class = (be32_to_cpup( &grh->version_tclass_flow) >> 20) & 0xff; memcpy(ah_attr.grh.dgid.raw, grh->sgid.raw, sizeof(ah_attr.grh.dgid)); } } agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr); if (IS_ERR(agent_send_wr->ah)) { printk(KERN_ERR SPFX "No memory for address handle\n"); kfree(agent_send_wr); goto out; } send_wr.wr.ud.ah = agent_send_wr->ah; if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { send_wr.wr.ud.pkey_index = wc->pkey_index; send_wr.wr.ud.remote_qkey = IB_QP1_QKEY; } else { /* for SMPs */ send_wr.wr.ud.pkey_index = 0; send_wr.wr.ud.remote_qkey = 0; } send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr; send_wr.wr_id = (unsigned long)agent_send_wr; pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr); /* Send */ spin_lock_irqsave(&port_priv->send_list_lock, flags); if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) { spin_unlock_irqrestore(&port_priv->send_list_lock, flags); dma_unmap_single(mad_agent->device->dma_device, pci_unmap_addr(agent_send_wr, mapping), sizeof(mad_priv->mad), DMA_TO_DEVICE); ib_destroy_ah(agent_send_wr->ah); kfree(agent_send_wr); } else { list_add_tail(&agent_send_wr->send_list, &port_priv->send_posted_list); spin_unlock_irqrestore(&port_priv->send_list_lock, flags); ret = 0; } out: return ret; } int agent_send(struct ib_mad_private *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; struct ib_mad_agent *mad_agent; port_priv = ib_get_agent_port(device, port_num, NULL); if (!port_priv) { printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n", device->name, port_num); return 1; } /* Get mad agent based on mgmt_class in MAD */ switch (mad->mad.mad.mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case 
IB_MGMT_CLASS_SUBN_LID_ROUTED: mad_agent = port_priv->smp_agent; break; case IB_MGMT_CLASS_PERF_MGMT: mad_agent = port_priv->perf_mgmt_agent; break; default: return 1; } return agent_mad_send(mad_agent, port_priv, mad, grh, wc); } static void agent_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_agent_port_private *port_priv; struct ib_agent_send_wr *agent_send_wr; unsigned long flags; /* Find matching MAD agent */ port_priv = ib_get_agent_port(NULL, 0, mad_agent); if (!port_priv) { printk(KERN_ERR SPFX "agent_send_handler: no matching MAD " "agent %p\n", mad_agent); return; } agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id; spin_lock_irqsave(&port_priv->send_list_lock, flags); /* Remove completed send from posted send MAD list */ list_del(&agent_send_wr->send_list); spin_unlock_irqrestore(&port_priv->send_list_lock, flags); dma_unmap_single(mad_agent->device->dma_device, pci_unmap_addr(agent_send_wr, mapping), sizeof(agent_send_wr->mad->mad), DMA_TO_DEVICE); ib_destroy_ah(agent_send_wr->ah); /* Release allocated memory */ kmem_cache_free(ib_mad_cache, agent_send_wr->mad); kfree(agent_send_wr); } int ib_agent_port_open(struct ib_device *device, int port_num) { int ret; struct ib_agent_port_private *port_priv; unsigned long flags; /* First, check if port already open for SMI */ port_priv = ib_get_agent_port(device, port_num, NULL); if (port_priv) { printk(KERN_DEBUG SPFX "%s port %d already open\n", device->name, port_num); return 0; } /* Create new device info */ port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n"); ret = -ENOMEM; goto error1; } memset(port_priv, 0, sizeof *port_priv); port_priv->port_num = port_num; spin_lock_init(&port_priv->send_list_lock); INIT_LIST_HEAD(&port_priv->send_posted_list); /* Obtain send only MAD agent for SM class (SMI QP) */ port_priv->smp_agent = ib_register_mad_agent(device, 
port_num, IB_QPT_SMI, NULL, 0, &agent_send_handler, NULL, NULL); if (IS_ERR(port_priv->smp_agent)) { ret = PTR_ERR(port_priv->smp_agent); goto error2; } /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */ port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0, &agent_send_handler, NULL, NULL); if (IS_ERR(port_priv->perf_mgmt_agent)) { ret = PTR_ERR(port_priv->perf_mgmt_agent); goto error3; } port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(port_priv->mr)) { printk(KERN_ERR SPFX "Couldn't get DMA MR\n"); ret = PTR_ERR(port_priv->mr); goto error4; } spin_lock_irqsave(&ib_agent_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_agent_port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return 0; error4: ib_unregister_mad_agent(port_priv->perf_mgmt_agent); error3: ib_unregister_mad_agent(port_priv->smp_agent); error2: kfree(port_priv); error1: return ret; } int ib_agent_port_close(struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); port_priv = __ib_get_agent_port(device, port_num, NULL); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); printk(KERN_ERR SPFX "Port %d not found\n", port_num); return -ENODEV; } list_del(&port_priv->port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); ib_dereg_mr(port_priv->mr); ib_unregister_mad_agent(port_priv->perf_mgmt_agent); ib_unregister_mad_agent(port_priv->smp_agent); kfree(port_priv); return 0; } --- NEW FILE: Makefile --- EXTRA_CFLAGS += -Idrivers/infiniband/include obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o ib_umad.o ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ device.o fmr_pool.o cache.o ib_mad-y := mad.o smi.o agent.o ib_sa-y := sa_query.o ib_umad-y := user_mad.o --- NEW FILE: agent.h --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. 
All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: agent.h,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #ifndef __AGENT_H_ #define __AGENT_H_ extern spinlock_t ib_agent_port_list_lock; extern int ib_agent_port_open(struct ib_device *device, int port_num); extern int ib_agent_port_close(struct ib_device *device, int port_num); extern int agent_send(struct ib_mad_private *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num); #endif /* __AGENT_H_ */ --- NEW FILE: sa_query.c --- /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: sa_query.c,v 1.1.4.2 2006/03/06 05:16:47 enferex Exp $ */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/random.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/idr.h> #include <ib_pack.h> #include <ib_sa.h> MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand subnet administration query support"); MODULE_LICENSE("Dual BSD/GPL"); /* * These two structures must be packed because they have 64-bit fields * that are only 32-bit aligned. 64-bit architectures will lay them * out wrong otherwise. (And unfortunately they are sent on the wire * so we can't change the layout) */ struct ib_sa_hdr { u64 sm_key; u16 attr_offset; u16 reserved; ib_sa_comp_mask comp_mask; } __attribute__ ((packed)); struct ib_sa_mad { struct ib_mad_hdr mad_hdr; struct ib_rmpp_hdr rmpp_hdr; struct ib_sa_hdr sa_hdr; u8 data[200]; } __attribute__ ((packed)); struct ib_sa_sm_ah { struct ib_ah *ah; struct kref ref; }; struct ib_sa_port { struct ib_mad_agent *agent; struct ib_mr *mr; struct ib_sa_sm_ah *sm_ah; struct work_struct update_task; spinlock_t ah_lock; u8 port_num; }; struct ib_sa_device { int start_port, end_port; struct ib_event_handler event_handler; struct ib_sa_port port[0]; }; struct ib_sa_query { void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); void (*release)(struct ib_sa_query *); struct ib_sa_port *port; struct ib_sa_mad *mad; struct ib_sa_sm_ah *sm_ah; DECLARE_PCI_UNMAP_ADDR(mapping) int id; }; struct ib_sa_path_query { void (*callback)(int, struct ib_sa_path_rec *, void *); void *context; struct ib_sa_query sa_query; }; struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; struct ib_sa_query sa_query; }; static void ib_sa_add_one(struct ib_device *device); static void ib_sa_remove_one(struct ib_device *device); static struct ib_client sa_client = { .name = 
"sa", .add = ib_sa_add_one, .remove = ib_sa_remove_one }; static spinlock_t idr_lock; static DEFINE_IDR(query_idr); static spinlock_t tid_lock; static u32 tid; enum { IB_SA_ATTR_CLASS_PORTINFO = 0x01, IB_SA_ATTR_NOTICE = 0x02, IB_SA_ATTR_INFORM_INFO = 0x03, IB_SA_ATTR_NODE_REC = 0x11, IB_SA_ATTR_PORT_INFO_REC = 0x12, IB_SA_ATTR_SL2VL_REC = 0x13, IB_SA_ATTR_SWITCH_REC = 0x14, IB_SA_ATTR_LINEAR_FDB_REC = 0x15, IB_SA_ATTR_RANDOM_FDB_REC = 0x16, IB_SA_ATTR_MCAST_FDB_REC = 0x17, IB_SA_ATTR_SM_INFO_REC = 0x18, IB_SA_ATTR_LINK_REC = 0x20, IB_SA_ATTR_GUID_INFO_REC = 0x30, IB_SA_ATTR_SERVICE_REC = 0x31, IB_SA_ATTR_PARTITION_REC = 0x33, IB_SA_ATTR_RANGE_REC = 0x34, IB_SA_ATTR_PATH_REC = 0x35, IB_SA_ATTR_VL_ARB_REC = 0x36, IB_SA_ATTR_MC_GROUP_REC = 0x37, IB_SA_ATTR_MC_MEMBER_REC = 0x38, IB_SA_ATTR_TRACE_REC = 0x39, IB_SA_ATTR_MULTI_PATH_REC = 0x3a, IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b }; #define PATH_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \ .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \ .field_name = "sa_path_rec:" #field static const struct ib_field path_rec_table[] = { { RESERVED, .offset_words = 0, .offset_bits = 0, .size_bits = 32 }, { RESERVED, .offset_words = 1, .offset_bits = 0, .size_bits = 32 }, { PATH_REC_FIELD(dgid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(sgid), .offset_words = 6, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(dlid), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { PATH_REC_FIELD(slid), .offset_words = 10, .offset_bits = 16, .size_bits = 16 }, { PATH_REC_FIELD(raw_traffic), .offset_words = 11, .offset_bits = 0, .size_bits = 1 }, { RESERVED, .offset_words = 11, .offset_bits = 1, .size_bits = 3 }, { PATH_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { PATH_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { PATH_REC_FIELD(traffic_class), .offset_words = 12, .offset_bits = 0, 
.size_bits = 8 }, { PATH_REC_FIELD(reversible), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { PATH_REC_FIELD(numb_path), .offset_words = 12, .offset_bits = 9, .size_bits = 7 }, { PATH_REC_FIELD(pkey), .offset_words = 12, .offset_bits = 16, .size_bits = 16 }, { RESERVED, .offset_words = 13, .offset_bits = 0, .size_bits = 12 }, { PATH_REC_FIELD(sl), .offset_words = 13, .offset_bits = 12, .size_bits = 4 }, { PATH_REC_FIELD(mtu_selector), .offset_words = 13, .offset_bits = 16, .size_bits = 2 }, { PATH_REC_FIELD(mtu), .offset_words = 13, .offset_bits = 18, .size_bits = 6 }, { PATH_REC_FIELD(rate_selector), .offset_words = 13, .offset_bits = 24, .size_bits = 2 }, { PATH_REC_FIELD(rate), .offset_words = 13, .offset_bits = 26, .size_bits = 6 }, { PATH_REC_FIELD(packet_life_time_selector), .offset_words = 14, .offset_bits = 0, .size_bits = 2 }, { PATH_REC_FIELD(packet_life_time), .offset_words = 14, .offset_bits = 2, .size_bits = 6 }, { PATH_REC_FIELD(preference), .offset_words = 14, .offset_bits = 8, .size_bits = 8 }, { RESERVED, .offset_words = 14, .offset_bits = 16, .size_bits = 48 }, }; #define MCMEMBER_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \ .field_name = "sa_mcmember_rec:" #field static const struct ib_field mcmember_rec_table[] = { { MCMEMBER_REC_FIELD(mgid), .offset_words = 0, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(port_gid), .offset_words = 4, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(qkey), .offset_words = 8, .offset_bits = 0, .size_bits = 32 }, { MCMEMBER_REC_FIELD(mlid), .offset_words = 9, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(mtu_selector), .offset_words = 9, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(mtu), .offset_words = 9, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(traffic_class), .offset_words = 9, .offset_bits = 24, .size_bits = 8 }, { 
MCMEMBER_REC_FIELD(pkey), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(rate_selector), .offset_words = 10, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(rate), .offset_words = 10, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(packet_life_time_selector), .offset_words = 10, .offset_bits = 24, .size_bits = 2 }, { MCMEMBER_REC_FIELD(packet_life_time), .offset_words = 10, .offset_bits = 26, .size_bits = 6 }, { MCMEMBER_REC_FIELD(sl), .offset_words = 11, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { MCMEMBER_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { MCMEMBER_REC_FIELD(scope), .offset_words = 12, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(join_state), .offset_words = 12, .offset_bits = 4, .size_bits = 4 }, { MCMEMBER_REC_FIELD(proxy_join), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { RESERVED, .offset_words = 12, .offset_bits = 9, .size_bits = 23 }, }; static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); ib_destroy_ah(sm_ah->ah); kfree(sm_ah); } static void update_sm_ah(void *port_ptr) { struct ib_sa_port *port = port_ptr; struct ib_sa_sm_ah *new_ah, *old_ah; struct ib_port_attr port_attr; struct ib_ah_attr ah_attr; if (ib_query_port(p... [truncated message content] |