netnice-commitlog Mailing List for netnice (Page 6)
Status: Alpha
Brought to you by:
taost6
You can subscribe to this list here.
| 2005 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
(26) |
Dec
(4) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2006 |
Jan
|
Feb
(103) |
Mar
(2) |
Apr
(3) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
|
From: enferex <en...@us...> - 2005-11-11 04:25:31
|
Update of /cvsroot/netnice/Linux/net/ipv4 In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20624/net/ipv4 Modified Files: Tag: netnice2612 af_inet.c ip_output.c tcp_ipv4.c udp.c Log Message: Updated nnfs_ops.c with the FreeBSD fix, to hopefully stop drainage of packets to deleted vif nodes Index: udp.c =================================================================== RCS file: /cvsroot/netnice/Linux/net/ipv4/udp.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- udp.c 5 Nov 2005 21:22:18 -0000 1.1.1.2 +++ udp.c 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -1116,7 +1116,6 @@ /* * All we need to do is get the socket, and then do a checksum. */ - int udp_rcv(struct sk_buff *skb) { struct sock *sk; Index: af_inet.c =================================================================== RCS file: /cvsroot/netnice/Linux/net/ipv4/af_inet.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- af_inet.c 5 Nov 2005 21:21:42 -0000 1.1.1.2 +++ af_inet.c 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -160,6 +160,56 @@ #endif } +void inet_sock_release(struct sock *sk) +{ + +#ifdef CONFIG_NETNICE + if (sk->sk_vifnet != NULL) { + vif_checksk(sk->sk_vifnet, sk); + vif_rmpvif(sk->sk_vifnet); + sk->sk_vifnet = 0; + } +#endif + + if (sk->sk_prot->destroy) + sk->sk_prot->destroy(sk); + + /* Observation: when inet_sock_release is called, processes have + * no access to socket. But net still has. + * Step one, detach it from networking: + * + * A. Remove from hash tables. + */ + + sk->sk_prot->unhash(sk); + + /* In this point socket cannot receive new packets, + * but it is possible that some packets are in flight + * because some CPU runs receiver and did hash table lookup + * before we unhashed socket. They will achieve receive queue + * and will be purged by socket destructor. 
+ * + * Also we still have packets pending on receive + * queue and probably, our own packets waiting in device queues. + * sock_destroy will drain receive queue, but transmitted + * packets will delay socket destruction until the last reference + * will be released. + */ + + sock_orphan(sk); + + xfrm_sk_free_policy(sk); + + +#ifdef INET_REFCNT_DEBUG + if (atomic_read(&sk->sk_refcnt) != 1) + printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n", + sk, atomic_read(&sk->sk_refcnt)); +#endif + sock_put(sk); +} + + /* * The routines beyond this point handle the behaviour of an AF_INET * socket object. Mostly it punts to the subprotocols of IP to do @@ -310,7 +360,9 @@ sk->sk_family = PF_INET; sk->sk_protocol = protocol; sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; - +#ifdef CONFIG_NETNICE + sk->sk_vifnet = vif_cpvif(current->p_vifnet); +#endif inet->uc_ttl = -1; inet->mc_loop = 1; inet->mc_ttl = 1; @@ -602,7 +654,9 @@ (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)); sock_graft(sk2, newsock); - +#ifdef CONFIG_NETNICE + sk2->sk_vifnet = vif_cpvif(current->p_vifnet); +#endif newsock->state = SS_CONNECTED; err = 0; release_sock(sk2); @@ -1094,6 +1148,9 @@ /* * Initialise per-cpu ipv4 mibs */ +#ifdef CONFIG_NETNICE + vif_init((void *)0); +#endif if(init_ipv4_mibs()) printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; Index: tcp_ipv4.c =================================================================== RCS file: /cvsroot/netnice/Linux/net/ipv4/tcp_ipv4.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- tcp_ipv4.c 5 Nov 2005 21:22:13 -0000 1.1.1.2 +++ tcp_ipv4.c 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -859,7 +859,7 @@ return err; } -static __inline__ int tcp_v4_iif(struct sk_buff *skb) +int tcp_v4_iif(struct sk_buff *skb) { return ((struct rtable *)skb->dst)->rt_iif; } @@ -1719,7 +1719,6 @@ /* * From tcp_input.c */ - int tcp_v4_rcv(struct sk_buff *skb) { struct tcphdr *th; Index: ip_output.c 
=================================================================== RCS file: /cvsroot/netnice/Linux/net/ipv4/ip_output.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- ip_output.c 5 Nov 2005 21:21:54 -0000 1.1.1.2 +++ ip_output.c 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -85,6 +85,10 @@ #include <linux/mroute.h> #include <linux/netlink.h> +#ifdef CONFIG_NETNICE +#include <net/vif.h> +#endif + /* * Shall we try to damage output packets if routing dev changes? */ @@ -170,7 +174,7 @@ dst_output); } -static inline int ip_finish_output2(struct sk_buff *skb) +inline int ip_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; struct hh_cache *hh = dst->hh; @@ -200,7 +204,7 @@ if (hh) { int hh_alen; - + read_lock_bh(&hh->hh_lock); hh_alen = HH_DATA_ALIGN(hh->hh_len); memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); @@ -220,11 +224,24 @@ { struct net_device *dev = skb->dst->dev; +#ifdef CONFIG_NETNICE + struct sock *sk = skb->sk; +#endif + skb->dev = dev; skb->protocol = htons(ETH_P_IP); +#ifdef CONFIG_NETNICE + + if (sk) + skb->skb_pvif = sk->sk_vifnet; + return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, - ip_finish_output2); + vif_input); +#else + return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, + ip_finish_output2); +#endif } int ip_mc_output(struct sk_buff *skb) @@ -1340,7 +1357,11 @@ static struct packet_type ip_packet_type = { .type = __constant_htons(ETH_P_IP), +#ifdef CONFIG_NETNICE + .func = vif_rcv, +#else .func = ip_rcv, +#endif }; /* @@ -1360,6 +1381,7 @@ } EXPORT_SYMBOL(ip_finish_output); +EXPORT_SYMBOL(ip_finish_output2); EXPORT_SYMBOL(ip_fragment); EXPORT_SYMBOL(ip_generic_getfrag); EXPORT_SYMBOL(ip_queue_xmit); |
|
From: enferex <en...@us...> - 2005-11-11 04:25:31
|
Update of /cvsroot/netnice/Linux/include/linux In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20624/include/linux Modified Files: Tag: netnice2612 sched.h skbuff.h Added Files: Tag: netnice2612 tailq.h Log Message: Updated nnfs_ops.c with the FreeBSD fix, to hopefully stop drainage of packets to deleted vif nodes Index: sched.h =================================================================== RCS file: /cvsroot/netnice/Linux/include/linux/sched.h,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- sched.h 5 Nov 2005 21:17:20 -0000 1.1.1.2 +++ sched.h 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -570,6 +570,7 @@ struct audit_context; /* See audit.c */ struct mempolicy; +struct pvifnet; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -707,6 +708,10 @@ /* journalling filesystem info */ void *journal_info; +#ifdef CONFIG_NETNICE + struct pvifnet* p_vifnet; +#endif + /* VM state */ struct reclaim_state *reclaim_state; --- NEW FILE: tailq.h --- /* Code copied verbatim from FreeBSD for use by netnice for linux. */ #ifndef _TAILQ_H #define _TAILQ_H /* * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. */ /* * Tail queue declarations. 
*/ #define TAILQ_HEAD(name, type) \ struct name { \ struct type *tqh_first; /* first element */ \ struct type **tqh_last; /* addr of last next element */ \ } #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define TAILQ_ENTRY(type) \ struct { \ struct type *tqe_next; /* next element */ \ struct type **tqe_prev; /* address of previous next element */ \ } /* * Tail queue functions. */ #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_FOREACH(var, head, field) \ for ((var) = TAILQ_FIRST((head)); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = TAILQ_LAST((head), headname); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_INIT(head) do { \ TAILQ_FIRST((head)) = NULL; \ (head)->tqh_last = &TAILQ_FIRST((head)); \ } while (0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ TAILQ_NEXT((elm), field)->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_NEXT((listelm), field) = (elm); \ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ } while (0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ TAILQ_NEXT((elm), field) = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ } while (0) #define TAILQ_INSERT_HEAD(head, elm, field) do { \ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ TAILQ_FIRST((head))->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_FIRST((head)) = (elm); \ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ } while (0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ TAILQ_NEXT((elm), field) = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ 
*(head)->tqh_last = (elm); \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ } while (0) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #define TAILQ_REMOVE(head, elm, field) do { \ if ((TAILQ_NEXT((elm), field)) != NULL) \ TAILQ_NEXT((elm), field)->field.tqe_prev = \ (elm)->field.tqe_prev; \ else \ (head)->tqh_last = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ } while (0) #endif Index: skbuff.h =================================================================== RCS file: /cvsroot/netnice/Linux/include/linux/skbuff.h,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- skbuff.h 5 Nov 2005 21:17:33 -0000 1.1.1.2 +++ skbuff.h 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -29,6 +29,10 @@ #include <linux/net.h> #include <net/checksum.h> +#ifdef CONFIG_NETNICE +#include <net/vif.h> +#endif + #define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ #define SLAB_SKB /* Slabified skbuffs */ @@ -285,6 +289,11 @@ #endif +#ifdef CONFIG_NETNICE + struct pvifnet *skb_pvif; /*vif pointer list*/ + u_int skb_stamp; + struct vif_pkt skb_pkt; +#endif /* These elements must be at the end, see alloc_skb() for details. */ unsigned int truesize; |
|
From: enferex <en...@us...> - 2005-11-11 04:25:31
|
Update of /cvsroot/netnice/Linux/include/net In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20624/include/net Modified Files: Tag: netnice2612 ip.h sock.h Added Files: Tag: netnice2612 vif.h Log Message: Updated nnfs_ops.c with the FreeBSD fix, to hopefully stop drainage of packets to deleted vif nodes Index: sock.h =================================================================== RCS file: /cvsroot/netnice/Linux/include/net/sock.h,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- sock.h 5 Nov 2005 21:18:32 -0000 1.1.1.2 +++ sock.h 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -88,6 +88,7 @@ } while(0) struct sock; +struct pvifnet; /** * struct sock_common - minimal network layer representation of sockets @@ -241,6 +242,9 @@ __u32 sk_sndmsg_off; int sk_write_pending; void *sk_security; +#ifdef CONFIG_NETNICE + struct pvifnet *sk_vifnet; +#endif void (*sk_state_change)(struct sock *sk); void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); --- NEW FILE: vif.h --- /* Netnice for linux. */ #ifndef _NET_VIF_H #define _NET_VIF_H #include <linux/tailq.h> /* * System-wide parameter */ #define MAX_VIFDEPTH 8 #define MAX_VIFBRANCH 32 /* should be less than procfs.h::MAX_SOCKET */ #define MAX_SOCKET 126 /* 5 bits: used for NETNICE_FILENO@procfs */ #define MAX_INTERFACE 127 /* 5 bits: */ #define MAX_NVIF 1024 #define VIFNAMELEN 16 #define SOCKADDRLEN 16 #define PADLEN 24 /* KEEP THIS! 
(24) */ #define ERROR -1 #define NOERR 0 #define VIF_NWC 0 #define VIF_WFQ 1 #define VIF_PQ 2 #define VIF_BLOCK 4 #define VIF_IN 1 /* incommind pkt */ #define VIF_OUT 2 /* outgoing pkt */ #define VIF_IPV4 4 #define VIF_IPV6 6 /* Forward Declaration */ struct sk_filter; /*vif lock*/ extern spinlock_t vif_lock; /* * VIF association stracture for procfs */ struct pvifnet { struct pvifnet *next; /* next entry */ struct net_device *dev; /* real interface */ struct vifnet *pipe; /* virtual interface */ int refcnt; /* XXX: deprecated */ }; /* * The Virtual Network Interface (VIF) */ struct vifnet { TAILQ_ENTRY(vifnet) vif_link; /* vif list */ /* identification */ int vif_index; /* vif index */ int root_flag; /* vif_root */ uid_t uid; /* owner uid */ gid_t gid; /* owner gid */ char name[VIFNAMELEN]; /* name for the interface */ struct net_device *dev; /* read interface */ /* VIF structure */ struct vifnet *pptr; /* parent interface (can be net_device) */ struct vifnet *sptr; /* sibling interface */ struct vifnet *pc[MAX_VIFDEPTH];/* VIF path cache */ int depth; /* VIF depth */ /* packet scheduler */ int type; /* queue type {NWC, WFQ, PQ} */ int delete_flag; /* disabled interface */ u_long bandwidth; /* bytes/tick */ int weight; /* weight (for WFQ, PQ) */ /* queue */ struct { int ndsched; /* scheduling status */ struct vif_pkt *p_queue; /* proximal queue */ struct vif_pkt *p_tail; /* proximal queue */ struct vif_pkt *d_queue; /* distal queue */ struct vif_pkt *d_tail; /* distal queue */ int length; /* elements in queue */ int len_bytes; /* bytes in queue */ long numbytes; /* can send */ long deficit; /* have to wait */ int dr_req; /* drainage request */ long c_time; /* timeout */ long c_timestamp; /* timestamp */ long sndbyte; /* byte sent at a time */ struct vifnet *c_rrp; /* roundrobin pointer */ struct vifnet *c_next; /* callout queue */ u_long snd_pkt; /* droped pkt */ u_long snd_byte; /* sent byte */ u_long drops; /* droped pkt */ } in, out; /* packet filter */ struct 
sk_filter* filter; }; /* struct vifnet */ /* global VIF list */ TAILQ_HEAD(vifnethead, vifnet); extern struct vifnethead vifnet; /* * VIF packet */ #define skbtop(skb) ((struct vif_pkt *) &(skb->skb_pkt)) /* skbuff to vif_pkt */ /* Globals */ extern int wfq_quota; extern int wfq_normal; extern int nn_debug; extern int bypass_out; extern int bypass_in; extern int if_protect; /* * Prototypes */ /* global functions */ extern void register_vif (struct net_device *); extern void unregister_vif (struct net_device *); extern void vif_exit (task_t *); extern void vif_fork (task_t *, task_t *); extern struct pvifnet * vif_cpvif (struct pvifnet *); extern void vif_free (struct vifnet *); extern void vif_alloc (struct vifnet **, char *, struct vifnet *); extern void vif_creat (struct pvifnet **, struct net_device *, struct vifnet *); extern struct pvifnet * vif_lookup (struct pvifnet *, struct pvifnet *); extern void vif_refresh (struct vifnet *); extern inline void vif_rmpvif(struct pvifnet *); extern void vif_rm (struct pvifnet **, struct pvifnet *); extern int vif_checkrm (struct vifnet *pipe); extern void vif_checksk (struct pvifnet *, struct sock *); struct vif_pkt; struct sk_buff; struct packet_type; struct net_device; extern void vif_init (void *); extern void vif_input_init(void *); extern void vif_output_init(void *); extern int vif_rcv (struct sk_buff *, struct net_device *, struct packet_type *); extern int vif_input (struct sk_buff *); extern void vif_output (struct vif_pkt *); extern int vif_markpath (struct vif_pkt *, struct net_device *, struct pvifnet *, u_int); extern void vif_in_enqueue (struct vif_pkt *, struct vifnet *); extern void vif_out_enqueue (struct vif_pkt *, struct vifnet *); extern struct pvifnet * pvif_lookup (struct sk_buff *); extern struct net_device * vif2nd (struct vifnet *); /* useful definitions */ #ifdef ASSERT #define nassert(cond) do {if (!(cond)) panic("line %d: file %s", __LINE__, __FILE__); } while (0) #define nwarn(cond) do {if 
((cond)) printk("line %d: file %s\n", __LINE__, __FILE__); } while (0) #else #define nassert(x) #define nwarn(x) #endif typedef u_int drctn; typedef u_int prtcl; struct vif_pkt { struct net_device *dev; /* destination / source I/F */ struct vifnet **pc; /* VIF path arrays */ short nextvif; /* next vif pointer */ u_short len; /* length of the pkt */ drctn dir; /* direction of the pkt */ prtcl proto; /* L3 protocol of the pkt */ struct vif_pkt *nextp; /* next pkt in the chain */ struct sk_buff *skb; /* encapsulated pkt */ struct packet_type *pt; /* for ip_rcv handling */ char padding[PADLEN]; }; /* struct vif_pkt */ #endif Index: ip.h =================================================================== RCS file: /cvsroot/netnice/Linux/include/net/ip.h,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- ip.h 5 Nov 2005 21:18:24 -0000 1.1.1.2 +++ ip.h 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -141,6 +141,7 @@ unsigned int len); extern int ip_finish_output(struct sk_buff *skb); +extern int ip_finish_output2(struct sk_buff *skb); struct ipv4_config { |
|
From: enferex <en...@us...> - 2005-11-11 04:25:31
|
Update of /cvsroot/netnice/Linux/fs In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20624/fs Modified Files: Tag: netnice2612 Kconfig Makefile Log Message: Updated nnfs_ops.c with the FreeBSD fix, to hopefully stop drainage of packets to deleted vif nodes Index: Makefile =================================================================== RCS file: /cvsroot/netnice/Linux/fs/Makefile,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- Makefile 5 Nov 2005 21:00:04 -0000 1.1.1.2 +++ Makefile 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -53,6 +53,7 @@ obj-$(CONFIG_EXT2_FS) += ext2/ obj-$(CONFIG_CRAMFS) += cramfs/ obj-$(CONFIG_RAMFS) += ramfs/ +obj-$(CONFIG_NNFS) += nnfs/ obj-$(CONFIG_HUGETLBFS) += hugetlbfs/ obj-$(CONFIG_CODA_FS) += coda/ obj-$(CONFIG_MINIX_FS) += minix/ Index: Kconfig =================================================================== RCS file: /cvsroot/netnice/Linux/fs/Kconfig,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- Kconfig 5 Nov 2005 21:00:03 -0000 1.1.1.2 +++ Kconfig 11 Nov 2005 04:25:15 -0000 1.1.1.2.2.1 @@ -853,6 +853,13 @@ config HUGETLB_PAGE def_bool HUGETLBFS +config NNFS + tristate "Netnice file system support" + depends on NETNICE + default m + help + Control API for netnice. + config RAMFS bool default y |
|
From: enferex <en...@us...> - 2005-11-11 04:25:31
|
Update of /cvsroot/netnice/Linux/fs/nnfs In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20624/fs/nnfs Modified Files: Tag: netnice2612 nnfs_ops.c Log Message: Updated nnfs_ops.c with the FreeBSD fix, to hopefully stop drainage of packets to deleted vif nodes Index: nnfs_ops.c =================================================================== RCS file: /cvsroot/netnice/Linux/fs/nnfs/Attic/nnfs_ops.c,v retrieving revision 1.1.4.2 retrieving revision 1.1.4.3 diff -u -d -r1.1.4.2 -r1.1.4.3 --- nnfs_ops.c 10 Nov 2005 04:36:05 -0000 1.1.4.2 +++ nnfs_ops.c 11 Nov 2005 04:25:15 -0000 1.1.4.3 @@ -388,6 +388,11 @@ return -ENOTEMPTY; } } + + /* Has the vif been removed? */ + if (vif->delete_flag) { + return EBUSY; + } #if 0 |
|
From: Takashi O. <ta...@us...> - 2005-11-10 07:29:45
|
Update of /cvsroot/netnice/CVSROOT In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv6734 Modified Files: loginfo Log Message: deleted an address in the delivery list. Index: loginfo =================================================================== RCS file: /cvsroot/netnice/CVSROOT/loginfo,v retrieving revision 1.9 retrieving revision 1.10 diff -u -d -r1.9 -r1.10 --- loginfo 9 Nov 2005 12:13:49 -0000 1.9 +++ loginfo 10 Nov 2005 07:29:31 -0000 1.10 @@ -25,4 +25,4 @@ # or #DEFAULT (echo ""; id; echo %{sVv}; date; cat) >> $CVSROOT/CVSROOT/commitlog -DEFAULT /cvsroot/sitedocs/CVSROOT/cvstools/syncmail -S "netnice : " -u -q %{sVv} ta...@cs... hid...@ir... fu...@wi... net...@li... +DEFAULT /cvsroot/sitedocs/CVSROOT/cvstools/syncmail -S "netnice : " -u -q %{sVv} hid...@ir... fu...@wi... net...@li... |
|
From: enferex <en...@us...> - 2005-11-10 05:17:30
|
Update of /cvsroot/netnice/Linux/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv3414 Modified Files: Tag: netnice2612 exit.c fork.c softirq.c Log Message: Updated from 2.6.7 Index: softirq.c =================================================================== RCS file: /cvsroot/netnice/Linux/kernel/softirq.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- softirq.c 5 Nov 2005 21:20:01 -0000 1.1.1.2 +++ softirq.c 10 Nov 2005 05:17:15 -0000 1.1.1.2.2.1 @@ -137,7 +137,14 @@ void local_bh_enable(void) { + __local_bh_enable(); + +#ifndef CONFIG_NETNICE + /* XXX: netnice internals is running bottom halfs with + irqs disabled. */ WARN_ON(irqs_disabled()); +#endif + /* * Keep preemption disabled until we are done with * softirq processing: Index: fork.c =================================================================== RCS file: /cvsroot/netnice/Linux/kernel/fork.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- fork.c 5 Nov 2005 21:19:42 -0000 1.1.1.2 +++ fork.c 10 Nov 2005 05:17:15 -0000 1.1.1.2.2.1 @@ -49,6 +49,10 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> +#ifdef CONFIG_NETNICE +#include <net/vif.h> +#endif + /* * Protected counters by write_lock_irq(&tasklist_lock) */ @@ -1234,6 +1238,12 @@ else p->state = TASK_STOPPED; +#ifdef CONFIG_NETNICE + vif_fork (current, p); +#endif + + ++total_forks; + if (unlikely (trace)) { current->ptrace_message = pid; ptrace_notify ((trace << 8) | SIGTRAP); Index: exit.c =================================================================== RCS file: /cvsroot/netnice/Linux/kernel/exit.c,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.2.2.1 diff -u -d -r1.1.1.2 -r1.1.1.2.2.1 --- exit.c 5 Nov 2005 21:19:41 -0000 1.1.1.2 +++ exit.c 10 Nov 2005 05:17:15 -0000 1.1.1.2.2.1 @@ -396,6 +396,8 @@ return files; } +EXPORT_SYMBOL(get_files_struct); + void fastcall put_files_struct(struct files_struct *files) { if 
(atomic_dec_and_test(&files->count)) { @@ -826,6 +828,10 @@ if (group_dead && tsk->signal->leader) disassociate_ctty(1); +#ifdef CONFIG_NETNICE + vif_exit(tsk); +#endif + module_put(tsk->thread_info->exec_domain->module); if (tsk->binfmt) module_put(tsk->binfmt->module); |
Update of /cvsroot/netnice/Linux/net/core In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv29142 Added Files: Tag: netnice2612 Makefile datagram.c dev.c dev_mcast.c dst.c dv.c ethtool.c filter.c flow.c gen_estimator.c gen_stats.c iovec.c link_watch.c neighbour.c net-sysfs.c netfilter.c netpoll.c pktgen.c rtnetlink.c scm.c skbuff.c sock.c stream.c sysctl_net_core.c utils.c wireless.c Log Message: Adding missing Linux/net/core/* --- NEW FILE: netpoll.c --- /* * Common framework for low-level network console, dump, and debugger code * * Sep 8 2003 Matt Mackall <mp...@se...> * * based on the netconsole code from: * * Copyright (C) 2001 Ingo Molnar <mi...@re...> * Copyright (C) 2002 Red Hat, Inc. */ #include <linux/smp_lock.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> #include <linux/inetdevice.h> #include <linux/inet.h> #include <linux/interrupt.h> #include <linux/netpoll.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <net/tcp.h> #include <net/udp.h> #include <asm/unaligned.h> /* * We maintain a small pool of fully-sized skbs, to make sure the * message gets out even in extreme OOM situations. 
*/ #define MAX_UDP_CHUNK 1460 #define MAX_SKBS 32 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) static DEFINE_SPINLOCK(skb_list_lock); static int nr_skbs; static struct sk_buff *skbs; static DEFINE_SPINLOCK(queue_lock); static int queue_depth; static struct sk_buff *queue_head, *queue_tail; static atomic_t trapped; #define NETPOLL_RX_ENABLED 1 #define NETPOLL_RX_DROP 2 #define MAX_SKB_SIZE \ (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ sizeof(struct iphdr) + sizeof(struct ethhdr)) static void zap_completion_queue(void); static void queue_process(void *p) { unsigned long flags; struct sk_buff *skb; while (queue_head) { spin_lock_irqsave(&queue_lock, flags); skb = queue_head; queue_head = skb->next; if (skb == queue_tail) queue_head = NULL; queue_depth--; spin_unlock_irqrestore(&queue_lock, flags); dev_queue_xmit(skb); } } static DECLARE_WORK(send_queue, queue_process, NULL); void netpoll_queue(struct sk_buff *skb) { unsigned long flags; if (queue_depth == MAX_QUEUE_DEPTH) { __kfree_skb(skb); return; } spin_lock_irqsave(&queue_lock, flags); if (!queue_head) queue_head = skb; else queue_tail->next = skb; queue_tail = skb; queue_depth++; spin_unlock_irqrestore(&queue_lock, flags); schedule_work(&send_queue); } static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, unsigned short ulen, u32 saddr, u32 daddr) { if (uh->check == 0) return 0; if (skb->ip_summed == CHECKSUM_HW) return csum_tcpudp_magic( saddr, daddr, ulen, IPPROTO_UDP, skb->csum); skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); } /* * Check whether delayed processing was scheduled for our NIC. If so, * we attempt to grab the poll lock and use ->poll() to pump the card. * If this fails, either we've recursed in ->poll() or it's already * running on another CPU. * * Note: we don't mask interrupts with this lock because we're using * trylock here and interrupts are already disabled in the softirq * case. 
Further, we test the poll_owner to avoid recursion on UP * systems where the lock doesn't exist. * * In cases where there is bi-directional communications, reading only * one message at a time can lead to packets being dropped by the * network adapter, forcing superfluous retries and possibly timeouts. * Thus, we set our budget to greater than 1. */ static void poll_napi(struct netpoll *np) { int budget = 16; if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && np->poll_owner != smp_processor_id() && spin_trylock(&np->poll_lock)) { np->rx_flags |= NETPOLL_RX_DROP; atomic_inc(&trapped); np->dev->poll(np->dev, &budget); atomic_dec(&trapped); np->rx_flags &= ~NETPOLL_RX_DROP; spin_unlock(&np->poll_lock); } } void netpoll_poll(struct netpoll *np) { if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) return; /* Process pending work on NIC */ np->dev->poll_controller(np->dev); if (np->dev->poll) poll_napi(np); zap_completion_queue(); } static void refill_skbs(void) { struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&skb_list_lock, flags); while (nr_skbs < MAX_SKBS) { skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); if (!skb) break; skb->next = skbs; skbs = skb; nr_skbs++; } spin_unlock_irqrestore(&skb_list_lock, flags); } static void zap_completion_queue(void) { unsigned long flags; struct softnet_data *sd = &get_cpu_var(softnet_data); if (sd->completion_queue) { struct sk_buff *clist; local_irq_save(flags); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_restore(flags); while (clist != NULL) { struct sk_buff *skb = clist; clist = clist->next; if(skb->destructor) dev_kfree_skb_any(skb); /* put this one back */ else __kfree_skb(skb); } } put_cpu_var(softnet_data); } static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve) { int once = 1, count = 0; unsigned long flags; struct sk_buff *skb = NULL; zap_completion_queue(); repeat: if (nr_skbs < MAX_SKBS) refill_skbs(); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) 
{ spin_lock_irqsave(&skb_list_lock, flags); skb = skbs; if (skb) { skbs = skb->next; skb->next = NULL; nr_skbs--; } spin_unlock_irqrestore(&skb_list_lock, flags); } if(!skb) { count++; if (once && (count == 1000000)) { printk("out of netpoll skbs!\n"); once = 0; } netpoll_poll(np); goto repeat; } atomic_set(&skb->users, 1); skb_reserve(skb, reserve); return skb; } static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { int status; repeat: if(!np || !np->dev || !netif_running(np->dev)) { __kfree_skb(skb); return; } /* avoid recursion */ if(np->poll_owner == smp_processor_id() || np->dev->xmit_lock_owner == smp_processor_id()) { if (np->drop) np->drop(skb); else __kfree_skb(skb); return; } spin_lock(&np->dev->xmit_lock); np->dev->xmit_lock_owner = smp_processor_id(); /* * network drivers do not expect to be called if the queue is * stopped. */ if (netif_queue_stopped(np->dev)) { np->dev->xmit_lock_owner = -1; spin_unlock(&np->dev->xmit_lock); netpoll_poll(np); goto repeat; } status = np->dev->hard_start_xmit(skb, np->dev); np->dev->xmit_lock_owner = -1; spin_unlock(&np->dev->xmit_lock); /* transmit busy */ if(status) { netpoll_poll(np); goto repeat; } } void netpoll_send_udp(struct netpoll *np, const char *msg, int len) { int total_len, eth_len, ip_len, udp_len; struct sk_buff *skb; struct udphdr *udph; struct iphdr *iph; struct ethhdr *eth; udp_len = len + sizeof(*udph); ip_len = eth_len = udp_len + sizeof(*iph); total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; skb = find_skb(np, total_len, total_len - len); if (!skb) return; memcpy(skb->data, msg, len); skb->len += len; udph = (struct udphdr *) skb_push(skb, sizeof(*udph)); udph->source = htons(np->local_port); udph->dest = htons(np->remote_port); udph->len = htons(udp_len); udph->check = 0; iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); /* iph->version = 4; iph->ihl = 5; */ put_unaligned(0x45, (unsigned char *)iph); iph->tos = 0; put_unaligned(htons(ip_len), &(iph->tot_len)); iph->id = 0; 
iph->frag_off = 0; iph->ttl = 64; iph->protocol = IPPROTO_UDP; iph->check = 0; put_unaligned(htonl(np->local_ip), &(iph->saddr)); put_unaligned(htonl(np->remote_ip), &(iph->daddr)); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); eth->h_proto = htons(ETH_P_IP); memcpy(eth->h_source, np->local_mac, 6); memcpy(eth->h_dest, np->remote_mac, 6); skb->dev = np->dev; netpoll_send_skb(np, skb); } static void arp_reply(struct sk_buff *skb) { struct arphdr *arp; unsigned char *arp_ptr; int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; u32 sip, tip; struct sk_buff *send_skb; struct netpoll *np = skb->dev->np; if (!np) return; /* No arp on this interface */ if (skb->dev->flags & IFF_NOARP) return; if (!pskb_may_pull(skb, (sizeof(struct arphdr) + (2 * skb->dev->addr_len) + (2 * sizeof(u32))))) return; skb->h.raw = skb->nh.raw = skb->data; arp = skb->nh.arph; if ((arp->ar_hrd != htons(ARPHRD_ETHER) && arp->ar_hrd != htons(ARPHRD_IEEE802)) || arp->ar_pro != htons(ETH_P_IP) || arp->ar_op != htons(ARPOP_REQUEST)) return; arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len; memcpy(&sip, arp_ptr, 4); arp_ptr += 4 + skb->dev->addr_len; memcpy(&tip, arp_ptr, 4); /* Should we ignore arp? */ if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip)) return; size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4); send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), LL_RESERVED_SPACE(np->dev)); if (!send_skb) return; send_skb->nh.raw = send_skb->data; arp = (struct arphdr *) skb_put(send_skb, size); send_skb->dev = skb->dev; send_skb->protocol = htons(ETH_P_ARP); /* Fill the device header for the ARP frame */ if (np->dev->hard_header && np->dev->hard_header(send_skb, skb->dev, ptype, np->remote_mac, np->local_mac, send_skb->len) < 0) { kfree_skb(send_skb); return; } /* * Fill out the arp protocol part. * * we only support ethernet device type, * which (according to RFC 1390) should always equal 1 (Ethernet). 
*/ arp->ar_hrd = htons(np->dev->type); arp->ar_pro = htons(ETH_P_IP); arp->ar_hln = np->dev->addr_len; arp->ar_pln = 4; arp->ar_op = htons(type); arp_ptr=(unsigned char *)(arp + 1); memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); arp_ptr += np->dev->addr_len; memcpy(arp_ptr, &tip, 4); arp_ptr += 4; memcpy(arp_ptr, np->remote_mac, np->dev->addr_len); arp_ptr += np->dev->addr_len; memcpy(arp_ptr, &sip, 4); netpoll_send_skb(np, send_skb); } int __netpoll_rx(struct sk_buff *skb) { int proto, len, ulen; struct iphdr *iph; struct udphdr *uh; struct netpoll *np = skb->dev->np; if (!np->rx_hook) goto out; if (skb->dev->type != ARPHRD_ETHER) goto out; /* check if netpoll clients need ARP */ if (skb->protocol == __constant_htons(ETH_P_ARP) && atomic_read(&trapped)) { arp_reply(skb); return 1; } proto = ntohs(eth_hdr(skb)->h_proto); if (proto != ETH_P_IP) goto out; if (skb->pkt_type == PACKET_OTHERHOST) goto out; if (skb_shared(skb)) goto out; iph = (struct iphdr *)skb->data; if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; if (iph->ihl < 5 || iph->version != 4) goto out; if (!pskb_may_pull(skb, iph->ihl*4)) goto out; if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) goto out; len = ntohs(iph->tot_len); if (skb->len < len || len < iph->ihl*4) goto out; if (iph->protocol != IPPROTO_UDP) goto out; len -= iph->ihl*4; uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); ulen = ntohs(uh->len); if (ulen != len) goto out; if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0) goto out; if (np->local_ip && np->local_ip != ntohl(iph->daddr)) goto out; if (np->remote_ip && np->remote_ip != ntohl(iph->saddr)) goto out; if (np->local_port && np->local_port != ntohs(uh->dest)) goto out; np->rx_hook(np, ntohs(uh->source), (char *)(uh+1), ulen - sizeof(struct udphdr)); kfree_skb(skb); return 1; out: if (atomic_read(&trapped)) { kfree_skb(skb); return 1; } return 0; } int netpoll_parse_options(struct netpoll *np, char *opt) { char *cur=opt, *delim; if(*cur != '@') { if ((delim 
= strchr(cur, '@')) == NULL) goto parse_failed; *delim=0; np->local_port=simple_strtol(cur, NULL, 10); cur=delim; } cur++; printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); if(*cur != '/') { if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; *delim=0; np->local_ip=ntohl(in_aton(cur)); cur=delim; printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", np->name, HIPQUAD(np->local_ip)); } cur++; if ( *cur != ',') { /* parse out dev name */ if ((delim = strchr(cur, ',')) == NULL) goto parse_failed; *delim=0; strlcpy(np->dev_name, cur, sizeof(np->dev_name)); cur=delim; } cur++; printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); if ( *cur != '@' ) { /* dst port */ if ((delim = strchr(cur, '@')) == NULL) goto parse_failed; *delim=0; np->remote_port=simple_strtol(cur, NULL, 10); cur=delim; } cur++; printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); /* dst ip */ if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; *delim=0; np->remote_ip=ntohl(in_aton(cur)); cur=delim+1; printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", np->name, HIPQUAD(np->remote_ip)); if( *cur != 0 ) { /* MAC address */ if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; *delim=0; np->remote_mac[0]=simple_strtol(cur, NULL, 16); cur=delim+1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; *delim=0; np->remote_mac[1]=simple_strtol(cur, NULL, 16); cur=delim+1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; *delim=0; np->remote_mac[2]=simple_strtol(cur, NULL, 16); cur=delim+1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; *delim=0; np->remote_mac[3]=simple_strtol(cur, NULL, 16); cur=delim+1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; *delim=0; np->remote_mac[4]=simple_strtol(cur, NULL, 16); cur=delim+1; np->remote_mac[5]=simple_strtol(cur, NULL, 16); } printk(KERN_INFO "%s: remote ethernet address " "%02x:%02x:%02x:%02x:%02x:%02x\n", np->name, np->remote_mac[0], np->remote_mac[1], np->remote_mac[2], 
np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]); return 0; parse_failed: printk(KERN_INFO "%s: couldn't parse config at %s!\n", np->name, cur); return -1; } int netpoll_setup(struct netpoll *np) { struct net_device *ndev = NULL; struct in_device *in_dev; np->poll_lock = SPIN_LOCK_UNLOCKED; np->poll_owner = -1; if (np->dev_name) ndev = dev_get_by_name(np->dev_name); if (!ndev) { printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", np->name, np->dev_name); return -1; } np->dev = ndev; ndev->np = np; if (!ndev->poll_controller) { printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", np->name, np->dev_name); goto release; } if (!netif_running(ndev)) { unsigned long atmost, atleast; printk(KERN_INFO "%s: device %s not up yet, forcing it\n", np->name, np->dev_name); rtnl_shlock(); if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) { printk(KERN_ERR "%s: failed to open %s\n", np->name, np->dev_name); rtnl_shunlock(); goto release; } rtnl_shunlock(); atleast = jiffies + HZ/10; atmost = jiffies + 4*HZ; while (!netif_carrier_ok(ndev)) { if (time_after(jiffies, atmost)) { printk(KERN_NOTICE "%s: timeout waiting for carrier\n", np->name); break; } cond_resched(); } /* If carrier appears to come up instantly, we don't * trust it and pause so that we don't pump all our * queued console messages into the bitbucket. 
*/ if (time_before(jiffies, atleast)) { printk(KERN_NOTICE "%s: carrier detect appears" " untrustworthy, waiting 4 seconds\n", np->name); msleep(4000); } } if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr) memcpy(np->local_mac, ndev->dev_addr, 6); if (!np->local_ip) { rcu_read_lock(); in_dev = __in_dev_get(ndev); if (!in_dev || !in_dev->ifa_list) { rcu_read_unlock(); printk(KERN_ERR "%s: no IP address for %s, aborting\n", np->name, np->dev_name); goto release; } np->local_ip = ntohl(in_dev->ifa_list->ifa_local); rcu_read_unlock(); printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", np->name, HIPQUAD(np->local_ip)); } if(np->rx_hook) np->rx_flags = NETPOLL_RX_ENABLED; return 0; release: ndev->np = NULL; np->dev = NULL; dev_put(ndev); return -1; } void netpoll_cleanup(struct netpoll *np) { if (np->dev) np->dev->np = NULL; dev_put(np->dev); np->dev = NULL; } int netpoll_trap(void) { return atomic_read(&trapped); } void netpoll_set_trap(int trap) { if (trap) atomic_inc(&trapped); else atomic_dec(&trapped); } EXPORT_SYMBOL(netpoll_set_trap); EXPORT_SYMBOL(netpoll_trap); EXPORT_SYMBOL(netpoll_parse_options); EXPORT_SYMBOL(netpoll_setup); EXPORT_SYMBOL(netpoll_cleanup); EXPORT_SYMBOL(netpoll_send_udp); EXPORT_SYMBOL(netpoll_poll); EXPORT_SYMBOL(netpoll_queue); --- NEW FILE: neighbour.c --- /* * Generic address resolution entity * * Authors: * Pedro Roque <ro...@di...> * Alexey Kuznetsov <ku...@ms...> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Vitaly E. Lavrov releasing NULL neighbor in neigh_add. * Harald Welte Add neighbour cache statistics like rtstat */ #include <linux/config.h> #include <linux/types.h> [...2323 lines suppressed...] 
EXPORT_SYMBOL(neigh_lookup); EXPORT_SYMBOL(neigh_lookup_nodev); EXPORT_SYMBOL(neigh_parms_alloc); EXPORT_SYMBOL(neigh_parms_release); EXPORT_SYMBOL(neigh_rand_reach_time); EXPORT_SYMBOL(neigh_resolve_output); EXPORT_SYMBOL(neigh_table_clear); EXPORT_SYMBOL(neigh_table_init); EXPORT_SYMBOL(neigh_update); EXPORT_SYMBOL(neigh_update_hhs); EXPORT_SYMBOL(pneigh_enqueue); EXPORT_SYMBOL(pneigh_lookup); #ifdef CONFIG_ARPD EXPORT_SYMBOL(neigh_app_ns); #endif #ifdef CONFIG_SYSCTL EXPORT_SYMBOL(neigh_sysctl_register); EXPORT_SYMBOL(neigh_sysctl_unregister); #endif --- NEW FILE: netfilter.c --- /* netfilter.c: look after the filters for various protocols. * Heavily influenced by the old firewall.c by David Bonn and Alan Cox. * * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any * way. * * Rusty Russell (C)2000 -- This code is GPL. * * February 2000: Modified by James Morris to have 1 queue per protocol. * 15-Mar-2000: Added NF_REPEAT --RR. * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik. */ #include <linux/config.h> #include <linux/kernel.h> #include <linux/netfilter.h> #include <net/protocol.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/icmp.h> #include <net/sock.h> #include <net/route.h> #include <linux/ip.h> /* In this code, we can be waiting indefinitely for userspace to * service a packet if a hook returns NF_QUEUE. We could keep a count * of skbuffs queued for userspace, and not deregister a hook unless * this is zero, but that sucks. Now, we simply check when the * packets come back: if the hook is gone, the packet is discarded. */ #ifdef CONFIG_NETFILTER_DEBUG #define NFDEBUG(format, args...) printk(format , ## args) #else #define NFDEBUG(format, args...) 
#endif /* Sockopts only registered and called from user context, so net locking would be overkill. Also, [gs]etsockopt calls may sleep. */ static DECLARE_MUTEX(nf_sockopt_mutex); struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS]; static LIST_HEAD(nf_sockopts); static DEFINE_SPINLOCK(nf_hook_lock); /* * A queue handler may be registered for each protocol. Each is protected by * long term mutex. The handler must provide an an outfn() to accept packets * for queueing and must reinject all packets it receives, no matter what. */ static struct nf_queue_handler_t { nf_queue_outfn_t outfn; void *data; } queue_handler[NPROTO]; static DEFINE_RWLOCK(queue_handler_lock); int nf_register_hook(struct nf_hook_ops *reg) { struct list_head *i; spin_lock_bh(&nf_hook_lock); list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) { if (reg->priority < ((struct nf_hook_ops *)i)->priority) break; } list_add_rcu(®->list, i->prev); spin_unlock_bh(&nf_hook_lock); synchronize_net(); return 0; } void nf_unregister_hook(struct nf_hook_ops *reg) { spin_lock_bh(&nf_hook_lock); list_del_rcu(®->list); spin_unlock_bh(&nf_hook_lock); synchronize_net(); } /* Do exclusive ranges overlap? */ static inline int overlap(int min1, int max1, int min2, int max2) { return max1 > min2 && min1 < max2; } /* Functions to register sockopt ranges (exclusive). 
*/ int nf_register_sockopt(struct nf_sockopt_ops *reg) { struct list_head *i; int ret = 0; if (down_interruptible(&nf_sockopt_mutex) != 0) return -EINTR; list_for_each(i, &nf_sockopts) { struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i; if (ops->pf == reg->pf && (overlap(ops->set_optmin, ops->set_optmax, reg->set_optmin, reg->set_optmax) || overlap(ops->get_optmin, ops->get_optmax, reg->get_optmin, reg->get_optmax))) { NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n", ops->set_optmin, ops->set_optmax, ops->get_optmin, ops->get_optmax, reg->set_optmin, reg->set_optmax, reg->get_optmin, reg->get_optmax); ret = -EBUSY; goto out; } } list_add(®->list, &nf_sockopts); out: up(&nf_sockopt_mutex); return ret; } void nf_unregister_sockopt(struct nf_sockopt_ops *reg) { /* No point being interruptible: we're probably in cleanup_module() */ restart: down(&nf_sockopt_mutex); if (reg->use != 0) { /* To be woken by nf_sockopt call... */ /* FIXME: Stuart Young's name appears gratuitously. */ set_current_state(TASK_UNINTERRUPTIBLE); reg->cleanup_task = current; up(&nf_sockopt_mutex); schedule(); goto restart; } list_del(®->list); up(&nf_sockopt_mutex); } #ifdef CONFIG_NETFILTER_DEBUG #include <net/ip.h> #include <net/tcp.h> #include <linux/netfilter_ipv4.h> static void debug_print_hooks_ip(unsigned int nf_debug) { if (nf_debug & (1 << NF_IP_PRE_ROUTING)) { printk("PRE_ROUTING "); nf_debug ^= (1 << NF_IP_PRE_ROUTING); } if (nf_debug & (1 << NF_IP_LOCAL_IN)) { printk("LOCAL_IN "); nf_debug ^= (1 << NF_IP_LOCAL_IN); } if (nf_debug & (1 << NF_IP_FORWARD)) { printk("FORWARD "); nf_debug ^= (1 << NF_IP_FORWARD); } if (nf_debug & (1 << NF_IP_LOCAL_OUT)) { printk("LOCAL_OUT "); nf_debug ^= (1 << NF_IP_LOCAL_OUT); } if (nf_debug & (1 << NF_IP_POST_ROUTING)) { printk("POST_ROUTING "); nf_debug ^= (1 << NF_IP_POST_ROUTING); } if (nf_debug) printk("Crap bits: 0x%04X", nf_debug); printk("\n"); } static void nf_dump_skb(int pf, struct sk_buff *skb) { printk("skb: pf=%i %s dev=%s 
len=%u\n", pf, skb->sk ? "(owned)" : "(unowned)", skb->dev ? skb->dev->name : "(no dev)", skb->len); switch (pf) { case PF_INET: { const struct iphdr *ip = skb->nh.iph; __u32 *opt = (__u32 *) (ip + 1); int opti; __u16 src_port = 0, dst_port = 0; if (ip->protocol == IPPROTO_TCP || ip->protocol == IPPROTO_UDP) { struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl); src_port = ntohs(tcp->source); dst_port = ntohs(tcp->dest); } printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu" " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu", ip->protocol, NIPQUAD(ip->saddr), src_port, NIPQUAD(ip->daddr), dst_port, ntohs(ip->tot_len), ip->tos, ntohs(ip->id), ntohs(ip->frag_off), ip->ttl); for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++) printk(" O=0x%8.8X", *opt++); printk("\n"); } } } void nf_debug_ip_local_deliver(struct sk_buff *skb) { /* If it's a loopback packet, it must have come through * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and * NF_IP_LOCAL_IN. Otherwise, must have gone through * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */ if (!skb->dev) { printk("ip_local_deliver: skb->dev is NULL.\n"); } else { if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_LOCAL_IN))) { printk("ip_local_deliver: bad skb: "); debug_print_hooks_ip(skb->nf_debug); nf_dump_skb(PF_INET, skb); } } } void nf_debug_ip_loopback_xmit(struct sk_buff *newskb) { if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING))) { printk("ip_dev_loopback_xmit: bad owned skb = %p: ", newskb); debug_print_hooks_ip(newskb->nf_debug); nf_dump_skb(PF_INET, newskb); } } void nf_debug_ip_finish_output2(struct sk_buff *skb) { /* If it's owned, it must have gone through the * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING. * Otherwise, must have gone through * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING. 
*/ if (skb->sk) { if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING))) { printk("ip_finish_output: bad owned skb = %p: ", skb); debug_print_hooks_ip(skb->nf_debug); nf_dump_skb(PF_INET, skb); } } else { if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_FORWARD) | (1 << NF_IP_POST_ROUTING))) { /* Fragments, entunnelled packets, TCP RSTs generated by ipt_REJECT will have no owners, but still may be local */ if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING))){ printk("ip_finish_output:" " bad unowned skb = %p: ",skb); debug_print_hooks_ip(skb->nf_debug); nf_dump_skb(PF_INET, skb); } } } } #endif /*CONFIG_NETFILTER_DEBUG*/ /* Call get/setsockopt() */ static int nf_sockopt(struct sock *sk, int pf, int val, char __user *opt, int *len, int get) { struct list_head *i; struct nf_sockopt_ops *ops; int ret; if (down_interruptible(&nf_sockopt_mutex) != 0) return -EINTR; list_for_each(i, &nf_sockopts) { ops = (struct nf_sockopt_ops *)i; if (ops->pf == pf) { if (get) { if (val >= ops->get_optmin && val < ops->get_optmax) { ops->use++; up(&nf_sockopt_mutex); ret = ops->get(sk, val, opt, len); goto out; } } else { if (val >= ops->set_optmin && val < ops->set_optmax) { ops->use++; up(&nf_sockopt_mutex); ret = ops->set(sk, val, opt, *len); goto out; } } } } up(&nf_sockopt_mutex); return -ENOPROTOOPT; out: down(&nf_sockopt_mutex); ops->use--; if (ops->cleanup_task) wake_up_process(ops->cleanup_task); up(&nf_sockopt_mutex); return ret; } int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt, int len) { return nf_sockopt(sk, pf, val, opt, &len, 0); } int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len) { return nf_sockopt(sk, pf, val, opt, len, 1); } static unsigned int nf_iterate(struct list_head *head, struct sk_buff **skb, int hook, const struct net_device *indev, const struct net_device *outdev, struct list_head **i, int (*okfn)(struct sk_buff *), int hook_thresh) { unsigned int 
verdict; /* * The caller must not block between calls to this * function because of risk of continuing from deleted element. */ list_for_each_continue_rcu(*i, head) { struct nf_hook_ops *elem = (struct nf_hook_ops *)*i; if (hook_thresh > elem->priority) continue; /* Optimization: we don't need to hold module reference here, since function can't sleep. --RR */ verdict = elem->hook(hook, skb, indev, outdev, okfn); if (verdict != NF_ACCEPT) { #ifdef CONFIG_NETFILTER_DEBUG if (unlikely(verdict > NF_MAX_VERDICT)) { NFDEBUG("Evil return from %p(%u).\n", elem->hook, hook); continue; } #endif if (verdict != NF_REPEAT) return verdict; *i = (*i)->prev; } } return NF_ACCEPT; } int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data) { int ret; write_lock_bh(&queue_handler_lock); if (queue_handler[pf].outfn) ret = -EBUSY; else { queue_handler[pf].outfn = outfn; queue_handler[pf].data = data; ret = 0; } write_unlock_bh(&queue_handler_lock); return ret; } /* The caller must flush their queue before this */ int nf_unregister_queue_handler(int pf) { write_lock_bh(&queue_handler_lock); queue_handler[pf].outfn = NULL; queue_handler[pf].data = NULL; write_unlock_bh(&queue_handler_lock); return 0; } /* * Any packet that leaves via this function must come back * through nf_reinject(). */ static int nf_queue(struct sk_buff *skb, struct list_head *elem, int pf, unsigned int hook, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { int status; struct nf_info *info; #ifdef CONFIG_BRIDGE_NETFILTER struct net_device *physindev = NULL; struct net_device *physoutdev = NULL; #endif /* QUEUE == DROP if noone is waiting, to be safe. 
*/ read_lock(&queue_handler_lock); if (!queue_handler[pf].outfn) { read_unlock(&queue_handler_lock); kfree_skb(skb); return 1; } info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) { if (net_ratelimit()) printk(KERN_ERR "OOM queueing packet %p\n", skb); read_unlock(&queue_handler_lock); kfree_skb(skb); return 1; } *info = (struct nf_info) { (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn }; /* If it's going away, ignore hook. */ if (!try_module_get(info->elem->owner)) { read_unlock(&queue_handler_lock); kfree(info); return 0; } /* Bump dev refs so they don't vanish while packet is out */ if (indev) dev_hold(indev); if (outdev) dev_hold(outdev); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge) { physindev = skb->nf_bridge->physindev; if (physindev) dev_hold(physindev); physoutdev = skb->nf_bridge->physoutdev; if (physoutdev) dev_hold(physoutdev); } #endif status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data); read_unlock(&queue_handler_lock); if (status < 0) { /* James M doesn't say fuck enough. */ if (indev) dev_put(indev); if (outdev) dev_put(outdev); #ifdef CONFIG_BRIDGE_NETFILTER if (physindev) dev_put(physindev); if (physoutdev) dev_put(physoutdev); #endif module_put(info->elem->owner); kfree(info); kfree_skb(skb); return 1; } return 1; } /* Returns 1 if okfn() needs to be executed by the caller, * -EPERM for NF_DROP, 0 otherwise. 
*/ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int hook_thresh) { struct list_head *elem; unsigned int verdict; int ret = 0; /* We may already have this, but read-locks nest anyway */ rcu_read_lock(); #ifdef CONFIG_NETFILTER_DEBUG if (unlikely((*pskb)->nf_debug & (1 << hook))) { printk("nf_hook: hook %i already set.\n", hook); nf_dump_skb(pf, *pskb); } (*pskb)->nf_debug |= (1 << hook); #endif elem = &nf_hooks[pf][hook]; next_hook: verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev, outdev, &elem, okfn, hook_thresh); if (verdict == NF_ACCEPT || verdict == NF_STOP) { ret = 1; goto unlock; } else if (verdict == NF_DROP) { kfree_skb(*pskb); ret = -EPERM; } else if (verdict == NF_QUEUE) { NFDEBUG("nf_hook: Verdict = QUEUE.\n"); if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn)) goto next_hook; } unlock: rcu_read_unlock(); return ret; } void nf_reinject(struct sk_buff *skb, struct nf_info *info, unsigned int verdict) { struct list_head *elem = &info->elem->list; struct list_head *i; rcu_read_lock(); /* Release those devices we held, or Alexey will kill me. */ if (info->indev) dev_put(info->indev); if (info->outdev) dev_put(info->outdev); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge) { if (skb->nf_bridge->physindev) dev_put(skb->nf_bridge->physindev); if (skb->nf_bridge->physoutdev) dev_put(skb->nf_bridge->physoutdev); } #endif /* Drop reference to owner of hook which queued us. */ module_put(info->elem->owner); list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) { if (i == elem) break; } if (elem == &nf_hooks[info->pf][info->hook]) { /* The module which sent it to userspace is gone. */ NFDEBUG("%s: module disappeared, dropping packet.\n", __FUNCTION__); verdict = NF_DROP; } /* Continue traversal iff userspace said ok... 
*/ if (verdict == NF_REPEAT) { elem = elem->prev; verdict = NF_ACCEPT; } if (verdict == NF_ACCEPT) { next_hook: verdict = nf_iterate(&nf_hooks[info->pf][info->hook], &skb, info->hook, info->indev, info->outdev, &elem, info->okfn, INT_MIN); } switch (verdict) { case NF_ACCEPT: info->okfn(skb); break; case NF_QUEUE: if (!nf_queue(skb, elem, info->pf, info->hook, info->indev, info->outdev, info->okfn)) goto next_hook; break; } rcu_read_unlock(); if (verdict == NF_DROP) kfree_skb(skb); kfree(info); return; } #ifdef CONFIG_INET /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct sk_buff **pskb) { struct iphdr *iph = (*pskb)->nh.iph; struct rtable *rt; struct flowi fl = {}; struct dst_entry *odst; unsigned int hh_len; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook. */ if (inet_addr_type(iph->saddr) == RTN_LOCAL) { fl.nl_u.ip4_u.daddr = iph->daddr; fl.nl_u.ip4_u.saddr = iph->saddr; fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; #ifdef CONFIG_IP_ROUTE_FWMARK fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark; #endif fl.proto = iph->protocol; if (ip_route_output_key(&rt, &fl) != 0) return -1; /* Drop old route. */ dst_release((*pskb)->dst); (*pskb)->dst = &rt->u.dst; } else { /* non-local src, find valid iif to satisfy * rp-filter when calling ip_route_input. */ fl.nl_u.ip4_u.daddr = iph->saddr; if (ip_route_output_key(&rt, &fl) != 0) return -1; odst = (*pskb)->dst; if (ip_route_input(*pskb, iph->daddr, iph->saddr, RT_TOS(iph->tos), rt->u.dst.dev) != 0) { dst_release(&rt->u.dst); return -1; } dst_release(&rt->u.dst); dst_release(odst); } if ((*pskb)->dst->error) return -1; /* Change in oif may mean change in hh_len. 
*/ hh_len = (*pskb)->dst->dev->hard_header_len; if (skb_headroom(*pskb) < hh_len) { struct sk_buff *nskb; nskb = skb_realloc_headroom(*pskb, hh_len); if (!nskb) return -1; if ((*pskb)->sk) skb_set_owner_w(nskb, (*pskb)->sk); kfree_skb(*pskb); *pskb = nskb; } return 0; } EXPORT_SYMBOL(ip_route_me_harder); int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len) { struct sk_buff *nskb; if (writable_len > (*pskb)->len) return 0; /* Not exclusive use of packet? Must copy. */ if (skb_shared(*pskb) || skb_cloned(*pskb)) goto copy_skb; return pskb_may_pull(*pskb, writable_len); copy_skb: nskb = skb_copy(*pskb, GFP_ATOMIC); if (!nskb) return 0; BUG_ON(skb_is_nonlinear(nskb)); /* Rest of kernel will get very unhappy if we pass it a suddenly-orphaned skbuff */ if ((*pskb)->sk) skb_set_owner_w(nskb, (*pskb)->sk); kfree_skb(*pskb); *pskb = nskb; return 1; } EXPORT_SYMBOL(skb_ip_make_writable); #endif /*CONFIG_INET*/ /* Internal logging interface, which relies on the real LOG target modules */ #define NF_LOG_PREFIXLEN 128 static nf_logfn *nf_logging[NPROTO]; /* = NULL */ static int reported = 0; static DEFINE_SPINLOCK(nf_log_lock); int nf_log_register(int pf, nf_logfn *logfn) { int ret = -EBUSY; /* Any setup of logging members must be done before * substituting pointer. */ spin_lock(&nf_log_lock); if (!nf_logging[pf]) { rcu_assign_pointer(nf_logging[pf], logfn); ret = 0; } spin_unlock(&nf_log_lock); return ret; } void nf_log_unregister(int pf, nf_logfn *logfn) { spin_lock(&nf_log_lock); if (nf_logging[pf] == logfn) nf_logging[pf] = NULL; spin_unlock(&nf_log_lock); /* Give time to concurrent readers. */ synchronize_net(); } void nf_log_packet(int pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const char *fmt, ...) 
{ va_list args; char prefix[NF_LOG_PREFIXLEN]; nf_logfn *logfn; rcu_read_lock(); logfn = rcu_dereference(nf_logging[pf]); if (logfn) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); /* We must read logging before nf_logfn[pf] */ logfn(hooknum, skb, in, out, prefix); } else if (!reported) { printk(KERN_WARNING "nf_log_packet: can\'t log yet, " "no backend logging module loaded in!\n"); reported++; } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_register); EXPORT_SYMBOL(nf_log_unregister); EXPORT_SYMBOL(nf_log_packet); /* This does not belong here, but locally generated errors need it if connection tracking in use: without this, connection may not be in hash table, and hence manufactured ICMP or RST packets will not be associated with it. */ void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) { void (*attach)(struct sk_buff *, struct sk_buff *); if (skb->nfct && (attach = ip_ct_attach) != NULL) { mb(); /* Just to be sure: must be read before executing this */ attach(new, skb); } } void __init netfilter_init(void) { int i, h; for (i = 0; i < NPROTO; i++) { for (h = 0; h < NF_MAX_HOOKS; h++) INIT_LIST_HEAD(&nf_hooks[i][h]); } } EXPORT_SYMBOL(ip_ct_attach); EXPORT_SYMBOL(nf_ct_attach); EXPORT_SYMBOL(nf_getsockopt); EXPORT_SYMBOL(nf_hook_slow); EXPORT_SYMBOL(nf_hooks); EXPORT_SYMBOL(nf_register_hook); EXPORT_SYMBOL(nf_register_queue_handler); EXPORT_SYMBOL(nf_register_sockopt); EXPORT_SYMBOL(nf_reinject); EXPORT_SYMBOL(nf_setsockopt); EXPORT_SYMBOL(nf_unregister_hook); EXPORT_SYMBOL(nf_unregister_queue_handler); EXPORT_SYMBOL(nf_unregister_sockopt); --- NEW FILE: gen_stats.c --- /* * net/core/gen_stats.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* * Authors: Thomas Graf <tg...@su...> * Jamal Hadi Salim * Alexey Kuznetsov, <ku...@ms...> * * See Documentation/networking/gen_stats.txt */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/rtnetlink.h> #include <linux/gen_stats.h> #include <net/gen_stats.h> static inline int gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) { RTA_PUT(d->skb, type, size, buf); return 0; rtattr_failure: spin_unlock_bh(d->lock); return -1; } /** * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode * @skb: socket buffer to put statistics TLVs into * @type: TLV type for top level statistic TLV * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV * @xstats_type: TLV type for backward compatibility xstats TLV * @lock: statistics lock * @d: dumping handle * * Initializes the dumping handle, grabs the statistic lock and appends * an empty TLV header to the socket buffer for use a container for all * other statistic TLVS. * * The dumping handle is marked to be in backward compatibility mode telling * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats. * * Returns 0 on success or -1 if the room in the socket buffer was not sufficient. 
*/ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, int xstats_type, spinlock_t *lock, struct gnet_dump *d) { memset(d, 0, sizeof(*d)); spin_lock_bh(lock); d->lock = lock; if (type) d->tail = (struct rtattr *) skb->tail; d->skb = skb; d->compat_tc_stats = tc_stats_type; d->compat_xstats = xstats_type; if (d->tail) return gnet_stats_copy(d, type, NULL, 0); return 0; } /** * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode * @skb: socket buffer to put statistics TLVs into * @type: TLV type for top level statistic TLV * @lock: statistics lock * @d: dumping handle * * Initializes the dumping handle, grabs the statistic lock and appends * an empty TLV header to the socket buffer for use a container for all * other statistic TLVS. * * Returns 0 on success or -1 if the room in the socket buffer was not sufficient. */ int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d) { return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d); } /** * gnet_stats_copy_basic - copy basic statistics into statistic TLV * @d: dumping handle * @b: basic statistics * * Appends the basic statistics to the top level TLV created by * gnet_stats_start_copy(). * * Returns 0 on success or -1 with the statistic lock released * if the room in the socket buffer was not sufficient. */ int gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b) { if (d->compat_tc_stats) { d->tc_stats.bytes = b->bytes; d->tc_stats.packets = b->packets; } if (d->tail) return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b)); return 0; } /** * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV * @d: dumping handle * @r: rate estimator statistics * * Appends the rate estimator statistics to the top level TLV created by * gnet_stats_start_copy(). * * Returns 0 on success or -1 with the statistic lock released * if the room in the socket buffer was not sufficient. 
*/ int gnet_stats_copy_rate_est(struct gnet_dump *d, struct gnet_stats_rate_est *r) { if (d->compat_tc_stats) { d->tc_stats.bps = r->bps; d->tc_stats.pps = r->pps; } if (d->tail) return gnet_stats_copy(d, TCA_STATS_RATE_EST, r, sizeof(*r)); return 0; } /** * gnet_stats_copy_queue - copy queue statistics into statistics TLV * @d: dumping handle * @q: queue statistics * * Appends the queue statistics to the top level TLV created by * gnet_stats_start_copy(). * * Returns 0 on success or -1 with the statistic lock released * if the room in the socket buffer was not sufficient. */ int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q) { if (d->compat_tc_stats) { d->tc_stats.drops = q->drops; d->tc_stats.qlen = q->qlen; d->tc_stats.backlog = q->backlog; d->tc_stats.overlimits = q->overlimits; } if (d->tail) return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q)); return 0; } /** * gnet_stats_copy_app - copy application specific statistics into statistics TLV * @d: dumping handle * @st: application specific statistics data * @len: length of data * * Appends the application sepecific statistics to the top level TLV created by * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping * handle is in backward compatibility mode. * * Returns 0 on success or -1 with the statistic lock released * if the room in the socket buffer was not sufficient. */ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) { if (d->compat_xstats) { d->xstats = st; d->xstats_len = len; } if (d->tail) return gnet_stats_copy(d, TCA_STATS_APP, st, len); return 0; } /** * gnet_stats_finish_copy - finish dumping procedure * @d: dumping handle * * Corrects the length of the top level TLV to include all TLVs added * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs * if gnet_stats_start_copy_compat() was used and releases the statistics * lock. 
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Patch the container TLV length to cover everything appended to the
	 * skb since gnet_stats_start_copy() recorded d->tail. */
	if (d->tail)
		d->tail->rta_len = d->skb->tail - (u8 *) d->tail;

	/* Emit the legacy single-struct TLVs for old userspace, built up by
	 * the gnet_stats_copy_*() calls in compatibility mode. */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
			sizeof(d->tc_stats)) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
			d->xstats_len) < 0)
			return -1;
	}

	/* Pairs with the spin_lock_bh() in gnet_stats_start_copy_compat(). */
	spin_unlock_bh(d->lock);
	return 0;
}

EXPORT_SYMBOL(gnet_stats_start_copy);
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
EXPORT_SYMBOL(gnet_stats_copy_basic);
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
EXPORT_SYMBOL(gnet_stats_copy_queue);
EXPORT_SYMBOL(gnet_stats_copy_app);
EXPORT_SYMBOL(gnet_stats_finish_copy);

--- NEW FILE: filter.c ---

/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <js...@sa...>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/filter.h>

/* No hurry in this branch */

/*
 * Resolve a negative (special) BPF offset into a pointer inside the skb:
 * SKF_NET_OFF-relative offsets address the network header, SKF_LL_OFF the
 * link-level header.  Returns NULL if the result falls outside the linear
 * skb data [head, tail).
 */
static u8 *load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb->nh.raw + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb->mac.raw + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb->tail)
		return ptr;
	return NULL;
}

/**
 *	sk_run_filter	- 	run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * len is the number of filter blocks in the array.
 */
int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
	unsigned char *data = skb->data;
	/* len is UNSIGNED. Byte wide insns relies only on implicit
	   type casts to prevent reading arbitrary memory locations. */
	unsigned int len = skb->len-skb->data_len;	/* linear part only */
	struct sock_filter *fentry;	/* We walk down these */
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	int k;
	int pc;

	/*
	 * Process array of filter instructions.
	 * Every case either 'continue's to the next instruction, returns
	 * (RET / error), or 'break's out of the switch to reach the
	 * ancillary-data handler at the bottom of the loop body.
	 */
	for (pc = 0; pc < flen; pc++) {
		fentry = &filter[pc];

		switch (fentry->code) {
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;
		case BPF_ALU|BPF_ADD|BPF_K:
			A += fentry->k;
			continue;
		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;
		case BPF_ALU|BPF_SUB|BPF_K:
			A -= fentry->k;
			continue;
		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;
		case BPF_ALU|BPF_MUL|BPF_K:
			A *= fentry->k;
			continue;
		case BPF_ALU|BPF_DIV|BPF_X:
			/* Division by zero terminates the filter (drop). */
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU|BPF_DIV|BPF_K:
			if (fentry->k == 0)
				return 0;
			A /= fentry->k;
			continue;
		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;
		case BPF_ALU|BPF_AND|BPF_K:
			A &= fentry->k;
			continue;
		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;
		case BPF_ALU|BPF_OR|BPF_K:
			A |= fentry->k;
			continue;
		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;
		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= fentry->k;
			continue;
		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;
		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= fentry->k;
			continue;
		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;
		case BPF_JMP|BPF_JA:
			/* Unconditional forward jump; sk_chk_filter() bounds k. */
			pc += fentry->k;
			continue;
		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_LD|BPF_W|BPF_ABS:
			k = fentry->k;
 load_w:
			/* Fast path: word fully inside the linear data. */
			if (k >= 0 && (unsigned int)(k+sizeof(u32)) <= len) {
				A = ntohl(*(u32*)&data[k]);
				continue;
			}
			if (k < 0) {
				u8 *ptr;

				/* k in the ancillary range: break to the
				 * ancillary-data switch below. */
				if (k >= SKF_AD_OFF)
					break;
				ptr = load_pointer(skb, k);
				if (ptr) {
					A = ntohl(*(u32*)ptr);
					continue;
				}
			} else {
				/* Slow path: may live in paged (nonlinear) data. */
				u32 _tmp, *p;
				p = skb_header_pointer(skb, k, 4, &_tmp);
				if (p != NULL) {
					A = ntohl(*p);
					continue;
				}
			}
			return 0;
		case BPF_LD|BPF_H|BPF_ABS:
			k = fentry->k;
 load_h:
			if (k >= 0 && (unsigned int)(k + sizeof(u16)) <= len) {
				A = ntohs(*(u16*)&data[k]);
				continue;
			}
			if (k < 0) {
				u8 *ptr;

				if (k >= SKF_AD_OFF)
					break;
				ptr = load_pointer(skb, k);
				if (ptr) {
					A = ntohs(*(u16*)ptr);
					continue;
				}
			} else {
				u16 _tmp, *p;
				p = skb_header_pointer(skb, k, 2, &_tmp);
				if (p != NULL) {
					A = ntohs(*p);
					continue;
				}
			}
			return 0;
		case BPF_LD|BPF_B|BPF_ABS:
			k = fentry->k;
load_b:
			if (k >= 0 && (unsigned int)k < len) {
				A = data[k];
				continue;
			}
			if (k < 0) {
				u8 *ptr;

				if (k >= SKF_AD_OFF)
					break;
				ptr = load_pointer(skb, k);
				if (ptr) {
					A = *ptr;
					continue;
				}
			} else {
				u8 _tmp, *p;
				p = skb_header_pointer(skb, k, 1, &_tmp);
				if (p != NULL) {
					A = *p;
					continue;
				}
			}
			return 0;
		case BPF_LD|BPF_W|BPF_LEN:
			A = len;
			continue;
		case BPF_LDX|BPF_W|BPF_LEN:
			X = len;
			continue;
		case BPF_LD|BPF_W|BPF_IND:
			/* Indexed loads share the absolute-load code paths. */
			k = X + fentry->k;
			goto load_w;
		case BPF_LD|BPF_H|BPF_IND:
			k = X + fentry->k;
			goto load_h;
		case BPF_LD|BPF_B|BPF_IND:
			k = X + fentry->k;
			goto load_b;
		case BPF_LDX|BPF_B|BPF_MSH:
			/* X := 4 * (low nibble of byte k), the classic trick
			 * to fetch an IP header length. */
			if (fentry->k >= len)
				return 0;
			X = (data[fentry->k] & 0xf) << 2;
			continue;
		case BPF_LD|BPF_IMM:
			A = fentry->k;
			continue;
		case BPF_LDX|BPF_IMM:
			X = fentry->k;
			continue;
		case BPF_LD|BPF_MEM:
			A = mem[fentry->k];
			continue;
		case BPF_LDX|BPF_MEM:
			X = mem[fentry->k];
			continue;
		case BPF_MISC|BPF_TAX:
			X = A;
			continue;
		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		case BPF_RET|BPF_K:
			return ((unsigned int)fentry->k);
		case BPF_RET|BPF_A:
			return ((unsigned int)A);
		case BPF_ST:
			mem[fentry->k] = A;
			continue;
		case BPF_STX:
			mem[fentry->k] = X;
			continue;
		default:
			/* Invalid instruction counts as RET */
			return 0;
		}

		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 * Reached only via 'break' from the load cases above,
		 * when k >= SKF_AD_OFF.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = htons(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			/* NOTE(review): assumes skb->dev is non-NULL here --
			 * holds for received packets; verify for other callers. */
			A = skb->dev->ifindex;
			continue;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal instructions
 * and no backward jumps. It must end with a RET instruction
 *
 * Returns 0 if the rule set is legal or a negative errno code if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	struct sock_filter *ftest;
	int pc;

	/* Reject empty programs and sizes that would overflow the
	 * fsize = flen * sizeof(struct sock_filter) computation. */
	if (((unsigned int)flen >= (~0U / sizeof(struct sock_filter))) || flen == 0)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		/* all jumps are forward as they are not signed */
		ftest = &filter[pc];
		if (BPF_CLASS(ftest->code) == BPF_JMP) {
			/* but they mustn't jump off the end */
			if (BPF_OP(ftest->code) == BPF_JA) {
				/*
				 * Note, the large ftest->k might cause loops.
				 * Compare this with conditional jumps below,
				 * where offsets are limited. --ANK (981016)
				 */
				if (ftest->k >= (unsigned)(flen-pc-1))
					return -EINVAL;
			} else {
				/* for conditionals both must be safe */
				if (pc + ftest->jt +1 >= flen ||
				    pc + ftest->jf +1 >= flen)
					return -EINVAL;
			}
		}

		/* check that memory operations use valid addresses. */
		if (ftest->k >= BPF_MEMWORDS) {
			/* but it might not be a memory operation... */
			switch (ftest->code) {
			case BPF_ST:
			case BPF_STX:
			case BPF_LD|BPF_MEM:
			case BPF_LDX|BPF_MEM:
				return -EINVAL;
			}
		}
	}

	/*
	 * The program must end with a return. We don't care where they
	 * jumped within the script (its always forwards) but in the end
	 * they _will_ hit this.
	 */
	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (!err) {
		struct sk_filter *old_fp;

		/* Swap in the new filter under the socket lock so readers
		 * always see a complete, verified program; the old filter
		 * (if any) is released below via 'fp'. */
		spin_lock_bh(&sk->sk_lock.slock);
		old_fp = sk->sk_filter;
		sk->sk_filter = fp;
		spin_unlock_bh(&sk->sk_lock.slock);
		fp = old_fp;
	}

	/* 'fp' is now either the replaced filter or, on error, the
	 * freshly-built one -- drop a reference either way. */
	if (fp)
		sk_filter_release(sk, fp);

	return err;
}

EXPORT_SYMBOL(sk_chk_filter);
EXPORT_SYMBOL(sk_run_filter);

--- NEW FILE: scm.c ---

/* scm.c - Socket level control messages processing.
 *
 * Author:	Alexey Kuznetsov, <ku...@ms...>
 *		Alignment and value checking mods by Craig Metz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
*/ #include <linux/module.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/security.h> #include <asm/system.h> #include <asm/uaccess.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/compat.h> #include <net/scm.h> /* * Only allow a user to send credentials, that they could set with * setu(g)id. */ static __inline__ int scm_check_creds(struct ucred *creds) { if ((creds->pid == current->tgid || capable(CAP_SYS_ADMIN)) && ((creds->uid == current->uid || creds->uid == current->euid || creds->uid == current->suid) || capable(CAP_SETUID)) && ((creds->gid == current->gid || creds->gid == current->egid || creds->gid == current->sgid) || capable(CAP_SETGID))) { return 0; } return -EPERM; } static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) { int *fdp = (int*)CMSG_DATA(cmsg); struct scm_fp_list *fpl = *fplp; struct file **fpp; int i, num; num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int); if (num <= 0) return 0; if (num > SCM_MAX_FD) return -EINVAL; if (!fpl) { fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL); if (!fpl) return -ENOMEM; *fplp = fpl; fpl->count = 0; } fpp = &fpl->fp[fpl->count]; if (fpl->count + num > SCM_MAX_FD) return -EINVAL; /* * Verify the descriptors and increment the usage count. 
*/ for (i=0; i< num; i++) { int fd = fdp[i]; struct file *file; if (fd < 0 || !(file = fget(fd))) return -EBADF; *fpp++ = file; fpl->count++; } return num; } void __scm_destroy(struct scm_cookie *scm) { struct scm_fp_list *fpl = scm->fp; int i; if (fpl) { scm->fp = NULL; for (i=fpl->count-1; i>=0; i--) fput(fpl->fp[i]); kfree(fpl); } } int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) { struct cmsghdr *cmsg; int err; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { err = -EINVAL; /* Verify that cmsg_len is at least sizeof(struct cmsghdr) */ /* The first check was omitted in <= 2.2.5. The reasoning was that parser checks cmsg_len in any case, so that additional check would be work duplication. But if cmsg_level is not SOL_SOCKET, we do not check for too short ancillary data object at all! Oops. OK, let's add it... */ if (!CMSG_OK(msg, cmsg)) goto error; if (cmsg->cmsg_level != SOL_SOCKET) continue; switch (cmsg->cmsg_type) { case SCM_RIGHTS: err=scm_fp_copy(cmsg, &p->fp); if (err<0) goto error; break; case SCM_CREDENTIALS: if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) goto error; memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); err = scm_check_creds(&p->creds); if (err) goto error; break; default: goto error; } } if (p->fp && !p->fp->count) { kfree(p->fp); p->fp = NULL; } return 0; error: scm_destroy(p); return err; } int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) { struct cmsghdr __user *cm = (struct cmsghdr __user *)msg->msg_control; struct cmsghdr cmhdr; int cmlen = CMSG_LEN(len); int err; if (MSG_CMSG_COMPAT & msg->msg_flags) return put_cmsg_compat(msg, level, type, len, data); if (cm==NULL || msg->msg_controllen < sizeof(*cm)) { msg->msg_flags |= MSG_CTRUNC; return 0; /* XXX: return error? check spec. 
*/ } if (msg->msg_controllen < cmlen) { msg->msg_flags |= MSG_CTRUNC; cmlen = msg->msg_controllen; } cmhdr.cmsg_level = level; cmhdr.cmsg_type = type; cmhdr.cmsg_len = cmlen; err = -EFAULT; if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) goto out; if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) goto out; cmlen = CMSG_SPACE(len); msg->msg_control += cmlen; msg->msg_controllen -= cmlen; err = 0; out: return err; } void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) { struct cmsghdr __user *cm = (struct cmsghdr __user*)msg->msg_control; int fdmax = 0; int fdnum = scm->fp->count; struct file **fp = scm->fp->fp; int __user *cmfptr; int err = 0, i; if (MSG_CMSG_COMPAT & msg->msg_flags) { scm_detach_fds_compat(msg, scm); return; } if (msg->msg_controllen > sizeof(struct cmsghdr)) fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr)) / sizeof(int)); if (fdnum < fdmax) fdmax = fdnum; for (i=0, cmfptr=(int __user *)CMSG_DATA(cm); i<fdmax; i++, cmfptr++) { int new_fd; err = security_file_receive(fp[i]); if (err) break; err = get_unused_fd(); if (err < 0) break; new_fd = err; err = put_user(new_fd, cmfptr); if (err) { put_unused_fd(new_fd); break; } /* Bump the usage count and install the file. */ get_file(fp[i]); fd_install(new_fd, fp[i]); } if (i > 0) { int cmlen = CMSG_LEN(i*sizeof(int)); if (!err) err = put_user(SOL_SOCKET, &cm->cmsg_level); if (!err) er... [truncated message content] |
|
From: enferex <en...@us...> - 2005-11-10 04:36:19
|
Update of /cvsroot/netnice/Linux/fs/nnfs In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv26897 Added Files: Tag: netnice2612 Makefile nnfs.h nnfs_fileno.c nnfs_netnice.c nnfs_ops.c nnfs_subr.c Log Message: Adding NNFS from 2.6.7 --- NEW FILE: nnfs_netnice.c --- /* * Copyright (c) 2001-2004 Netnice.org * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: nnfs_netnice.c,v 1.1.4.2 2005/11/10 04:36:05 enferex Exp $ */ /* * TODO: * * - may need to lock file descriptor structure for SMP kernel * - we may need to lock proc *p for many operations... 
* - try to fix lines with XXX */ #include <linux/types.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/errno.h> #include <net/sock.h> #include <asm/uaccess.h> struct socket; #include <net/vif.h> #include "nnfs.h" char *vif_type_str[] = { "Non Work-Conserving", /* VIF_NWC */ "Weighted Fair Queuing", /* VIF_WFQ */ "Priority Queue", /* VIF_PQ */ "Filter", /* VIF_FILTER */ "Block", /* VIF_BLOCK */ }; #define VIF_NTYPES (sizeof(vif_type_str) / sizeof (char*)) struct vifnet; /* * Control the netnice virtual interface parameters. */ int nnfs_donetnice(pfstype file_type, struct vifnet * vif, char * u_buf, size_t count, loff_t * offset, nnfs_rw_t rw) { int xlen, error = 0; if (*offset != 0) { return 0; } switch (rw) { case nnfs_read: { char buf[256]; switch (file_type) { case Pvif_type: if (0 <= vif->type && vif->type < VIF_NTYPES) sprintf(buf, "%s\n", vif_type_str[vif->type]); else sprintf(buf, "Invalid type\n"); break; case Pvif_bw: { u_long bw = vif->bandwidth * HZ * 8; sprintf(buf, "%6lu\n", bw); break; } case Pvif_wt: sprintf(buf, "%6d weight\n", vif->weight); break; case Pvif_recv: sprintf(buf, "%lu %lu\n", vif->in.snd_pkt, vif->in.snd_byte); break; case Pvif_send: sprintf(buf, "%lu %lu\n", vif->out.snd_pkt, vif->out.snd_byte); break; case Pvif_drops: sprintf(buf, "%lu %lu\n", vif->in.drops, vif->out.drops); break; case Pvif_filter: { /* skip null filter */ if (!vif->filter) return ENOENT; xlen = vif->filter->len * sizeof(vif->filter->insns[0]); copy_to_user(u_buf, vif->filter->insns, xlen); return xlen; } default: return -ENODEV; } /* output */ xlen = strlen(buf); if (count < xlen) { xlen = count; } copy_to_user(u_buf,buf, xlen); *offset += xlen; return xlen; } case nnfs_write: { char msg[NNFS_NETLEN + 1], *c; u_long val; switch (file_type) { case Pvif_type: xlen = NNFS_NETLEN; if (count > xlen) { return -ENOSPC; } error = copy_from_user(msg,u_buf,count); if (error) 
return -EFAULT; *offset += count; for (val = 0, c = msg; *c; c++) val = val * 10 + *c - '0'; vif->type = val < VIF_NTYPES ? val : VIF_NTYPES - 1; vif_refresh(vif); return count; case Pvif_bw: xlen = NNFS_NETLEN; if (count > xlen) { return -ENOSPC; } error = copy_from_user(msg,u_buf,count); if (error) return -EFAULT; *offset += count; for (val = 0, c = msg; '0' <= *c && *c <= '9' ; c++) val = val * 10 + *c - '0'; if (*c == 'K') val *= 1024; else if (*c == 'M') val *= 1024 * 1024; /* byte / tick */ vif->bandwidth = val / (HZ * 8); vif->bandwidth += val ? 1 : 0; vif_refresh(vif); return count; case Pvif_wt: xlen = NNFS_NETLEN; if (count > xlen) { return -ENOSPC; } error = copy_from_user(msg,u_buf,count); if (error) return -EFAULT; *offset += count; for (val = 0, c = msg; '0' <= *c && *c <= '9' ; c++) val = val * 10 + *c - '0'; /* val = max(val, 1); */ /* weight > 0 */ val = val > 1 ? val : 1; vif->weight = val; vif_refresh(vif); return count; case Pvif_filter: { struct sk_filter *fp; int err; if (count % sizeof(struct sock_filter) != 0) return -EINVAL; fp = kmalloc(sizeof(*fp) + count, GFP_KERNEL); if (!fp) return -ENOSPC; if (copy_from_user(fp->insns, u_buf, count)) { kfree(fp); return -EFAULT; } atomic_set(&fp->refcnt, 1); fp->len = count / sizeof(struct sock_filter); err = sk_chk_filter(fp->insns, fp->len); if (!err) { struct sk_filter *old_fp; spin_lock(&vif_lock); old_fp = vif->filter; vif->filter = fp; spin_unlock(&vif_lock); fp = old_fp; } if (fp) if (atomic_dec_and_test(&fp->refcnt)) kfree(fp); return count; } default: error = -ENODEV; break; } break; } default: error = -ENODEV; } return -EFAULT; } --- NEW FILE: nnfs_ops.c --- /* * Copyright (c) 2001-2004 Netnice.org * All rights reserved. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: nnfs_ops.c,v 1.1.4.2 2005/11/10 04:36:05 enferex Exp $ */ [...1725 lines suppressed...] .name = "nnfs", .get_sb = nnfs_get_super, .kill_sb = kill_litter_super, }; static int __init nnfs_init(void) { return register_filesystem(&nnfs_type); } static void __exit nnfs_exit(void) { unregister_filesystem(&nnfs_type); } module_init(nnfs_init); module_exit(nnfs_exit); --- NEW FILE: Makefile --- # # Makefile for the linux ramfs routines. # obj-$(CONFIG_NNFS) += nnfs.o nnfs-objs := nnfs_ops.o \ nnfs_netnice.o \ nnfs_fileno.o \ nnfs_subr.o --- NEW FILE: nnfs.h --- /* * Copyright (c) 2001-2004 Netnice.org * All rights reserved. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: nnfs.h,v 1.1.4.2 2005/11/10 04:36:05 enferex Exp $ */ #include <linux/types.h> #include <linux/fs.h> /* * System Paramesters */ #define NNFS_NOTELEN 64 /* max length of a note (/proc/$pid/note) */ #define NNFS_CTLLEN 8 /* max length of a ctl msg (/proc/$pid/ctl) */ #define NNFS_NAMELEN 16 /* max length of a filename component */ #define NNFS_FSNAMELEN 16 /* equal to MFSNAMELEN */ #define NNFS_NETLEN 64 /* max size of network files */ #define NNFS_WORKBUF 64 /* temporary buffer for I/F name */ #define NNFS_DISABLED 0x8000 /* node is disabled */ #define NNFS_PROCDEP 0x0010 /* process-dependent */ #define NNFS_LOCKED 0x01 /* XXX: to be removed */ #define NNFS_WANT 0x02 /* XXX: to be removed */ #define NNFS_MAXPATHLEN 1024 /* * The different types of node in nnfs filesystem */ typedef enum { Proot, /* the filesystem root */ Pcurproc, /* symbolic link for curproc */ Pall_procs, /* the root of the procs tree */ Pproc, /* a process-specific sub-directory */ Pfile, /* the executable file */ Pmem, /* the process's memory image */ #if 0 /* Removed for architecture dependency */ Pregs, /* the process's register set */ Pfpregs, /* the process's FP register set */ Pdbregs, /* the process's debug register set */ Pctl, /* process control */ #endif Pstatus, /* process status */ Pnote, /* process notifier */ Pnotepg, /* process group notifier */ Pmap, /* memory map */ Ptype, /* executable type */ Pcmdline, /* command line */ Prlimit, /* resource limits */ Popenfile, /* temporary */ Pso_list, /* socket file descriptor root */ Pso_dir, /* socket file descriptor dir */ Pso_dev, /* attached interface */ Pvif_root, /* root directory for virtual interface */ Pvif_dir, /* virtual interface directory */ Pvif_type, /* virtual interface type */ Pvif_bw, /* virtual interface bandwidth */ Pvif_wt, /* virtual interface weight */ Pvif_recv, /* statistics */ Pvif_send, /* statistics */ Pvif_drops, /* statistics */ Pvif_port, /* Netnice Packet Filter */ Pvif_filter /* Netnice Packet 
Filter */ } pfstype; struct vifnet; typedef enum nnfs_rw_t { nnfs_read, nnfs_write } nnfs_rw_t; struct inode * nnfs_new_inode(struct super_block *sup, int mode, ino_t ino); int nnfs_vif_make_control_files(struct super_block * sup, struct dentry * vif_dentry, struct vifnet * vifnet); int nnfs_donetnice(pfstype file_type, struct vifnet * vif, char * u_buf, size_t count, loff_t * offset, nnfs_rw_t rw); struct file * check_socket_fd(struct files_struct *, int); /* * Node ID Class */ typedef struct nodeid { pfstype type; /* type of the node */ pid_t pid; /* pid for process-dependent node */ int vid; /* vif index */ int fd; /* socket file descriptor */ int ifd; /* interface index */ } nodeid_t; #define iseq(x,y) (!memcmp((void *)x, y, sizeof(nodeid_t))) #define NO_PID (0) #if 0 /* * Vnode cache - PFS node data */ struct pfs_node { struct pfs_node *pfs_prev; struct pfs_node *pfs_next; nodeid_t pfs_nid; /* unique id */ /* cache entry calculated from nid for efficiency */ pid_t pfs_pid; /* process dependent */ struct vnode *pfs_vnode; /* vnode cache */ /* pfs_node information */ u_int32_t pfs_fileno; /* unique file id */ u_short pfs_mode; /* mode bits for stat() */ caddr_t pfs_data; /* associated data, such as VIF */ u_long pfs_flags; /* open flags */ pid_t pfs_lockowner; /* pfs lock owner */ }; #endif /* 0 */ /* * Node ID Operations */ #define IDTOPID(id) ((id)->pid) #define IDTOTYPE(id) ((id)->type) #define ISPROCDEP(id) ((id)->pid != NO_PID) #define SETDATA(vpp, data) do { \ if (vpp && *vpp && (*vpp)->v_data) \ ((struct pfs_node *)(*vpp)->v_data)->pfs_data = (caddr_t) data; \ } while (0) /* * Node ID Calculators * * /proc/ use IDSET_ROOT:Proot * * /proc/xxx(not proc) use IDSET:own type * * /proc/pid/ use IDSET_PROC:own type * * /proc/pid/sockets/ Pso_list directory * * there are "socket" directories and "vif attachment" links, * in the Pso_list directory. * * /proc/pid/socket/1/ use IDSET_SOCKETDIR:Sso_dir * 2/ use IDSET_SOCKETDIR:Sso_dir * . * . 
* n/lo0@ use IDSET_SOCKETATTACH:Sso_dev * . * . * lo0@ use IDSET_PROCATTACH:Sso_dev * fxp0@ use IDSET_PROCATTACH:Sso_dev * . * . * * /proc/network/xxx use IDSET_VIFDIR * */ #define IDSET_ROOT(id) do { \ id.type = Proot; \ id.pid = NO_PID; \ id.vid = 0; \ id.fd = 0; \ id.ifd = 0; \ } while (0) #define IDSET_TOP(id, tp) do { \ id.type = (tp); \ id.pid = NO_PID; \ id.vid = 0; \ id.fd = 0; \ id.ifd = 0; \ } while (0) #define IDSET_PROC(id, pd, tp) do { \ id.type = (tp); \ id.pid = (pd); \ id.vid = 0; \ id.fd = 0; \ id.ifd = 0; \ } while (0) #define IDSET_PROCATTACH(id, pd, ifidnt) do { \ id.type = Pso_dev; \ id.pid = (pd); \ id.vid = 0; \ id.fd = 0; \ id.ifd = (ifidnt); \ } while (0) #define IDSET_SOCKETDIR(id, pd, filedesc) do { \ id.type = Pso_dir; \ id.pid = (pd); \ id.vid = 0; \ id.fd = (filedesc) + 1; \ id.ifd = 0; \ } while (0) #define IDSET_SOCKETATTACH(id, pid, fd, dev_p) do { \ id.type = Pso_dev; \ id.pid = pid; \ id.vid = 0; \ id.fd = fd; \ id.ifd = dev_p; \ } while (0) #define IDSET_VIFDIR(id, vd, tp) do { \ id.type = (tp); \ id.pid = NO_PID; \ id.vid = (vd); \ id.fd = 0; \ id.ifd = 0; \ } while (0) #if 0 /* * Useful Macros */ #define CNEQ(cnp, s, len) \ ((cnp)->cn_namelen == (len) && \ (bcmp((s), (cnp)->cn_nameptr, (len)) == 0)) #define VTOMOUNTPOINT(vn) (vn)->v_mount->mnt_stat.f_mntonname #define VTOPFS(vp) ((struct pfs_node *)(vp)->v_data) #define PFSTOV(pfs) ((pfs)->pfs_vnode) #define PFIND(pid) ((pid) ? 
pfind(pid) : &proc0) #define UNLOCK_PROCESS(p) \ if (p && p != &proc0) \ PROC_UNLOCK(p) /* * Vnode cache */ void nnfs_vncache_load (void); void nnfs_vncache_unload (void); int nnfs_vncache_alloc (struct mount *mp, struct vnode **vpp, nodeid_t *nid); int nnfs_vncache_free (struct vnode *); int nnfs_disable (nodeid_t *); int nnfs_enable (struct pfs_node *); /* * File number bitmap */ void nnfs_fileno_load (void); void nnfs_fileno_unload (void); void nnfs_fileno_init (void); void nnfs_fileno_uninit (void); u_int32_t nnfs_fileno_get (nodeid_t *nid); void nnfs_fileno_free (u_int32_t fileno); /* * NNFS functions */ struct vnode * nnfs_findtextvp __P((struct proc *)); int nnfs_doopenfile __P((struct thread *, struct proc *, struct pfs_node *, struct uio *)); int nnfs_root __P((struct mount *, struct vnode **, struct thread *)); int nnfs_rw __P((struct vop_read_args *)); int nnfs_freevp __P((struct vnode *)); /* XXX */ /* * NNFS file operations */ int nnfs_donote __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_domem __P((struct thread *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_doctl __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_dostatus __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_domap __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_dotype __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_docmdline __P((struct thread *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_dorlimit __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); int nnfs_donetnice __P((struct proc *, struct proc *, struct pfs_node *pfsp, struct uio *uio)); /* functions to check whether or not files should be displayed */ int nnfs_validfile __P((struct proc *)); int nnfs_validmap __P((struct proc *)); int nnfs_validtype __P((struct proc *)); #endif /* 
* Fileno Calculator */ struct net_device; u_long FILENO_ROOT (void); u_long FILENO_PROC (pid_t, pfstype); u_long FILENO_TOP (pfstype); u_long FILENO_SDR (pid_t, int); u_long FILENO_PAT (pid_t, struct net_device *); u_long FILENO_SAT (int, int, struct net_device *); u_long FILENO_VIF (int, pfstype); #if 0 /* * VFS functions */ typedef struct vfs_namemap vfs_namemap_t; struct vfs_namemap { const char *nm_name; int nm_val; }; /* in vfs_cache.c */ int __getname __P((struct vnode *, char *, int)); int vfs_getuserstr __P((struct uio *, char *, int *)); vfs_namemap_t * vfs_findname __P((vfs_namemap_t *, char *, int)); extern vop_t **nnfs_vnodeop_p; #endif /* * VIF Management */ struct task_struct; struct socket; struct vifnet; struct pvifnet; struct vifnet * path2vifnet (const char *); struct net_device * path2ifnet (const char *); int vifnet2path (struct vifnet *, char *, char *); int remove_proc_pvif (struct task_struct *, struct pvifnet *); int remove_sock_pvif (struct socket *, struct pvifnet *); #if 0 int remove_pvif __P((struct proc *, struct pfs_node *, caddr_t)); #endif --- NEW FILE: nnfs_fileno.c --- /* * Copyright (c) 2001-2004 Netnice.org * All rights reserved. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: nnfs_fileno.c,v 1.1.4.2 2005/11/10 04:36:05 enferex Exp $ */ #include <linux/types.h> #include "nnfs.h" static int hash(void *data, int len) { static int pn[] = {65323,23293,15919,6361,557,47,17,5}; int *ptr = (unsigned int *)data; int val = 0xdeadbeaf; int i; for (i = 0; i < len / sizeof(int); i++, ptr++) { val = val * ~(*ptr) * pn[i%5] + *ptr; } return val; } #define idhash(p) hash((void *)p, sizeof(*p)) /* * Returns a fileno. */ u_int32_t nnfs_fileno_get(nodeid_t *nid) { #ifdef FILENO_ALLOC /* * We may need to change the function to avoid hash conflict, * by internally calling fileno_alloc() and by keeping the * once-used fileno entry. */ #else return idhash(nid); #endif } /* * Fileno Calculator */ u_long FILENO_ROOT() { nodeid_t nid; IDSET_ROOT(nid); return idhash(&nid); } u_long FILENO_PROC(pid_t pid, pfstype type) { nodeid_t nid; IDSET_PROC(nid, pid, type); return idhash(&nid); } u_long FILENO_TOP(pfstype type) { nodeid_t nid; IDSET_TOP(nid, type); return idhash(&nid); } u_long FILENO_SDR(pid_t pid, int fd) { nodeid_t nid; IDSET_SOCKETDIR(nid, pid, fd); return idhash(&nid); } u_long FILENO_PAT(pid_t pid, struct net_device * dev_p) { nodeid_t nid; IDSET_PROCATTACH(nid, pid, (int) dev_p); return idhash(&nid); } u_long FILENO_SAT(pid_t pid, int fd, struct net_device * dev_p) { nodeid_t nid; IDSET_SOCKETATTACH(nid, pid, fd, (int) dev_p); return idhash(&nid); } u_long FILENO_VIF(int vid, pfstype type) { nodeid_t nid; IDSET_VIFDIR(nid, vid, type); return idhash(&nid); } #if 0 /* * Initialization */ void nnfs_fileno_load(void) { mtx_init(&nnfs_fileno_mutex, "nnfs_fileno", NULL, MTX_DEF); } /* * Teardown */ void nnfs_fileno_unload(void) { mtx_destroy(&nnfs_fileno_mutex); } /* * Initialize fileno bitmap */ void nnfs_fileno_init() { struct nnfs_bitmap *pb; MALLOC(pb, struct nnfs_bitmap *, sizeof *pb, M_NNFSFILENO, M_WAITOK|M_ZERO); mtx_lock(&nnfs_fileno_mutex); pb->pb_bitmap[0] = 07; pb->pb_used = 3; nnfs_fileno_bitmap = pb; mtx_unlock(&nnfs_fileno_mutex); } 
/* * Tear down fileno bitmap */ void nnfs_fileno_uninit() { struct nnfs_bitmap *pb, *npb; int used; mtx_lock(&nnfs_fileno_mutex); pb = nnfs_fileno_bitmap; nnfs_fileno_bitmap = NULL; mtx_unlock(&nnfs_fileno_mutex); for (used = 0; pb; pb = npb) { npb = pb->pb_next; used += pb->pb_used; FREE(pb, M_NNFSFILENO); } #if 0 /* we currently don't reclaim filenos */ if (used > 2) printf("WARNING: %d file numbers still in use\n", used); #endif } /* * Get the next available file number */ u_int32_t nnfs_fileno_alloc(void) { struct nnfs_bitmap *pb, *ppb; u_int32_t fileno; unsigned int *p; int i; mtx_lock(&nnfs_fileno_mutex); /* look for the first page with free bits */ for (ppb = NULL, pb = nnfs_fileno_bitmap; pb; ppb = pb, pb = pb->pb_next) if (pb->pb_used != NNFS_BITMAP_BITS) break; /* out of pages? */ if (pb == NULL) { mtx_unlock(&nnfs_fileno_mutex); MALLOC(pb, struct nnfs_bitmap *, sizeof *pb, M_NNFSFILENO, M_WAITOK|M_ZERO); mtx_lock(&nnfs_fileno_mutex); /* protect against possible race */ while (ppb->pb_next) ppb = ppb->pb_next; pb->pb_offset = ppb->pb_offset + NNFS_BITMAP_BITS; ppb->pb_next = pb; } /* find the first free slot */ for (i = 0; i < NNFS_BITMAP_SIZE; ++i) if (pb->pb_bitmap[i] != UINT_MAX) break; /* find the first available bit and flip it */ fileno = pb->pb_offset + i * NNFS_SLOT_BITS; p = &pb->pb_bitmap[i]; for (i = 0; i < NNFS_SLOT_BITS; ++i, ++fileno) if ((*p & (unsigned int)(1 << i)) == 0) break; KASSERT(i < NNFS_SLOT_BITS, ("slot has free bits, yet doesn't")); *p |= (unsigned int)(1 << i); ++pb->pb_used; mtx_unlock(&nnfs_fileno_mutex); return fileno; } /* * Free a file number */ void nnfs_fileno_free(u_int32_t fileno) { #ifdef FILENO_ALLOC struct nnfs_bitmap *pb; unsigned int *p; int i; mtx_lock(&nnfs_fileno_mutex); /* find the right page */ for (pb = nnfs_fileno_bitmap; pb && fileno >= NNFS_BITMAP_BITS; pb = pb->pb_next, fileno -= NNFS_BITMAP_BITS) /* nothing */ ; KASSERT(pb, ("fileno isn't in any bitmap")); /* find the right bit in the right slot and 
flip it */ p = &pb->pb_bitmap[fileno / NNFS_SLOT_BITS]; i = fileno % NNFS_SLOT_BITS; KASSERT(*p & (unsigned int)(1 << i), ("fileno is already free")); *p &= ~((unsigned int)(1 << i)); --pb->pb_used; mtx_unlock(&nnfs_fileno_mutex); printf("nnfs_free_fileno(): reclaimed %d\n", fileno); #endif } #endif --- NEW FILE: nnfs_subr.c --- /* * Copyright (c) 2001-2004 Netnice.org * All rights reserved. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: nnfs_subr.c,v 1.1.4.2 2005/11/10 04:36:05 enferex Exp $ */ #include <linux/types.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/namei.h> #include <net/sock.h> struct socket; #include <net/vif.h> #include "nnfs.h" #define NNFS_DEBUG 0 /* * translates the vifnet structure into corresponding path */ int vifnet2path(struct vifnet * tmp, char *buf, char *prefix) { struct vifnet *vif = tmp; struct vifnet *vpath[MAX_VIFDEPTH], **ptr; char *c; ptr = vpath; while (vif) { *ptr = vif; if (vif->root_flag) break; vif = vif->pptr; ptr++; } c = buf + sprintf(buf, "%s/network", prefix); for (;ptr >= vpath; ptr--) { vif = *ptr; if (!vif || c - buf + strlen(vif->name) > NNFS_MAXPATHLEN) { *buf = '\0'; /* ENOMEM */ break; } c += sprintf(c, "/%s", vif->name); } return strlen(buf); } /* * get a vifnet */ struct vifnet * path2vifnet(const char *path) { struct vifnet * vifnet; struct nameidata nd; int error; error = path_lookup(path,LOOKUP_FOLLOW,&nd); if (error != 0) { return NULL; } if (!nd.dentry->d_inode) { return NULL; } vifnet = (struct vifnet *) nd.dentry->d_inode->u.generic_ip; path_release(&nd); if (!vifnet) { return NULL; } return vifnet; } /* * get a ifnet */ struct net_device * path2ifnet(const char * path) { struct vifnet *vif; vif = path2vifnet(path); for (; vif && !vif->root_flag; vif = (struct vifnet *)vif->pptr) ; if (!vif) return NULL; return vif->dev; } /* * Remove a pvifnet attached to a process and also remove * the pvifnet from all to the processes sockets. 
*/ int remove_proc_pvif(struct task_struct * task, struct pvifnet * pvif) { struct files_struct * files; struct file * file; struct pvifnet * task_pvif; int fd; #if NNFS_DEBUG printk("remove proc pvif %p\n",pvif); #endif files = get_files_struct(task); if (!files) { printk("no files %d\n",task->pid); return 0; } task_pvif = task->p_vifnet; spin_lock(&files->file_lock); for (fd=0; fd<files->max_fds; fd++) { struct inode * inode; struct socket * socket; struct sock * sk; struct pvifnet * so_pvif; struct pvifnet * old_pvif; int count; if ((file = check_socket_fd(files,fd)) == 0) continue; inode = file->f_dentry->d_inode; if (!inode->i_sock || !(socket = SOCKET_I(inode))) continue; sk = socket->sk; #if NNFS_DEBUG printk("checking fd %d %d\n",fd,inode->i_nlink); #endif so_pvif = sk->sk_vifnet; if (!so_pvif) { printk("missing so_pvif\n"); continue; } if (so_pvif == task_pvif) { printk("so_pvif == task_pvif\n"); continue; } /* * I believe this check prevents removing a pvif from * an open socket. It is present in the FreeBSD * implementation. 
*/ count = file_count(file); if (count > 1) { continue; } old_pvif = vif_lookup(sk->sk_vifnet, pvif); if (!old_pvif) { printk("missing old_pvif\n"); continue; } #if NNFS_DEBUG printk("remove sock fd %d pvif %p\n",fd,old_pvif); #endif vif_rm(&sk->sk_vifnet, old_pvif); } spin_unlock(&files->file_lock); put_files_struct(files); vif_rm(&task->p_vifnet, pvif); return 0; } int remove_sock_pvif(struct socket * sock, struct pvifnet * target) { struct pvifnet * pvif_target; struct sock * sk = sock->sk; #if NNFS_DEBUG printk("remove_sock_pvif %p %p\n",sock->so_vifnet,target); #endif if (!sk->sk_vifnet) return 0; pvif_target = vif_lookup(sk->sk_vifnet, target); if (pvif_target) { #if NNFS_DEBUG printk("calling vif_rm on %p %p\n",sk->sk_vifnet,pvif_target); #endif vif_rm(&(sk->sk_vifnet), pvif_target); } return 0; } #if 0 int remove_pvif(struct proc *p, struct pfs_node *pfs, caddr_t pvif) { struct socket *so; struct socket *sk; switch (IDTOTYPE(&pfs->pfs_nid) ) { case Pso_list: { int i, s; struct filedesc *fdp = p->p_fd; struct pvifnet *target; s = splnet(); /* remove the corresponding vif on sockets */ for (i = 0; i < fdp->fd_lastfile; i++) { /* * note that this operation is a bit tricky, * because so and p might be pointing the same vif. */ if (fdp->fd_ofiles[i] && fdp->fd_ofiles[i]->f_type == DTYPE_SOCKET && (so = (struct socket *) fdp->fd_ofiles[i]->f_data) && so->so_vifnet != p->p_vifnet && (target = (struct pvifnet *) vif_lookup((struct pvifnet *)so->so_vifnet, (struct pvifnet *) pvif))) { /* skip shared sockets */ if (fdp->fd_ofiles[i]->f_count > 1) continue; vif_rm((caddr_t *) &(so->so_vifnet), (caddr_t) target); } } /* remove the vif */ vif_rm((caddr_t *) &(p->p_vifnet), pvif); splx(s); break; } case Pso_dir: { if (!(so = (struct socket *) pfs->pfs_data)) return ENODEV; vif_rm((caddr_t *) &(so->so_vifnet), vif_lookup((struct pvifnet *) so->so_vifnet, (struct pvifnet *) pvif)); break; } default: return ENODEV; } return 0; } /* * Get a string from userland into (buf). 
Strip a trailing * nl character (to allow easy access from the shell). * The buffer should be *buflenp + 1 chars long. vfs_getuserstr * will automatically add a nul char at the end. * * Returns 0 on success or the following errors * * EINVAL: file offset is non-zero. * EMSGSIZE: message is longer than kernel buffer * EFAULT: user i/o buffer is not addressable */ int vfs_getuserstr(uio, buf, buflenp) struct uio *uio; char *buf; int *buflenp; { int xlen; int error; if (uio->uio_offset != 0) return (EINVAL); xlen = *buflenp; /* must be able to read the whole string in one go */ if (xlen < uio->uio_resid) return (EMSGSIZE); xlen = uio->uio_resid; if ((error = uiomove(buf, xlen, uio)) != 0) return (error); /* allow multiple writes without seeks */ uio->uio_offset = 0; /* cleanup string and remove trailing newline */ buf[xlen] = '\0'; xlen = strlen(buf); if (xlen > 0 && buf[xlen-1] == '\n') buf[--xlen] = '\0'; *buflenp = xlen; return (0); } vfs_namemap_t * vfs_findname(nm, buf, buflen) vfs_namemap_t *nm; char *buf; int buflen; { for (; nm->nm_name; nm++) if (bcmp(buf, nm->nm_name, buflen+1) == 0) return (nm); return (0); } #endif |
Update of /cvsroot/netnice/Linux/drivers/infiniband/core In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv18210 Added Files: Tag: netnice2612 Makefile agent.c agent.h agent_priv.h cache.c core_priv.h device.c fmr_pool.c mad.c mad_priv.h packer.c sa_query.c smi.c smi.h sysfs.c ud_header.c user_mad.c verbs.c Log Message: Missing files --- NEW FILE: verbs.c --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: verbs.c,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #include <linux/errno.h> #include <linux/err.h> #include <ib_verbs.h> /* Protection domains */ struct ib_pd *ib_alloc_pd(struct ib_device *device) { struct ib_pd *pd; pd = device->alloc_pd(device); if (!IS_ERR(pd)) { pd->device = device; atomic_set(&pd->usecnt, 0); } return pd; } EXPORT_SYMBOL(ib_alloc_pd); int ib_dealloc_pd(struct ib_pd *pd) { if (atomic_read(&pd->usecnt)) return -EBUSY; return pd->device->dealloc_pd(pd); } EXPORT_SYMBOL(ib_dealloc_pd); /* Address handles */ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ib_ah *ah; ah = pd->device->create_ah(pd, ah_attr); if (!IS_ERR(ah)) { ah->device = pd->device; ah->pd = pd; atomic_inc(&pd->usecnt); } return ah; } EXPORT_SYMBOL(ib_create_ah); int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->modify_ah ? ah->device->modify_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_modify_ah); int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->query_ah ? 
ah->device->query_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_ah); int ib_destroy_ah(struct ib_ah *ah) { struct ib_pd *pd; int ret; pd = ah->pd; ret = ah->device->destroy_ah(ah); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_destroy_ah); /* Queue pairs */ struct ib_qp *ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *qp; qp = pd->device->create_qp(pd, qp_init_attr); if (!IS_ERR(qp)) { qp->device = pd->device; qp->pd = pd; qp->send_cq = qp_init_attr->send_cq; qp->recv_cq = qp_init_attr->recv_cq; qp->srq = qp_init_attr->srq; qp->event_handler = qp_init_attr->event_handler; qp->qp_context = qp_init_attr->qp_context; qp->qp_type = qp_init_attr->qp_type; atomic_inc(&pd->usecnt); atomic_inc(&qp_init_attr->send_cq->usecnt); atomic_inc(&qp_init_attr->recv_cq->usecnt); if (qp_init_attr->srq) atomic_inc(&qp_init_attr->srq->usecnt); } return qp; } EXPORT_SYMBOL(ib_create_qp); int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); } EXPORT_SYMBOL(ib_modify_qp); int ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { return qp->device->query_qp ? 
qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_qp); int ib_destroy_qp(struct ib_qp *qp) { struct ib_pd *pd; struct ib_cq *scq, *rcq; struct ib_srq *srq; int ret; pd = qp->pd; scq = qp->send_cq; rcq = qp->recv_cq; srq = qp->srq; ret = qp->device->destroy_qp(qp); if (!ret) { atomic_dec(&pd->usecnt); atomic_dec(&scq->usecnt); atomic_dec(&rcq->usecnt); if (srq) atomic_dec(&srq->usecnt); } return ret; } EXPORT_SYMBOL(ib_destroy_qp); /* Completion queues */ struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), void *cq_context, int cqe) { struct ib_cq *cq; cq = device->create_cq(device, cqe); if (!IS_ERR(cq)) { cq->device = device; cq->comp_handler = comp_handler; cq->event_handler = event_handler; cq->cq_context = cq_context; atomic_set(&cq->usecnt, 0); } return cq; } EXPORT_SYMBOL(ib_create_cq); int ib_destroy_cq(struct ib_cq *cq) { if (atomic_read(&cq->usecnt)) return -EBUSY; return cq->device->destroy_cq(cq); } EXPORT_SYMBOL(ib_destroy_cq); int ib_resize_cq(struct ib_cq *cq, int cqe) { int ret; if (!cq->device->resize_cq) return -ENOSYS; ret = cq->device->resize_cq(cq, &cqe); if (!ret) cq->cqe = cqe; return ret; } EXPORT_SYMBOL(ib_resize_cq); /* Memory regions */ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *mr; mr = pd->device->get_dma_mr(pd, mr_access_flags); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } EXPORT_SYMBOL(ib_get_dma_mr); struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_mr *mr; mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } 
EXPORT_SYMBOL(ib_reg_phys_mr); int ib_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_pd *old_pd; int ret; if (!mr->device->rereg_phys_mr) return -ENOSYS; if (atomic_read(&mr->usecnt)) return -EBUSY; old_pd = mr->pd; ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { atomic_dec(&old_pd->usecnt); atomic_inc(&pd->usecnt); } return ret; } EXPORT_SYMBOL(ib_rereg_phys_mr); int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) { return mr->device->query_mr ? mr->device->query_mr(mr, mr_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_mr); int ib_dereg_mr(struct ib_mr *mr) { struct ib_pd *pd; int ret; if (atomic_read(&mr->usecnt)) return -EBUSY; pd = mr->pd; ret = mr->device->dereg_mr(mr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dereg_mr); /* Memory windows */ struct ib_mw *ib_alloc_mw(struct ib_pd *pd) { struct ib_mw *mw; if (!pd->device->alloc_mw) return ERR_PTR(-ENOSYS); mw = pd->device->alloc_mw(pd); if (!IS_ERR(mw)) { mw->device = pd->device; mw->pd = pd; atomic_inc(&pd->usecnt); } return mw; } EXPORT_SYMBOL(ib_alloc_mw); int ib_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd; int ret; pd = mw->pd; ret = mw->device->dealloc_mw(mw); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_mw); /* "Fast" memory regions */ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct ib_fmr *fmr; if (!pd->device->alloc_fmr) return ERR_PTR(-ENOSYS); fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); if (!IS_ERR(fmr)) { fmr->device = pd->device; fmr->pd = pd; atomic_inc(&pd->usecnt); } return fmr; } EXPORT_SYMBOL(ib_alloc_fmr); int ib_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; if (list_empty(fmr_list)) return 0; fmr = 
list_entry(fmr_list->next, struct ib_fmr, list); return fmr->device->unmap_fmr(fmr_list); } EXPORT_SYMBOL(ib_unmap_fmr); int ib_dealloc_fmr(struct ib_fmr *fmr) { struct ib_pd *pd; int ret; pd = fmr->pd; ret = fmr->device->dealloc_fmr(fmr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_fmr); /* Multicast groups */ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { return qp->device->attach_mcast ? qp->device->attach_mcast(qp, gid, lid) : -ENOSYS; } EXPORT_SYMBOL(ib_attach_mcast); int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { return qp->device->detach_mcast ? qp->device->detach_mcast(qp, gid, lid) : -ENOSYS; } EXPORT_SYMBOL(ib_detach_mcast); --- NEW FILE: mad.c --- /* * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following [...2672 lines suppressed...] return 0; error2: kmem_cache_destroy(ib_mad_cache); error1: return ret; } static void __exit ib_mad_cleanup_module(void) { ib_unregister_client(&mad_client); if (kmem_cache_destroy(ib_mad_cache)) { printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n"); } } module_init(ib_mad_init_module); module_exit(ib_mad_cleanup_module); --- NEW FILE: packer.c --- /* * Copyright (c) 2004 Topspin Corporation. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: packer.c,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #include <ib_pack.h> static u64 value_read(int offset, int size, void *structure) { switch (size) { case 1: return *(u8 *) (structure + offset); case 2: return be16_to_cpup((__be16 *) (structure + offset)); case 4: return be32_to_cpup((__be32 *) (structure + offset)); case 8: return be64_to_cpup((__be64 *) (structure + offset)); default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); return 0; } } /** * ib_pack - Pack a structure into a buffer * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @structure:Structure to pack from * @buf:Buffer to pack into * * ib_pack() packs a list of structure fields into a buffer, * controlled by the array of fields in @desc. */ void ib_pack(const struct ib_field *desc, int desc_len, void *structure, void *buf) { int i; for (i = 0; i < desc_len; ++i) { if (desc[i].size_bits <= 32) { int shift; u32 val; __be32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be32 *) buf + desc[i].offset_words; *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask); } else if (desc[i].size_bits <= 64) { int shift; u64 val; __be64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be64(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } if 
(desc[i].struct_size_bytes) memcpy(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, structure + desc[i].struct_offset_bytes, desc[i].size_bits / 8); else memset(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, 0, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_pack); static void value_write(int offset, int size, u64 val, void *structure) { switch (size * 8) { case 8: *( u8 *) (structure + offset) = val; break; case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break; case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break; case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break; default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); } } /** * ib_unpack - Unpack a buffer into a structure * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @buf:Buffer to unpack from * @structure:Structure to unpack into * * ib_pack() unpacks a list of structure fields from a buffer, * controlled by the array of fields in @desc. 
*/ void ib_unpack(const struct ib_field *desc, int desc_len, void *buf, void *structure) { int i; for (i = 0; i < desc_len; ++i) { if (!desc[i].struct_size_bytes) continue; if (desc[i].size_bits <= 32) { int shift; u32 val; u32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be32 *) buf + desc[i].offset_words; val = (be32_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else if (desc[i].size_bits <= 64) { int shift; u64 val; u64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be64 *) buf + desc[i].offset_words; val = (be64_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } memcpy(structure + desc[i].struct_offset_bytes, buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_unpack); --- NEW FILE: sysfs.c --- /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. 
* * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $Id: sysfs.c,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #include "core_priv.h" #include <ib_mad.h> struct ib_port { struct kobject kobj; struct ib_device *ibdev; struct attribute_group gid_group; struct attribute **gid_attr; struct attribute_group pkey_group; struct attribute **pkey_attr; u8 port_num; }; struct port_attribute { struct attribute attr; ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf); ssize_t (*store)(struct ib_port *, struct port_attribute *, const char *buf, size_t count); }; #define PORT_ATTR(_name, _mode, _show, _store) \ struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store) #define PORT_ATTR_RO(_name) \ struct port_attribute port_attr_##_name = __ATTR_RO(_name) struct port_table_attribute { struct port_attribute attr; int index; }; static ssize_t port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct port_attribute *port_attr = container_of(attr, struct port_attribute, attr); struct ib_port *p = container_of(kobj, struct ib_port, kobj); if (!port_attr->show) return 0; return port_attr->show(p, port_attr, buf); } static struct sysfs_ops port_sysfs_ops = { .show = port_attr_show }; static ssize_t state_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr 
attr; ssize_t ret; static const char *state_name[] = { [IB_PORT_NOP] = "NOP", [IB_PORT_DOWN] = "DOWN", [IB_PORT_INIT] = "INIT", [IB_PORT_ARMED] = "ARMED", [IB_PORT_ACTIVE] = "ACTIVE", [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER" }; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d: %s\n", attr.state, attr.state >= 0 && attr.state <= ARRAY_SIZE(state_name) ? state_name[attr.state] : "UNKNOWN"); } static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%x\n", attr.lid); } static ssize_t lid_mask_count_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d\n", attr.lmc); } static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%x\n", attr.sm_lid); } static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "%d\n", attr.sm_sl); } static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; return sprintf(buf, "0x%08x\n", attr.port_cap_flags); } static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; char *speed = ""; int rate; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; switch (attr.active_speed) { case 2: speed = " DDR"; break; case 4: speed = " QDR"; break; } 
rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed; if (rate < 0) return -EINVAL; return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", rate / 10, rate % 10 ? ".5" : "", ib_width_enum_to_int(attr.active_width), speed); } static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; switch (attr.phys_state) { case 1: return sprintf(buf, "1: Sleep\n"); case 2: return sprintf(buf, "2: Polling\n"); case 3: return sprintf(buf, "3: Disabled\n"); case 4: return sprintf(buf, "4: PortConfigurationTraining\n"); case 5: return sprintf(buf, "5: LinkUp\n"); case 6: return sprintf(buf, "6: LinkErrorRecovery\n"); case 7: return sprintf(buf, "7: Phy Test\n"); default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state); } } static PORT_ATTR_RO(state); static PORT_ATTR_RO(lid); static PORT_ATTR_RO(lid_mask_count); static PORT_ATTR_RO(sm_lid); static PORT_ATTR_RO(sm_sl); static PORT_ATTR_RO(cap_mask); static PORT_ATTR_RO(rate); static PORT_ATTR_RO(phys_state); static struct attribute *port_default_attrs[] = { &port_attr_state.attr, &port_attr_lid.attr, &port_attr_lid_mask_count.attr, &port_attr_sm_lid.attr, &port_attr_sm_sl.attr, &port_attr_cap_mask.attr, &port_attr_rate.attr, &port_attr_phys_state.attr, NULL }; static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); union ib_gid gid; ssize_t ret; ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", be16_to_cpu(((u16 *) gid.raw)[0]), be16_to_cpu(((u16 *) gid.raw)[1]), be16_to_cpu(((u16 *) gid.raw)[2]), be16_to_cpu(((u16 *) gid.raw)[3]), be16_to_cpu(((u16 *) gid.raw)[4]), be16_to_cpu(((u16 *) gid.raw)[5]), be16_to_cpu(((u16 *) gid.raw)[6]), 
be16_to_cpu(((u16 *) gid.raw)[7])); } static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); u16 pkey; ssize_t ret; ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey); if (ret) return ret; return sprintf(buf, "0x%04x\n", pkey); } #define PORT_PMA_ATTR(_name, _counter, _width, _offset) \ struct port_table_attribute port_pma_attr_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16) | ((_counter) << 24) \ } static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); int offset = tab_attr->index & 0xffff; int width = (tab_attr->index >> 16) & 0xff; struct ib_mad *in_mad = NULL; struct ib_mad *out_mad = NULL; ssize_t ret; if (!p->ibdev->process_mad) return sprintf(buf, "N/A (no PMA)\n"); in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); if (!in_mad || !out_mad) { ret = -ENOMEM; goto out; } memset(in_mad, 0, sizeof *in_mad); in_mad->mad_hdr.base_version = 1; in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT; in_mad->mad_hdr.class_version = 1; in_mad->mad_hdr.method = IB_MGMT_METHOD_GET; in_mad->mad_hdr.attr_id = cpu_to_be16(0x12); /* PortCounters */ in_mad->data[41] = p->port_num; /* PortSelect field */ if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY, p->port_num, NULL, NULL, in_mad, out_mad) & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) { ret = -EINVAL; goto out; } switch (width) { case 4: ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> (offset % 4)) & 0xf); break; case 8: ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); break; case 16: ret = sprintf(buf, "%u\n", be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 
8))); break; case 32: ret = sprintf(buf, "%u\n", be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8))); break; default: ret = 0; } out: kfree(in_mad); kfree(out_mad); return ret; } static PORT_PMA_ATTR(symbol_error , 0, 16, 32); static PORT_PMA_ATTR(link_error_recovery , 1, 8, 48); static PORT_PMA_ATTR(link_downed , 2, 8, 56); static PORT_PMA_ATTR(port_rcv_errors , 3, 16, 64); static PORT_PMA_ATTR(port_rcv_remote_physical_errors, 4, 16, 80); static PORT_PMA_ATTR(port_rcv_switch_relay_errors , 5, 16, 96); static PORT_PMA_ATTR(port_xmit_discards , 6, 16, 112); static PORT_PMA_ATTR(port_xmit_constraint_errors , 7, 8, 128); static PORT_PMA_ATTR(port_rcv_constraint_errors , 8, 8, 136); static PORT_PMA_ATTR(local_link_integrity_errors , 9, 4, 152); static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10, 4, 156); static PORT_PMA_ATTR(VL15_dropped , 11, 16, 176); static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192); static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224); static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256); static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288); static struct attribute *pma_attrs[] = { &port_pma_attr_symbol_error.attr.attr, &port_pma_attr_link_error_recovery.attr.attr, &port_pma_attr_link_downed.attr.attr, &port_pma_attr_port_rcv_errors.attr.attr, &port_pma_attr_port_rcv_remote_physical_errors.attr.attr, &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, &port_pma_attr_port_xmit_discards.attr.attr, &port_pma_attr_port_xmit_constraint_errors.attr.attr, &port_pma_attr_port_rcv_constraint_errors.attr.attr, &port_pma_attr_local_link_integrity_errors.attr.attr, &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, &port_pma_attr_VL15_dropped.attr.attr, &port_pma_attr_port_xmit_data.attr.attr, &port_pma_attr_port_rcv_data.attr.attr, &port_pma_attr_port_xmit_packets.attr.attr, &port_pma_attr_port_rcv_packets.attr.attr, NULL }; static struct attribute_group pma_group = { .name = "counters", .attrs = pma_attrs }; static void 
ib_port_release(struct kobject *kobj) { struct ib_port *p = container_of(kobj, struct ib_port, kobj); struct attribute *a; int i; for (i = 0; (a = p->gid_attr[i]); ++i) { kfree(a->name); kfree(a); } for (i = 0; (a = p->pkey_attr[i]); ++i) { kfree(a->name); kfree(a); } kfree(p->gid_attr); kfree(p); } static struct kobj_type port_type = { .release = ib_port_release, .sysfs_ops = &port_sysfs_ops, .default_attrs = port_default_attrs }; static void ib_device_release(struct class_device *cdev) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); kfree(dev); } static int ib_device_hotplug(struct class_device *cdev, char **envp, int num_envp, char *buf, int size) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); int i = 0, len = 0; if (add_hotplug_env_var(envp, num_envp, &i, buf, size, &len, "NAME=%s", dev->name)) return -ENOMEM; /* * It might be nice to pass the node GUID to hotplug, but * right now the only way to get it is to query the device * provider, and this can crash during device removal because * we are will be running after driver removal has started. * We could add a node_guid field to struct ib_device, or we * could just let the hotplug script read the node GUID from * sysfs when devices are added. 
*/ envp[i] = NULL; return 0; } static int alloc_group(struct attribute ***attr, ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf), int len) { struct port_table_attribute ***tab_attr = (struct port_table_attribute ***) attr; int i; int ret; *tab_attr = kmalloc((1 + len) * sizeof *tab_attr, GFP_KERNEL); if (!*tab_attr) return -ENOMEM; memset(*tab_attr, 0, (1 + len) * sizeof *tab_attr); for (i = 0; i < len; ++i) { (*tab_attr)[i] = kmalloc(sizeof *(*tab_attr)[i], GFP_KERNEL); if (!(*tab_attr)[i]) { ret = -ENOMEM; goto err; } memset((*tab_attr)[i], 0, sizeof *(*tab_attr)[i]); (*tab_attr)[i]->attr.attr.name = kmalloc(8, GFP_KERNEL); if (!(*tab_attr)[i]->attr.attr.name) { ret = -ENOMEM; goto err; } if (snprintf((*tab_attr)[i]->attr.attr.name, 8, "%d", i) >= 8) { ret = -ENOMEM; goto err; } (*tab_attr)[i]->attr.attr.mode = S_IRUGO; (*tab_attr)[i]->attr.attr.owner = THIS_MODULE; (*tab_attr)[i]->attr.show = show; (*tab_attr)[i]->index = i; } return 0; err: for (i = 0; i < len; ++i) { if ((*tab_attr)[i]) kfree((*tab_attr)[i]->attr.attr.name); kfree((*tab_attr)[i]); } kfree(*tab_attr); return ret; } static int add_port(struct ib_device *device, int port_num) { struct ib_port *p; struct ib_port_attr attr; int i; int ret; ret = ib_query_port(device, port_num, &attr); if (ret) return ret; p = kmalloc(sizeof *p, GFP_KERNEL); if (!p) return -ENOMEM; memset(p, 0, sizeof *p); p->ibdev = device; p->port_num = port_num; p->kobj.ktype = &port_type; p->kobj.parent = kobject_get(&device->ports_parent); if (!p->kobj.parent) { ret = -EBUSY; goto err; } ret = kobject_set_name(&p->kobj, "%d", port_num); if (ret) goto err_put; ret = kobject_register(&p->kobj); if (ret) goto err_put; ret = sysfs_create_group(&p->kobj, &pma_group); if (ret) goto err_put; ret = alloc_group(&p->gid_attr, show_port_gid, attr.gid_tbl_len); if (ret) goto err_remove_pma; p->gid_group.name = "gids"; p->gid_group.attrs = p->gid_attr; ret = sysfs_create_group(&p->kobj, &p->gid_group); if (ret) goto 
err_free_gid; ret = alloc_group(&p->pkey_attr, show_port_pkey, attr.pkey_tbl_len); if (ret) goto err_remove_gid; p->pkey_group.name = "pkeys"; p->pkey_group.attrs = p->pkey_attr; ret = sysfs_create_group(&p->kobj, &p->pkey_group); if (ret) goto err_free_pkey; list_add_tail(&p->kobj.entry, &device->port_list); return 0; err_free_pkey: for (i = 0; i < attr.pkey_tbl_len; ++i) { kfree(p->pkey_attr[i]->name); kfree(p->pkey_attr[i]); } kfree(p->pkey_attr); err_remove_gid: sysfs_remove_group(&p->kobj, &p->gid_group); err_free_gid: for (i = 0; i < attr.gid_tbl_len; ++i) { kfree(p->gid_attr[i]->name); kfree(p->gid_attr[i]); } kfree(p->gid_attr); err_remove_pma: sysfs_remove_group(&p->kobj, &pma_group); err_put: kobject_put(&device->ports_parent); err: kfree(p); return ret; } static ssize_t show_node_type(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); switch (dev->node_type) { case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type); case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); } } static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); struct ib_device_attr attr; ssize_t ret; ret = ib_query_device(dev, &attr); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]), be16_to_cpu(((u16 *) &attr.sys_image_guid)[3])); } static ssize_t show_node_guid(struct class_device *cdev, char *buf) { struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); struct ib_device_attr attr; ssize_t ret; ret = ib_query_device(dev, &attr); if (ret) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", 
be16_to_cpu(((u16 *) &attr.node_guid)[0]), be16_to_cpu(((u16 *) &attr.node_guid)[1]), be16_to_cpu(((u16 *) &attr.node_guid)[2]), be16_to_cpu(((u16 *) &attr.node_guid)[3])); } static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); static CLASS_DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL); static CLASS_DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL); static struct class_device_attribute *ib_class_attributes[] = { &class_device_attr_node_type, &class_device_attr_sys_image_guid, &class_device_attr_node_guid }; static struct class ib_class = { .name = "infiniband", .release = ib_device_release, .hotplug = ib_device_hotplug, }; int ib_device_register_sysfs(struct ib_device *device) { struct class_device *class_dev = &device->class_dev; int ret; int i; class_dev->class = &ib_class; class_dev->class_data = device; strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE); INIT_LIST_HEAD(&device->port_list); ret = class_device_register(class_dev); if (ret) goto err; for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) { ret = class_device_create_file(class_dev, ib_class_attributes[i]); if (ret) goto err_unregister; } device->ports_parent.parent = kobject_get(&class_dev->kobj); if (!device->ports_parent.parent) { ret = -EBUSY; goto err_unregister; } ret = kobject_set_name(&device->ports_parent, "ports"); if (ret) goto err_put; ret = kobject_register(&device->ports_parent); if (ret) goto err_put; if (device->node_type == IB_NODE_SWITCH) { ret = add_port(device, 0); if (ret) goto err_put; } else { int i; for (i = 1; i <= device->phys_port_cnt; ++i) { ret = add_port(device, i); if (ret) goto err_put; } } return 0; err_put: { struct kobject *p, *t; struct ib_port *port; list_for_each_entry_safe(p, t, &device->port_list, entry) { list_del(&p->entry); port = container_of(p, struct ib_port, kobj); sysfs_remove_group(p, &pma_group); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); kobject_unregister(p); } } 
kobject_put(&class_dev->kobj); err_unregister: class_device_unregister(class_dev); err: return ret; } void ib_device_unregister_sysfs(struct ib_device *device) { struct kobject *p, *t; struct ib_port *port; list_for_each_entry_safe(p, t, &device->port_list, entry) { list_del(&p->entry); port = container_of(p, struct ib_port, kobj); sysfs_remove_group(p, &pma_group); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); kobject_unregister(p); } kobject_unregister(&device->ports_parent); class_device_unregister(&device->class_dev); } int ib_sysfs_setup(void) { return class_register(&ib_class); } void ib_sysfs_cleanup(void) { class_unregister(&ib_class); } --- NEW FILE: agent.c --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $Id: agent.c,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #include <linux/dma-mapping.h> #include <asm/bug.h> #include <ib_smi.h> #include "smi.h" #include "agent_priv.h" #include "mad_priv.h" #include "agent.h" spinlock_t ib_agent_port_list_lock; static LIST_HEAD(ib_agent_port_list); /* * Caller must hold ib_agent_port_list_lock */ static inline struct ib_agent_port_private * __ib_get_agent_port(struct ib_device *device, int port_num, struct ib_mad_agent *mad_agent) { struct ib_agent_port_private *entry; BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */ if (device) { list_for_each_entry(entry, &ib_agent_port_list, port_list) { if (entry->smp_agent->device == device && entry->port_num == port_num) return entry; } } else { list_for_each_entry(entry, &ib_agent_port_list, port_list) { if ((entry->smp_agent == mad_agent) || (entry->perf_mgmt_agent == mad_agent)) return entry; } } return NULL; } static inline struct ib_agent_port_private * ib_get_agent_port(struct ib_device *device, int port_num, struct ib_mad_agent *mad_agent) { struct ib_agent_port_private *entry; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); entry = __ib_get_agent_port(device, port_num, mad_agent); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return entry; } int smi_check_local_dr_smp(struct ib_smp *smp, struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) return 1; port_priv = 
ib_get_agent_port(device, port_num, NULL); if (!port_priv) { printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d " "not open\n", device->name, port_num); return 1; } return smi_check_local_smp(port_priv->smp_agent, smp); } static int agent_mad_send(struct ib_mad_agent *mad_agent, struct ib_agent_port_private *port_priv, struct ib_mad_private *mad_priv, struct ib_grh *grh, struct ib_wc *wc) { struct ib_agent_send_wr *agent_send_wr; struct ib_sge gather_list; struct ib_send_wr send_wr; struct ib_send_wr *bad_send_wr; struct ib_ah_attr ah_attr; unsigned long flags; int ret = 1; agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL); if (!agent_send_wr) goto out; agent_send_wr->mad = mad_priv; gather_list.addr = dma_map_single(mad_agent->device->dma_device, &mad_priv->mad, sizeof(mad_priv->mad), DMA_TO_DEVICE); gather_list.length = sizeof(mad_priv->mad); gather_list.lkey = (*port_priv->mr).lkey; send_wr.next = NULL; send_wr.opcode = IB_WR_SEND; send_wr.sg_list = &gather_list; send_wr.num_sge = 1; send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */ send_wr.wr.ud.timeout_ms = 0; send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; ah_attr.dlid = wc->slid; ah_attr.port_num = mad_agent->port_num; ah_attr.src_path_bits = wc->dlid_path_bits; ah_attr.sl = wc->sl; ah_attr.static_rate = 0; ah_attr.ah_flags = 0; /* No GRH */ if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { if (wc->wc_flags & IB_WC_GRH) { ah_attr.ah_flags = IB_AH_GRH; /* Should sgid be looked up ? 
*/ ah_attr.grh.sgid_index = 0; ah_attr.grh.hop_limit = grh->hop_limit; ah_attr.grh.flow_label = be32_to_cpup( &grh->version_tclass_flow) & 0xfffff; ah_attr.grh.traffic_class = (be32_to_cpup( &grh->version_tclass_flow) >> 20) & 0xff; memcpy(ah_attr.grh.dgid.raw, grh->sgid.raw, sizeof(ah_attr.grh.dgid)); } } agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr); if (IS_ERR(agent_send_wr->ah)) { printk(KERN_ERR SPFX "No memory for address handle\n"); kfree(agent_send_wr); goto out; } send_wr.wr.ud.ah = agent_send_wr->ah; if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { send_wr.wr.ud.pkey_index = wc->pkey_index; send_wr.wr.ud.remote_qkey = IB_QP1_QKEY; } else { /* for SMPs */ send_wr.wr.ud.pkey_index = 0; send_wr.wr.ud.remote_qkey = 0; } send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr; send_wr.wr_id = (unsigned long)agent_send_wr; pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr); /* Send */ spin_lock_irqsave(&port_priv->send_list_lock, flags); if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) { spin_unlock_irqrestore(&port_priv->send_list_lock, flags); dma_unmap_single(mad_agent->device->dma_device, pci_unmap_addr(agent_send_wr, mapping), sizeof(mad_priv->mad), DMA_TO_DEVICE); ib_destroy_ah(agent_send_wr->ah); kfree(agent_send_wr); } else { list_add_tail(&agent_send_wr->send_list, &port_priv->send_posted_list); spin_unlock_irqrestore(&port_priv->send_list_lock, flags); ret = 0; } out: return ret; } int agent_send(struct ib_mad_private *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; struct ib_mad_agent *mad_agent; port_priv = ib_get_agent_port(device, port_num, NULL); if (!port_priv) { printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n", device->name, port_num); return 1; } /* Get mad agent based on mgmt_class in MAD */ switch (mad->mad.mad.mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case 
IB_MGMT_CLASS_SUBN_LID_ROUTED: mad_agent = port_priv->smp_agent; break; case IB_MGMT_CLASS_PERF_MGMT: mad_agent = port_priv->perf_mgmt_agent; break; default: return 1; } return agent_mad_send(mad_agent, port_priv, mad, grh, wc); } static void agent_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_agent_port_private *port_priv; struct ib_agent_send_wr *agent_send_wr; unsigned long flags; /* Find matching MAD agent */ port_priv = ib_get_agent_port(NULL, 0, mad_agent); if (!port_priv) { printk(KERN_ERR SPFX "agent_send_handler: no matching MAD " "agent %p\n", mad_agent); return; } agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id; spin_lock_irqsave(&port_priv->send_list_lock, flags); /* Remove completed send from posted send MAD list */ list_del(&agent_send_wr->send_list); spin_unlock_irqrestore(&port_priv->send_list_lock, flags); dma_unmap_single(mad_agent->device->dma_device, pci_unmap_addr(agent_send_wr, mapping), sizeof(agent_send_wr->mad->mad), DMA_TO_DEVICE); ib_destroy_ah(agent_send_wr->ah); /* Release allocated memory */ kmem_cache_free(ib_mad_cache, agent_send_wr->mad); kfree(agent_send_wr); } int ib_agent_port_open(struct ib_device *device, int port_num) { int ret; struct ib_agent_port_private *port_priv; unsigned long flags; /* First, check if port already open for SMI */ port_priv = ib_get_agent_port(device, port_num, NULL); if (port_priv) { printk(KERN_DEBUG SPFX "%s port %d already open\n", device->name, port_num); return 0; } /* Create new device info */ port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n"); ret = -ENOMEM; goto error1; } memset(port_priv, 0, sizeof *port_priv); port_priv->port_num = port_num; spin_lock_init(&port_priv->send_list_lock); INIT_LIST_HEAD(&port_priv->send_posted_list); /* Obtain send only MAD agent for SM class (SMI QP) */ port_priv->smp_agent = ib_register_mad_agent(device, 
port_num, IB_QPT_SMI, NULL, 0, &agent_send_handler, NULL, NULL); if (IS_ERR(port_priv->smp_agent)) { ret = PTR_ERR(port_priv->smp_agent); goto error2; } /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */ port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0, &agent_send_handler, NULL, NULL); if (IS_ERR(port_priv->perf_mgmt_agent)) { ret = PTR_ERR(port_priv->perf_mgmt_agent); goto error3; } port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(port_priv->mr)) { printk(KERN_ERR SPFX "Couldn't get DMA MR\n"); ret = PTR_ERR(port_priv->mr); goto error4; } spin_lock_irqsave(&ib_agent_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_agent_port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return 0; error4: ib_unregister_mad_agent(port_priv->perf_mgmt_agent); error3: ib_unregister_mad_agent(port_priv->smp_agent); error2: kfree(port_priv); error1: return ret; } int ib_agent_port_close(struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); port_priv = __ib_get_agent_port(device, port_num, NULL); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); printk(KERN_ERR SPFX "Port %d not found\n", port_num); return -ENODEV; } list_del(&port_priv->port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); ib_dereg_mr(port_priv->mr); ib_unregister_mad_agent(port_priv->perf_mgmt_agent); ib_unregister_mad_agent(port_priv->smp_agent); kfree(port_priv); return 0; } --- NEW FILE: Makefile --- EXTRA_CFLAGS += -Idrivers/infiniband/include obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o ib_umad.o ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ device.o fmr_pool.o cache.o ib_mad-y := mad.o smi.o agent.o ib_sa-y := sa_query.o ib_umad-y := user_mad.o --- NEW FILE: agent.h --- /* * Copyright (c) 2004 Mellanox Technologies Ltd. 
All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: agent.h,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #ifndef __AGENT_H_ #define __AGENT_H_ extern spinlock_t ib_agent_port_list_lock; extern int ib_agent_port_open(struct ib_device *device, int port_num); extern int ib_agent_port_close(struct ib_device *device, int port_num); extern int agent_send(struct ib_mad_private *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num); #endif /* __AGENT_H_ */ --- NEW FILE: sa_query.c --- /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $Id: sa_query.c,v 1.1.2.1 2005/11/10 03:54:33 enferex Exp $ */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/random.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/idr.h> #include <ib_pack.h> #include <ib_sa.h> MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand subnet administration query support"); MODULE_LICENSE("Dual BSD/GPL"); /* * These two structures must be packed because they have 64-bit fields * that are only 32-bit aligned. 64-bit architectures will lay them * out wrong otherwise. (And unfortunately they are sent on the wire * so we can't change the layout) */ struct ib_sa_hdr { u64 sm_key; u16 attr_offset; u16 reserved; ib_sa_comp_mask comp_mask; } __attribute__ ((packed)); struct ib_sa_mad { struct ib_mad_hdr mad_hdr; struct ib_rmpp_hdr rmpp_hdr; struct ib_sa_hdr sa_hdr; u8 data[200]; } __attribute__ ((packed)); struct ib_sa_sm_ah { struct ib_ah *ah; struct kref ref; }; struct ib_sa_port { struct ib_mad_agent *agent; struct ib_mr *mr; struct ib_sa_sm_ah *sm_ah; struct work_struct update_task; spinlock_t ah_lock; u8 port_num; }; struct ib_sa_device { int start_port, end_port; struct ib_event_handler event_handler; struct ib_sa_port port[0]; }; struct ib_sa_query { void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); void (*release)(struct ib_sa_query *); struct ib_sa_port *port; struct ib_sa_mad *mad; struct ib_sa_sm_ah *sm_ah; DECLARE_PCI_UNMAP_ADDR(mapping) int id; }; struct ib_sa_path_query { void (*callback)(int, struct ib_sa_path_rec *, void *); void *context; struct ib_sa_query sa_query; }; struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; struct ib_sa_query sa_query; }; static void ib_sa_add_one(struct ib_device *device); static void ib_sa_remove_one(struct ib_device *device); static struct ib_client sa_client = { .name = 
"sa", .add = ib_sa_add_one, .remove = ib_sa_remove_one }; static spinlock_t idr_lock; static DEFINE_IDR(query_idr); static spinlock_t tid_lock; static u32 tid; enum { IB_SA_ATTR_CLASS_PORTINFO = 0x01, IB_SA_ATTR_NOTICE = 0x02, IB_SA_ATTR_INFORM_INFO = 0x03, IB_SA_ATTR_NODE_REC = 0x11, IB_SA_ATTR_PORT_INFO_REC = 0x12, IB_SA_ATTR_SL2VL_REC = 0x13, IB_SA_ATTR_SWITCH_REC = 0x14, IB_SA_ATTR_LINEAR_FDB_REC = 0x15, IB_SA_ATTR_RANDOM_FDB_REC = 0x16, IB_SA_ATTR_MCAST_FDB_REC = 0x17, IB_SA_ATTR_SM_INFO_REC = 0x18, IB_SA_ATTR_LINK_REC = 0x20, IB_SA_ATTR_GUID_INFO_REC = 0x30, IB_SA_ATTR_SERVICE_REC = 0x31, IB_SA_ATTR_PARTITION_REC = 0x33, IB_SA_ATTR_RANGE_REC = 0x34, IB_SA_ATTR_PATH_REC = 0x35, IB_SA_ATTR_VL_ARB_REC = 0x36, IB_SA_ATTR_MC_GROUP_REC = 0x37, IB_SA_ATTR_MC_MEMBER_REC = 0x38, IB_SA_ATTR_TRACE_REC = 0x39, IB_SA_ATTR_MULTI_PATH_REC = 0x3a, IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b }; #define PATH_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \ .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \ .field_name = "sa_path_rec:" #field static const struct ib_field path_rec_table[] = { { RESERVED, .offset_words = 0, .offset_bits = 0, .size_bits = 32 }, { RESERVED, .offset_words = 1, .offset_bits = 0, .size_bits = 32 }, { PATH_REC_FIELD(dgid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(sgid), .offset_words = 6, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(dlid), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { PATH_REC_FIELD(slid), .offset_words = 10, .offset_bits = 16, .size_bits = 16 }, { PATH_REC_FIELD(raw_traffic), .offset_words = 11, .offset_bits = 0, .size_bits = 1 }, { RESERVED, .offset_words = 11, .offset_bits = 1, .size_bits = 3 }, { PATH_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { PATH_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { PATH_REC_FIELD(traffic_class), .offset_words = 12, .offset_bits = 0, 
.size_bits = 8 }, { PATH_REC_FIELD(reversible), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { PATH_REC_FIELD(numb_path), .offset_words = 12, .offset_bits = 9, .size_bits = 7 }, { PATH_REC_FIELD(pkey), .offset_words = 12, .offset_bits = 16, .size_bits = 16 }, { RESERVED, .offset_words = 13, .offset_bits = 0, .size_bits = 12 }, { PATH_REC_FIELD(sl), .offset_words = 13, .offset_bits = 12, .size_bits = 4 }, { PATH_REC_FIELD(mtu_selector), .offset_words = 13, .offset_bits = 16, .size_bits = 2 }, { PATH_REC_FIELD(mtu), .offset_words = 13, .offset_bits = 18, .size_bits = 6 }, { PATH_REC_FIELD(rate_selector), .offset_words = 13, .offset_bits = 24, .size_bits = 2 }, { PATH_REC_FIELD(rate), .offset_words = 13, .offset_bits = 26, .size_bits = 6 }, { PATH_REC_FIELD(packet_life_time_selector), .offset_words = 14, .offset_bits = 0, .size_bits = 2 }, { PATH_REC_FIELD(packet_life_time), .offset_words = 14, .offset_bits = 2, .size_bits = 6 }, { PATH_REC_FIELD(preference), .offset_words = 14, .offset_bits = 8, .size_bits = 8 }, { RESERVED, .offset_words = 14, .offset_bits = 16, .size_bits = 48 }, }; #define MCMEMBER_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \ .field_name = "sa_mcmember_rec:" #field static const struct ib_field mcmember_rec_table[] = { { MCMEMBER_REC_FIELD(mgid), .offset_words = 0, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(port_gid), .offset_words = 4, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(qkey), .offset_words = 8, .offset_bits = 0, .size_bits = 32 }, { MCMEMBER_REC_FIELD(mlid), .offset_words = 9, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(mtu_selector), .offset_words = 9, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(mtu), .offset_words = 9, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(traffic_class), .offset_words = 9, .offset_bits = 24, .size_bits = 8 }, { 
MCMEMBER_REC_FIELD(pkey), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(rate_selector), .offset_words = 10, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(rate), .offset_words = 10, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(packet_life_time_selector), .offset_words = 10, .offset_bits = 24, .size_bits = 2 }, { MCMEMBER_REC_FIELD(packet_life_time), .offset_words = 10, .offset_bits = 26, .size_bits = 6 }, { MCMEMBER_REC_FIELD(sl), .offset_words = 11, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { MCMEMBER_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { MCMEMBER_REC_FIELD(scope), .offset_words = 12, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(join_state), .offset_words = 12, .offset_bits = 4, .size_bits = 4 }, { MCMEMBER_REC_FIELD(proxy_join), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { RESERVED, .offset_words = 12, .offset_bits = 9, .size_bits = 23 }, }; static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); ib_destroy_ah(sm_ah->ah); kfree(sm_ah); } static void update_sm_ah(void *port_ptr) { struct ib_sa_port *port = port_ptr; struct ib_sa_sm_ah *new_ah, *old_ah; struct ib_port_attr port_attr; struct ib_ah_attr ah_attr; if (ib_query_port(port->agent->device, port->port_num, &port_attr)) { printk(KERN_WARNING "... [truncated message content] |
|
From: enferex <en...@us...> - 2005-11-10 03:53:56
|
Update of /cvsroot/netnice/Linux/drivers/infiniband/core In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv18121/core Log Message: Directory /cvsroot/netnice/Linux/drivers/infiniband/core added to the repository --> Using per-directory sticky tag `netnice2612' |
Update of /cvsroot/netnice/Linux/sound/core/seq In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv11464 Added Files: Tag: netnice2612 Makefile seq.c seq_clientmgr.c seq_clientmgr.h seq_compat.c seq_device.c seq_dummy.c seq_fifo.c seq_fifo.h seq_info.c seq_info.h seq_instr.c seq_lock.c seq_lock.h seq_memory.c seq_memory.h seq_midi.c seq_midi_emul.c seq_midi_event.c seq_ports.c seq_ports.h seq_prioq.c seq_prioq.h seq_queue.c seq_queue.h seq_system.c seq_system.h seq_timer.c seq_timer.h seq_virmidi.c Log Message: Added many missing files

--- NEW FILE: seq_queue.h ---

/*
 * ALSA sequencer Queue handling
 * Copyright (c) 1998-1999 by Frank van de Pol <fv...@co...>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#ifndef __SND_SEQ_QUEUE_H
#define __SND_SEQ_QUEUE_H

#include "seq_memory.h"
#include "seq_prioq.h"
#include "seq_timer.h"
#include "seq_lock.h"
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/bitops.h>

#define SEQ_QUEUE_NO_OWNER (-1)

/* per-queue state: two priority queues (tick and real time), a timer,
 * ownership/locking bookkeeping and the set of clients using the queue */
struct _snd_seq_queue {
	int queue;		/* queue number */
	char name[64];		/* name of this queue */

	prioq_t *tickq;		/* midi tick event queue */
	prioq_t *timeq;		/* real-time event queue */

	seq_timer_t *timer;	/* time keeper for this queue */
	int owner;		/* client that 'owns' the timer */
	unsigned int locked:1,	/* timer is only accessible by owner if set */
		klocked:1,	/* kernel lock (after START) */
		check_again:1,	/* re-run the dispatch loop (see snd_seq_check_queue) */
		check_blocked:1; /* dispatch loop currently running */

	unsigned int flags;		/* status flags */
	unsigned int info_flags;	/* info for sync */

	spinlock_t owner_lock;
	spinlock_t check_lock;

	/* clients which use this queue (bitmap) */
	DECLARE_BITMAP(clients_bitmap, SNDRV_SEQ_MAX_CLIENTS);
	unsigned int clients;	/* users of this queue */
	struct semaphore timer_mutex;

	snd_use_lock_t use_lock;
};

/* get the number of current queues */
int snd_seq_queue_get_cur_queues(void);

/* init queues structure */
int snd_seq_queues_init(void);

/* delete queues */
void snd_seq_queues_delete(void);

/* create new queue (constructor) */
int snd_seq_queue_alloc(int client, int locked, unsigned int flags);

/* delete queue (destructor) */
int snd_seq_queue_delete(int client, int queueid);

/* notification that client has left the system */
void snd_seq_queue_client_termination(int client);

/* final stage */
void snd_seq_queue_client_leave(int client);

/* enqueue an event received from one of the clients */
int snd_seq_enqueue_event(snd_seq_event_cell_t *cell, int atomic, int hop);

/* Remove events */
void snd_seq_queue_client_leave_cells(int client);
void snd_seq_queue_remove_cells(int client, snd_seq_remove_events_t *info);

/* return pointer to queue structure for specified id */
queue_t *queueptr(int queueid);
/* unlock */
#define queuefree(q) snd_use_lock_free(&(q)->use_lock)

/* return the (first) queue matching with the specified name */
queue_t *snd_seq_queue_find_name(char *name);

/* check single queue and dispatch events */
void snd_seq_check_queue(queue_t *q, int atomic, int hop);

/* access to queue's parameters */
int snd_seq_queue_check_access(int queueid, int client);
int snd_seq_queue_timer_set_tempo(int queueid, int client, snd_seq_queue_tempo_t *info);
int snd_seq_queue_set_owner(int queueid, int client, int locked);
int snd_seq_queue_set_locked(int queueid, int client, int locked);
int snd_seq_queue_timer_open(int queueid);
int snd_seq_queue_timer_close(int queueid);
int snd_seq_queue_use(int queueid, int client, int use);
int snd_seq_queue_is_used(int queueid, int client);

int snd_seq_control_queue(snd_seq_event_t *ev, int atomic, int hop);
void snd_seq_queue_process_event(queue_t *q, snd_seq_event_t *ev, int atomic, int hop);

/*
 * 64bit division - for sync stuff..
*/ #if defined(i386) || defined(i486) #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("divl %4" \ : "=a" ((u32)(q)), \ "=d" ((u32)(r)) \ : "0" ((u32)(n0)), \ "1" ((u32)(n1)), \ "rm" ((u32)(d))) #define u64_div(x,y,q) do {u32 __tmp; udiv_qrnnd(q, __tmp, (x)>>32, x, y);} while (0) #define u64_mod(x,y,r) do {u32 __tmp; udiv_qrnnd(__tmp, q, (x)>>32, x, y);} while (0) #define u64_divmod(x,y,q,r) udiv_qrnnd(q, r, (x)>>32, x, y) #else #define u64_div(x,y,q) ((q) = (u32)((u64)(x) / (u64)(y))) #define u64_mod(x,y,r) ((r) = (u32)((u64)(x) % (u64)(y))) #define u64_divmod(x,y,q,r) (u64_div(x,y,q), u64_mod(x,y,r)) #endif #endif --- NEW FILE: seq_lock.c --- /* * Do sleep inside a spin-lock * Copyright (c) 1999 by Takashi Iwai <ti...@su...> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <sound/core.h>
#include "seq_lock.h"

#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)

/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
	int max_count = 5 * HZ;	/* give up after roughly five seconds of polling */

	/* a negative counter indicates unbalanced use/free calls */
	if (atomic_read(lockp) < 0) {
		printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
		return;
	}
	/* poll (one jiffy per iteration) until every holder drops its reference */
	while (atomic_read(lockp) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		max_count--;
	}
}
#endif

--- NEW FILE: seq.c ---

/*
 * ALSA sequencer main module
 * Copyright (c) 1998-1999 by Frank van de Pol <fv...@co...>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_lock.h"
#include "seq_timer.h"
#include "seq_system.h"
#include "seq_info.h"
#include <sound/seq_device.h>

/* clients to auto-load via kmod; -1 means "none" for that slot */
#if defined(CONFIG_SND_SEQ_DUMMY_MODULE)
int seq_client_load[64] = {[0] = SNDRV_SEQ_CLIENT_DUMMY, [1 ... 63] = -1};
#else
int seq_client_load[64] = {[0 ... 63] = -1};
#endif
int seq_default_timer_class = SNDRV_TIMER_CLASS_GLOBAL;
int seq_default_timer_sclass = SNDRV_TIMER_SCLASS_NONE;
int seq_default_timer_card = -1;
int seq_default_timer_device = SNDRV_TIMER_GLOBAL_SYSTEM;
int seq_default_timer_subdevice = 0;
int seq_default_timer_resolution = 0;	/* Hz */

MODULE_AUTHOR("Frank van de Pol <fv...@co...>, Jaroslav Kysela <pe...@su...>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer.");
MODULE_LICENSE("GPL");

module_param_array(seq_client_load, int, NULL, 0444);
MODULE_PARM_DESC(seq_client_load, "The numbers of global (system) clients to load through kmod.");
module_param(seq_default_timer_class, int, 0644);
MODULE_PARM_DESC(seq_default_timer_class, "The default timer class.");
module_param(seq_default_timer_sclass, int, 0644);
MODULE_PARM_DESC(seq_default_timer_sclass, "The default timer slave class.");
module_param(seq_default_timer_card, int, 0644);
MODULE_PARM_DESC(seq_default_timer_card, "The default timer card number.");
module_param(seq_default_timer_device, int, 0644);
MODULE_PARM_DESC(seq_default_timer_device, "The default timer device number.");
module_param(seq_default_timer_subdevice, int, 0644);
MODULE_PARM_DESC(seq_default_timer_subdevice, "The default timer subdevice number.");
module_param(seq_default_timer_resolution, int, 0644);
MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz.");

/*
 * INIT PART
 */

/* initialize all sequencer subsystems in dependency order.
 * NOTE(review): on failure the already-initialized subsystems are not
 * unwound before returning the error — confirm whether partial init is
 * acceptable here or a cleanup chain is needed. */
static int __init alsa_seq_init(void)
{
	int err;

	snd_seq_autoload_lock();
	if ((err = client_init_data()) < 0)
		goto error;

	/* init memory, room for selected events */
	if ((err = snd_sequencer_memory_init()) < 0)
		goto error;

	/* init event queues */
	if ((err = snd_seq_queues_init()) < 0)
		goto error;

	/* register sequencer device */
	if ((err = snd_sequencer_device_init()) < 0)
		goto error;

	/* register proc interface */
	if ((err = snd_seq_info_init()) < 0)
		goto error;

	/* register our internal client */
	if ((err = snd_seq_system_client_init()) < 0)
		goto error;

 error:
	/* on success err is 0 and falls through here as well */
	snd_seq_autoload_unlock();
	return err;
}

/* tear down in the reverse order of alsa_seq_init() */
static void __exit alsa_seq_exit(void)
{
	/* unregister our internal client */
	snd_seq_system_client_done();

	/* unregister proc interface */
	snd_seq_info_done();

	/* delete timing queues */
	snd_seq_queues_delete();

	/* unregister sequencer device */
	snd_sequencer_device_done();

	/* release event memory */
	snd_sequencer_memory_done();
}

module_init(alsa_seq_init)
module_exit(alsa_seq_exit)

/* seq_clientmgr.c */
EXPORT_SYMBOL(snd_seq_create_kernel_client);
EXPORT_SYMBOL(snd_seq_delete_kernel_client);
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
EXPORT_SYMBOL(snd_seq_set_queue_tempo);
/* seq_memory.c */
EXPORT_SYMBOL(snd_seq_expand_var_event);
EXPORT_SYMBOL(snd_seq_dump_var_event);
/* seq_ports.c */
EXPORT_SYMBOL(snd_seq_event_port_attach);
EXPORT_SYMBOL(snd_seq_event_port_detach);
/* seq_lock.c */
#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
/*EXPORT_SYMBOL(snd_seq_sleep_in_lock);*/
/*EXPORT_SYMBOL(snd_seq_sleep_timeout_in_lock);*/
EXPORT_SYMBOL(snd_use_lock_sync_helper);
#endif

--- NEW FILE: seq_lock.h ---

#ifndef
__SND_SEQ_LOCK_H
#define __SND_SEQ_LOCK_H

#include <linux/sched.h>

#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)

/* use-count lock: readers bump the counter, destructors wait for zero */
typedef atomic_t snd_use_lock_t;

/* initialize lock */
#define snd_use_lock_init(lockp) atomic_set(lockp, 0)

/* increment lock */
#define snd_use_lock_use(lockp) atomic_inc(lockp)

/* release lock */
#define snd_use_lock_free(lockp) atomic_dec(lockp)

/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)

#else /* SMP || CONFIG_SND_DEBUG */

/* on UP non-debug builds the whole mechanism compiles away */
typedef spinlock_t snd_use_lock_t;	/* dummy */
#define snd_use_lock_init(lockp) /**/
#define snd_use_lock_use(lockp) /**/
#define snd_use_lock_free(lockp) /**/
#define snd_use_lock_sync(lockp) /**/

#endif /* SMP || CONFIG_SND_DEBUG */

#endif /* __SND_SEQ_LOCK_H */

--- NEW FILE: seq_queue.c ---

/*
 * ALSA sequencer Timing queue handling
 * Copyright (c) 1998-1999 by Frank van de Pol <fv...@co...>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MAJOR CHANGES
 * Nov. 13, 1999 Takashi Iwai <iw...@ww...>
 * - Queues are allocated dynamically via ioctl.
 * - When owner client is deleted, all owned queues are deleted, too.
 * - Owner of unlocked queue is kept unmodified even if it is
 * manipulated by other clients.
 * - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 * caller client. i.e. Changing owner to a third client is not
 * allowed.
 *
 * Aug. 30, 2000 Takashi Iwai
 * - Queues are managed in static array again, but with better way.
 * The API itself is identical.
 * - The queue is locked when queue_t pointer is returned via
 * queueptr(). This pointer *MUST* be released afterward by
 * queuefree(ptr).
 * - Addition of experimental sync support.
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static queue_t *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

/* get the number of current queues */
int snd_seq_queue_get_cur_queues(void)
{
	return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(queue_t *q)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&queue_list_lock, flags);
	/* the first free slot index becomes the queue id */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (! queue_list[i]) {
			queue_list[i] = q;
			q->queue = i;
			num_queues++;
			spin_unlock_irqrestore(&queue_list_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&queue_list_lock, flags);
	return -1;	/* no free slot */
}

/* detach the queue with the given id from the list, but only when it is
 * owned by 'client'; returns the detached queue (kernel-locked) or NULL */
static queue_t *queue_list_remove(int id, int client)
{
	queue_t *q;
	unsigned long flags;

	spin_lock_irqsave(&queue_list_lock, flags);
	q = queue_list[id];
	if (q) {
		spin_lock(&q->owner_lock);
		if (q->owner == client) {
			/* found */
			q->klocked = 1;	/* keep other clients out while deleting */
			spin_unlock(&q->owner_lock);
			queue_list[id] = NULL;
			num_queues--;
			spin_unlock_irqrestore(&queue_list_lock, flags);
			return q;
		}
		spin_unlock(&q->owner_lock);
	}
	spin_unlock_irqrestore(&queue_list_lock, flags);
	return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static queue_t *queue_new(int owner, int locked)
{
	queue_t *q;

	q = kcalloc(1, sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		snd_printd("malloc failed for snd_seq_queue_new()\n");
		return NULL;
	}

	spin_lock_init(&q->owner_lock);
	spin_lock_init(&q->check_lock);
	init_MUTEX(&q->timer_mutex);
	snd_use_lock_init(&q->use_lock);
	q->queue = -1;	/* not inserted into queue_list yet */

	q->tickq = snd_seq_prioq_new();
	q->timeq = snd_seq_prioq_new();
	q->timer = snd_seq_timer_new();
	if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
		/* partial allocation failed: release whatever succeeded */
		snd_seq_prioq_delete(&q->tickq);
		snd_seq_prioq_delete(&q->timeq);
		snd_seq_timer_delete(&q->timer);
		kfree(q);
		return NULL;
	}

	q->owner = owner;
	q->locked = locked;
	q->klocked = 0;
	return q;
}

/* delete queue (destructor) */
static void queue_delete(queue_t *q)
{
	/* stop and release the timer */
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
	/* wait until access free */
	snd_use_lock_sync(&q->use_lock);
	/* release resources...
 */
	snd_seq_prioq_delete(&q->tickq);
	snd_seq_prioq_delete(&q->timeq);
	snd_seq_timer_delete(&q->timer);

	kfree(q);
}

/*----------------------------------------------------------------*/

/* setup queues */
int __init snd_seq_queues_init(void)
{
	/*
	memset(queue_list, 0, sizeof(queue_list));
	num_queues = 0;
	*/
	return 0;
}

/* delete all existing queues */
void __exit snd_seq_queues_delete(void)
{
	int i;

	/* clear list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (queue_list[i])
			queue_delete(queue_list[i]);
	}
}

/* allocate a new queue -
 * return queue index value or negative value for error
 */
int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
	queue_t *q;

	q = queue_new(client, locked);
	if (q == NULL)
		return -ENOMEM;
	q->info_flags = info_flags;
	if (queue_list_add(q) < 0) {
		queue_delete(q);
		return -ENOMEM;
	}
	snd_seq_queue_use(q->queue, client, 1); /* use this queue */
	return q->queue;
}

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
	queue_t *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return -EINVAL;
	q = queue_list_remove(queueid, client);
	if (q == NULL)
		return -EINVAL;
	queue_delete(q);

	return 0;
}

/* return pointer to queue structure for specified id */
queue_t *queueptr(int queueid)
{
	queue_t *q;
	unsigned long flags;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return NULL;
	spin_lock_irqsave(&queue_list_lock, flags);
	q = queue_list[queueid];
	if (q)
		snd_use_lock_use(&q->use_lock);	/* caller must release via queuefree() */
	spin_unlock_irqrestore(&queue_list_lock, flags);
	return q;
}

/* return the (first) queue matching with the specified name */
queue_t *snd_seq_queue_find_name(char *name)
{
	int i;
	queue_t *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) != NULL) {
			if (strncmp(q->name, name, sizeof(q->name)) == 0)
				return q;	/* returned locked; caller frees */
			queuefree(q);
		}
	}
	return NULL;
}

/* -------------------------------------------------------- */

/* check single queue and dispatch all expired events; non-reentrant via
 * the check_blocked/check_again flags */
void snd_seq_check_queue(queue_t *q, int atomic, int hop)
{
	unsigned long flags;
	snd_seq_event_cell_t *cell;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	spin_lock_irqsave(&q->check_lock, flags);
	if (q->check_blocked) {
		q->check_again = 1;
		spin_unlock_irqrestore(&q->check_lock, flags);
		return;		/* other thread is already checking queues */
	}
	q->check_blocked = 1;
	spin_unlock_irqrestore(&q->check_lock, flags);

      __again:
	/* Process tick queue... */
	while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
		if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, &cell->event.time.tick)) {
			cell = snd_seq_prioq_cell_out(q->tickq);
			if (cell)
				snd_seq_dispatch_event(cell, atomic, hop);
		} else {
			/* event remains in the queue */
			break;
		}
	}

	/* Process time queue... */
	while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
		if (snd_seq_compare_real_time(&q->timer->cur_time, &cell->event.time.time)) {
			cell = snd_seq_prioq_cell_out(q->timeq);
			if (cell)
				snd_seq_dispatch_event(cell, atomic, hop);
		} else {
			/* event remains in the queue */
			break;
		}
	}

	/* free lock */
	spin_lock_irqsave(&q->check_lock, flags);
	if (q->check_again) {
		/* another thread requested a re-check while we were dispatching */
		q->check_again = 0;
		spin_unlock_irqrestore(&q->check_lock, flags);
		goto __again;
	}
	q->check_blocked = 0;
	spin_unlock_irqrestore(&q->check_lock, flags);
}

/* enqueue an event to a single queue */
int snd_seq_enqueue_event(snd_seq_event_cell_t *cell, int atomic, int hop)
{
	int dest, err;
	queue_t *q;

	snd_assert(cell != NULL, return -EINVAL);
	dest = cell->event.queue;	/* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time, &q->timer->cur_time);
			break;
		}
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |=
SNDRV_SEQ_TIME_MODE_ABS; } /* enqueue event in the real-time or midi queue */ switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { case SNDRV_SEQ_TIME_STAMP_TICK: err = snd_seq_prioq_cell_in(q->tickq, cell); break; case SNDRV_SEQ_TIME_STAMP_REAL: default: err = snd_seq_prioq_cell_in(q->timeq, cell); break; } if (err < 0) { queuefree(q); /* unlock */ return err; } /* trigger dispatching */ snd_seq_check_queue(q, atomic, hop); queuefree(q); /* unlock */ return 0; } /*----------------------------------------------------------------*/ static inline int check_access(queue_t *q, int client) { return (q->owner == client) || (!q->locked && !q->klocked); } /* check if the client has permission to modify queue parameters. * if it does, lock the queue */ static int queue_access_lock(queue_t *q, int client) { unsigned long flags; int access_ok; spin_lock_irqsave(&q->owner_lock, flags); access_ok = check_access(q, client); if (access_ok) q->klocked = 1; spin_unlock_irqrestore(&q->owner_lock, flags); return access_ok; } /* unlock the queue */ static inline void queue_access_unlock(queue_t *q) { unsigned long flags; spin_lock_irqsave(&q->owner_lock, flags); q->klocked = 0; spin_unlock_irqrestore(&q->owner_lock, flags); } /* exported - only checking permission */ int snd_seq_queue_check_access(int queueid, int client) { queue_t *q = queueptr(queueid); int access_ok; unsigned long flags; if (! q) return 0; spin_lock_irqsave(&q->owner_lock, flags); access_ok = check_access(q, client); spin_unlock_irqrestore(&q->owner_lock, flags); queuefree(q); return access_ok; } /*----------------------------------------------------------------*/ /* * change queue's owner and permission */ int snd_seq_queue_set_owner(int queueid, int client, int locked) { queue_t *q = queueptr(queueid); if (q == NULL) return -EINVAL; if (! queue_access_lock(q, client)) { queuefree(q); return -EPERM; } q->locked = locked ? 
1 : 0; q->owner = client; queue_access_unlock(q); queuefree(q); return 0; } /*----------------------------------------------------------------*/ /* open timer - * q->use mutex should be down before calling this function to avoid * confliction with snd_seq_queue_use() */ int snd_seq_queue_timer_open(int queueid) { int result = 0; queue_t *queue; seq_timer_t *tmr; queue = queueptr(queueid); if (queue == NULL) return -EINVAL; tmr = queue->timer; if ((result = snd_seq_timer_open(queue)) < 0) { snd_seq_timer_defaults(tmr); result = snd_seq_timer_open(queue); } queuefree(queue); return result; } /* close timer - * q->use mutex should be down before calling this function */ int snd_seq_queue_timer_close(int queueid) { queue_t *queue; seq_timer_t *tmr; int result = 0; queue = queueptr(queueid); if (queue == NULL) return -EINVAL; tmr = queue->timer; snd_seq_timer_close(queue); queuefree(queue); return result; } /* change queue tempo and ppq */ int snd_seq_queue_timer_set_tempo(int queueid, int client, snd_seq_queue_tempo_t *info) { queue_t *q = queueptr(queueid); int result; if (q == NULL) return -EINVAL; if (! queue_access_lock(q, client)) { queuefree(q); return -EPERM; } result = snd_seq_timer_set_tempo(q->timer, info->tempo); if (result >= 0) result = snd_seq_timer_set_ppq(q->timer, info->ppq); if (result >= 0 && info->skew_base > 0) result = snd_seq_timer_set_skew(q->timer, info->skew_value, info->skew_base); queue_access_unlock(q); queuefree(q); return result; } /* use or unuse this queue - * if it is the first client, starts the timer. * if it is not longer used by any clients, stop the timer. 
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
	queue_t *queue;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	down(&queue->timer_mutex);
	/* track the client in the bitmap; clients counts distinct users */
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		/* first user resets the timer to defaults before opening */
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
	up(&queue->timer_mutex);
	queuefree(queue);
	return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
	queue_t *q;
	int result;

	q = queueptr(queueid);
	if (q == NULL)
		return -EINVAL; /* invalid queue */
	result = test_bit(client, q->clients_bitmap) ? 1 : 0;
	queuefree(q);
	return result;
}

/*----------------------------------------------------------------*/

/* notification that client has left the system -
 * stop the timer on all queues owned by this client
 */
void snd_seq_queue_client_termination(int client)
{
	unsigned long flags;
	int i;
	queue_t *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;
		spin_lock_irqsave(&q->owner_lock, flags);
		if (q->owner == client)
			q->klocked = 1;	/* prevent further control by others */
		spin_unlock_irqrestore(&q->owner_lock, flags);
		if (q->owner == client) {
			if (q->timer->running)
				snd_seq_timer_stop(q->timer);
			snd_seq_timer_reset(q->timer);
		}
		queuefree(q);
	}
}

/* final stage notification -
 * remove cells of the no-longer-existing client (for non-owned queue)
 * or delete this queue (for owned queue)
 */
void snd_seq_queue_client_leave(int client)
{
	int i;
	queue_t *q;

	/* delete own queues from queue list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queue_list_remove(i, client)) != NULL)
			queue_delete(q);
	}

	/* remove cells from existing queues -
	 * they are not owned by this client
	 */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
	int i;
	queue_t *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;
		snd_seq_prioq_leave(q->tickq, client, 0);
		snd_seq_prioq_leave(q->timeq, client, 0);
		queuefree(q);
	}
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, snd_seq_remove_events_t *info)
{
	int i;
	queue_t *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;
		/* only this queue when REMOVE_DEST is set, otherwise all queues */
		if (test_bit(client, q->clients_bitmap) &&
		    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
		     q->queue == info->queue)) {
			snd_seq_prioq_remove_events(q->tickq, client, info);
			snd_seq_prioq_remove_events(q->timeq, client, info);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(queue_t *q, snd_seq_event_t *ev, int atomic, int hop)
{
	snd_seq_event_t sev;

	sev = *ev;
	sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
	sev.time.tick = q->timer->tick.cur_tick;
	sev.queue = q->queue;
	sev.data.queue.queue = q->queue;

	/* broadcast events from Timer port */
	sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
void snd_seq_queue_process_event(queue_t *q, snd_seq_event_t *ev, int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		/* a START first flushes the sender's pending events */
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (! snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (! snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	}
}

/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(snd_seq_event_t *ev, int atomic, int hop)
{
	queue_t *q;

	snd_assert(ev != NULL, return -EINVAL);
	q = queueptr(ev->data.queue.queue);

	if (q == NULL)
		return -EINVAL;

	if (! queue_access_lock(q, ev->source.client)) {
		queuefree(q);
		return -EPERM;
	}

	snd_seq_queue_process_event(q, ev, atomic, hop);

	queue_access_unlock(q);
	queuefree(q);

	return 0;
}

/*----------------------------------------------------------------*/

/* exported to seq_info.c */
void snd_seq_info_queues_read(snd_info_entry_t *entry, snd_info_buffer_t * buffer)
{
	int i, bpm;
	queue_t *q;
	seq_timer_t *tmr;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if ((q = queueptr(i)) == NULL)
			continue;
		tmr = q->timer;
		/* tempo is microseconds per quarter note; guard div-by-zero */
		if (tmr->tempo)
			bpm = 60000000 / tmr->tempo;
		else
			bpm = 0;
		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client : %d\n", q->owner);
		snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo : %d\n", tmr->tempo);
		snd_iprintf(buffer, "current BPM : %d\n", bpm);
		snd_iprintf(buffer, "current time : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
		queuefree(q);
	}
}

--- NEW FILE: seq_midi.c ---

/*
 * Generic MIDI synth driver for ALSA sequencer
 * Copyright (c) 1998 by Frank van de Pol <fv...@co...>
 * Jaroslav Kysela <pe...@su...>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* Possible options for midisynth module: - automatic opening of midi ports on first received event or subscription (close will be performed when client leaves) */ #include <sound/driver.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/moduleparam.h> #include <asm/semaphore.h> #include <sound/core.h> #include <sound/rawmidi.h> #include <sound/seq_kernel.h> #include <sound/seq_device.h> #include <sound/seq_midi_event.h> #include <sound/initval.h> MODULE_AUTHOR("Frank van de Pol <fv...@co...>, Jaroslav Kysela <pe...@su...>"); MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer MIDI synth."); MODULE_LICENSE("GPL"); static int output_buffer_size = PAGE_SIZE; module_param(output_buffer_size, int, 0644); MODULE_PARM_DESC(output_buffer_size, "Output buffer size in bytes."); static int input_buffer_size = PAGE_SIZE; module_param(input_buffer_size, int, 0644); MODULE_PARM_DESC(input_buffer_size, "Input buffer size in bytes."); /* data for this midi synth driver */ typedef struct { snd_card_t *card; int device; int subdevice; snd_rawmidi_file_t input_rfile; snd_rawmidi_file_t output_rfile; int seq_client; int seq_port; snd_midi_event_t *parser; } seq_midisynth_t; typedef struct { int seq_client; int num_ports; int ports_per_device[SNDRV_RAWMIDI_DEVICES]; seq_midisynth_t *ports[SNDRV_RAWMIDI_DEVICES]; } seq_midisynth_client_t; static seq_midisynth_client_t *synths[SNDRV_CARDS]; static DECLARE_MUTEX(register_mutex); /* handle rawmidi input event (MIDI v1.0 stream) */ static void snd_midi_input_event(snd_rawmidi_substream_t * substream) { snd_rawmidi_runtime_t *runtime; seq_midisynth_t *msynth; snd_seq_event_t ev; char buf[16], *pbuf; 
long res, count; if (substream == NULL) return; runtime = substream->runtime; msynth = (seq_midisynth_t *) runtime->private_data; if (msynth == NULL) return; memset(&ev, 0, sizeof(ev)); while (runtime->avail > 0) { res = snd_rawmidi_kernel_read(substream, buf, sizeof(buf)); if (res <= 0) continue; if (msynth->parser == NULL) continue; pbuf = buf; while (res > 0) { count = snd_midi_event_encode(msynth->parser, pbuf, res, &ev); if (count < 0) break; pbuf += count; res -= count; if (ev.type != SNDRV_SEQ_EVENT_NONE) { ev.source.port = msynth->seq_port; ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; snd_seq_kernel_client_dispatch(msynth->seq_client, &ev, 1, 0); /* clear event and reset header */ memset(&ev, 0, sizeof(ev)); } } } } static int dump_midi(snd_rawmidi_substream_t *substream, const char *buf, int count) { snd_rawmidi_runtime_t *runtime; int tmp; snd_assert(substream != NULL || buf != NULL, return -EINVAL); runtime = substream->runtime; if ((tmp = runtime->avail) < count) { snd_printd("warning, output event was lost (count = %i, available = %i)\n", count, tmp); return -ENOMEM; } if (snd_rawmidi_kernel_write(substream, buf, count) < count) return -EINVAL; return 0; } static int event_process_midi(snd_seq_event_t * ev, int direct, void *private_data, int atomic, int hop) { seq_midisynth_t *msynth = (seq_midisynth_t *) private_data; unsigned char msg[10]; /* buffer for constructing midi messages */ snd_rawmidi_substream_t *substream; int res; snd_assert(msynth != NULL, return -EINVAL); substream = msynth->output_rfile.output; if (substream == NULL) return -ENODEV; if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { /* special case, to save space */ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) { /* invalid event */ snd_printd("seq_midi: invalid sysex event flags = 0x%x\n", ev->flags); return 0; } res = snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream); snd_midi_event_reset_decode(msynth->parser); if (res < 0) return 
res; } else { if (msynth->parser == NULL) return -EIO; res = snd_midi_event_decode(msynth->parser, msg, sizeof(msg), ev); if (res < 0) return res; if ((res = dump_midi(substream, msg, res)) < 0) { snd_midi_event_reset_decode(msynth->parser); return res; } } return 0; } static int snd_seq_midisynth_new(seq_midisynth_t *msynth, snd_card_t *card, int device, int subdevice) { if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &msynth->parser) < 0) return -ENOMEM; msynth->card = card; msynth->device = device; msynth->subdevice = subdevice; return 0; } /* open associated midi device for input */ static int midisynth_subscribe(void *private_data, snd_seq_port_subscribe_t *info) { int err; seq_midisynth_t *msynth = (seq_midisynth_t *)private_data; snd_rawmidi_runtime_t *runtime; snd_rawmidi_params_t params; /* open midi port */ if ((err = snd_rawmidi_kernel_open(msynth->card->number, msynth->device, msynth->subdevice, SNDRV_RAWMIDI_LFLG_INPUT, &msynth->input_rfile)) < 0) { snd_printd("midi input open failed!!!\n"); return err; } runtime = msynth->input_rfile.input->runtime; memset(¶ms, 0, sizeof(params)); params.avail_min = 1; params.buffer_size = input_buffer_size; if ((err = snd_rawmidi_input_params(msynth->input_rfile.input, ¶ms)) < 0) { snd_rawmidi_kernel_release(&msynth->input_rfile); return err; } snd_midi_event_reset_encode(msynth->parser); runtime->event = snd_midi_input_event; runtime->private_data = msynth; snd_rawmidi_kernel_read(msynth->input_rfile.input, NULL, 0); return 0; } /* close associated midi device for input */ static int midisynth_unsubscribe(void *private_data, snd_seq_port_subscribe_t *info) { int err; seq_midisynth_t *msynth = (seq_midisynth_t *)private_data; snd_assert(msynth->input_rfile.input != NULL, return -EINVAL); err = snd_rawmidi_kernel_release(&msynth->input_rfile); return err; } /* open associated midi device for output */ static int midisynth_use(void *private_data, snd_seq_port_subscribe_t *info) { int err; seq_midisynth_t *msynth = 
(seq_midisynth_t *)private_data; snd_rawmidi_params_t params; /* open midi port */ if ((err = snd_rawmidi_kernel_open(msynth->card->number, msynth->device, msynth->subdevice, SNDRV_RAWMIDI_LFLG_OUTPUT, &msynth->output_rfile)) < 0) { snd_printd("midi output open failed!!!\n"); return err; } memset(¶ms, 0, sizeof(params)); params.avail_min = 1; params.buffer_size = output_buffer_size; if ((err = snd_rawmidi_output_params(msynth->output_rfile.output, ¶ms)) < 0) { snd_rawmidi_kernel_release(&msynth->output_rfile); return err; } snd_midi_event_reset_decode(msynth->parser); return 0; } /* close associated midi device for output */ static int midisynth_unuse(void *private_data, snd_seq_port_subscribe_t *info) { seq_midisynth_t *msynth = (seq_midisynth_t *)private_data; unsigned char buf = 0xff; /* MIDI reset */ snd_assert(msynth->output_rfile.output != NULL, return -EINVAL); /* sending single MIDI reset message to shut the device up */ snd_rawmidi_kernel_write(msynth->output_rfile.output, &buf, 1); snd_rawmidi_drain_output(msynth->output_rfile.output); return snd_rawmidi_kernel_release(&msynth->output_rfile); } /* delete given midi synth port */ static void snd_seq_midisynth_delete(seq_midisynth_t *msynth) { if (msynth == NULL) return; if (msynth->seq_client > 0) { /* delete port */ snd_seq_event_port_detach(msynth->seq_client, msynth->seq_port); } if (msynth->parser) snd_midi_event_free(msynth->parser); } /* set our client name */ static int set_client_name(seq_midisynth_client_t *client, snd_card_t *card, snd_rawmidi_info_t *rmidi) { snd_seq_client_info_t cinfo; const char *name; memset(&cinfo, 0, sizeof(cinfo)); cinfo.client = client->seq_client; cinfo.type = KERNEL_CLIENT; name = rmidi->name[0] ? 
(const char *)rmidi->name : "External MIDI"; strlcpy(cinfo.name, name, sizeof(cinfo.name)); return snd_seq_kernel_client_ctl(client->seq_client, SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, &cinfo); } /* register new midi synth port */ static int snd_seq_midisynth_register_port(snd_seq_device_t *dev) { seq_midisynth_client_t *client; seq_midisynth_t *msynth, *ms; snd_seq_port_info_t *port; snd_rawmidi_info_t *info; int newclient = 0; unsigned int p, ports; snd_seq_client_callback_t callbacks; snd_seq_port_callback_t pcallbacks; snd_card_t *card = dev->card; int device = dev->device; unsigned int input_count = 0, output_count = 0; snd_assert(card != NULL && device >= 0 && device < SNDRV_RAWMIDI_DEVICES, return -EINVAL); info = kmalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->device = device; info->stream = SNDRV_RAWMIDI_STREAM_OUTPUT; info->subdevice = 0; if (snd_rawmidi_info_select(card, info) >= 0) output_count = info->subdevices_count; info->stream = SNDRV_RAWMIDI_STREAM_INPUT; if (snd_rawmidi_info_select(card, info) >= 0) { input_count = info->subdevices_count; } ports = output_count; if (ports < input_count) ports = input_count; if (ports == 0) { kfree(info); return -ENODEV; } if (ports > (256 / SNDRV_RAWMIDI_DEVICES)) ports = 256 / SNDRV_RAWMIDI_DEVICES; down(®ister_mutex); client = synths[card->number]; if (client == NULL) { newclient = 1; client = kcalloc(1, sizeof(*client), GFP_KERNEL); if (client == NULL) { up(®ister_mutex); kfree(info); return -ENOMEM; } memset(&callbacks, 0, sizeof(callbacks)); callbacks.private_data = client; callbacks.allow_input = callbacks.allow_output = 1; client->seq_client = snd_seq_create_kernel_client(card, 0, &callbacks); if (client->seq_client < 0) { kfree(client); up(®ister_mutex); kfree(info); return -ENOMEM; } set_client_name(client, card, info); } else if (device == 0) set_client_name(client, card, info); /* use the first device's name */ msynth = kcalloc(ports, sizeof(seq_midisynth_t), GFP_KERNEL); port = 
kmalloc(sizeof(*port), GFP_KERNEL); if (msynth == NULL || port == NULL) goto __nomem; for (p = 0; p < ports; p++) { ms = &msynth[p]; if (snd_seq_midisynth_new(ms, card, device, p) < 0) goto __nomem; /* declare port */ memset(port, 0, sizeof(*port)); port->addr.client = client->seq_client; port->addr.port = device * (256 / SNDRV_RAWMIDI_DEVICES) + p; port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT; memset(info, 0, sizeof(*info)); info->device = device; if (p < output_count) info->stream = SNDRV_RAWMIDI_STREAM_OUTPUT; else info->stream = SNDRV_RAWMIDI_STREAM_INPUT; info->subdevice = p; if (snd_rawmidi_info_select(card, info) >= 0) strcpy(port->name, info->subname); if (! port->name[0]) { if (info->name[0]) { if (ports > 1) snprintf(port->name, sizeof(port->name), "%s-%d", info->name, p); else snprintf(port->name, sizeof(port->name), "%s", info->name); } else { /* last resort */ if (ports > 1) sprintf(port->name, "MIDI %d-%d-%d", card->number, device, p); else sprintf(port->name, "MIDI %d-%d", card->number, device); } } if ((info->flags & SNDRV_RAWMIDI_INFO_OUTPUT) && p < output_count) port->capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SYNC_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE; if ((info->flags & SNDRV_RAWMIDI_INFO_INPUT) && p < input_count) port->capability |= SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SYNC_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ; if ((port->capability & (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_READ)) == (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_READ) && info->flags & SNDRV_RAWMIDI_INFO_DUPLEX) port->capability |= SNDRV_SEQ_PORT_CAP_DUPLEX; port->type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC; port->midi_channels = 16; memset(&pcallbacks, 0, sizeof(pcallbacks)); pcallbacks.owner = THIS_MODULE; pcallbacks.private_data = ms; pcallbacks.subscribe = midisynth_subscribe; pcallbacks.unsubscribe = midisynth_unsubscribe; pcallbacks.use = midisynth_use; pcallbacks.unuse = midisynth_unuse; pcallbacks.event_input = event_process_midi; port->kernel = 
&pcallbacks; if (snd_seq_kernel_client_ctl(client->seq_client, SNDRV_SEQ_IOCTL_CREATE_PORT, port)<0) goto __nomem; ms->seq_client = client->seq_client; ms->seq_port = port->addr.port; } client->ports_per_device[device] = ports; client->ports[device] = msynth; client->num_ports++; if (newclient) synths[card->number] = client; up(®ister_mutex); return 0; /* success */ __nomem: if (msynth != NULL) { for (p = 0; p < ports; p++) snd_seq_midisynth_delete(&msynth[p]); kfree(msynth); } if (newclient) { snd_seq_delete_kernel_client(client->seq_client); kfree(client); } kfree(info); kfree(port); up(®ister_mutex); return -ENOMEM; } /* release midi synth port */ static int snd_seq_midisynth_unregister_port(snd_seq_device_t *dev) { seq_midisynth_client_t *client; seq_midisynth_t *msynth; snd_card_t *card = dev->card; int device = dev->device, p, ports; down(®ister_mutex); client = synths[card->number]; if (client == NULL || client->ports[device] == NULL) { up(®ister_mutex); return -ENODEV; } ports = client->ports_per_device[device]; client->ports_per_device[device] = 0; msynth = client->ports[device]; client->ports[device] = NULL; snd_runtime_check(msynth != NULL || ports <= 0, goto __skip); for (p = 0; p < ports; p++) snd_seq_midisynth_delete(&msynth[p]); kfree(msynth); __skip: client->num_ports--; if (client->num_ports <= 0) { snd_seq_delete_kernel_client(client->seq_client); synths[card->number] = NULL; kfree(client); } up(®ister_mutex); return 0; } static int __init alsa_seq_midi_init(void) { static snd_seq_dev_ops_t ops = { snd_seq_midisynth_register_port, snd_seq_midisynth_unregister_port, }; memset(&synths, 0, sizeof(synths)); snd_seq_autoload_lock(); snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_MIDISYNTH, &ops, 0); snd_seq_autoload_unlock(); return 0; } static void __exit alsa_seq_midi_exit(void) { snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_MIDISYNTH); } module_init(alsa_seq_midi_init) module_exit(alsa_seq_midi_exit) --- NEW FILE: seq_midi_event.c --- /* * 
MIDI byte <-> sequencer event coder * * Copyright (C) 1998,99 Takashi Iwai <ti...@su...>, * Jaroslav Kysela <pe...@su...> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <sound/driver.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/string.h> #include <sound/core.h> #include <sound/seq_kernel.h> #include <sound/seq_midi_event.h> #include <sound/asoundef.h> MODULE_AUTHOR("Takashi Iwai <ti...@su...>, Jaroslav Kysela <pe...@su...>"); MODULE_DESCRIPTION("MIDI byte <-> sequencer event coder"); MODULE_LICENSE("GPL"); /* queue type */ /* from 0 to 7 are normal commands (note off, on, etc.) 
*/ #define ST_NOTEOFF 0 #define ST_NOTEON 1 #define ST_SPECIAL 8 #define ST_SYSEX ST_SPECIAL /* from 8 to 15 are events for 0xf0-0xf7 */ /* status event types */ typedef void (*event_encode_t)(snd_midi_event_t *dev, snd_seq_event_t *ev); typedef void (*event_decode_t)(snd_seq_event_t *ev, unsigned char *buf); /* * prototypes */ static void note_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void one_param_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void pitchbend_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void two_param_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void one_param_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void songpos_event(snd_midi_event_t *dev, snd_seq_event_t *ev); static void note_decode(snd_seq_event_t *ev, unsigned char *buf); static void one_param_decode(snd_seq_event_t *ev, unsigned char *buf); static void pitchbend_decode(snd_seq_event_t *ev, unsigned char *buf); static void two_param_decode(snd_seq_event_t *ev, unsigned char *buf); static void songpos_decode(snd_seq_event_t *ev, unsigned char *buf); /* * event list */ static struct status_event_list_t { int event; int qlen; event_encode_t encode; event_decode_t decode; } status_event[] = { /* 0x80 - 0xf0 */ {SNDRV_SEQ_EVENT_NOTEOFF, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_NOTEON, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_KEYPRESS, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_CONTROLLER, 2, two_param_ctrl_event, two_param_decode}, {SNDRV_SEQ_EVENT_PGMCHANGE, 1, one_param_ctrl_event, one_param_decode}, {SNDRV_SEQ_EVENT_CHANPRESS, 1, one_param_ctrl_event, one_param_decode}, {SNDRV_SEQ_EVENT_PITCHBEND, 2, pitchbend_ctrl_event, pitchbend_decode}, {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xf0 */ /* 0xf0 - 0xff */ {SNDRV_SEQ_EVENT_SYSEX, 1, NULL, NULL}, /* sysex: 0xf0 */ {SNDRV_SEQ_EVENT_QFRAME, 1, one_param_event, one_param_decode}, /* 0xf1 */ {SNDRV_SEQ_EVENT_SONGPOS, 2, songpos_event, 
songpos_decode}, /* 0xf2 */ {SNDRV_SEQ_EVENT_SONGSEL, 1, one_param_event, one_param_decode}, /* 0xf3 */ {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xf4 */ {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xf5 */ {SNDRV_SEQ_EVENT_TUNE_REQUEST, 0, NULL, NULL}, /* 0xf6 */ {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xf7 */ {SNDRV_SEQ_EVENT_CLOCK, 0, NULL, NULL}, /* 0xf8 */ {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xf9 */ {SNDRV_SEQ_EVENT_START, 0, NULL, NULL}, /* 0xfa */ {SNDRV_SEQ_EVENT_CONTINUE, 0, NULL, NULL}, /* 0xfb */ {SNDRV_SEQ_EVENT_STOP, 0, NULL, NULL}, /* 0xfc */ {SNDRV_SEQ_EVENT_NONE, 0, NULL, NULL}, /* 0xfd */ {SNDRV_SEQ_EVENT_SENSING, 0, NULL, NULL}, /* 0xfe */ {SNDRV_SEQ_EVENT_RESET, 0, NULL, NULL}, /* 0xff */ }; static int extra_decode_ctrl14(snd_midi_event_t *dev, unsigned char *buf, int len, snd_seq_event_t *ev); static int extra_decode_xrpn(snd_midi_event_t *dev, unsigned char *buf, int count, snd_seq_event_t *ev); static struct extra_event_list_t { int event; int (*decode)(snd_midi_event_t *dev, unsigned char *buf, int len, snd_seq_event_t *ev); } extra_event[] = { {SNDRV_SEQ_EVENT_CONTROL14, extra_decode_ctrl14}, {SNDRV_SEQ_EVENT_NONREGPARAM, extra_decode_xrpn}, {SNDRV_SEQ_EVENT_REGPARAM, extra_decode_xrpn}, }; /* * new/delete record */ int snd_midi_event_new(int bufsize, snd_midi_event_t **rdev) { snd_midi_event_t *dev; *rdev = NULL; dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; if (bufsize > 0) { dev->buf = kmalloc(bufsize, GFP_KERNEL); if (dev->buf == NULL) { kfree(dev); return -ENOMEM; } } dev->bufsize = bufsize; dev->lastcmd = 0xff; spin_lock_init(&dev->lock); *rdev = dev; return 0; } void snd_midi_event_free(snd_midi_event_t *dev) { if (dev != NULL) { kfree(dev->buf); kfree(dev); } } /* * initialize record */ inline static void reset_encode(snd_midi_event_t *dev) { dev->read = 0; dev->qlen = 0; dev->type = 0; } void snd_midi_event_reset_encode(snd_midi_event_t *dev) { unsigned long flags; spin_lock_irqsave(&dev->lock, 
flags); reset_encode(dev); spin_unlock_irqrestore(&dev->lock, flags); } void snd_midi_event_reset_decode(snd_midi_event_t *dev) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); dev->lastcmd = 0xff; spin_unlock_irqrestore(&dev->lock, flags); } void snd_midi_event_init(snd_midi_event_t *dev) { snd_midi_event_reset_encode(dev); snd_midi_event_reset_decode(dev); } void snd_midi_event_no_status(snd_midi_event_t *dev, int on) { dev->nostat = on ? 1 : 0; } /* * resize buffer */ int snd_midi_event_resize_buffer(snd_midi_event_t *dev, int bufsize) { unsigned char *new_buf, *old_buf; unsigned long flags; if (bufsize == dev->bufsize) return 0; new_buf = kmalloc(bufsize, GFP_KERNEL); if (new_buf == NULL) return -ENOMEM; spin_lock_irqsave(&dev->lock, flags); old_buf = dev->buf; dev->buf = new_buf; dev->bufsize = bufsize; reset_encode(dev); spin_unlock_irqrestore(&dev->lock, flags); kfree(old_buf); return 0; } /* * read bytes and encode to sequencer event if finished * return the size of encoded bytes */ long snd_midi_event_encode(snd_midi_event_t *dev, unsigned char *buf, long count, snd_seq_event_t *ev) { long result = 0; int rc; ev->type = SNDRV_SEQ_EVENT_NONE; while (count-- > 0) { rc = snd_midi_event_encode_byte(dev, *buf++, ev); result++; if (rc < 0) return rc; else if (rc > 0) return result; } return result; } /* * read one byte and encode to sequencer event: * return 1 if MIDI bytes are encoded to an event * 0 data is not finished * negative for error */ int snd_midi_event_encode_byte(snd_midi_event_t *dev, int c, snd_seq_event_t *ev) { int rc = 0; unsigned long flags; c &= 0xff; if (c >= MIDI_CMD_COMMON_CLOCK) { /* real-time event */ ev->type = status_event[ST_SPECIAL + c - 0xf0].event; ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_FIXED; return 1; } spin_lock_irqsave(&dev->lock, flags); if (dev->qlen > 0) { /* rest of command */ dev->buf[dev->read++] = c; if (dev->type != ST_SYSEX) dev->qlen--; } else { /* new command */ 
dev->read = 1; if (c & 0x80) { dev->buf[0] = c; if ((c & 0xf0) == 0xf0) /* special events */ dev->type = (c & 0x0f) + ST_SPECIAL; else dev->type = (c >> 4) & 0x07; dev->qlen = status_event[dev->type].qlen; } else { /* process this byte as argument */ dev->buf[dev->read++] = c; dev->qlen = status_event[dev->type].qlen - 1; } } if (dev->qlen == 0) { ev->type = status_event[dev->type].event; ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_FIXED; if (status_event[dev->type].encode) /* set data values */ status_event[dev->type].encode(dev, ev); rc = 1; } else if (dev->type == ST_SYSEX) { if (c == MIDI_CMD_COMMON_SYSEX_END || dev->read >= dev->bufsize) { ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_VARIABLE; ev->type = SNDRV_SEQ_EVENT_SYSEX; ev->data.ext.len = dev->read; ev->data.ext.ptr = dev->buf; if (c != MIDI_CMD_COMMON_SYSEX_END) dev->read = 0; /* continue to parse */ else reset_encode(dev); /* all parsed */ rc = 1; } } spin_unlock_irqrestore(&dev->lock, flags); return rc; } /* encode note event */ static void note_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.note.channel = dev->buf[0] & 0x0f; ev->data.note.note = dev->buf[1]; ev->data.note.velocity = dev->buf[2]; } /* encode one parameter controls */ static void one_param_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.value = dev->buf[1]; } /* encode pitch wheel change */ static void pitchbend_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.value = (int)dev->buf[2] * 128 + (int)dev->buf[1] - 8192; } /* encode midi control change */ static void two_param_ctrl_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.param = dev->buf[1]; ev->data.control.value = dev->buf[2]; } /* encode one parameter value*/ static void 
one_param_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.control.value = dev->buf[1]; } /* encode song position */ static void songpos_event(snd_midi_event_t *dev, snd_seq_event_t *ev) { ev->data.control.value = (int)dev->buf[2] * 128 + (int)dev->buf[1]; } /* * decode from a sequencer event to midi bytes * return the size of decoded midi events */ long snd_midi_event_decode(snd_midi_event_t *dev, unsigned char *buf, long count, snd_seq_event_t *ev) { unsigned int cmd, type; if (ev->type == SNDRV_SEQ_EVENT_NONE) return -ENOENT; for (type = 0; type < ARRAY_SIZE(status_event); type++) { if (ev->type == status_event[type].event) goto __found; } for (type = 0; type < ARRAY_SIZE(extra_event); type++) { if (ev->type == extra_event[type].event) return extra_event[type].decode(dev, buf, count, ev); } return -ENOENT; __found: if (type >= ST_SPECIAL) cmd = 0xf0 + (type - ST_SPECIAL); else /* data.note.channel and data.control.channel is identical */ cmd = 0x80 | (type << 4) | (ev->data.note.channel & 0x0f); if (cmd == MIDI_CMD_COMMON_SYSEX) { snd_midi_event_reset_decode(dev); return snd_seq_expand_var_event(ev, count, buf, 1, 0); } else { int qlen; unsigned char xbuf[4]; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if ((cmd & 0xf0) == 0xf0 || dev->lastcmd != cmd || dev->nostat) { dev->lastcmd = cmd; spin_unlock_irqrestore(&dev->lock, flags); xbuf[0] = cmd; if (status_event[type].decode) status_event[type].decode(ev, xbuf + 1); qlen = status_event[type].qlen + 1; } else { spin_unlock_irqrestore(&dev->lock, flags); if (status_event[type].decode) status_event[type].decode(ev, xbuf + 0); qlen = status_event[type].qlen; } if (count < qlen) return -ENOMEM; memcpy(buf, xbuf, qlen); return qlen; } } /* decode note event */ static void note_decode(snd_seq_event_t *ev, unsigned char *buf) { buf[0] = ev->data.note.note & 0x7f; buf[1] = ev->data.note.velocity & 0x7f; } /* decode one parameter controls */ static void one_param_decode(snd_seq_event_t *ev, 
unsigned char *buf) { buf[0] = ev->data.control.value & 0x7f; } /* decode pitch wheel change */ static void pitchbend_decode(snd_seq_event_t *ev, unsigned char *buf) { int value = ev->data.control.value + 8192; buf[0] = value & 0x7f; buf[1] = (value >> 7) & 0x7f; } /* decode midi control change */ static void two_param_decode(snd_seq_event_t *ev, unsigned char *buf) { buf[0] = ev->data.control.param & 0x7f; buf[1] = ev->data.control.value & 0x7f; } /* decode song position */ static void songpos_decode(snd_seq_event_t *ev, unsigned char *buf) { buf[0] = ev->data.control.value & 0x7f; buf[1] = (ev->data.control.value >> 7) & 0x7f; } /* decode 14bit control */ static int extra_decode_ctrl14(snd_midi_event_t *dev, unsigned char *buf, int count, snd_seq_event_t *ev) { unsigned char cmd; int idx = 0; cmd = MIDI_CMD_CONTROL|(ev->data.control.channel & 0x0f); if (ev->data.control.param < 0x20) { if (count < 4) return -ENOMEM; if (dev->nostat && count < 6) return -ENOMEM; if (cmd != dev->lastcmd || dev->nostat) { if (count < 5) return -ENOMEM; buf[idx++] = dev->lastcmd = cmd; } buf[idx++] = ev->data.control.param; buf[idx++] = (ev->data.control.value >> 7) & 0x7f; if (dev->nostat) buf[idx++] = cmd; buf[idx++] = ev->data.control.param + 0x20; buf[idx++] = ev->data.control.value & 0x7f; } else { if (count < 2) return -ENOMEM; if (cmd != dev->lastcmd || dev->nostat) { if (count < 3) return -ENOMEM; buf[idx++] = dev->lastcmd = cmd; } buf[idx++] = ev->data.control.param & 0x7f; buf[idx++] = ev->data.control.value & 0x7f; } return idx; } /* decode reg/nonreg param */ static int extra_decode_xrpn(snd_midi_event_t *dev, unsigned char *buf, int count, snd_seq_event_t *ev) { unsigned char cmd; char *cbytes; static char cbytes_nrpn[4] = { MIDI_CTL_NONREG_PARM_NUM_MSB, MIDI_CTL_NONREG_PARM_NUM_LSB, MIDI_CTL_MSB_DATA_ENTRY, MIDI_CTL_LSB_DATA_ENTRY }; static char cbytes_rpn[4] = { MIDI_CTL_REGIST_PARM_NUM_MSB, MIDI_CTL_REGIST_PARM_NUM_LSB, MIDI_CTL_MSB_DATA_ENTRY, MIDI_CTL_LSB_DATA_ENTRY 
}; unsigned char bytes[4]; int idx = 0, i; if (count < 8) return -ENOMEM; if (dev->nostat && count < 12) return -ENOMEM; cmd = MIDI... [truncated message content] |
Update of /cvsroot/netnice/Linux/drivers/usb/core In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv9332 Added Files: Tag: netnice2612 Kconfig Makefile buffer.c config.c devices.c devio.c file.c hcd-pci.c hcd.c hcd.h hub.c hub.h inode.c message.c otg_whitelist.h sysfs.c urb.c usb.c usb.h Log Message: Adding missing Linux/drivers/usb/core -Matt --- NEW FILE: message.c --- /* * message.c - synchronous message handling */ #include <linux/config.h> #ifdef CONFIG_USB_DEBUG #define DEBUG #else #undef DEBUG #endif #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/timer.h> [...1424 lines suppressed...] // synchronous request completion model EXPORT_SYMBOL(usb_control_msg); EXPORT_SYMBOL(usb_bulk_msg); EXPORT_SYMBOL(usb_sg_init); EXPORT_SYMBOL(usb_sg_cancel); EXPORT_SYMBOL(usb_sg_wait); // synchronous control message convenience routines EXPORT_SYMBOL(usb_get_descriptor); EXPORT_SYMBOL(usb_get_status); EXPORT_SYMBOL(usb_get_string); EXPORT_SYMBOL(usb_string); // synchronous calls that also maintain usbcore state EXPORT_SYMBOL(usb_clear_halt); EXPORT_SYMBOL(usb_reset_configuration); EXPORT_SYMBOL(usb_set_interface); --- NEW FILE: config.c --- #include <linux/config.h> #ifdef CONFIG_USB_DEBUG #define DEBUG #endif #include <linux/usb.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <asm/byteorder.h> #include "usb.h" #include "hcd.h" #define USB_MAXALTSETTING 128 /* Hard limit */ #define USB_MAXENDPOINTS 30 /* Hard limit */ #define USB_MAXCONFIG 8 /* Arbitrary limit */ static inline const char *plural(int n) { return (n == 1 ? 
"" : "s"); } static int find_next_descriptor(unsigned char *buffer, int size, int dt1, int dt2, int *num_skipped) { struct usb_descriptor_header *h; int n = 0; unsigned char *buffer0 = buffer; /* Find the next descriptor of type dt1 or dt2 */ while (size > 0) { h = (struct usb_descriptor_header *) buffer; if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2) break; buffer += h->bLength; size -= h->bLength; ++n; } /* Store the number of descriptors skipped and return the * number of bytes skipped */ if (num_skipped) *num_skipped = n; return buffer - buffer0; } static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE) n = USB_DT_ENDPOINT_AUDIO_SIZE; else if (d->bLength >= USB_DT_ENDPOINT_SIZE) n = USB_DT_ENDPOINT_SIZE; else { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint descriptor of length %d, skipping\n", cfgno, inum, asnum, d->bLength); goto skip_to_next_endpoint_or_interface_descriptor; } i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK; if (i >= 16 || i == 0) { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } /* Only store as many endpoints as we have room for */ if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ifp->desc.bNumEndpoints; memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* Skip over any Class Specific or Vendor Specific descriptors; * find the next endpoint or interface descriptor */ 
endpoint->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); endpoint->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "endpoint"); return buffer - buffer0 + i; skip_to_next_endpoint_or_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } void usb_release_interface_cache(struct kref *ref) { struct usb_interface_cache *intfc = ref_to_usb_interface_cache(ref); int j; for (j = 0; j < intfc->num_altsetting; j++) kfree(intfc->altsetting[j].endpoint); kfree(intfc); } static int usb_parse_interface(struct device *ddev, int cfgno, struct usb_host_config *config, unsigned char *buffer, int size, u8 inums[], u8 nalts[]) { unsigned char *buffer0 = buffer; struct usb_interface_descriptor *d; int inum, asnum; struct usb_interface_cache *intfc; struct usb_host_interface *alt; int i, n; int len, retval; int num_ep, num_ep_orig; d = (struct usb_interface_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength < USB_DT_INTERFACE_SIZE) goto skip_to_next_interface_descriptor; /* Which interface entry is this? 
*/
	/* Tail of usb_parse_interface() (head is earlier in the file):
	 * look up the interface cache for this interface number. */
	intfc = NULL;
	inum = d->bInterfaceNumber;
	for (i = 0; i < config->desc.bNumInterfaces; ++i) {
		if (inums[i] == inum) {
			intfc = config->intf_cache[i];
			break;
		}
	}
	/* Unknown interface number, or more altsettings than were counted
	 * in the pre-scan: skip this descriptor entirely. */
	if (!intfc || intfc->num_altsetting >= nalts[i])
		goto skip_to_next_interface_descriptor;

	/* Check for duplicate altsetting entries */
	asnum = d->bAlternateSetting;
	for ((i = 0, alt = &intfc->altsetting[0]);
	     i < intfc->num_altsetting;
	     (++i, ++alt)) {
		if (alt->desc.bAlternateSetting == asnum) {
			dev_warn(ddev, "Duplicate descriptor for config %d "
			    "interface %d altsetting %d, skipping\n",
			    cfgno, inum, asnum);
			goto skip_to_next_interface_descriptor;
		}
	}

	/* 'alt' now points one past the last used slot, i.e. the free one. */
	++intfc->num_altsetting;
	memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE);

	/* Skip over any Class Specific or Vendor Specific descriptors;
	 * find the first endpoint or interface descriptor */
	alt->extra = buffer;
	i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
	    USB_DT_INTERFACE, &n);
	alt->extralen = i;
	if (n > 0)
		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
		    n, plural(n), "interface");
	buffer += i;
	size -= i;

	/* Allocate space for the right(?) number of endpoints */
	num_ep = num_ep_orig = alt->desc.bNumEndpoints;
	alt->desc.bNumEndpoints = 0;		// Use as a counter
	if (num_ep > USB_MAXENDPOINTS) {
		dev_warn(ddev, "too many endpoints for config %d interface %d "
		    "altsetting %d: %d, using maximum allowed: %d\n",
		    cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
		num_ep = USB_MAXENDPOINTS;
	}
	len = sizeof(struct usb_host_endpoint) * num_ep;
	alt->endpoint = kmalloc(len, GFP_KERNEL);
	if (!alt->endpoint)
		return -ENOMEM;
	memset(alt->endpoint, 0, len);

	/* Parse all the endpoint descriptors */
	n = 0;
	while (size > 0) {
		/* Stop at the next interface descriptor; everything up to it
		 * belongs to this altsetting. */
		if (((struct usb_descriptor_header *) buffer)->bDescriptorType
		     == USB_DT_INTERFACE)
			break;
		retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
		    num_ep, buffer, size);
		if (retval < 0)
			return retval;
		++n;
		buffer += retval;
		size -= retval;
	}

	if (n != num_ep_orig)
		dev_warn(ddev, "config %d interface %d altsetting %d has %d "
		    "endpoint descriptor%s, different from the interface "
		    "descriptor's value: %d\n",
		    cfgno, inum, asnum, n, plural(n), num_ep_orig);
	return buffer - buffer0;

skip_to_next_interface_descriptor:
	i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
	    USB_DT_INTERFACE, NULL);
	return buffer - buffer0 + i;
}

/* Parse one raw configuration descriptor blob into 'config'.
 * Two passes: first a pre-scan counting altsettings per interface number
 * (inums[]/nalts[]), then allocation of the interface caches and a real
 * parse of every interface/altsetting via usb_parse_interface().
 * Returns 0 on success or a negative errno. */
static int usb_parse_configuration(struct device *ddev, int cfgidx,
    struct usb_host_config *config, unsigned char *buffer, int size)
{
	unsigned char *buffer0 = buffer;
	int cfgno;
	int nintf, nintf_orig;
	int i, j, n;
	struct usb_interface_cache *intfc;
	unsigned char *buffer2;
	int size2;
	struct usb_descriptor_header *header;
	int len, retval;
	u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];

	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
	    config->desc.bLength < USB_DT_CONFIG_SIZE) {
		dev_err(ddev, "invalid descriptor for config index %d: "
		    "type = 0x%X, length = %d\n", cfgidx,
		    config->desc.bDescriptorType, config->desc.bLength);
		return -EINVAL;
	}
	cfgno = config->desc.bConfigurationValue;

	buffer += config->desc.bLength;
	size -= config->desc.bLength;

	nintf = nintf_orig = config->desc.bNumInterfaces;
	if (nintf > USB_MAXINTERFACES) {
		dev_warn(ddev, "config %d has too many interfaces: %d, "
		    "using maximum allowed: %d\n",
		    cfgno, nintf, USB_MAXINTERFACES);
		nintf = USB_MAXINTERFACES;
	}

	/* Go through the descriptors, checking their length and counting the
	 * number of altsettings for each interface */
	n = 0;
	for ((buffer2 = buffer, size2 = size);
	      size2 > 0;
	     (buffer2 += header->bLength, size2 -= header->bLength)) {

		/* NOTE(review): size2 is int and sizeof() is size_t, so this
		 * comparison is done unsigned; size2 is kept > 0 by the loop
		 * condition, so no negative value reaches it — confirm. */
		if (size2 < sizeof(struct usb_descriptor_header)) {
			dev_warn(ddev, "config %d descriptor has %d excess "
			    "byte%s, ignoring\n",
			    cfgno, size2, plural(size2));
			break;
		}

		header = (struct usb_descriptor_header *) buffer2;
		if ((header->bLength > size2) || (header->bLength < 2)) {
			dev_warn(ddev, "config %d has an invalid descriptor "
			    "of length %d, skipping remainder of the config\n",
			    cfgno, header->bLength);
			break;
		}

		if (header->bDescriptorType == USB_DT_INTERFACE) {
			struct usb_interface_descriptor *d;
			int inum;

			d = (struct usb_interface_descriptor *) header;
			if (d->bLength < USB_DT_INTERFACE_SIZE) {
				dev_warn(ddev, "config %d has an invalid "
				    "interface descriptor of length %d, "
				    "skipping\n", cfgno, d->bLength);
				continue;
			}

			inum = d->bInterfaceNumber;
			if (inum >= nintf_orig)
				dev_warn(ddev, "config %d has an invalid "
				    "interface number: %d but max is %d\n",
				    cfgno, inum, nintf_orig - 1);

			/* Have we already encountered this interface?
			 * Count its altsettings */
			for (i = 0; i < n; ++i) {
				if (inums[i] == inum)
					break;
			}
			if (i < n) {
				/* nalts[] is u8; clamp at 255 */
				if (nalts[i] < 255)
					++nalts[i];
			} else if (n < USB_MAXINTERFACES) {
				inums[n] = inum;
				nalts[n] = 1;
				++n;
			}

		} else if (header->bDescriptorType == USB_DT_DEVICE ||
		    header->bDescriptorType == USB_DT_CONFIG)
			dev_warn(ddev, "config %d contains an unexpected "
			    "descriptor of type 0x%X, skipping\n",
			    cfgno, header->bDescriptorType);

	}	/* for ((buffer2 = buffer, size2 = size); ...) */
	/* Truncate to the region the pre-scan actually validated. */
	size = buffer2 - buffer;
	config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);

	if (n != nintf)
		dev_warn(ddev, "config %d has %d interface%s, different from "
		    "the descriptor's value: %d\n",
		    cfgno, n, plural(n), nintf_orig);
	else if (n == 0)
		dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
	config->desc.bNumInterfaces = nintf = n;

	/* Check for missing interface numbers */
	for (i = 0; i < nintf; ++i) {
		for (j = 0; j < nintf; ++j) {
			if (inums[j] == i)
				break;
		}
		if (j >= nintf)
			dev_warn(ddev, "config %d has no interface number "
			    "%d\n", cfgno, i);
	}

	/* Allocate the usb_interface_caches and altsetting arrays */
	for (i = 0; i < nintf; ++i) {
		j = nalts[i];
		if (j > USB_MAXALTSETTING) {
			dev_warn(ddev, "too many alternate settings for "
			    "config %d interface %d: %d, "
			    "using maximum allowed: %d\n",
			    cfgno, inums[i], j, USB_MAXALTSETTING);
			nalts[i] = j = USB_MAXALTSETTING;
		}
		/* Cache struct has the altsetting array allocated inline. */
		len = sizeof(*intfc) + sizeof(struct usb_host_interface) * j;
		config->intf_cache[i] = intfc = kmalloc(len, GFP_KERNEL);
		if (!intfc)
			return -ENOMEM;
		memset(intfc, 0, len);
		kref_init(&intfc->ref);
	}

	/* Skip over any Class Specific or Vendor Specific descriptors;
	 * find the first interface descriptor */
	config->extra = buffer;
	i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
	    USB_DT_INTERFACE, &n);
	config->extralen = i;
	if (n > 0)
		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
		    n, plural(n), "configuration");
	buffer += i;
	size -= i;

	/* Parse all the interface/altsetting descriptors */
	while (size > 0) {
		retval = usb_parse_interface(ddev, cfgno, config,
		    buffer, size, inums, nalts);
		if (retval < 0)
			return retval;

		buffer += retval;
		size -= retval;
	}

	/* Check for missing altsettings */
	for (i = 0; i < nintf; ++i) {
		intfc = config->intf_cache[i];
		for (j = 0; j < intfc->num_altsetting; ++j) {
			for (n = 0; n < intfc->num_altsetting; ++n) {
				if (intfc->altsetting[n].desc.
				    bAlternateSetting == j)
					break;
			}
			if (n >= intfc->num_altsetting)
				dev_warn(ddev, "config %d interface %d has no "
				    "altsetting %d\n", cfgno, inums[i], j);
		}
	}

	return 0;
}

// hub-only!! ... and only exported for reset/reinit path.
// otherwise used internally on disconnect/destroy path
/* Free everything usb_get_configuration() allocated: raw descriptor
 * buffers, config strings, and the interface caches (kref-released). */
void usb_destroy_configuration(struct usb_device *dev)
{
	int c, i;

	if (!dev->config)
		return;

	if (dev->rawdescriptors) {
		for (i = 0; i < dev->descriptor.bNumConfigurations; i++)
			kfree(dev->rawdescriptors[i]);

		kfree(dev->rawdescriptors);
		dev->rawdescriptors = NULL;
	}

	for (c = 0; c < dev->descriptor.bNumConfigurations; c++) {
		struct usb_host_config *cf = &dev->config[c];

		kfree(cf->string);
		cf->string = NULL;

		for (i = 0; i < cf->desc.bNumInterfaces; i++) {
			if (cf->intf_cache[i])
				kref_put(&cf->intf_cache[i]->ref,
				    usb_release_interface_cache);
		}
	}
	kfree(dev->config);
	dev->config = NULL;
}

// hub-only!! ... and only in reset path, or usb_new_device()
// (used by real hubs and virtual root hubs)
/* Read and parse every configuration descriptor of 'dev'.
 * Allocates dev->config and dev->rawdescriptors; on failure the partially
 * filled arrays are left for usb_destroy_configuration() to free. */
int usb_get_configuration(struct usb_device *dev)
{
	struct device *ddev = &dev->dev;
	int ncfg = dev->descriptor.bNumConfigurations;
	int result = -ENOMEM;
	unsigned int cfgno, length;
	unsigned char *buffer;
	unsigned char *bigbuffer;
	struct usb_config_descriptor *desc;

	if (ncfg > USB_MAXCONFIG) {
		dev_warn(ddev, "too many configurations: %d, "
		    "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
		dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
	}

	if (ncfg < 1) {
		dev_err(ddev, "no configurations\n");
		return -EINVAL;
	}

	length = ncfg * sizeof(struct usb_host_config);
	dev->config = kmalloc(length, GFP_KERNEL);
	if (!dev->config)
		goto err2;
	memset(dev->config, 0, length);

	length = ncfg * sizeof(char *);
	dev->rawdescriptors = kmalloc(length, GFP_KERNEL);
	if (!dev->rawdescriptors)
		goto err2;
	memset(dev->rawdescriptors, 0, length);

	/* Small scratch buffer, just big enough for the config header. */
	buffer = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
	if (!buffer)
		goto err2;
	desc = (struct usb_config_descriptor *)buffer;

	for (cfgno = 0; cfgno < ncfg; cfgno++) {
		/* We grab just
the first descriptor so we know how long
		 * the whole configuration is */
		result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
		    buffer, USB_DT_CONFIG_SIZE);
		if (result < 0) {
			dev_err(ddev, "unable to read config index %d "
			    "descriptor/%s\n", cfgno, "start");
			goto err;
		} else if (result < 4) {
			dev_err(ddev, "config index %d descriptor too short "
			    "(expected %i, got %i)\n", cfgno,
			    USB_DT_CONFIG_SIZE, result);
			result = -EINVAL;
			goto err;
		}
		length = max((int) le16_to_cpu(desc->wTotalLength),
		    USB_DT_CONFIG_SIZE);

		/* Now that we know the length, get the whole thing */
		bigbuffer = kmalloc(length, GFP_KERNEL);
		if (!bigbuffer) {
			result = -ENOMEM;
			goto err;
		}
		result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
		    bigbuffer, length);
		if (result < 0) {
			dev_err(ddev, "unable to read config index %d "
			    "descriptor/%s\n", cfgno, "all");
			kfree(bigbuffer);
			goto err;
		}
		if (result < length) {
			dev_warn(ddev, "config index %d descriptor too short "
			    "(expected %i, got %i)\n", cfgno, length, result);
			length = result;
		}

		dev->rawdescriptors[cfgno] = bigbuffer;

		result = usb_parse_configuration(&dev->dev, cfgno,
		    &dev->config[cfgno], bigbuffer, length);
		if (result < 0) {
			/* bump cfgno so the error path below still counts the
			 * raw descriptor we stored, and destroy frees it */
			++cfgno;
			goto err;
		}
	}
	result = 0;

err:
	kfree(buffer);
	/* record how many configs were actually read */
	dev->descriptor.bNumConfigurations = cfgno;
err2:
	if (result == -ENOMEM)
		dev_err(ddev, "out of memory\n");
	return result;
}

--- NEW FILE: urb.c ---

#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>

#ifdef CONFIG_USB_DEBUG
	#define DEBUG
#else
	#undef DEBUG
#endif
#include <linux/usb.h>
#include "hcd.h"

#define to_urb(d) container_of(d, struct urb, kref)

/* kref release callback: runs when the last reference is dropped. */
static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		spin_lock_init(&urb->lock);
	}
}

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver want to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, int mem_flags)
{
	struct urb *urb;

	/* iso frame descriptors are allocated inline after the urb */
	urb = (struct urb *)kmalloc(sizeof(struct urb) +
	    iso_packets * sizeof(struct usb_iso_packet_descriptor),
	    mem_flags);
	if (!urb) {
		err("alloc_urb: kmalloc failed");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed, that must be
 * done elsewhere.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb * usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number. If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB. When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request. The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start. Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless b) or c) apply
 *
 */
int usb_submit_urb(struct urb *urb, int mem_flags)
{
	int pipe, temp, max;
	struct usb_device *dev;
	struct usb_operations *op;
	int is_out;

	/* reject unset, already-queued, or completion-less urbs */
	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (dev->state == USB_STATE_SUSPENDED)
		return -EHOSTUNREACH;
	if (!(op = dev->bus->op) || !op->submit_urb)
		return -ENODEV;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->bandwidth = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype (pipe);
	is_out = usb_pipeout (pipe);

	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */

	max = usb_maxpacket (dev, pipe, is_out);
	if (max <= 0) {
		dev_dbg(&dev->dev,
		    "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
		    usb_pipeendpoint (pipe), is_out ? "out" : "in",
		    __FUNCTION__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc [n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc [n].status = -EXDEV;
			urb->iso_frame_desc [n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length < 0)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int orig_flags = urb->transfer_flags;
	unsigned int allowed;

	/* enforce simple/standard policy */
	allowed = URB_ASYNC_UNLINK;	// affects later unlinks
	allowed |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
	allowed |= URB_NO_INTERRUPT;
	switch (temp) {
	case PIPE_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case PIPE_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case PIPE_ISOCHRONOUS:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;

	/* fail if submitter gave bogus flags */
	if (urb->transfer_flags != orig_flags) {
		err ("BOGUS urb flags, %x --> %x",
		    orig_flags, urb->transfer_flags);
		return -EINVAL;
	}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (temp) {
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		/* too small? */
		if (urb->interval <= 0)
			return -EINVAL;
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_HIGH:	/* units are microframes */
			// NOTE usb handles 2^15
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			temp = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (temp == PIPE_INTERRUPT) {
				if (urb->interval > 255)
					return -EINVAL;
				// NOTE ohci only handles up to 32
				temp = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				// NOTE usb and ohci handle up to 2^15
				temp = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		/* round down to power of two ('temp' starts at the cap) */
		while (temp > urb->interval)
			temp >>= 1;
		urb->interval = temp;
	}

	return op->submit_urb (urb, mem_flags);
}

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only
 * once per submission, and may be canceled only once per submission.
 * Successful cancellation means the request's completion handler will
 * be called with a status code indicating that the request has been
 * canceled (rather than any other code) and will quickly be removed
 * from host controller data structures.
 *
 * In the past, clearing the URB_ASYNC_UNLINK transfer flag for the
 * URB indicated that the request was synchronous. This usage is now
 * deprecated; if the flag is clear the call will be forwarded to
 * usb_kill_urb() and the return value will be 0. In the future, drivers
 * should call usb_kill_urb() directly for synchronous unlinking.
 *
 * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this
 * request is asynchronous. Success is indicated by returning -EINPROGRESS,
 * at which time the URB will normally have been unlinked but not yet
 * given back to the device driver. When it is called, the completion
 * function will see urb->status == -ECONNRESET. Failure is indicated
 * by any other return value. Unlinking will fail when the URB is not
 * currently "linked" (i.e., it was never submitted, or it was unlinked
 * before, or the hardware is already finished with it), even if the
 * completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue stops, at least until that URB's completion routine
 * returns. It is guaranteed that the queue will not restart until all
 * its unlinked URBs have been fully retired, with their completion
 * routines run, even if that's not until some time after the original
 * completion handler returns. Normally the same behavior and guarantees
 * apply when an URB terminates because it was unlinked; however if an
 * URB is unlinked before the hardware has started to execute it, then
 * its queue is not guaranteed to stop until all the preceding URBs have
 * completed.
 *
 * This means that USB device drivers can safely build deep queues for
 * large or complex transfers, and clean them up reliably after any sort
 * of aborted transfer by unlinking all pending URBs at the first fault.
 *
 * Note that an URB terminating early because a short packet was received
 * will count as an error if and only if the URB_SHORT_NOT_OK flag is set.
 * Also, that all unlinks performed in any URB completion handler must
 * be asynchronous.
 *
 * Queues for isochronous endpoints are treated differently, because they
 * advance at fixed rates. Such queues do not stop when an URB is unlinked.
 * An unlinked URB may leave a gap in the stream of packets. It is undefined
 * whether such gaps can be filled in.
 *
 * When a control URB terminates with an error, it is likely that the
 * status stage of the transfer will not take place, even if it is merely
 * a soft error resulting from a short-packet with URB_SHORT_NOT_OK set.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	/* flag clear == legacy synchronous semantics: forward to kill */
	if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
#ifdef CONFIG_DEBUG_KERNEL
		if (printk_ratelimit()) {
			printk(KERN_NOTICE "usb_unlink_urb() is deprecated for "
				"synchronous unlinks. Use usb_kill_urb() instead.\n");
			WARN_ON(1);
		}
#endif
		usb_kill_urb(urb);
		return 0;
	}
	if (!(urb->dev && urb->dev->bus && urb->dev->bus->op))
		return -ENODEV;
	return urb->dev->bus->op->unlink_urb(urb, -ECONNRESET);
}

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 */
void usb_kill_urb(struct urb *urb)
{
	if (!(urb && urb->dev && urb->dev->bus && urb->dev->bus->op))
		return;
	/* raise 'reject' so resubmission attempts fail while we wait */
	spin_lock_irq(&urb->lock);
	++urb->reject;
	spin_unlock_irq(&urb->lock);

	urb->dev->bus->op->unlink_urb(urb, -ENOENT);
	/* block until the HCD has fully given the urb back */
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	spin_lock_irq(&urb->lock);
	--urb->reject;
	spin_unlock_irq(&urb->lock);
}

EXPORT_SYMBOL(usb_init_urb);
EXPORT_SYMBOL(usb_alloc_urb);
EXPORT_SYMBOL(usb_free_urb);
EXPORT_SYMBOL(usb_get_urb);
EXPORT_SYMBOL(usb_submit_urb);
EXPORT_SYMBOL(usb_unlink_urb);
EXPORT_SYMBOL(usb_kill_urb);

--- NEW FILE: Kconfig ---

#
# USB Core configuration
#
config USB_DEBUG
	bool "USB verbose debug messages"
	depends on USB
	help
	  Say Y here if you want the USB core & hub drivers to produce a bunch
	  of debug messages to the system log. Select this if you are having a
	  problem with USB support and want to see more of what is going on.

comment "Miscellaneous USB options"
	depends on USB

config USB_DEVICEFS
	bool "USB device filesystem"
	depends on USB
	---help---
	  If you say Y here (and to "/proc file system support" in the "File
	  systems" section, above), you will get a file /proc/bus/usb/devices
	  which lists the devices currently connected to your USB bus or
	  busses, and for every connected device a file named
	  "/proc/bus/usb/xxx/yyy", where xxx is the bus number and yyy the
	  device number; the latter files can be used by user space programs
	  to talk directly to the device. These files are "virtual", meaning
	  they are generated on the fly and not stored on the hard drive.

	  You may need to mount the usbfs file system to see the files, use
	  mount -t usbfs none /proc/bus/usb

	  For the format of the various /proc/bus/usb/ files, please read
	  <file:Documentation/usb/proc_usb_info.txt>.

	  Please note that this code is completely unrelated to devfs, the
	  "/dev file system support".

	  Most users want to say Y here.

config USB_BANDWIDTH
	bool "Enforce USB bandwidth allocation (EXPERIMENTAL)"
	depends on USB && EXPERIMENTAL
	help
	  If you say Y here, the USB subsystem enforces USB bandwidth
	  allocation and will prevent some device opens from succeeding
	  if they would cause USB bandwidth usage to go above 90% of
	  the bus bandwidth.

	  If you say N here, these conditions will cause warning messages
	  about USB bandwidth usage to be logged and some devices or
	  drivers may not work correctly.

config USB_DYNAMIC_MINORS
	bool "Dynamic USB minor allocation (EXPERIMENTAL)"
	depends on USB && EXPERIMENTAL
	help
	  If you say Y here, the USB subsystem will use dynamic minor
	  allocation for any device that uses the USB major number.
	  This means that you can have more than 16 of a single type
	  of device (like USB printers).

	  If you are unsure about this, say N here.

config USB_SUSPEND
	bool "USB suspend/resume (EXPERIMENTAL)"
	depends on USB && PM && EXPERIMENTAL
	help
	  If you say Y here, you can use driver calls or the sysfs
	  "power/state" file to suspend or resume individual USB
	  peripherals.

	  There are many related features, such as remote wakeup and
	  driver-specific suspend processing, that may not yet work as
	  expected.

	  If you are unsure about this, say N here.

config USB_OTG
	bool
	depends on USB && EXPERIMENTAL
	select USB_SUSPEND
	default n

config USB_OTG_WHITELIST
	bool "Rely on OTG Targeted Peripherals List"
	depends on USB_OTG
	default y
	help
	  If you say Y here, the "otg_whitelist.h" file will be used as a
	  product whitelist, so USB peripherals not listed there will be
	  rejected during enumeration. This behavior is required by the
	  USB OTG specification for all devices not on your product's
	  "Targeted Peripherals List".

	  Otherwise, peripherals not listed there will only generate a
	  warning and enumeration will continue. That's more like what
	  normal Linux-USB hosts do (other than the warning), and is
	  convenient for many stages of product development.
--- NEW FILE: devio.c --- /*****************************************************************************/ /* * devio.c -- User space communication with USB devices. * * Copyright (C) 1999-2000 Thomas Sailer (sa...@if...) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software [...1434 lines suppressed...] { struct dev_state *ps = (struct dev_state *)file->private_data; unsigned int mask = 0; poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= POLLOUT | POLLWRNORM; if (!connected(ps->dev)) mask |= POLLERR | POLLHUP; return mask; } struct file_operations usbfs_device_file_operations = { .llseek = usbdev_lseek, .read = usbdev_read, .poll = usbdev_poll, .ioctl = usbdev_ioctl, .open = usbdev_open, .release = usbdev_release, }; --- NEW FILE: sysfs.c --- /* * drivers/usb/core/sysfs.c * * (C) Copyright 2002 David Brownell * (C) Copyright 2002,2004 Greg Kroah-Hartman * (C) Copyright 2002,2004 IBM Corp. * * All of the sysfs file attributes for usb devices and interfaces. 
* */ #include <linux/config.h> #include <linux/kernel.h> #ifdef CONFIG_USB_DEBUG #define DEBUG #else #undef DEBUG #endif #include <linux/usb.h> #include "usb.h" /* Active configuration fields */ #define usb_actconfig_show(field, multiplier, format_string) \ static ssize_t show_##field (struct device *dev, char *buf) \ { \ struct usb_device *udev; \ struct usb_host_config *actconfig; \ \ udev = to_usb_device (dev); \ actconfig = udev->actconfig; \ if (actconfig) \ return sprintf (buf, format_string, \ actconfig->desc.field * multiplier); \ else \ return 0; \ } \ #define usb_actconfig_attr(field, multiplier, format_string) \ usb_actconfig_show(field, multiplier, format_string) \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_actconfig_attr (bNumInterfaces, 1, "%2d\n") usb_actconfig_attr (bmAttributes, 1, "%2x\n") usb_actconfig_attr (bMaxPower, 2, "%3dmA\n") static ssize_t show_configuration_string(struct device *dev, char *buf) { struct usb_device *udev; struct usb_host_config *actconfig; int len; udev = to_usb_device (dev); actconfig = udev->actconfig; if ((!actconfig) || (!actconfig->string)) return 0; len = sprintf(buf, actconfig->string, PAGE_SIZE); if (len < 0) return 0; buf[len] = '\n'; buf[len+1] = 0; return len+1; } static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL); /* configuration value is always present, and r/w */ usb_actconfig_show(bConfigurationValue, 1, "%u\n"); static ssize_t set_bConfigurationValue (struct device *dev, const char *buf, size_t count) { struct usb_device *udev = udev = to_usb_device (dev); int config, value; if (sscanf (buf, "%u", &config) != 1 || config > 255) return -EINVAL; usb_lock_device(udev); value = usb_set_configuration (udev, config); usb_unlock_device(udev); return (value < 0) ? 
value : count; } static DEVICE_ATTR(bConfigurationValue, S_IRUGO | S_IWUSR, show_bConfigurationValue, set_bConfigurationValue); /* String fields */ #define usb_string_attr(name) \ static ssize_t show_##name(struct device *dev, char *buf) \ { \ struct usb_device *udev; \ int len; \ \ udev = to_usb_device (dev); \ len = snprintf(buf, 256, "%s", udev->name); \ if (len < 0) \ return 0; \ buf[len] = '\n'; \ buf[len+1] = 0; \ return len+1; \ } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); usb_string_attr(product); usb_string_attr(manufacturer); usb_string_attr(serial); static ssize_t show_speed (struct device *dev, char *buf) { struct usb_device *udev; char *speed; udev = to_usb_device (dev); switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } return sprintf (buf, "%s\n", speed); } static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); static ssize_t show_devnum (struct device *dev, char *buf) { struct usb_device *udev; udev = to_usb_device (dev); return sprintf (buf, "%d\n", udev->devnum); } static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); static ssize_t show_version (struct device *dev, char *buf) { struct usb_device *udev; u16 bcdUSB; udev = to_usb_device(dev); bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB); return sprintf(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_maxchild (struct device *dev, char *buf) { struct usb_device *udev; udev = to_usb_device (dev); return sprintf (buf, "%d\n", udev->maxchild); } static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); /* Descriptor fields */ #define usb_descriptor_attr_le16(field, format_string) \ static ssize_t \ show_##field (struct device *dev, char *buf) \ { \ struct usb_device *udev; \ \ udev = to_usb_device (dev); \ return sprintf (buf, format_string, \ 
le16_to_cpu(udev->descriptor.field)); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_descriptor_attr_le16(idVendor, "%04x\n") usb_descriptor_attr_le16(idProduct, "%04x\n") usb_descriptor_attr_le16(bcdDevice, "%04x\n") #define usb_descriptor_attr(field, format_string) \ static ssize_t \ show_##field (struct device *dev, char *buf) \ { \ struct usb_device *udev; \ \ udev = to_usb_device (dev); \ return sprintf (buf, format_string, udev->descriptor.field); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_descriptor_attr (bDeviceClass, "%02x\n") usb_descriptor_attr (bDeviceSubClass, "%02x\n") usb_descriptor_attr (bDeviceProtocol, "%02x\n") usb_descriptor_attr (bNumConfigurations, "%d\n") static struct attribute *dev_attrs[] = { /* current configuration's attributes */ &dev_attr_bNumInterfaces.attr, &dev_attr_bConfigurationValue.attr, &dev_attr_bmAttributes.attr, &dev_attr_bMaxPower.attr, /* device attributes */ &dev_attr_idVendor.attr, &dev_attr_idProduct.attr, &dev_attr_bcdDevice.attr, &dev_attr_bDeviceClass.attr, &dev_attr_bDeviceSubClass.attr, &dev_attr_bDeviceProtocol.attr, &dev_attr_bNumConfigurations.attr, &dev_attr_speed.attr, &dev_attr_devnum.attr, &dev_attr_version.attr, &dev_attr_maxchild.attr, NULL, }; static struct attribute_group dev_attr_grp = { .attrs = dev_attrs, }; void usb_create_sysfs_dev_files (struct usb_device *udev) { struct device *dev = &udev->dev; sysfs_create_group(&dev->kobj, &dev_attr_grp); if (udev->manufacturer) device_create_file (dev, &dev_attr_manufacturer); if (udev->product) device_create_file (dev, &dev_attr_product); if (udev->serial) device_create_file (dev, &dev_attr_serial); device_create_file (dev, &dev_attr_configuration); } void usb_remove_sysfs_dev_files (struct usb_device *udev) { struct device *dev = &udev->dev; sysfs_remove_group(&dev->kobj, &dev_attr_grp); if (udev->descriptor.iManufacturer) device_remove_file(dev, &dev_attr_manufacturer); if (udev->descriptor.iProduct) 
device_remove_file(dev, &dev_attr_product); if (udev->descriptor.iSerialNumber) device_remove_file(dev, &dev_attr_serial); device_remove_file (dev, &dev_attr_configuration); } /* Interface fields */ #define usb_intf_attr(field, format_string) \ static ssize_t \ show_##field (struct device *dev, char *buf) \ { \ struct usb_interface *intf = to_usb_interface (dev); \ \ return sprintf (buf, format_string, intf->cur_altsetting->desc.field); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_intf_attr (bInterfaceNumber, "%02x\n") usb_intf_attr (bAlternateSetting, "%2d\n") usb_intf_attr (bNumEndpoints, "%02x\n") usb_intf_attr (bInterfaceClass, "%02x\n") usb_intf_attr (bInterfaceSubClass, "%02x\n") usb_intf_attr (bInterfaceProtocol, "%02x\n") static ssize_t show_interface_string(struct device *dev, char *buf) { struct usb_interface *intf; struct usb_device *udev; int len; intf = to_usb_interface (dev); udev = interface_to_usbdev (intf); len = snprintf(buf, 256, "%s", intf->cur_altsetting->string); if (len < 0) return 0; buf[len] = '\n'; buf[len+1] = 0; return len+1; } static DEVICE_ATTR(interface, S_IRUGO, show_interface_string, NULL); static ssize_t show_modalias(struct device *dev, char *buf) { struct usb_interface *intf; struct usb_device *udev; int len; intf = to_usb_interface(dev); udev = interface_to_usbdev(intf); len = sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), le16_to_cpu(udev->descriptor.bcdDevice), udev->descriptor.bDeviceClass, udev->descriptor.bDeviceSubClass, udev->descriptor.bDeviceProtocol); buf += len; if (udev->descriptor.bDeviceClass == 0) { struct usb_host_interface *alt = intf->cur_altsetting; return len + sprintf(buf, "%02Xisc%02Xip%02X\n", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol); } else { return len + sprintf(buf, "*isc*ip*\n"); } } static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); 
static struct attribute *intf_attrs[] = { &dev_attr_bInterfaceNumber.attr, &dev_attr_bAlternateSetting.attr, &dev_attr_bNumEndpoints.attr, &dev_attr_bInterfaceClass.attr, &dev_attr_bInterfaceSubClass.attr, &dev_attr_bInterfaceProtocol.attr, &dev_attr_modalias.attr, NULL, }; static struct attribute_group intf_attr_grp = { .attrs = intf_attrs, }; void usb_create_sysfs_intf_files (struct usb_interface *intf) { sysfs_create_group(&intf->dev.kobj, &intf_attr_grp); if (intf->cur_altsetting->string) device_create_file(&intf->dev, &dev_attr_interface); } void usb_remove_sysfs_intf_files (struct usb_interface *intf) { sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp); if (intf->cur_altsetting->string) device_remove_file(&intf->dev, &dev_attr_interface); } --- NEW FILE: hcd-pci.c --- /* * (C) Copyright David Brownell 2000-2002 * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/config.h> #ifdef CONFIG_USB_DEBUG #define DEBUG #else #undef DEBUG #endif #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <asm/io.h> #include <asm/irq.h> #include <linux/usb.h> #include "hcd.h" /* PCI-based HCs are common, but plenty of non-PCI HCs are used too */ /*-------------------------------------------------------------------------*/ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_pci_probe - initialize PCI-based HCDs * @dev: USB Host Controller being probed * @id: pci hotplug id connecting controller to HCD framework * Context: !in_interrupt() * * Allocates basic PCI resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. * * Store this function in the HCD's struct pci_driver as probe(). */ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id) { struct hc_driver *driver; struct usb_hcd *hcd; int retval; if (usb_disabled()) return -ENODEV; if (!id || !(driver = (struct hc_driver *) id->driver_data)) return -EINVAL; if (pci_enable_device (dev) < 0) return -ENODEV; dev->current_state = PCI_D0; dev->dev.power.power_state = PMSG_ON; if (!dev->irq) { dev_err (&dev->dev, "Found HC with no IRQ. 
Check BIOS/PCI %s setup!\n", pci_name(dev)); retval = -ENODEV; goto err1; } hcd = usb_create_hcd (driver, &dev->dev, pci_name(dev)); if (!hcd) { retval = -ENOMEM; goto err1; } if (driver->flags & HCD_MEMORY) { // EHCI, OHCI hcd->rsrc_start = pci_resource_start (dev, 0); hcd->rsrc_len = pci_resource_len (dev, 0); if (!request_mem_region (hcd->rsrc_start, hcd->rsrc_len, driver->description)) { dev_dbg (&dev->dev, "controller already in use\n"); retval = -EBUSY; goto err2; } hcd->regs = ioremap_nocache (hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg (&dev->dev, "error mapping memory\n"); retval = -EFAULT; goto err3; } } else { // UHCI int region; for (region = 0; region < PCI_ROM_RESOURCE; region++) { if (!(pci_resource_flags (dev, region) & IORESOURCE_IO)) continue; hcd->rsrc_start = pci_resource_start (dev, region); hcd->rsrc_len = pci_resource_len (dev, region); if (request_region (hcd->rsrc_start, hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { dev_dbg (&dev->dev, "no i/o regions available\n"); retval = -EBUSY; goto err1; } } #ifdef CONFIG_PCI_NAMES hcd->product_desc = dev->pretty_name; #endif pci_set_master (dev); retval = usb_add_hcd (hcd, dev->irq, SA_SHIRQ); if (retval != 0) goto err4; return retval; err4: if (driver->flags & HCD_MEMORY) { iounmap (hcd->regs); err3: release_mem_region (hcd->rsrc_start, hcd->rsrc_len); } else release_region (hcd->rsrc_start, hcd->rsrc_len); err2: usb_put_hcd (hcd); err1: pci_disable_device (dev); dev_err (&dev->dev, "init %s fail, %d\n", pci_name(dev), retval); return retval; } EXPORT_SYMBOL (usb_hcd_pci_probe); /* may be called without controller electrically present */ /* may be called with controller, bus, and devices active */ /** * usb_hcd_pci_remove - shutdown processing for PCI-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_pci_probe(), first invoking * the HCD's stop() method. 
It is always called from a thread * context, normally "rmmod", "apmd", or something similar. * * Store this function in the HCD's struct pci_driver as remove(). */ void usb_hcd_pci_remove (struct pci_dev *dev) { struct usb_hcd *hcd; hcd = pci_get_drvdata(dev); if (!hcd) return; usb_remove_hcd (hcd); if (hcd->driver->flags & HCD_MEMORY) { iounmap (hcd->regs); release_mem_region (hcd->rsrc_start, hcd->rsrc_len); } else { release_region (hcd->rsrc_start, hcd->rsrc_len); } usb_put_hcd (hcd); pci_disable_device(dev); } EXPORT_SYMBOL (usb_hcd_pci_remove); #ifdef CONFIG_PM /** * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD * @dev: USB Host Controller being suspended * @message: semantics in flux * * Store this function in the HCD's struct pci_driver as suspend(). */ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message) { struct usb_hcd *hcd; int retval = 0; int has_pci_pm; hcd = pci_get_drvdata(dev); /* FIXME until the generic PM interfaces change a lot more, this * can't use PCI D1 and D2 states. For example, the confusion * between messages and states will need to vanish, and messages * will need to provide a target system state again. * * It'll be important to learn characteristics of the target state, * especially on embedded hardware where the HCD will often be in * charge of an external VBUS power supply and one or more clocks. * Some target system states will leave them active; others won't. * (With PCI, that's often handled by platform BIOS code.) */ /* even when the PCI layer rejects some of the PCI calls * below, HCs can try global suspend and reduce DMA traffic. * PM-sensitive HCDs may already have done this. */ has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM); switch (hcd->state) { /* entry if root hub wasn't yet suspended ... from sysfs, * without autosuspend, or if USB_SUSPEND isn't configured. 
*/ case HC_STATE_RUNNING: hcd->state = HC_STATE_QUIESCING; retval = hcd->driver->suspend (hcd, message); if (retval) { dev_dbg (hcd->self.controller, "suspend fail, retval %d\n", retval); break; } hcd->state = HC_STATE_SUSPENDED; /* FALLTHROUGH */ /* entry with CONFIG_USB_SUSPEND, or hcds that autosuspend: the * controller and/or root hub will already have been suspended, * but it won't be ready for a PCI resume call. * * FIXME only CONFIG_USB_SUSPEND guarantees hub_suspend() will * have been called, otherwise root hub timers still run ... */ case HC_STATE_SUSPENDED: /* no DMA or IRQs except when HC is active */ if (dev->current_state == PCI_D0) { free_irq (hcd->irq, hcd); pci_save_state (dev); pci_disable_device (dev); } if (!has_pci_pm) { dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n"); break; } /* NOTE: dev->current_state becomes nonzero only here, and * only for devices that support PCI PM. Also, exiting * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset * some device state (e.g. as part of clock reinit). */ retval = pci_set_power_state (dev, PCI_D3hot); if (retval == 0) { dev_dbg (hcd->self.controller, "--> PCI D3\n"); pci_enable_wake (dev, PCI_D3hot, hcd->remote_wakeup); pci_enable_wake (dev, PCI_D3cold, hcd->remote_wakeup); } else if (retval < 0) { dev_dbg (&dev->dev, "PCI D3 suspend fail, %d\n", retval); (void) usb_hcd_pci_resume (dev); break; } break; default: dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n", hcd->state); WARN_ON(1); retval = -EINVAL; break; } /* update power_state **ONLY** to make sysfs happier */ if (retval == 0) dev->dev.power.power_state = message; return retval; } EXPORT_SYMBOL (usb_hcd_pci_suspend); /** * usb_hcd_pci_resume - power management resume of a PCI-based HCD * @dev: USB Host Controller being resumed * * Store this function in the HCD's struct pci_driver as resume(). 
*/ int usb_hcd_pci_resume (struct pci_dev *dev) { struct usb_hcd *hcd; int retval; hcd = pci_get_drvdata(dev); if (hcd->state != HC_STATE_SUSPENDED) { dev_dbg (hcd->self.controller, "can't resume, not suspended!\n"); return 0; } /* NOTE: chip docs cover clean "real suspend" cases (what Linux * calls "standby", "suspend to RAM", and so on). There are also * dirty cases when swsusp fakes a suspend in "shutdown" mode. */ if (dev->current_state != PCI_D0) { #ifdef DEBUG int pci_pm; u16 pmcr; pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM); pci_read_config_word(dev, pci_pm + PCI_PM_CTRL, &pmcr); pmcr &= PCI_PM_CTRL_STATE_MASK; if (pmcr) { /* Clean case: power to USB and to HC registers was * maintained; remote wakeup is easy. */ dev_dbg(hcd->self.controller, "resume from PCI D%d\n", pmcr); } else { /* Clean: HC lost Vcc power, D0 uninitialized * + Vaux may have preserved port and transceiver * state ... for remote wakeup from D3cold * + or not; HCD must reinit + re-enumerate * * Dirty: D0 semi-initialized cases with swsusp * + after BIOS init * + after Linux init (HCD statically linked) */ dev_dbg(hcd->self.controller, "PCI D0, from previous PCI D%d\n", dev->current_state); } #endif pci_enable_wake (dev, dev->current_state, 0); pci_enable_wake (dev, PCI_D3cold, 0); } else { /* Same basic cases: clean (powered/not), dirty */ dev_dbg(hcd->self.controller, "PCI legacy resume\n"); } /* NOTE: the PCI API itself is asymmetric here. We don't need to * pci_set_power_state(PCI_D0) since that's part of re-enabling; * but that won't re-enable bus mastering. Yet pci_disable_device() * explicitly disables bus mastering... 
*/ retval = pci_enable_device (dev); if (retval < 0) { dev_err (hcd->self.controller, "can't re-enable after resume, %d!\n", retval); return retval; } pci_set_master (dev); pci_restore_state (dev); dev->dev.power.power_state = PMSG_ON; hcd->state = HC_STATE_RESUMING; hcd->saw_irq = 0; retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ, hcd->irq_descr, hcd); if (retval < 0) { dev_err (hcd->self.controller, "can't restore IRQ after resume!\n"); usb_hc_died (hcd); return retval; } retval = hcd->driver->resume (hcd); if (!HC_IS_RUNNING (hcd->state)) { dev_dbg (hcd->self.controller, "resume fail, retval %d\n", retval); usb_hc_died (hcd); } return retval; } EXPORT_SYMBOL (usb_hcd_pci_resume); #endif /* CONFIG_PM */ --- NEW FILE: inode.c --- /*****************************************************************************/ /* * inode.c -- Inode/Dentry functions for the USB device file system. * * Copyright (C) 2000 Thomas Sailer (sa...@if...) * Copyright (C) 2001,2002,2004 Greg Kroah-Hartman (gr...@kr...) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Li... [truncated message content] |