From: Andy P. <at...@us...> - 2002-04-09 16:29:14
Update of /cvsroot/linux-vax/kernel-2.4/net/sunrpc
In directory usw-pr-cvs1:/tmp/cvs-serv32481/sunrpc
Modified Files:
auth.c auth_null.c auth_unix.c clnt.c pmap_clnt.c sched.c
stats.c sunrpc_syms.c svc.c svcsock.c sysctl.c xdr.c xprt.c
Log Message:
synch 2.4.15 commit 21
Index: auth.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/auth.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- auth.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ auth.c 9 Apr 2002 16:29:01 -0000 1.2
@@ -69,7 +69,7 @@
auth->au_ops->destroy(auth);
}
-spinlock_t rpc_credcache_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t rpc_credcache_lock = SPIN_LOCK_UNLOCKED;
/*
* Initialize RPC credential cache
@@ -81,42 +81,61 @@
auth->au_nextgc = jiffies + (auth->au_expire >> 1);
}
+/*
+ * Destroy an unreferenced credential
+ */
static inline void
-rpcauth_crdestroy(struct rpc_auth *auth, struct rpc_cred *cred)
+rpcauth_crdestroy(struct rpc_cred *cred)
{
#ifdef RPC_DEBUG
if (cred->cr_magic != RPCAUTH_CRED_MAGIC)
BUG();
cred->cr_magic = 0;
+ if (atomic_read(&cred->cr_count) || cred->cr_auth)
+ BUG();
#endif
- if (auth->au_ops->crdestroy)
- auth->au_ops->crdestroy(cred);
- else
- rpc_free(cred);
+ cred->cr_ops->crdestroy(cred);
}
/*
- * Clear the RPC credential cache
+ * Destroy a list of credentials
+ */
+static inline
+void rpcauth_destroy_credlist(struct rpc_cred *head)
+{
+ struct rpc_cred *cred;
+
+ while ((cred = head) != NULL) {
+ head = cred->cr_next;
+ rpcauth_crdestroy(cred);
+ }
+}
+
+/*
+ * Clear the RPC credential cache, and delete those credentials
+ * that are not referenced.
*/
void
rpcauth_free_credcache(struct rpc_auth *auth)
{
- struct rpc_cred **q, *cred;
- void (*destroy)(struct rpc_cred *);
+ struct rpc_cred **q, *cred, *free = NULL;
int i;
- if (!(destroy = auth->au_ops->crdestroy))
- destroy = (void (*)(struct rpc_cred *)) rpc_free;
-
spin_lock(&rpc_credcache_lock);
for (i = 0; i < RPC_CREDCACHE_NR; i++) {
q = &auth->au_credcache[i];
while ((cred = *q) != NULL) {
*q = cred->cr_next;
- destroy(cred);
+ cred->cr_auth = NULL;
+ if (atomic_read(&cred->cr_count) == 0) {
+ cred->cr_next = free;
+ free = cred;
+ } else
+ cred->cr_next = NULL;
}
}
spin_unlock(&rpc_credcache_lock);
+ rpcauth_destroy_credlist(free);
}
/*
@@ -133,9 +152,10 @@
for (i = 0; i < RPC_CREDCACHE_NR; i++) {
q = &auth->au_credcache[i];
while ((cred = *q) != NULL) {
- if (!cred->cr_count &&
+ if (!atomic_read(&cred->cr_count) &&
time_before(cred->cr_expire, jiffies)) {
*q = cred->cr_next;
+ cred->cr_auth = NULL;
cred->cr_next = free;
free = cred;
continue;
@@ -144,10 +164,7 @@
}
}
spin_unlock(&rpc_credcache_lock);
- while ((cred = free) != NULL) {
- free = cred->cr_next;
- rpcauth_crdestroy(auth, cred);
- }
+ rpcauth_destroy_credlist(free);
auth->au_nextgc = jiffies + auth->au_expire;
}
@@ -163,8 +180,8 @@
spin_lock(&rpc_credcache_lock);
cred->cr_next = auth->au_credcache[nr];
auth->au_credcache[nr] = cred;
- cred->cr_count++;
- cred->cr_expire = jiffies + auth->au_expire;
+ cred->cr_auth = auth;
+ get_rpccred(cred);
spin_unlock(&rpc_credcache_lock);
}
@@ -187,7 +204,7 @@
q = &auth->au_credcache[nr];
while ((cred = *q) != NULL) {
if (!(cred->cr_flags & RPCAUTH_CRED_DEAD) &&
- auth->au_ops->crmatch(cred, taskflags)) {
+ cred->cr_ops->crmatch(cred, taskflags)) {
*q = cred->cr_next;
break;
}
@@ -213,23 +230,23 @@
* Remove cred handle from cache
*/
static void
-rpcauth_remove_credcache(struct rpc_auth *auth, struct rpc_cred *cred)
+rpcauth_remove_credcache(struct rpc_cred *cred)
{
+ struct rpc_auth *auth = cred->cr_auth;
struct rpc_cred **q, *cr;
int nr;
nr = (cred->cr_uid & RPC_CREDCACHE_MASK);
- spin_lock(&rpc_credcache_lock);
q = &auth->au_credcache[nr];
while ((cr = *q) != NULL) {
if (cred == cr) {
*q = cred->cr_next;
cred->cr_next = NULL;
+ cred->cr_auth = NULL;
break;
}
q = &cred->cr_next;
}
- spin_unlock(&rpc_credcache_lock);
}
struct rpc_cred *
@@ -258,7 +275,7 @@
{
dprintk("RPC: matching %s cred %d\n",
auth->au_ops->au_name, taskflags);
- return auth->au_ops->crmatch(cred, taskflags);
+ return cred->cr_ops->crmatch(cred, taskflags);
}
void
@@ -266,26 +283,25 @@
{
dprintk("RPC: %4d holding %s cred %p\n",
task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred);
- if (task->tk_msg.rpc_cred) {
- spin_lock(&rpc_credcache_lock);
- task->tk_msg.rpc_cred->cr_count++;
- task->tk_msg.rpc_cred->cr_expire = jiffies + task->tk_auth->au_expire;
- spin_unlock(&rpc_credcache_lock);
- }
+ if (task->tk_msg.rpc_cred)
+ get_rpccred(task->tk_msg.rpc_cred);
}
void
-rpcauth_releasecred(struct rpc_auth *auth, struct rpc_cred *cred)
+put_rpccred(struct rpc_cred *cred)
{
- spin_lock(&rpc_credcache_lock);
- if (cred != NULL && cred->cr_count > 0) {
- if (!--cred->cr_count && (cred->cr_flags & RPCAUTH_CRED_DEAD)) {
- spin_unlock(&rpc_credcache_lock);
- rpcauth_remove_credcache(auth, cred);
- rpcauth_crdestroy(auth, cred);
- return;
- }
+ if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
+ return;
+
+ if (cred->cr_auth && cred->cr_flags & RPCAUTH_CRED_DEAD)
+ rpcauth_remove_credcache(cred);
+
+ if (!cred->cr_auth) {
+ spin_unlock(&rpc_credcache_lock);
+ rpcauth_crdestroy(cred);
+ return;
}
+ cred->cr_expire = jiffies + cred->cr_auth->au_expire;
spin_unlock(&rpc_credcache_lock);
}
@@ -298,7 +314,7 @@
dprintk("RPC: %4d releasing %s cred %p\n",
task->tk_pid, auth->au_ops->au_name, cred);
- rpcauth_releasecred(auth, cred);
+ put_rpccred(cred);
task->tk_msg.rpc_cred = NULL;
}
@@ -306,10 +322,11 @@
rpcauth_marshcred(struct rpc_task *task, u32 *p)
{
struct rpc_auth *auth = task->tk_auth;
+ struct rpc_cred *cred = task->tk_msg.rpc_cred;
dprintk("RPC: %4d marshaling %s cred %p\n",
- task->tk_pid, auth->au_ops->au_name, task->tk_msg.rpc_cred);
- return auth->au_ops->crmarshal(task, p,
+ task->tk_pid, auth->au_ops->au_name, cred);
+ return cred->cr_ops->crmarshal(task, p,
task->tk_flags & RPC_CALL_REALUID);
}
@@ -317,20 +334,22 @@
rpcauth_checkverf(struct rpc_task *task, u32 *p)
{
struct rpc_auth *auth = task->tk_auth;
+ struct rpc_cred *cred = task->tk_msg.rpc_cred;
dprintk("RPC: %4d validating %s cred %p\n",
- task->tk_pid, auth->au_ops->au_name, task->tk_msg.rpc_cred);
- return auth->au_ops->crvalidate(task, p);
+ task->tk_pid, auth->au_ops->au_name, cred);
+ return cred->cr_ops->crvalidate(task, p);
}
int
rpcauth_refreshcred(struct rpc_task *task)
{
struct rpc_auth *auth = task->tk_auth;
+ struct rpc_cred *cred = task->tk_msg.rpc_cred;
dprintk("RPC: %4d refreshing %s cred %p\n",
- task->tk_pid, auth->au_ops->au_name, task->tk_msg.rpc_cred);
- task->tk_status = auth->au_ops->crrefresh(task);
+ task->tk_pid, auth->au_ops->au_name, cred);
+ task->tk_status = cred->cr_ops->crrefresh(task);
return task->tk_status;
}
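
The auth.c hunks above move credential lifetime handling onto a plain reference
count: get_rpccred() whenever a credential gains a user, put_rpccred() on
release, with rpc_credcache_lock taken only when the last reference goes away.
A minimal sketch of that idiom, using hypothetical foo_* names in place of the
rpc_cred machinery (illustration only, not part of this commit):

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	struct foo_cred {
		atomic_t	count;
		/* ... cached credential data ... */
	};

	static spinlock_t foo_cache_lock = SPIN_LOCK_UNLOCKED;

	static inline void foo_get(struct foo_cred *c)
	{
		atomic_inc(&c->count);	/* lock-free in the common case */
	}

	static void foo_put(struct foo_cred *c)
	{
		/* atomic_dec_and_lock() decrements without the lock unless
		 * the count is about to hit zero; on the final put it
		 * returns 1 with foo_cache_lock held, so unhashing and
		 * freeing cannot race with a concurrent lookup. */
		if (!atomic_dec_and_lock(&c->count, &foo_cache_lock))
			return;
		/* last reference: remove from whatever cache owns it here */
		spin_unlock(&foo_cache_lock);
		kfree(c);
	}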
Index: auth_null.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/auth_null.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- auth_null.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ auth_null.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -17,6 +17,8 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static struct rpc_credops null_credops;
+
static struct rpc_auth *
nul_create(struct rpc_clnt *clnt)
{
@@ -52,9 +54,10 @@
if (!(cred = (struct rpc_cred *) rpc_allocate(flags, sizeof(*cred))))
return NULL;
- cred->cr_count = 0;
+ atomic_set(&cred->cr_count, 0);
cred->cr_flags = RPCAUTH_CRED_UPTODATE;
cred->cr_uid = current->uid;
+ cred->cr_ops = &null_credops;
return cred;
}
@@ -124,7 +127,11 @@
#endif
nul_create,
nul_destroy,
- nul_create_cred,
+ nul_create_cred
+};
+
+static
+struct rpc_credops null_credops = {
nul_destroy_cred,
nul_match,
nul_marshal,
Index: auth_unix.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/auth_unix.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- auth_unix.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ auth_unix.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -33,6 +33,8 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static struct rpc_credops unix_credops;
+
static struct rpc_auth *
unx_create(struct rpc_clnt *clnt)
{
@@ -71,7 +73,7 @@
if (!(cred = (struct unx_cred *) rpc_allocate(flags, sizeof(*cred))))
return NULL;
- cred->uc_count = 0;
+ atomic_set(&cred->uc_count, 0);
cred->uc_flags = RPCAUTH_CRED_UPTODATE;
if (flags & RPC_TASK_ROOTCREDS) {
cred->uc_uid = cred->uc_fsuid = 0;
@@ -91,6 +93,7 @@
if (i < NFS_NGROUPS)
cred->uc_gids[i] = NOGROUP;
}
+ cred->uc_base.cr_ops = &unix_credops;
return (struct rpc_cred *) cred;
}
@@ -106,7 +109,7 @@
if (!(cred = (struct unx_cred *) rpc_malloc(task, sizeof(*cred))))
return NULL;
- cred->uc_count = 1;
+ atomic_set(&cred->uc_count, 1);
cred->uc_flags = RPCAUTH_CRED_DEAD|RPCAUTH_CRED_UPTODATE;
cred->uc_uid = uid;
cred->uc_gid = gid;
@@ -236,7 +239,11 @@
#endif
unx_create,
unx_destroy,
- unx_create_cred,
+ unx_create_cred
+};
+
+static
+struct rpc_credops unix_credops = {
unx_destroy_cred,
unx_match,
unx_marshal,
Index: clnt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/clnt.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- clnt.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ clnt.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -55,6 +55,8 @@
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
static void call_reconnect(struct rpc_task *task);
+static void child_reconnect(struct rpc_task *);
+static void child_reconnect_status(struct rpc_task *);
static u32 * call_header(struct rpc_task *task);
static u32 * call_verify(struct rpc_task *task);
@@ -79,7 +81,6 @@
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
- xdr_init();
if (!xprt)
goto out;
@@ -526,6 +527,7 @@
call_reconnect(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
+ struct rpc_task *child;
dprintk("RPC: %4d call_reconnect status %d\n",
task->tk_pid, task->tk_status);
@@ -533,10 +535,31 @@
task->tk_action = call_transmit;
if (task->tk_status < 0 || !clnt->cl_xprt->stream)
return;
- clnt->cl_stats->netreconn++;
+
+ /* Run as a child to ensure it runs as an rpciod task */
+ child = rpc_new_child(clnt, task);
+ if (child) {
+ child->tk_action = child_reconnect;
+ rpc_run_child(task, child, NULL);
+ }
+}
+
+static void child_reconnect(struct rpc_task *task)
+{
+ task->tk_client->cl_stats->netreconn++;
+ task->tk_status = 0;
+ task->tk_action = child_reconnect_status;
xprt_reconnect(task);
}
+static void child_reconnect_status(struct rpc_task *task)
+{
+ if (task->tk_status == -EAGAIN)
+ task->tk_action = child_reconnect;
+ else
+ task->tk_action = NULL;
+}
+
/*
* 5. Transmit the RPC request, and wait for reply
*/
@@ -694,9 +717,14 @@
}
if (task->tk_status < 12) {
- printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
- clnt->cl_protname, task->tk_status);
- rpc_exit(task, -EIO);
+ if (!clnt->cl_softrtry) {
+ task->tk_action = call_transmit;
+ clnt->cl_stats->rpcretrans++;
+ } else {
+ printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
+ clnt->cl_protname, task->tk_status);
+ rpc_exit(task, -EIO);
+ }
return;
}
Index: pmap_clnt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/pmap_clnt.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- pmap_clnt.c 14 Jan 2001 17:13:59 -0000 1.1.1.1
+++ pmap_clnt.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -31,7 +31,7 @@
static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int);
static void pmap_getport_done(struct rpc_task *);
extern struct rpc_program pmap_program;
-spinlock_t pmap_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pmap_lock = SPIN_LOCK_UNLOCKED;
/*
* Obtain the port for a given RPC service on a given host. This one can
Index: sched.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/sched.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- sched.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ sched.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -30,7 +30,7 @@
/*
* We give RPC the same get_free_pages priority as NFS
*/
-#define GFP_RPC GFP_NFS
+#define GFP_RPC GFP_NOFS
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
@@ -76,7 +76,7 @@
/*
* Spinlock for other critical sections of code.
*/
-spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
/*
* This is the last-ditch buffer for NFS swap requests
@@ -744,7 +744,7 @@
* for readahead):
*
* sync user requests: GFP_KERNEL
- * async requests: GFP_RPC (== GFP_NFS)
+ * async requests: GFP_RPC (== GFP_NOFS)
* swap requests: GFP_ATOMIC (or new GFP_SWAPPER)
*/
void *
@@ -772,8 +772,8 @@
}
if (flags & RPC_TASK_ASYNC)
return NULL;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ>>4);
+ current->policy |= SCHED_YIELD;
+ schedule();
} while (!signalled());
return NULL;
@@ -1059,17 +1059,13 @@
rpciod_pid = current->pid;
up(&rpciod_running);
- exit_fs(current);
- exit_files(current);
- exit_mm(current);
+ daemonize();
spin_lock_irq(&current->sigmask_lock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
- current->session = 1;
- current->pgrp = 1;
strcpy(current->comm, "rpciod");
dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
@@ -1118,8 +1114,8 @@
__rpc_schedule();
if (all_tasks) {
dprintk("rpciod_killall: waiting for tasks to exit\n");
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ current->policy |= SCHED_YIELD;
+ schedule();
}
}
@@ -1189,8 +1185,8 @@
* wait briefly before checking the process id.
*/
current->sigpending = 0;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ current->policy |= SCHED_YIELD;
+ schedule();
/*
* Display a message if we're going to wait longer.
*/
Index: stats.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/stats.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- stats.c 14 Jan 2001 17:14:00 -0000 1.1.1.1
+++ stats.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -202,3 +202,4 @@
rpc_proc_exit();
}
#endif
+MODULE_LICENSE("GPL");
Index: sunrpc_syms.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/sunrpc_syms.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- sunrpc_syms.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ sunrpc_syms.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -65,7 +65,7 @@
EXPORT_SYMBOL(rpcauth_lookupcred);
EXPORT_SYMBOL(rpcauth_bindcred);
EXPORT_SYMBOL(rpcauth_matchcred);
-EXPORT_SYMBOL(rpcauth_releasecred);
+EXPORT_SYMBOL(put_rpccred);
/* RPC server stuff */
EXPORT_SYMBOL(svc_create);
@@ -92,18 +92,11 @@
EXPORT_SYMBOL(xdr_encode_array);
EXPORT_SYMBOL(xdr_encode_string);
EXPORT_SYMBOL(xdr_decode_string);
+EXPORT_SYMBOL(xdr_decode_string_inplace);
EXPORT_SYMBOL(xdr_decode_netobj);
EXPORT_SYMBOL(xdr_encode_netobj);
-EXPORT_SYMBOL(xdr_zero);
-EXPORT_SYMBOL(xdr_one);
-EXPORT_SYMBOL(xdr_two);
EXPORT_SYMBOL(xdr_shift_iovec);
EXPORT_SYMBOL(xdr_zero_iovec);
-
-/* RPC errors */
-EXPORT_SYMBOL(rpc_success);
-EXPORT_SYMBOL(rpc_garbage_args);
-EXPORT_SYMBOL(rpc_system_err);
/* Debugging symbols */
#ifdef RPC_DEBUG
Index: svc.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/svc.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- svc.c 14 Jan 2001 17:14:09 -0000 1.1.1.1
+++ svc.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -31,7 +31,6 @@
{
struct svc_serv *serv;
- xdr_init();
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
@@ -368,7 +367,9 @@
err_bad_prog:
#ifdef RPC_PARANOIA
- printk("svc: unknown program %d (me %d)\n", prog, progp->pg_prog);
+ if (prog != 100227 || progp->pg_prog != 100003)
+ printk("svc: unknown program %d (me %d)\n", prog, progp->pg_prog);
+ /* else it is just a Solaris client seeing if ACLs are supported */
#endif
serv->sv_stats->rpcbadfmt++;
svc_putlong(resp, rpc_prog_unavail);
Index: svcsock.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/svcsock.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- svcsock.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ svcsock.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -212,16 +212,20 @@
svc_sock_release(struct svc_rqst *rqstp)
{
struct svc_sock *svsk = rqstp->rq_sock;
+ struct svc_serv *serv = svsk->sk_server;
- if (!svsk)
- return;
svc_release_skb(rqstp);
rqstp->rq_sock = NULL;
+
+ spin_lock_bh(&serv->sv_lock);
if (!--(svsk->sk_inuse) && svsk->sk_dead) {
+ spin_unlock_bh(&serv->sv_lock);
dprintk("svc: releasing dead socket\n");
sock_release(svsk->sk_sock);
kfree(svsk);
}
+ else
+ spin_unlock_bh(&serv->sv_lock);
}
/*
@@ -377,10 +381,17 @@
dprintk("svc: recvfrom returned error %d\n", -err);
}
+ /* Sorry. */
+ if (skb_is_nonlinear(skb)) {
+ if (skb_linearize(skb, GFP_KERNEL) != 0) {
+ kfree_skb(skb);
+ svc_sock_received(svsk, 0);
+ return 0;
+ }
+ }
+
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
- unsigned int csum = skb->csum;
- csum = csum_partial(skb->h.raw, skb->len, csum);
- if ((unsigned short)csum_fold(csum)) {
+ if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
skb_free_datagram(svsk->sk_sk, skb);
svc_sock_received(svsk, 0);
return 0;
@@ -391,7 +402,7 @@
svsk->sk_data = 1;
len = skb->len - sizeof(struct udphdr);
- data = (u32 *) (skb->h.raw + sizeof(struct udphdr));
+ data = (u32 *) (skb->data + sizeof(struct udphdr));
rqstp->rq_skbuff = skb;
rqstp->rq_argbuf.base = data;
@@ -451,11 +462,11 @@
}
/*
- * A state change on a listening socket means there's a connection
- * pending.
+ * A data_ready event on a listening socket means there's a connection
+ * pending. Do not use state_change as a substitute for it.
*/
static void
-svc_tcp_state_change1(struct sock *sk)
+svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
struct svc_sock *svsk;
@@ -483,7 +494,7 @@
* A state change on a connected socket means it's dying or dead.
*/
static void
-svc_tcp_state_change2(struct sock *sk)
+svc_tcp_state_change(struct sock *sk)
{
struct svc_sock *svsk;
@@ -766,10 +777,10 @@
if (sk->state == TCP_LISTEN) {
dprintk("setting up TCP socket for listening\n");
- sk->state_change = svc_tcp_state_change1;
+ sk->data_ready = svc_tcp_listen_data_ready;
} else {
dprintk("setting up TCP socket for reading\n");
- sk->state_change = svc_tcp_state_change2;
+ sk->state_change = svc_tcp_state_change;
sk->data_ready = svc_tcp_data_ready;
svsk->sk_reclen = 0;
@@ -1034,14 +1045,15 @@
if (svsk->sk_qued)
rpc_remove_list(&serv->sv_sockets, svsk);
- spin_unlock_bh(&serv->sv_lock);
svsk->sk_dead = 1;
if (!svsk->sk_inuse) {
+ spin_unlock_bh(&serv->sv_lock);
sock_release(svsk->sk_sock);
kfree(svsk);
} else {
+ spin_unlock_bh(&serv->sv_lock);
printk(KERN_NOTICE "svc: server socket destroy delayed\n");
/* svsk->sk_server = NULL; */
}
Index: sysctl.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/sysctl.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- sysctl.c 14 Jan 2001 17:14:11 -0000 1.1.1.1
+++ sysctl.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -63,7 +63,7 @@
{
char tmpbuf[20], *p, c;
unsigned int value;
- int left, len;
+ size_t left, len;
if ((file->f_pos && !write) || !*lenp) {
*lenp = 0;
Index: xdr.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/xdr.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- xdr.c 14 Jan 2001 17:14:11 -0000 1.1.1.1
+++ xdr.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -14,41 +14,6 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
-u32 rpc_success, rpc_prog_unavail, rpc_prog_mismatch, rpc_proc_unavail,
- rpc_garbage_args, rpc_system_err;
-u32 rpc_auth_ok, rpc_autherr_badcred, rpc_autherr_rejectedcred,
- rpc_autherr_badverf, rpc_autherr_rejectedverf, rpc_autherr_tooweak;
-u32 xdr_zero, xdr_one, xdr_two;
-
-void
-xdr_init(void)
-{
- static int inited = 0;
-
- if (inited)
- return;
-
- xdr_zero = htonl(0);
- xdr_one = htonl(1);
- xdr_two = htonl(2);
-
- rpc_success = htonl(RPC_SUCCESS);
- rpc_prog_unavail = htonl(RPC_PROG_UNAVAIL);
- rpc_prog_mismatch = htonl(RPC_PROG_MISMATCH);
- rpc_proc_unavail = htonl(RPC_PROC_UNAVAIL);
- rpc_garbage_args = htonl(RPC_GARBAGE_ARGS);
- rpc_system_err = htonl(RPC_SYSTEM_ERR);
-
- rpc_auth_ok = htonl(RPC_AUTH_OK);
- rpc_autherr_badcred = htonl(RPC_AUTH_BADCRED);
- rpc_autherr_rejectedcred = htonl(RPC_AUTH_REJECTEDCRED);
- rpc_autherr_badverf = htonl(RPC_AUTH_BADVERF);
- rpc_autherr_rejectedverf = htonl(RPC_AUTH_REJECTEDVERF);
- rpc_autherr_tooweak = htonl(RPC_AUTH_TOOWEAK);
-
- inited = 1;
-}
-
/*
* XDR functions for basic NFS types
*/
@@ -121,6 +86,19 @@
*sp = string;
return p + XDR_QUADLEN(len);
}
+
+u32 *
+xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
+{
+ unsigned int len;
+
+ if ((len = ntohl(*p++)) > maxlen)
+ return NULL;
+ *lenp = len;
+ *sp = (char *) p;
+ return p + XDR_QUADLEN(len);
+}
+
/*
* Realign the iovec if the server missed out some reply elements
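
The new xdr_decode_string_inplace() above returns a pointer into the receive
buffer instead of copying the string. A caller that only needs a transient view
might use it roughly like this (sketch only; it assumes the matching
declaration lands in linux/sunrpc/xdr.h, and NFS_MAXNAMLEN is just an example
bound):

	#include <linux/nfs.h>
	#include <linux/sunrpc/xdr.h>

	/* Hypothetical decoder fragment: pull a name out of a reply
	 * without copying it. */
	static u32 *
	demo_decode_name(u32 *p, char **name, int *len)
	{
		p = xdr_decode_string_inplace(p, name, len, NFS_MAXNAMLEN);
		if (p == NULL)
			return NULL;	/* on-wire length exceeded the bound */
		/* *name points into the receive buffer; treat it as a
		 * (pointer, length) pair, not a nul-terminated string. */
		return p;
	}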
Index: xprt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/net/sunrpc/xprt.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- xprt.c 25 Feb 2001 23:14:56 -0000 1.1.1.2
+++ xprt.c 9 Apr 2002 16:29:02 -0000 1.2
@@ -75,20 +75,11 @@
* Local variables
*/
-/* Spinlock for critical sections in the code. */
-spinlock_t xprt_sock_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t xprt_lock = SPIN_LOCK_UNLOCKED;
-
#ifdef RPC_DEBUG
# undef RPC_DEBUG_DATA
# define RPCDBG_FACILITY RPCDBG_XPRT
#endif
-#ifndef MAX
-# define MAX(a, b) ((a) > (b)? (a) : (b))
-# define MIN(a, b) ((a) < (b)? (a) : (b))
-#endif
-
/*
* Local functions
*/
@@ -177,6 +168,44 @@
}
/*
+ * Serialize write access to sockets, in order to prevent different
+ * requests from interfering with each other.
+ * Also prevents TCP socket reconnections from colliding with writes.
+ */
+static int
+xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ int retval;
+ spin_lock_bh(&xprt->sock_lock);
+ if (!xprt->snd_task)
+ xprt->snd_task = task;
+ else if (xprt->snd_task != task) {
+ dprintk("RPC: %4d TCP write queue full (task %d)\n",
+ task->tk_pid, xprt->snd_task->tk_pid);
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+ rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ }
+ retval = xprt->snd_task == task;
+ spin_unlock_bh(&xprt->sock_lock);
+ return retval;
+}
+
+/*
+ * Releases the socket for use by other requests.
+ */
+static void
+xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ spin_lock_bh(&xprt->sock_lock);
+ if (xprt->snd_task == task) {
+ xprt->snd_task = NULL;
+ rpc_wake_up_next(&xprt->sending);
+ }
+ spin_unlock_bh(&xprt->sock_lock);
+}
+
+/*
* Write data to socket.
*/
static inline int
@@ -290,7 +319,10 @@
if (xprt->nocong)
return;
- spin_lock_bh(&xprt_sock_lock);
+ /*
+ * Note: we're in a BH context
+ */
+ spin_lock(&xprt->xprt_lock);
cwnd = xprt->cwnd;
if (result >= 0) {
if (xprt->cong < cwnd || time_before(jiffies, xprt->congtime))
@@ -318,7 +350,7 @@
xprt->cwnd = cwnd;
out:
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock(&xprt->xprt_lock);
}
/*
@@ -399,6 +431,8 @@
/*
* Reconnect a broken TCP connection.
+ *
+ * Note: This cannot collide with the TCP reads, as both run from rpciod
*/
void
xprt_reconnect(struct rpc_task *task)
@@ -421,15 +455,10 @@
return;
}
- spin_lock(&xprt_lock);
- if (xprt->connecting) {
- task->tk_timeout = 0;
- rpc_sleep_on(&xprt->reconn, task, NULL, NULL);
- spin_unlock(&xprt_lock);
+ if (!xprt_lock_write(xprt, task))
return;
- }
- xprt->connecting = 1;
- spin_unlock(&xprt_lock);
+ if (xprt_connected(xprt))
+ goto out_write;
status = -ENOTCONN;
if (!inet) {
@@ -444,6 +473,7 @@
/* Reset TCP record info */
xprt->tcp_offset = 0;
+ xprt->tcp_reclen = 0;
xprt->tcp_copied = 0;
xprt->tcp_more = 0;
@@ -472,24 +502,22 @@
dprintk("RPC: %4d connect status %d connected %d\n",
task->tk_pid, status, xprt_connected(xprt));
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock_bh(&xprt->sock_lock);
if (!xprt_connected(xprt)) {
task->tk_timeout = xprt->timeout.to_maxval;
- rpc_sleep_on(&xprt->reconn, task, xprt_reconn_status, NULL);
- spin_unlock_bh(&xprt_sock_lock);
+ rpc_sleep_on(&xprt->sending, task, xprt_reconn_status, NULL);
+ spin_unlock_bh(&xprt->sock_lock);
return;
}
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock_bh(&xprt->sock_lock);
}
defer:
- spin_lock(&xprt_lock);
- xprt->connecting = 0;
if (status < 0) {
rpc_delay(task, 5*HZ);
task->tk_status = -ENOTCONN;
}
- rpc_wake_up(&xprt->reconn);
- spin_unlock(&xprt_lock);
+ out_write:
+ xprt_release_write(xprt, task);
}
/*
@@ -504,10 +532,7 @@
dprintk("RPC: %4d xprt_reconn_timeout %d\n",
task->tk_pid, task->tk_status);
- spin_lock(&xprt_lock);
- xprt->connecting = 0;
- rpc_wake_up(&xprt->reconn);
- spin_unlock(&xprt_lock);
+ xprt_release_write(xprt, task);
}
/*
@@ -591,7 +616,7 @@
struct sk_buff *skb,
int copied)
{
- __u8 *pkt_data = skb->h.raw + sizeof(struct udphdr);
+ int offset = sizeof(struct udphdr);
__u8 *cur_ptr = iov->iov_base;
__kernel_size_t cur_len = iov->iov_len;
unsigned int csum = skb->csum;
@@ -599,18 +624,18 @@
int slack = skb->len - copied - sizeof(struct udphdr);
if (need_csum)
- csum = csum_partial(skb->h.raw, sizeof(struct udphdr), csum);
+ csum = csum_partial(skb->data, sizeof(struct udphdr), csum);
while (copied > 0) {
if (cur_len) {
int to_move = cur_len;
if (to_move > copied)
to_move = copied;
if (need_csum)
- csum = csum_partial_copy_nocheck(pkt_data, cur_ptr,
- to_move, csum);
+ csum = skb_copy_and_csum_bits(skb, offset, cur_ptr,
+ to_move, csum);
else
- memcpy(cur_ptr, pkt_data, to_move);
- pkt_data += to_move;
+ skb_copy_bits(skb, offset, cur_ptr, to_move);
+ offset += to_move;
copied -= to_move;
cur_ptr += to_move;
cur_len -= to_move;
@@ -623,7 +648,7 @@
}
if (need_csum) {
if (slack > 0)
- csum = csum_partial(pkt_data, slack, csum);
+ csum = skb_checksum(skb, offset, slack, csum);
if ((unsigned short)csum_fold(csum))
return -1;
}
@@ -704,10 +729,6 @@
struct iovec riov;
int want, result;
- if (xprt->tcp_offset >= xprt->tcp_reclen + sizeof(xprt->tcp_recm)) {
- xprt->tcp_offset = 0;
- xprt->tcp_reclen = 0;
- }
if (xprt->tcp_offset >= sizeof(xprt->tcp_recm))
goto done;
@@ -723,10 +744,6 @@
want -= result;
} while (want);
- /* Is this another fragment in the last message */
- if (!xprt->tcp_more)
- xprt->tcp_copied = 0; /* No, so we're reading a new message */
-
/* Get the record length and mask out the last fragment bit */
xprt->tcp_reclen = ntohl(xprt->tcp_recm);
xprt->tcp_more = (xprt->tcp_reclen & 0x80000000) ? 0 : 1;
@@ -749,7 +766,7 @@
if (xprt->tcp_copied >= sizeof(xprt->tcp_xid) || !avail)
goto done;
- want = MIN(sizeof(xprt->tcp_xid) - xprt->tcp_copied, avail);
+ want = min_t(unsigned int, sizeof(xprt->tcp_xid) - xprt->tcp_copied, avail);
do {
dprintk("RPC: reading xid (%d bytes)\n", want);
riov.iov_base = ((u8*) &xprt->tcp_xid) + xprt->tcp_copied;
@@ -776,7 +793,7 @@
if (req->rq_rlen <= xprt->tcp_copied || !avail)
goto done;
- want = MIN(req->rq_rlen - xprt->tcp_copied, avail);
+ want = min_t(unsigned int, req->rq_rlen - xprt->tcp_copied, avail);
do {
dprintk("RPC: %4d TCP receiving %d bytes\n",
req->rq_task->tk_pid, want);
@@ -810,7 +827,7 @@
int want, result = 0;
while (avail) {
- want = MIN(avail, sizeof(dummy));
+ want = min_t(unsigned int, avail, sizeof(dummy));
riov.iov_base = dummy;
riov.iov_len = want;
dprintk("RPC: TCP skipping %d bytes\n", want);
@@ -848,14 +865,15 @@
/* Read in a new fragment marker if necessary */
/* Can we ever really expect to get completely empty fragments? */
- if ((result = tcp_read_fraghdr(xprt)) <= 0)
+ if ((result = tcp_read_fraghdr(xprt)) < 0)
return result;
avail = result;
/* Read in the xid if necessary */
- if ((result = tcp_read_xid(xprt, avail)) <= 0)
+ if ((result = tcp_read_xid(xprt, avail)) < 0)
return result;
- avail = result;
+ if (!(avail = result))
+ goto out_ok;
/* Find and lock the request corresponding to this xid */
req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
@@ -873,9 +891,14 @@
if ((result = tcp_read_discard(xprt, avail)) < 0)
return result;
+ out_ok:
dprintk("RPC: tcp_input_record done (off %d reclen %d copied %d)\n",
xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_copied);
result = xprt->tcp_reclen;
+ xprt->tcp_reclen = 0;
+ xprt->tcp_offset = 0;
+ if (!xprt->tcp_more)
+ xprt->tcp_copied = 0;
return result;
}
@@ -890,11 +913,19 @@
rpciod_wake_up();
}
+int xprt_tcp_pending(void)
+{
+ int retval;
+
+ spin_lock_bh(&rpc_queue_lock);
+ retval = !list_empty(&rpc_xprt_pending);
+ spin_unlock_bh(&rpc_queue_lock);
+ return retval;
+}
+
static inline
void xprt_append_pending(struct rpc_xprt *xprt)
{
- if (!list_empty(&xprt->rx_pending))
- return;
spin_lock_bh(&rpc_queue_lock);
if (list_empty(&xprt->rx_pending)) {
list_add(&xprt->rx_pending, rpc_xprt_pending.prev);
@@ -1008,11 +1039,10 @@
case TCP_ESTABLISHED:
if (xprt_test_and_set_connected(xprt))
break;
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock(&xprt->sock_lock);
if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
rpc_wake_up_task(xprt->snd_task);
- rpc_wake_up(&xprt->reconn);
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock(&xprt->sock_lock);
break;
case TCP_SYN_SENT:
case TCP_SYN_RECV:
@@ -1046,10 +1076,10 @@
return;
if (!xprt_test_and_set_wspace(xprt)) {
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock(&xprt->sock_lock);
if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
rpc_wake_up_task(xprt->snd_task);
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock(&xprt->sock_lock);
}
if (test_bit(SOCK_NOSPACE, &sock->flags)) {
@@ -1072,14 +1102,14 @@
/* Wait until we have enough socket memory */
- if (sock_wspace(sk) < min(sk->sndbuf,XPRT_MIN_WRITE_SPACE))
+ if (sock_wspace(sk) < min_t(int, sk->sndbuf,XPRT_MIN_WRITE_SPACE))
return;
if (!xprt_test_and_set_wspace(xprt)) {
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock(&xprt->sock_lock);
if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
rpc_wake_up_task(xprt->snd_task);
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock(&xprt->sock_lock);
}
if (sk->sleep && waitqueue_active(sk->sleep))
@@ -1105,51 +1135,6 @@
rpc_wake_up_task(task);
}
-
-/*
- * Serialize access to sockets, in order to prevent different
- * requests from interfering with each other.
- */
-static int
-xprt_down_transmit(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
- struct rpc_rqst *req = task->tk_rqstp;
-
- spin_lock(&xprt_lock);
- if (xprt->snd_task && xprt->snd_task != task) {
- dprintk("RPC: %4d TCP write queue full (task %d)\n",
- task->tk_pid, xprt->snd_task->tk_pid);
- task->tk_timeout = 0;
- task->tk_status = -EAGAIN;
- rpc_sleep_on(&xprt->sending, task, NULL, NULL);
- } else if (!xprt->snd_task) {
- xprt->snd_task = task;
-#ifdef RPC_PROFILE
- req->rq_xtime = jiffies;
-#endif
- req->rq_bytes_sent = 0;
- }
- spin_unlock(&xprt_lock);
- return xprt->snd_task == task;
-}
-
-/*
- * Releases the socket for use by other requests.
- */
-static inline void
-xprt_up_transmit(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
-
- if (xprt->snd_task && xprt->snd_task == task) {
- spin_lock(&xprt_lock);
- xprt->snd_task = NULL;
- rpc_wake_up_next(&xprt->sending);
- spin_unlock(&xprt_lock);
- }
-}
-
/*
* Place the actual RPC call.
* We have to copy the iovec because sendmsg fiddles with its contents.
@@ -1183,9 +1168,12 @@
*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
}
- if (!xprt_down_transmit(task))
+ if (!xprt_lock_write(xprt, task))
return;
+#ifdef RPC_PROFILE
+ req->rq_xtime = jiffies;
+#endif
do_xprt_transmit(task);
}
@@ -1253,12 +1241,12 @@
switch (status) {
case -ENOMEM:
/* Protect against (udp|tcp)_write_space */
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock_bh(&xprt->sock_lock);
if (!xprt_wspace(xprt)) {
task->tk_timeout = req->rq_timeout.to_current;
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
}
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock_bh(&xprt->sock_lock);
return;
case -EAGAIN:
/* Keep holding the socket if it is blocked */
@@ -1269,6 +1257,9 @@
if (!xprt->stream)
return;
default:
+ if (xprt->stream)
+ xprt_disconnect(xprt);
+ req->rq_bytes_sent = 0;
goto out_release;
}
@@ -1279,7 +1270,7 @@
rpc_add_timer(task, xprt_timer);
rpc_unlock_task(task);
out_release:
- xprt_up_transmit(task);
+ xprt_release_write(xprt, task);
}
/*
@@ -1314,7 +1305,7 @@
dprintk("RPC: %4d xprt_reserve cong = %ld cwnd = %ld\n",
task->tk_pid, xprt->cong, xprt->cwnd);
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock_bh(&xprt->xprt_lock);
xprt_reserve_status(task);
if (task->tk_rqstp) {
task->tk_timeout = 0;
@@ -1325,7 +1316,7 @@
task->tk_status = -EAGAIN;
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock_bh(&xprt->xprt_lock);
dprintk("RPC: %4d xprt_reserve returns %d\n",
task->tk_pid, task->tk_status);
return task->tk_status;
@@ -1398,7 +1389,11 @@
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req;
- xprt_up_transmit(task);
+ if (xprt->snd_task == task) {
+ if (xprt->stream)
+ xprt_disconnect(xprt);
+ xprt_release_write(xprt, task);
+ }
if (!(req = task->tk_rqstp))
return;
task->tk_rqstp = NULL;
@@ -1406,13 +1401,7 @@
dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
- /* remove slot from queue of pending */
- if (task->tk_rpcwait) {
- printk("RPC: task of released request still queued!\n");
- rpc_remove_wait_queue(task);
- }
-
- spin_lock_bh(&xprt_sock_lock);
+ spin_lock_bh(&xprt->xprt_lock);
req->rq_next = xprt->free;
xprt->free = req;
@@ -1420,7 +1409,7 @@
xprt->cong -= RPC_CWNDSCALE;
xprt_clear_backlog(xprt);
- spin_unlock_bh(&xprt_sock_lock);
+ spin_unlock_bh(&xprt->xprt_lock);
}
/*
@@ -1477,6 +1466,8 @@
} else
xprt->cwnd = RPC_INITCWND;
xprt->congtime = jiffies;
+ spin_lock_init(&xprt->sock_lock);
+ spin_lock_init(&xprt->xprt_lock);
init_waitqueue_head(&xprt->cong_wait);
/* Set timeout parameters */
@@ -1490,7 +1481,6 @@
xprt->pending = RPC_INIT_WAITQ("xprt_pending");
xprt->sending = RPC_INIT_WAITQ("xprt_sending");
xprt->backlog = RPC_INIT_WAITQ("xprt_backlog");
- xprt->reconn = RPC_INIT_WAITQ("xprt_reconn");
/* initialize free list */
for (i = 0, req = xprt->slot; i < RPC_MAXREQS-1; i++, req++)
@@ -1626,7 +1616,6 @@
rpc_wake_up(&xprt->sending);
rpc_wake_up(&xprt->pending);
rpc_wake_up(&xprt->backlog);
- rpc_wake_up(&xprt->reconn);
if (waitqueue_active(&xprt->cong_wait))
wake_up(&xprt->cong_wait);
}
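
Both the transmit path and the reconnect path in the xprt.c changes now funnel
through the same per-transport write lock. The caller-side shape they share
looks like the sketch below (hypothetical demo function, not literal patch
code; xprt_lock_write() and xprt_release_write() are the static helpers added
above):

	#include <linux/sunrpc/sched.h>
	#include <linux/sunrpc/xprt.h>

	static void
	demo_socket_user(struct rpc_xprt *xprt, struct rpc_task *task)
	{
		/* Only the owner of xprt->snd_task may touch the socket;
		 * any other task is queued on xprt->sending with -EAGAIN
		 * and retried once the owner releases the lock. */
		if (!xprt_lock_write(xprt, task))
			return;

		/* ... send the request, or drive the TCP reconnect ... */

		xprt_release_write(xprt, task);	/* wakes the next waiter */
	}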