Merge branch 'nfs-for-2.6.35' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
* 'nfs-for-2.6.35' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (78 commits)
  SUNRPC: Don't spam gssd with upcall requests when the kerberos key expired
  SUNRPC: Reorder the struct rpc_task fields
  SUNRPC: Remove the 'tk_magic' debugging field
  SUNRPC: Move the task->tk_bytes_sent and tk_rtt to struct rpc_rqst
  NFS: Don't call iput() in nfs_access_cache_shrinker
  NFS: Clean up nfs_access_zap_cache()
  NFS: Don't run nfs_access_cache_shrinker() when the mask is GFP_NOFS
  SUNRPC: Ensure rpcauth_prune_expired() respects the nr_to_scan parameter
  SUNRPC: Ensure memory shrinker doesn't waste time in rpcauth_prune_expired()
  SUNRPC: Dont run rpcauth_cache_shrinker() when gfp_mask is GFP_NOFS
  NFS: Read requests can use GFP_KERNEL.
  NFS: Clean up nfs_create_request()
  NFS: Don't use GFP_KERNEL in rpcsec_gss downcalls
  NFSv4: Don't use GFP_KERNEL allocations in state recovery
  SUNRPC: Fix xs_setup_bc_tcp()
  SUNRPC: Replace jiffies-based metrics with ktime-based metrics
  ktime: introduce ktime_to_ms()
  SUNRPC: RPC metrics and RTT estimator should use same RTT value
  NFS: Calldata for nfs4_renew_done()
  NFS: Squelch compiler warning in nfs_add_server_stats()
  ...
commit 6a6be470c3
56 changed files with 3384 additions and 859 deletions
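Several of the commits pulled in here ("NFS: Don't run nfs_access_cache_shrinker() when the mask is GFP_NOFS", "SUNRPC: Dont run rpcauth_cache_shrinker() when gfp_mask is GFP_NOFS", and the various GFP_NOFS allocation changes) share one motivation: code that can be reached from memory reclaim must not perform GFP_KERNEL allocations or other work that may recurse back into the filesystem. The sketch below is illustrative only, with a made-up function name; a guard of this shape is added to the real nfs_access_cache_shrinker() in the fs/nfs/dir.c hunks further down.

/* Illustrative sketch, not code from this patch set: a cache shrinker
 * bails out when the caller's allocation context is fs-restricted
 * (e.g. GFP_NOFS) and only does real work for GFP_KERNEL-like callers.
 */
static int demo_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
{
        if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
                return (nr_to_scan == 0) ? 0 : -1;     /* report no progress */

        /* ... scan and free up to nr_to_scan cache entries here ... */
        return 0;
}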
@ -934,7 +934,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
|
|||
}
|
||||
|
||||
fsinfo.fattr = fattr;
|
||||
nfs_fattr_init(fattr);
|
||||
error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
|
||||
if (error < 0)
|
||||
goto out_error;
|
||||
|
@ -1047,13 +1046,18 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
|
|||
struct nfs_fh *mntfh)
|
||||
{
|
||||
struct nfs_server *server;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr;
|
||||
int error;
|
||||
|
||||
server = nfs_alloc_server();
|
||||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = -ENOMEM;
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto error;
|
||||
|
||||
/* Get a client representation */
|
||||
error = nfs_init_server(server, data);
|
||||
if (error < 0)
|
||||
|
@ -1064,7 +1068,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
|
|||
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
|
||||
|
||||
/* Probe the root fh to retrieve its FSID */
|
||||
error = nfs_probe_fsinfo(server, mntfh, &fattr);
|
||||
error = nfs_probe_fsinfo(server, mntfh, fattr);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
if (server->nfs_client->rpc_ops->version == 3) {
|
||||
|
@ -1077,14 +1081,14 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
|
|||
server->namelen = NFS2_MAXNAMLEN;
|
||||
}
|
||||
|
||||
if (!(fattr.valid & NFS_ATTR_FATTR)) {
|
||||
error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
|
||||
if (!(fattr->valid & NFS_ATTR_FATTR)) {
|
||||
error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
|
||||
if (error < 0) {
|
||||
dprintk("nfs_create_server: getattr error = %d\n", -error);
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
|
||||
memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
|
||||
|
||||
dprintk("Server FSID: %llx:%llx\n",
|
||||
(unsigned long long) server->fsid.major,
|
||||
|
@ -1096,9 +1100,11 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
|
|||
spin_unlock(&nfs_client_lock);
|
||||
|
||||
server->mount_time = jiffies;
|
||||
nfs_free_fattr(fattr);
|
||||
return server;
|
||||
|
||||
error:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_server(server);
|
||||
return ERR_PTR(error);
|
||||
}
|
||||
|
@ -1340,7 +1346,7 @@ error:
|
|||
struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
||||
struct nfs_fh *mntfh)
|
||||
{
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr;
|
||||
struct nfs_server *server;
|
||||
int error;
|
||||
|
||||
|
@ -1350,6 +1356,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
|||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = -ENOMEM;
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto error;
|
||||
|
||||
/* set up the general RPC client */
|
||||
error = nfs4_init_server(server, data);
|
||||
if (error < 0)
|
||||
|
@ -1364,7 +1375,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
|||
goto error;
|
||||
|
||||
/* Probe the root fh to retrieve its FSID */
|
||||
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
|
||||
error = nfs4_get_rootfh(server, mntfh);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -1375,7 +1386,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
|||
|
||||
nfs4_session_set_rwsize(server);
|
||||
|
||||
error = nfs_probe_fsinfo(server, mntfh, &fattr);
|
||||
error = nfs_probe_fsinfo(server, mntfh, fattr);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -1389,9 +1400,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
|||
|
||||
server->mount_time = jiffies;
|
||||
dprintk("<-- nfs4_create_server() = %p\n", server);
|
||||
nfs_free_fattr(fattr);
|
||||
return server;
|
||||
|
||||
error:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_server(server);
|
||||
dprintk("<-- nfs4_create_server() = error %d\n", error);
|
||||
return ERR_PTR(error);
|
||||
|
@ -1405,7 +1418,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
{
|
||||
struct nfs_client *parent_client;
|
||||
struct nfs_server *server, *parent_server;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs4_create_referral_server()\n");
|
||||
|
@ -1414,6 +1427,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = -ENOMEM;
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto error;
|
||||
|
||||
parent_server = NFS_SB(data->sb);
|
||||
parent_client = parent_server->nfs_client;
|
||||
|
||||
|
@ -1443,12 +1461,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
|
||||
|
||||
/* Probe the root fh to retrieve its FSID and filehandle */
|
||||
error = nfs4_path_walk(server, mntfh, data->mnt_path);
|
||||
error = nfs4_get_rootfh(server, mntfh);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
/* probe the filesystem info for this server filesystem */
|
||||
error = nfs_probe_fsinfo(server, mntfh, &fattr);
|
||||
error = nfs_probe_fsinfo(server, mntfh, fattr);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -1466,10 +1484,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
|
||||
server->mount_time = jiffies;
|
||||
|
||||
nfs_free_fattr(fattr);
|
||||
dprintk("<-- nfs_create_referral_server() = %p\n", server);
|
||||
return server;
|
||||
|
||||
error:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_server(server);
|
||||
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
|
||||
return ERR_PTR(error);
|
||||
|
@ -1485,7 +1505,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
|
|||
struct nfs_fattr *fattr)
|
||||
{
|
||||
struct nfs_server *server;
|
||||
struct nfs_fattr fattr_fsinfo;
|
||||
struct nfs_fattr *fattr_fsinfo;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs_clone_server(,%llx:%llx,)\n",
|
||||
|
@ -1496,6 +1516,11 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
|
|||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = -ENOMEM;
|
||||
fattr_fsinfo = nfs_alloc_fattr();
|
||||
if (fattr_fsinfo == NULL)
|
||||
goto out_free_server;
|
||||
|
||||
/* Copy data from the source */
|
||||
server->nfs_client = source->nfs_client;
|
||||
atomic_inc(&server->nfs_client->cl_count);
|
||||
|
@ -1512,7 +1537,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
|
|||
nfs_init_server_aclclient(server);
|
||||
|
||||
/* probe the filesystem info for this server filesystem */
|
||||
error = nfs_probe_fsinfo(server, fh, &fattr_fsinfo);
|
||||
error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
|
||||
if (error < 0)
|
||||
goto out_free_server;
|
||||
|
||||
|
@ -1534,10 +1559,12 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
|
|||
|
||||
server->mount_time = jiffies;
|
||||
|
||||
nfs_free_fattr(fattr_fsinfo);
|
||||
dprintk("<-- nfs_clone_server() = %p\n", server);
|
||||
return server;
|
||||
|
||||
out_free_server:
|
||||
nfs_free_fattr(fattr_fsinfo);
|
||||
nfs_free_server(server);
|
||||
dprintk("<-- nfs_clone_server() = error %d\n", error);
|
||||
return ERR_PTR(error);
|
||||
@@ -213,7 +213,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
        struct nfs_delegation *freeme = NULL;
        int status = 0;

        delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
fs/nfs/dir.c (141 lines changed)
@ -530,9 +530,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
nfs_readdir_descriptor_t my_desc,
|
||||
*desc = &my_desc;
|
||||
struct nfs_entry my_entry;
|
||||
struct nfs_fh fh;
|
||||
struct nfs_fattr fattr;
|
||||
long res;
|
||||
int res = -ENOMEM;
|
||||
|
||||
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
|
||||
dentry->d_parent->d_name.name, dentry->d_name.name,
|
||||
|
@ -554,9 +552,11 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
|
||||
my_entry.cookie = my_entry.prev_cookie = 0;
|
||||
my_entry.eof = 0;
|
||||
my_entry.fh = &fh;
|
||||
my_entry.fattr = &fattr;
|
||||
nfs_fattr_init(&fattr);
|
||||
my_entry.fh = nfs_alloc_fhandle();
|
||||
my_entry.fattr = nfs_alloc_fattr();
|
||||
if (my_entry.fh == NULL || my_entry.fattr == NULL)
|
||||
goto out_alloc_failed;
|
||||
|
||||
desc->entry = &my_entry;
|
||||
|
||||
nfs_block_sillyrename(dentry);
|
||||
|
@ -598,7 +598,10 @@ out:
|
|||
nfs_unblock_sillyrename(dentry);
|
||||
if (res > 0)
|
||||
res = 0;
|
||||
dfprintk(FILE, "NFS: readdir(%s/%s) returns %ld\n",
|
||||
out_alloc_failed:
|
||||
nfs_free_fattr(my_entry.fattr);
|
||||
nfs_free_fhandle(my_entry.fh);
|
||||
dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
|
||||
dentry->d_parent->d_name.name, dentry->d_name.name,
|
||||
res);
|
||||
return res;
|
||||
|
@ -776,9 +779,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
|
|||
struct inode *dir;
|
||||
struct inode *inode;
|
||||
struct dentry *parent;
|
||||
struct nfs_fh *fhandle = NULL;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
int error;
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
|
||||
parent = dget_parent(dentry);
|
||||
dir = parent->d_inode;
|
||||
|
@ -811,14 +814,22 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
|
|||
if (NFS_STALE(inode))
|
||||
goto out_bad;
|
||||
|
||||
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
|
||||
error = -ENOMEM;
|
||||
fhandle = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fhandle == NULL || fattr == NULL)
|
||||
goto out_error;
|
||||
|
||||
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
|
||||
if (error)
|
||||
goto out_bad;
|
||||
if (nfs_compare_fh(NFS_FH(inode), &fhandle))
|
||||
if (nfs_compare_fh(NFS_FH(inode), fhandle))
|
||||
goto out_bad;
|
||||
if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
|
||||
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
|
||||
goto out_bad;
|
||||
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
out_set_verifier:
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
out_valid:
|
||||
|
@ -842,11 +853,21 @@ out_zap_parent:
|
|||
shrink_dcache_parent(dentry);
|
||||
}
|
||||
d_drop(dentry);
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
dput(parent);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
|
||||
__func__, dentry->d_parent->d_name.name,
|
||||
dentry->d_name.name);
|
||||
return 0;
|
||||
out_error:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
dput(parent);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
|
||||
__func__, dentry->d_parent->d_name.name,
|
||||
dentry->d_name.name, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -911,9 +932,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
|
|||
struct dentry *res;
|
||||
struct dentry *parent;
|
||||
struct inode *inode = NULL;
|
||||
struct nfs_fh *fhandle = NULL;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
int error;
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
|
||||
dfprintk(VFS, "NFS: lookup(%s/%s)\n",
|
||||
dentry->d_parent->d_name.name, dentry->d_name.name);
|
||||
|
@ -923,7 +944,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
|
|||
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
|
||||
goto out;
|
||||
|
||||
res = ERR_PTR(-ENOMEM);
|
||||
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
|
||||
|
||||
/*
|
||||
|
@ -936,17 +956,23 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
|
|||
goto out;
|
||||
}
|
||||
|
||||
res = ERR_PTR(-ENOMEM);
|
||||
fhandle = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fhandle == NULL || fattr == NULL)
|
||||
goto out;
|
||||
|
||||
parent = dentry->d_parent;
|
||||
/* Protect against concurrent sillydeletes */
|
||||
nfs_block_sillyrename(parent);
|
||||
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
|
||||
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
|
||||
if (error == -ENOENT)
|
||||
goto no_entry;
|
||||
if (error < 0) {
|
||||
res = ERR_PTR(error);
|
||||
goto out_unblock_sillyrename;
|
||||
}
|
||||
inode = nfs_fhget(dentry->d_sb, &fhandle, &fattr);
|
||||
inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
|
||||
res = (struct dentry *)inode;
|
||||
if (IS_ERR(res))
|
||||
goto out_unblock_sillyrename;
|
||||
|
@ -962,6 +988,8 @@ no_entry:
|
|||
out_unblock_sillyrename:
|
||||
nfs_unblock_sillyrename(parent);
|
||||
out:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -1669,28 +1697,33 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
|
|||
smp_mb__after_atomic_dec();
|
||||
}
|
||||
|
||||
static void nfs_access_free_list(struct list_head *head)
|
||||
{
|
||||
struct nfs_access_entry *cache;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
cache = list_entry(head->next, struct nfs_access_entry, lru);
|
||||
list_del(&cache->lru);
|
||||
nfs_access_free_entry(cache);
|
||||
}
|
||||
}
|
||||
|
||||
int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
|
||||
{
|
||||
LIST_HEAD(head);
|
||||
struct nfs_inode *nfsi;
|
||||
struct nfs_access_entry *cache;
|
||||
|
||||
restart:
|
||||
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
|
||||
return (nr_to_scan == 0) ? 0 : -1;
|
||||
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
|
||||
struct rw_semaphore *s_umount;
|
||||
struct inode *inode;
|
||||
|
||||
if (nr_to_scan-- == 0)
|
||||
break;
|
||||
s_umount = &nfsi->vfs_inode.i_sb->s_umount;
|
||||
if (!down_read_trylock(s_umount))
|
||||
continue;
|
||||
inode = igrab(&nfsi->vfs_inode);
|
||||
if (inode == NULL) {
|
||||
up_read(s_umount);
|
||||
continue;
|
||||
}
|
||||
inode = &nfsi->vfs_inode;
|
||||
spin_lock(&inode->i_lock);
|
||||
if (list_empty(&nfsi->access_cache_entry_lru))
|
||||
goto remove_lru_entry;
|
||||
|
@ -1704,61 +1737,47 @@ restart:
|
|||
else {
|
||||
remove_lru_entry:
|
||||
list_del_init(&nfsi->access_cache_inode_lru);
|
||||
smp_mb__before_clear_bit();
|
||||
clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
iput(inode);
|
||||
up_read(s_umount);
|
||||
goto restart;
|
||||
}
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
while (!list_empty(&head)) {
|
||||
cache = list_entry(head.next, struct nfs_access_entry, lru);
|
||||
list_del(&cache->lru);
|
||||
nfs_access_free_entry(cache);
|
||||
}
|
||||
nfs_access_free_list(&head);
|
||||
return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
|
||||
}
|
||||
|
||||
static void __nfs_access_zap_cache(struct inode *inode)
|
||||
static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
|
||||
{
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct rb_root *root_node = &nfsi->access_cache;
|
||||
struct rb_node *n, *dispose = NULL;
|
||||
struct rb_node *n;
|
||||
struct nfs_access_entry *entry;
|
||||
|
||||
/* Unhook entries from the cache */
|
||||
while ((n = rb_first(root_node)) != NULL) {
|
||||
entry = rb_entry(n, struct nfs_access_entry, rb_node);
|
||||
rb_erase(n, root_node);
|
||||
list_del(&entry->lru);
|
||||
n->rb_left = dispose;
|
||||
dispose = n;
|
||||
list_move(&entry->lru, head);
|
||||
}
|
||||
nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
/* Now kill them all! */
|
||||
while (dispose != NULL) {
|
||||
n = dispose;
|
||||
dispose = n->rb_left;
|
||||
nfs_access_free_entry(rb_entry(n, struct nfs_access_entry, rb_node));
|
||||
}
|
||||
}
|
||||
|
||||
void nfs_access_zap_cache(struct inode *inode)
|
||||
{
|
||||
LIST_HEAD(head);
|
||||
|
||||
if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
|
||||
return;
|
||||
/* Remove from global LRU init */
|
||||
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
|
||||
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
}
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
/* This will release the spinlock */
|
||||
__nfs_access_zap_cache(inode);
|
||||
__nfs_access_zap_cache(NFS_I(inode), &head);
|
||||
spin_unlock(&inode->i_lock);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
nfs_access_free_list(&head);
|
||||
}
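The reworked nfs_access_zap_cache() and the new nfs_access_free_list() helper above follow a pattern several of these commits apply: entries are only unhooked onto a private list while the spinlocks are held, and the actual freeing happens after the locks are dropped. A minimal sketch of that shape, with hypothetical names and a generic entry type (not code from the patch):

/* Illustrative sketch: detach under the lock, free outside it. */
struct demo_entry {
        struct list_head lru;
};

static void demo_zap_cache(spinlock_t *lock, struct list_head *cache)
{
        LIST_HEAD(head);
        struct demo_entry *entry, *next;

        spin_lock(lock);
        list_splice_init(cache, &head);         /* unhook everything atomically */
        spin_unlock(lock);

        list_for_each_entry_safe(entry, next, &head, lru) {
                list_del(&entry->lru);
                kfree(entry);                   /* no spinlock held here */
        }
}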
|
||||
|
||||
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
|
||||
|
@ -1809,8 +1828,8 @@ out_stale:
|
|||
nfs_access_free_entry(cache);
|
||||
return -ENOENT;
|
||||
out_zap:
|
||||
/* This will release the spinlock */
|
||||
__nfs_access_zap_cache(inode);
|
||||
spin_unlock(&inode->i_lock);
|
||||
nfs_access_zap_cache(inode);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
|
@ -1865,9 +1884,11 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
|
|||
smp_mb__after_atomic_inc();
|
||||
|
||||
/* Add inode to global LRU list */
|
||||
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
|
||||
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
|
||||
list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
|
||||
&nfs_access_lru_list);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
}
|
||||
}
|
||||
|
@@ -161,13 +161,16 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);

        if (server->flags & NFS_MOUNT_NOAC)
                goto force_reval;
        if (nfs_have_delegated_attributes(inode))
                goto out_noreval;

        if (filp->f_flags & O_DIRECT)
                goto force_reval;
        if (nfsi->npages != 0)
                return 0;
        if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
        if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                goto force_reval;
        if (nfs_attribute_timeout(inode))
                goto force_reval;
out_noreval:
        return 0;
force_reval:
        return __nfs_revalidate_inode(server, inode);
@@ -467,7 +467,8 @@ int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
                                 struct list_head *pages,
                                 unsigned *nr_pages)
{
        int ret, npages = *nr_pages;
        unsigned npages = *nr_pages;
        int ret;

        dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
                 NFS_I(inode)->fscache, npages, inode);
fs/nfs/getroot.c (189 lines changed)
@ -78,159 +78,94 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
|
|||
{
|
||||
struct nfs_server *server = NFS_SB(sb);
|
||||
struct nfs_fsinfo fsinfo;
|
||||
struct nfs_fattr fattr;
|
||||
struct dentry *mntroot;
|
||||
struct dentry *ret;
|
||||
struct inode *inode;
|
||||
int error;
|
||||
|
||||
/* get the actual root for this mount */
|
||||
fsinfo.fattr = &fattr;
|
||||
fsinfo.fattr = nfs_alloc_fattr();
|
||||
if (fsinfo.fattr == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
|
||||
if (error < 0) {
|
||||
dprintk("nfs_get_root: getattr error = %d\n", -error);
|
||||
return ERR_PTR(error);
|
||||
ret = ERR_PTR(error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
|
||||
if (IS_ERR(inode)) {
|
||||
dprintk("nfs_get_root: get root inode failed\n");
|
||||
return ERR_CAST(inode);
|
||||
ret = ERR_CAST(inode);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = nfs_superblock_set_dummy_root(sb, inode);
|
||||
if (error != 0)
|
||||
return ERR_PTR(error);
|
||||
if (error != 0) {
|
||||
ret = ERR_PTR(error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* root dentries normally start off anonymous and get spliced in later
|
||||
* if the dentry tree reaches them; however if the dentry already
|
||||
* exists, we'll pick it up at this point and use it as the root
|
||||
*/
|
||||
mntroot = d_obtain_alias(inode);
|
||||
if (IS_ERR(mntroot)) {
|
||||
ret = d_obtain_alias(inode);
|
||||
if (IS_ERR(ret)) {
|
||||
dprintk("nfs_get_root: get root dentry failed\n");
|
||||
return mntroot;
|
||||
goto out;
|
||||
}
|
||||
|
||||
security_d_instantiate(mntroot, inode);
|
||||
security_d_instantiate(ret, inode);
|
||||
|
||||
if (!mntroot->d_op)
|
||||
mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
|
||||
|
||||
return mntroot;
|
||||
if (ret->d_op == NULL)
|
||||
ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
|
||||
out:
|
||||
nfs_free_fattr(fsinfo.fattr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
|
||||
/*
|
||||
* Do a simple pathwalk from the root FH of the server to the nominated target
|
||||
* of the mountpoint
|
||||
* - give error on symlinks
|
||||
* - give error on ".." occurring in the path
|
||||
* - follow traversals
|
||||
*/
|
||||
int nfs4_path_walk(struct nfs_server *server,
|
||||
struct nfs_fh *mntfh,
|
||||
const char *path)
|
||||
int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
|
||||
{
|
||||
struct nfs_fsinfo fsinfo;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fh lastfh;
|
||||
struct qstr name;
|
||||
int ret;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
dprintk("--> nfs4_path_walk(,,%s)\n", path);
|
||||
dprintk("--> nfs4_get_rootfh()\n");
|
||||
|
||||
fsinfo.fattr = &fattr;
|
||||
nfs_fattr_init(&fattr);
|
||||
|
||||
/* Eat leading slashes */
|
||||
while (*path == '/')
|
||||
path++;
|
||||
fsinfo.fattr = nfs_alloc_fattr();
|
||||
if (fsinfo.fattr == NULL)
|
||||
goto out;
|
||||
|
||||
/* Start by getting the root filehandle from the server */
|
||||
ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
|
||||
if (ret < 0) {
|
||||
dprintk("nfs4_get_root: getroot error = %d\n", -ret);
|
||||
return ret;
|
||||
dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!S_ISDIR(fattr.mode)) {
|
||||
printk(KERN_ERR "nfs4_get_root:"
|
||||
if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_MODE)
|
||||
|| !S_ISDIR(fsinfo.fattr->mode)) {
|
||||
printk(KERN_ERR "nfs4_get_rootfh:"
|
||||
" getroot encountered non-directory\n");
|
||||
return -ENOTDIR;
|
||||
ret = -ENOTDIR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* FIXME: It is quite valid for the server to return a referral here */
|
||||
if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
|
||||
printk(KERN_ERR "nfs4_get_root:"
|
||||
if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
|
||||
printk(KERN_ERR "nfs4_get_rootfh:"
|
||||
" getroot obtained referral\n");
|
||||
return -EREMOTE;
|
||||
ret = -EREMOTE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
next_component:
|
||||
dprintk("Next: %s\n", path);
|
||||
|
||||
/* extract the next bit of the path */
|
||||
if (!*path)
|
||||
goto path_walk_complete;
|
||||
|
||||
name.name = path;
|
||||
while (*path && *path != '/')
|
||||
path++;
|
||||
name.len = path - (const char *) name.name;
|
||||
|
||||
if (name.len > NFS4_MAXNAMLEN)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
eat_dot_dir:
|
||||
while (*path == '/')
|
||||
path++;
|
||||
|
||||
if (path[0] == '.' && (path[1] == '/' || !path[1])) {
|
||||
path += 2;
|
||||
goto eat_dot_dir;
|
||||
}
|
||||
|
||||
/* FIXME: Why shouldn't the user be able to use ".." in the path? */
|
||||
if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2])
|
||||
) {
|
||||
printk(KERN_ERR "nfs4_get_root:"
|
||||
" Mount path contains reference to \"..\"\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* lookup the next FH in the sequence */
|
||||
memcpy(&lastfh, mntfh, sizeof(lastfh));
|
||||
|
||||
dprintk("LookupFH: %*.*s [%s]\n", name.len, name.len, name.name, path);
|
||||
|
||||
ret = server->nfs_client->rpc_ops->lookupfh(server, &lastfh, &name,
|
||||
mntfh, &fattr);
|
||||
if (ret < 0) {
|
||||
dprintk("nfs4_get_root: getroot error = %d\n", -ret);
|
||||
memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
|
||||
out:
|
||||
nfs_free_fattr(fsinfo.fattr);
|
||||
dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!S_ISDIR(fattr.mode)) {
|
||||
printk(KERN_ERR "nfs4_get_root:"
|
||||
" lookupfh encountered non-directory\n");
|
||||
return -ENOTDIR;
|
||||
}
|
||||
|
||||
/* FIXME: Referrals are quite valid here too */
|
||||
if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
|
||||
printk(KERN_ERR "nfs4_get_root:"
|
||||
" lookupfh obtained referral\n");
|
||||
return -EREMOTE;
|
||||
}
|
||||
|
||||
goto next_component;
|
||||
|
||||
path_walk_complete:
|
||||
memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
|
||||
dprintk("<-- nfs4_path_walk() = 0\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -239,8 +174,8 @@ path_walk_complete:
|
|||
struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
|
||||
{
|
||||
struct nfs_server *server = NFS_SB(sb);
|
||||
struct nfs_fattr fattr;
|
||||
struct dentry *mntroot;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
struct dentry *ret;
|
||||
struct inode *inode;
|
||||
int error;
|
||||
|
||||
|
@ -254,40 +189,50 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
|
|||
return ERR_PTR(error);
|
||||
}
|
||||
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
return ERR_PTR(-ENOMEM);;
|
||||
|
||||
/* get the actual root for this mount */
|
||||
error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
|
||||
error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
|
||||
if (error < 0) {
|
||||
dprintk("nfs_get_root: getattr error = %d\n", -error);
|
||||
return ERR_PTR(error);
|
||||
ret = ERR_PTR(error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
inode = nfs_fhget(sb, mntfh, &fattr);
|
||||
inode = nfs_fhget(sb, mntfh, fattr);
|
||||
if (IS_ERR(inode)) {
|
||||
dprintk("nfs_get_root: get root inode failed\n");
|
||||
return ERR_CAST(inode);
|
||||
ret = ERR_CAST(inode);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = nfs_superblock_set_dummy_root(sb, inode);
|
||||
if (error != 0)
|
||||
return ERR_PTR(error);
|
||||
if (error != 0) {
|
||||
ret = ERR_PTR(error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* root dentries normally start off anonymous and get spliced in later
|
||||
* if the dentry tree reaches them; however if the dentry already
|
||||
* exists, we'll pick it up at this point and use it as the root
|
||||
*/
|
||||
mntroot = d_obtain_alias(inode);
|
||||
if (IS_ERR(mntroot)) {
|
||||
ret = d_obtain_alias(inode);
|
||||
if (IS_ERR(ret)) {
|
||||
dprintk("nfs_get_root: get root dentry failed\n");
|
||||
return mntroot;
|
||||
goto out;
|
||||
}
|
||||
|
||||
security_d_instantiate(mntroot, inode);
|
||||
security_d_instantiate(ret, inode);
|
||||
|
||||
if (!mntroot->d_op)
|
||||
mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
|
||||
if (ret->d_op == NULL)
|
||||
ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
|
||||
|
||||
out:
|
||||
nfs_free_fattr(fattr);
|
||||
dprintk("<-- nfs4_get_root()\n");
|
||||
return mntroot;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_NFS_V4 */
|
||||
|
|
|
@ -393,8 +393,8 @@ int
|
|||
nfs_setattr(struct dentry *dentry, struct iattr *attr)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
struct nfs_fattr fattr;
|
||||
int error;
|
||||
struct nfs_fattr *fattr;
|
||||
int error = -ENOMEM;
|
||||
|
||||
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
|
||||
|
||||
|
@ -417,14 +417,20 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
filemap_write_and_wait(inode->i_mapping);
|
||||
nfs_wb_all(inode);
|
||||
}
|
||||
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto out;
|
||||
/*
|
||||
* Return any delegations if we're going to change ACLs
|
||||
*/
|
||||
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
|
||||
nfs_inode_return_delegation(inode);
|
||||
error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
|
||||
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
|
||||
if (error == 0)
|
||||
nfs_refresh_inode(inode, &fattr);
|
||||
nfs_refresh_inode(inode, fattr);
|
||||
nfs_free_fattr(fattr);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -682,7 +688,7 @@ int
|
|||
__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
||||
{
|
||||
int status = -ESTALE;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
|
||||
dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
|
||||
|
@ -693,8 +699,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
|||
if (NFS_STALE(inode))
|
||||
goto out;
|
||||
|
||||
status = -ENOMEM;
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto out;
|
||||
|
||||
nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
|
||||
status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
|
||||
status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr);
|
||||
if (status != 0) {
|
||||
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
|
||||
inode->i_sb->s_id,
|
||||
|
@ -707,7 +718,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
|||
goto out;
|
||||
}
|
||||
|
||||
status = nfs_refresh_inode(inode, &fattr);
|
||||
status = nfs_refresh_inode(inode, fattr);
|
||||
if (status) {
|
||||
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
|
||||
inode->i_sb->s_id,
|
||||
|
@ -723,6 +734,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
|||
(long long)NFS_FILEID(inode));
|
||||
|
||||
out:
|
||||
nfs_free_fattr(fattr);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -730,9 +742,14 @@ int nfs_attribute_timeout(struct inode *inode)
|
|||
{
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
|
||||
return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
|
||||
}
|
||||
|
||||
static int nfs_attribute_cache_expired(struct inode *inode)
|
||||
{
|
||||
if (nfs_have_delegated_attributes(inode))
|
||||
return 0;
|
||||
return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
|
||||
return nfs_attribute_timeout(inode);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -745,7 +762,7 @@ int nfs_attribute_timeout(struct inode *inode)
|
|||
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
||||
{
|
||||
if (!(NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATTR)
|
||||
&& !nfs_attribute_timeout(inode))
|
||||
&& !nfs_attribute_cache_expired(inode))
|
||||
return NFS_STALE(inode) ? -ESTALE : 0;
|
||||
return __nfs_revalidate_inode(server, inode);
|
||||
}
|
||||
|
@ -782,7 +799,8 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
|
|||
int ret = 0;
|
||||
|
||||
if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
|
||||
|| nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
|
||||
|| nfs_attribute_cache_expired(inode)
|
||||
|| NFS_STALE(inode)) {
|
||||
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
@@ -916,6 +934,26 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
        fattr->gencount = nfs_inc_attr_generation_counter();
}

struct nfs_fattr *nfs_alloc_fattr(void)
{
        struct nfs_fattr *fattr;

        fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
        if (fattr != NULL)
                nfs_fattr_init(fattr);
        return fattr;
}

struct nfs_fh *nfs_alloc_fhandle(void)
{
        struct nfs_fh *fh;

        fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
        if (fh != NULL)
                fh->size = 0;
        return fh;
}

/**
 * nfs_inode_attrs_need_update - check if the inode attributes need updating
 * @inode - pointer to inode
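The two helpers added above are what the rest of this series uses in place of on-stack struct nfs_fattr and struct nfs_fh. A minimal usage sketch follows; it is illustrative only (the caller name is made up), but it mirrors the allocate / check / free shape of the real call sites such as nfs_lookup() in fs/nfs/dir.c:

/* Illustrative sketch, not from the patch: pairing the allocation helpers
 * with nfs_free_fattr()/nfs_free_fhandle(), both of which tolerate NULL.
 */
static int demo_lookup(struct inode *dir, struct qstr *name)
{
        struct nfs_fh *fhandle;
        struct nfs_fattr *fattr;
        int error = -ENOMEM;

        fhandle = nfs_alloc_fhandle();
        fattr = nfs_alloc_fattr();
        if (fhandle == NULL || fattr == NULL)
                goto out;

        error = NFS_PROTO(dir)->lookup(dir, name, fhandle, fattr);
out:
        nfs_free_fattr(fattr);
        nfs_free_fhandle(fhandle);
        return error;
}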
@@ -244,9 +244,7 @@ extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *);
#ifdef CONFIG_NFS_V4
extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *);

extern int nfs4_path_walk(struct nfs_server *server,
                          struct nfs_fh *mntfh,
                          const char *path);
extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif

/* read.c */
@@ -36,14 +36,14 @@ static inline void nfs_inc_stats(const struct inode *inode,

static inline void nfs_add_server_stats(const struct nfs_server *server,
                                        enum nfs_stat_bytecounters stat,
                                        unsigned long addend)
                                        long addend)
{
        this_cpu_add(server->io_stats->bytes[stat], addend);
}

static inline void nfs_add_stats(const struct inode *inode,
                                 enum nfs_stat_bytecounters stat,
                                 unsigned long addend)
                                 long addend)
{
        nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}

@@ -51,7 +51,7 @@ static inline void nfs_add_stats(const struct inode *inode,
#ifdef CONFIG_NFS_FSCACHE
static inline void nfs_add_fscache_stats(struct inode *inode,
                                         enum nfs_stat_fscachecounters stat,
                                         unsigned long addend)
                                         long addend)
{
        this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
}
@ -105,8 +105,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
|
|||
struct vfsmount *mnt;
|
||||
struct nfs_server *server = NFS_SERVER(dentry->d_inode);
|
||||
struct dentry *parent;
|
||||
struct nfs_fh fh;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fh *fh = NULL;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
int err;
|
||||
|
||||
dprintk("--> nfs_follow_mountpoint()\n");
|
||||
|
@ -115,6 +115,12 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
|
|||
if (IS_ROOT(dentry))
|
||||
goto out_err;
|
||||
|
||||
err = -ENOMEM;
|
||||
fh = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fh == NULL || fattr == NULL)
|
||||
goto out_err;
|
||||
|
||||
dprintk("%s: enter\n", __func__);
|
||||
dput(nd->path.dentry);
|
||||
nd->path.dentry = dget(dentry);
|
||||
|
@ -123,16 +129,16 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
|
|||
parent = dget_parent(nd->path.dentry);
|
||||
err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
|
||||
&nd->path.dentry->d_name,
|
||||
&fh, &fattr);
|
||||
fh, fattr);
|
||||
dput(parent);
|
||||
if (err != 0)
|
||||
goto out_err;
|
||||
|
||||
if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
|
||||
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
|
||||
mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
|
||||
else
|
||||
mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
|
||||
&fattr);
|
||||
mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh,
|
||||
fattr);
|
||||
err = PTR_ERR(mnt);
|
||||
if (IS_ERR(mnt))
|
||||
goto out_err;
|
||||
|
@ -151,6 +157,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
|
|||
nd->path.dentry = dget(mnt->mnt_root);
|
||||
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
|
||||
out:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fh);
|
||||
dprintk("%s: done, returned %d\n", __func__, err);
|
||||
|
||||
dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
|
||||
|
|
|
@ -185,7 +185,6 @@ static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
|
|||
struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
struct nfs_fattr fattr;
|
||||
struct page *pages[NFSACL_MAXPAGES] = { };
|
||||
struct nfs3_getaclargs args = {
|
||||
.fh = NFS_FH(inode),
|
||||
|
@ -193,7 +192,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
|
|||
.pages = pages,
|
||||
};
|
||||
struct nfs3_getaclres res = {
|
||||
.fattr = &fattr,
|
||||
0
|
||||
};
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &args,
|
||||
|
@ -228,7 +227,10 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
|
|||
|
||||
dprintk("NFS call getacl\n");
|
||||
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL];
|
||||
nfs_fattr_init(&fattr);
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
status = rpc_call_sync(server->client_acl, &msg, 0);
|
||||
dprintk("NFS reply getacl: %d\n", status);
|
||||
|
||||
|
@ -238,7 +240,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
|
|||
|
||||
switch (status) {
|
||||
case 0:
|
||||
status = nfs_refresh_inode(inode, &fattr);
|
||||
status = nfs_refresh_inode(inode, res.fattr);
|
||||
break;
|
||||
case -EPFNOSUPPORT:
|
||||
case -EPROTONOSUPPORT:
|
||||
|
@ -278,6 +280,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
|
|||
getout:
|
||||
posix_acl_release(res.acl_access);
|
||||
posix_acl_release(res.acl_default);
|
||||
nfs_free_fattr(res.fattr);
|
||||
|
||||
if (status != 0) {
|
||||
posix_acl_release(acl);
|
||||
|
@ -290,7 +293,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
|
|||
struct posix_acl *dfacl)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr;
|
||||
struct page *pages[NFSACL_MAXPAGES];
|
||||
struct nfs3_setaclargs args = {
|
||||
.inode = inode,
|
||||
|
@ -335,8 +338,13 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
|
|||
}
|
||||
|
||||
dprintk("NFS call setacl\n");
|
||||
status = -ENOMEM;
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto out_freepages;
|
||||
|
||||
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
|
||||
nfs_fattr_init(&fattr);
|
||||
msg.rpc_resp = fattr;
|
||||
status = rpc_call_sync(server->client_acl, &msg, 0);
|
||||
nfs_access_zap_cache(inode);
|
||||
nfs_zap_acl_cache(inode);
|
||||
|
@ -344,7 +352,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
|
|||
|
||||
switch (status) {
|
||||
case 0:
|
||||
status = nfs_refresh_inode(inode, &fattr);
|
||||
status = nfs_refresh_inode(inode, fattr);
|
||||
nfs3_cache_acls(inode, acl, dfacl);
|
||||
break;
|
||||
case -EPFNOSUPPORT:
|
||||
|
@ -355,6 +363,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
|
|||
case -ENOTSUPP:
|
||||
status = -EOPNOTSUPP;
|
||||
}
|
||||
nfs_free_fattr(fattr);
|
||||
out_freepages:
|
||||
while (args.npages != 0) {
|
||||
args.npages--;
|
||||
|
|
|
@ -144,14 +144,12 @@ static int
|
|||
nfs3_proc_lookup(struct inode *dir, struct qstr *name,
|
||||
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
|
||||
{
|
||||
struct nfs_fattr dir_attr;
|
||||
struct nfs3_diropargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
.name = name->name,
|
||||
.len = name->len
|
||||
};
|
||||
struct nfs3_diropres res = {
|
||||
.dir_attr = &dir_attr,
|
||||
.fh = fhandle,
|
||||
.fattr = fattr
|
||||
};
|
||||
|
@ -163,29 +161,30 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name,
|
|||
int status;
|
||||
|
||||
dprintk("NFS call lookup %s\n", name->name);
|
||||
nfs_fattr_init(&dir_attr);
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.dir_attr == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
nfs_fattr_init(fattr);
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_refresh_inode(dir, &dir_attr);
|
||||
nfs_refresh_inode(dir, res.dir_attr);
|
||||
if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
|
||||
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
|
||||
msg.rpc_argp = fhandle;
|
||||
msg.rpc_resp = fattr;
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
}
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
dprintk("NFS reply lookup: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
||||
{
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs3_accessargs arg = {
|
||||
.fh = NFS_FH(inode),
|
||||
};
|
||||
struct nfs3_accessres res = {
|
||||
.fattr = &fattr,
|
||||
};
|
||||
struct nfs3_accessres res;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
|
||||
.rpc_argp = &arg,
|
||||
|
@ -193,7 +192,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
|||
.rpc_cred = entry->cred,
|
||||
};
|
||||
int mode = entry->mask;
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call access\n");
|
||||
|
||||
|
@ -210,9 +209,13 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
|||
if (mode & MAY_EXEC)
|
||||
arg.access |= NFS3_ACCESS_EXECUTE;
|
||||
}
|
||||
nfs_fattr_init(&fattr);
|
||||
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
|
||||
nfs_refresh_inode(inode, &fattr);
|
||||
nfs_refresh_inode(inode, res.fattr);
|
||||
if (status == 0) {
|
||||
entry->mask = 0;
|
||||
if (res.access & NFS3_ACCESS_READ)
|
||||
|
@ -222,6 +225,8 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
|||
if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
|
||||
entry->mask |= MAY_EXEC;
|
||||
}
|
||||
nfs_free_fattr(res.fattr);
|
||||
out:
|
||||
dprintk("NFS reply access: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -229,7 +234,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
|||
static int nfs3_proc_readlink(struct inode *inode, struct page *page,
|
||||
unsigned int pgbase, unsigned int pglen)
|
||||
{
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fattr *fattr;
|
||||
struct nfs3_readlinkargs args = {
|
||||
.fh = NFS_FH(inode),
|
||||
.pgbase = pgbase,
|
||||
|
@ -239,14 +244,19 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
|
|||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_READLINK],
|
||||
.rpc_argp = &args,
|
||||
.rpc_resp = &fattr,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call readlink\n");
|
||||
nfs_fattr_init(&fattr);
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fattr == NULL)
|
||||
goto out;
|
||||
msg.rpc_resp = fattr;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
|
||||
nfs_refresh_inode(inode, &fattr);
|
||||
nfs_refresh_inode(inode, fattr);
|
||||
nfs_free_fattr(fattr);
|
||||
out:
|
||||
dprintk("NFS reply readlink: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -396,12 +406,17 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name)
|
|||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call remove %s\n", name->name);
|
||||
nfs_fattr_init(&res.dir_attr);
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_post_op_update_inode(dir, &res.dir_attr);
|
||||
nfs_post_op_update_inode(dir, res.dir_attr);
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
out:
|
||||
dprintk("NFS reply remove: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -419,7 +434,7 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
|
|||
if (nfs3_async_handle_jukebox(task, dir))
|
||||
return 0;
|
||||
res = task->tk_msg.rpc_resp;
|
||||
nfs_post_op_update_inode(dir, &res->dir_attr);
|
||||
nfs_post_op_update_inode(dir, res->dir_attr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -427,7 +442,6 @@ static int
|
|||
nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
|
||||
struct inode *new_dir, struct qstr *new_name)
|
||||
{
|
||||
struct nfs_fattr old_dir_attr, new_dir_attr;
|
||||
struct nfs3_renameargs arg = {
|
||||
.fromfh = NFS_FH(old_dir),
|
||||
.fromname = old_name->name,
|
||||
|
@ -436,23 +450,27 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
|
|||
.toname = new_name->name,
|
||||
.tolen = new_name->len
|
||||
};
|
||||
struct nfs3_renameres res = {
|
||||
.fromattr = &old_dir_attr,
|
||||
.toattr = &new_dir_attr
|
||||
};
|
||||
struct nfs3_renameres res;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
|
||||
nfs_fattr_init(&old_dir_attr);
|
||||
nfs_fattr_init(&new_dir_attr);
|
||||
|
||||
res.fromattr = nfs_alloc_fattr();
|
||||
res.toattr = nfs_alloc_fattr();
|
||||
if (res.fromattr == NULL || res.toattr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
|
||||
nfs_post_op_update_inode(old_dir, &old_dir_attr);
|
||||
nfs_post_op_update_inode(new_dir, &new_dir_attr);
|
||||
nfs_post_op_update_inode(old_dir, res.fromattr);
|
||||
nfs_post_op_update_inode(new_dir, res.toattr);
|
||||
out:
|
||||
nfs_free_fattr(res.toattr);
|
||||
nfs_free_fattr(res.fromattr);
|
||||
dprintk("NFS reply rename: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -460,30 +478,32 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
|
|||
static int
|
||||
nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
|
||||
{
|
||||
struct nfs_fattr dir_attr, fattr;
|
||||
struct nfs3_linkargs arg = {
|
||||
.fromfh = NFS_FH(inode),
|
||||
.tofh = NFS_FH(dir),
|
||||
.toname = name->name,
|
||||
.tolen = name->len
|
||||
};
|
||||
struct nfs3_linkres res = {
|
||||
.dir_attr = &dir_attr,
|
||||
.fattr = &fattr
|
||||
};
|
||||
struct nfs3_linkres res;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_LINK],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call link %s\n", name->name);
|
||||
nfs_fattr_init(&dir_attr);
|
||||
nfs_fattr_init(&fattr);
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL || res.dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
|
||||
nfs_post_op_update_inode(dir, &dir_attr);
|
||||
nfs_post_op_update_inode(inode, &fattr);
|
||||
nfs_post_op_update_inode(dir, res.dir_attr);
|
||||
nfs_post_op_update_inode(inode, res.fattr);
|
||||
out:
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
nfs_free_fattr(res.fattr);
|
||||
dprintk("NFS reply link: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -554,7 +574,7 @@ out:
|
|||
static int
|
||||
nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
|
||||
{
|
||||
struct nfs_fattr dir_attr;
|
||||
struct nfs_fattr *dir_attr;
|
||||
struct nfs3_diropargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
.name = name->name,
|
||||
|
@ -563,14 +583,19 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
|
|||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_RMDIR],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &dir_attr,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call rmdir %s\n", name->name);
|
||||
nfs_fattr_init(&dir_attr);
|
||||
dir_attr = nfs_alloc_fattr();
|
||||
if (dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
msg.rpc_resp = dir_attr;
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_post_op_update_inode(dir, &dir_attr);
|
||||
nfs_post_op_update_inode(dir, dir_attr);
|
||||
nfs_free_fattr(dir_attr);
|
||||
out:
|
||||
dprintk("NFS reply rmdir: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -589,7 +614,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
|
|||
u64 cookie, struct page *page, unsigned int count, int plus)
|
||||
{
|
||||
struct inode *dir = dentry->d_inode;
|
||||
struct nfs_fattr dir_attr;
|
||||
__be32 *verf = NFS_COOKIEVERF(dir);
|
||||
struct nfs3_readdirargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
|
@ -600,7 +624,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
|
|||
.pages = &page
|
||||
};
|
||||
struct nfs3_readdirres res = {
|
||||
.dir_attr = &dir_attr,
|
||||
.verf = verf,
|
||||
.plus = plus
|
||||
};
|
||||
|
@ -610,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
|
|||
.rpc_resp = &res,
|
||||
.rpc_cred = cred
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
if (plus)
|
||||
msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
|
||||
|
@ -618,12 +641,17 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
|
|||
dprintk("NFS call readdir%s %d\n",
|
||||
plus? "plus" : "", (unsigned int) cookie);
|
||||
|
||||
nfs_fattr_init(&dir_attr);
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
|
||||
nfs_invalidate_atime(dir);
|
||||
nfs_refresh_inode(dir, res.dir_attr);
|
||||
|
||||
nfs_refresh_inode(dir, &dir_attr);
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
out:
|
||||
dprintk("NFS reply readdir: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
|
@@ -762,7 +762,7 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
static int
nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
{
        return nfs3_xdr_wccstat(req, p, &res->dir_attr);
        return nfs3_xdr_wccstat(req, p, res->dir_attr);
}

/*
@@ -206,14 +206,14 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);

/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);

@@ -286,7 +286,7 @@ extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);

extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
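The prototype changes just above (nfs_alloc_seqid() and nfs4_do_close() gaining a gfp_t argument) show how the "don't use GFP_KERNEL allocations in state recovery" work threads the allocation context down from the caller instead of hard-coding it. A hedged sketch of the idea, with hypothetical names (the real callers in fs/nfs/nfs4proc.c pass GFP_KERNEL on the ordinary open path and GFP_NOFS from the recovery paths):

/* Illustrative sketch, not from the patch: let the caller choose the
 * allocation context so reclaim/recovery paths can demand GFP_NOFS.
 */
struct demo_seqid {
        int value;
};

static struct demo_seqid *demo_alloc_seqid(gfp_t gfp_mask)
{
        return kzalloc(sizeof(struct demo_seqid), gfp_mask);
}

static struct demo_seqid *demo_open_path(void)
{
        return demo_alloc_seqid(GFP_KERNEL);    /* ordinary syscall context */
}

static struct demo_seqid *demo_recovery_path(void)
{
        return demo_alloc_seqid(GFP_NOFS);      /* must not recurse into the fs */
}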
@ -115,6 +115,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
|
|||
char *page, char *page2,
|
||||
const struct nfs4_fs_location *location)
|
||||
{
|
||||
const size_t addr_bufsize = sizeof(struct sockaddr_storage);
|
||||
struct vfsmount *mnt = ERR_PTR(-ENOENT);
|
||||
char *mnt_path;
|
||||
unsigned int maxbuflen;
|
||||
|
@ -126,9 +127,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
|
|||
mountdata->mnt_path = mnt_path;
|
||||
maxbuflen = mnt_path - 1 - page2;
|
||||
|
||||
mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
|
||||
if (mountdata->addr == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (s = 0; s < location->nservers; s++) {
|
||||
const struct nfs4_string *buf = &location->servers[s];
|
||||
struct sockaddr_storage addr;
|
||||
|
||||
if (buf->len <= 0 || buf->len >= maxbuflen)
|
||||
continue;
|
||||
|
@ -137,11 +141,10 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
|
|||
continue;
|
||||
|
||||
mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
|
||||
(struct sockaddr *)&addr, sizeof(addr));
|
||||
mountdata->addr, addr_bufsize);
|
||||
if (mountdata->addrlen == 0)
|
||||
continue;
|
||||
|
||||
mountdata->addr = (struct sockaddr *)&addr;
|
||||
rpc_set_port(mountdata->addr, NFS_PORT);
|
||||
|
||||
memcpy(page2, buf->data, buf->len);
|
||||
|
@ -156,6 +159,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
|
|||
if (!IS_ERR(mnt))
|
||||
break;
|
||||
}
|
||||
kfree(mountdata->addr);
|
||||
return mnt;
|
||||
}
|
||||
|
||||
|
@ -221,8 +225,8 @@ out:
|
|||
|
||||
/*
|
||||
* nfs_do_refmount - handle crossing a referral on server
|
||||
* @mnt_parent - mountpoint of referral
|
||||
* @dentry - dentry of referral
|
||||
* @nd - nameidata info
|
||||
*
|
||||
*/
|
||||
struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentry *dentry)
|
||||
|
|
|
@ -70,6 +70,9 @@ static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinf
|
|||
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
|
||||
static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
|
||||
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
|
||||
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
|
||||
struct nfs_fattr *fattr, struct iattr *sattr,
|
||||
struct nfs4_state *state);
|
||||
|
||||
/* Prevent leaks of NFSv4 errors into userland */
|
||||
static int nfs4_map_errors(int err)
|
||||
|
@ -714,17 +717,18 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
|
|||
|
||||
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
|
||||
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
|
||||
const struct iattr *attrs)
|
||||
const struct iattr *attrs,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct dentry *parent = dget_parent(path->dentry);
|
||||
struct inode *dir = parent->d_inode;
|
||||
struct nfs_server *server = NFS_SERVER(dir);
|
||||
struct nfs4_opendata *p;
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
p = kzalloc(sizeof(*p), gfp_mask);
|
||||
if (p == NULL)
|
||||
goto err;
|
||||
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
|
||||
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
|
||||
if (p->o_arg.seqid == NULL)
|
||||
goto err_free;
|
||||
path_get(path);
|
||||
|
@ -1060,7 +1064,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
|
|||
{
|
||||
struct nfs4_opendata *opendata;
|
||||
|
||||
opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL);
|
||||
opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
|
||||
if (opendata == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
opendata->state = state;
|
||||
|
@ -1648,7 +1652,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
|
|||
if (path->dentry->d_inode != NULL)
|
||||
nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
|
||||
status = -ENOMEM;
|
||||
opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr);
|
||||
opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
|
||||
if (opendata == NULL)
|
||||
goto err_put_state_owner;
|
||||
|
||||
|
@ -1659,15 +1663,24 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
|
|||
if (status != 0)
|
||||
goto err_opendata_put;
|
||||
|
||||
if (opendata->o_arg.open_flags & O_EXCL)
|
||||
nfs4_exclusive_attrset(opendata, sattr);
|
||||
|
||||
state = nfs4_opendata_to_nfs4_state(opendata);
|
||||
status = PTR_ERR(state);
|
||||
if (IS_ERR(state))
|
||||
goto err_opendata_put;
|
||||
if (server->caps & NFS_CAP_POSIX_LOCK)
|
||||
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
|
||||
|
||||
if (opendata->o_arg.open_flags & O_EXCL) {
|
||||
nfs4_exclusive_attrset(opendata, sattr);
|
||||
|
||||
nfs_fattr_init(opendata->o_res.f_attr);
|
||||
status = nfs4_do_setattr(state->inode, cred,
|
||||
opendata->o_res.f_attr, sattr,
|
||||
state);
|
||||
if (status == 0)
|
||||
nfs_setattr_update_inode(state->inode, sattr);
|
||||
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
|
||||
}
|
||||
nfs4_opendata_put(opendata);
|
||||
nfs4_put_state_owner(sp);
|
||||
*res = state;
|
||||
|
@ -1914,7 +1927,7 @@ static const struct rpc_call_ops nfs4_close_ops = {
|
|||
*
|
||||
* NOTE: Caller must be holding the sp->so_owner semaphore!
|
||||
*/
|
||||
int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
||||
int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(state->inode);
|
||||
struct nfs4_closedata *calldata;
|
||||
|
@ -1933,7 +1946,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
};
|
||||
int status = -ENOMEM;
|
||||
|
||||
calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
|
||||
calldata = kzalloc(sizeof(*calldata), gfp_mask);
|
||||
if (calldata == NULL)
|
||||
goto out;
|
||||
calldata->inode = state->inode;
|
||||
|
@ -1941,7 +1954,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
calldata->arg.fh = NFS_FH(state->inode);
|
||||
calldata->arg.stateid = &state->open_stateid;
|
||||
/* Serialization for the sequence id */
|
||||
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
|
||||
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
|
||||
if (calldata->arg.seqid == NULL)
|
||||
goto out_free_calldata;
|
||||
calldata->arg.fmode = 0;
|
||||
|
@ -2404,14 +2417,12 @@ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh
|
|||
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs4_accessargs args = {
|
||||
.fh = NFS_FH(inode),
|
||||
.bitmask = server->attr_bitmask,
|
||||
};
|
||||
struct nfs4_accessres res = {
|
||||
.server = server,
|
||||
.fattr = &fattr,
|
||||
};
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
|
||||
|
@ -2438,7 +2449,11 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
|
|||
if (mode & MAY_EXEC)
|
||||
args.access |= NFS4_ACCESS_EXECUTE;
|
||||
}
|
||||
nfs_fattr_init(&fattr);
|
||||
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
status = nfs4_call_sync(server, &msg, &args, &res, 0);
|
||||
if (!status) {
|
||||
entry->mask = 0;
|
||||
|
@ -2448,8 +2463,9 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
|
|||
entry->mask |= MAY_WRITE;
|
||||
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
|
||||
entry->mask |= MAY_EXEC;
|
||||
nfs_refresh_inode(inode, &fattr);
|
||||
nfs_refresh_inode(inode, res.fattr);
|
||||
}
|
||||
nfs_free_fattr(res.fattr);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -2562,13 +2578,6 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
|||
}
|
||||
d_add(dentry, igrab(state->inode));
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
if (flags & O_EXCL) {
|
||||
struct nfs_fattr fattr;
|
||||
status = nfs4_do_setattr(state->inode, cred, &fattr, sattr, state);
|
||||
if (status == 0)
|
||||
nfs_setattr_update_inode(state->inode, sattr);
|
||||
nfs_post_op_update_inode(state->inode, &fattr);
|
||||
}
|
||||
if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
|
||||
status = nfs4_intent_set_file(nd, &path, state, fmode);
|
||||
else
|
||||
|
@ -2596,14 +2605,19 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
|
|||
.rpc_argp = &args,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
nfs_fattr_init(&res.dir_attr);
|
||||
status = nfs4_call_sync(server, &msg, &args, &res, 1);
|
||||
if (status == 0) {
|
||||
update_changeattr(dir, &res.cinfo);
|
||||
nfs_post_op_update_inode(dir, &res.dir_attr);
|
||||
nfs_post_op_update_inode(dir, res.dir_attr);
|
||||
}
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
out:
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -2638,7 +2652,7 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
|
|||
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
|
||||
return 0;
|
||||
update_changeattr(dir, &res->cinfo);
|
||||
nfs_post_op_update_inode(dir, &res->dir_attr);
|
||||
nfs_post_op_update_inode(dir, res->dir_attr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -2653,29 +2667,31 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
|
|||
.new_name = new_name,
|
||||
.bitmask = server->attr_bitmask,
|
||||
};
|
||||
struct nfs_fattr old_fattr, new_fattr;
|
||||
struct nfs4_rename_res res = {
|
||||
.server = server,
|
||||
.old_fattr = &old_fattr,
|
||||
.new_fattr = &new_fattr,
|
||||
};
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
res.old_fattr = nfs_alloc_fattr();
|
||||
res.new_fattr = nfs_alloc_fattr();
|
||||
if (res.old_fattr == NULL || res.new_fattr == NULL)
|
||||
goto out;
|
||||
|
||||
nfs_fattr_init(res.old_fattr);
|
||||
nfs_fattr_init(res.new_fattr);
|
||||
status = nfs4_call_sync(server, &msg, &arg, &res, 1);
|
||||
|
||||
if (!status) {
|
||||
update_changeattr(old_dir, &res.old_cinfo);
|
||||
nfs_post_op_update_inode(old_dir, res.old_fattr);
|
||||
update_changeattr(new_dir, &res.new_cinfo);
|
||||
nfs_post_op_update_inode(new_dir, res.new_fattr);
|
||||
}
|
||||
out:
|
||||
nfs_free_fattr(res.new_fattr);
|
||||
nfs_free_fattr(res.old_fattr);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -2702,28 +2718,30 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
|
|||
.name = name,
|
||||
.bitmask = server->attr_bitmask,
|
||||
};
|
||||
struct nfs_fattr fattr, dir_attr;
|
||||
struct nfs4_link_res res = {
|
||||
.server = server,
|
||||
.fattr = &fattr,
|
||||
.dir_attr = &dir_attr,
|
||||
};
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
res.dir_attr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL || res.dir_attr == NULL)
|
||||
goto out;
|
||||
|
||||
nfs_fattr_init(res.fattr);
|
||||
nfs_fattr_init(res.dir_attr);
|
||||
status = nfs4_call_sync(server, &msg, &arg, &res, 1);
|
||||
if (!status) {
|
||||
update_changeattr(dir, &res.cinfo);
|
||||
nfs_post_op_update_inode(dir, res.dir_attr);
|
||||
nfs_post_op_update_inode(inode, res.fattr);
|
||||
}
|
||||
|
||||
out:
|
||||
nfs_free_fattr(res.dir_attr);
|
||||
nfs_free_fattr(res.fattr);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -3146,23 +3164,31 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
|
|||
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
|
||||
}
|
||||
|
||||
struct nfs4_renewdata {
|
||||
struct nfs_client *client;
|
||||
unsigned long timestamp;
|
||||
};
|
||||
|
||||
/*
|
||||
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
|
||||
* standalone procedure for queueing an asynchronous RENEW.
|
||||
*/
|
||||
static void nfs4_renew_release(void *data)
|
||||
static void nfs4_renew_release(void *calldata)
|
||||
{
|
||||
struct nfs_client *clp = data;
|
||||
struct nfs4_renewdata *data = calldata;
|
||||
struct nfs_client *clp = data->client;
|
||||
|
||||
if (atomic_read(&clp->cl_count) > 1)
|
||||
nfs4_schedule_state_renewal(clp);
|
||||
nfs_put_client(clp);
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static void nfs4_renew_done(struct rpc_task *task, void *data)
|
||||
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs_client *clp = data;
|
||||
unsigned long timestamp = task->tk_start;
|
||||
struct nfs4_renewdata *data = calldata;
|
||||
struct nfs_client *clp = data->client;
|
||||
unsigned long timestamp = data->timestamp;
|
||||
|
||||
if (task->tk_status < 0) {
|
||||
/* Unless we're shutting down, schedule state recovery! */
|
||||
|
@ -3188,11 +3214,17 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
|
|||
.rpc_argp = clp,
|
||||
.rpc_cred = cred,
|
||||
};
|
||||
struct nfs4_renewdata *data;
|
||||
|
||||
if (!atomic_inc_not_zero(&clp->cl_count))
|
||||
return -EIO;
|
||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (data == NULL)
|
||||
return -ENOMEM;
|
||||
data->client = clp;
|
||||
data->timestamp = jiffies;
|
||||
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
|
||||
&nfs4_renew_ops, clp);
|
||||
&nfs4_renew_ops, data);
|
||||
}
|
||||
|
||||
int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
|
||||
|
@ -3494,7 +3526,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
|
|||
return _nfs4_async_handle_error(task, server, server->nfs_client, state);
|
||||
}
|
||||
|
||||
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
|
||||
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
|
||||
unsigned short port, struct rpc_cred *cred,
|
||||
struct nfs4_setclientid_res *res)
|
||||
{
|
||||
nfs4_verifier sc_verifier;
|
||||
struct nfs4_setclientid setclientid = {
|
||||
|
@ -3504,7 +3538,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
|
|||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
|
||||
.rpc_argp = &setclientid,
|
||||
.rpc_resp = clp,
|
||||
.rpc_resp = res,
|
||||
.rpc_cred = cred,
|
||||
};
|
||||
__be32 *p;
|
||||
|
@ -3547,12 +3581,14 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
|
|||
return status;
|
||||
}
|
||||
|
||||
static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
|
||||
static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp,
|
||||
struct nfs4_setclientid_res *arg,
|
||||
struct rpc_cred *cred)
|
||||
{
|
||||
struct nfs_fsinfo fsinfo;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
|
||||
.rpc_argp = clp,
|
||||
.rpc_argp = arg,
|
||||
.rpc_resp = &fsinfo,
|
||||
.rpc_cred = cred,
|
||||
};
|
||||
|
@ -3570,12 +3606,14 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre
|
|||
return status;
|
||||
}
|
||||
|
||||
int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
|
||||
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
|
||||
struct nfs4_setclientid_res *arg,
|
||||
struct rpc_cred *cred)
|
||||
{
|
||||
long timeout = 0;
|
||||
int err;
|
||||
do {
|
||||
err = _nfs4_proc_setclientid_confirm(clp, cred);
|
||||
err = _nfs4_proc_setclientid_confirm(clp, arg, cred);
|
||||
switch (err) {
|
||||
case 0:
|
||||
return err;
|
||||
|
@ -3667,7 +3705,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
|
|||
};
|
||||
int status = 0;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
data = kzalloc(sizeof(*data), GFP_NOFS);
|
||||
if (data == NULL)
|
||||
return -ENOMEM;
|
||||
data->args.fhandle = &data->fh;
|
||||
|
@ -3823,7 +3861,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
|
|||
struct nfs4_unlockdata *p;
|
||||
struct inode *inode = lsp->ls_state->inode;
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
p = kzalloc(sizeof(*p), GFP_NOFS);
|
||||
if (p == NULL)
|
||||
return NULL;
|
||||
p->arg.fh = NFS_FH(inode);
|
||||
|
@ -3961,7 +3999,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
|
|||
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
|
||||
goto out;
|
||||
lsp = request->fl_u.nfs4_fl.owner;
|
||||
seqid = nfs_alloc_seqid(&lsp->ls_seqid);
|
||||
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
|
||||
status = -ENOMEM;
|
||||
if (seqid == NULL)
|
||||
goto out;
|
||||
|
@ -3989,22 +4027,23 @@ struct nfs4_lockdata {
|
|||
};
|
||||
|
||||
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
|
||||
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
|
||||
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct nfs4_lockdata *p;
|
||||
struct inode *inode = lsp->ls_state->inode;
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
p = kzalloc(sizeof(*p), gfp_mask);
|
||||
if (p == NULL)
|
||||
return NULL;
|
||||
|
||||
p->arg.fh = NFS_FH(inode);
|
||||
p->arg.fl = &p->fl;
|
||||
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
|
||||
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
|
||||
if (p->arg.open_seqid == NULL)
|
||||
goto out_free;
|
||||
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
|
||||
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
|
||||
if (p->arg.lock_seqid == NULL)
|
||||
goto out_free_seqid;
|
||||
p->arg.lock_stateid = &lsp->ls_stateid;
|
||||
|
@ -4158,7 +4197,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
|
|||
|
||||
dprintk("%s: begin!\n", __func__);
|
||||
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
|
||||
fl->fl_u.nfs4_fl.owner);
|
||||
fl->fl_u.nfs4_fl.owner,
|
||||
recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
|
||||
if (data == NULL)
|
||||
return -ENOMEM;
|
||||
if (IS_SETLKW(cmd))
|
||||
|
@ -4647,7 +4687,7 @@ static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
|
|||
if (max_reqs != tbl->max_slots) {
|
||||
ret = -ENOMEM;
|
||||
new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
|
||||
GFP_KERNEL);
|
||||
GFP_NOFS);
|
||||
if (!new)
|
||||
goto out;
|
||||
ret = 0;
|
||||
|
@ -4712,7 +4752,7 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
|
|||
|
||||
dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
|
||||
|
||||
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
|
||||
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
|
||||
if (!slot)
|
||||
goto out;
|
||||
ret = 0;
|
||||
|
@ -4761,7 +4801,7 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
|
|||
struct nfs4_session *session;
|
||||
struct nfs4_slot_table *tbl;
|
||||
|
||||
session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
|
||||
session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
|
||||
if (!session)
|
||||
return NULL;
|
||||
|
||||
|
@ -5105,8 +5145,8 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
|
|||
|
||||
if (!atomic_inc_not_zero(&clp->cl_count))
|
||||
return -EIO;
|
||||
args = kzalloc(sizeof(*args), GFP_KERNEL);
|
||||
res = kzalloc(sizeof(*res), GFP_KERNEL);
|
||||
args = kzalloc(sizeof(*args), GFP_NOFS);
|
||||
res = kzalloc(sizeof(*res), GFP_NOFS);
|
||||
if (!args || !res) {
|
||||
kfree(args);
|
||||
kfree(res);
|
||||
|
@ -5207,7 +5247,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
|
|||
int status = -ENOMEM;
|
||||
|
||||
dprintk("--> %s\n", __func__);
|
||||
calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
|
||||
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
|
||||
if (calldata == NULL)
|
||||
goto out;
|
||||
calldata->clp = clp;
|
||||
|
|
|
@@ -62,6 +62,7 @@ static LIST_HEAD(nfs4_clientid_list);

int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs4_setclientid_res clid;
unsigned short port;
int status;

@@ -69,11 +70,15 @@ int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
if (clp->cl_addr.ss_family == AF_INET6)
port = nfs_callback_tcpport6;

status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
if (status == 0)
status = nfs4_proc_setclientid_confirm(clp, cred);
if (status == 0)
status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
if (status != 0)
goto out;
status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
if (status != 0)
goto out;
clp->cl_clientid = clid.clientid;
nfs4_schedule_state_renewal(clp);
out:
return status;
}

@@ -361,7 +366,7 @@ nfs4_alloc_state_owner(void)
{
struct nfs4_state_owner *sp;

sp = kzalloc(sizeof(*sp),GFP_KERNEL);
sp = kzalloc(sizeof(*sp),GFP_NOFS);
if (!sp)
return NULL;
spin_lock_init(&sp->so_lock);

@@ -435,7 +440,7 @@ nfs4_alloc_open_state(void)
{
struct nfs4_state *state;

state = kzalloc(sizeof(*state), GFP_KERNEL);
state = kzalloc(sizeof(*state), GFP_NOFS);
if (!state)
return NULL;
atomic_set(&state->count, 1);

@@ -537,7 +542,8 @@ void nfs4_put_open_state(struct nfs4_state *state)
/*
* Close the current file.
*/
static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
static void __nfs4_close(struct path *path, struct nfs4_state *state,
fmode_t fmode, gfp_t gfp_mask, int wait)
{
struct nfs4_state_owner *owner = state->owner;
int call_close = 0;

@@ -578,17 +584,17 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fm
nfs4_put_open_state(state);
nfs4_put_state_owner(owner);
} else
nfs4_do_close(path, state, wait);
nfs4_do_close(path, state, gfp_mask, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
__nfs4_close(path, state, fmode, 0);
__nfs4_close(path, state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
__nfs4_close(path, state, fmode, 1);
__nfs4_close(path, state, fmode, GFP_KERNEL, 1);
}

/*

@@ -618,7 +624,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
struct nfs4_lock_state *lsp;
struct nfs_client *clp = state->owner->so_client;

lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
if (lsp == NULL)
return NULL;
rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");

@@ -754,11 +760,11 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
struct nfs_seqid *new;

new = kmalloc(sizeof(*new), GFP_KERNEL);
new = kmalloc(sizeof(*new), gfp_mask);
if (new != NULL) {
new->sequence = counter;
INIT_LIST_HEAD(&new->list);

@@ -1347,7 +1353,7 @@ static int nfs4_recall_slot(struct nfs_client *clp)

nfs4_begin_drain_session(clp);
new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
GFP_KERNEL);
GFP_NOFS);
if (!new)
return -ENOMEM;
@@ -1504,14 +1504,14 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
hdr->replen += decode_setclientid_maxsz;
}

static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
{
__be32 *p;

p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
p = xdr_encode_hyper(p, client_state->cl_clientid);
xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
p = xdr_encode_hyper(p, arg->clientid);
xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}

@@ -2324,7 +2324,7 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4
/*
* a SETCLIENTID_CONFIRM request
*/
static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {

@@ -2334,7 +2334,7 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str

xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid_confirm(&xdr, clp, &hdr);
encode_setclientid_confirm(&xdr, arg, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
encode_nops(&hdr);

@@ -4397,7 +4397,7 @@ out_overflow:
return -EIO;
}

static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
{
__be32 *p;
uint32_t opnum;

@@ -4417,8 +4417,8 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &clp->cl_clientid);
memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
p = xdr_decode_hyper(p, &res->clientid);
memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;

@@ -4815,7 +4815,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
decode_getfattr(&xdr, &res->dir_attr, res->server,
decode_getfattr(&xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;

@@ -5498,7 +5498,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
* Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
struct nfs_client *clp)
struct nfs4_setclientid_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;

@@ -5507,7 +5507,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
status = decode_setclientid(&xdr, clp);
status = decode_setclientid(&xdr, res);
return status;
}
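encode_setclientid_confirm() and decode_setclientid() now work on the new struct nfs4_setclientid_res rather than writing the clientid and verifier straight into struct nfs_client. On the wire the confirm operation is a 4-byte opcode, an 8-byte big-endian clientid and an 8-byte verifier, which is where reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE) comes from. A standalone sketch of that byte layout follows; the opcode value and helper names are assumptions for illustration, not the kernel's XDR API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char *put32(unsigned char *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
	return p + 4;
}

static unsigned char *put64(unsigned char *p, uint64_t v)
{
	p = put32(p, (uint32_t)(v >> 32));
	return put32(p, (uint32_t)v);
}

int main(void)
{
	unsigned char buf[20], *p = buf;	/* 12 bytes + 8-byte verifier */
	const unsigned char verifier[8] = "abcdefgh";
	uint32_t op_setclientid_confirm = 36;	/* assumed RFC 3530 opcode */

	p = put32(p, op_setclientid_confirm);
	p = put64(p, 0x1122334455667788ULL);	/* clientid, big-endian "hyper" */
	memcpy(p, verifier, sizeof(verifier));	/* opaque fixed, already 4-byte aligned */
	printf("encoded %zu bytes\n", (size_t)(p + 8 - buf));
	return 0;
}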
@@ -488,7 +488,6 @@ static int __init root_nfs_ports(void)
*/
static int __init root_nfs_get_handle(void)
{
struct nfs_fh fh;
struct sockaddr_in sin;
unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {

@@ -499,21 +498,24 @@ static int __init root_nfs_get_handle(void)
NFS_MNT3_VERSION : NFS_MNT_VERSION,
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
.fh = &fh,
.auth_flav_len = &auth_flav_len,
};
int status;
int status = -ENOMEM;

request.fh = nfs_alloc_fhandle();
if (!request.fh)
goto out;
set_sockaddr(&sin, servaddr, htons(mount_port));
status = nfs_mount(&request);
if (status < 0)
printk(KERN_ERR "Root-NFS: Server returned error %d "
"while mounting %s\n", status, nfs_export_path);
else {
nfs_data.root.size = fh.size;
memcpy(nfs_data.root.data, fh.data, fh.size);
nfs_data.root.size = request.fh->size;
memcpy(&nfs_data.root.data, request.fh->data, request.fh->size);
}

nfs_free_fhandle(request.fh);
out:
return status;
}
@@ -60,16 +60,10 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
{
struct nfs_page *req;

for (;;) {
/* try to allocate the request struct */
req = nfs_page_alloc();
if (req != NULL)
break;

if (fatal_signal_pending(current))
return ERR_PTR(-ERESTARTSYS);
yield();
}
if (req == NULL)
return ERR_PTR(-ENOMEM);

/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
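nfs_create_request() above drops the retry-and-yield loop: a failed allocation now simply returns ERR_PTR(-ENOMEM), and the caller checks for an encoded errno instead of a real pointer. For readers unfamiliar with the idiom, a userspace re-implementation of the pointer/errno encoding (the kernel's own ERR_PTR/IS_ERR/PTR_ERR live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)error; }
static int   is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO; }
static long  ptr_err(const void *ptr) { return (long)ptr; }

static void *create_request(size_t size)
{
	void *req = malloc(size);

	if (req == NULL)
		return err_ptr(-ENOMEM);	/* no retry loop, just report the failure */
	return req;
}

int main(void)
{
	void *req = create_request(64);

	if (is_err(req)) {
		printf("failed: %ld\n", ptr_err(req));
		return 1;
	}
	free(req);
	return 0;
}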
fs/nfs/proc.c
@ -224,35 +224,60 @@ static int nfs_proc_readlink(struct inode *inode, struct page *page,
|
|||
return status;
|
||||
}
|
||||
|
||||
struct nfs_createdata {
|
||||
struct nfs_createargs arg;
|
||||
struct nfs_diropok res;
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
};
|
||||
|
||||
static struct nfs_createdata *nfs_alloc_createdata(struct inode *dir,
|
||||
struct dentry *dentry, struct iattr *sattr)
|
||||
{
|
||||
struct nfs_createdata *data;
|
||||
|
||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||
|
||||
if (data != NULL) {
|
||||
data->arg.fh = NFS_FH(dir);
|
||||
data->arg.name = dentry->d_name.name;
|
||||
data->arg.len = dentry->d_name.len;
|
||||
data->arg.sattr = sattr;
|
||||
nfs_fattr_init(&data->fattr);
|
||||
data->fhandle.size = 0;
|
||||
data->res.fh = &data->fhandle;
|
||||
data->res.fattr = &data->fattr;
|
||||
}
|
||||
return data;
|
||||
};
|
||||
|
||||
static void nfs_free_createdata(const struct nfs_createdata *data)
|
||||
{
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
||||
int flags, struct nameidata *nd)
|
||||
{
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_createargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
.name = dentry->d_name.name,
|
||||
.len = dentry->d_name.len,
|
||||
.sattr = sattr
|
||||
};
|
||||
struct nfs_diropok res = {
|
||||
.fh = &fhandle,
|
||||
.fattr = &fattr
|
||||
};
|
||||
struct nfs_createdata *data;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
nfs_fattr_init(&fattr);
|
||||
dprintk("NFS call create %s\n", dentry->d_name.name);
|
||||
data = nfs_alloc_createdata(dir, dentry, sattr);
|
||||
if (data == NULL)
|
||||
goto out;
|
||||
msg.rpc_argp = &data->arg;
|
||||
msg.rpc_resp = &data->res;
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_mark_for_revalidate(dir);
|
||||
if (status == 0)
|
||||
status = nfs_instantiate(dentry, &fhandle, &fattr);
|
||||
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
|
||||
nfs_free_createdata(data);
|
||||
out:
|
||||
dprintk("NFS reply create: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -264,24 +289,12 @@ static int
|
|||
nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
||||
dev_t rdev)
|
||||
{
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_createargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
.name = dentry->d_name.name,
|
||||
.len = dentry->d_name.len,
|
||||
.sattr = sattr
|
||||
};
|
||||
struct nfs_diropok res = {
|
||||
.fh = &fhandle,
|
||||
.fattr = &fattr
|
||||
};
|
||||
struct nfs_createdata *data;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status, mode;
|
||||
umode_t mode;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call mknod %s\n", dentry->d_name.name);
|
||||
|
||||
|
@ -294,17 +307,24 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
|||
sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
|
||||
}
|
||||
|
||||
nfs_fattr_init(&fattr);
|
||||
data = nfs_alloc_createdata(dir, dentry, sattr);
|
||||
if (data == NULL)
|
||||
goto out;
|
||||
msg.rpc_argp = &data->arg;
|
||||
msg.rpc_resp = &data->res;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_mark_for_revalidate(dir);
|
||||
|
||||
if (status == -EINVAL && S_ISFIFO(mode)) {
|
||||
sattr->ia_mode = mode;
|
||||
nfs_fattr_init(&fattr);
|
||||
nfs_fattr_init(data->res.fattr);
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
}
|
||||
if (status == 0)
|
||||
status = nfs_instantiate(dentry, &fhandle, &fattr);
|
||||
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
|
||||
nfs_free_createdata(data);
|
||||
out:
|
||||
dprintk("NFS reply mknod: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -398,8 +418,8 @@ static int
|
|||
nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
|
||||
unsigned int len, struct iattr *sattr)
|
||||
{
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fh *fh;
|
||||
struct nfs_fattr *fattr;
|
||||
struct nfs_symlinkargs arg = {
|
||||
.fromfh = NFS_FH(dir),
|
||||
.fromname = dentry->d_name.name,
|
||||
|
@ -412,13 +432,19 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
|
|||
.rpc_proc = &nfs_procedures[NFSPROC_SYMLINK],
|
||||
.rpc_argp = &arg,
|
||||
};
|
||||
int status;
|
||||
|
||||
if (len > NFS2_MAXPATHLEN)
|
||||
return -ENAMETOOLONG;
|
||||
int status = -ENAMETOOLONG;
|
||||
|
||||
dprintk("NFS call symlink %s\n", dentry->d_name.name);
|
||||
|
||||
if (len > NFS2_MAXPATHLEN)
|
||||
goto out;
|
||||
|
||||
fh = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
status = -ENOMEM;
|
||||
if (fh == NULL || fattr == NULL)
|
||||
goto out;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_mark_for_revalidate(dir);
|
||||
|
||||
|
@ -427,12 +453,12 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
|
|||
* filehandle size to zero indicates to nfs_instantiate that it
|
||||
* should fill in the data with a LOOKUP call on the wire.
|
||||
*/
|
||||
if (status == 0) {
|
||||
nfs_fattr_init(&fattr);
|
||||
fhandle.size = 0;
|
||||
status = nfs_instantiate(dentry, &fhandle, &fattr);
|
||||
}
|
||||
if (status == 0)
|
||||
status = nfs_instantiate(dentry, fh, fattr);
|
||||
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fh);
|
||||
out:
|
||||
dprintk("NFS reply symlink: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
@ -440,31 +466,25 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
|
|||
static int
|
||||
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
|
||||
{
|
||||
struct nfs_fh fhandle;
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_createargs arg = {
|
||||
.fh = NFS_FH(dir),
|
||||
.name = dentry->d_name.name,
|
||||
.len = dentry->d_name.len,
|
||||
.sattr = sattr
|
||||
};
|
||||
struct nfs_diropok res = {
|
||||
.fh = &fhandle,
|
||||
.fattr = &fattr
|
||||
};
|
||||
struct nfs_createdata *data;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
|
||||
.rpc_argp = &arg,
|
||||
.rpc_resp = &res,
|
||||
};
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
|
||||
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
|
||||
nfs_fattr_init(&fattr);
|
||||
data = nfs_alloc_createdata(dir, dentry, sattr);
|
||||
if (data == NULL)
|
||||
goto out;
|
||||
msg.rpc_argp = &data->arg;
|
||||
msg.rpc_resp = &data->res;
|
||||
|
||||
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
|
||||
nfs_mark_for_revalidate(dir);
|
||||
if (status == 0)
|
||||
status = nfs_instantiate(dentry, &fhandle, &fattr);
|
||||
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
|
||||
nfs_free_createdata(data);
|
||||
out:
|
||||
dprintk("NFS reply mkdir: %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
|
|
@@ -40,7 +40,7 @@ static mempool_t *nfs_rdata_mempool;

struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL);

if (p) {
memset(p, 0, sizeof(*p));

@@ -50,7 +50,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
if (!p->pagevec) {
mempool_free(p, nfs_rdata_mempool);
p = NULL;
fs/nfs/super.c
@ -141,7 +141,6 @@ static const match_table_t nfs_mount_option_tokens = {
|
|||
{ Opt_resvport, "resvport" },
|
||||
{ Opt_noresvport, "noresvport" },
|
||||
{ Opt_fscache, "fsc" },
|
||||
{ Opt_fscache_uniq, "fsc=%s" },
|
||||
{ Opt_nofscache, "nofsc" },
|
||||
|
||||
{ Opt_port, "port=%s" },
|
||||
|
@ -171,6 +170,7 @@ static const match_table_t nfs_mount_option_tokens = {
|
|||
{ Opt_mountaddr, "mountaddr=%s" },
|
||||
|
||||
{ Opt_lookupcache, "lookupcache=%s" },
|
||||
{ Opt_fscache_uniq, "fsc=%s" },
|
||||
|
||||
{ Opt_err, NULL }
|
||||
};
|
||||
|
@ -423,15 +423,19 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
unsigned char blockbits;
|
||||
unsigned long blockres;
|
||||
struct nfs_fh *fh = NFS_FH(dentry->d_inode);
|
||||
struct nfs_fattr fattr;
|
||||
struct nfs_fsstat res = {
|
||||
.fattr = &fattr,
|
||||
};
|
||||
int error;
|
||||
struct nfs_fsstat res;
|
||||
int error = -ENOMEM;
|
||||
|
||||
res.fattr = nfs_alloc_fattr();
|
||||
if (res.fattr == NULL)
|
||||
goto out_err;
|
||||
|
||||
error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
|
||||
|
||||
nfs_free_fattr(res.fattr);
|
||||
if (error < 0)
|
||||
goto out_err;
|
||||
|
||||
buf->f_type = NFS_SUPER_MAGIC;
|
||||
|
||||
/*
|
||||
|
@ -1046,14 +1050,6 @@ static int nfs_parse_mount_options(char *raw,
|
|||
kfree(mnt->fscache_uniq);
|
||||
mnt->fscache_uniq = NULL;
|
||||
break;
|
||||
case Opt_fscache_uniq:
|
||||
string = match_strdup(args);
|
||||
if (!string)
|
||||
goto out_nomem;
|
||||
kfree(mnt->fscache_uniq);
|
||||
mnt->fscache_uniq = string;
|
||||
mnt->options |= NFS_OPTION_FSCACHE;
|
||||
break;
|
||||
|
||||
/*
|
||||
* options that take numeric values
|
||||
|
@ -1384,6 +1380,14 @@ static int nfs_parse_mount_options(char *raw,
|
|||
return 0;
|
||||
};
|
||||
break;
|
||||
case Opt_fscache_uniq:
|
||||
string = match_strdup(args);
|
||||
if (string == NULL)
|
||||
goto out_nomem;
|
||||
kfree(mnt->fscache_uniq);
|
||||
mnt->fscache_uniq = string;
|
||||
mnt->options |= NFS_OPTION_FSCACHE;
|
||||
break;
|
||||
|
||||
/*
|
||||
* Special options
|
||||
|
@ -2172,7 +2176,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
|
|||
int error = -ENOMEM;
|
||||
|
||||
data = nfs_alloc_parsed_mount_data(3);
|
||||
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
|
||||
mntfh = nfs_alloc_fhandle();
|
||||
if (data == NULL || mntfh == NULL)
|
||||
goto out_free_fh;
|
||||
|
||||
|
@ -2247,7 +2251,7 @@ out:
|
|||
kfree(data->fscache_uniq);
|
||||
security_free_mnt_opts(&data->lsm_opts);
|
||||
out_free_fh:
|
||||
kfree(mntfh);
|
||||
nfs_free_fhandle(mntfh);
|
||||
kfree(data);
|
||||
return error;
|
||||
|
||||
|
@ -2556,7 +2560,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
|
|||
};
|
||||
int error = -ENOMEM;
|
||||
|
||||
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
|
||||
mntfh = nfs_alloc_fhandle();
|
||||
if (data == NULL || mntfh == NULL)
|
||||
goto out_free_fh;
|
||||
|
||||
|
@ -2614,7 +2618,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
|
|||
out:
|
||||
security_free_mnt_opts(&data->lsm_opts);
|
||||
out_free_fh:
|
||||
kfree(mntfh);
|
||||
nfs_free_fhandle(mntfh);
|
||||
return error;
|
||||
|
||||
out_free:
|
||||
|
@ -2669,41 +2673,120 @@ out_freepage:
|
|||
free_page((unsigned long)page);
|
||||
}
|
||||
|
||||
struct nfs_referral_count {
|
||||
struct list_head list;
|
||||
const struct task_struct *task;
|
||||
unsigned int referral_count;
|
||||
};
|
||||
|
||||
static LIST_HEAD(nfs_referral_count_list);
|
||||
static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
|
||||
|
||||
static struct nfs_referral_count *nfs_find_referral_count(void)
|
||||
{
|
||||
struct nfs_referral_count *p;
|
||||
|
||||
list_for_each_entry(p, &nfs_referral_count_list, list) {
|
||||
if (p->task == current)
|
||||
return p;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define NFS_MAX_NESTED_REFERRALS 2
|
||||
|
||||
static int nfs_referral_loop_protect(void)
|
||||
{
|
||||
struct nfs_referral_count *p, *new;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
new = kmalloc(sizeof(*new), GFP_KERNEL);
|
||||
if (!new)
|
||||
goto out;
|
||||
new->task = current;
|
||||
new->referral_count = 1;
|
||||
|
||||
ret = 0;
|
||||
spin_lock(&nfs_referral_count_list_lock);
|
||||
p = nfs_find_referral_count();
|
||||
if (p != NULL) {
|
||||
if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
|
||||
ret = -ELOOP;
|
||||
else
|
||||
p->referral_count++;
|
||||
} else {
|
||||
list_add(&new->list, &nfs_referral_count_list);
|
||||
new = NULL;
|
||||
}
|
||||
spin_unlock(&nfs_referral_count_list_lock);
|
||||
kfree(new);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nfs_referral_loop_unprotect(void)
|
||||
{
|
||||
struct nfs_referral_count *p;
|
||||
|
||||
spin_lock(&nfs_referral_count_list_lock);
|
||||
p = nfs_find_referral_count();
|
||||
p->referral_count--;
|
||||
if (p->referral_count == 0)
|
||||
list_del(&p->list);
|
||||
else
|
||||
p = NULL;
|
||||
spin_unlock(&nfs_referral_count_list_lock);
|
||||
kfree(p);
|
||||
}
|
||||
|
||||
static int nfs_follow_remote_path(struct vfsmount *root_mnt,
|
||||
const char *export_path, struct vfsmount *mnt_target)
|
||||
{
|
||||
struct nameidata *nd = NULL;
|
||||
struct mnt_namespace *ns_private;
|
||||
struct nameidata nd;
|
||||
struct super_block *s;
|
||||
int ret;
|
||||
|
||||
nd = kmalloc(sizeof(*nd), GFP_KERNEL);
|
||||
if (nd == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ns_private = create_mnt_ns(root_mnt);
|
||||
ret = PTR_ERR(ns_private);
|
||||
if (IS_ERR(ns_private))
|
||||
goto out_mntput;
|
||||
|
||||
ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
|
||||
export_path, LOOKUP_FOLLOW, &nd);
|
||||
ret = nfs_referral_loop_protect();
|
||||
if (ret != 0)
|
||||
goto out_put_mnt_ns;
|
||||
|
||||
ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
|
||||
export_path, LOOKUP_FOLLOW, nd);
|
||||
|
||||
nfs_referral_loop_unprotect();
|
||||
put_mnt_ns(ns_private);
|
||||
|
||||
if (ret != 0)
|
||||
goto out_err;
|
||||
|
||||
s = nd.path.mnt->mnt_sb;
|
||||
s = nd->path.mnt->mnt_sb;
|
||||
atomic_inc(&s->s_active);
|
||||
mnt_target->mnt_sb = s;
|
||||
mnt_target->mnt_root = dget(nd.path.dentry);
|
||||
mnt_target->mnt_root = dget(nd->path.dentry);
|
||||
|
||||
/* Correct the device pathname */
|
||||
nfs_fix_devname(&nd.path, mnt_target);
|
||||
nfs_fix_devname(&nd->path, mnt_target);
|
||||
|
||||
path_put(&nd.path);
|
||||
path_put(&nd->path);
|
||||
kfree(nd);
|
||||
down_write(&s->s_umount);
|
||||
return 0;
|
||||
out_put_mnt_ns:
|
||||
put_mnt_ns(ns_private);
|
||||
out_mntput:
|
||||
mntput(root_mnt);
|
||||
out_err:
|
||||
kfree(nd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2874,17 +2957,21 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
|
|||
struct super_block *s;
|
||||
struct nfs_server *server;
|
||||
struct dentry *mntroot;
|
||||
struct nfs_fh mntfh;
|
||||
struct nfs_fh *mntfh;
|
||||
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
|
||||
struct nfs_sb_mountdata sb_mntdata = {
|
||||
.mntflags = flags,
|
||||
};
|
||||
int error;
|
||||
int error = -ENOMEM;
|
||||
|
||||
dprintk("--> nfs4_referral_get_sb()\n");
|
||||
|
||||
mntfh = nfs_alloc_fhandle();
|
||||
if (mntfh == NULL)
|
||||
goto out_err_nofh;
|
||||
|
||||
/* create a new volume representation */
|
||||
server = nfs4_create_referral_server(data, &mntfh);
|
||||
server = nfs4_create_referral_server(data, mntfh);
|
||||
if (IS_ERR(server)) {
|
||||
error = PTR_ERR(server);
|
||||
goto out_err_noserver;
|
||||
|
@ -2916,7 +3003,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
|
|||
nfs_fscache_get_super_cookie(s, NULL, data);
|
||||
}
|
||||
|
||||
mntroot = nfs4_get_root(s, &mntfh);
|
||||
mntroot = nfs4_get_root(s, mntfh);
|
||||
if (IS_ERR(mntroot)) {
|
||||
error = PTR_ERR(mntroot);
|
||||
goto error_splat_super;
|
||||
|
@ -2933,12 +3020,15 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
|
|||
|
||||
security_sb_clone_mnt_opts(data->sb, s);
|
||||
|
||||
nfs_free_fhandle(mntfh);
|
||||
dprintk("<-- nfs4_referral_get_sb() = 0\n");
|
||||
return 0;
|
||||
|
||||
out_err_nosb:
|
||||
nfs_free_server(server);
|
||||
out_err_noserver:
|
||||
nfs_free_fhandle(mntfh);
|
||||
out_err_nofh:
|
||||
dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
|
||||
return error;
|
||||
|
||||
|
@ -2947,6 +3037,7 @@ error_splat_super:
|
|||
bdi_unregister(&server->backing_dev_info);
|
||||
error_splat_bdi:
|
||||
deactivate_locked_super(s);
|
||||
nfs_free_fhandle(mntfh);
|
||||
dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
|
||||
return error;
|
||||
}
|
||||
|
|
|
@@ -23,6 +23,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct inode *dir;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
};

/**

@@ -169,7 +170,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
}
nfs_sb_active(dir->i_sb);
data->args.fh = NFS_FH(dir);
nfs_fattr_init(&data->res.dir_attr);
nfs_fattr_init(data->res.dir_attr);

NFS_PROTO(dir)->unlink_setup(&msg, dir);

@@ -259,6 +260,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
goto out_free;
}
data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
data->res.dir_attr = &data->dir_attr;

status = -EBUSY;
spin_lock(&dentry->d_lock);
@@ -130,7 +130,7 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt) ((kt).tv64)

#else
#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */

/*
* Helper macros/inlines to get the ktime_t math right in the timespec

@@ -275,7 +275,7 @@ static inline s64 ktime_to_ns(const ktime_t kt)
return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
}

#endif
#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */

/**
* ktime_equal - Compares two ktime_t variables to see if they are equal

@@ -295,6 +295,12 @@ static inline s64 ktime_to_us(const ktime_t kt)
return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
}

static inline s64 ktime_to_ms(const ktime_t kt)
{
struct timeval tv = ktime_to_timeval(kt);
return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
}

static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(ktime_sub(later, earlier));
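The new ktime_to_ms() mirrors the existing ktime_to_us(): convert to a timeval, then combine whole seconds with the sub-second remainder. The same arithmetic in plain, runnable C using clock_gettime() in place of ktime_t (the helper name is invented):

#include <stdio.h>
#include <time.h>

/* Same shape as ktime_to_ms(): whole seconds in ms plus the sub-second remainder. */
static long long timespec_to_ms(const struct timespec *ts)
{
	return (long long)ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("monotonic clock: %lld ms\n", timespec_to_ms(&now));
	return 0;
}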
@@ -356,6 +356,20 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
extern u64 nfs_compat_user_ino64(u64 fileid);
extern void nfs_fattr_init(struct nfs_fattr *fattr);

extern struct nfs_fattr *nfs_alloc_fattr(void);

static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
kfree(fattr);
}

extern struct nfs_fh *nfs_alloc_fhandle(void);

static inline void nfs_free_fhandle(const struct nfs_fh *fh)
{
kfree(fh);
}

/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern __be32 root_nfs_parse_addr(char *name); /*__init*/
extern unsigned long nfs_inc_attr_generation_counter(void);
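nfs_alloc_fattr()/nfs_free_fattr() and nfs_alloc_fhandle()/nfs_free_fhandle() give callers a heap-allocated attribute or filehandle buffer, and most of the fs/nfs hunks in this merge switch from on-stack copies to this pair, presumably to cut stack usage. The caller-side shape, reduced to a runnable userspace sketch with stand-in types and helpers:

#include <errno.h>
#include <stdlib.h>

struct fattr_demo { unsigned long valid; unsigned long long size; };

static struct fattr_demo *alloc_fattr_demo(void)
{
	/* zeroed allocation stands in for the kernel's kzalloc() */
	return calloc(1, sizeof(struct fattr_demo));
}

static void free_fattr_demo(struct fattr_demo *f) { free(f); }

static int probe_something(void)
{
	struct fattr_demo *fattr = alloc_fattr_demo();
	int error = -ENOMEM;

	if (fattr == NULL)
		goto out;
	/* ... fill and use *fattr instead of carrying a large copy on the stack ... */
	error = 0;
out:
	free_fattr_demo(fattr);	/* free(NULL) is a no-op, so one exit path suffices */
	return error;
}

int main(void) { return probe_something() ? 1 : 0; }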
@@ -44,7 +44,6 @@ struct nfs_client {

#ifdef CONFIG_NFS_V4
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm;
unsigned long cl_state;

struct rb_root cl_openowner_id;
@@ -386,8 +386,8 @@ struct nfs_removeargs {

struct nfs_removeres {
const struct nfs_server *server;
struct nfs_fattr *dir_attr;
struct nfs4_change_info cinfo;
struct nfs_fattr dir_attr;
struct nfs4_sequence_res seq_res;
};

@@ -824,6 +824,11 @@ struct nfs4_setclientid {
u32 sc_cb_ident;
};

struct nfs4_setclientid_res {
u64 clientid;
nfs4_verifier confirm;
};

struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
@@ -54,6 +54,7 @@ struct rpc_cred {
#define RPCAUTH_CRED_NEW 0
#define RPCAUTH_CRED_UPTODATE 1
#define RPCAUTH_CRED_HASHED 2
#define RPCAUTH_CRED_NEGATIVE 3

#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
@@ -82,6 +82,7 @@ struct gss_cred {
enum rpc_gss_svc gc_service;
struct gss_cl_ctx *gc_ctx;
struct gss_upcall_msg *gc_upcall;
unsigned long gc_upcall_timestamp;
unsigned char gc_machine_cred : 1;
};
@@ -35,7 +35,8 @@ int gss_import_sec_context(
const void* input_token,
size_t bufsize,
struct gss_api_mech *mech,
struct gss_ctx **ctx_id);
struct gss_ctx **ctx_id,
gfp_t gfp_mask);
u32 gss_get_mic(
struct gss_ctx *ctx_id,
struct xdr_buf *message,

@@ -80,6 +81,8 @@ struct gss_api_mech {
/* pseudoflavors supported by this mechanism: */
int gm_pf_num;
struct pf_desc * gm_pfs;
/* Should the following be a callback operation instead? */
const char *gm_upcall_enctypes;
};

/* and must provide the following operations: */

@@ -87,7 +90,8 @@ struct gss_api_ops {
int (*gss_import_sec_context)(
const void *input_token,
size_t bufsize,
struct gss_ctx *ctx_id);
struct gss_ctx *ctx_id,
gfp_t gfp_mask);
u32 (*gss_get_mic)(
struct gss_ctx *ctx_id,
struct xdr_buf *message,
@ -4,7 +4,7 @@
|
|||
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
|
||||
* lib/gssapi/krb5/gssapiP_krb5.h, and others
|
||||
*
|
||||
* Copyright (c) 2000 The Regents of the University of Michigan.
|
||||
* Copyright (c) 2000-2008 The Regents of the University of Michigan.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Andy Adamson <andros@umich.edu>
|
||||
|
@ -36,17 +36,86 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/sunrpc/auth_gss.h>
|
||||
#include <linux/sunrpc/gss_err.h>
|
||||
#include <linux/sunrpc/gss_asn1.h>
|
||||
|
||||
/* Length of constant used in key derivation */
|
||||
#define GSS_KRB5_K5CLENGTH (5)
|
||||
|
||||
/* Maximum key length (in bytes) for the supported crypto algorithms*/
|
||||
#define GSS_KRB5_MAX_KEYLEN (32)
|
||||
|
||||
/* Maximum checksum function output for the supported crypto algorithms */
|
||||
#define GSS_KRB5_MAX_CKSUM_LEN (20)
|
||||
|
||||
/* Maximum blocksize for the supported crypto algorithms */
|
||||
#define GSS_KRB5_MAX_BLOCKSIZE (16)
|
||||
|
||||
struct krb5_ctx;
|
||||
|
||||
struct gss_krb5_enctype {
|
||||
const u32 etype; /* encryption (key) type */
|
||||
const u32 ctype; /* checksum type */
|
||||
const char *name; /* "friendly" name */
|
||||
const char *encrypt_name; /* crypto encrypt name */
|
||||
const char *cksum_name; /* crypto checksum name */
|
||||
const u16 signalg; /* signing algorithm */
|
||||
const u16 sealalg; /* sealing algorithm */
|
||||
const u32 blocksize; /* encryption blocksize */
|
||||
const u32 conflen; /* confounder length
|
||||
(normally the same as
|
||||
the blocksize) */
|
||||
const u32 cksumlength; /* checksum length */
|
||||
const u32 keyed_cksum; /* is it a keyed cksum? */
|
||||
const u32 keybytes; /* raw key len, in bytes */
|
||||
const u32 keylength; /* final key len, in bytes */
|
||||
u32 (*encrypt) (struct crypto_blkcipher *tfm,
|
||||
void *iv, void *in, void *out,
|
||||
int length); /* encryption function */
|
||||
u32 (*decrypt) (struct crypto_blkcipher *tfm,
|
||||
void *iv, void *in, void *out,
|
||||
int length); /* decryption function */
|
||||
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
|
||||
struct xdr_netobj *in,
|
||||
struct xdr_netobj *out); /* complete key generation */
|
||||
u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, int ec,
|
||||
struct page **pages); /* v2 encryption function */
|
||||
u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, u32 *headskip,
|
||||
u32 *tailskip); /* v2 decryption function */
|
||||
};
|
||||
|
||||
/* krb5_ctx flags definitions */
|
||||
#define KRB5_CTX_FLAG_INITIATOR 0x00000001
|
||||
#define KRB5_CTX_FLAG_CFX 0x00000002
|
||||
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
|
||||
|
||||
struct krb5_ctx {
|
||||
int initiate; /* 1 = initiating, 0 = accepting */
|
||||
u32 enctype;
|
||||
u32 flags;
|
||||
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
|
||||
struct crypto_blkcipher *enc;
|
||||
struct crypto_blkcipher *seq;
|
||||
struct crypto_blkcipher *acceptor_enc;
|
||||
struct crypto_blkcipher *initiator_enc;
|
||||
struct crypto_blkcipher *acceptor_enc_aux;
|
||||
struct crypto_blkcipher *initiator_enc_aux;
|
||||
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
|
||||
u8 cksum[GSS_KRB5_MAX_KEYLEN];
|
||||
s32 endtime;
|
||||
u32 seq_send;
|
||||
u64 seq_send64;
|
||||
struct xdr_netobj mech_used;
|
||||
u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
|
||||
};
|
||||
|
||||
extern spinlock_t krb5_seq_lock;
|
||||
|
@ -57,6 +126,18 @@ extern spinlock_t krb5_seq_lock;
|
|||
#define KG_TOK_MIC_MSG 0x0101
|
||||
#define KG_TOK_WRAP_MSG 0x0201
|
||||
|
||||
#define KG2_TOK_INITIAL 0x0101
|
||||
#define KG2_TOK_RESPONSE 0x0202
|
||||
#define KG2_TOK_MIC 0x0404
|
||||
#define KG2_TOK_WRAP 0x0504
|
||||
|
||||
#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01
|
||||
#define KG2_TOKEN_FLAG_SEALED 0x02
|
||||
#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04
|
||||
|
||||
#define KG2_RESP_FLAG_ERROR 0x0001
|
||||
#define KG2_RESP_FLAG_DELEG_OK 0x0002
|
||||
|
||||
enum sgn_alg {
|
||||
SGN_ALG_DES_MAC_MD5 = 0x0000,
|
||||
SGN_ALG_MD2_5 = 0x0001,
|
||||
|
@ -81,6 +162,9 @@ enum seal_alg {
|
|||
#define CKSUMTYPE_RSA_MD5_DES 0x0008
|
||||
#define CKSUMTYPE_NIST_SHA 0x0009
|
||||
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
|
||||
#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
|
||||
#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
|
||||
#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
|
||||
|
||||
/* from gssapi_err_krb5.h */
|
||||
#define KG_CCACHE_NOMATCH (39756032L)
|
||||
|
@ -111,11 +195,56 @@ enum seal_alg {
|
|||
#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
|
||||
#define ENCTYPE_DES_HMAC_SHA1 0x0008
|
||||
#define ENCTYPE_DES3_CBC_SHA1 0x0010
|
||||
#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
|
||||
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
|
||||
#define ENCTYPE_ARCFOUR_HMAC 0x0017
|
||||
#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
|
||||
#define ENCTYPE_UNKNOWN 0x01ff
|
||||
|
||||
s32
|
||||
make_checksum(char *, char *header, int hdrlen, struct xdr_buf *body,
|
||||
int body_offset, struct xdr_netobj *cksum);
|
||||
/*
|
||||
* Constants used for key derivation
|
||||
*/
|
||||
/* for 3DES */
|
||||
#define KG_USAGE_SEAL (22)
|
||||
#define KG_USAGE_SIGN (23)
|
||||
#define KG_USAGE_SEQ (24)
|
||||
|
||||
/* from rfc3961 */
|
||||
#define KEY_USAGE_SEED_CHECKSUM (0x99)
|
||||
#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
|
||||
#define KEY_USAGE_SEED_INTEGRITY (0x55)
|
||||
|
||||
/* from rfc4121 */
|
||||
#define KG_USAGE_ACCEPTOR_SEAL (22)
|
||||
#define KG_USAGE_ACCEPTOR_SIGN (23)
|
||||
#define KG_USAGE_INITIATOR_SEAL (24)
|
||||
#define KG_USAGE_INITIATOR_SIGN (25)
|
||||
|
||||
/*
|
||||
* This compile-time check verifies that we will not exceed the
|
||||
* slack space allotted by the client and server auth_gss code
|
||||
* before they call gss_wrap().
|
||||
*/
|
||||
#define GSS_KRB5_MAX_SLACK_NEEDED \
|
||||
(GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
|
||||
+ GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
|
||||
+ GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
|
||||
+ GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
|
||||
+ GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
|
||||
+ GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
|
||||
+ 4 + 4 /* RPC verifier */ \
|
||||
+ GSS_KRB5_TOK_HDR_LEN \
|
||||
+ GSS_KRB5_MAX_CKSUM_LEN)
|
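/*
 * The compile-time check referred to above is the BUILD_BUG_ON() in
 * xdr_extend_head() later in this same patch; it reduces to:
 *
 *	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
 *
 * i.e. the worst-case per-message expansion itemized above must fit in
 * the slack space the auth_gss code already reserves.
 */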
||||
|
||||
u32
|
||||
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
|
||||
struct xdr_buf *body, int body_offset, u8 *cksumkey,
|
||||
unsigned int usage, struct xdr_netobj *cksumout);
|
||||
|
||||
u32
|
||||
make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
|
||||
struct xdr_buf *body, int body_offset, u8 *key,
|
||||
unsigned int usage, struct xdr_netobj *cksum);
|
||||
|
||||
u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
|
||||
struct xdr_netobj *);
|
||||
|
@ -149,11 +278,54 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
|
|||
int offset);
|
||||
|
||||
s32
|
||||
krb5_make_seq_num(struct crypto_blkcipher *key,
|
||||
krb5_make_seq_num(struct krb5_ctx *kctx,
|
||||
struct crypto_blkcipher *key,
|
||||
int direction,
|
||||
u32 seqnum, unsigned char *cksum, unsigned char *buf);
|
||||
|
||||
s32
|
||||
krb5_get_seq_num(struct crypto_blkcipher *key,
|
||||
krb5_get_seq_num(struct krb5_ctx *kctx,
|
||||
unsigned char *cksum,
|
||||
unsigned char *buf, int *direction, u32 *seqnum);
|
||||
|
||||
int
|
||||
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
|
||||
|
||||
u32
|
||||
krb5_derive_key(const struct gss_krb5_enctype *gk5e,
|
||||
const struct xdr_netobj *inkey,
|
||||
struct xdr_netobj *outkey,
|
||||
const struct xdr_netobj *in_constant,
|
||||
gfp_t gfp_mask);
|
||||
|
||||
u32
|
||||
gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
|
||||
struct xdr_netobj *randombits,
|
||||
struct xdr_netobj *key);
|
||||
|
||||
u32
|
||||
gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
|
||||
struct xdr_netobj *randombits,
|
||||
struct xdr_netobj *key);
|
||||
|
||||
u32
|
||||
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, int ec,
|
||||
struct page **pages);
|
||||
|
||||
u32
|
||||
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, u32 *plainoffset,
|
||||
u32 *plainlen);
|
||||
|
||||
int
|
||||
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
|
||||
struct crypto_blkcipher *cipher,
|
||||
unsigned char *cksum);
|
||||
|
||||
int
|
||||
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
|
||||
struct crypto_blkcipher *cipher,
|
||||
s32 seqnum);
|
||||
void
|
||||
gss_krb5_make_confounder(char *p, u32 conflen);
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#define _LINUX_SUNRPC_METRICS_H
|
||||
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/ktime.h>
|
||||
|
||||
#define RPC_IOSTATS_VERS "1.0"
|
||||
|
||||
|
@ -58,9 +59,9 @@ struct rpc_iostats {
|
|||
* and the total time the request spent from init to release
|
||||
* are measured.
|
||||
*/
|
||||
unsigned long long om_queue, /* jiffies queued for xmit */
|
||||
om_rtt, /* jiffies for RPC RTT */
|
||||
om_execute; /* jiffies for RPC execution */
|
||||
ktime_t om_queue, /* queued for xmit */
|
||||
om_rtt, /* RPC RTT */
|
||||
om_execute; /* RPC execution */
|
||||
} ____cacheline_aligned;
|
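/*
 * Illustrative sketch only, not part of this patch: with the counters
 * held as ktime_t, per-operation timings are accumulated with
 * ktime_add()/ktime_sub() and only flattened to a scalar when reported,
 * e.g. via ktime_to_ms().  The helper name below is invented; the real
 * accounting happens in the rpc_count_iostats() path.
 */
static void example_account_rtt(struct rpc_iostats *op_metrics,
				ktime_t sent, ktime_t replied)
{
	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt,
				       ktime_sub(replied, sent));
}
/* when reporting: (unsigned long long)ktime_to_ms(op_metrics->om_rtt) */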
||||
|
||||
struct rpc_task;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#define _LINUX_SUNRPC_SCHED_H_
|
||||
|
||||
#include <linux/timer.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/sunrpc/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/wait.h>
|
||||
|
@ -40,21 +41,15 @@ struct rpc_wait {
|
|||
* This is the RPC task struct
|
||||
*/
|
||||
struct rpc_task {
|
||||
#ifdef RPC_DEBUG
|
||||
unsigned long tk_magic; /* 0xf00baa */
|
||||
#endif
|
||||
atomic_t tk_count; /* Reference count */
|
||||
struct list_head tk_task; /* global list of tasks */
|
||||
struct rpc_clnt * tk_client; /* RPC client */
|
||||
struct rpc_rqst * tk_rqstp; /* RPC request */
|
||||
int tk_status; /* result of last operation */
|
||||
|
||||
/*
|
||||
* RPC call state
|
||||
*/
|
||||
struct rpc_message tk_msg; /* RPC call info */
|
||||
__u8 tk_garb_retry;
|
||||
__u8 tk_cred_retry;
|
||||
|
||||
/*
|
||||
* callback to be executed after waking up
|
||||
|
@ -67,7 +62,6 @@ struct rpc_task {
|
|||
void * tk_calldata;
|
||||
|
||||
unsigned long tk_timeout; /* timeout for rpc_sleep() */
|
||||
unsigned short tk_flags; /* misc flags */
|
||||
unsigned long tk_runstate; /* Task run status */
|
||||
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
|
||||
* be any workqueue
|
||||
|
@ -78,17 +72,19 @@ struct rpc_task {
|
|||
struct rpc_wait tk_wait; /* RPC wait */
|
||||
} u;
|
||||
|
||||
unsigned short tk_timeouts; /* maj timeouts */
|
||||
size_t tk_bytes_sent; /* total bytes sent */
|
||||
unsigned long tk_start; /* RPC task init timestamp */
|
||||
long tk_rtt; /* round-trip time (jiffies) */
|
||||
ktime_t tk_start; /* RPC task init timestamp */
|
||||
|
||||
pid_t tk_owner; /* Process id for batching tasks */
|
||||
unsigned char tk_priority : 2;/* Task priority */
|
||||
int tk_status; /* result of last operation */
|
||||
unsigned short tk_flags; /* misc flags */
|
||||
unsigned short tk_timeouts; /* maj timeouts */
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
unsigned short tk_pid; /* debugging aid */
|
||||
#endif
|
||||
unsigned char tk_priority : 2,/* Task priority */
|
||||
tk_garb_retry : 2,
|
||||
tk_cred_retry : 2;
|
||||
};
|
||||
#define tk_xprt tk_client->cl_xprt
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
/*
|
||||
* include/linux/sunrpc/xdr.h
|
||||
* XDR standard data types and function declarations
|
||||
*
|
||||
* Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
|
||||
*
|
||||
* Based on:
|
||||
* RFC 4506 "XDR: External Data Representation Standard", May 2006
|
||||
*/
|
||||
|
||||
#ifndef _SUNRPC_XDR_H_
|
||||
|
@ -62,7 +65,6 @@ struct xdr_buf {
|
|||
|
||||
unsigned int buflen, /* Total length of storage buffer */
|
||||
len; /* Length of XDR encoded message */
|
||||
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/sunrpc/sched.h>
|
||||
#include <linux/sunrpc/xdr.h>
|
||||
#include <linux/sunrpc/msg_prot.h>
|
||||
|
@ -65,8 +66,6 @@ struct rpc_rqst {
|
|||
struct rpc_task * rq_task; /* RPC task data */
|
||||
__be32 rq_xid; /* request XID */
|
||||
int rq_cong; /* has incremented xprt->cong */
|
||||
int rq_reply_bytes_recvd; /* number of reply */
|
||||
/* bytes received */
|
||||
u32 rq_seqno; /* gss seq no. used on req. */
|
||||
int rq_enc_pages_num;
|
||||
struct page **rq_enc_pages; /* scratch pages for use by
|
||||
|
@ -77,12 +76,16 @@ struct rpc_rqst {
|
|||
__u32 * rq_buffer; /* XDR encode buffer */
|
||||
size_t rq_callsize,
|
||||
rq_rcvsize;
|
||||
size_t rq_xmit_bytes_sent; /* total bytes sent */
|
||||
size_t rq_reply_bytes_recvd; /* total reply bytes */
|
||||
/* received */
|
||||
|
||||
struct xdr_buf rq_private_buf; /* The receive buffer
|
||||
* used in the softirq.
|
||||
*/
|
||||
unsigned long rq_majortimeo; /* major timeout alarm */
|
||||
unsigned long rq_timeout; /* Current timeout value */
|
||||
ktime_t rq_rtt; /* round-trip time */
|
||||
unsigned int rq_retries; /* # of retries */
|
||||
unsigned int rq_connect_cookie;
|
||||
/* A cookie used to track the
|
||||
|
@ -94,7 +97,7 @@ struct rpc_rqst {
|
|||
*/
|
||||
u32 rq_bytes_sent; /* Bytes we have sent */
|
||||
|
||||
unsigned long rq_xtime; /* when transmitted */
|
||||
ktime_t rq_xtime; /* transmit time stamp */
|
||||
int rq_ntrans;
|
||||
|
||||
#if defined(CONFIG_NFS_V4_1)
|
||||
|
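/*
 * Sketch, not part of this patch: with rq_xtime recorded as a ktime_t
 * at transmit time, a reply handler can compute the round trip as a
 * ktime_t as well.  The function name is invented; the real update is
 * done by the transport/RTT estimator code.
 */
static void example_record_rtt(struct rpc_rqst *req)
{
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
}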
@ -174,8 +177,7 @@ struct rpc_xprt {
|
|||
/*
|
||||
* Connection of transports
|
||||
*/
|
||||
unsigned long connect_timeout,
|
||||
bind_timeout,
|
||||
unsigned long bind_timeout,
|
||||
reestablish_timeout;
|
||||
unsigned int connect_cookie; /* A cookie that gets bumped
|
||||
every time the transport
|
||||
|
@ -294,7 +296,6 @@ void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
|
|||
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
|
||||
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
|
||||
void xprt_write_space(struct rpc_xprt *xprt);
|
||||
void xprt_update_rtt(struct rpc_task *task);
|
||||
void xprt_adjust_cwnd(struct rpc_task *task, int result);
|
||||
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
|
||||
void xprt_complete_rqst(struct rpc_task *task, int copied);
|
||||
|
|
|
@ -236,10 +236,15 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
|
|||
|
||||
list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
|
||||
|
||||
/* Enforce a 60 second garbage collection moratorium */
|
||||
if (nr_to_scan-- == 0)
|
||||
break;
|
||||
/*
|
||||
* Enforce a 60 second garbage collection moratorium
|
||||
* Note that the cred_unused list must be time-ordered.
|
||||
*/
|
||||
if (time_in_range(cred->cr_expire, expired, jiffies) &&
|
||||
test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
|
||||
continue;
|
||||
return 0;
|
||||
|
||||
list_del_init(&cred->cr_lru);
|
||||
number_cred_unused--;
|
||||
|
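/*
 * For reference (not new code), time_in_range() comes from
 * <linux/jiffies.h>:
 *
 *	#define time_in_range(a, b, c) \
 *		(time_after_eq(a, b) && time_before_eq(a, c))
 *
 * so the test above matches a still-hashed credential whose cr_expire
 * stamp lies inside the most recent 60-second moratorium window; since
 * cred_unused is time-ordered, the new code stops the scan there
 * (return 0) instead of merely skipping the entry.
 */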
@ -252,13 +257,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
|
|||
get_rpccred(cred);
|
||||
list_add_tail(&cred->cr_lru, free);
|
||||
rpcauth_unhash_cred_locked(cred);
|
||||
nr_to_scan--;
|
||||
}
|
||||
spin_unlock(cache_lock);
|
||||
if (nr_to_scan == 0)
|
||||
break;
|
||||
}
|
||||
return nr_to_scan;
|
||||
return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -270,11 +272,12 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
|
|||
LIST_HEAD(free);
|
||||
int res;
|
||||
|
||||
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
|
||||
return (nr_to_scan == 0) ? 0 : -1;
|
||||
if (list_empty(&cred_unused))
|
||||
return 0;
|
||||
spin_lock(&rpc_credcache_lock);
|
||||
nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan);
|
||||
res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
|
||||
res = rpcauth_prune_expired(&free, nr_to_scan);
|
||||
spin_unlock(&rpc_credcache_lock);
|
||||
rpcauth_destroy_credlist(&free);
|
||||
return res;
|
||||
|
|
|
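/*
 * Context sketch, shown only for illustration (the actual declaration
 * lives elsewhere in auth.c): rpcauth_cache_shrinker() is wired up
 * through the 2.6.3x shrinker interface, which is why the gfp_mask
 * test above is what keeps it from re-entering filesystem code during
 * GFP_NOFS reclaim.
 *
 *	static struct shrinker rpc_cred_shrinker = {
 *		.shrink	= rpcauth_cache_shrinker,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *	...
 *	register_shrinker(&rpc_cred_shrinker);
 */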
@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
|
|||
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
|
||||
|
||||
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
|
||||
gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o
|
||||
gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
|
||||
|
||||
obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
|
||||
|
||||
|
|
|
@ -57,11 +57,14 @@ static const struct rpc_authops authgss_ops;
|
|||
static const struct rpc_credops gss_credops;
|
||||
static const struct rpc_credops gss_nullops;
|
||||
|
||||
#define GSS_RETRY_EXPIRED 5
|
||||
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
# define RPCDBG_FACILITY RPCDBG_AUTH
|
||||
#endif
|
||||
|
||||
#define GSS_CRED_SLACK 1024
|
||||
#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
|
||||
/* length of a krb5 verifier (48), plus data added before arguments when
|
||||
* using integrity (two 4-byte integers): */
|
||||
#define GSS_VERF_SLACK 100
|
||||
|
@ -229,7 +232,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
|
|||
p = ERR_PTR(-EFAULT);
|
||||
goto err;
|
||||
}
|
||||
ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
|
||||
ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS);
|
||||
if (ret < 0) {
|
||||
p = ERR_PTR(ret);
|
||||
goto err;
|
||||
|
@ -349,6 +352,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg)
|
|||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
|
||||
{
|
||||
switch (gss_msg->msg.errno) {
|
||||
case 0:
|
||||
if (gss_msg->ctx == NULL)
|
||||
break;
|
||||
clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
|
||||
gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
|
||||
break;
|
||||
case -EKEYEXPIRED:
|
||||
set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
|
||||
}
|
||||
gss_cred->gc_upcall_timestamp = jiffies;
|
||||
gss_cred->gc_upcall = NULL;
|
||||
rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
|
||||
}
|
||||
|
||||
static void
|
||||
gss_upcall_callback(struct rpc_task *task)
|
||||
{
|
||||
|
@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task)
|
|||
struct inode *inode = &gss_msg->inode->vfs_inode;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
if (gss_msg->ctx)
|
||||
gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
|
||||
else
|
||||
task->tk_status = gss_msg->msg.errno;
|
||||
gss_cred->gc_upcall = NULL;
|
||||
rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
|
||||
gss_handle_downcall_result(gss_cred, gss_msg);
|
||||
spin_unlock(&inode->i_lock);
|
||||
task->tk_status = gss_msg->msg.errno;
|
||||
gss_release_msg(gss_msg);
|
||||
}
|
||||
|
||||
|
@ -377,11 +394,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
|
|||
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
|
||||
struct rpc_clnt *clnt, int machine_cred)
|
||||
{
|
||||
struct gss_api_mech *mech = gss_msg->auth->mech;
|
||||
char *p = gss_msg->databuf;
|
||||
int len = 0;
|
||||
|
||||
gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
|
||||
gss_msg->auth->mech->gm_name,
|
||||
mech->gm_name,
|
||||
gss_msg->uid);
|
||||
p += gss_msg->msg.len;
|
||||
if (clnt->cl_principal) {
|
||||
|
@ -398,6 +416,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
|
|||
p += len;
|
||||
gss_msg->msg.len += len;
|
||||
}
|
||||
if (mech->gm_upcall_enctypes) {
|
||||
len = sprintf(p, mech->gm_upcall_enctypes);
|
||||
p += len;
|
||||
gss_msg->msg.len += len;
|
||||
}
|
||||
len = sprintf(p, "\n");
|
||||
gss_msg->msg.len += len;
|
||||
|
||||
|
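/*
 * Illustration only -- the exact fields depend on the mount and on
 * whatever string the mechanism registers in gm_upcall_enctypes, which
 * is not spelled out here: with the hunk above, the text line handed
 * to gssd now also advertises the kernel's supported enctypes, roughly
 *
 *	mech=krb5 uid=500 target=nfs@server.example.com service=nfs enctypes=...
 *
 * terminated by the final newline added just above.
 */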
@ -507,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task)
|
|||
spin_lock(&inode->i_lock);
|
||||
if (gss_cred->gc_upcall != NULL)
|
||||
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
|
||||
else if (gss_msg->ctx != NULL) {
|
||||
gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
|
||||
gss_cred->gc_upcall = NULL;
|
||||
rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
|
||||
} else if (gss_msg->msg.errno >= 0) {
|
||||
else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
|
||||
task->tk_timeout = 0;
|
||||
gss_cred->gc_upcall = gss_msg;
|
||||
/* gss_upcall_callback will release the reference to gss_upcall_msg */
|
||||
atomic_inc(&gss_msg->count);
|
||||
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
|
||||
} else
|
||||
} else {
|
||||
gss_handle_downcall_result(gss_cred, gss_msg);
|
||||
err = gss_msg->msg.errno;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
gss_release_msg(gss_msg);
|
||||
out:
|
||||
|
@ -1117,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int gss_cred_is_negative_entry(struct rpc_cred *cred)
|
||||
{
|
||||
if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
|
||||
unsigned long now = jiffies;
|
||||
unsigned long begin, expire;
|
||||
struct gss_cred *gss_cred;
|
||||
|
||||
gss_cred = container_of(cred, struct gss_cred, gc_base);
|
||||
begin = gss_cred->gc_upcall_timestamp;
|
||||
expire = begin + gss_expired_cred_retry_delay * HZ;
|
||||
|
||||
if (time_in_range_open(now, begin, expire))
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
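/*
 * For reference, time_in_range_open() (from <linux/jiffies.h>) is
 *
 *	#define time_in_range_open(a, b, c) \
 *		(time_after_eq(a, b) && time_before(a, c))
 *
 * so with the default gss_expired_cred_retry_delay of 5 seconds, a
 * credential whose last upcall came back -EKEYEXPIRED is treated as
 * negative for the next 5 seconds and gss_refresh() below fails fast
 * instead of queueing another upcall to gssd.
 */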
||||
|
||||
/*
|
||||
* Refresh credentials. XXX - finish
|
||||
*/
|
||||
|
@ -1126,6 +1164,9 @@ gss_refresh(struct rpc_task *task)
|
|||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
||||
int ret = 0;
|
||||
|
||||
if (gss_cred_is_negative_entry(cred))
|
||||
return -EKEYEXPIRED;
|
||||
|
||||
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
|
||||
!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
|
||||
ret = gss_renew_cred(task);
|
||||
|
@ -1316,15 +1357,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|||
inpages = snd_buf->pages + first;
|
||||
snd_buf->pages = rqstp->rq_enc_pages;
|
||||
snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
|
||||
/* Give the tail its own page, in case we need extra space in the
|
||||
* head when wrapping: */
|
||||
/*
|
||||
* Give the tail its own page, in case we need extra space in the
|
||||
* head when wrapping:
|
||||
*
|
||||
* call_allocate() allocates twice the slack space required
|
||||
* by the authentication flavor to rq_callsize.
|
||||
* For GSS, slack is GSS_CRED_SLACK.
|
||||
*/
|
||||
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
|
||||
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
|
||||
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
|
||||
snd_buf->tail[0].iov_base = tmp;
|
||||
}
|
||||
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
|
||||
/* RPC_SLACK_SPACE should prevent this ever happening: */
|
||||
/* slack space should prevent this ever happening: */
|
||||
BUG_ON(snd_buf->len > snd_buf->buflen);
|
||||
status = -EIO;
|
||||
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
|
||||
|
@ -1573,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void)
|
|||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
module_param_named(expired_cred_retry_delay,
|
||||
gss_expired_cred_retry_delay,
|
||||
uint, 0644);
|
||||
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
|
||||
"the RPC engine retries an expired credential");
|
||||
|
||||
module_init(init_rpcsec_gss)
|
||||
module_exit(exit_rpcsec_gss)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* linux/net/sunrpc/gss_krb5_crypto.c
|
||||
*
|
||||
* Copyright (c) 2000 The Regents of the University of Michigan.
|
||||
* Copyright (c) 2000-2008 The Regents of the University of Michigan.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Andy Adamson <andros@umich.edu>
|
||||
|
@ -41,6 +41,7 @@
|
|||
#include <linux/crypto.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sunrpc/gss_krb5.h>
|
||||
#include <linux/sunrpc/xdr.h>
|
||||
|
||||
|
@ -58,13 +59,13 @@ krb5_encrypt(
|
|||
{
|
||||
u32 ret = -EINVAL;
|
||||
struct scatterlist sg[1];
|
||||
u8 local_iv[16] = {0};
|
||||
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
|
||||
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
|
||||
|
||||
if (length % crypto_blkcipher_blocksize(tfm) != 0)
|
||||
goto out;
|
||||
|
||||
if (crypto_blkcipher_ivsize(tfm) > 16) {
|
||||
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
|
||||
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
|
||||
crypto_blkcipher_ivsize(tfm));
|
||||
goto out;
|
||||
|
@ -92,13 +93,13 @@ krb5_decrypt(
|
|||
{
|
||||
u32 ret = -EINVAL;
|
||||
struct scatterlist sg[1];
|
||||
u8 local_iv[16] = {0};
|
||||
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
|
||||
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
|
||||
|
||||
if (length % crypto_blkcipher_blocksize(tfm) != 0)
|
||||
goto out;
|
||||
|
||||
if (crypto_blkcipher_ivsize(tfm) > 16) {
|
||||
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
|
||||
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
|
||||
crypto_blkcipher_ivsize(tfm));
|
||||
goto out;
|
||||
|
@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data)
|
|||
return crypto_hash_update(desc, sg, sg->length);
|
||||
}
|
||||
|
||||
/* checksum the plaintext data and hdrlen bytes of the token header */
|
||||
s32
|
||||
make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
|
||||
int body_offset, struct xdr_netobj *cksum)
|
||||
static int
|
||||
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
|
||||
{
|
||||
struct hash_desc desc; /* XXX add to ctx? */
|
||||
unsigned int ms_usage;
|
||||
|
||||
switch (usage) {
|
||||
case KG_USAGE_SIGN:
|
||||
ms_usage = 15;
|
||||
break;
|
||||
case KG_USAGE_SEAL:
|
||||
ms_usage = 13;
|
||||
break;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
salt[0] = (ms_usage >> 0) & 0xff;
|
||||
salt[1] = (ms_usage >> 8) & 0xff;
|
||||
salt[2] = (ms_usage >> 16) & 0xff;
|
||||
salt[3] = (ms_usage >> 24) & 0xff;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32
|
||||
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
|
||||
struct xdr_buf *body, int body_offset, u8 *cksumkey,
|
||||
unsigned int usage, struct xdr_netobj *cksumout)
|
||||
{
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
int err;
|
||||
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
u8 rc4salt[4];
|
||||
struct crypto_hash *md5;
|
||||
struct crypto_hash *hmac_md5;
|
||||
|
||||
desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
|
||||
if (cksumkey == NULL)
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (cksumout->len < kctx->gk5e->cksumlength) {
|
||||
dprintk("%s: checksum buffer length, %u, too small for %s\n",
|
||||
__func__, cksumout->len, kctx->gk5e->name);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
|
||||
dprintk("%s: invalid usage value %u\n", __func__, usage);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(md5))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(hmac_md5)) {
|
||||
crypto_free_hash(md5);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
desc.tfm = md5;
|
||||
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out;
|
||||
sg_init_one(sg, rc4salt, 4);
|
||||
err = crypto_hash_update(&desc, sg, 4);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
sg_init_one(sg, header, hdrlen);
|
||||
err = crypto_hash_update(&desc, sg, hdrlen);
|
||||
if (err)
|
||||
goto out;
|
||||
err = xdr_process_buf(body, body_offset, body->len - body_offset,
|
||||
checksummer, &desc);
|
||||
if (err)
|
||||
goto out;
|
||||
err = crypto_hash_final(&desc, checksumdata);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
desc.tfm = hmac_md5;
|
||||
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out;
|
||||
err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
|
||||
err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
|
||||
checksumdata);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
|
||||
cksumout->len = kctx->gk5e->cksumlength;
|
||||
out:
|
||||
crypto_free_hash(md5);
|
||||
crypto_free_hash(hmac_md5);
|
||||
return err ? GSS_S_FAILURE : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* checksum the plaintext data and hdrlen bytes of the token header
|
||||
* The checksum is performed over the first 8 bytes of the
|
||||
* gss token header and then over the data body
|
||||
*/
|
||||
u32
|
||||
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
|
||||
struct xdr_buf *body, int body_offset, u8 *cksumkey,
|
||||
unsigned int usage, struct xdr_netobj *cksumout)
|
||||
{
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
int err;
|
||||
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
unsigned int checksumlen;
|
||||
|
||||
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
|
||||
return make_checksum_hmac_md5(kctx, header, hdrlen,
|
||||
body, body_offset,
|
||||
cksumkey, usage, cksumout);
|
||||
|
||||
if (cksumout->len < kctx->gk5e->cksumlength) {
|
||||
dprintk("%s: checksum buffer length, %u, too small for %s\n",
|
||||
__func__, cksumout->len, kctx->gk5e->name);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(desc.tfm))
|
||||
return GSS_S_FAILURE;
|
||||
cksum->len = crypto_hash_digestsize(desc.tfm);
|
||||
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
checksumlen = crypto_hash_digestsize(desc.tfm);
|
||||
|
||||
if (cksumkey != NULL) {
|
||||
err = crypto_hash_setkey(desc.tfm, cksumkey,
|
||||
kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out;
|
||||
|
@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
|
|||
checksummer, &desc);
|
||||
if (err)
|
||||
goto out;
|
||||
err = crypto_hash_final(&desc, cksum->data);
|
||||
err = crypto_hash_final(&desc, checksumdata);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
switch (kctx->gk5e->ctype) {
|
||||
case CKSUMTYPE_RSA_MD5:
|
||||
err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
|
||||
checksumdata, checksumlen);
|
||||
if (err)
|
||||
goto out;
|
||||
memcpy(cksumout->data,
|
||||
checksumdata + checksumlen - kctx->gk5e->cksumlength,
|
||||
kctx->gk5e->cksumlength);
|
||||
break;
|
||||
case CKSUMTYPE_HMAC_SHA1_DES3:
|
||||
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
cksumout->len = kctx->gk5e->cksumlength;
|
||||
out:
|
||||
crypto_free_hash(desc.tfm);
|
||||
return err ? GSS_S_FAILURE : 0;
|
||||
}
|
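/*
 * Hypothetical caller sketch (the real users are the get_mic/wrap
 * paths elsewhere in this patch; the variable names here are
 * invented): checksum 8 bytes of token header plus the message body
 * using the context checksum key.
 */
u8 cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj md5cksum = { .len = sizeof(cksumdata), .data = cksumdata };

if (make_checksum(kctx, krb5_hdr, 8, text, 0, kctx->cksum,
		  KG_USAGE_SIGN, &md5cksum))
	return GSS_S_FAILURE;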
||||
|
||||
/*
|
||||
* checksum the plaintext data and hdrlen bytes of the token header
|
||||
* Per rfc4121, sec. 4.2.4, the checksum is performed over the data
|
||||
* body then over the first 16 octets of the MIC token
|
||||
* Inclusion of the header data in the calculation of the
|
||||
* checksum is optional.
|
||||
*/
|
||||
u32
|
||||
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
|
||||
struct xdr_buf *body, int body_offset, u8 *cksumkey,
|
||||
unsigned int usage, struct xdr_netobj *cksumout)
|
||||
{
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
int err;
|
||||
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
unsigned int checksumlen;
|
||||
|
||||
if (kctx->gk5e->keyed_cksum == 0) {
|
||||
dprintk("%s: expected keyed hash for %s\n",
|
||||
__func__, kctx->gk5e->name);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
if (cksumkey == NULL) {
|
||||
dprintk("%s: no key supplied for %s\n",
|
||||
__func__, kctx->gk5e->name);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(desc.tfm))
|
||||
return GSS_S_FAILURE;
|
||||
checksumlen = crypto_hash_digestsize(desc.tfm);
|
||||
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out;
|
||||
err = xdr_process_buf(body, body_offset, body->len - body_offset,
|
||||
checksummer, &desc);
|
||||
if (err)
|
||||
goto out;
|
||||
if (header != NULL) {
|
||||
sg_init_one(sg, header, hdrlen);
|
||||
err = crypto_hash_update(&desc, sg, hdrlen);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
err = crypto_hash_final(&desc, checksumdata);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
cksumout->len = kctx->gk5e->cksumlength;
|
||||
|
||||
switch (kctx->gk5e->ctype) {
|
||||
case CKSUMTYPE_HMAC_SHA1_96_AES128:
|
||||
case CKSUMTYPE_HMAC_SHA1_96_AES256:
|
||||
/* note that this truncates the hash */
|
||||
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
out:
|
||||
crypto_free_hash(desc.tfm);
|
||||
return err ? GSS_S_FAILURE : 0;
|
||||
}
|
||||
|
||||
struct encryptor_desc {
|
||||
u8 iv[8]; /* XXX hard-coded blocksize */
|
||||
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
|
||||
struct blkcipher_desc desc;
|
||||
int pos;
|
||||
struct xdr_buf *outbuf;
|
||||
|
@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data)
|
|||
desc->fraglen += sg->length;
|
||||
desc->pos += sg->length;
|
||||
|
||||
fraglen = thislen & 7; /* XXX hardcoded blocksize */
|
||||
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
|
||||
thislen -= fraglen;
|
||||
|
||||
if (thislen == 0)
|
||||
|
@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
|
|||
}
|
||||
|
||||
struct decryptor_desc {
|
||||
u8 iv[8]; /* XXX hard-coded blocksize */
|
||||
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
|
||||
struct blkcipher_desc desc;
|
||||
struct scatterlist frags[4];
|
||||
int fragno;
|
||||
|
@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data)
|
|||
desc->fragno++;
|
||||
desc->fraglen += sg->length;
|
||||
|
||||
fraglen = thislen & 7; /* XXX hardcoded blocksize */
|
||||
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
|
||||
thislen -= fraglen;
|
||||
|
||||
if (thislen == 0)
|
||||
|
@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
|
|||
|
||||
return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function makes the assumption that it was ultimately called
|
||||
* from gss_wrap().
|
||||
*
|
||||
* The client auth_gss code moves any existing tail data into a
|
||||
* separate page before calling gss_wrap.
|
||||
* The server svcauth_gss code ensures that both the head and the
|
||||
* tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
|
||||
*
|
||||
* Even with that guarantee, this function may be called more than
|
||||
* once in the processing of gss_wrap(). The best we can do is
|
||||
* verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
|
||||
* largest expected shift will fit within RPC_MAX_AUTH_SIZE.
|
||||
* At run-time we can verify that a single invocation of this
|
||||
* function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
|
||||
*/
|
||||
|
||||
int
|
||||
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
|
||||
{
|
||||
u8 *p;
|
||||
|
||||
if (shiftlen == 0)
|
||||
return 0;
|
||||
|
||||
BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
|
||||
BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
|
||||
|
||||
p = buf->head[0].iov_base + base;
|
||||
|
||||
memmove(p + shiftlen, p, buf->head[0].iov_len - base);
|
||||
|
||||
buf->head[0].iov_len += shiftlen;
|
||||
buf->len += shiftlen;
|
||||
|
||||
return 0;
|
||||
}
|
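/*
 * Worked illustration with made-up numbers: if head[0].iov_len is 100,
 * base is 16 and shiftlen is 16, the 84 bytes starting at
 * iov_base + 16 are moved up to iov_base + 32, head[0].iov_len becomes
 * 116 and buf->len grows by 16, leaving a 16-byte hole at
 * iov_base + 16 (used below to hold the krb5 confounder).
 */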
||||
|
||||
static u32
|
||||
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
|
||||
u32 offset, u8 *iv, struct page **pages, int encrypt)
|
||||
{
|
||||
u32 ret;
|
||||
struct scatterlist sg[1];
|
||||
struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
|
||||
u8 data[crypto_blkcipher_blocksize(cipher) * 2];
|
||||
struct page **save_pages;
|
||||
u32 len = buf->len - offset;
|
||||
|
||||
BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
|
||||
|
||||
/*
|
||||
* For encryption, we want to read from the cleartext
|
||||
* page cache pages, and write the encrypted data to
|
||||
* the supplied xdr_buf pages.
|
||||
*/
|
||||
save_pages = buf->pages;
|
||||
if (encrypt)
|
||||
buf->pages = pages;
|
||||
|
||||
ret = read_bytes_from_xdr_buf(buf, offset, data, len);
|
||||
buf->pages = save_pages;
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sg_init_one(sg, data, len);
|
||||
|
||||
if (encrypt)
|
||||
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
|
||||
else
|
||||
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
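/*
 * Worked example of how the caller below splits the work (numbers made
 * up): with an AES block size of 16 and 100 bytes of confounder+data,
 * nblocks = 7 and cbcbytes = (7 - 2) * 16 = 80, so 80 bytes go through
 * the auxiliary CBC tfm and only the final 20 bytes are handed to
 * gss_krb5_cts_crypt(), with the last CBC block carried in as the IV.
 */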
||||
|
||||
u32
|
||||
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, int ec, struct page **pages)
|
||||
{
|
||||
u32 err;
|
||||
struct xdr_netobj hmac;
|
||||
u8 *cksumkey;
|
||||
u8 *ecptr;
|
||||
struct crypto_blkcipher *cipher, *aux_cipher;
|
||||
int blocksize;
|
||||
struct page **save_pages;
|
||||
int nblocks, nbytes;
|
||||
struct encryptor_desc desc;
|
||||
u32 cbcbytes;
|
||||
unsigned int usage;
|
||||
|
||||
if (kctx->initiate) {
|
||||
cipher = kctx->initiator_enc;
|
||||
aux_cipher = kctx->initiator_enc_aux;
|
||||
cksumkey = kctx->initiator_integ;
|
||||
usage = KG_USAGE_INITIATOR_SEAL;
|
||||
} else {
|
||||
cipher = kctx->acceptor_enc;
|
||||
aux_cipher = kctx->acceptor_enc_aux;
|
||||
cksumkey = kctx->acceptor_integ;
|
||||
usage = KG_USAGE_ACCEPTOR_SEAL;
|
||||
}
|
||||
blocksize = crypto_blkcipher_blocksize(cipher);
|
||||
|
||||
/* hide the gss token header and insert the confounder */
|
||||
offset += GSS_KRB5_TOK_HDR_LEN;
|
||||
if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
|
||||
return GSS_S_FAILURE;
|
||||
gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
|
||||
offset -= GSS_KRB5_TOK_HDR_LEN;
|
||||
|
||||
if (buf->tail[0].iov_base != NULL) {
|
||||
ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
|
||||
} else {
|
||||
buf->tail[0].iov_base = buf->head[0].iov_base
|
||||
+ buf->head[0].iov_len;
|
||||
buf->tail[0].iov_len = 0;
|
||||
ecptr = buf->tail[0].iov_base;
|
||||
}
|
||||
|
||||
memset(ecptr, 'X', ec);
|
||||
buf->tail[0].iov_len += ec;
|
||||
buf->len += ec;
|
||||
|
||||
/* copy plaintext gss token header after filler (if any) */
|
||||
memcpy(ecptr + ec, buf->head[0].iov_base + offset,
|
||||
GSS_KRB5_TOK_HDR_LEN);
|
||||
buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
|
||||
buf->len += GSS_KRB5_TOK_HDR_LEN;
|
||||
|
||||
/* Do the HMAC */
|
||||
hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
|
||||
hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
|
||||
|
||||
/*
|
||||
* When we are called, pages points to the real page cache
|
||||
* data -- which we can't go and encrypt! buf->pages points
|
||||
* to scratch pages which we are going to send off to the
|
||||
* client/server. Swap in the plaintext pages to calculate
|
||||
* the hmac.
|
||||
*/
|
||||
save_pages = buf->pages;
|
||||
buf->pages = pages;
|
||||
|
||||
err = make_checksum_v2(kctx, NULL, 0, buf,
|
||||
offset + GSS_KRB5_TOK_HDR_LEN,
|
||||
cksumkey, usage, &hmac);
|
||||
buf->pages = save_pages;
|
||||
if (err)
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
|
||||
nblocks = (nbytes + blocksize - 1) / blocksize;
|
||||
cbcbytes = 0;
|
||||
if (nblocks > 2)
|
||||
cbcbytes = (nblocks - 2) * blocksize;
|
||||
|
||||
memset(desc.iv, 0, sizeof(desc.iv));
|
||||
|
||||
if (cbcbytes) {
|
||||
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
|
||||
desc.fragno = 0;
|
||||
desc.fraglen = 0;
|
||||
desc.pages = pages;
|
||||
desc.outbuf = buf;
|
||||
desc.desc.info = desc.iv;
|
||||
desc.desc.flags = 0;
|
||||
desc.desc.tfm = aux_cipher;
|
||||
|
||||
sg_init_table(desc.infrags, 4);
|
||||
sg_init_table(desc.outfrags, 4);
|
||||
|
||||
err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
|
||||
cbcbytes, encryptor, &desc);
|
||||
if (err)
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Make sure IV carries forward from any CBC results. */
|
||||
err = gss_krb5_cts_crypt(cipher, buf,
|
||||
offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
|
||||
desc.iv, pages, 1);
|
||||
if (err) {
|
||||
err = GSS_S_FAILURE;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Now update buf to account for HMAC */
|
||||
buf->tail[0].iov_len += kctx->gk5e->cksumlength;
|
||||
buf->len += kctx->gk5e->cksumlength;
|
||||
|
||||
out_err:
|
||||
if (err)
|
||||
err = GSS_S_FAILURE;
|
||||
return err;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
|
||||
u32 *headskip, u32 *tailskip)
|
||||
{
|
||||
struct xdr_buf subbuf;
|
||||
u32 ret = 0;
|
||||
u8 *cksum_key;
|
||||
struct crypto_blkcipher *cipher, *aux_cipher;
|
||||
struct xdr_netobj our_hmac_obj;
|
||||
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
int nblocks, blocksize, cbcbytes;
|
||||
struct decryptor_desc desc;
|
||||
unsigned int usage;
|
||||
|
||||
if (kctx->initiate) {
|
||||
cipher = kctx->acceptor_enc;
|
||||
aux_cipher = kctx->acceptor_enc_aux;
|
||||
cksum_key = kctx->acceptor_integ;
|
||||
usage = KG_USAGE_ACCEPTOR_SEAL;
|
||||
} else {
|
||||
cipher = kctx->initiator_enc;
|
||||
aux_cipher = kctx->initiator_enc_aux;
|
||||
cksum_key = kctx->initiator_integ;
|
||||
usage = KG_USAGE_INITIATOR_SEAL;
|
||||
}
|
||||
blocksize = crypto_blkcipher_blocksize(cipher);
|
||||
|
||||
|
||||
/* create a segment skipping the header and leaving out the checksum */
|
||||
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
|
||||
(buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
|
||||
kctx->gk5e->cksumlength));
|
||||
|
||||
nblocks = (subbuf.len + blocksize - 1) / blocksize;
|
||||
|
||||
cbcbytes = 0;
|
||||
if (nblocks > 2)
|
||||
cbcbytes = (nblocks - 2) * blocksize;
|
||||
|
||||
memset(desc.iv, 0, sizeof(desc.iv));
|
||||
|
||||
if (cbcbytes) {
|
||||
desc.fragno = 0;
|
||||
desc.fraglen = 0;
|
||||
desc.desc.info = desc.iv;
|
||||
desc.desc.flags = 0;
|
||||
desc.desc.tfm = aux_cipher;
|
||||
|
||||
sg_init_table(desc.frags, 4);
|
||||
|
||||
ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Make sure IV carries forward from any CBC results. */
|
||||
ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
|
||||
/* Calculate our hmac over the plaintext data */
|
||||
our_hmac_obj.len = sizeof(our_hmac);
|
||||
our_hmac_obj.data = our_hmac;
|
||||
|
||||
ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
|
||||
cksum_key, usage, &our_hmac_obj);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
/* Get the packet's hmac value */
|
||||
ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
|
||||
pkt_hmac, kctx->gk5e->cksumlength);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
|
||||
ret = GSS_S_BAD_SIG;
|
||||
goto out_err;
|
||||
}
|
||||
*headskip = kctx->gk5e->conflen;
|
||||
*tailskip = kctx->gk5e->cksumlength;
|
||||
out_err:
|
||||
if (ret && ret != GSS_S_BAD_SIG)
|
||||
ret = GSS_S_FAILURE;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute Kseq given the initial session key and the checksum.
|
||||
* Set the key of the given cipher.
|
||||
*/
|
||||
int
|
||||
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
|
||||
unsigned char *cksum)
|
||||
{
|
||||
struct crypto_hash *hmac;
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
u8 Kseq[GSS_KRB5_MAX_KEYLEN];
|
||||
u32 zeroconstant = 0;
|
||||
int err;
|
||||
|
||||
dprintk("%s: entered\n", __func__);
|
||||
|
||||
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(hmac)) {
|
||||
dprintk("%s: error %ld, allocating hash '%s'\n",
|
||||
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
|
||||
return PTR_ERR(hmac);
|
||||
}
|
||||
|
||||
desc.tfm = hmac;
|
||||
desc.flags = 0;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
/* Compute intermediate Kseq from session key */
|
||||
err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
sg_init_table(sg, 1);
|
||||
sg_set_buf(sg, &zeroconstant, 4);
|
||||
|
||||
err = crypto_hash_digest(&desc, sg, 4, Kseq);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
/* Compute final Kseq from the checksum and intermediate Kseq */
|
||||
err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
sg_set_buf(sg, cksum, 8);
|
||||
|
||||
err = crypto_hash_digest(&desc, sg, 8, Kseq);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = 0;
|
||||
|
||||
out_err:
|
||||
crypto_free_hash(hmac);
|
||||
dprintk("%s: returning %d\n", __func__, err);
|
||||
return err;
|
||||
}
|
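/*
 * In HMAC form, the two digests above amount to (RC4-HMAC):
 *
 *	Kseq  = HMAC-MD5(Ksess, 0x00000000)	(4-byte zero constant)
 *	Kseq' = HMAC-MD5(Kseq, cksum[0..7])
 *
 * and Kseq' is installed as the key of the sequence-number cipher.
 */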
||||
|
||||
/*
|
||||
* Compute Kcrypt given the initial session key and the plaintext seqnum.
|
||||
* Set the key of cipher kctx->enc.
|
||||
*/
|
||||
int
|
||||
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
|
||||
s32 seqnum)
|
||||
{
|
||||
struct crypto_hash *hmac;
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
|
||||
u8 zeroconstant[4] = {0};
|
||||
u8 seqnumarray[4];
|
||||
int err, i;
|
||||
|
||||
dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
|
||||
|
||||
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(hmac)) {
|
||||
dprintk("%s: error %ld, allocating hash '%s'\n",
|
||||
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
|
||||
return PTR_ERR(hmac);
|
||||
}
|
||||
|
||||
desc.tfm = hmac;
|
||||
desc.flags = 0;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
/* Compute intermediate Kcrypt from session key */
|
||||
for (i = 0; i < kctx->gk5e->keylength; i++)
|
||||
Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
|
||||
|
||||
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
sg_init_table(sg, 1);
|
||||
sg_set_buf(sg, zeroconstant, 4);
|
||||
|
||||
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
|
||||
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
|
||||
seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
|
||||
seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
|
||||
seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
|
||||
|
||||
sg_set_buf(sg, seqnumarray, 4);
|
||||
|
||||
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = 0;
|
||||
|
||||
out_err:
|
||||
crypto_free_hash(hmac);
|
||||
dprintk("%s: returning %d\n", __func__, err);
|
||||
return err;
|
||||
}
|
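/*
 * Equivalent formulas for the steps above:
 *
 *	Kcrypt  = HMAC-MD5(Ksess ^ 0xf0f0..f0, 0x00000000)
 *	Kcrypt' = HMAC-MD5(Kcrypt, seqnum as 4 big-endian bytes)
 *
 * Kcrypt' then becomes the RC4 encryption key for this message.
 */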
||||
|
||||
|
|
336
net/sunrpc/auth_gss/gss_krb5_keys.c
Normal file
|
@ -0,0 +1,336 @@
|
|||
/*
|
||||
* COPYRIGHT (c) 2008
|
||||
* The Regents of the University of Michigan
|
||||
* ALL RIGHTS RESERVED
|
||||
*
|
||||
* Permission is granted to use, copy, create derivative works
|
||||
* and redistribute this software and such derivative works
|
||||
* for any purpose, so long as the name of The University of
|
||||
* Michigan is not used in any advertising or publicity
|
||||
* pertaining to the use of distribution of this software
|
||||
* without specific, written prior authorization. If the
|
||||
* above copyright notice or any other identification of the
|
||||
* University of Michigan is included in any copy of any
|
||||
* portion of this software, then the disclaimer below must
|
||||
* also be included.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
|
||||
* FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
|
||||
* PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
|
||||
* MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
|
||||
* WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
|
||||
* REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
|
||||
* FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
|
||||
* CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
|
||||
* OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
|
||||
* IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 1998 by the FundsXpress, INC.
|
||||
*
|
||||
* All rights reserved.
|
||||
*
|
||||
* Export of this software from the United States of America may require
|
||||
* a specific license from the United States Government. It is the
|
||||
* responsibility of any person or organization contemplating export to
|
||||
* obtain such a license before exporting.
|
||||
*
|
||||
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
|
||||
* distribute this software and its documentation for any purpose and
|
||||
* without fee is hereby granted, provided that the above copyright
|
||||
* notice appear in all copies and that both that copyright notice and
|
||||
* this permission notice appear in supporting documentation, and that
|
||||
* the name of FundsXpress. not be used in advertising or publicity pertaining
|
||||
* to distribution of the software without specific, written prior
|
||||
* permission. FundsXpress makes no representations about the suitability of
|
||||
* this software for any purpose. It is provided "as is" without express
|
||||
* or implied warranty.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/sunrpc/gss_krb5.h>
|
||||
#include <linux/sunrpc/xdr.h>
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
# define RPCDBG_FACILITY RPCDBG_AUTH
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This is the n-fold function as described in rfc3961, sec 5.1
|
||||
* Taken from MIT Kerberos and modified.
|
||||
*/
|
||||
|
||||
static void krb5_nfold(u32 inbits, const u8 *in,
|
||||
u32 outbits, u8 *out)
|
||||
{
|
||||
int a, b, c, lcm;
|
||||
int byte, i, msbit;
|
||||
|
||||
/* the code below is more readable if I make these bytes
|
||||
instead of bits */
|
||||
|
||||
inbits >>= 3;
|
||||
outbits >>= 3;
|
||||
|
||||
/* first compute lcm(n,k) */
|
||||
|
||||
a = outbits;
|
||||
b = inbits;
|
||||
|
||||
while (b != 0) {
|
||||
c = b;
|
||||
b = a%b;
|
||||
a = c;
|
||||
}
|
||||
|
||||
lcm = outbits*inbits/a;
|
||||
|
||||
/* now do the real work */
|
||||
|
||||
memset(out, 0, outbits);
|
||||
byte = 0;
|
||||
|
||||
/* this will end up cycling through k lcm(k,n)/k times, which
|
||||
is correct */
|
||||
for (i = lcm-1; i >= 0; i--) {
|
||||
/* compute the msbit in k which gets added into this byte */
|
||||
msbit = (
|
||||
/* first, start with the msbit in the first,
|
||||
* unrotated byte */
|
||||
((inbits << 3) - 1)
|
||||
/* then, for each byte, shift to the right
|
||||
* for each repetition */
|
||||
+ (((inbits << 3) + 13) * (i/inbits))
|
||||
/* last, pick out the correct byte within
|
||||
* that shifted repetition */
|
||||
+ ((inbits - (i % inbits)) << 3)
|
||||
) % (inbits << 3);
|
||||
|
||||
/* pull out the byte value itself */
|
||||
byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
|
||||
(in[((inbits) - (msbit >> 3)) % inbits]))
|
||||
>> ((msbit & 7) + 1)) & 0xff;
|
||||
|
||||
/* do the addition */
|
||||
byte += out[i % outbits];
|
||||
out[i % outbits] = byte & 0xff;
|
||||
|
||||
/* keep around the carry bit, if any */
|
||||
byte >>= 8;
|
||||
|
||||
}
|
||||
|
||||
/* if there's a carry bit left over, add it back in */
|
||||
if (byte) {
|
||||
for (i = outbits - 1; i >= 0; i--) {
|
||||
/* do the addition */
|
||||
byte += out[i];
|
||||
out[i] = byte & 0xff;
|
||||
|
||||
/* keep around the carry bit, if any */
|
||||
byte >>= 8;
|
||||
}
|
||||
}
|
||||
}
|
||||
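/*
 * Usage sketch (mirrors what krb5_derive_key() below does; the buffer
 * names are invented): the RFC 3961 "well-known constant" -- the
 * 4-byte key usage number followed by one of the 0x99/0xAA/0x55 seed
 * octets defined in gss_krb5.h -- is n-folded up to the cipher block
 * size before being fed to the encryption step of DK().
 */
u8 constant[5], block[16];			/* 16 == AES block size */

constant[0] = (usage >> 24) & 0xff;
constant[1] = (usage >> 16) & 0xff;
constant[2] = (usage >> 8) & 0xff;
constant[3] = usage & 0xff;
constant[4] = KEY_USAGE_SEED_ENCRYPTION;	/* 0xAA */

krb5_nfold(5 * 8, constant, 16 * 8, block);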
|
||||
/*
|
||||
* This is the DK (derive_key) function as described in rfc3961, sec 5.1
|
||||
* Taken from MIT Kerberos and modified.
|
||||
*/
|
||||
|
||||
u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
|
||||
const struct xdr_netobj *inkey,
|
||||
struct xdr_netobj *outkey,
|
||||
const struct xdr_netobj *in_constant,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
size_t blocksize, keybytes, keylength, n;
|
||||
unsigned char *inblockdata, *outblockdata, *rawkey;
|
||||
struct xdr_netobj inblock, outblock;
|
||||
struct crypto_blkcipher *cipher;
|
||||
u32 ret = EINVAL;
|
||||
|
||||
blocksize = gk5e->blocksize;
|
||||
keybytes = gk5e->keybytes;
|
||||
keylength = gk5e->keylength;
|
||||
|
||||
if ((inkey->len != keylength) || (outkey->len != keylength))
|
||||
goto err_return;
|
||||
|
||||
cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cipher))
|
||||
goto err_return;
|
||||
if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
|
||||
goto err_return;
|
||||
|
||||
/* allocate and set up buffers */
|
||||
|
||||
ret = ENOMEM;
|
||||
inblockdata = kmalloc(blocksize, gfp_mask);
|
||||
if (inblockdata == NULL)
|
||||
goto err_free_cipher;
|
||||
|
||||
outblockdata = kmalloc(blocksize, gfp_mask);
|
||||
if (outblockdata == NULL)
|
||||
goto err_free_in;
|
||||
|
||||
rawkey = kmalloc(keybytes, gfp_mask);
|
||||
if (rawkey == NULL)
|
||||
goto err_free_out;
|
||||
|
||||
inblock.data = (char *) inblockdata;
|
||||
inblock.len = blocksize;
|
||||
|
||||
outblock.data = (char *) outblockdata;
|
||||
outblock.len = blocksize;
|
||||
|
||||
/* initialize the input block */
|
||||
|
||||
if (in_constant->len == inblock.len) {
|
||||
memcpy(inblock.data, in_constant->data, inblock.len);
|
||||
} else {
|
||||
krb5_nfold(in_constant->len * 8, in_constant->data,
|
||||
inblock.len * 8, inblock.data);
|
||||
}
|
||||
|
||||
/* loop encrypting the blocks until enough key bytes are generated */
|
||||
|
||||
n = 0;
|
||||
while (n < keybytes) {
|
||||
(*(gk5e->encrypt))(cipher, NULL, inblock.data,
|
||||
outblock.data, inblock.len);
|
||||
|
||||
if ((keybytes - n) <= outblock.len) {
|
||||
memcpy(rawkey + n, outblock.data, (keybytes - n));
|
||||
break;
|
||||
}
|
||||
|
||||
memcpy(rawkey + n, outblock.data, outblock.len);
|
||||
memcpy(inblock.data, outblock.data, outblock.len);
|
||||
n += outblock.len;
|
||||
}
|
||||
|
||||
/* postprocess the key */
|
||||
|
||||
inblock.data = (char *) rawkey;
|
||||
inblock.len = keybytes;
|
||||
|
||||
BUG_ON(gk5e->mk_key == NULL);
|
||||
ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey);
|
||||
if (ret) {
|
||||
dprintk("%s: got %d from mk_key function for '%s'\n",
|
||||
__func__, ret, gk5e->encrypt_name);
|
||||
goto err_free_raw;
|
||||
}
|
||||
|
||||
/* clean memory, free resources and exit */
|
||||
|
||||
ret = 0;
|
||||
|
||||
err_free_raw:
|
||||
memset(rawkey, 0, keybytes);
|
||||
kfree(rawkey);
|
||||
err_free_out:
|
||||
memset(outblockdata, 0, blocksize);
|
||||
kfree(outblockdata);
|
||||
err_free_in:
|
||||
memset(inblockdata, 0, blocksize);
|
||||
kfree(inblockdata);
|
||||
err_free_cipher:
|
||||
crypto_free_blkcipher(cipher);
|
||||
err_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define smask(step) ((1<<step)-1)
|
||||
#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
|
||||
#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)
|
||||
|
||||
static void mit_des_fixup_key_parity(u8 key[8])
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < 8; i++) {
|
||||
key[i] &= 0xfe;
|
||||
key[i] |= 1^parity_char(key[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the des3 key derivation postprocess function
|
||||
*/
|
||||
u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
|
||||
struct xdr_netobj *randombits,
|
||||
struct xdr_netobj *key)
|
||||
{
|
||||
int i;
|
||||
u32 ret = EINVAL;
|
||||
|
||||
if (key->len != 24) {
|
||||
dprintk("%s: key->len is %d\n", __func__, key->len);
|
||||
goto err_out;
|
||||
}
|
||||
if (randombits->len != 21) {
|
||||
dprintk("%s: randombits->len is %d\n",
|
||||
__func__, randombits->len);
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* take the seven bytes, move them around into the top 7 bits of the
|
||||
8 key bytes, then compute the parity bits. Do this three times. */
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
memcpy(key->data + i*8, randombits->data + i*7, 7);
|
||||
key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
|
||||
((key->data[i*8+1]&1)<<2) |
|
||||
((key->data[i*8+2]&1)<<3) |
|
||||
((key->data[i*8+3]&1)<<4) |
|
||||
((key->data[i*8+4]&1)<<5) |
|
||||
((key->data[i*8+5]&1)<<6) |
|
||||
((key->data[i*8+6]&1)<<7));
|
||||
|
||||
mit_des_fixup_key_parity(key->data + i*8);
|
||||
}
|
||||
ret = 0;
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
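/*
 * Worked layout for one of the three 8-byte DES keys built above
 * (i == 0), before the parity fixup:
 *
 *	key[0..6] = randombits[0..6]		(the 7 random bytes)
 *	key[7]    = bit0(key[0]) << 1 | bit0(key[1]) << 2 | ...
 *						| bit0(key[6]) << 7
 *
 * mit_des_fixup_key_parity() then gives every byte odd parity by
 * clearing bit 0 and setting it to 1 ^ parity(bits 7..1).
 */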
||||
|
||||
/*
|
||||
* This is the aes key derivation postprocess function
|
||||
*/
|
||||
u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
|
||||
struct xdr_netobj *randombits,
|
||||
struct xdr_netobj *key)
|
||||
{
|
||||
u32 ret = EINVAL;
|
||||
|
||||
if (key->len != 16 && key->len != 32) {
|
||||
dprintk("%s: key->len is %d\n", __func__, key->len);
|
||||
goto err_out;
|
||||
}
|
||||
if (randombits->len != 16 && randombits->len != 32) {
|
||||
dprintk("%s: randombits->len is %d\n",
|
||||
__func__, randombits->len);
|
||||
goto err_out;
|
||||
}
|
||||
if (randombits->len != key->len) {
|
||||
dprintk("%s: randombits->len is %d, key->len is %d\n",
|
||||
__func__, randombits->len, key->len);
|
||||
goto err_out;
|
||||
}
|
||||
memcpy(key->data, randombits->data, key->len);
|
||||
ret = 0;
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* linux/net/sunrpc/gss_krb5_mech.c
|
||||
*
|
||||
* Copyright (c) 2001 The Regents of the University of Michigan.
|
||||
* Copyright (c) 2001-2008 The Regents of the University of Michigan.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Andy Adamson <andros@umich.edu>
|
||||
|
@@ -48,6 +48,143 @@
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static struct gss_api_mech gss_kerberos_mech;	/* forward declaration */

static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
	/*
	 * DES (All DES enctypes are mapped to the same gss functionality)
	 */
	{
	  .etype = ENCTYPE_DES_CBC_RAW,
	  .ctype = CKSUMTYPE_RSA_MD5,
	  .name = "des-cbc-crc",
	  .encrypt_name = "cbc(des)",
	  .cksum_name = "md5",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = NULL,
	  .signalg = SGN_ALG_DES_MAC_MD5,
	  .sealalg = SEAL_ALG_DES,
	  .keybytes = 7,
	  .keylength = 8,
	  .blocksize = 8,
	  .conflen = 8,
	  .cksumlength = 8,
	  .keyed_cksum = 0,
	},
	/*
	 * RC4-HMAC
	 */
	{
	  .etype = ENCTYPE_ARCFOUR_HMAC,
	  .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
	  .name = "rc4-hmac",
	  .encrypt_name = "ecb(arc4)",
	  .cksum_name = "hmac(md5)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = NULL,
	  .signalg = SGN_ALG_HMAC_MD5,
	  .sealalg = SEAL_ALG_MICROSOFT_RC4,
	  .keybytes = 16,
	  .keylength = 16,
	  .blocksize = 1,
	  .conflen = 8,
	  .cksumlength = 8,
	  .keyed_cksum = 1,
	},
	/*
	 * 3DES
	 */
	{
	  .etype = ENCTYPE_DES3_CBC_RAW,
	  .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
	  .name = "des3-hmac-sha1",
	  .encrypt_name = "cbc(des3_ede)",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_des3_make_key,
	  .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
	  .sealalg = SEAL_ALG_DES3KD,
	  .keybytes = 21,
	  .keylength = 24,
	  .blocksize = 8,
	  .conflen = 8,
	  .cksumlength = 20,
	  .keyed_cksum = 1,
	},
	/*
	 * AES128
	 */
	{
	  .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
	  .name = "aes128-cts",
	  .encrypt_name = "cts(cbc(aes))",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_aes_make_key,
	  .encrypt_v2 = gss_krb5_aes_encrypt,
	  .decrypt_v2 = gss_krb5_aes_decrypt,
	  .signalg = -1,
	  .sealalg = -1,
	  .keybytes = 16,
	  .keylength = 16,
	  .blocksize = 16,
	  .conflen = 16,
	  .cksumlength = 12,
	  .keyed_cksum = 1,
	},
	/*
	 * AES256
	 */
	{
	  .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
	  .name = "aes256-cts",
	  .encrypt_name = "cts(cbc(aes))",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_aes_make_key,
	  .encrypt_v2 = gss_krb5_aes_encrypt,
	  .decrypt_v2 = gss_krb5_aes_decrypt,
	  .signalg = -1,
	  .sealalg = -1,
	  .keybytes = 32,
	  .keylength = 32,
	  .blocksize = 16,
	  .conflen = 16,
	  .cksumlength = 12,
	  .keyed_cksum = 1,
	},
};

static const int num_supported_enctypes =
	ARRAY_SIZE(supported_gss_krb5_enctypes);

static int
supported_gss_krb5_enctype(int etype)
{
	int i;
	for (i = 0; i < num_supported_enctypes; i++)
		if (supported_gss_krb5_enctypes[i].etype == etype)
			return 1;
	return 0;
}

static const struct gss_krb5_enctype *
get_gss_krb5_enctype(int etype)
{
	int i;
	for (i = 0; i < num_supported_enctypes; i++)
		if (supported_gss_krb5_enctypes[i].etype == etype)
			return &supported_gss_krb5_enctypes[i];
	return NULL;
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, int len)
{

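The table above is the heart of this hunk: each Kerberos enctype number is tied to the Linux crypto transform names and to the key, confounder and checksum lengths the rest of the GSS code needs, so callers can stay enctype-agnostic. As a rough illustration of the same lookup pattern, here is a minimal user-space sketch; the struct and helper names are invented for this example, and only the enctype numbers (RFC 3961/4121) and the lengths are taken from the table above.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's gss_krb5_enctype table entries. */
struct enctype_info {
	int         etype;        /* RFC 3961/4121 enctype number */
	const char *name;         /* Kerberos enctype name */
	const char *cipher;       /* crypto transform it maps to */
	int         keylength;    /* protocol key length in bytes */
	int         cksumlength;  /* MIC/checksum length in bytes */
};

static const struct enctype_info enctypes[] = {
	{  1, "des-cbc-crc",   "cbc(des)",      8,  8 },
	{ 16, "des3-cbc-sha1", "cbc(des3_ede)", 24, 20 },
	{ 23, "rc4-hmac",      "ecb(arc4)",     16,  8 },
	{ 17, "aes128-cts",    "cts(cbc(aes))", 16, 12 },
	{ 18, "aes256-cts",    "cts(cbc(aes))", 32, 12 },
};

static const struct enctype_info *lookup_enctype(int etype)
{
	size_t i;

	for (i = 0; i < sizeof(enctypes) / sizeof(enctypes[0]); i++)
		if (enctypes[i].etype == etype)
			return &enctypes[i];
	return NULL;	/* unsupported enctype */
}

int main(void)
{
	const struct enctype_info *e = lookup_enctype(18);

	if (e)
		printf("%s -> %s, key %d bytes, cksum %d bytes\n",
		       e->name, e->cipher, e->keylength, e->cksumlength);
	return 0;
}
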
@@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
}

static inline const void *
get_key(const void *p, const void *end, struct crypto_blkcipher **res)
get_key(const void *p, const void *end,
	struct krb5_ctx *ctx, struct crypto_blkcipher **res)
{
	struct xdr_netobj	key;
	int			alg;
	char			*alg_name;

	p = simple_get_bytes(p, end, &alg, sizeof(alg));
	if (IS_ERR(p))
		goto out_err;

	switch (alg) {
	case ENCTYPE_DES_CBC_CRC:
	case ENCTYPE_DES_CBC_MD4:
	case ENCTYPE_DES_CBC_MD5:
		/* Map all these key types to ENCTYPE_DES_CBC_RAW */
		alg = ENCTYPE_DES_CBC_RAW;
		break;
	}

	if (!supported_gss_krb5_enctype(alg)) {
		printk(KERN_WARNING "gss_kerberos_mech: unsupported "
			"encryption key algorithm %d\n", alg);
		goto out_err;
	}
	p = simple_get_netobj(p, end, &key);
	if (IS_ERR(p))
		goto out_err;

	switch (alg) {
	case ENCTYPE_DES_CBC_RAW:
		alg_name = "cbc(des)";
		break;
	default:
		printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
		goto out_err_free_key;
	}
	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
	*res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
							CRYPTO_ALG_ASYNC);
	if (IS_ERR(*res)) {
		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
		printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		*res = NULL;
		goto out_err_free_key;
	}
	if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
		printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
		printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		goto out_err_free_tfm;
	}

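get_key() walks the flat context blob handed down from gssd with the simple_get_bytes()/simple_get_netobj() helpers, which either advance a cursor or hand back an encoded error pointer. A minimal user-space sketch of that bounds-checked cursor idiom follows; the helper name and blob layout are invented for illustration, and the kernel helpers return ERR_PTR() values rather than NULL.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy 'len' bytes out of [p, end) and advance the cursor, or fail. */
static const void *get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const uint8_t *next = (const uint8_t *)p + len;

	if (next > (const uint8_t *)end || next < (const uint8_t *)p)
		return NULL;		/* overrun: the kernel returns ERR_PTR(-EFAULT) style errors */
	memcpy(res, p, len);
	return next;
}

int main(void)
{
	/* Toy blob: one "enctype" byte followed by a two-byte key. */
	uint8_t blob[] = { 23, 0xaa, 0xbb };
	const void *p = blob, *end = blob + sizeof(blob);
	uint8_t etype, key[2];

	p = get_bytes(p, end, &etype, sizeof(etype));
	if (p)
		p = get_bytes(p, end, key, sizeof(key));
	if (!p) {
		fprintf(stderr, "truncated context blob\n");
		return 1;
	}
	printf("enctype %d, key %02x%02x\n", etype, key[0], key[1]);
	return 0;
}
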
@@ -123,56 +270,55 @@ out_err:
|
|||
}
|
||||
|
||||
static int
|
||||
gss_import_sec_context_kerberos(const void *p,
|
||||
size_t len,
|
||||
struct gss_ctx *ctx_id)
|
||||
gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
|
||||
{
|
||||
const void *end = (const void *)((const char *)p + len);
|
||||
struct krb5_ctx *ctx;
|
||||
int tmp;
|
||||
|
||||
if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
|
||||
p = ERR_PTR(-ENOMEM);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
|
||||
/* Old format supports only DES! Any other enctype uses new format */
|
||||
ctx->enctype = ENCTYPE_DES_CBC_RAW;
|
||||
|
||||
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
|
||||
if (ctx->gk5e == NULL)
|
||||
goto out_err;
|
||||
|
||||
/* The downcall format was designed before we completely understood
|
||||
* the uses of the context fields; so it includes some stuff we
|
||||
* just give some minimal sanity-checking, and some we ignore
|
||||
* completely (like the next twenty bytes): */
|
||||
if (unlikely(p + 20 > end || p + 20 < p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
p += 20;
|
||||
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
if (tmp != SGN_ALG_DES_MAC_MD5) {
|
||||
p = ERR_PTR(-ENOSYS);
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
}
|
||||
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
if (tmp != SEAL_ALG_DES) {
|
||||
p = ERR_PTR(-ENOSYS);
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
}
|
||||
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
goto out_err;
|
||||
p = simple_get_netobj(p, end, &ctx->mech_used);
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_ctx;
|
||||
p = get_key(p, end, &ctx->enc);
|
||||
goto out_err;
|
||||
p = get_key(p, end, ctx, &ctx->enc);
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_mech;
|
||||
p = get_key(p, end, &ctx->seq);
|
||||
p = get_key(p, end, ctx, &ctx->seq);
|
||||
if (IS_ERR(p))
|
||||
goto out_err_free_key1;
|
||||
if (p != end) {
|
||||
|
@@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p,
|
|||
goto out_err_free_key2;
|
||||
}
|
||||
|
||||
ctx_id->internal_ctx_id = ctx;
|
||||
|
||||
dprintk("RPC: Successfully imported new context.\n");
|
||||
return 0;
|
||||
|
||||
out_err_free_key2:
|
||||
|
@@ -191,18 +334,378 @@ out_err_free_key1:
|
|||
crypto_free_blkcipher(ctx->enc);
|
||||
out_err_free_mech:
|
||||
kfree(ctx->mech_used.data);
|
||||
out_err_free_ctx:
|
||||
kfree(ctx);
|
||||
out_err:
|
||||
return PTR_ERR(p);
|
||||
}
|
||||
|
||||
struct crypto_blkcipher *
|
||||
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
|
||||
{
|
||||
struct crypto_blkcipher *cp;
|
||||
|
||||
cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cp)) {
|
||||
dprintk("gss_kerberos_mech: unable to initialize "
|
||||
"crypto algorithm %s\n", cname);
|
||||
return NULL;
|
||||
}
|
||||
if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
|
||||
dprintk("gss_kerberos_mech: error setting key for "
|
||||
"crypto algorithm %s\n", cname);
|
||||
crypto_free_blkcipher(cp);
|
||||
return NULL;
|
||||
}
|
||||
return cp;
|
||||
}
|
||||
|
||||
static inline void
|
||||
set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed)
|
||||
{
|
||||
cdata[0] = (usage>>24)&0xff;
|
||||
cdata[1] = (usage>>16)&0xff;
|
||||
cdata[2] = (usage>>8)&0xff;
|
||||
cdata[3] = usage&0xff;
|
||||
cdata[4] = seed;
|
||||
}
|
||||
|
||||
static int
|
||||
context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
|
||||
{
|
||||
struct xdr_netobj c, keyin, keyout;
|
||||
u8 cdata[GSS_KRB5_K5CLENGTH];
|
||||
u32 err;
|
||||
|
||||
c.len = GSS_KRB5_K5CLENGTH;
|
||||
c.data = cdata;
|
||||
|
||||
keyin.data = ctx->Ksess;
|
||||
keyin.len = ctx->gk5e->keylength;
|
||||
keyout.len = ctx->gk5e->keylength;
|
||||
|
||||
/* seq uses the raw key */
|
||||
ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
|
||||
ctx->Ksess);
|
||||
if (ctx->seq == NULL)
|
||||
goto out_err;
|
||||
|
||||
ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
|
||||
ctx->Ksess);
|
||||
if (ctx->enc == NULL)
|
||||
goto out_free_seq;
|
||||
|
||||
/* derive cksum */
|
||||
set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
|
||||
keyout.data = ctx->cksum;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving cksum key\n",
|
||||
__func__, err);
|
||||
goto out_free_enc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_enc:
|
||||
crypto_free_blkcipher(ctx->enc);
|
||||
out_free_seq:
|
||||
crypto_free_blkcipher(ctx->seq);
|
||||
out_err:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that RC4 depends on deriving keys using the sequence
|
||||
* number or the checksum of a token. Therefore, the final keys
|
||||
* cannot be calculated until the token is being constructed!
|
||||
*/
|
||||
static int
|
||||
context_derive_keys_rc4(struct krb5_ctx *ctx)
|
||||
{
|
||||
struct crypto_hash *hmac;
|
||||
char sigkeyconstant[] = "signaturekey";
|
||||
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
|
||||
struct hash_desc desc;
|
||||
struct scatterlist sg[1];
|
||||
int err;
|
||||
|
||||
dprintk("RPC: %s: entered\n", __func__);
|
||||
/*
|
||||
* derive cksum (aka Ksign) key
|
||||
*/
|
||||
hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(hmac)) {
|
||||
dprintk("%s: error %ld allocating hash '%s'\n",
|
||||
__func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
|
||||
err = PTR_ERR(hmac);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
|
||||
if (err)
|
||||
goto out_err_free_hmac;
|
||||
|
||||
sg_init_table(sg, 1);
|
||||
sg_set_buf(sg, sigkeyconstant, slen);
|
||||
|
||||
desc.tfm = hmac;
|
||||
desc.flags = 0;
|
||||
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto out_err_free_hmac;
|
||||
|
||||
err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
|
||||
if (err)
|
||||
goto out_err_free_hmac;
|
||||
/*
|
||||
* allocate hash, and blkciphers for data and seqnum encryption
|
||||
*/
|
||||
ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(ctx->enc)) {
|
||||
err = PTR_ERR(ctx->enc);
|
||||
goto out_err_free_hmac;
|
||||
}
|
||||
|
||||
ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(ctx->seq)) {
|
||||
crypto_free_blkcipher(ctx->enc);
|
||||
err = PTR_ERR(ctx->seq);
|
||||
goto out_err_free_hmac;
|
||||
}
|
||||
|
||||
dprintk("RPC: %s: returning success\n", __func__);
|
||||
|
||||
err = 0;
|
||||
|
||||
out_err_free_hmac:
|
||||
crypto_free_hash(hmac);
|
||||
out_err:
|
||||
dprintk("RPC: %s: returning %d\n", __func__, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
|
||||
{
|
||||
struct xdr_netobj c, keyin, keyout;
|
||||
u8 cdata[GSS_KRB5_K5CLENGTH];
|
||||
u32 err;
|
||||
|
||||
c.len = GSS_KRB5_K5CLENGTH;
|
||||
c.data = cdata;
|
||||
|
||||
keyin.data = ctx->Ksess;
|
||||
keyin.len = ctx->gk5e->keylength;
|
||||
keyout.len = ctx->gk5e->keylength;
|
||||
|
||||
/* initiator seal encryption */
|
||||
set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
|
||||
keyout.data = ctx->initiator_seal;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving initiator_seal key\n",
|
||||
__func__, err);
|
||||
goto out_err;
|
||||
}
|
||||
ctx->initiator_enc = context_v2_alloc_cipher(ctx,
|
||||
ctx->gk5e->encrypt_name,
|
||||
ctx->initiator_seal);
|
||||
if (ctx->initiator_enc == NULL)
|
||||
goto out_err;
|
||||
|
||||
/* acceptor seal encryption */
|
||||
set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
|
||||
keyout.data = ctx->acceptor_seal;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving acceptor_seal key\n",
|
||||
__func__, err);
|
||||
goto out_free_initiator_enc;
|
||||
}
|
||||
ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
|
||||
ctx->gk5e->encrypt_name,
|
||||
ctx->acceptor_seal);
|
||||
if (ctx->acceptor_enc == NULL)
|
||||
goto out_free_initiator_enc;
|
||||
|
||||
/* initiator sign checksum */
|
||||
set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
|
||||
keyout.data = ctx->initiator_sign;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving initiator_sign key\n",
|
||||
__func__, err);
|
||||
goto out_free_acceptor_enc;
|
||||
}
|
||||
|
||||
/* acceptor sign checksum */
|
||||
set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
|
||||
keyout.data = ctx->acceptor_sign;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving acceptor_sign key\n",
|
||||
__func__, err);
|
||||
goto out_free_acceptor_enc;
|
||||
}
|
||||
|
||||
/* initiator seal integrity */
|
||||
set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
|
||||
keyout.data = ctx->initiator_integ;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving initiator_integ key\n",
|
||||
__func__, err);
|
||||
goto out_free_acceptor_enc;
|
||||
}
|
||||
|
||||
/* acceptor seal integrity */
|
||||
set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
|
||||
keyout.data = ctx->acceptor_integ;
|
||||
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
|
||||
if (err) {
|
||||
dprintk("%s: Error %d deriving acceptor_integ key\n",
|
||||
__func__, err);
|
||||
goto out_free_acceptor_enc;
|
||||
}
|
||||
|
||||
switch (ctx->enctype) {
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
ctx->initiator_enc_aux =
|
||||
context_v2_alloc_cipher(ctx, "cbc(aes)",
|
||||
ctx->initiator_seal);
|
||||
if (ctx->initiator_enc_aux == NULL)
|
||||
goto out_free_acceptor_enc;
|
||||
ctx->acceptor_enc_aux =
|
||||
context_v2_alloc_cipher(ctx, "cbc(aes)",
|
||||
ctx->acceptor_seal);
|
||||
if (ctx->acceptor_enc_aux == NULL) {
|
||||
crypto_free_blkcipher(ctx->initiator_enc_aux);
|
||||
goto out_free_acceptor_enc;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_acceptor_enc:
|
||||
crypto_free_blkcipher(ctx->acceptor_enc);
|
||||
out_free_initiator_enc:
|
||||
crypto_free_blkcipher(ctx->initiator_enc);
|
||||
out_err:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int
|
||||
gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
int keylen;
|
||||
|
||||
p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
|
||||
if (IS_ERR(p))
|
||||
goto out_err;
|
||||
ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
|
||||
|
||||
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
|
||||
if (IS_ERR(p))
|
||||
goto out_err;
|
||||
p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
|
||||
if (IS_ERR(p))
|
||||
goto out_err;
|
||||
/* set seq_send for use by "older" enctypes */
|
||||
ctx->seq_send = ctx->seq_send64;
|
||||
if (ctx->seq_send64 != ctx->seq_send) {
|
||||
dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
|
||||
(long unsigned)ctx->seq_send64, ctx->seq_send);
|
||||
goto out_err;
|
||||
}
|
||||
p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
|
||||
if (IS_ERR(p))
|
||||
goto out_err;
|
||||
/* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
|
||||
if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
|
||||
ctx->enctype = ENCTYPE_DES3_CBC_RAW;
|
||||
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
|
||||
if (ctx->gk5e == NULL) {
|
||||
dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
|
||||
ctx->enctype);
|
||||
p = ERR_PTR(-EINVAL);
|
||||
goto out_err;
|
||||
}
|
||||
keylen = ctx->gk5e->keylength;
|
||||
|
||||
p = simple_get_bytes(p, end, ctx->Ksess, keylen);
|
||||
if (IS_ERR(p))
|
||||
goto out_err;
|
||||
|
||||
if (p != end) {
|
||||
p = ERR_PTR(-EINVAL);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data,
|
||||
gss_kerberos_mech.gm_oid.len, gfp_mask);
|
||||
if (unlikely(ctx->mech_used.data == NULL)) {
|
||||
p = ERR_PTR(-ENOMEM);
|
||||
goto out_err;
|
||||
}
|
||||
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
|
||||
|
||||
switch (ctx->enctype) {
|
||||
case ENCTYPE_DES3_CBC_RAW:
|
||||
return context_derive_keys_des3(ctx, gfp_mask);
|
||||
case ENCTYPE_ARCFOUR_HMAC:
|
||||
return context_derive_keys_rc4(ctx);
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
return context_derive_keys_new(ctx, gfp_mask);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
out_err:
|
||||
return PTR_ERR(p);
|
||||
}
|
||||
|
||||
static int
|
||||
gss_import_sec_context_kerberos(const void *p, size_t len,
|
||||
struct gss_ctx *ctx_id,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
const void *end = (const void *)((const char *)p + len);
|
||||
struct krb5_ctx *ctx;
|
||||
int ret;
|
||||
|
||||
ctx = kzalloc(sizeof(*ctx), gfp_mask);
|
||||
if (ctx == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (len == 85)
|
||||
ret = gss_import_v1_context(p, end, ctx);
|
||||
else
|
||||
ret = gss_import_v2_context(p, end, ctx, gfp_mask);
|
||||
|
||||
if (ret == 0)
|
||||
ctx_id->internal_ctx_id = ctx;
|
||||
else
|
||||
kfree(ctx);
|
||||
|
||||
dprintk("RPC: %s: returning %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
gss_delete_sec_context_kerberos(void *internal_ctx) {
|
||||
struct krb5_ctx *kctx = internal_ctx;
|
||||
|
||||
crypto_free_blkcipher(kctx->seq);
|
||||
crypto_free_blkcipher(kctx->enc);
|
||||
crypto_free_blkcipher(kctx->acceptor_enc);
|
||||
crypto_free_blkcipher(kctx->initiator_enc);
|
||||
crypto_free_blkcipher(kctx->acceptor_enc_aux);
|
||||
crypto_free_blkcipher(kctx->initiator_enc_aux);
|
||||
kfree(kctx->mech_used.data);
|
||||
kfree(kctx);
|
||||
}
|
||||
|
@@ -241,6 +744,7 @@ static struct gss_api_mech gss_kerberos_mech = {
|
|||
.gm_ops = &gss_kerberos_ops,
|
||||
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
|
||||
.gm_pfs = gss_kerberos_pfs,
|
||||
.gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
|
||||
};
|
||||
|
||||
static int __init init_kerberos_module(void)
|
||||
|
|
|
@@ -3,7 +3,7 @@
 *
 *  Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
@@ -70,53 +70,154 @@
|
|||
|
||||
DEFINE_SPINLOCK(krb5_seq_lock);
|
||||
|
||||
u32
|
||||
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
|
||||
static char *
|
||||
setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
|
||||
{
|
||||
__be16 *ptr, *krb5_hdr;
|
||||
int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
|
||||
|
||||
token->len = g_token_size(&ctx->mech_used, body_size);
|
||||
|
||||
ptr = (__be16 *)token->data;
|
||||
g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);
|
||||
|
||||
/* ptr now at start of header described in rfc 1964, section 1.2.1: */
|
||||
krb5_hdr = ptr;
|
||||
*ptr++ = KG_TOK_MIC_MSG;
|
||||
*ptr++ = cpu_to_le16(ctx->gk5e->signalg);
|
||||
*ptr++ = SEAL_ALG_NONE;
|
||||
*ptr++ = 0xffff;
|
||||
|
||||
return (char *)krb5_hdr;
|
||||
}
|
||||
|
||||
static void *
|
||||
setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
|
||||
{
|
||||
__be16 *ptr, *krb5_hdr;
|
||||
u8 *p, flags = 0x00;
|
||||
|
||||
if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
|
||||
flags |= 0x01;
|
||||
if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
|
||||
flags |= 0x04;
|
||||
|
||||
/* Per rfc 4121, sec 4.2.6.1, there is no header,
|
||||
* just start the token */
|
||||
krb5_hdr = ptr = (__be16 *)token->data;
|
||||
|
||||
*ptr++ = KG2_TOK_MIC;
|
||||
p = (u8 *)ptr;
|
||||
*p++ = flags;
|
||||
*p++ = 0xff;
|
||||
ptr = (__be16 *)p;
|
||||
*ptr++ = 0xffff;
|
||||
*ptr++ = 0xffff;
|
||||
|
||||
token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
|
||||
return krb5_hdr;
|
||||
}
|
||||
|
||||
static u32
|
||||
gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
|
||||
struct xdr_netobj *token)
|
||||
{
|
||||
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
|
||||
char cksumdata[16];
|
||||
struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
|
||||
unsigned char *ptr, *msg_start;
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
void *ptr;
|
||||
s32 now;
|
||||
u32 seq_send;
|
||||
u8 *cksumkey;
|
||||
|
||||
dprintk("RPC: gss_krb5_seal\n");
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
BUG_ON(ctx == NULL);
|
||||
|
||||
now = get_seconds();
|
||||
|
||||
token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8);
|
||||
ptr = setup_token(ctx, token);
|
||||
|
||||
ptr = token->data;
|
||||
g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr);
|
||||
if (ctx->gk5e->keyed_cksum)
|
||||
cksumkey = ctx->cksum;
|
||||
else
|
||||
cksumkey = NULL;
|
||||
|
||||
/* ptr now at header described in rfc 1964, section 1.2.1: */
|
||||
ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff);
|
||||
ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff);
|
||||
|
||||
msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8;
|
||||
|
||||
*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
|
||||
memset(ptr + 4, 0xff, 4);
|
||||
|
||||
if (make_checksum("md5", ptr, 8, text, 0, &md5cksum))
|
||||
if (make_checksum(ctx, ptr, 8, text, 0, cksumkey,
|
||||
KG_USAGE_SIGN, &md5cksum))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
|
||||
md5cksum.data, md5cksum.len))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
|
||||
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
|
||||
|
||||
spin_lock(&krb5_seq_lock);
|
||||
seq_send = ctx->seq_send++;
|
||||
spin_unlock(&krb5_seq_lock);
|
||||
|
||||
if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
|
||||
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN,
|
||||
ptr + 8))
|
||||
if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
|
||||
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
|
||||
struct xdr_netobj *token)
|
||||
{
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj cksumobj = { .len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
void *krb5_hdr;
|
||||
s32 now;
|
||||
u64 seq_send;
|
||||
u8 *cksumkey;
|
||||
unsigned int cksum_usage;
|
||||
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
|
||||
krb5_hdr = setup_token_v2(ctx, token);
|
||||
|
||||
/* Set up the sequence number. Now 64-bits in clear
|
||||
* text and w/o direction indicator */
|
||||
spin_lock(&krb5_seq_lock);
|
||||
seq_send = ctx->seq_send64++;
|
||||
spin_unlock(&krb5_seq_lock);
|
||||
*((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send);
|
||||
|
||||
if (ctx->initiate) {
|
||||
cksumkey = ctx->initiator_sign;
|
||||
cksum_usage = KG_USAGE_INITIATOR_SIGN;
|
||||
} else {
|
||||
cksumkey = ctx->acceptor_sign;
|
||||
cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
|
||||
}
|
||||
|
||||
if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN,
|
||||
text, 0, cksumkey, cksum_usage, &cksumobj))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len);
|
||||
|
||||
now = get_seconds();
|
||||
|
||||
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
|
||||
struct xdr_netobj *token)
|
||||
{
|
||||
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
|
||||
|
||||
switch (ctx->enctype) {
|
||||
default:
|
||||
BUG();
|
||||
case ENCTYPE_DES_CBC_RAW:
|
||||
case ENCTYPE_DES3_CBC_RAW:
|
||||
case ENCTYPE_ARCFOUR_HMAC:
|
||||
return gss_get_mic_v1(ctx, text, token);
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
return gss_get_mic_v2(ctx, text, token);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -39,14 +39,51 @@
|
|||
# define RPCDBG_FACILITY RPCDBG_AUTH
|
||||
#endif
|
||||
|
||||
static s32
|
||||
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
|
||||
unsigned char *cksum, unsigned char *buf)
|
||||
{
|
||||
struct crypto_blkcipher *cipher;
|
||||
unsigned char plain[8];
|
||||
s32 code;
|
||||
|
||||
dprintk("RPC: %s:\n", __func__);
|
||||
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
|
||||
plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
|
||||
plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
|
||||
plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
|
||||
plain[4] = direction;
|
||||
plain[5] = direction;
|
||||
plain[6] = direction;
|
||||
plain[7] = direction;
|
||||
|
||||
code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
|
||||
if (code)
|
||||
goto out;
|
||||
|
||||
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
|
||||
out:
|
||||
crypto_free_blkcipher(cipher);
|
||||
return code;
|
||||
}
|
||||
s32
|
||||
krb5_make_seq_num(struct crypto_blkcipher *key,
|
||||
krb5_make_seq_num(struct krb5_ctx *kctx,
|
||||
struct crypto_blkcipher *key,
|
||||
int direction,
|
||||
u32 seqnum,
|
||||
unsigned char *cksum, unsigned char *buf)
|
||||
{
|
||||
unsigned char plain[8];
|
||||
|
||||
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
|
||||
return krb5_make_rc4_seq_num(kctx, direction, seqnum,
|
||||
cksum, buf);
|
||||
|
||||
plain[0] = (unsigned char) (seqnum & 0xff);
|
||||
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
|
||||
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
|
||||
|
@@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key,
|
|||
return krb5_encrypt(key, cksum, plain, buf, 8);
|
||||
}
|
||||
|
||||
static s32
|
||||
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
|
||||
unsigned char *buf, int *direction, s32 *seqnum)
|
||||
{
|
||||
struct crypto_blkcipher *cipher;
|
||||
unsigned char plain[8];
|
||||
s32 code;
|
||||
|
||||
dprintk("RPC: %s:\n", __func__);
|
||||
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
|
||||
if (code)
|
||||
goto out;
|
||||
|
||||
code = krb5_decrypt(cipher, cksum, buf, plain, 8);
|
||||
if (code)
|
||||
goto out;
|
||||
|
||||
if ((plain[4] != plain[5]) || (plain[4] != plain[6])
|
||||
|| (plain[4] != plain[7])) {
|
||||
code = (s32)KG_BAD_SEQ;
|
||||
goto out;
|
||||
}
|
||||
|
||||
*direction = plain[4];
|
||||
|
||||
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
|
||||
(plain[2] << 8) | (plain[3]));
|
||||
out:
|
||||
crypto_free_blkcipher(cipher);
|
||||
return code;
|
||||
}
|
||||
|
||||
s32
|
||||
krb5_get_seq_num(struct crypto_blkcipher *key,
|
||||
krb5_get_seq_num(struct krb5_ctx *kctx,
|
||||
unsigned char *cksum,
|
||||
unsigned char *buf,
|
||||
int *direction, u32 *seqnum)
|
||||
{
|
||||
s32 code;
|
||||
unsigned char plain[8];
|
||||
struct crypto_blkcipher *key = kctx->seq;
|
||||
|
||||
dprintk("RPC: krb5_get_seq_num:\n");
|
||||
|
||||
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
|
||||
return krb5_get_rc4_seq_num(kctx, cksum, buf,
|
||||
direction, seqnum);
|
||||
|
||||
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
|
||||
return code;
|
||||
|
||||
|
|
|
@@ -3,7 +3,7 @@
 *
 *  Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
@@ -70,20 +70,21 @@
|
|||
/* read_token is a mic token, and message_buffer is the data that the mic was
|
||||
* supposedly taken over. */
|
||||
|
||||
u32
|
||||
gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
||||
static u32
|
||||
gss_verify_mic_v1(struct krb5_ctx *ctx,
|
||||
struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
|
||||
{
|
||||
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
|
||||
int signalg;
|
||||
int sealalg;
|
||||
char cksumdata[16];
|
||||
struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
s32 now;
|
||||
int direction;
|
||||
u32 seqnum;
|
||||
unsigned char *ptr = (unsigned char *)read_token->data;
|
||||
int bodysize;
|
||||
u8 *cksumkey;
|
||||
|
||||
dprintk("RPC: krb5_read_token\n");
|
||||
|
||||
|
@@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
|||
/* XXX sanity-check bodysize?? */
|
||||
|
||||
signalg = ptr[2] + (ptr[3] << 8);
|
||||
if (signalg != SGN_ALG_DES_MAC_MD5)
|
||||
if (signalg != ctx->gk5e->signalg)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
sealalg = ptr[4] + (ptr[5] << 8);
|
||||
|
@@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
|||
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum))
|
||||
if (ctx->gk5e->keyed_cksum)
|
||||
cksumkey = ctx->cksum;
|
||||
else
|
||||
cksumkey = NULL;
|
||||
|
||||
if (make_checksum(ctx, ptr, 8, message_buffer, 0,
|
||||
cksumkey, KG_USAGE_SIGN, &md5cksum))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
|
||||
if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
|
||||
ctx->gk5e->cksumlength))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
/* it got through unscathed. Make sure the context is unexpired */
|
||||
|
@@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
|||
|
||||
/* do sequencing checks */
|
||||
|
||||
if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum))
|
||||
if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
|
||||
&direction, &seqnum))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if ((ctx->initiate && direction != 0xff) ||
|
||||
|
@@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
|||
|
||||
return GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
static u32
|
||||
gss_verify_mic_v2(struct krb5_ctx *ctx,
|
||||
struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
|
||||
{
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
s32 now;
|
||||
u64 seqnum;
|
||||
u8 *ptr = read_token->data;
|
||||
u8 *cksumkey;
|
||||
u8 flags;
|
||||
int i;
|
||||
unsigned int cksum_usage;
|
||||
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
|
||||
if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
flags = ptr[2];
|
||||
if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
|
||||
(ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
if (flags & KG2_TOKEN_FLAG_SEALED) {
|
||||
dprintk("%s: token has unexpected sealed flag\n", __func__);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
for (i = 3; i < 8; i++)
|
||||
if (ptr[i] != 0xff)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
if (ctx->initiate) {
|
||||
cksumkey = ctx->acceptor_sign;
|
||||
cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
|
||||
} else {
|
||||
cksumkey = ctx->initiator_sign;
|
||||
cksum_usage = KG_USAGE_INITIATOR_SIGN;
|
||||
}
|
||||
|
||||
if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0,
|
||||
cksumkey, cksum_usage, &cksumobj))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN,
|
||||
ctx->gk5e->cksumlength))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
/* it got through unscathed. Make sure the context is unexpired */
|
||||
now = get_seconds();
|
||||
if (now > ctx->endtime)
|
||||
return GSS_S_CONTEXT_EXPIRED;
|
||||
|
||||
/* do sequencing checks */
|
||||
|
||||
seqnum = be64_to_cpup((__be64 *)ptr + 8);
|
||||
|
||||
return GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
|
||||
struct xdr_buf *message_buffer,
|
||||
struct xdr_netobj *read_token)
|
||||
{
|
||||
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
|
||||
|
||||
switch (ctx->enctype) {
|
||||
default:
|
||||
BUG();
|
||||
case ENCTYPE_DES_CBC_RAW:
|
||||
case ENCTYPE_DES3_CBC_RAW:
|
||||
case ENCTYPE_ARCFOUR_HMAC:
|
||||
return gss_verify_mic_v1(ctx, message_buffer, read_token);
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
return gss_verify_mic_v2(ctx, message_buffer, read_token);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -1,3 +1,33 @@
|
|||
/*
|
||||
* COPYRIGHT (c) 2008
|
||||
* The Regents of the University of Michigan
|
||||
* ALL RIGHTS RESERVED
|
||||
*
|
||||
* Permission is granted to use, copy, create derivative works
|
||||
* and redistribute this software and such derivative works
|
||||
* for any purpose, so long as the name of The University of
|
||||
* Michigan is not used in any advertising or publicity
|
||||
* pertaining to the use of distribution of this software
|
||||
* without specific, written prior authorization. If the
|
||||
* above copyright notice or any other identification of the
|
||||
* University of Michigan is included in any copy of any
|
||||
* portion of this software, then the disclaimer below must
|
||||
* also be included.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
|
||||
* FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
|
||||
* PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
|
||||
* MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
|
||||
* WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
|
||||
* REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
|
||||
* FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
|
||||
* CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
|
||||
* OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
|
||||
* IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/sunrpc/gss_krb5.h>
|
||||
|
@@ -12,10 +42,7 @@
|
|||
static inline int
|
||||
gss_krb5_padding(int blocksize, int length)
|
||||
{
|
||||
/* Most of the code is block-size independent but currently we
|
||||
* use only 8: */
|
||||
BUG_ON(blocksize != 8);
|
||||
return 8 - (length & 7);
|
||||
return blocksize - (length % blocksize);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@@ -86,8 +113,8 @@ out:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
make_confounder(char *p, u32 conflen)
|
||||
void
|
||||
gss_krb5_make_confounder(char *p, u32 conflen)
|
||||
{
|
||||
static u64 i = 0;
|
||||
u64 *q = (u64 *)p;
|
||||
|
@@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen)
|
|||
|
||||
/* XXX factor out common code with seal/unseal. */
|
||||
|
||||
u32
|
||||
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
|
||||
static u32
|
||||
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
|
||||
struct xdr_buf *buf, struct page **pages)
|
||||
{
|
||||
struct krb5_ctx *kctx = ctx->internal_ctx_id;
|
||||
char cksumdata[16];
|
||||
struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
int blocksize = 0, plainlen;
|
||||
unsigned char *ptr, *msg_start;
|
||||
s32 now;
|
||||
int headlen;
|
||||
struct page **tmp_pages;
|
||||
u32 seq_send;
|
||||
u8 *cksumkey;
|
||||
u32 conflen = kctx->gk5e->conflen;
|
||||
|
||||
dprintk("RPC: gss_wrap_kerberos\n");
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
|
||||
now = get_seconds();
|
||||
|
||||
blocksize = crypto_blkcipher_blocksize(kctx->enc);
|
||||
gss_krb5_add_padding(buf, offset, blocksize);
|
||||
BUG_ON((buf->len - offset) % blocksize);
|
||||
plainlen = blocksize + buf->len - offset;
|
||||
plainlen = conflen + buf->len - offset;
|
||||
|
||||
headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
|
||||
headlen = g_token_size(&kctx->mech_used,
|
||||
GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
|
||||
(buf->len - offset);
|
||||
|
||||
ptr = buf->head[0].iov_base + offset;
|
||||
/* shift data to make room for header. */
|
||||
xdr_extend_head(buf, offset, headlen);
|
||||
|
||||
/* XXX Would be cleverer to encrypt while copying. */
|
||||
/* XXX bounds checking, slack, etc. */
|
||||
memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
|
||||
buf->head[0].iov_len += headlen;
|
||||
buf->len += headlen;
|
||||
BUG_ON((buf->len - offset - headlen) % blocksize);
|
||||
|
||||
g_make_token_header(&kctx->mech_used,
|
||||
GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);
|
||||
GSS_KRB5_TOK_HDR_LEN +
|
||||
kctx->gk5e->cksumlength + plainlen, &ptr);
|
||||
|
||||
|
||||
/* ptr now at header described in rfc 1964, section 1.2.1: */
|
||||
ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
|
||||
ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
|
||||
|
||||
msg_start = ptr + 24;
|
||||
msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;
|
||||
|
||||
*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
|
||||
*(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
|
||||
memset(ptr + 4, 0xff, 4);
|
||||
*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);
|
||||
*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
|
||||
|
||||
make_confounder(msg_start, blocksize);
|
||||
gss_krb5_make_confounder(msg_start, conflen);
|
||||
|
||||
if (kctx->gk5e->keyed_cksum)
|
||||
cksumkey = kctx->cksum;
|
||||
else
|
||||
cksumkey = NULL;
|
||||
|
||||
/* XXXJBF: UGH!: */
|
||||
tmp_pages = buf->pages;
|
||||
buf->pages = pages;
|
||||
if (make_checksum("md5", ptr, 8, buf,
|
||||
offset + headlen - blocksize, &md5cksum))
|
||||
if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
|
||||
cksumkey, KG_USAGE_SEAL, &md5cksum))
|
||||
return GSS_S_FAILURE;
|
||||
buf->pages = tmp_pages;
|
||||
|
||||
if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
|
||||
md5cksum.data, md5cksum.len))
|
||||
return GSS_S_FAILURE;
|
||||
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
|
||||
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
|
||||
|
||||
spin_lock(&krb5_seq_lock);
|
||||
seq_send = kctx->seq_send++;
|
||||
|
@@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
|
|||
|
||||
/* XXX would probably be more efficient to compute checksum
|
||||
* and encrypt at the same time: */
|
||||
if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
|
||||
if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
|
||||
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
|
||||
pages))
|
||||
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
|
||||
struct crypto_blkcipher *cipher;
|
||||
int err;
|
||||
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cipher))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
|
||||
|
||||
err = gss_encrypt_xdr_buf(cipher, buf,
|
||||
offset + headlen - conflen, pages);
|
||||
crypto_free_blkcipher(cipher);
|
||||
if (err)
|
||||
return GSS_S_FAILURE;
|
||||
} else {
|
||||
if (gss_encrypt_xdr_buf(kctx->enc, buf,
|
||||
offset + headlen - conflen, pages))
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
|
||||
static u32
|
||||
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
||||
{
|
||||
struct krb5_ctx *kctx = ctx->internal_ctx_id;
|
||||
int signalg;
|
||||
int sealalg;
|
||||
char cksumdata[16];
|
||||
struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
|
||||
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
|
||||
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
|
||||
.data = cksumdata};
|
||||
s32 now;
|
||||
int direction;
|
||||
s32 seqnum;
|
||||
|
@@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
|
|||
void *data_start, *orig_start;
|
||||
int data_len;
|
||||
int blocksize;
|
||||
u32 conflen = kctx->gk5e->conflen;
|
||||
int crypt_offset;
|
||||
u8 *cksumkey;
|
||||
|
||||
dprintk("RPC: gss_unwrap_kerberos\n");
|
||||
|
||||
|
@@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
|
|||
/* get the sign and seal algorithms */
|
||||
|
||||
signalg = ptr[2] + (ptr[3] << 8);
|
||||
if (signalg != SGN_ALG_DES_MAC_MD5)
|
||||
if (signalg != kctx->gk5e->signalg)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
sealalg = ptr[4] + (ptr[5] << 8);
|
||||
if (sealalg != SEAL_ALG_DES)
|
||||
if (sealalg != kctx->gk5e->sealalg)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
if (gss_decrypt_xdr_buf(kctx->enc, buf,
|
||||
ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
|
||||
/*
|
||||
* Data starts after token header and checksum. ptr points
|
||||
* to the beginning of the token header
|
||||
*/
|
||||
crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
|
||||
(unsigned char *)buf->head[0].iov_base;
|
||||
|
||||
/*
|
||||
* Need plaintext seqnum to derive encryption key for arcfour-hmac
|
||||
*/
|
||||
if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
|
||||
ptr + 8, &direction, &seqnum))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
if ((kctx->initiate && direction != 0xff) ||
|
||||
(!kctx->initiate && direction != 0))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
|
||||
struct crypto_blkcipher *cipher;
|
||||
int err;
|
||||
|
||||
cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(cipher))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
|
||||
|
||||
err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
|
||||
crypto_free_blkcipher(cipher);
|
||||
if (err)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
} else {
|
||||
if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
}
|
||||
|
||||
if (make_checksum("md5", ptr, 8, buf,
|
||||
ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
|
||||
if (kctx->gk5e->keyed_cksum)
|
||||
cksumkey = kctx->cksum;
|
||||
else
|
||||
cksumkey = NULL;
|
||||
|
||||
if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
|
||||
cksumkey, KG_USAGE_SEAL, &md5cksum))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
|
||||
md5cksum.data, md5cksum.len))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
|
||||
if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
|
||||
kctx->gk5e->cksumlength))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
/* it got through unscathed. Make sure the context is unexpired */
|
||||
|
@@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
|
|||
|
||||
/* do sequencing checks */
|
||||
|
||||
if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
|
||||
&direction, &seqnum))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
if ((kctx->initiate && direction != 0xff) ||
|
||||
(!kctx->initiate && direction != 0))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
/* Copy the data back to the right position. XXX: Would probably be
|
||||
* better to copy and encrypt at the same time. */
|
||||
|
||||
blocksize = crypto_blkcipher_blocksize(kctx->enc);
|
||||
data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
|
||||
data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
|
||||
conflen;
|
||||
orig_start = buf->head[0].iov_base + offset;
|
||||
data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
|
||||
memmove(orig_start, data_start, data_len);
|
||||
|
@@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
|
|||
|
||||
return GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot currently handle tokens with rotated data. We need a
|
||||
* generalized routine to rotate the data in place. It is anticipated
|
||||
* that we won't encounter rotated data in the general case.
|
||||
*/
|
||||
static u32
|
||||
rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
|
||||
{
|
||||
unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
|
||||
|
||||
if (realrrc == 0)
|
||||
return 0;
|
||||
|
||||
dprintk("%s: cannot process token with rotated data: "
|
||||
"rrc %u, realrrc %u\n", __func__, rrc, realrrc);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static u32
|
||||
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
|
||||
struct xdr_buf *buf, struct page **pages)
|
||||
{
|
||||
int blocksize;
|
||||
u8 *ptr, *plainhdr;
|
||||
s32 now;
|
||||
u8 flags = 0x00;
|
||||
__be16 *be16ptr, ec = 0;
|
||||
__be64 *be64ptr;
|
||||
u32 err;
|
||||
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
|
||||
if (kctx->gk5e->encrypt_v2 == NULL)
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
/* make room for gss token header */
|
||||
if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
/* construct gss token header */
|
||||
ptr = plainhdr = buf->head[0].iov_base + offset;
|
||||
*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
|
||||
*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);
|
||||
|
||||
if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
|
||||
flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
|
||||
if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
|
||||
flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
|
||||
/* We always do confidentiality in wrap tokens */
|
||||
flags |= KG2_TOKEN_FLAG_SEALED;
|
||||
|
||||
*ptr++ = flags;
|
||||
*ptr++ = 0xff;
|
||||
be16ptr = (__be16 *)ptr;
|
||||
|
||||
blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
|
||||
*be16ptr++ = cpu_to_be16(ec);
|
||||
/* "inner" token header always uses 0 for RRC */
|
||||
*be16ptr++ = cpu_to_be16(0);
|
||||
|
||||
be64ptr = (__be64 *)be16ptr;
|
||||
spin_lock(&krb5_seq_lock);
|
||||
*be64ptr = cpu_to_be64(kctx->seq_send64++);
|
||||
spin_unlock(&krb5_seq_lock);
|
||||
|
||||
err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
now = get_seconds();
|
||||
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
static u32
|
||||
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
||||
{
|
||||
s32 now;
|
||||
u64 seqnum;
|
||||
u8 *ptr;
|
||||
u8 flags = 0x00;
|
||||
u16 ec, rrc;
|
||||
int err;
|
||||
u32 headskip, tailskip;
|
||||
u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
|
||||
unsigned int movelen;
|
||||
|
||||
|
||||
dprintk("RPC: %s\n", __func__);
|
||||
|
||||
if (kctx->gk5e->decrypt_v2 == NULL)
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
ptr = buf->head[0].iov_base + offset;
|
||||
|
||||
if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
flags = ptr[2];
|
||||
if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
|
||||
(kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
|
||||
return GSS_S_BAD_SIG;
|
||||
|
||||
if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
|
||||
dprintk("%s: token missing expected sealed flag\n", __func__);
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
}
|
||||
|
||||
if (ptr[3] != 0xff)
|
||||
return GSS_S_DEFECTIVE_TOKEN;
|
||||
|
||||
ec = be16_to_cpup((__be16 *)(ptr + 4));
|
||||
rrc = be16_to_cpup((__be16 *)(ptr + 6));
|
||||
|
||||
seqnum = be64_to_cpup((__be64 *)(ptr + 8));
|
||||
|
||||
if (rrc != 0) {
|
||||
err = rotate_left(kctx, offset, buf, rrc);
|
||||
if (err)
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
|
||||
&headskip, &tailskip);
|
||||
if (err)
|
||||
return GSS_S_FAILURE;
|
||||
|
||||
/*
|
||||
* Retrieve the decrypted gss token header and verify
|
||||
* it against the original
|
||||
*/
|
||||
err = read_bytes_from_xdr_buf(buf,
|
||||
buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
|
||||
decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
|
||||
if (err) {
|
||||
dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
if (memcmp(ptr, decrypted_hdr, 6)
|
||||
|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
|
||||
dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
|
||||
return GSS_S_FAILURE;
|
||||
}
|
||||
|
||||
/* do sequencing checks */
|
||||
|
||||
/* it got through unscathed. Make sure the context is unexpired */
|
||||
now = get_seconds();
|
||||
if (now > kctx->endtime)
|
||||
return GSS_S_CONTEXT_EXPIRED;
|
||||
|
||||
/*
|
||||
* Move the head data back to the right position in xdr_buf.
|
||||
* We ignore any "ec" data since it might be in the head or
|
||||
* the tail, and we really don't need to deal with it.
|
||||
* Note that buf->head[0].iov_len may indicate the available
|
||||
* head buffer space rather than that actually occupied.
|
||||
*/
|
||||
movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
|
||||
movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
|
||||
BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
|
||||
buf->head[0].iov_len);
|
||||
memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
|
||||
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
|
||||
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
|
||||
|
||||
return GSS_S_COMPLETE;
|
||||
}
|
||||
|
||||
u32
|
||||
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
|
||||
struct xdr_buf *buf, struct page **pages)
|
||||
{
|
||||
struct krb5_ctx *kctx = gctx->internal_ctx_id;
|
||||
|
||||
switch (kctx->enctype) {
|
||||
default:
|
||||
BUG();
|
||||
case ENCTYPE_DES_CBC_RAW:
|
||||
case ENCTYPE_DES3_CBC_RAW:
|
||||
case ENCTYPE_ARCFOUR_HMAC:
|
||||
return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
|
||||
}
|
||||
}
|
||||
|
||||
u32
|
||||
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
|
||||
{
|
||||
struct krb5_ctx *kctx = gctx->internal_ctx_id;
|
||||
|
||||
switch (kctx->enctype) {
|
||||
default:
|
||||
BUG();
|
||||
case ENCTYPE_DES_CBC_RAW:
|
||||
case ENCTYPE_DES3_CBC_RAW:
|
||||
case ENCTYPE_ARCFOUR_HMAC:
|
||||
return gss_unwrap_kerberos_v1(kctx, offset, buf);
|
||||
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
||||
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
||||
return gss_unwrap_kerberos_v2(kctx, offset, buf);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put);
|
|||
int
|
||||
gss_import_sec_context(const void *input_token, size_t bufsize,
|
||||
struct gss_api_mech *mech,
|
||||
struct gss_ctx **ctx_id)
|
||||
struct gss_ctx **ctx_id,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
|
||||
if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
|
||||
return -ENOMEM;
|
||||
(*ctx_id)->mech_type = gss_mech_get(mech);
|
||||
|
||||
return mech->gm_ops
|
||||
->gss_import_sec_context(input_token, bufsize, *ctx_id);
|
||||
->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask);
|
||||
}
|
||||
|
||||
/* gss_get_mic: compute a mic over message and return mic_token. */
|
||||
|
@@ -285,6 +286,20 @@ gss_verify_mic(struct gss_ctx *context_handle,
|
|||
mic_token);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called from both the client and server code.
|
||||
* Each makes guarantees about how much "slack" space is available
|
||||
* for the underlying function in "buf"'s head and tail while
|
||||
* performing the wrap.
|
||||
*
|
||||
* The client and server code allocate RPC_MAX_AUTH_SIZE extra
|
||||
* space in both the head and tail which is available for use by
|
||||
* the wrap function.
|
||||
*
|
||||
* Underlying functions should verify they do not use more than
|
||||
* RPC_MAX_AUTH_SIZE of extra space in either the head or tail
|
||||
* when performing the wrap.
|
||||
*/
|
||||
u32
|
||||
gss_wrap(struct gss_ctx *ctx_id,
|
||||
int offset,
|
||||
|
|
|
@@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
|
|||
|
||||
static int
|
||||
gss_import_sec_context_spkm3(const void *p, size_t len,
|
||||
struct gss_ctx *ctx_id)
|
||||
struct gss_ctx *ctx_id,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
const void *end = (const void *)((const char *)p + len);
|
||||
struct spkm3_ctx *ctx;
|
||||
int version;
|
||||
|
||||
if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
|
||||
if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask)))
|
||||
goto out_err;
|
||||
|
||||
p = simple_get_bytes(p, end, &version, sizeof(version));
|
||||
|
|
|
@@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd,
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL);
		if (status)
			goto out;

@@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
|
|||
inpages = resbuf->pages;
|
||||
/* XXX: Would be better to write some xdr helper functions for
|
||||
* nfs{2,3,4}xdr.c that place the data right, instead of copying: */
|
||||
|
||||
/*
|
||||
* If there is currently tail data, make sure there is
|
||||
* room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
|
||||
* the page, and move the current tail data such that
|
||||
* there is RPC_MAX_AUTH_SIZE slack space available in
|
||||
* both the head and tail.
|
||||
*/
|
||||
if (resbuf->tail[0].iov_base) {
|
||||
BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
|
||||
+ PAGE_SIZE);
|
||||
|
@@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
|
|||
resbuf->tail[0].iov_len);
|
||||
resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
|
||||
}
|
||||
/*
|
||||
* If there is no current tail data, make sure there is
|
||||
* room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
|
||||
* allotted page, and set up tail information such that there
|
||||
* is RPC_MAX_AUTH_SIZE slack space available in both the
|
||||
* head and tail.
|
||||
*/
|
||||
if (resbuf->tail[0].iov_base == NULL) {
|
||||
if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
|
||||
return -ENOMEM;
|
||||
|
|
|
@@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = {
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task, *ret;
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (task == NULL) {
		rpc_release_calldata(task_setup_data->callback_ops,
				task_setup_data->callback_data);
		ret = ERR_PTR(-ENOMEM);
	if (IS_ERR(task))
		goto out;
	}

	if (task->tk_status != 0) {
		ret = ERR_PTR(task->tk_status);
		rpc_put_task(task);
		goto out;
	}
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	ret = task;
out:
	return ret;
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

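rpc_run_task() can shrink like this because rpc_new_task() now reports failure through the returned pointer itself instead of returning NULL or a half-initialized task; the kernel expresses that with the ERR_PTR()/IS_ERR()/PTR_ERR() helpers. Below is a rough user-space imitation of the idiom, not the kernel's actual implementation (which, among other things, bounds the errno range more carefully); the toy struct and function names are invented for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy versions of the kernel helpers: encode a negative errno in a pointer. */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-4095; }

struct task { int status; };

static struct task *new_task(int fail)
{
	struct task *t;

	if (fail)
		return err_ptr(-ENOMEM);	/* caller needs no separate NULL/status checks */
	t = malloc(sizeof(*t));
	if (!t)
		return err_ptr(-ENOMEM);
	t->status = 0;
	return t;
}

int main(void)
{
	struct task *t = new_task(1);

	if (is_err(t)) {
		printf("new_task failed: %ld\n", ptr_err(t));
		return 1;
	}
	free(t);
	return 0;
}
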
@@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
|
|||
* Create an rpc_task to send the data
|
||||
*/
|
||||
task = rpc_new_task(&task_setup_data);
|
||||
if (!task) {
|
||||
if (IS_ERR(task)) {
|
||||
xprt_free_bc_request(req);
|
||||
task = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
task->tk_rqstp = req;
|
||||
|
|
|
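
Both hunks above move rpc_new_task() callers from NULL checks to the kernel ERR_PTR()/IS_ERR() convention, in which an errno value is encoded in the returned pointer itself. The sketch below re-implements those helpers in userspace purely for illustration (in the kernel they come from <linux/err.h>), with a hypothetical new_task() standing in for rpc_new_task().

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Userspace re-implementation of the <linux/err.h> helpers, for illustration. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct rpc_task_sim { int tk_status; };

/* Hypothetical constructor that reports failure via ERR_PTR(-ENOMEM),
 * mirroring what rpc_new_task() now does. */
static struct rpc_task_sim *new_task(int simulate_failure)
{
	static struct rpc_task_sim task;

	if (simulate_failure)
		return ERR_PTR(-ENOMEM);
	return &task;
}

int main(void)
{
	struct rpc_task_sim *task = new_task(1);

	if (IS_ERR(task))
		printf("task allocation failed: %ld\n", PTR_ERR(task));
	else
		printf("task allocated, status %d\n", task->tk_status);
	return 0;
}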
@@ -25,7 +25,6 @@

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
-#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
@@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

-	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
@@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

-#ifdef RPC_DEBUG
-	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
@@ -834,7 +829,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
	}

	/* starting timestamp */
-	task->tk_start = jiffies;
+	task->tk_start = ktime_get();

	dprintk("RPC: new task initialized, procpid %u\n",
				task_pid_nr(current));
@@ -856,16 +851,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)

	if (task == NULL) {
		task = rpc_alloc_task();
-		if (task == NULL)
-			goto out;
+		if (task == NULL) {
+			rpc_release_calldata(setup_data->callback_ops,
+					setup_data->callback_data);
+			return ERR_PTR(-ENOMEM);
+		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
+	if (task->tk_status < 0) {
+		int err = task->tk_status;
+		rpc_put_task(task);
+		return ERR_PTR(err);
+	}

	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
-out:
	return task;
}

@@ -909,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
-#ifdef RPC_DEBUG
-	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	if (!list_empty(&task->tk_task)) {
@@ -923,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task)
	}
	BUG_ON (RPC_IS_QUEUED(task));

-#ifdef RPC_DEBUG
-	task->tk_magic = 0;
-#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

@@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_iostats *stats;
	struct rpc_iostats *op_metrics;
-	long rtt, execute, queue;
+	ktime_t delta;

	if (!task->tk_client || !task->tk_client->cl_metrics || !req)
		return;
@@ -156,23 +156,16 @@ void rpc_count_iostats(struct rpc_task *task)
	op_metrics->om_ntrans += req->rq_ntrans;
	op_metrics->om_timeouts += task->tk_timeouts;

-	op_metrics->om_bytes_sent += task->tk_bytes_sent;
+	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

-	queue = (long)req->rq_xtime - task->tk_start;
-	if (queue < 0)
-		queue = -queue;
-	op_metrics->om_queue += queue;
+	delta = ktime_sub(req->rq_xtime, task->tk_start);
+	op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);

-	rtt = task->tk_rtt;
-	if (rtt < 0)
-		rtt = -rtt;
-	op_metrics->om_rtt += rtt;
+	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

-	execute = (long)jiffies - task->tk_start;
-	if (execute < 0)
-		execute = -execute;
-	op_metrics->om_execute += execute;
+	delta = ktime_sub(ktime_get(), task->tk_start);
+	op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
}

static void _print_name(struct seq_file *seq, unsigned int op,
@@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op,
		seq_printf(seq, "\t%12u: ", op);
}

-#define MILLISECS_PER_JIFFY	(1000 / HZ)
-
void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats = clnt->cl_metrics;
@@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
				metrics->om_timeouts,
				metrics->om_bytes_sent,
				metrics->om_bytes_recv,
-				metrics->om_queue * MILLISECS_PER_JIFFY,
-				metrics->om_rtt * MILLISECS_PER_JIFFY,
-				metrics->om_execute * MILLISECS_PER_JIFFY);
+				ktime_to_ms(metrics->om_queue),
+				ktime_to_ms(metrics->om_rtt),
+				ktime_to_ms(metrics->om_execute));
	}
}
EXPORT_SYMBOL_GPL(rpc_print_iostats);
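
The iostats conversion above drops the signed-jiffies deltas (and their manual sign fix-ups) in favour of ktime_t, which the kernel stores as a signed 64-bit nanosecond count, so accumulation reduces to ktime_add()/ktime_sub() and reporting to ktime_to_ms(). A userspace sketch of that arithmetic, with the ktime helpers re-implemented locally for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Userspace stand-ins for the kernel's ktime helpers. */
typedef int64_t ktime_t;	/* signed nanoseconds */

static ktime_t ktime_get(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static ktime_t ktime_sub(ktime_t lhs, ktime_t rhs) { return lhs - rhs; }
static ktime_t ktime_add(ktime_t lhs, ktime_t rhs) { return lhs + rhs; }
static int64_t ktime_to_ms(ktime_t t)              { return t / 1000000LL; }

int main(void)
{
	ktime_t tk_start = ktime_get();	/* what rpc_init_task() now records */
	ktime_t om_execute = 0;		/* accumulated execute time */

	/* ... the RPC would run here ... */

	ktime_t delta = ktime_sub(ktime_get(), tk_start);
	om_execute = ktime_add(om_execute, delta);

	printf("execute: %lld ms\n", (long long)ktime_to_ms(om_execute));
	return 0;
}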
@@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
+EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
@@ -43,6 +43,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
+#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
@@ -62,7 +63,6 @@
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
-static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

@@ -711,12 +711,16 @@ void xprt_connect(struct rpc_task *task)
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

-		task->tk_timeout = xprt->connect_timeout;
+		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+
+		if (test_bit(XPRT_CLOSING, &xprt->state))
+			return;
+		if (xprt_test_and_set_connecting(xprt))
+			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
-	return;
}

static void xprt_connect_status(struct rpc_task *task)
@@ -771,25 +775,19 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

-/**
- * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
- * @task: RPC request that recently completed
- *
- */
-void xprt_update_rtt(struct rpc_task *task)
+static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;
+	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
-			rpc_update_rtt(rtt, timer,
-					(long)jiffies - req->rq_xtime);
+			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
-EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
@@ -807,7 +805,9 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
-	task->tk_rtt = (long)jiffies - req->rq_xtime;
+	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
+	if (xprt->ops->timer != NULL)
+		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
@@ -906,7 +906,7 @@ void xprt_transmit(struct rpc_task *task)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
-	req->rq_xtime = jiffies;
+	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
@@ -935,7 +935,7 @@ void xprt_transmit(struct rpc_task *task)
	spin_unlock_bh(&xprt->transport_lock);
}

-static inline void do_xprt_reserve(struct rpc_task *task)
+static void xprt_alloc_slot(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

@@ -955,6 +955,16 @@ static inline void do_xprt_reserve(struct rpc_task *task)
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

+static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+	memset(req, 0, sizeof(*req));	/* mark unused */
+
+	spin_lock(&xprt->reserve_lock);
+	list_add(&req->rq_list, &xprt->free);
+	rpc_wake_up_next(&xprt->backlog);
+	spin_unlock(&xprt->reserve_lock);
+}
+
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
@@ -968,7 +978,7 @@ void xprt_reserve(struct rpc_task *task)

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
-	do_xprt_reserve(task);
+	xprt_alloc_slot(task);
	spin_unlock(&xprt->reserve_lock);
}

@@ -1006,14 +1016,10 @@ void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
-	int is_bc_request;

	if (!(req = task->tk_rqstp))
		return;

-	/* Preallocated backchannel request? */
-	is_bc_request = bc_prealloc(req);
-
	xprt = req->rq_xprt;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
@@ -1027,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
-	if (!bc_prealloc(req))
+	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
-	if (likely(!is_bc_request)) {
-		memset(req, 0, sizeof(*req));	/* mark unused */
-
-		spin_lock(&xprt->reserve_lock);
-		list_add(&req->rq_list, &xprt->free);
-		rpc_wake_up_next(&xprt->backlog);
-		spin_unlock(&xprt->reserve_lock);
-	} else
+	if (likely(!bc_prealloc(req)))
+		xprt_free_slot(xprt, req);
+	else
		xprt_free_bc_request(req);
}

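With the change above, the transport records the raw round-trip time as a ktime_t in req->rq_rtt and xprt_update_rtt() converts it to jiffies only when feeding the RTT estimator, via usecs_to_jiffies(ktime_to_us(...)). The sketch below shows that unit conversion; the HZ value and the simplified round-up conversion are illustrative, not the kernel's usecs_to_jiffies() itself.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constant; the kernel's HZ is a build-time config option. */
#define HZ		250
#define USEC_PER_SEC	1000000L

typedef int64_t ktime_t;	/* signed nanoseconds, as in the kernel */

static int64_t ktime_to_us(ktime_t t)
{
	return t / 1000;
}

/* Simplified stand-in for usecs_to_jiffies(): round microseconds up to jiffies. */
static long usecs_to_jiffies_sim(int64_t us)
{
	return (long)((us * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC);
}

int main(void)
{
	ktime_t rq_rtt = 12500000;	/* a 12.5 ms round trip, in nanoseconds */
	long m = usecs_to_jiffies_sim(ktime_to_us(rq_rtt));

	/* With HZ=250 one jiffy is 4 ms, so 12.5 ms rounds up to 4 jiffies. */
	printf("rq_rtt = %lld us -> %ld jiffies\n",
	       (long long)ktime_to_us(rq_rtt), m);
	return 0;
}
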
@@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args)
	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
-	xprt->connect_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

@@ -449,7 +448,6 @@ xprt_rdma_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

-	if (!xprt_test_and_set_connecting(xprt)) {
	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rdma_connect,
@@ -464,7 +462,6 @@ xprt_rdma_connect(struct rpc_task *task)
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
	}
-	}
}

static int
@@ -677,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task)
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

-	task->tk_bytes_sent += rqst->rq_snd_buf.len;
+	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

@@ -137,20 +137,6 @@ static ctl_table sunrpc_table[] = {

#endif

-/*
- * Time out for an RPC UDP socket connect.  UDP socket connects are
- * synchronous, but we set a timeout anyway in case of resource
- * exhaustion on the local host.
- */
-#define XS_UDP_CONN_TO		(5U * HZ)
-
-/*
- * Wait duration for an RPC TCP connection to be established.  Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
- */
-#define XS_TCP_CONN_TO		(60U * HZ)
-
/*
 * Wait duration for a reply from the RPC portmapper.
 */
@@ -542,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task)
			xdr->len - req->rq_bytes_sent, status);

	if (status >= 0) {
-		task->tk_bytes_sent += status;
+		req->rq_xmit_bytes_sent += status;
		if (status >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
@@ -638,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
-		task->tk_bytes_sent += status;
+		req->rq_xmit_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
@@ -858,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
	dst_confirm(skb_dst(skb));

	xprt_adjust_cwnd(task, copied);
-	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

 out_unlock:
@@ -2016,9 +2001,6 @@ static void xs_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

-	if (xprt_test_and_set_connecting(xprt))
-		return;
-
	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu "
				"seconds\n",
@@ -2038,16 +2020,6 @@ static void xs_connect(struct rpc_task *task)
	}
}

-static void xs_tcp_connect(struct rpc_task *task)
-{
-	struct rpc_xprt *xprt = task->tk_xprt;
-
-	/* Exit if we need to wait for socket shutdown to complete */
-	if (test_bit(XPRT_CLOSING, &xprt->state))
-		return;
-	xs_connect(task);
-}
-
/**
 * xs_udp_print_stats - display UDP socket-specifc stats
 * @xprt: rpc_xprt struct containing statistics
@@ -2246,7 +2218,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
-	.connect		= xs_tcp_connect,
+	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
@@ -2337,7 +2309,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
-	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

@@ -2412,7 +2383,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
-	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

@@ -2472,9 +2442,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;

-	if (!args->bc_xprt)
-		ERR_PTR(-EINVAL);
-
	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
@@ -2488,7 +2455,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
-	xprt->connect_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;
