Diffstat (limited to 'net')
157 files changed, 1798 insertions(+), 1768 deletions(-)
diff --git a/net/9p/client.c b/net/9p/client.c
index 34d417670935..8eb75425e6e6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1100,7 +1100,7 @@ void p9_client_begin_disconnect(struct p9_client *clnt)
 EXPORT_SYMBOL(p9_client_begin_disconnect);
 
 struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
-	char *uname, u32 n_uname, char *aname)
+	char *uname, kuid_t n_uname, char *aname)
 {
     int err = 0;
     struct p9_req_t *req;
@@ -1117,7 +1117,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
         goto error;
     }
 
-    req = p9_client_rpc(clnt, P9_TATTACH, "ddss?d", fid->fid,
+    req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
             afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
@@ -1270,7 +1270,7 @@ error:
 EXPORT_SYMBOL(p9_client_open);
 
 int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
-        gid_t gid, struct p9_qid *qid)
+        kgid_t gid, struct p9_qid *qid)
 {
     int err = 0;
     struct p9_client *clnt;
@@ -1279,13 +1279,14 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
 
     p9_debug(P9_DEBUG_9P,
             ">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
-            ofid->fid, name, flags, mode, gid);
+            ofid->fid, name, flags, mode,
+            from_kgid(&init_user_ns, gid));
     clnt = ofid->clnt;
 
     if (ofid->mode != -1)
         return -EINVAL;
 
-    req = p9_client_rpc(clnt, P9_TLCREATE, "dsddd", ofid->fid, name, flags,
+    req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags,
             mode, gid);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
@@ -1358,7 +1359,7 @@ error:
 }
 EXPORT_SYMBOL(p9_client_fcreate);
 
-int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
+int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid,
         struct p9_qid *qid)
 {
     int err = 0;
@@ -1369,7 +1370,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
             dfid->fid, name, symtgt);
     clnt = dfid->clnt;
 
-    req = p9_client_rpc(clnt, P9_TSYMLINK, "dssd", dfid->fid, name, symtgt,
+    req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt,
             gid);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
@@ -1710,7 +1711,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
         (unsigned long long)ret->qid.path, ret->qid.version, ret->mode,
         ret->atime, ret->mtime, (unsigned long long)ret->length,
         ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
-        ret->n_uid, ret->n_gid, ret->n_muid);
+        from_kuid(&init_user_ns, ret->n_uid),
+        from_kgid(&init_user_ns, ret->n_gid),
+        from_kuid(&init_user_ns, ret->n_muid));
 
     p9_free_req(clnt, req);
     return ret;
@@ -1764,8 +1767,10 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
         "<<< st_btime_sec=%lld st_btime_nsec=%lld\n"
         "<<< st_gen=%lld st_data_version=%lld",
         ret->st_result_mask, ret->qid.type, ret->qid.path,
-        ret->qid.version, ret->st_mode, ret->st_nlink, ret->st_uid,
-        ret->st_gid, ret->st_rdev, ret->st_size, ret->st_blksize,
+        ret->qid.version, ret->st_mode, ret->st_nlink,
+        from_kuid(&init_user_ns, ret->st_uid),
+        from_kgid(&init_user_ns, ret->st_gid),
+        ret->st_rdev, ret->st_size, ret->st_blksize,
         ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec,
         ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec,
         ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec,
@@ -1828,7 +1833,9 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
         (unsigned long long)wst->qid.path, wst->qid.version, wst->mode,
         wst->atime, wst->mtime, (unsigned long long)wst->length,
         wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
-        wst->n_uid, wst->n_gid, wst->n_muid);
+        from_kuid(&init_user_ns, wst->n_uid),
+        from_kgid(&init_user_ns, wst->n_gid),
+        from_kuid(&init_user_ns, wst->n_muid));
 
     req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
     if (IS_ERR(req)) {
@@ -1857,7 +1864,9 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
         " valid=%x mode=%x uid=%d gid=%d size=%lld\n"
         " atime_sec=%lld atime_nsec=%lld\n"
         " mtime_sec=%lld mtime_nsec=%lld\n",
-        p9attr->valid, p9attr->mode, p9attr->uid, p9attr->gid,
+        p9attr->valid, p9attr->mode,
+        from_kuid(&init_user_ns, p9attr->uid),
+        from_kgid(&init_user_ns, p9attr->gid),
         p9attr->size, p9attr->atime_sec, p9attr->atime_nsec,
         p9attr->mtime_sec, p9attr->mtime_nsec);
 
@@ -2106,7 +2115,7 @@ error:
 EXPORT_SYMBOL(p9_client_readdir);
 
 int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
-        dev_t rdev, gid_t gid, struct p9_qid *qid)
+        dev_t rdev, kgid_t gid, struct p9_qid *qid)
 {
     int err;
     struct p9_client *clnt;
@@ -2116,7 +2125,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
     clnt = fid->clnt;
     p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
         "minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
-    req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode,
+    req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode,
         MAJOR(rdev), MINOR(rdev), gid);
     if (IS_ERR(req))
         return PTR_ERR(req);
@@ -2137,7 +2146,7 @@ error:
 EXPORT_SYMBOL(p9_client_mknod_dotl);
 
 int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
-        gid_t gid, struct p9_qid *qid)
+        kgid_t gid, struct p9_qid *qid)
 {
     int err;
     struct p9_client *clnt;
@@ -2146,8 +2155,8 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
     err = 0;
     clnt = fid->clnt;
     p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
-        fid->fid, name, mode, gid);
-    req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode,
+        fid->fid, name, mode, from_kgid(&init_user_ns, gid));
+    req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode,
         gid);
     if (IS_ERR(req))
         return PTR_ERR(req);
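All of the client.c conversions above follow one discipline: inside the kernel a uid or gid travels as an opaque kuid_t/kgid_t, and it is flattened back to a plain number only at the edges (debug output, the 9p wire format) via from_kuid()/from_kgid() against &init_user_ns, with make_kuid()/make_kgid() for the reverse direction. A minimal sketch of that discipline — the helper names here are hypothetical, only the <linux/uidgid.h> API is real:

#include <linux/uidgid.h>

/* Hypothetical helper: flatten a kernel-internal gid to the numeric
 * form 9p puts on the wire. 9p is not made user-namespace aware by
 * this series, so the mapping is always relative to &init_user_ns. */
static u32 v9fs_wire_gid(kgid_t gid)
{
    return from_kgid(&init_user_ns, gid);
}

/* Reverse direction, for numeric gids read off the wire. */
static kgid_t v9fs_kernel_gid(u32 wire_gid)
{
    return make_kgid(&init_user_ns, wire_gid);
}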
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de76010f..126fd0dceea2 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
 int p9_errstr2errno(char *errstr, int len)
 {
     int errno;
-    struct hlist_node *p;
     struct errormap *c;
     int bucket;
 
     errno = 0;
-    p = NULL;
     c = NULL;
     bucket = jhash(errstr, len, 0) % ERRHASHSZ;
-    hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+    hlist_for_each_entry(c, &hash_errmap[bucket], list) {
         if (c->namelen == len && !memcmp(c->name, errstr, len)) {
             errno = c->val;
             break;
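The error.c hunk is the first of many in this diff adapting to the hlist iterator API change: hlist_for_each_entry() and its relatives dropped the separate struct hlist_node * cursor argument and now derive the next node from the entry itself. A before/after sketch with a hypothetical hashed struct:

#include <linux/list.h>
#include <linux/printk.h>

struct item {
    int val;
    struct hlist_node link;
};

static HLIST_HEAD(bucket);

static void walk_bucket(void)
{
    struct item *c;

    /*
     * Old API (pre-3.9) threaded an explicit cursor through the loop:
     *
     *     struct hlist_node *pos;
     *     hlist_for_each_entry(c, pos, &bucket, link)
     *         ...;
     *
     * New API: the cursor is gone; only entry, head and member remain.
     */
    hlist_for_each_entry(c, &bucket, link)
        pr_info("val=%d\n", c->val);
}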
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3d33ecf13327..ab9127ec5b7a 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -85,6 +85,8 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 	d - int32_t
 	q - int64_t
 	s - string
+	u - numeric uid
+	g - numeric gid
 	S - stat
 	Q - qid
 	D - data blob (int32_t size followed by void *, results are not freed)
@@ -163,6 +165,26 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                 (*sptr)[len] = 0;
             }
             break;
+        case 'u': {
+                kuid_t *uid = va_arg(ap, kuid_t *);
+                __le32 le_val;
+                if (pdu_read(pdu, &le_val, sizeof(le_val))) {
+                    errcode = -EFAULT;
+                    break;
+                }
+                *uid = make_kuid(&init_user_ns,
+                         le32_to_cpu(le_val));
+            } break;
+        case 'g': {
+                kgid_t *gid = va_arg(ap, kgid_t *);
+                __le32 le_val;
+                if (pdu_read(pdu, &le_val, sizeof(le_val))) {
+                    errcode = -EFAULT;
+                    break;
+                }
+                *gid = make_kgid(&init_user_ns,
+                         le32_to_cpu(le_val));
+            } break;
         case 'Q':{
                 struct p9_qid *qid =
                     va_arg(ap, struct p9_qid *);
@@ -177,11 +199,12 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                     va_arg(ap, struct p9_wstat *);
 
                 memset(stbuf, 0, sizeof(struct p9_wstat));
-                stbuf->n_uid = stbuf->n_gid = stbuf->n_muid =
-                    -1;
+                stbuf->n_uid = stbuf->n_muid = INVALID_UID;
+                stbuf->n_gid = INVALID_GID;
+
                 errcode =
                     p9pdu_readf(pdu, proto_version,
-                        "wwdQdddqssss?sddd",
+                        "wwdQdddqssss?sugu",
                         &stbuf->size, &stbuf->type,
                         &stbuf->dev, &stbuf->qid,
                         &stbuf->mode, &stbuf->atime,
@@ -294,7 +317,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                 memset(stbuf, 0, sizeof(struct p9_stat_dotl));
                 errcode =
                     p9pdu_readf(pdu, proto_version,
-                        "qQdddqqqqqqqqqqqqqqq",
+                        "qQdugqqqqqqqqqqqqqqq",
                         &stbuf->st_result_mask,
                         &stbuf->qid,
                         &stbuf->st_mode,
@@ -377,6 +400,20 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                     errcode = -EFAULT;
             }
             break;
+        case 'u': {
+                kuid_t uid = va_arg(ap, kuid_t);
+                __le32 val = cpu_to_le32(
+                        from_kuid(&init_user_ns, uid));
+                if (pdu_write(pdu, &val, sizeof(val)))
+                    errcode = -EFAULT;
+            } break;
+        case 'g': {
+                kgid_t gid = va_arg(ap, kgid_t);
+                __le32 val = cpu_to_le32(
+                        from_kgid(&init_user_ns, gid));
+                if (pdu_write(pdu, &val, sizeof(val)))
+                    errcode = -EFAULT;
+            } break;
         case 'Q':{
                 const struct p9_qid *qid =
                     va_arg(ap, const struct p9_qid *);
@@ -390,7 +427,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                     va_arg(ap, const struct p9_wstat *);
                 errcode =
                     p9pdu_writef(pdu, proto_version,
-                         "wwdQdddqssss?sddd",
+                         "wwdQdddqssss?sugu",
                          stbuf->size, stbuf->type,
                          stbuf->dev, &stbuf->qid,
                          stbuf->mode, stbuf->atime,
@@ -468,7 +505,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                            struct p9_iattr_dotl *);
 
                 errcode = p9pdu_writef(pdu, proto_version,
-                               "ddddqqqqq",
+                               "ddugqqqqq",
                                p9attr->valid,
                                p9attr->mode,
                                p9attr->uid,
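Note that the new 'u' and 'g' format characters leave the wire encoding identical to the 'd' they replace: a little-endian 32-bit integer, produced with cpu_to_le32(from_kuid(...)) on the way out and consumed with make_kuid(le32_to_cpu(...)) on the way in. Only the C-side type changes. A user-space sketch (hypothetical function names) of the byte layout this implies:

#include <stdint.h>

/* Mirror of the 'u' writer: store a numeric uid little-endian,
 * least significant byte first, as the 9p protocol requires. */
static void put_wire_uid(uint8_t *buf, uint32_t uid)
{
    buf[0] = uid & 0xff;
    buf[1] = (uid >> 8) & 0xff;
    buf[2] = (uid >> 16) & 0xff;
    buf[3] = (uid >> 24) & 0xff;
}

/* Mirror of the 'u' reader. */
static uint32_t get_wire_uid(const uint8_t *buf)
{
    return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
           ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
}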
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index de2e950a0a7a..74dea377fe5b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
     .create = p9_virtio_create,
     .close = p9_virtio_close,
     .request = p9_virtio_request,
-    .zc_request = p9_virtio_zc_request,
+    //.zc_request = p9_virtio_zc_request,
     .cancel = p9_virtio_cancel,
     /*
      * We leave one entry for input and one entry for response
diff --git a/net/9p/util.c b/net/9p/util.c
index 6ceeeb384de7..59f278e64f58 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -87,23 +87,18 @@ EXPORT_SYMBOL(p9_idpool_destroy);
 
 int p9_idpool_get(struct p9_idpool *p)
 {
-    int i = 0;
-    int error;
+    int i;
     unsigned long flags;
 
-retry:
-    if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
-        return -1;
-
+    idr_preload(GFP_NOFS);
     spin_lock_irqsave(&p->lock, flags);
 
     /* no need to store exactly p, we just need something non-null */
-    error = idr_get_new(&p->pool, p, &i);
-    spin_unlock_irqrestore(&p->lock, flags);
+    i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
 
-    if (error == -EAGAIN)
-        goto retry;
-    else if (error)
+    spin_unlock_irqrestore(&p->lock, flags);
+    idr_preload_end();
+    if (i < 0)
         return -1;
 
     p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
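The util.c hunk is the stock conversion from the old two-step idr_pre_get()/idr_get_new() pattern, with its -EAGAIN retry loop, to the idr_preload()/idr_alloc() API: preloading pins per-CPU backing memory while sleeping is still legal, so the allocation itself can run under a spinlock with GFP_NOWAIT. The general shape, with a hypothetical idr and lock:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);          /* hypothetical instance */
static DEFINE_SPINLOCK(my_lock);

static int my_alloc_id(void *ptr)
{
    int id;

    idr_preload(GFP_KERNEL);        /* may sleep; fills per-CPU cache */
    spin_lock(&my_lock);

    /* start=0, end=0 means "any non-negative id"; GFP_NOWAIT because
     * we hold a spinlock and rely on the preloaded cache. */
    id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);

    spin_unlock(&my_lock);
    idr_preload_end();

    return id;                      /* negative errno on failure */
}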
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 33475291c9c1..4a141e3cf076 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
                     struct atalk_iface *atif)
 {
     struct sock *s;
-    struct hlist_node *node;
 
     read_lock_bh(&atalk_sockets_lock);
-    sk_for_each(s, node, &atalk_sockets) {
+    sk_for_each(s, &atalk_sockets) {
         struct atalk_sock *at = at_sk(s);
 
         if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
                         struct sockaddr_at *sat)
 {
     struct sock *s;
-    struct hlist_node *node;
     struct atalk_sock *at;
 
     write_lock_bh(&atalk_sockets_lock);
-    sk_for_each(s, node, &atalk_sockets) {
+    sk_for_each(s, &atalk_sockets) {
         at = at_sk(s);
 
         if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
          sat->sat_port < ATPORT_LAST;
          sat->sat_port++) {
         struct sock *s;
-        struct hlist_node *node;
 
-        sk_for_each(s, node, &atalk_sockets) {
+        sk_for_each(s, &atalk_sockets) {
             struct atalk_sock *at = at_sk(s);
 
             if (at->src_net == sat->sat_addr.s_net &&
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a40051..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
     write_lock_irq(&vcc_sklist_lock);
     for (i = 0; i < VCC_HTABLE_SIZE; i++) {
         struct hlist_head *head = &vcc_hash[i];
-        struct hlist_node *node, *tmp;
+        struct hlist_node *tmp;
         struct sock *s;
         struct atm_vcc *vcc;
 
-        sk_for_each_safe(s, node, tmp, head) {
+        sk_for_each_safe(s, tmp, head) {
             vcc = atm_sk(s);
             if (vcc->dev == dev) {
                 vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
 static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
 {
     struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
-    struct hlist_node *node;
     struct sock *s;
     struct atm_vcc *walk;
 
-    sk_for_each(s, node, head) {
+    sk_for_each(s, head) {
         walk = atm_sk(s);
         if (walk->dev != vcc->dev)
             continue;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2e3d942e77f1..f23916be18fb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
         --*l;
     }
 
-    hlist_for_each_entry_from(tmp, e, next) {
+    tmp = container_of(e, struct lec_arp_table, next);
+
+    hlist_for_each_entry_from(tmp, next) {
         if (--*l < 0)
             break;
     }
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
 static int
 lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
 {
-    struct hlist_node *node;
     struct lec_arp_table *entry;
     int i, remove_vcc = 1;
 
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
      * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
      */
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry(entry, node,
+        hlist_for_each_entry(entry,
                      &priv->lec_arp_tables[i], next) {
             if (memcmp(to_remove->atm_addr,
                    entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
 
 static void dump_arp_table(struct lec_priv *priv)
 {
-    struct hlist_node *node;
     struct lec_arp_table *rulla;
     char buf[256];
     int i, j, offset;
 
     pr_info("Dump %p:\n", priv);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry(rulla, node,
+        hlist_for_each_entry(rulla,
                      &priv->lec_arp_tables[i], next) {
             offset = 0;
             offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
     if (!hlist_empty(&priv->lec_no_forward))
         pr_info("No forward\n");
-    hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
+    hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
         offset = 0;
         offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
         offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
     if (!hlist_empty(&priv->lec_arp_empty_ones))
         pr_info("Empty ones\n");
-    hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
+    hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
         offset = 0;
         offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
         offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
     if (!hlist_empty(&priv->mcast_fwds))
         pr_info("Multicast Forward VCCs\n");
-    hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
+    hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
         offset = 0;
         offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
         offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
 static void lec_arp_destroy(struct lec_priv *priv)
 {
     unsigned long flags;
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry;
     int i;
 
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry_safe(entry, node, next,
+        hlist_for_each_entry_safe(entry, next,
                       &priv->lec_arp_tables[i], next) {
             lec_arp_remove(priv, entry);
             lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
         INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
     }
 
-    hlist_for_each_entry_safe(entry, node, next,
+    hlist_for_each_entry_safe(entry, next,
                   &priv->lec_arp_empty_ones, next) {
         del_timer_sync(&entry->timer);
         lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
     }
     INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
 
-    hlist_for_each_entry_safe(entry, node, next,
+    hlist_for_each_entry_safe(entry, next,
                   &priv->lec_no_forward, next) {
         del_timer_sync(&entry->timer);
         lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
     }
     INIT_HLIST_HEAD(&priv->lec_no_forward);
 
-    hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+    hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
         /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
         lec_arp_clear_vccs(entry);
         hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
                       const unsigned char *mac_addr)
 {
-    struct hlist_node *node;
     struct hlist_head *head;
     struct lec_arp_table *entry;
 
     pr_debug("%pM\n", mac_addr);
 
     head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
-    hlist_for_each_entry(entry, node, head, next) {
+    hlist_for_each_entry(entry, head, next) {
         if (ether_addr_equal(mac_addr, entry->mac_addr))
             return entry;
     }
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
     unsigned long flags;
     struct lec_priv *priv =
         container_of(work, struct lec_priv, lec_arp_work.work);
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry;
     unsigned long now;
     int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
 restart:
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry_safe(entry, node, next,
+        hlist_for_each_entry_safe(entry, next,
                       &priv->lec_arp_tables[i], next) {
             if (__lec_arp_check_expire(entry, now, priv)) {
                 struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
         unsigned long permanent)
 {
     unsigned long flags;
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry;
     int i;
 
     pr_debug("\n");
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry_safe(entry, node, next,
+        hlist_for_each_entry_safe(entry, next,
                       &priv->lec_arp_tables[i], next) {
             if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
                 (permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
            unsigned int targetless_le_arp)
 {
     unsigned long flags;
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry, *tmp;
     int i;
 
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
      * we have no entry in the cache. 7.1.30
      */
     if (!hlist_empty(&priv->lec_arp_empty_ones)) {
-        hlist_for_each_entry_safe(entry, node, next,
+        hlist_for_each_entry_safe(entry, next,
                       &priv->lec_arp_empty_ones, next) {
             if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
                 hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
     memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
     del_timer(&entry->timer);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry(tmp, node,
+        hlist_for_each_entry(tmp,
                      &priv->lec_arp_tables[i], next) {
             if (entry != tmp &&
                 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
           void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
 {
     unsigned long flags;
-    struct hlist_node *node;
     struct lec_arp_table *entry;
     int i, found_entry = 0;
 
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
          ioc_data->atm_addr[16], ioc_data->atm_addr[17],
          ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry(entry, node,
+        hlist_for_each_entry(entry,
                      &priv->lec_arp_tables[i], next) {
             if (memcmp
                 (ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
 static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 {
     unsigned long flags;
-    struct hlist_node *node;
     struct lec_arp_table *entry;
     int i;
 
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 restart:
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry(entry, node,
+        hlist_for_each_entry(entry,
                      &priv->lec_arp_tables[i], next) {
             if (entry->flush_tran_id == tran_id &&
                 entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
               const unsigned char *atm_addr, unsigned long tran_id)
 {
     unsigned long flags;
-    struct hlist_node *node;
     struct lec_arp_table *entry;
     int i;
 
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
-        hlist_for_each_entry(entry, node,
+        hlist_for_each_entry(entry,
                      &priv->lec_arp_tables[i], next) {
             if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
                 entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
 static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 {
     unsigned long flags;
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry;
     int i;
 
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
 
     for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-        hlist_for_each_entry_safe(entry, node, next,
+        hlist_for_each_entry_safe(entry, next,
                       &priv->lec_arp_tables[i], next) {
             if (vcc == entry->vcc) {
                 lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
         }
     }
 
-    hlist_for_each_entry_safe(entry, node, next,
+    hlist_for_each_entry_safe(entry, next,
                   &priv->lec_arp_empty_ones, next) {
         if (entry->vcc == vcc) {
             lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
         }
     }
 
-    hlist_for_each_entry_safe(entry, node, next,
+    hlist_for_each_entry_safe(entry, next,
                   &priv->lec_no_forward, next) {
         if (entry->recv_vcc == vcc) {
             lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
         }
     }
 
-    hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+    hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
         if (entry->recv_vcc == vcc) {
             lec_arp_clear_vccs(entry);
             /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
               struct atm_vcc *vcc, struct sk_buff *skb)
 {
     unsigned long flags;
-    struct hlist_node *node, *next;
+    struct hlist_node *next;
     struct lec_arp_table *entry, *tmp;
     struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
     unsigned char *src = hdr->h_source;
 
     spin_lock_irqsave(&priv->lec_arp_lock, flags);
-    hlist_for_each_entry_safe(entry, node, next,
+    hlist_for_each_entry_safe(entry, next,
                   &priv->lec_arp_empty_ones, next) {
         if (vcc == entry->vcc) {
             del_timer(&entry->timer);
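lec.c exercises the remaining iterator variants touched by the same API change: hlist_for_each_entry_safe() now takes a single spare struct hlist_node * (the lookahead that makes deletion during traversal safe) instead of two, and hlist_for_each_entry_from() continues from an entry pointer rather than a node pointer, which is why the lec_tbl_walk() hunk adds an explicit container_of() to seed the loop. A sketch of the safe variant under the new signature, with hypothetical types:

#include <linux/list.h>
#include <linux/slab.h>

struct arp_entry {
    int dead;
    struct hlist_node next;
};

static void prune(struct hlist_head *head)
{
    struct arp_entry *e;
    struct hlist_node *tmp;    /* lookahead, saved before any unlink */

    /* 'tmp' caches the following node up front, so freeing 'e'
     * cannot break the traversal. Pre-3.9 this macro took two
     * node pointers; now only the lookahead remains. */
    hlist_for_each_entry_safe(e, tmp, head, next) {
        if (e->dead) {
            hlist_del(&e->next);
            kfree(e);
        }
    }
}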
diff --git a/net/atm/proc.c b/net/atm/proc.c
index b4e75340b162..6ac35ff0d6b9 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -385,7 +385,7 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
     page = get_zeroed_page(GFP_KERNEL);
     if (!page)
         return -ENOMEM;
-    dev = PDE(file->f_path.dentry->d_inode)->data;
+    dev = PDE(file_inode(file))->data;
     if (!dev->ops->proc_read)
         length = -EINVAL;
     else {
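The one-line proc.c change switches to the file_inode() accessor added in the same release, replacing open-coded file->f_path.dentry->d_inode chains; the inode pointer is cached in struct file itself, so no dentry dereference is needed. Roughly what the upstream inline helper looks like (a sketch, renamed here so it does not clash with the real fs.h definition):

#include <linux/fs.h>

static inline struct inode *my_file_inode(const struct file *f)
{
    return f->f_inode;    /* cached in struct file at open time */
}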
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 86767ca908a3..4176887e72eb 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
 
 static void sigd_close(struct atm_vcc *vcc)
 {
-    struct hlist_node *node;
     struct sock *s;
     int i;
 
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
     for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
         struct hlist_head *head = &vcc_hash[i];
 
-        sk_for_each(s, node, head) {
+        sk_for_each(s, head) {
             vcc = atm_sk(s);
 
             purge_vcc(vcc);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 69a06c47b648..7b11f8bc5071 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
 {
     ax25_dev *ax25_dev;
     ax25_cb *s;
-    struct hlist_node *node;
 
     if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
         return;
 
     spin_lock_bh(&ax25_list_lock);
 again:
-    ax25_for_each(s, node, &ax25_list) {
+    ax25_for_each(s, &ax25_list) {
         if (s->ax25_dev == ax25_dev) {
             s->ax25_dev = NULL;
             spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
     struct net_device *dev, int type)
 {
     ax25_cb *s;
-    struct hlist_node *node;
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(s, node, &ax25_list) {
+    ax25_for_each(s, &ax25_list) {
         if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
             continue;
         if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 {
     struct sock *sk = NULL;
     ax25_cb *s;
-    struct hlist_node *node;
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(s, node, &ax25_list) {
+    ax25_for_each(s, &ax25_list) {
         if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
             !ax25cmp(&s->dest_addr, dest_addr) &&
             s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
     ax25_digi *digi, struct net_device *dev)
 {
     ax25_cb *s;
-    struct hlist_node *node;
 
     spin_lock_bh(&ax25_list_lock);
-    ax25_for_each(s, node, &ax25_list) {
+    ax25_for_each(s, &ax25_list) {
         if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
             continue;
         if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 {
     ax25_cb *s;
     struct sk_buff *copy;
-    struct hlist_node *node;
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(s, node, &ax25_list) {
+    ax25_for_each(s, &ax25_list) {
         if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
             s->sk->sk_type == SOCK_RAW &&
             s->sk->sk_protocol == proto &&
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 5ea7fd3e2af9..e05bd57b5afd 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
 void ax25_ds_enquiry_response(ax25_cb *ax25)
 {
     ax25_cb *ax25o;
-    struct hlist_node *node;
 
     /* Please note that neither DK4EG's nor DG2FEF's
      * DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
     ax25_ds_set_timer(ax25->ax25_dev);
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(ax25o, node, &ax25_list) {
+    ax25_for_each(ax25o, &ax25_list) {
         if (ax25o == ax25)
             continue;
 
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
 {
     ax25_cb *ax25;
     int res = 0;
-    struct hlist_node *node;
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(ax25, node, &ax25_list)
+    ax25_for_each(ax25, &ax25_list)
         if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
             res = 1;
             break;
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 993c439b4f71..951cd57bb07d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
 {
     ax25_dev *ax25_dev = (struct ax25_dev *) arg;
     ax25_cb *ax25;
-    struct hlist_node *node;
 
     if (ax25_dev == NULL || !ax25_dev->dama.slave)
         return;        /* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
     }
 
     spin_lock(&ax25_list_lock);
-    ax25_for_each(ax25, node, &ax25_list) {
+    ax25_for_each(ax25, &ax25_list) {
         if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
             continue;
 
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 7d5f24b82cc8..7f16e8a931b2 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 void ax25_link_failed(ax25_cb *ax25, int reason)
 {
     struct ax25_linkfail *lf;
-    struct hlist_node *node;
 
     spin_lock_bh(&linkfail_lock);
-    hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+    hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
         lf->func(ax25, reason);
     spin_unlock_bh(&linkfail_lock);
 }
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 957999e43ff7..71c4badbc807 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
 ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
 {
     ax25_uid_assoc *ax25_uid, *res = NULL;
-    struct hlist_node *node;
 
     read_lock(&ax25_uid_lock);
-    ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+    ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
         if (uid_eq(ax25_uid->uid, uid)) {
             ax25_uid_hold(ax25_uid);
             res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
 int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 {
     ax25_uid_assoc *ax25_uid;
-    struct hlist_node *node;
     ax25_uid_assoc *user;
     unsigned long res;
 
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
     case SIOCAX25GETUID:
         res = -ENOENT;
         read_lock(&ax25_uid_lock);
-        ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+        ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
             if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
                 res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
                 break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 
         ax25_uid = NULL;
         write_lock(&ax25_uid_lock);
-        ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+        ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
             if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
                 break;
         }
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
 void __exit ax25_uid_free(void)
 {
     ax25_uid_assoc *ax25_uid;
-    struct hlist_node *node;
 
     write_lock(&ax25_uid_lock);
 again:
-    ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+    ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
         hlist_del_init(&ax25_uid->uid_node);
         ax25_uid_put(ax25_uid);
         goto again;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 72fe1bbf7721..a0b253ecadaf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
      */
     struct batadv_forw_packet *forw_packet_aggr = NULL;
     struct batadv_forw_packet *forw_packet_pos = NULL;
-    struct hlist_node *tmp_node;
     struct batadv_ogm_packet *batadv_ogm_packet;
     bool direct_link;
     unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
     spin_lock_bh(&bat_priv->forw_bat_list_lock);
     /* own packets are not to be aggregated */
     if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
-        hlist_for_each_entry(forw_packet_pos, tmp_node,
+        hlist_for_each_entry(forw_packet_pos,
                      &bat_priv->forw_bat_list, list) {
             if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
                             bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
     struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
     struct batadv_neigh_node *router = NULL;
     struct batadv_orig_node *orig_node_tmp;
-    struct hlist_node *node;
     int if_num;
     uint8_t sum_orig, sum_neigh;
     uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
           "update_originator(): Searching and updating originator entry of received packet\n");
 
     rcu_read_lock();
-    hlist_for_each_entry_rcu(tmp_neigh_node, node,
+    hlist_for_each_entry_rcu(tmp_neigh_node,
                  &orig_node->neigh_list, list) {
         neigh_addr = tmp_neigh_node->addr;
         if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 {
     struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
     struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
-    struct hlist_node *node;
     uint8_t total_count;
     uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
     unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 
     /* find corresponding one hop neighbor */
     rcu_read_lock();
-    hlist_for_each_entry_rcu(tmp_neigh_node, node,
+    hlist_for_each_entry_rcu(tmp_neigh_node,
                  &orig_neigh_node->neigh_list, list) {
| 816 | orig_neigh_node->orig)) | 813 | orig_neigh_node->orig)) |
| @@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, | |||
| 920 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 917 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
| 921 | struct batadv_orig_node *orig_node; | 918 | struct batadv_orig_node *orig_node; |
| 922 | struct batadv_neigh_node *tmp_neigh_node; | 919 | struct batadv_neigh_node *tmp_neigh_node; |
| 923 | struct hlist_node *node; | ||
| 924 | int is_duplicate = 0; | 920 | int is_duplicate = 0; |
| 925 | int32_t seq_diff; | 921 | int32_t seq_diff; |
| 926 | int need_update = 0; | 922 | int need_update = 0; |
| @@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, | |||
| 943 | goto out; | 939 | goto out; |
| 944 | 940 | ||
| 945 | rcu_read_lock(); | 941 | rcu_read_lock(); |
| 946 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | 942 | hlist_for_each_entry_rcu(tmp_neigh_node, |
| 947 | &orig_node->neigh_list, list) { | 943 | &orig_node->neigh_list, list) { |
| 948 | is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, | 944 | is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, |
| 949 | orig_node->last_real_seqno, | 945 | orig_node->last_real_seqno, |
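The bat_iv_ogm.c hunks apply the same conversion to the RCU flavour: hlist_for_each_entry_rcu() likewise loses its cursor argument, while the surrounding rcu_read_lock()/rcu_read_unlock() pairing is untouched. A hedged read-side sketch with hypothetical names (neigh, neigh_known):

    #include <linux/etherdevice.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct neigh {                     /* hypothetical example type */
        u8 addr[ETH_ALEN];
        struct hlist_node list;
    };

    static bool neigh_known(struct hlist_head *head, const u8 *addr)
    {
        struct neigh *n;
        bool found = false;

        rcu_read_lock();
        hlist_for_each_entry_rcu(n, head, list) {  /* no node cursor */
            if (ether_addr_equal(n->addr, addr)) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();

        return found;
    }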
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 30f46526cbbd..6a4f728680ae 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
| @@ -144,7 +144,6 @@ static struct batadv_bla_claim | |||
| 144 | { | 144 | { |
| 145 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; | 145 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; |
| 146 | struct hlist_head *head; | 146 | struct hlist_head *head; |
| 147 | struct hlist_node *node; | ||
| 148 | struct batadv_bla_claim *claim; | 147 | struct batadv_bla_claim *claim; |
| 149 | struct batadv_bla_claim *claim_tmp = NULL; | 148 | struct batadv_bla_claim *claim_tmp = NULL; |
| 150 | int index; | 149 | int index; |
| @@ -156,7 +155,7 @@ static struct batadv_bla_claim | |||
| 156 | head = &hash->table[index]; | 155 | head = &hash->table[index]; |
| 157 | 156 | ||
| 158 | rcu_read_lock(); | 157 | rcu_read_lock(); |
| 159 | hlist_for_each_entry_rcu(claim, node, head, hash_entry) { | 158 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
| 160 | if (!batadv_compare_claim(&claim->hash_entry, data)) | 159 | if (!batadv_compare_claim(&claim->hash_entry, data)) |
| 161 | continue; | 160 | continue; |
| 162 | 161 | ||
| @@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, | |||
| 185 | { | 184 | { |
| 186 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; | 185 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
| 187 | struct hlist_head *head; | 186 | struct hlist_head *head; |
| 188 | struct hlist_node *node; | ||
| 189 | struct batadv_bla_backbone_gw search_entry, *backbone_gw; | 187 | struct batadv_bla_backbone_gw search_entry, *backbone_gw; |
| 190 | struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL; | 188 | struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL; |
| 191 | int index; | 189 | int index; |
| @@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, | |||
| 200 | head = &hash->table[index]; | 198 | head = &hash->table[index]; |
| 201 | 199 | ||
| 202 | rcu_read_lock(); | 200 | rcu_read_lock(); |
| 203 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | 201 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
| 204 | if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry, | 202 | if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry, |
| 205 | &search_entry)) | 203 | &search_entry)) |
| 206 | continue; | 204 | continue; |
| @@ -221,7 +219,7 @@ static void | |||
| 221 | batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) | 219 | batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) |
| 222 | { | 220 | { |
| 223 | struct batadv_hashtable *hash; | 221 | struct batadv_hashtable *hash; |
| 224 | struct hlist_node *node, *node_tmp; | 222 | struct hlist_node *node_tmp; |
| 225 | struct hlist_head *head; | 223 | struct hlist_head *head; |
| 226 | struct batadv_bla_claim *claim; | 224 | struct batadv_bla_claim *claim; |
| 227 | int i; | 225 | int i; |
| @@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) | |||
| 236 | list_lock = &hash->list_locks[i]; | 234 | list_lock = &hash->list_locks[i]; |
| 237 | 235 | ||
| 238 | spin_lock_bh(list_lock); | 236 | spin_lock_bh(list_lock); |
| 239 | hlist_for_each_entry_safe(claim, node, node_tmp, | 237 | hlist_for_each_entry_safe(claim, node_tmp, |
| 240 | head, hash_entry) { | 238 | head, hash_entry) { |
| 241 | if (claim->backbone_gw != backbone_gw) | 239 | if (claim->backbone_gw != backbone_gw) |
| 242 | continue; | 240 | continue; |
| 243 | 241 | ||
| 244 | batadv_claim_free_ref(claim); | 242 | batadv_claim_free_ref(claim); |
| 245 | hlist_del_rcu(node); | 243 | hlist_del_rcu(&claim->hash_entry); |
| 246 | } | 244 | } |
| 247 | spin_unlock_bh(list_lock); | 245 | spin_unlock_bh(list_lock); |
| 248 | } | 246 | } |
| @@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, | |||
| 460 | struct batadv_hard_iface *primary_if, | 458 | struct batadv_hard_iface *primary_if, |
| 461 | short vid) | 459 | short vid) |
| 462 | { | 460 | { |
| 463 | struct hlist_node *node; | ||
| 464 | struct hlist_head *head; | 461 | struct hlist_head *head; |
| 465 | struct batadv_hashtable *hash; | 462 | struct batadv_hashtable *hash; |
| 466 | struct batadv_bla_claim *claim; | 463 | struct batadv_bla_claim *claim; |
| @@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, | |||
| 481 | head = &hash->table[i]; | 478 | head = &hash->table[i]; |
| 482 | 479 | ||
| 483 | rcu_read_lock(); | 480 | rcu_read_lock(); |
| 484 | hlist_for_each_entry_rcu(claim, node, head, hash_entry) { | 481 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
| 485 | /* only own claims are interesting */ | 482 | /* only own claims are interesting */ |
| 486 | if (claim->backbone_gw != backbone_gw) | 483 | if (claim->backbone_gw != backbone_gw) |
| 487 | continue; | 484 | continue; |
| @@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, | |||
| 958 | static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) | 955 | static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) |
| 959 | { | 956 | { |
| 960 | struct batadv_bla_backbone_gw *backbone_gw; | 957 | struct batadv_bla_backbone_gw *backbone_gw; |
| 961 | struct hlist_node *node, *node_tmp; | 958 | struct hlist_node *node_tmp; |
| 962 | struct hlist_head *head; | 959 | struct hlist_head *head; |
| 963 | struct batadv_hashtable *hash; | 960 | struct batadv_hashtable *hash; |
| 964 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 961 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| @@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) | |||
| 973 | list_lock = &hash->list_locks[i]; | 970 | list_lock = &hash->list_locks[i]; |
| 974 | 971 | ||
| 975 | spin_lock_bh(list_lock); | 972 | spin_lock_bh(list_lock); |
| 976 | hlist_for_each_entry_safe(backbone_gw, node, node_tmp, | 973 | hlist_for_each_entry_safe(backbone_gw, node_tmp, |
| 977 | head, hash_entry) { | 974 | head, hash_entry) { |
| 978 | if (now) | 975 | if (now) |
| 979 | goto purge_now; | 976 | goto purge_now; |
| @@ -992,7 +989,7 @@ purge_now: | |||
| 992 | 989 | ||
| 993 | batadv_bla_del_backbone_claims(backbone_gw); | 990 | batadv_bla_del_backbone_claims(backbone_gw); |
| 994 | 991 | ||
| 995 | hlist_del_rcu(node); | 992 | hlist_del_rcu(&backbone_gw->hash_entry); |
| 996 | batadv_backbone_gw_free_ref(backbone_gw); | 993 | batadv_backbone_gw_free_ref(backbone_gw); |
| 997 | } | 994 | } |
| 998 | spin_unlock_bh(list_lock); | 995 | spin_unlock_bh(list_lock); |
| @@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, | |||
| 1013 | int now) | 1010 | int now) |
| 1014 | { | 1011 | { |
| 1015 | struct batadv_bla_claim *claim; | 1012 | struct batadv_bla_claim *claim; |
| 1016 | struct hlist_node *node; | ||
| 1017 | struct hlist_head *head; | 1013 | struct hlist_head *head; |
| 1018 | struct batadv_hashtable *hash; | 1014 | struct batadv_hashtable *hash; |
| 1019 | int i; | 1015 | int i; |
| @@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, | |||
| 1026 | head = &hash->table[i]; | 1022 | head = &hash->table[i]; |
| 1027 | 1023 | ||
| 1028 | rcu_read_lock(); | 1024 | rcu_read_lock(); |
| 1029 | hlist_for_each_entry_rcu(claim, node, head, hash_entry) { | 1025 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
| 1030 | if (now) | 1026 | if (now) |
| 1031 | goto purge_now; | 1027 | goto purge_now; |
| 1032 | if (!batadv_compare_eth(claim->backbone_gw->orig, | 1028 | if (!batadv_compare_eth(claim->backbone_gw->orig, |
| @@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, | |||
| 1062 | struct batadv_hard_iface *oldif) | 1058 | struct batadv_hard_iface *oldif) |
| 1063 | { | 1059 | { |
| 1064 | struct batadv_bla_backbone_gw *backbone_gw; | 1060 | struct batadv_bla_backbone_gw *backbone_gw; |
| 1065 | struct hlist_node *node; | ||
| 1066 | struct hlist_head *head; | 1061 | struct hlist_head *head; |
| 1067 | struct batadv_hashtable *hash; | 1062 | struct batadv_hashtable *hash; |
| 1068 | __be16 group; | 1063 | __be16 group; |
| @@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, | |||
| 1086 | head = &hash->table[i]; | 1081 | head = &hash->table[i]; |
| 1087 | 1082 | ||
| 1088 | rcu_read_lock(); | 1083 | rcu_read_lock(); |
| 1089 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | 1084 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
| 1090 | /* own orig still holds the old value. */ | 1085 | /* own orig still holds the old value. */ |
| 1091 | if (!batadv_compare_eth(backbone_gw->orig, | 1086 | if (!batadv_compare_eth(backbone_gw->orig, |
| 1092 | oldif->net_dev->dev_addr)) | 1087 | oldif->net_dev->dev_addr)) |
| @@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work) | |||
| 1112 | struct delayed_work *delayed_work; | 1107 | struct delayed_work *delayed_work; |
| 1113 | struct batadv_priv *bat_priv; | 1108 | struct batadv_priv *bat_priv; |
| 1114 | struct batadv_priv_bla *priv_bla; | 1109 | struct batadv_priv_bla *priv_bla; |
| 1115 | struct hlist_node *node; | ||
| 1116 | struct hlist_head *head; | 1110 | struct hlist_head *head; |
| 1117 | struct batadv_bla_backbone_gw *backbone_gw; | 1111 | struct batadv_bla_backbone_gw *backbone_gw; |
| 1118 | struct batadv_hashtable *hash; | 1112 | struct batadv_hashtable *hash; |
| @@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) | |||
| 1140 | head = &hash->table[i]; | 1134 | head = &hash->table[i]; |
| 1141 | 1135 | ||
| 1142 | rcu_read_lock(); | 1136 | rcu_read_lock(); |
| 1143 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | 1137 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
| 1144 | if (!batadv_compare_eth(backbone_gw->orig, | 1138 | if (!batadv_compare_eth(backbone_gw->orig, |
| 1145 | primary_if->net_dev->dev_addr)) | 1139 | primary_if->net_dev->dev_addr)) |
| 1146 | continue; | 1140 | continue; |
| @@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) | |||
| 1322 | { | 1316 | { |
| 1323 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; | 1317 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
| 1324 | struct hlist_head *head; | 1318 | struct hlist_head *head; |
| 1325 | struct hlist_node *node; | ||
| 1326 | struct batadv_bla_backbone_gw *backbone_gw; | 1319 | struct batadv_bla_backbone_gw *backbone_gw; |
| 1327 | int i; | 1320 | int i; |
| 1328 | 1321 | ||
| @@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) | |||
| 1336 | head = &hash->table[i]; | 1329 | head = &hash->table[i]; |
| 1337 | 1330 | ||
| 1338 | rcu_read_lock(); | 1331 | rcu_read_lock(); |
| 1339 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | 1332 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
| 1340 | if (batadv_compare_eth(backbone_gw->orig, orig)) { | 1333 | if (batadv_compare_eth(backbone_gw->orig, orig)) { |
| 1341 | rcu_read_unlock(); | 1334 | rcu_read_unlock(); |
| 1342 | return 1; | 1335 | return 1; |
| @@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1607 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; | 1600 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; |
| 1608 | struct batadv_bla_claim *claim; | 1601 | struct batadv_bla_claim *claim; |
| 1609 | struct batadv_hard_iface *primary_if; | 1602 | struct batadv_hard_iface *primary_if; |
| 1610 | struct hlist_node *node; | ||
| 1611 | struct hlist_head *head; | 1603 | struct hlist_head *head; |
| 1612 | uint32_t i; | 1604 | uint32_t i; |
| 1613 | bool is_own; | 1605 | bool is_own; |
| @@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1628 | head = &hash->table[i]; | 1620 | head = &hash->table[i]; |
| 1629 | 1621 | ||
| 1630 | rcu_read_lock(); | 1622 | rcu_read_lock(); |
| 1631 | hlist_for_each_entry_rcu(claim, node, head, hash_entry) { | 1623 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
| 1632 | is_own = batadv_compare_eth(claim->backbone_gw->orig, | 1624 | is_own = batadv_compare_eth(claim->backbone_gw->orig, |
| 1633 | primary_addr); | 1625 | primary_addr); |
| 1634 | seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n", | 1626 | seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n", |
| @@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1652 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; | 1644 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
| 1653 | struct batadv_bla_backbone_gw *backbone_gw; | 1645 | struct batadv_bla_backbone_gw *backbone_gw; |
| 1654 | struct batadv_hard_iface *primary_if; | 1646 | struct batadv_hard_iface *primary_if; |
| 1655 | struct hlist_node *node; | ||
| 1656 | struct hlist_head *head; | 1647 | struct hlist_head *head; |
| 1657 | int secs, msecs; | 1648 | int secs, msecs; |
| 1658 | uint32_t i; | 1649 | uint32_t i; |
| @@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1674 | head = &hash->table[i]; | 1665 | head = &hash->table[i]; |
| 1675 | 1666 | ||
| 1676 | rcu_read_lock(); | 1667 | rcu_read_lock(); |
| 1677 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | 1668 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
| 1678 | msecs = jiffies_to_msecs(jiffies - | 1669 | msecs = jiffies_to_msecs(jiffies - |
| 1679 | backbone_gw->lasttime); | 1670 | backbone_gw->lasttime); |
| 1680 | secs = msecs / 1000; | 1671 | secs = msecs / 1000; |
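The bridge_loop_avoidance.c hunks show the one spot where the conversion is more than deleting a variable: the old code reused the cursor for removal, hlist_del_rcu(node), and with the cursor gone the call must name the linkage member explicitly, as in hlist_del_rcu(&claim->hash_entry). The _safe variant still keeps a single struct hlist_node * so the current entry may be freed mid-walk. A sketch under those assumptions (claim and purge_claims are hypothetical, and kfree_rcu() stands in for batman-adv's refcounted free helpers):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct claim {                     /* hypothetical example type */
        struct hlist_node hash_entry;
        struct rcu_head rcu;
        bool stale;
    };

    static void purge_claims(struct hlist_head *head)
    {
        struct claim *c;
        struct hlist_node *tmp;        /* still required: c may be freed */

        hlist_for_each_entry_safe(c, tmp, head, hash_entry) {
            if (!c->stale)
                continue;
            /* was hlist_del_rcu(node); the member address replaces it */
            hlist_del_rcu(&c->hash_entry);
            kfree_rcu(c, rcu);         /* defer the free past RCU readers */
        }
    }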
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 761a59002e34..d54188a112ea 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
| @@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
| 83 | { | 83 | { |
| 84 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 84 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 85 | struct batadv_dat_entry *dat_entry; | 85 | struct batadv_dat_entry *dat_entry; |
| 86 | struct hlist_node *node, *node_tmp; | 86 | struct hlist_node *node_tmp; |
| 87 | struct hlist_head *head; | 87 | struct hlist_head *head; |
| 88 | uint32_t i; | 88 | uint32_t i; |
| 89 | 89 | ||
| @@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
| 95 | list_lock = &bat_priv->dat.hash->list_locks[i]; | 95 | list_lock = &bat_priv->dat.hash->list_locks[i]; |
| 96 | 96 | ||
| 97 | spin_lock_bh(list_lock); | 97 | spin_lock_bh(list_lock); |
| 98 | hlist_for_each_entry_safe(dat_entry, node, node_tmp, head, | 98 | hlist_for_each_entry_safe(dat_entry, node_tmp, head, |
| 99 | hash_entry) { | 99 | hash_entry) { |
| 100 | /* if a helper function has been passed as parameter, | 100 | /* if a helper function has been passed as parameter, |
| 101 | * ask it if the entry has to be purged or not | 101 | * ask it if the entry has to be purged or not |
| @@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
| 103 | if (to_purge && !to_purge(dat_entry)) | 103 | if (to_purge && !to_purge(dat_entry)) |
| 104 | continue; | 104 | continue; |
| 105 | 105 | ||
| 106 | hlist_del_rcu(node); | 106 | hlist_del_rcu(&dat_entry->hash_entry); |
| 107 | batadv_dat_entry_free_ref(dat_entry); | 107 | batadv_dat_entry_free_ref(dat_entry); |
| 108 | } | 108 | } |
| 109 | spin_unlock_bh(list_lock); | 109 | spin_unlock_bh(list_lock); |
| @@ -235,7 +235,6 @@ static struct batadv_dat_entry * | |||
| 235 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) | 235 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) |
| 236 | { | 236 | { |
| 237 | struct hlist_head *head; | 237 | struct hlist_head *head; |
| 238 | struct hlist_node *node; | ||
| 239 | struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; | 238 | struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; |
| 240 | struct batadv_hashtable *hash = bat_priv->dat.hash; | 239 | struct batadv_hashtable *hash = bat_priv->dat.hash; |
| 241 | uint32_t index; | 240 | uint32_t index; |
| @@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) | |||
| 247 | head = &hash->table[index]; | 246 | head = &hash->table[index]; |
| 248 | 247 | ||
| 249 | rcu_read_lock(); | 248 | rcu_read_lock(); |
| 250 | hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { | 249 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { |
| 251 | if (dat_entry->ip != ip) | 250 | if (dat_entry->ip != ip) |
| 252 | continue; | 251 | continue; |
| 253 | 252 | ||
| @@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
| 465 | batadv_dat_addr_t max = 0, tmp_max = 0; | 464 | batadv_dat_addr_t max = 0, tmp_max = 0; |
| 466 | struct batadv_orig_node *orig_node, *max_orig_node = NULL; | 465 | struct batadv_orig_node *orig_node, *max_orig_node = NULL; |
| 467 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 466 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 468 | struct hlist_node *node; | ||
| 469 | struct hlist_head *head; | 467 | struct hlist_head *head; |
| 470 | int i; | 468 | int i; |
| 471 | 469 | ||
| @@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
| 481 | head = &hash->table[i]; | 479 | head = &hash->table[i]; |
| 482 | 480 | ||
| 483 | rcu_read_lock(); | 481 | rcu_read_lock(); |
| 484 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 482 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 485 | /* the dht space is a ring and addresses are unsigned */ | 483 | /* the dht space is a ring and addresses are unsigned */ |
| 486 | tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + | 484 | tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + |
| 487 | ip_key; | 485 | ip_key; |
| @@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) | |||
| 686 | struct batadv_hashtable *hash = bat_priv->dat.hash; | 684 | struct batadv_hashtable *hash = bat_priv->dat.hash; |
| 687 | struct batadv_dat_entry *dat_entry; | 685 | struct batadv_dat_entry *dat_entry; |
| 688 | struct batadv_hard_iface *primary_if; | 686 | struct batadv_hard_iface *primary_if; |
| 689 | struct hlist_node *node; | ||
| 690 | struct hlist_head *head; | 687 | struct hlist_head *head; |
| 691 | unsigned long last_seen_jiffies; | 688 | unsigned long last_seen_jiffies; |
| 692 | int last_seen_msecs, last_seen_secs, last_seen_mins; | 689 | int last_seen_msecs, last_seen_secs, last_seen_mins; |
| @@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) | |||
| 704 | head = &hash->table[i]; | 701 | head = &hash->table[i]; |
| 705 | 702 | ||
| 706 | rcu_read_lock(); | 703 | rcu_read_lock(); |
| 707 | hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { | 704 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { |
| 708 | last_seen_jiffies = jiffies - dat_entry->last_update; | 705 | last_seen_jiffies = jiffies - dat_entry->last_update; |
| 709 | last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); | 706 | last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); |
| 710 | last_seen_mins = last_seen_msecs / 60000; | 707 | last_seen_mins = last_seen_msecs / 60000; |
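Aside from the iterator change, __batadv_dat_purge() is built around an optional predicate: a NULL to_purge callback means "flush everything", otherwise only entries the callback approves are dropped, so one walker serves both teardown and periodic-timeout purging. A compact sketch of that shape with hypothetical names (dat_ent, dat_ent_expired, purge_bucket) and an assumed five-minute timeout:

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct dat_ent {                   /* hypothetical example type */
        struct hlist_node hash_entry;
        unsigned long last_update;
        struct rcu_head rcu;
    };

    static bool dat_ent_expired(struct dat_ent *e)
    {
        return time_after(jiffies, e->last_update + 5 * 60 * HZ);
    }

    static void purge_bucket(struct hlist_head *head,
                             bool (*to_purge)(struct dat_ent *))
    {
        struct dat_ent *e;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(e, tmp, head, hash_entry) {
            /* a NULL predicate means: purge unconditionally */
            if (to_purge && !to_purge(e))
                continue;
            hlist_del_rcu(&e->hash_entry);
            kfree_rcu(e, rcu);
        }
    }

Called as purge_bucket(head, NULL) at teardown and purge_bucket(head, dat_ent_expired) from a periodic worker, the same loop covers both cases.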
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 074107f2cfaa..34f99a46ec1d 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
| @@ -114,7 +114,6 @@ static struct batadv_gw_node * | |||
| 114 | batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | 114 | batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) |
| 115 | { | 115 | { |
| 116 | struct batadv_neigh_node *router; | 116 | struct batadv_neigh_node *router; |
| 117 | struct hlist_node *node; | ||
| 118 | struct batadv_gw_node *gw_node, *curr_gw = NULL; | 117 | struct batadv_gw_node *gw_node, *curr_gw = NULL; |
| 119 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; | 118 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; |
| 120 | uint32_t gw_divisor; | 119 | uint32_t gw_divisor; |
| @@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | |||
| 127 | gw_divisor *= 64; | 126 | gw_divisor *= 64; |
| 128 | 127 | ||
| 129 | rcu_read_lock(); | 128 | rcu_read_lock(); |
| 130 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 129 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
| 131 | if (gw_node->deleted) | 130 | if (gw_node->deleted) |
| 132 | continue; | 131 | continue; |
| 133 | 132 | ||
| @@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
| 344 | struct batadv_orig_node *orig_node, | 343 | struct batadv_orig_node *orig_node, |
| 345 | uint8_t new_gwflags) | 344 | uint8_t new_gwflags) |
| 346 | { | 345 | { |
| 347 | struct hlist_node *node; | ||
| 348 | struct batadv_gw_node *gw_node, *curr_gw; | 346 | struct batadv_gw_node *gw_node, *curr_gw; |
| 349 | 347 | ||
| 350 | /* Note: We don't need a NULL check here, since curr_gw never gets | 348 | /* Note: We don't need a NULL check here, since curr_gw never gets |
| @@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
| 355 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); | 353 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); |
| 356 | 354 | ||
| 357 | rcu_read_lock(); | 355 | rcu_read_lock(); |
| 358 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 356 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
| 359 | if (gw_node->orig_node != orig_node) | 357 | if (gw_node->orig_node != orig_node) |
| 360 | continue; | 358 | continue; |
| 361 | 359 | ||
| @@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, | |||
| 403 | void batadv_gw_node_purge(struct batadv_priv *bat_priv) | 401 | void batadv_gw_node_purge(struct batadv_priv *bat_priv) |
| 404 | { | 402 | { |
| 405 | struct batadv_gw_node *gw_node, *curr_gw; | 403 | struct batadv_gw_node *gw_node, *curr_gw; |
| 406 | struct hlist_node *node, *node_tmp; | 404 | struct hlist_node *node_tmp; |
| 407 | unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); | 405 | unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); |
| 408 | int do_deselect = 0; | 406 | int do_deselect = 0; |
| 409 | 407 | ||
| @@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv) | |||
| 411 | 409 | ||
| 412 | spin_lock_bh(&bat_priv->gw.list_lock); | 410 | spin_lock_bh(&bat_priv->gw.list_lock); |
| 413 | 411 | ||
| 414 | hlist_for_each_entry_safe(gw_node, node, node_tmp, | 412 | hlist_for_each_entry_safe(gw_node, node_tmp, |
| 415 | &bat_priv->gw.list, list) { | 413 | &bat_priv->gw.list, list) { |
| 416 | if (((!gw_node->deleted) || | 414 | if (((!gw_node->deleted) || |
| 417 | (time_before(jiffies, gw_node->deleted + timeout))) && | 415 | (time_before(jiffies, gw_node->deleted + timeout))) && |
| @@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
| 476 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 474 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
| 477 | struct batadv_hard_iface *primary_if; | 475 | struct batadv_hard_iface *primary_if; |
| 478 | struct batadv_gw_node *gw_node; | 476 | struct batadv_gw_node *gw_node; |
| 479 | struct hlist_node *node; | ||
| 480 | int gw_count = 0; | 477 | int gw_count = 0; |
| 481 | 478 | ||
| 482 | primary_if = batadv_seq_print_text_primary_if_get(seq); | 479 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
| @@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
| 490 | primary_if->net_dev->dev_addr, net_dev->name); | 487 | primary_if->net_dev->dev_addr, net_dev->name); |
| 491 | 488 | ||
| 492 | rcu_read_lock(); | 489 | rcu_read_lock(); |
| 493 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 490 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
| 494 | if (gw_node->deleted) | 491 | if (gw_node->deleted) |
| 495 | continue; | 492 | continue; |
| 496 | 493 | ||
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 21fe6987733b..0488d70c8c35 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
| @@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type) | |||
| 345 | static struct batadv_algo_ops *batadv_algo_get(char *name) | 345 | static struct batadv_algo_ops *batadv_algo_get(char *name) |
| 346 | { | 346 | { |
| 347 | struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; | 347 | struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; |
| 348 | struct hlist_node *node; | ||
| 349 | 348 | ||
| 350 | hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) { | 349 | hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) { |
| 351 | if (strcmp(bat_algo_ops_tmp->name, name) != 0) | 350 | if (strcmp(bat_algo_ops_tmp->name, name) != 0) |
| 352 | continue; | 351 | continue; |
| 353 | 352 | ||
| @@ -411,11 +410,10 @@ out: | |||
| 411 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) | 410 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) |
| 412 | { | 411 | { |
| 413 | struct batadv_algo_ops *bat_algo_ops; | 412 | struct batadv_algo_ops *bat_algo_ops; |
| 414 | struct hlist_node *node; | ||
| 415 | 413 | ||
| 416 | seq_printf(seq, "Available routing algorithms:\n"); | 414 | seq_printf(seq, "Available routing algorithms:\n"); |
| 417 | 415 | ||
| 418 | hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) { | 416 | hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) { |
| 419 | seq_printf(seq, "%s\n", bat_algo_ops->name); | 417 | seq_printf(seq, "%s\n", bat_algo_ops->name); |
| 420 | } | 418 | } |
| 421 | 419 | ||
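For context, what makes the two-argument form possible at all is a NULL-tolerant container_of step: the typed pos pointer doubles as both the loop cursor and the termination test. The reworked definitions in include/linux/list.h look roughly like this (reproduced from memory, so treat it as a sketch of the mechanism rather than the authoritative source):

    #define hlist_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
        })

    #define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

Because hlist_entry_safe() maps a NULL struct hlist_node * to a NULL typed pointer, pos can be tested directly and the separate cursor becomes redundant.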
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 457ea445217c..96fb80b724dc 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
| @@ -118,7 +118,7 @@ out: | |||
| 118 | 118 | ||
| 119 | static void batadv_orig_node_free_rcu(struct rcu_head *rcu) | 119 | static void batadv_orig_node_free_rcu(struct rcu_head *rcu) |
| 120 | { | 120 | { |
| 121 | struct hlist_node *node, *node_tmp; | 121 | struct hlist_node *node_tmp; |
| 122 | struct batadv_neigh_node *neigh_node, *tmp_neigh_node; | 122 | struct batadv_neigh_node *neigh_node, *tmp_neigh_node; |
| 123 | struct batadv_orig_node *orig_node; | 123 | struct batadv_orig_node *orig_node; |
| 124 | 124 | ||
| @@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) | |||
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | /* for all neighbors towards this originator ... */ | 136 | /* for all neighbors towards this originator ... */ |
| 137 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, | 137 | hlist_for_each_entry_safe(neigh_node, node_tmp, |
| 138 | &orig_node->neigh_list, list) { | 138 | &orig_node->neigh_list, list) { |
| 139 | hlist_del_rcu(&neigh_node->list); | 139 | hlist_del_rcu(&neigh_node->list); |
| 140 | batadv_neigh_node_free_ref(neigh_node); | 140 | batadv_neigh_node_free_ref(neigh_node); |
| @@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node) | |||
| 161 | void batadv_originator_free(struct batadv_priv *bat_priv) | 161 | void batadv_originator_free(struct batadv_priv *bat_priv) |
| 162 | { | 162 | { |
| 163 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 163 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 164 | struct hlist_node *node, *node_tmp; | 164 | struct hlist_node *node_tmp; |
| 165 | struct hlist_head *head; | 165 | struct hlist_head *head; |
| 166 | spinlock_t *list_lock; /* spinlock to protect write access */ | 166 | spinlock_t *list_lock; /* spinlock to protect write access */ |
| 167 | struct batadv_orig_node *orig_node; | 167 | struct batadv_orig_node *orig_node; |
| @@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv) | |||
| 179 | list_lock = &hash->list_locks[i]; | 179 | list_lock = &hash->list_locks[i]; |
| 180 | 180 | ||
| 181 | spin_lock_bh(list_lock); | 181 | spin_lock_bh(list_lock); |
| 182 | hlist_for_each_entry_safe(orig_node, node, node_tmp, | 182 | hlist_for_each_entry_safe(orig_node, node_tmp, |
| 183 | head, hash_entry) { | 183 | head, hash_entry) { |
| 184 | hlist_del_rcu(node); | 184 | hlist_del_rcu(&orig_node->hash_entry); |
| 185 | batadv_orig_node_free_ref(orig_node); | 185 | batadv_orig_node_free_ref(orig_node); |
| 186 | } | 186 | } |
| 187 | spin_unlock_bh(list_lock); | 187 | spin_unlock_bh(list_lock); |
| @@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, | |||
| 274 | struct batadv_orig_node *orig_node, | 274 | struct batadv_orig_node *orig_node, |
| 275 | struct batadv_neigh_node **best_neigh_node) | 275 | struct batadv_neigh_node **best_neigh_node) |
| 276 | { | 276 | { |
| 277 | struct hlist_node *node, *node_tmp; | 277 | struct hlist_node *node_tmp; |
| 278 | struct batadv_neigh_node *neigh_node; | 278 | struct batadv_neigh_node *neigh_node; |
| 279 | bool neigh_purged = false; | 279 | bool neigh_purged = false; |
| 280 | unsigned long last_seen; | 280 | unsigned long last_seen; |
| @@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, | |||
| 285 | spin_lock_bh(&orig_node->neigh_list_lock); | 285 | spin_lock_bh(&orig_node->neigh_list_lock); |
| 286 | 286 | ||
| 287 | /* for all neighbors towards this originator ... */ | 287 | /* for all neighbors towards this originator ... */ |
| 288 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, | 288 | hlist_for_each_entry_safe(neigh_node, node_tmp, |
| 289 | &orig_node->neigh_list, list) { | 289 | &orig_node->neigh_list, list) { |
| 290 | last_seen = neigh_node->last_seen; | 290 | last_seen = neigh_node->last_seen; |
| 291 | if_incoming = neigh_node->if_incoming; | 291 | if_incoming = neigh_node->if_incoming; |
| @@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, | |||
| 348 | static void _batadv_purge_orig(struct batadv_priv *bat_priv) | 348 | static void _batadv_purge_orig(struct batadv_priv *bat_priv) |
| 349 | { | 349 | { |
| 350 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 350 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 351 | struct hlist_node *node, *node_tmp; | 351 | struct hlist_node *node_tmp; |
| 352 | struct hlist_head *head; | 352 | struct hlist_head *head; |
| 353 | spinlock_t *list_lock; /* spinlock to protect write access */ | 353 | spinlock_t *list_lock; /* spinlock to protect write access */ |
| 354 | struct batadv_orig_node *orig_node; | 354 | struct batadv_orig_node *orig_node; |
| @@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) | |||
| 363 | list_lock = &hash->list_locks[i]; | 363 | list_lock = &hash->list_locks[i]; |
| 364 | 364 | ||
| 365 | spin_lock_bh(list_lock); | 365 | spin_lock_bh(list_lock); |
| 366 | hlist_for_each_entry_safe(orig_node, node, node_tmp, | 366 | hlist_for_each_entry_safe(orig_node, node_tmp, |
| 367 | head, hash_entry) { | 367 | head, hash_entry) { |
| 368 | if (batadv_purge_orig_node(bat_priv, orig_node)) { | 368 | if (batadv_purge_orig_node(bat_priv, orig_node)) { |
| 369 | if (orig_node->gw_flags) | 369 | if (orig_node->gw_flags) |
| 370 | batadv_gw_node_delete(bat_priv, | 370 | batadv_gw_node_delete(bat_priv, |
| 371 | orig_node); | 371 | orig_node); |
| 372 | hlist_del_rcu(node); | 372 | hlist_del_rcu(&orig_node->hash_entry); |
| 373 | batadv_orig_node_free_ref(orig_node); | 373 | batadv_orig_node_free_ref(orig_node); |
| 374 | continue; | 374 | continue; |
| 375 | } | 375 | } |
| @@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
| 408 | struct net_device *net_dev = (struct net_device *)seq->private; | 408 | struct net_device *net_dev = (struct net_device *)seq->private; |
| 409 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 409 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
| 410 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 410 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 411 | struct hlist_node *node, *node_tmp; | ||
| 412 | struct hlist_head *head; | 411 | struct hlist_head *head; |
| 413 | struct batadv_hard_iface *primary_if; | 412 | struct batadv_hard_iface *primary_if; |
| 414 | struct batadv_orig_node *orig_node; | 413 | struct batadv_orig_node *orig_node; |
| @@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
| 434 | head = &hash->table[i]; | 433 | head = &hash->table[i]; |
| 435 | 434 | ||
| 436 | rcu_read_lock(); | 435 | rcu_read_lock(); |
| 437 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 436 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 438 | neigh_node = batadv_orig_node_get_router(orig_node); | 437 | neigh_node = batadv_orig_node_get_router(orig_node); |
| 439 | if (!neigh_node) | 438 | if (!neigh_node) |
| 440 | continue; | 439 | continue; |
| @@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
| 453 | neigh_node->addr, | 452 | neigh_node->addr, |
| 454 | neigh_node->if_incoming->net_dev->name); | 453 | neigh_node->if_incoming->net_dev->name); |
| 455 | 454 | ||
| 456 | hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp, | 455 | hlist_for_each_entry_rcu(neigh_node_tmp, |
| 457 | &orig_node->neigh_list, list) { | 456 | &orig_node->neigh_list, list) { |
| 458 | seq_printf(seq, " %pM (%3i)", | 457 | seq_printf(seq, " %pM (%3i)", |
| 459 | neigh_node_tmp->addr, | 458 | neigh_node_tmp->addr, |
| @@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | |||
| 511 | { | 510 | { |
| 512 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 511 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
| 513 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 512 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 514 | struct hlist_node *node; | ||
| 515 | struct hlist_head *head; | 513 | struct hlist_head *head; |
| 516 | struct batadv_orig_node *orig_node; | 514 | struct batadv_orig_node *orig_node; |
| 517 | uint32_t i; | 515 | uint32_t i; |
| @@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | |||
| 524 | head = &hash->table[i]; | 522 | head = &hash->table[i]; |
| 525 | 523 | ||
| 526 | rcu_read_lock(); | 524 | rcu_read_lock(); |
| 527 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 525 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 528 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 526 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
| 529 | ret = batadv_orig_node_add_if(orig_node, max_if_num); | 527 | ret = batadv_orig_node_add_if(orig_node, max_if_num); |
| 530 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | 528 | spin_unlock_bh(&orig_node->ogm_cnt_lock); |
| @@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | |||
| 595 | { | 593 | { |
| 596 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 594 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
| 597 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 595 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 598 | struct hlist_node *node; | ||
| 599 | struct hlist_head *head; | 596 | struct hlist_head *head; |
| 600 | struct batadv_hard_iface *hard_iface_tmp; | 597 | struct batadv_hard_iface *hard_iface_tmp; |
| 601 | struct batadv_orig_node *orig_node; | 598 | struct batadv_orig_node *orig_node; |
| @@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | |||
| 609 | head = &hash->table[i]; | 606 | head = &hash->table[i]; |
| 610 | 607 | ||
| 611 | rcu_read_lock(); | 608 | rcu_read_lock(); |
| 612 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 609 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 613 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 610 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
| 614 | ret = batadv_orig_node_del_if(orig_node, max_if_num, | 611 | ret = batadv_orig_node_del_if(orig_node, max_if_num, |
| 615 | hard_iface->if_num); | 612 | hard_iface->if_num); |
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 286bf743e76a..7df48fa7669d 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
| @@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
| 68 | { | 68 | { |
| 69 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 69 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 70 | struct hlist_head *head; | 70 | struct hlist_head *head; |
| 71 | struct hlist_node *node; | ||
| 72 | struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; | 71 | struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; |
| 73 | int index; | 72 | int index; |
| 74 | 73 | ||
| @@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
| 79 | head = &hash->table[index]; | 78 | head = &hash->table[index]; |
| 80 | 79 | ||
| 81 | rcu_read_lock(); | 80 | rcu_read_lock(); |
| 82 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 81 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 83 | if (!batadv_compare_eth(orig_node, data)) | 82 | if (!batadv_compare_eth(orig_node, data)) |
| 84 | continue; | 83 | continue; |
| 85 | 84 | ||
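The inline batadv_orig_hash_find() above is the canonical lookup shape repeated across batman-adv: hash the key into a bucket, walk that bucket under RCU, and take a reference with atomic_inc_not_zero() before returning so a concurrently-freed entry is never handed out. A condensed sketch with hypothetical names (obj, table_find; choose stands in for the hash function):

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct obj {                       /* hypothetical example type */
        u8 key[6];
        atomic_t refcount;
        struct hlist_node hash_entry;
    };

    static struct obj *table_find(struct hlist_head *buckets, u32 size,
                                  const u8 *key,
                                  u32 (*choose)(const void *data, u32 size))
    {
        struct hlist_head *head = &buckets[choose(key, size)];
        struct obj *obj, *res = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(obj, head, hash_entry) {
            if (memcmp(obj->key, key, sizeof(obj->key)) != 0)
                continue;
            if (!atomic_inc_not_zero(&obj->refcount))
                continue;              /* entry is being freed; skip it */
            res = obj;
            break;
        }
        rcu_read_unlock();

        return res;                    /* caller must drop the reference */
    }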
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 60ba03fc8390..5ee21cebbbb0 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
| @@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
| 37 | { | 37 | { |
| 38 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 38 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
| 39 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 39 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 40 | struct hlist_node *node; | ||
| 41 | struct hlist_head *head; | 40 | struct hlist_head *head; |
| 42 | struct batadv_orig_node *orig_node; | 41 | struct batadv_orig_node *orig_node; |
| 43 | unsigned long *word; | 42 | unsigned long *word; |
| @@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
| 49 | head = &hash->table[i]; | 48 | head = &hash->table[i]; |
| 50 | 49 | ||
| 51 | rcu_read_lock(); | 50 | rcu_read_lock(); |
| 52 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 51 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 53 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 52 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
| 54 | word_index = hard_iface->if_num * BATADV_NUM_WORDS; | 53 | word_index = hard_iface->if_num * BATADV_NUM_WORDS; |
| 55 | word = &(orig_node->bcast_own[word_index]); | 54 | word = &(orig_node->bcast_own[word_index]); |
| @@ -146,7 +145,6 @@ out: | |||
| 146 | void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, | 145 | void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, |
| 147 | struct batadv_neigh_node *neigh_node) | 146 | struct batadv_neigh_node *neigh_node) |
| 148 | { | 147 | { |
| 149 | struct hlist_node *node; | ||
| 150 | struct batadv_neigh_node *tmp_neigh_node, *router = NULL; | 148 | struct batadv_neigh_node *tmp_neigh_node, *router = NULL; |
| 151 | uint8_t interference_candidate = 0; | 149 | uint8_t interference_candidate = 0; |
| 152 | 150 | ||
| @@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, | |||
| 169 | * interface. If we do, we won't select this candidate because of | 167 | * interface. If we do, we won't select this candidate because of |
| 170 | * possible interference. | 168 | * possible interference. |
| 171 | */ | 169 | */ |
| 172 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | 170 | hlist_for_each_entry_rcu(tmp_neigh_node, |
| 173 | &orig_node->neigh_list, list) { | 171 | &orig_node->neigh_list, list) { |
| 174 | if (tmp_neigh_node == neigh_node) | 172 | if (tmp_neigh_node == neigh_node) |
| 175 | continue; | 173 | continue; |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 80ca65fc89a1..a67cffde37ae 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
| @@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
| 316 | const struct batadv_hard_iface *hard_iface) | 316 | const struct batadv_hard_iface *hard_iface) |
| 317 | { | 317 | { |
| 318 | struct batadv_forw_packet *forw_packet; | 318 | struct batadv_forw_packet *forw_packet; |
| 319 | struct hlist_node *tmp_node, *safe_tmp_node; | 319 | struct hlist_node *safe_tmp_node; |
| 320 | bool pending; | 320 | bool pending; |
| 321 | 321 | ||
| 322 | if (hard_iface) | 322 | if (hard_iface) |
| @@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
| 329 | 329 | ||
| 330 | /* free bcast list */ | 330 | /* free bcast list */ |
| 331 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 331 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
| 332 | hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, | 332 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
| 333 | &bat_priv->forw_bcast_list, list) { | 333 | &bat_priv->forw_bcast_list, list) { |
| 334 | /* if purge_outstanding_packets() was called with an argument | 334 | /* if purge_outstanding_packets() was called with an argument |
| 335 | * we delete only packets belonging to the given interface | 335 | * we delete only packets belonging to the given interface |
| @@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
| 355 | 355 | ||
| 356 | /* free batman packet list */ | 356 | /* free batman packet list */ |
| 357 | spin_lock_bh(&bat_priv->forw_bat_list_lock); | 357 | spin_lock_bh(&bat_priv->forw_bat_list_lock); |
| 358 | hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, | 358 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
| 359 | &bat_priv->forw_bat_list, list) { | 359 | &bat_priv->forw_bat_list, list) { |
| 360 | /* if purge_outstanding_packets() was called with an argument | 360 | /* if purge_outstanding_packets() was called with an argument |
| 361 | * we delete only packets belonging to the given interface | 361 | * we delete only packets belonging to the given interface |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index d44672f4a349..98a66a021a60 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -56,7 +56,6 @@ static struct batadv_tt_common_entry * | |||
| 56 | batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) | 56 | batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) |
| 57 | { | 57 | { |
| 58 | struct hlist_head *head; | 58 | struct hlist_head *head; |
| 59 | struct hlist_node *node; | ||
| 60 | struct batadv_tt_common_entry *tt_common_entry; | 59 | struct batadv_tt_common_entry *tt_common_entry; |
| 61 | struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; | 60 | struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; |
| 62 | uint32_t index; | 61 | uint32_t index; |
| @@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) | |||
| 68 | head = &hash->table[index]; | 67 | head = &hash->table[index]; |
| 69 | 68 | ||
| 70 | rcu_read_lock(); | 69 | rcu_read_lock(); |
| 71 | hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { | 70 | hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) { |
| 72 | if (!batadv_compare_eth(tt_common_entry, data)) | 71 | if (!batadv_compare_eth(tt_common_entry, data)) |
| 73 | continue; | 72 | continue; |
| 74 | 73 | ||
| @@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | |||
| 257 | struct batadv_tt_local_entry *tt_local; | 256 | struct batadv_tt_local_entry *tt_local; |
| 258 | struct batadv_tt_global_entry *tt_global; | 257 | struct batadv_tt_global_entry *tt_global; |
| 259 | struct hlist_head *head; | 258 | struct hlist_head *head; |
| 260 | struct hlist_node *node; | ||
| 261 | struct batadv_tt_orig_list_entry *orig_entry; | 259 | struct batadv_tt_orig_list_entry *orig_entry; |
| 262 | int hash_added; | 260 | int hash_added; |
| 263 | bool roamed_back = false; | 261 | bool roamed_back = false; |
| @@ -339,7 +337,7 @@ check_roaming: | |||
| 339 | /* These nodes are probably going to update their tt table */ | 337 | /* These nodes are probably going to update their tt table */ |
| 340 | head = &tt_global->orig_list; | 338 | head = &tt_global->orig_list; |
| 341 | rcu_read_lock(); | 339 | rcu_read_lock(); |
| 342 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 340 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
| 343 | batadv_send_roam_adv(bat_priv, tt_global->common.addr, | 341 | batadv_send_roam_adv(bat_priv, tt_global->common.addr, |
| 344 | orig_entry->orig_node); | 342 | orig_entry->orig_node); |
| 345 | } | 343 | } |
| @@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
| 470 | struct batadv_tt_common_entry *tt_common_entry; | 468 | struct batadv_tt_common_entry *tt_common_entry; |
| 471 | struct batadv_tt_local_entry *tt_local; | 469 | struct batadv_tt_local_entry *tt_local; |
| 472 | struct batadv_hard_iface *primary_if; | 470 | struct batadv_hard_iface *primary_if; |
| 473 | struct hlist_node *node; | ||
| 474 | struct hlist_head *head; | 471 | struct hlist_head *head; |
| 475 | uint32_t i; | 472 | uint32_t i; |
| 476 | int last_seen_secs; | 473 | int last_seen_secs; |
| @@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
| 494 | head = &hash->table[i]; | 491 | head = &hash->table[i]; |
| 495 | 492 | ||
| 496 | rcu_read_lock(); | 493 | rcu_read_lock(); |
| 497 | hlist_for_each_entry_rcu(tt_common_entry, node, | 494 | hlist_for_each_entry_rcu(tt_common_entry, |
| 498 | head, hash_entry) { | 495 | head, hash_entry) { |
| 499 | tt_local = container_of(tt_common_entry, | 496 | tt_local = container_of(tt_common_entry, |
| 500 | struct batadv_tt_local_entry, | 497 | struct batadv_tt_local_entry, |
| @@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, | |||
| 605 | { | 602 | { |
| 606 | struct batadv_tt_local_entry *tt_local_entry; | 603 | struct batadv_tt_local_entry *tt_local_entry; |
| 607 | struct batadv_tt_common_entry *tt_common_entry; | 604 | struct batadv_tt_common_entry *tt_common_entry; |
| 608 | struct hlist_node *node, *node_tmp; | 605 | struct hlist_node *node_tmp; |
| 609 | 606 | ||
| 610 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, | 607 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, |
| 611 | hash_entry) { | 608 | hash_entry) { |
| 612 | tt_local_entry = container_of(tt_common_entry, | 609 | tt_local_entry = container_of(tt_common_entry, |
| 613 | struct batadv_tt_local_entry, | 610 | struct batadv_tt_local_entry, |
| @@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
| 651 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 648 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 652 | struct batadv_tt_common_entry *tt_common_entry; | 649 | struct batadv_tt_common_entry *tt_common_entry; |
| 653 | struct batadv_tt_local_entry *tt_local; | 650 | struct batadv_tt_local_entry *tt_local; |
| 654 | struct hlist_node *node, *node_tmp; | 651 | struct hlist_node *node_tmp; |
| 655 | struct hlist_head *head; | 652 | struct hlist_head *head; |
| 656 | uint32_t i; | 653 | uint32_t i; |
| 657 | 654 | ||
| @@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
| 665 | list_lock = &hash->list_locks[i]; | 662 | list_lock = &hash->list_locks[i]; |
| 666 | 663 | ||
| 667 | spin_lock_bh(list_lock); | 664 | spin_lock_bh(list_lock); |
| 668 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, | 665 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, |
| 669 | head, hash_entry) { | 666 | head, hash_entry) { |
| 670 | hlist_del_rcu(node); | 667 | hlist_del_rcu(&tt_common_entry->hash_entry); |
| 671 | tt_local = container_of(tt_common_entry, | 668 | tt_local = container_of(tt_common_entry, |
| 672 | struct batadv_tt_local_entry, | 669 | struct batadv_tt_local_entry, |
| 673 | common); | 670 | common); |
| @@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, | |||
| 724 | { | 721 | { |
| 725 | struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; | 722 | struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; |
| 726 | const struct hlist_head *head; | 723 | const struct hlist_head *head; |
| 727 | struct hlist_node *node; | ||
| 728 | 724 | ||
| 729 | rcu_read_lock(); | 725 | rcu_read_lock(); |
| 730 | head = &entry->orig_list; | 726 | head = &entry->orig_list; |
| 731 | hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { | 727 | hlist_for_each_entry_rcu(tmp_orig_entry, head, list) { |
| 732 | if (tmp_orig_entry->orig_node != orig_node) | 728 | if (tmp_orig_entry->orig_node != orig_node) |
| 733 | continue; | 729 | continue; |
| 734 | if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) | 730 | if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) |
| @@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry) | |||
| 940 | { | 936 | { |
| 941 | struct batadv_neigh_node *router = NULL; | 937 | struct batadv_neigh_node *router = NULL; |
| 942 | struct hlist_head *head; | 938 | struct hlist_head *head; |
| 943 | struct hlist_node *node; | ||
| 944 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; | 939 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; |
| 945 | int best_tq = 0; | 940 | int best_tq = 0; |
| 946 | 941 | ||
| 947 | head = &tt_global_entry->orig_list; | 942 | head = &tt_global_entry->orig_list; |
| 948 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 943 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
| 949 | router = batadv_orig_node_get_router(orig_entry->orig_node); | 944 | router = batadv_orig_node_get_router(orig_entry->orig_node); |
| 950 | if (!router) | 945 | if (!router) |
| 951 | continue; | 946 | continue; |
| @@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | |||
| 973 | struct seq_file *seq) | 968 | struct seq_file *seq) |
| 974 | { | 969 | { |
| 975 | struct hlist_head *head; | 970 | struct hlist_head *head; |
| 976 | struct hlist_node *node; | ||
| 977 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry; | 971 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry; |
| 978 | struct batadv_tt_common_entry *tt_common_entry; | 972 | struct batadv_tt_common_entry *tt_common_entry; |
| 979 | uint16_t flags; | 973 | uint16_t flags; |
| @@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | |||
| 997 | 991 | ||
| 998 | head = &tt_global_entry->orig_list; | 992 | head = &tt_global_entry->orig_list; |
| 999 | 993 | ||
| 1000 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 994 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
| 1001 | if (best_entry == orig_entry) | 995 | if (best_entry == orig_entry) |
| 1002 | continue; | 996 | continue; |
| 1003 | 997 | ||
| @@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1020 | struct batadv_tt_common_entry *tt_common_entry; | 1014 | struct batadv_tt_common_entry *tt_common_entry; |
| 1021 | struct batadv_tt_global_entry *tt_global; | 1015 | struct batadv_tt_global_entry *tt_global; |
| 1022 | struct batadv_hard_iface *primary_if; | 1016 | struct batadv_hard_iface *primary_if; |
| 1023 | struct hlist_node *node; | ||
| 1024 | struct hlist_head *head; | 1017 | struct hlist_head *head; |
| 1025 | uint32_t i; | 1018 | uint32_t i; |
| 1026 | 1019 | ||
| @@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
| 1039 | head = &hash->table[i]; | 1032 | head = &hash->table[i]; |
| 1040 | 1033 | ||
| 1041 | rcu_read_lock(); | 1034 | rcu_read_lock(); |
| 1042 | hlist_for_each_entry_rcu(tt_common_entry, node, | 1035 | hlist_for_each_entry_rcu(tt_common_entry, |
| 1043 | head, hash_entry) { | 1036 | head, hash_entry) { |
| 1044 | tt_global = container_of(tt_common_entry, | 1037 | tt_global = container_of(tt_common_entry, |
| 1045 | struct batadv_tt_global_entry, | 1038 | struct batadv_tt_global_entry, |
| @@ -1059,13 +1052,13 @@ static void | |||
| 1059 | batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) | 1052 | batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) |
| 1060 | { | 1053 | { |
| 1061 | struct hlist_head *head; | 1054 | struct hlist_head *head; |
| 1062 | struct hlist_node *node, *safe; | 1055 | struct hlist_node *safe; |
| 1063 | struct batadv_tt_orig_list_entry *orig_entry; | 1056 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1064 | 1057 | ||
| 1065 | spin_lock_bh(&tt_global_entry->list_lock); | 1058 | spin_lock_bh(&tt_global_entry->list_lock); |
| 1066 | head = &tt_global_entry->orig_list; | 1059 | head = &tt_global_entry->orig_list; |
| 1067 | hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { | 1060 | hlist_for_each_entry_safe(orig_entry, safe, head, list) { |
| 1068 | hlist_del_rcu(node); | 1061 | hlist_del_rcu(&orig_entry->list); |
| 1069 | batadv_tt_orig_list_entry_free_ref(orig_entry); | 1062 | batadv_tt_orig_list_entry_free_ref(orig_entry); |
| 1070 | } | 1063 | } |
| 1071 | spin_unlock_bh(&tt_global_entry->list_lock); | 1064 | spin_unlock_bh(&tt_global_entry->list_lock); |
| @@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv, | |||
| 1078 | const char *message) | 1071 | const char *message) |
| 1079 | { | 1072 | { |
| 1080 | struct hlist_head *head; | 1073 | struct hlist_head *head; |
| 1081 | struct hlist_node *node, *safe; | 1074 | struct hlist_node *safe; |
| 1082 | struct batadv_tt_orig_list_entry *orig_entry; | 1075 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1083 | 1076 | ||
| 1084 | spin_lock_bh(&tt_global_entry->list_lock); | 1077 | spin_lock_bh(&tt_global_entry->list_lock); |
| 1085 | head = &tt_global_entry->orig_list; | 1078 | head = &tt_global_entry->orig_list; |
| 1086 | hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { | 1079 | hlist_for_each_entry_safe(orig_entry, safe, head, list) { |
| 1087 | if (orig_entry->orig_node == orig_node) { | 1080 | if (orig_entry->orig_node == orig_node) { |
| 1088 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1081 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
| 1089 | "Deleting %pM from global tt entry %pM: %s\n", | 1082 | "Deleting %pM from global tt entry %pM: %s\n", |
| 1090 | orig_node->orig, | 1083 | orig_node->orig, |
| 1091 | tt_global_entry->common.addr, message); | 1084 | tt_global_entry->common.addr, message); |
| 1092 | hlist_del_rcu(node); | 1085 | hlist_del_rcu(&orig_entry->list); |
| 1093 | batadv_tt_orig_list_entry_free_ref(orig_entry); | 1086 | batadv_tt_orig_list_entry_free_ref(orig_entry); |
| 1094 | } | 1087 | } |
| 1095 | } | 1088 | } |
| @@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, | |||
| 1108 | { | 1101 | { |
| 1109 | bool last_entry = true; | 1102 | bool last_entry = true; |
| 1110 | struct hlist_head *head; | 1103 | struct hlist_head *head; |
| 1111 | struct hlist_node *node; | ||
| 1112 | struct batadv_tt_orig_list_entry *orig_entry; | 1104 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1113 | 1105 | ||
| 1114 | /* no local entry exists, case 1: | 1106 | /* no local entry exists, case 1: |
| @@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, | |||
| 1117 | 1109 | ||
| 1118 | rcu_read_lock(); | 1110 | rcu_read_lock(); |
| 1119 | head = &tt_global_entry->orig_list; | 1111 | head = &tt_global_entry->orig_list; |
| 1120 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 1112 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
| 1121 | if (orig_entry->orig_node != orig_node) { | 1113 | if (orig_entry->orig_node != orig_node) { |
| 1122 | last_entry = false; | 1114 | last_entry = false; |
| 1123 | break; | 1115 | break; |
| @@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
| 1202 | struct batadv_tt_common_entry *tt_common_entry; | 1194 | struct batadv_tt_common_entry *tt_common_entry; |
| 1203 | uint32_t i; | 1195 | uint32_t i; |
| 1204 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1196 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
| 1205 | struct hlist_node *node, *safe; | 1197 | struct hlist_node *safe; |
| 1206 | struct hlist_head *head; | 1198 | struct hlist_head *head; |
| 1207 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1199 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 1208 | 1200 | ||
| @@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
| 1214 | list_lock = &hash->list_locks[i]; | 1206 | list_lock = &hash->list_locks[i]; |
| 1215 | 1207 | ||
| 1216 | spin_lock_bh(list_lock); | 1208 | spin_lock_bh(list_lock); |
| 1217 | hlist_for_each_entry_safe(tt_common_entry, node, safe, | 1209 | hlist_for_each_entry_safe(tt_common_entry, safe, |
| 1218 | head, hash_entry) { | 1210 | head, hash_entry) { |
| 1219 | tt_global = container_of(tt_common_entry, | 1211 | tt_global = container_of(tt_common_entry, |
| 1220 | struct batadv_tt_global_entry, | 1212 | struct batadv_tt_global_entry, |
| @@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
| 1227 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1219 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
| 1228 | "Deleting global tt entry %pM: %s\n", | 1220 | "Deleting global tt entry %pM: %s\n", |
| 1229 | tt_global->common.addr, message); | 1221 | tt_global->common.addr, message); |
| 1230 | hlist_del_rcu(node); | 1222 | hlist_del_rcu(&tt_common_entry->hash_entry); |
| 1231 | batadv_tt_global_entry_free_ref(tt_global); | 1223 | batadv_tt_global_entry_free_ref(tt_global); |
| 1232 | } | 1224 | } |
| 1233 | } | 1225 | } |
| @@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
| 1262 | { | 1254 | { |
| 1263 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1255 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
| 1264 | struct hlist_head *head; | 1256 | struct hlist_head *head; |
| 1265 | struct hlist_node *node, *node_tmp; | 1257 | struct hlist_node *node_tmp; |
| 1266 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1258 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 1267 | uint32_t i; | 1259 | uint32_t i; |
| 1268 | char *msg = NULL; | 1260 | char *msg = NULL; |
| @@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
| 1274 | list_lock = &hash->list_locks[i]; | 1266 | list_lock = &hash->list_locks[i]; |
| 1275 | 1267 | ||
| 1276 | spin_lock_bh(list_lock); | 1268 | spin_lock_bh(list_lock); |
| 1277 | hlist_for_each_entry_safe(tt_common, node, node_tmp, head, | 1269 | hlist_for_each_entry_safe(tt_common, node_tmp, head, |
| 1278 | hash_entry) { | 1270 | hash_entry) { |
| 1279 | tt_global = container_of(tt_common, | 1271 | tt_global = container_of(tt_common, |
| 1280 | struct batadv_tt_global_entry, | 1272 | struct batadv_tt_global_entry, |
| @@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
| 1287 | "Deleting global tt entry (%pM): %s\n", | 1279 | "Deleting global tt entry (%pM): %s\n", |
| 1288 | tt_global->common.addr, msg); | 1280 | tt_global->common.addr, msg); |
| 1289 | 1281 | ||
| 1290 | hlist_del_rcu(node); | 1282 | hlist_del_rcu(&tt_common->hash_entry); |
| 1291 | 1283 | ||
| 1292 | batadv_tt_global_entry_free_ref(tt_global); | 1284 | batadv_tt_global_entry_free_ref(tt_global); |
| 1293 | } | 1285 | } |
| @@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
| 1301 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1293 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 1302 | struct batadv_tt_common_entry *tt_common_entry; | 1294 | struct batadv_tt_common_entry *tt_common_entry; |
| 1303 | struct batadv_tt_global_entry *tt_global; | 1295 | struct batadv_tt_global_entry *tt_global; |
| 1304 | struct hlist_node *node, *node_tmp; | 1296 | struct hlist_node *node_tmp; |
| 1305 | struct hlist_head *head; | 1297 | struct hlist_head *head; |
| 1306 | uint32_t i; | 1298 | uint32_t i; |
| 1307 | 1299 | ||
| @@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
| 1315 | list_lock = &hash->list_locks[i]; | 1307 | list_lock = &hash->list_locks[i]; |
| 1316 | 1308 | ||
| 1317 | spin_lock_bh(list_lock); | 1309 | spin_lock_bh(list_lock); |
| 1318 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, | 1310 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, |
| 1319 | head, hash_entry) { | 1311 | head, hash_entry) { |
| 1320 | hlist_del_rcu(node); | 1312 | hlist_del_rcu(&tt_common_entry->hash_entry); |
| 1321 | tt_global = container_of(tt_common_entry, | 1313 | tt_global = container_of(tt_common_entry, |
| 1322 | struct batadv_tt_global_entry, | 1314 | struct batadv_tt_global_entry, |
| 1323 | common); | 1315 | common); |
| @@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
| 1397 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1389 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
| 1398 | struct batadv_tt_common_entry *tt_common; | 1390 | struct batadv_tt_common_entry *tt_common; |
| 1399 | struct batadv_tt_global_entry *tt_global; | 1391 | struct batadv_tt_global_entry *tt_global; |
| 1400 | struct hlist_node *node; | ||
| 1401 | struct hlist_head *head; | 1392 | struct hlist_head *head; |
| 1402 | uint32_t i; | 1393 | uint32_t i; |
| 1403 | int j; | 1394 | int j; |
| @@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
| 1406 | head = &hash->table[i]; | 1397 | head = &hash->table[i]; |
| 1407 | 1398 | ||
| 1408 | rcu_read_lock(); | 1399 | rcu_read_lock(); |
| 1409 | hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { | 1400 | hlist_for_each_entry_rcu(tt_common, head, hash_entry) { |
| 1410 | tt_global = container_of(tt_common, | 1401 | tt_global = container_of(tt_common, |
| 1411 | struct batadv_tt_global_entry, | 1402 | struct batadv_tt_global_entry, |
| 1412 | common); | 1403 | common); |
| @@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) | |||
| 1449 | uint16_t total = 0, total_one; | 1440 | uint16_t total = 0, total_one; |
| 1450 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; | 1441 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
| 1451 | struct batadv_tt_common_entry *tt_common; | 1442 | struct batadv_tt_common_entry *tt_common; |
| 1452 | struct hlist_node *node; | ||
| 1453 | struct hlist_head *head; | 1443 | struct hlist_head *head; |
| 1454 | uint32_t i; | 1444 | uint32_t i; |
| 1455 | int j; | 1445 | int j; |
| @@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) | |||
| 1458 | head = &hash->table[i]; | 1448 | head = &hash->table[i]; |
| 1459 | 1449 | ||
| 1460 | rcu_read_lock(); | 1450 | rcu_read_lock(); |
| 1461 | hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { | 1451 | hlist_for_each_entry_rcu(tt_common, head, hash_entry) { |
| 1462 | /* clients not yet committed must not be taken into | 1452 | /* clients not yet committed must not be taken into |
| 1463 | * account while computing the CRC | 1453 | * account while computing the CRC |
| 1464 | */ | 1454 | */ |
| @@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
| 1597 | struct batadv_tt_common_entry *tt_common_entry; | 1587 | struct batadv_tt_common_entry *tt_common_entry; |
| 1598 | struct batadv_tt_query_packet *tt_response; | 1588 | struct batadv_tt_query_packet *tt_response; |
| 1599 | struct batadv_tt_change *tt_change; | 1589 | struct batadv_tt_change *tt_change; |
| 1600 | struct hlist_node *node; | ||
| 1601 | struct hlist_head *head; | 1590 | struct hlist_head *head; |
| 1602 | struct sk_buff *skb = NULL; | 1591 | struct sk_buff *skb = NULL; |
| 1603 | uint16_t tt_tot, tt_count; | 1592 | uint16_t tt_tot, tt_count; |
| @@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
| 1627 | for (i = 0; i < hash->size; i++) { | 1616 | for (i = 0; i < hash->size; i++) { |
| 1628 | head = &hash->table[i]; | 1617 | head = &hash->table[i]; |
| 1629 | 1618 | ||
| 1630 | hlist_for_each_entry_rcu(tt_common_entry, node, | 1619 | hlist_for_each_entry_rcu(tt_common_entry, |
| 1631 | head, hash_entry) { | 1620 | head, hash_entry) { |
| 1632 | if (tt_count == tt_tot) | 1621 | if (tt_count == tt_tot) |
| 1633 | break; | 1622 | break; |
| @@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, | |||
| 2307 | uint32_t i; | 2296 | uint32_t i; |
| 2308 | uint16_t changed_num = 0; | 2297 | uint16_t changed_num = 0; |
| 2309 | struct hlist_head *head; | 2298 | struct hlist_head *head; |
| 2310 | struct hlist_node *node; | ||
| 2311 | struct batadv_tt_common_entry *tt_common_entry; | 2299 | struct batadv_tt_common_entry *tt_common_entry; |
| 2312 | 2300 | ||
| 2313 | if (!hash) | 2301 | if (!hash) |
| @@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, | |||
| 2317 | head = &hash->table[i]; | 2305 | head = &hash->table[i]; |
| 2318 | 2306 | ||
| 2319 | rcu_read_lock(); | 2307 | rcu_read_lock(); |
| 2320 | hlist_for_each_entry_rcu(tt_common_entry, node, | 2308 | hlist_for_each_entry_rcu(tt_common_entry, |
| 2321 | head, hash_entry) { | 2309 | head, hash_entry) { |
| 2322 | if (enable) { | 2310 | if (enable) { |
| 2323 | if ((tt_common_entry->flags & flags) == flags) | 2311 | if ((tt_common_entry->flags & flags) == flags) |
| @@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
| 2342 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; | 2330 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
| 2343 | struct batadv_tt_common_entry *tt_common; | 2331 | struct batadv_tt_common_entry *tt_common; |
| 2344 | struct batadv_tt_local_entry *tt_local; | 2332 | struct batadv_tt_local_entry *tt_local; |
| 2345 | struct hlist_node *node, *node_tmp; | 2333 | struct hlist_node *node_tmp; |
| 2346 | struct hlist_head *head; | 2334 | struct hlist_head *head; |
| 2347 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 2335 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 2348 | uint32_t i; | 2336 | uint32_t i; |
| @@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
| 2355 | list_lock = &hash->list_locks[i]; | 2343 | list_lock = &hash->list_locks[i]; |
| 2356 | 2344 | ||
| 2357 | spin_lock_bh(list_lock); | 2345 | spin_lock_bh(list_lock); |
| 2358 | hlist_for_each_entry_safe(tt_common, node, node_tmp, head, | 2346 | hlist_for_each_entry_safe(tt_common, node_tmp, head, |
| 2359 | hash_entry) { | 2347 | hash_entry) { |
| 2360 | if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) | 2348 | if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) |
| 2361 | continue; | 2349 | continue; |
| @@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
| 2365 | tt_common->addr); | 2353 | tt_common->addr); |
| 2366 | 2354 | ||
| 2367 | atomic_dec(&bat_priv->tt.local_entry_num); | 2355 | atomic_dec(&bat_priv->tt.local_entry_num); |
| 2368 | hlist_del_rcu(node); | 2356 | hlist_del_rcu(&tt_common->hash_entry); |
| 2369 | tt_local = container_of(tt_common, | 2357 | tt_local = container_of(tt_common, |
| 2370 | struct batadv_tt_local_entry, | 2358 | struct batadv_tt_local_entry, |
| 2371 | common); | 2359 | common); |
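Every hunk in translation-table.c above is the same mechanical conversion: the iterator macros lose their separate struct hlist_node cursor, the remaining variable is the typed entry itself, and deletions name the embedded node explicitly. A minimal sketch of the new-style safe walk, using illustrative names (tt_entry, flush_bucket) rather than batman-adv symbols:

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct tt_entry {
            struct hlist_node hash_entry;   /* links the entry into a bucket */
            /* ... payload ... */
    };

    static void flush_bucket(struct hlist_head *head)
    {
            struct tt_entry *entry;
            struct hlist_node *node_tmp;

            /* only the lookahead node survives; it caches the next element
             * so the current one can be unlinked mid-walk */
            hlist_for_each_entry_safe(entry, node_tmp, head, hash_entry) {
                    hlist_del_rcu(&entry->hash_entry); /* was hlist_del_rcu(node) */
                    kfree(entry);  /* real code defers freeing via RCU refcounts */
            }
    }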
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 22d2785177d1..c053244b97bd 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
| @@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
| 97 | { | 97 | { |
| 98 | struct batadv_hashtable *hash = bat_priv->vis.hash; | 98 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
| 99 | struct hlist_head *head; | 99 | struct hlist_head *head; |
| 100 | struct hlist_node *node; | ||
| 101 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; | 100 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; |
| 102 | uint32_t index; | 101 | uint32_t index; |
| 103 | 102 | ||
| @@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
| 108 | head = &hash->table[index]; | 107 | head = &hash->table[index]; |
| 109 | 108 | ||
| 110 | rcu_read_lock(); | 109 | rcu_read_lock(); |
| 111 | hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { | 110 | hlist_for_each_entry_rcu(vis_info, head, hash_entry) { |
| 112 | if (!batadv_vis_info_cmp(node, data)) | 111 | if (!batadv_vis_info_cmp(&vis_info->hash_entry, data)) |
| 113 | continue; | 112 | continue; |
| 114 | 113 | ||
| 115 | vis_info_tmp = vis_info; | 114 | vis_info_tmp = vis_info; |
| @@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface, | |||
| 128 | bool primary) | 127 | bool primary) |
| 129 | { | 128 | { |
| 130 | struct batadv_vis_if_list_entry *entry; | 129 | struct batadv_vis_if_list_entry *entry; |
| 131 | struct hlist_node *pos; | ||
| 132 | 130 | ||
| 133 | hlist_for_each_entry(entry, pos, if_list, list) { | 131 | hlist_for_each_entry(entry, if_list, list) { |
| 134 | if (batadv_compare_eth(entry->addr, interface)) | 132 | if (batadv_compare_eth(entry->addr, interface)) |
| 135 | return; | 133 | return; |
| 136 | } | 134 | } |
| @@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq, | |||
| 148 | const struct hlist_head *if_list) | 146 | const struct hlist_head *if_list) |
| 149 | { | 147 | { |
| 150 | struct batadv_vis_if_list_entry *entry; | 148 | struct batadv_vis_if_list_entry *entry; |
| 151 | struct hlist_node *pos; | ||
| 152 | 149 | ||
| 153 | hlist_for_each_entry(entry, pos, if_list, list) { | 150 | hlist_for_each_entry(entry, if_list, list) { |
| 154 | if (entry->primary) | 151 | if (entry->primary) |
| 155 | seq_printf(seq, "PRIMARY, "); | 152 | seq_printf(seq, "PRIMARY, "); |
| 156 | else | 153 | else |
| @@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq, | |||
| 198 | { | 195 | { |
| 199 | int i; | 196 | int i; |
| 200 | struct batadv_vis_if_list_entry *entry; | 197 | struct batadv_vis_if_list_entry *entry; |
| 201 | struct hlist_node *pos; | ||
| 202 | 198 | ||
| 203 | hlist_for_each_entry(entry, pos, list, list) { | 199 | hlist_for_each_entry(entry, list, list) { |
| 204 | seq_printf(seq, "%pM,", entry->addr); | 200 | seq_printf(seq, "%pM,", entry->addr); |
| 205 | 201 | ||
| 206 | for (i = 0; i < packet->entries; i++) | 202 | for (i = 0; i < packet->entries; i++) |
| @@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq, | |||
| 218 | static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, | 214 | static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, |
| 219 | const struct hlist_head *head) | 215 | const struct hlist_head *head) |
| 220 | { | 216 | { |
| 221 | struct hlist_node *node; | ||
| 222 | struct batadv_vis_info *info; | 217 | struct batadv_vis_info *info; |
| 223 | struct batadv_vis_packet *packet; | 218 | struct batadv_vis_packet *packet; |
| 224 | uint8_t *entries_pos; | 219 | uint8_t *entries_pos; |
| 225 | struct batadv_vis_info_entry *entries; | 220 | struct batadv_vis_info_entry *entries; |
| 226 | struct batadv_vis_if_list_entry *entry; | 221 | struct batadv_vis_if_list_entry *entry; |
| 227 | struct hlist_node *pos, *n; | 222 | struct hlist_node *n; |
| 228 | 223 | ||
| 229 | HLIST_HEAD(vis_if_list); | 224 | HLIST_HEAD(vis_if_list); |
| 230 | 225 | ||
| 231 | hlist_for_each_entry_rcu(info, node, head, hash_entry) { | 226 | hlist_for_each_entry_rcu(info, head, hash_entry) { |
| 232 | packet = (struct batadv_vis_packet *)info->skb_packet->data; | 227 | packet = (struct batadv_vis_packet *)info->skb_packet->data; |
| 233 | entries_pos = (uint8_t *)packet + sizeof(*packet); | 228 | entries_pos = (uint8_t *)packet + sizeof(*packet); |
| 234 | entries = (struct batadv_vis_info_entry *)entries_pos; | 229 | entries = (struct batadv_vis_info_entry *)entries_pos; |
| @@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, | |||
| 240 | batadv_vis_data_read_entries(seq, &vis_if_list, packet, | 235 | batadv_vis_data_read_entries(seq, &vis_if_list, packet, |
| 241 | entries); | 236 | entries); |
| 242 | 237 | ||
| 243 | hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { | 238 | hlist_for_each_entry_safe(entry, n, &vis_if_list, list) { |
| 244 | hlist_del(&entry->list); | 239 | hlist_del(&entry->list); |
| 245 | kfree(entry); | 240 | kfree(entry); |
| 246 | } | 241 | } |
| @@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, | |||
| 519 | { | 514 | { |
| 520 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 515 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 521 | struct batadv_neigh_node *router; | 516 | struct batadv_neigh_node *router; |
| 522 | struct hlist_node *node; | ||
| 523 | struct hlist_head *head; | 517 | struct hlist_head *head; |
| 524 | struct batadv_orig_node *orig_node; | 518 | struct batadv_orig_node *orig_node; |
| 525 | struct batadv_vis_packet *packet; | 519 | struct batadv_vis_packet *packet; |
| @@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, | |||
| 532 | head = &hash->table[i]; | 526 | head = &hash->table[i]; |
| 533 | 527 | ||
| 534 | rcu_read_lock(); | 528 | rcu_read_lock(); |
| 535 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 529 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 536 | router = batadv_orig_node_get_router(orig_node); | 530 | router = batadv_orig_node_get_router(orig_node); |
| 537 | if (!router) | 531 | if (!router) |
| 538 | continue; | 532 | continue; |
| @@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info) | |||
| 571 | static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) | 565 | static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) |
| 572 | { | 566 | { |
| 573 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 567 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 574 | struct hlist_node *node; | ||
| 575 | struct hlist_head *head; | 568 | struct hlist_head *head; |
| 576 | struct batadv_orig_node *orig_node; | 569 | struct batadv_orig_node *orig_node; |
| 577 | struct batadv_neigh_node *router; | 570 | struct batadv_neigh_node *router; |
| @@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) | |||
| 605 | head = &hash->table[i]; | 598 | head = &hash->table[i]; |
| 606 | 599 | ||
| 607 | rcu_read_lock(); | 600 | rcu_read_lock(); |
| 608 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 601 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 609 | router = batadv_orig_node_get_router(orig_node); | 602 | router = batadv_orig_node_get_router(orig_node); |
| 610 | if (!router) | 603 | if (!router) |
| 611 | continue; | 604 | continue; |
| @@ -644,7 +637,7 @@ next: | |||
| 644 | head = &hash->table[i]; | 637 | head = &hash->table[i]; |
| 645 | 638 | ||
| 646 | rcu_read_lock(); | 639 | rcu_read_lock(); |
| 647 | hlist_for_each_entry_rcu(tt_common_entry, node, head, | 640 | hlist_for_each_entry_rcu(tt_common_entry, head, |
| 648 | hash_entry) { | 641 | hash_entry) { |
| 649 | packet_pos = skb_put(info->skb_packet, sizeof(*entry)); | 642 | packet_pos = skb_put(info->skb_packet, sizeof(*entry)); |
| 650 | entry = (struct batadv_vis_info_entry *)packet_pos; | 643 | entry = (struct batadv_vis_info_entry *)packet_pos; |
| @@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
| 673 | { | 666 | { |
| 674 | uint32_t i; | 667 | uint32_t i; |
| 675 | struct batadv_hashtable *hash = bat_priv->vis.hash; | 668 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
| 676 | struct hlist_node *node, *node_tmp; | 669 | struct hlist_node *node_tmp; |
| 677 | struct hlist_head *head; | 670 | struct hlist_head *head; |
| 678 | struct batadv_vis_info *info; | 671 | struct batadv_vis_info *info; |
| 679 | 672 | ||
| 680 | for (i = 0; i < hash->size; i++) { | 673 | for (i = 0; i < hash->size; i++) { |
| 681 | head = &hash->table[i]; | 674 | head = &hash->table[i]; |
| 682 | 675 | ||
| 683 | hlist_for_each_entry_safe(info, node, node_tmp, | 676 | hlist_for_each_entry_safe(info, node_tmp, |
| 684 | head, hash_entry) { | 677 | head, hash_entry) { |
| 685 | /* never purge own data. */ | 678 | /* never purge own data. */ |
| 686 | if (info == bat_priv->vis.my_info) | 679 | if (info == bat_priv->vis.my_info) |
| @@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
| 688 | 681 | ||
| 689 | if (batadv_has_timed_out(info->first_seen, | 682 | if (batadv_has_timed_out(info->first_seen, |
| 690 | BATADV_VIS_TIMEOUT)) { | 683 | BATADV_VIS_TIMEOUT)) { |
| 691 | hlist_del(node); | 684 | hlist_del(&info->hash_entry); |
| 692 | batadv_send_list_del(info); | 685 | batadv_send_list_del(info); |
| 693 | kref_put(&info->refcount, batadv_free_info); | 686 | kref_put(&info->refcount, batadv_free_info); |
| 694 | } | 687 | } |
| @@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | |||
| 700 | struct batadv_vis_info *info) | 693 | struct batadv_vis_info *info) |
| 701 | { | 694 | { |
| 702 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 695 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
| 703 | struct hlist_node *node; | ||
| 704 | struct hlist_head *head; | 696 | struct hlist_head *head; |
| 705 | struct batadv_orig_node *orig_node; | 697 | struct batadv_orig_node *orig_node; |
| 706 | struct batadv_vis_packet *packet; | 698 | struct batadv_vis_packet *packet; |
| @@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | |||
| 715 | head = &hash->table[i]; | 707 | head = &hash->table[i]; |
| 716 | 708 | ||
| 717 | rcu_read_lock(); | 709 | rcu_read_lock(); |
| 718 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 710 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
| 719 | /* if it's a vis server and reachable, send it. */ | 711 | /* if it's a vis server and reachable, send it. */ |
| 720 | if (!(orig_node->flags & BATADV_VIS_SERVER)) | 712 | if (!(orig_node->flags & BATADV_VIS_SERVER)) |
| 721 | continue; | 713 | continue; |
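The vis.c hunks add one wrinkle: batadv_vis_info_cmp() still takes an hlist_node, so the caller now derives it from the typed entry as &vis_info->hash_entry. That is sound because entry and node are related by container_of(); a sketch with stand-in names (vis_info and vis_cmp here are illustrative, not the in-tree symbols):

    #include <linux/list.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct vis_info {
            struct hlist_node hash_entry;
            unsigned char addr[6];
    };

    /* the helper keeps the old node-based signature */
    static bool vis_cmp(struct hlist_node *node, const void *data)
    {
            const struct vis_info *info;

            /* recover the typed entry from its embedded list node */
            info = container_of(node, struct vis_info, hash_entry);
            return memcmp(info->addr, data, sizeof(info->addr)) == 0;
    }

The call site then reads vis_cmp(&info->hash_entry, data), exactly the shape used in the lookup hunk above.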
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 07f073935811..6a93614f2c49 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
| @@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = { | |||
| 70 | void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | 70 | void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) |
| 71 | { | 71 | { |
| 72 | struct sock *sk; | 72 | struct sock *sk; |
| 73 | struct hlist_node *node; | ||
| 74 | struct sk_buff *skb_copy = NULL; | 73 | struct sk_buff *skb_copy = NULL; |
| 75 | 74 | ||
| 76 | BT_DBG("hdev %p len %d", hdev, skb->len); | 75 | BT_DBG("hdev %p len %d", hdev, skb->len); |
| 77 | 76 | ||
| 78 | read_lock(&hci_sk_list.lock); | 77 | read_lock(&hci_sk_list.lock); |
| 79 | 78 | ||
| 80 | sk_for_each(sk, node, &hci_sk_list.head) { | 79 | sk_for_each(sk, &hci_sk_list.head) { |
| 81 | struct hci_filter *flt; | 80 | struct hci_filter *flt; |
| 82 | struct sk_buff *nskb; | 81 | struct sk_buff *nskb; |
| 83 | 82 | ||
| @@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 142 | void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) | 141 | void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) |
| 143 | { | 142 | { |
| 144 | struct sock *sk; | 143 | struct sock *sk; |
| 145 | struct hlist_node *node; | ||
| 146 | 144 | ||
| 147 | BT_DBG("len %d", skb->len); | 145 | BT_DBG("len %d", skb->len); |
| 148 | 146 | ||
| 149 | read_lock(&hci_sk_list.lock); | 147 | read_lock(&hci_sk_list.lock); |
| 150 | 148 | ||
| 151 | sk_for_each(sk, node, &hci_sk_list.head) { | 149 | sk_for_each(sk, &hci_sk_list.head) { |
| 152 | struct sk_buff *nskb; | 150 | struct sk_buff *nskb; |
| 153 | 151 | ||
| 154 | /* Skip the original socket */ | 152 | /* Skip the original socket */ |
| @@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) | |||
| 176 | void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | 174 | void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) |
| 177 | { | 175 | { |
| 178 | struct sock *sk; | 176 | struct sock *sk; |
| 179 | struct hlist_node *node; | ||
| 180 | struct sk_buff *skb_copy = NULL; | 177 | struct sk_buff *skb_copy = NULL; |
| 181 | __le16 opcode; | 178 | __le16 opcode; |
| 182 | 179 | ||
| @@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 210 | 207 | ||
| 211 | read_lock(&hci_sk_list.lock); | 208 | read_lock(&hci_sk_list.lock); |
| 212 | 209 | ||
| 213 | sk_for_each(sk, node, &hci_sk_list.head) { | 210 | sk_for_each(sk, &hci_sk_list.head) { |
| 214 | struct sk_buff *nskb; | 211 | struct sk_buff *nskb; |
| 215 | 212 | ||
| 216 | if (sk->sk_state != BT_BOUND) | 213 | if (sk->sk_state != BT_BOUND) |
| @@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 251 | static void send_monitor_event(struct sk_buff *skb) | 248 | static void send_monitor_event(struct sk_buff *skb) |
| 252 | { | 249 | { |
| 253 | struct sock *sk; | 250 | struct sock *sk; |
| 254 | struct hlist_node *node; | ||
| 255 | 251 | ||
| 256 | BT_DBG("len %d", skb->len); | 252 | BT_DBG("len %d", skb->len); |
| 257 | 253 | ||
| 258 | read_lock(&hci_sk_list.lock); | 254 | read_lock(&hci_sk_list.lock); |
| 259 | 255 | ||
| 260 | sk_for_each(sk, node, &hci_sk_list.head) { | 256 | sk_for_each(sk, &hci_sk_list.head) { |
| 261 | struct sk_buff *nskb; | 257 | struct sk_buff *nskb; |
| 262 | 258 | ||
| 263 | if (sk->sk_state != BT_BOUND) | 259 | if (sk->sk_state != BT_BOUND) |
| @@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) | |||
| 393 | 389 | ||
| 394 | if (event == HCI_DEV_UNREG) { | 390 | if (event == HCI_DEV_UNREG) { |
| 395 | struct sock *sk; | 391 | struct sock *sk; |
| 396 | struct hlist_node *node; | ||
| 397 | 392 | ||
| 398 | /* Detach sockets from device */ | 393 | /* Detach sockets from device */ |
| 399 | read_lock(&hci_sk_list.lock); | 394 | read_lock(&hci_sk_list.lock); |
| 400 | sk_for_each(sk, node, &hci_sk_list.head) { | 395 | sk_for_each(sk, &hci_sk_list.head) { |
| 401 | bh_lock_sock_nested(sk); | 396 | bh_lock_sock_nested(sk); |
| 402 | if (hci_pi(sk)->hdev == hdev) { | 397 | if (hci_pi(sk)->hdev == hdev) { |
| 403 | hci_pi(sk)->hdev = NULL; | 398 | hci_pi(sk)->hdev = NULL; |
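The Bluetooth socket lists use the sk_for_each() family, which drops its node argument the same way. A minimal sketch of the two-argument form over a bt_sock_list-style head and lock pair (for_each_bound_sk is an illustrative name):

    #include <net/bluetooth/bluetooth.h>

    static void for_each_bound_sk(struct bt_sock_list *list)
    {
            struct sock *sk;

            read_lock(&list->lock);
            sk_for_each(sk, &list->head) {  /* was sk_for_each(sk, node, head) */
                    if (sk->sk_state != BT_BOUND)
                            continue;
                    /* clone and queue skbs here */
            }
            read_unlock(&list->lock);
    }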
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index ce3f6658f4b2..c23bae86263b 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) | |||
| 107 | static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) | 107 | static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) |
| 108 | { | 108 | { |
| 109 | struct sock *sk = NULL; | 109 | struct sock *sk = NULL; |
| 110 | struct hlist_node *node; | ||
| 111 | 110 | ||
| 112 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 111 | sk_for_each(sk, &rfcomm_sk_list.head) { |
| 113 | if (rfcomm_pi(sk)->channel == channel && | 112 | if (rfcomm_pi(sk)->channel == channel && |
| 114 | !bacmp(&bt_sk(sk)->src, src)) | 113 | !bacmp(&bt_sk(sk)->src, src)) |
| 115 | break; | 114 | break; |
| 116 | } | 115 | } |
| 117 | 116 | ||
| 118 | return node ? sk : NULL; | 117 | return sk; |
| 119 | } | 118 | } |
| 120 | 119 | ||
| 121 | /* Find socket with channel and source bdaddr. | 120 | /* Find socket with channel and source bdaddr. |
| @@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) | |||
| 124 | static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) | 123 | static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) |
| 125 | { | 124 | { |
| 126 | struct sock *sk = NULL, *sk1 = NULL; | 125 | struct sock *sk = NULL, *sk1 = NULL; |
| 127 | struct hlist_node *node; | ||
| 128 | 126 | ||
| 129 | read_lock(&rfcomm_sk_list.lock); | 127 | read_lock(&rfcomm_sk_list.lock); |
| 130 | 128 | ||
| 131 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 129 | sk_for_each(sk, &rfcomm_sk_list.head) { |
| 132 | if (state && sk->sk_state != state) | 130 | if (state && sk->sk_state != state) |
| 133 | continue; | 131 | continue; |
| 134 | 132 | ||
| @@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t * | |||
| 145 | 143 | ||
| 146 | read_unlock(&rfcomm_sk_list.lock); | 144 | read_unlock(&rfcomm_sk_list.lock); |
| 147 | 145 | ||
| 148 | return node ? sk : sk1; | 146 | return sk ? sk : sk1; |
| 149 | } | 147 | } |
| 150 | 148 | ||
| 151 | static void rfcomm_sock_destruct(struct sock *sk) | 149 | static void rfcomm_sock_destruct(struct sock *sk) |
| @@ -970,11 +968,10 @@ done: | |||
| 970 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) | 968 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) |
| 971 | { | 969 | { |
| 972 | struct sock *sk; | 970 | struct sock *sk; |
| 973 | struct hlist_node *node; | ||
| 974 | 971 | ||
| 975 | read_lock(&rfcomm_sk_list.lock); | 972 | read_lock(&rfcomm_sk_list.lock); |
| 976 | 973 | ||
| 977 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 974 | sk_for_each(sk, &rfcomm_sk_list.head) { |
| 978 | seq_printf(f, "%pMR %pMR %d %d\n", | 975 | seq_printf(f, "%pMR %pMR %d %d\n", |
| 979 | &bt_sk(sk)->src, &bt_sk(sk)->dst, | 976 | &bt_sk(sk)->src, &bt_sk(sk)->dst, |
| 980 | sk->sk_state, rfcomm_pi(sk)->channel); | 977 | sk->sk_state, rfcomm_pi(sk)->channel); |
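The return-statement rewrites are the one place the conversion changes logic rather than just arity: the old macros left sk pointing at the last element after a full walk, so only the node cursor could distinguish "found" from "exhausted". The new macros leave the typed cursor NULL on a full walk, so it doubles as the result. A sketch of the address lookup above under that assumption (find_by_channel is an illustrative name; rfcomm_pi() and bacmp() are the accessors used in the hunks):

    static struct sock *find_by_channel(u8 channel, bdaddr_t *src)
    {
            struct sock *sk;

            sk_for_each(sk, &rfcomm_sk_list.head) {
                    if (rfcomm_pi(sk)->channel == channel &&
                        !bacmp(&bt_sk(sk)->src, src))
                            break;  /* sk still points at the match */
            }

            /* on a full walk the new iterator leaves sk NULL, so the old
             * "node ? sk : NULL" test collapses to sk itself */
            return sk;
    }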
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index b5178d62064e..79d87d8d4f51 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -259,10 +259,9 @@ drop: | |||
| 259 | /* -------- Socket interface ---------- */ | 259 | /* -------- Socket interface ---------- */ |
| 260 | static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) | 260 | static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) |
| 261 | { | 261 | { |
| 262 | struct hlist_node *node; | ||
| 263 | struct sock *sk; | 262 | struct sock *sk; |
| 264 | 263 | ||
| 265 | sk_for_each(sk, node, &sco_sk_list.head) { | 264 | sk_for_each(sk, &sco_sk_list.head) { |
| 266 | if (sk->sk_state != BT_LISTEN) | 265 | if (sk->sk_state != BT_LISTEN) |
| 267 | continue; | 266 | continue; |
| 268 | 267 | ||
| @@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) | |||
| 279 | static struct sock *sco_get_sock_listen(bdaddr_t *src) | 278 | static struct sock *sco_get_sock_listen(bdaddr_t *src) |
| 280 | { | 279 | { |
| 281 | struct sock *sk = NULL, *sk1 = NULL; | 280 | struct sock *sk = NULL, *sk1 = NULL; |
| 282 | struct hlist_node *node; | ||
| 283 | 281 | ||
| 284 | read_lock(&sco_sk_list.lock); | 282 | read_lock(&sco_sk_list.lock); |
| 285 | 283 | ||
| 286 | sk_for_each(sk, node, &sco_sk_list.head) { | 284 | sk_for_each(sk, &sco_sk_list.head) { |
| 287 | if (sk->sk_state != BT_LISTEN) | 285 | if (sk->sk_state != BT_LISTEN) |
| 288 | continue; | 286 | continue; |
| 289 | 287 | ||
| @@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src) | |||
| 298 | 296 | ||
| 299 | read_unlock(&sco_sk_list.lock); | 297 | read_unlock(&sco_sk_list.lock); |
| 300 | 298 | ||
| 301 | return node ? sk : sk1; | 299 | return sk ? sk : sk1; |
| 302 | } | 300 | } |
| 303 | 301 | ||
| 304 | static void sco_sock_destruct(struct sock *sk) | 302 | static void sco_sock_destruct(struct sock *sk) |
| @@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn) | |||
| 951 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) | 949 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) |
| 952 | { | 950 | { |
| 953 | struct sock *sk; | 951 | struct sock *sk; |
| 954 | struct hlist_node *node; | ||
| 955 | int lm = 0; | 952 | int lm = 0; |
| 956 | 953 | ||
| 957 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); | 954 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); |
| 958 | 955 | ||
| 959 | /* Find listening sockets */ | 956 | /* Find listening sockets */ |
| 960 | read_lock(&sco_sk_list.lock); | 957 | read_lock(&sco_sk_list.lock); |
| 961 | sk_for_each(sk, node, &sco_sk_list.head) { | 958 | sk_for_each(sk, &sco_sk_list.head) { |
| 962 | if (sk->sk_state != BT_LISTEN) | 959 | if (sk->sk_state != BT_LISTEN) |
| 963 | continue; | 960 | continue; |
| 964 | 961 | ||
| @@ -1018,11 +1015,10 @@ drop: | |||
| 1018 | static int sco_debugfs_show(struct seq_file *f, void *p) | 1015 | static int sco_debugfs_show(struct seq_file *f, void *p) |
| 1019 | { | 1016 | { |
| 1020 | struct sock *sk; | 1017 | struct sock *sk; |
| 1021 | struct hlist_node *node; | ||
| 1022 | 1018 | ||
| 1023 | read_lock(&sco_sk_list.lock); | 1019 | read_lock(&sco_sk_list.lock); |
| 1024 | 1020 | ||
| 1025 | sk_for_each(sk, node, &sco_sk_list.head) { | 1021 | sk_for_each(sk, &sco_sk_list.head) { |
| 1026 | seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, | 1022 | seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, |
| 1027 | &bt_sk(sk)->dst, sk->sk_state); | 1023 | &bt_sk(sk)->dst, sk->sk_state); |
| 1028 | } | 1024 | } |
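sco_get_sock_listen() (like its rfcomm twin) keeps its ternary because it tracks two candidates: an exact source-address match breaks out of the loop, while a BDADDR_ANY listener is remembered as a fallback. Sketched with the new iterator, names as in the hunks above:

    static struct sock *get_listen_sk(bdaddr_t *src)
    {
            struct sock *sk, *sk1 = NULL;

            sk_for_each(sk, &sco_sk_list.head) {
                    if (sk->sk_state != BT_LISTEN)
                            continue;
                    if (!bacmp(&bt_sk(sk)->src, src))
                            break;          /* exact source match wins */
                    if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
                            sk1 = sk;       /* remember wildcard fallback */
            }

            return sk ? sk : sk1;   /* sk is non-NULL only on early break */
    }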
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 8117900af4de..b0812c91c0f0 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data) | |||
| 181 | spin_lock(&br->hash_lock); | 181 | spin_lock(&br->hash_lock); |
| 182 | for (i = 0; i < BR_HASH_SIZE; i++) { | 182 | for (i = 0; i < BR_HASH_SIZE; i++) { |
| 183 | struct net_bridge_fdb_entry *f; | 183 | struct net_bridge_fdb_entry *f; |
| 184 | struct hlist_node *h, *n; | 184 | struct hlist_node *n; |
| 185 | 185 | ||
| 186 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { | 186 | hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) { |
| 187 | unsigned long this_timer; | 187 | unsigned long this_timer; |
| 188 | if (f->is_static) | 188 | if (f->is_static) |
| 189 | continue; | 189 | continue; |
| @@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br) | |||
| 207 | spin_lock_bh(&br->hash_lock); | 207 | spin_lock_bh(&br->hash_lock); |
| 208 | for (i = 0; i < BR_HASH_SIZE; i++) { | 208 | for (i = 0; i < BR_HASH_SIZE; i++) { |
| 209 | struct net_bridge_fdb_entry *f; | 209 | struct net_bridge_fdb_entry *f; |
| 210 | struct hlist_node *h, *n; | 210 | struct hlist_node *n; |
| 211 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { | 211 | hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) { |
| 212 | if (!f->is_static) | 212 | if (!f->is_static) |
| 213 | fdb_delete(br, f); | 213 | fdb_delete(br, f); |
| 214 | } | 214 | } |
| @@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, | |||
| 266 | const unsigned char *addr, | 266 | const unsigned char *addr, |
| 267 | __u16 vid) | 267 | __u16 vid) |
| 268 | { | 268 | { |
| 269 | struct hlist_node *h; | ||
| 270 | struct net_bridge_fdb_entry *fdb; | 269 | struct net_bridge_fdb_entry *fdb; |
| 271 | 270 | ||
| 272 | hlist_for_each_entry_rcu(fdb, h, | 271 | hlist_for_each_entry_rcu(fdb, |
| 273 | &br->hash[br_mac_hash(addr, vid)], hlist) { | 272 | &br->hash[br_mac_hash(addr, vid)], hlist) { |
| 274 | if (ether_addr_equal(fdb->addr.addr, addr) && | 273 | if (ether_addr_equal(fdb->addr.addr, addr) && |
| 275 | fdb->vlan_id == vid) { | 274 | fdb->vlan_id == vid) { |
| @@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, | |||
| 315 | { | 314 | { |
| 316 | struct __fdb_entry *fe = buf; | 315 | struct __fdb_entry *fe = buf; |
| 317 | int i, num = 0; | 316 | int i, num = 0; |
| 318 | struct hlist_node *h; | ||
| 319 | struct net_bridge_fdb_entry *f; | 317 | struct net_bridge_fdb_entry *f; |
| 320 | 318 | ||
| 321 | memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); | 319 | memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); |
| 322 | 320 | ||
| 323 | rcu_read_lock(); | 321 | rcu_read_lock(); |
| 324 | for (i = 0; i < BR_HASH_SIZE; i++) { | 322 | for (i = 0; i < BR_HASH_SIZE; i++) { |
| 325 | hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { | 323 | hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { |
| 326 | if (num >= maxnum) | 324 | if (num >= maxnum) |
| 327 | goto out; | 325 | goto out; |
| 328 | 326 | ||
| @@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, | |||
| 363 | const unsigned char *addr, | 361 | const unsigned char *addr, |
| 364 | __u16 vid) | 362 | __u16 vid) |
| 365 | { | 363 | { |
| 366 | struct hlist_node *h; | ||
| 367 | struct net_bridge_fdb_entry *fdb; | 364 | struct net_bridge_fdb_entry *fdb; |
| 368 | 365 | ||
| 369 | hlist_for_each_entry(fdb, h, head, hlist) { | 366 | hlist_for_each_entry(fdb, head, hlist) { |
| 370 | if (ether_addr_equal(fdb->addr.addr, addr) && | 367 | if (ether_addr_equal(fdb->addr.addr, addr) && |
| 371 | fdb->vlan_id == vid) | 368 | fdb->vlan_id == vid) |
| 372 | return fdb; | 369 | return fdb; |
| @@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, | |||
| 378 | const unsigned char *addr, | 375 | const unsigned char *addr, |
| 379 | __u16 vid) | 376 | __u16 vid) |
| 380 | { | 377 | { |
| 381 | struct hlist_node *h; | ||
| 382 | struct net_bridge_fdb_entry *fdb; | 378 | struct net_bridge_fdb_entry *fdb; |
| 383 | 379 | ||
| 384 | hlist_for_each_entry_rcu(fdb, h, head, hlist) { | 380 | hlist_for_each_entry_rcu(fdb, head, hlist) { |
| 385 | if (ether_addr_equal(fdb->addr.addr, addr) && | 381 | if (ether_addr_equal(fdb->addr.addr, addr) && |
| 386 | fdb->vlan_id == vid) | 382 | fdb->vlan_id == vid) |
| 387 | return fdb; | 383 | return fdb; |
| @@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb, | |||
| 593 | goto out; | 589 | goto out; |
| 594 | 590 | ||
| 595 | for (i = 0; i < BR_HASH_SIZE; i++) { | 591 | for (i = 0; i < BR_HASH_SIZE; i++) { |
| 596 | struct hlist_node *h; | ||
| 597 | struct net_bridge_fdb_entry *f; | 592 | struct net_bridge_fdb_entry *f; |
| 598 | 593 | ||
| 599 | hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { | 594 | hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { |
| 600 | if (idx < cb->args[0]) | 595 | if (idx < cb->args[0]) |
| 601 | goto skip; | 596 | goto skip; |
| 602 | 597 | ||
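The bridge FDB hunks show the read side of the same macros: hash to a bucket, then walk it with hlist_for_each_entry_rcu() while holding rcu_read_lock(). A generic sketch with an illustrative entry type (fdb_entry and fdb_lookup_rcu are stand-ins, not the bridge's private types):

    #include <linux/etherdevice.h>
    #include <linux/rculist.h>

    struct fdb_entry {
            struct hlist_node hlist;
            unsigned char addr[ETH_ALEN];
    };

    /* caller must hold rcu_read_lock(); writers may unlink concurrently */
    static struct fdb_entry *fdb_lookup_rcu(struct hlist_head *head,
                                            const unsigned char *addr)
    {
            struct fdb_entry *fdb;

            hlist_for_each_entry_rcu(fdb, head, hlist) {
                    if (ether_addr_equal(fdb->addr, addr))
                            return fdb;
            }
            return NULL;
    }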
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 38991e03646d..9f97b850fc65 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
| @@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 18 | { | 18 | { |
| 19 | struct net_bridge *br = netdev_priv(dev); | 19 | struct net_bridge *br = netdev_priv(dev); |
| 20 | struct net_bridge_port *p; | 20 | struct net_bridge_port *p; |
| 21 | struct hlist_node *n; | ||
| 22 | struct nlattr *nest; | 21 | struct nlattr *nest; |
| 23 | 22 | ||
| 24 | if (!br->multicast_router || hlist_empty(&br->router_list)) | 23 | if (!br->multicast_router || hlist_empty(&br->router_list)) |
| @@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 28 | if (nest == NULL) | 27 | if (nest == NULL) |
| 29 | return -EMSGSIZE; | 28 | return -EMSGSIZE; |
| 30 | 29 | ||
| 31 | hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) { | 30 | hlist_for_each_entry_rcu(p, &br->router_list, rlist) { |
| 32 | if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex)) | 31 | if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex)) |
| 33 | goto fail; | 32 | goto fail; |
| 34 | } | 33 | } |
| @@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 61 | return -EMSGSIZE; | 60 | return -EMSGSIZE; |
| 62 | 61 | ||
| 63 | for (i = 0; i < mdb->max; i++) { | 62 | for (i = 0; i < mdb->max; i++) { |
| 64 | struct hlist_node *h; | ||
| 65 | struct net_bridge_mdb_entry *mp; | 63 | struct net_bridge_mdb_entry *mp; |
| 66 | struct net_bridge_port_group *p, **pp; | 64 | struct net_bridge_port_group *p, **pp; |
| 67 | struct net_bridge_port *port; | 65 | struct net_bridge_port *port; |
| 68 | 66 | ||
| 69 | hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) { | 67 | hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) { |
| 70 | if (idx < s_idx) | 68 | if (idx < s_idx) |
| 71 | goto skip; | 69 | goto skip; |
| 72 | 70 | ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7d886b0a8b7b..10e6fce1bb62 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get( | |||
| 86 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) | 86 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) |
| 87 | { | 87 | { |
| 88 | struct net_bridge_mdb_entry *mp; | 88 | struct net_bridge_mdb_entry *mp; |
| 89 | struct hlist_node *p; | ||
| 90 | 89 | ||
| 91 | hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 90 | hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { |
| 92 | if (br_ip_equal(&mp->addr, dst)) | 91 | if (br_ip_equal(&mp->addr, dst)) |
| 93 | return mp; | 92 | return mp; |
| 94 | } | 93 | } |
| @@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
| 178 | int elasticity) | 177 | int elasticity) |
| 179 | { | 178 | { |
| 180 | struct net_bridge_mdb_entry *mp; | 179 | struct net_bridge_mdb_entry *mp; |
| 181 | struct hlist_node *p; | ||
| 182 | int maxlen; | 180 | int maxlen; |
| 183 | int len; | 181 | int len; |
| 184 | int i; | 182 | int i; |
| 185 | 183 | ||
| 186 | for (i = 0; i < old->max; i++) | 184 | for (i = 0; i < old->max; i++) |
| 187 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) | 185 | hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) |
| 188 | hlist_add_head(&mp->hlist[new->ver], | 186 | hlist_add_head(&mp->hlist[new->ver], |
| 189 | &new->mhash[br_ip_hash(new, &mp->addr)]); | 187 | &new->mhash[br_ip_hash(new, &mp->addr)]); |
| 190 | 188 | ||
| @@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
| 194 | maxlen = 0; | 192 | maxlen = 0; |
| 195 | for (i = 0; i < new->max; i++) { | 193 | for (i = 0; i < new->max; i++) { |
| 196 | len = 0; | 194 | len = 0; |
| 197 | hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) | 195 | hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) |
| 198 | len++; | 196 | len++; |
| 199 | if (len > maxlen) | 197 | if (len > maxlen) |
| 200 | maxlen = len; | 198 | maxlen = len; |
| @@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
| 510 | { | 508 | { |
| 511 | struct net_bridge_mdb_htable *mdb; | 509 | struct net_bridge_mdb_htable *mdb; |
| 512 | struct net_bridge_mdb_entry *mp; | 510 | struct net_bridge_mdb_entry *mp; |
| 513 | struct hlist_node *p; | ||
| 514 | unsigned int count = 0; | 511 | unsigned int count = 0; |
| 515 | unsigned int max; | 512 | unsigned int max; |
| 516 | int elasticity; | 513 | int elasticity; |
| 517 | int err; | 514 | int err; |
| 518 | 515 | ||
| 519 | mdb = rcu_dereference_protected(br->mdb, 1); | 516 | mdb = rcu_dereference_protected(br->mdb, 1); |
| 520 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 517 | hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { |
| 521 | count++; | 518 | count++; |
| 522 | if (unlikely(br_ip_equal(group, &mp->addr))) | 519 | if (unlikely(br_ip_equal(group, &mp->addr))) |
| 523 | return mp; | 520 | return mp; |
| @@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
| 882 | { | 879 | { |
| 883 | struct net_bridge *br = port->br; | 880 | struct net_bridge *br = port->br; |
| 884 | struct net_bridge_port_group *pg; | 881 | struct net_bridge_port_group *pg; |
| 885 | struct hlist_node *p, *n; | 882 | struct hlist_node *n; |
| 886 | 883 | ||
| 887 | spin_lock(&br->multicast_lock); | 884 | spin_lock(&br->multicast_lock); |
| 888 | hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) | 885 | hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) |
| 889 | br_multicast_del_pg(br, pg); | 886 | br_multicast_del_pg(br, pg); |
| 890 | 887 | ||
| 891 | if (!hlist_unhashed(&port->rlist)) | 888 | if (!hlist_unhashed(&port->rlist)) |
| @@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br, | |||
| 1025 | struct net_bridge_port *port) | 1022 | struct net_bridge_port *port) |
| 1026 | { | 1023 | { |
| 1027 | struct net_bridge_port *p; | 1024 | struct net_bridge_port *p; |
| 1028 | struct hlist_node *n, *slot = NULL; | 1025 | struct hlist_node *slot = NULL; |
| 1029 | 1026 | ||
| 1030 | hlist_for_each_entry(p, n, &br->router_list, rlist) { | 1027 | hlist_for_each_entry(p, &br->router_list, rlist) { |
| 1031 | if ((unsigned long) port >= (unsigned long) p) | 1028 | if ((unsigned long) port >= (unsigned long) p) |
| 1032 | break; | 1029 | break; |
| 1033 | slot = n; | 1030 | slot = &p->rlist; |
| 1034 | } | 1031 | } |
| 1035 | 1032 | ||
| 1036 | if (slot) | 1033 | if (slot) |
| @@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br) | |||
| 1653 | { | 1650 | { |
| 1654 | struct net_bridge_mdb_htable *mdb; | 1651 | struct net_bridge_mdb_htable *mdb; |
| 1655 | struct net_bridge_mdb_entry *mp; | 1652 | struct net_bridge_mdb_entry *mp; |
| 1656 | struct hlist_node *p, *n; | 1653 | struct hlist_node *n; |
| 1657 | u32 ver; | 1654 | u32 ver; |
| 1658 | int i; | 1655 | int i; |
| 1659 | 1656 | ||
| @@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br) | |||
| 1670 | 1667 | ||
| 1671 | ver = mdb->ver; | 1668 | ver = mdb->ver; |
| 1672 | for (i = 0; i < mdb->max; i++) { | 1669 | for (i = 0; i < mdb->max; i++) { |
| 1673 | hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], | 1670 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], |
| 1674 | hlist[ver]) { | 1671 | hlist[ver]) { |
| 1675 | del_timer(&mp->timer); | 1672 | del_timer(&mp->timer); |
| 1676 | call_rcu_bh(&mp->rcu, br_multicast_free_group); | 1673 | call_rcu_bh(&mp->rcu, br_multicast_free_group); |
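br_multicast_add_router() is the one caller above where the removed cursor did real work, remembering an insertion position. The replacement keeps that position as a pointer to the embedded node of the last visited entry, which is what this era's hlist_add_after() (later renamed hlist_add_behind()) expects. A sketch with a stand-in type, preserving the descending-address order the bridge code maintains:

    #include <linux/list.h>

    struct rport {                          /* illustrative stand-in */
            struct hlist_node rlist;
    };

    static void add_sorted(struct hlist_head *list, struct rport *port)
    {
            struct rport *p;
            struct hlist_node *slot = NULL;

            hlist_for_each_entry(p, list, rlist) {
                    if ((unsigned long)port >= (unsigned long)p)
                            break;
                    slot = &p->rlist;       /* was slot = n, the lost cursor */
            }

            if (slot)
                    hlist_add_after(slot, &port->rlist);
            else
                    hlist_add_head(&port->rlist, list);
    }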
diff --git a/net/can/af_can.c b/net/can/af_can.c index ddac1ee2ed20..c48e5220bbac 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
| @@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
| 516 | { | 516 | { |
| 517 | struct receiver *r = NULL; | 517 | struct receiver *r = NULL; |
| 518 | struct hlist_head *rl; | 518 | struct hlist_head *rl; |
| 519 | struct hlist_node *next; | ||
| 520 | struct dev_rcv_lists *d; | 519 | struct dev_rcv_lists *d; |
| 521 | 520 | ||
| 522 | if (dev && dev->type != ARPHRD_CAN) | 521 | if (dev && dev->type != ARPHRD_CAN) |
| @@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
| 540 | * been registered before. | 539 | * been registered before. |
| 541 | */ | 540 | */ |
| 542 | 541 | ||
| 543 | hlist_for_each_entry_rcu(r, next, rl, list) { | 542 | hlist_for_each_entry_rcu(r, rl, list) { |
| 544 | if (r->can_id == can_id && r->mask == mask && | 543 | if (r->can_id == can_id && r->mask == mask && |
| 545 | r->func == func && r->data == data) | 544 | r->func == func && r->data == data) |
| 546 | break; | 545 | break; |
| @@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
| 552 | * will be NULL, while r will point to the last item of the list. | 551 | * will be NULL, while r will point to the last item of the list. |
| 553 | */ | 552 | */ |
| 554 | 553 | ||
| 555 | if (!next) { | 554 | if (!r) { |
| 556 | printk(KERN_ERR "BUG: receive list entry not found for " | 555 | printk(KERN_ERR "BUG: receive list entry not found for " |
| 557 | "dev %s, id %03X, mask %03X\n", | 556 | "dev %s, id %03X, mask %03X\n", |
| 558 | DNAME(dev), can_id, mask); | 557 | DNAME(dev), can_id, mask); |
| @@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r) | |||
| 590 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | 589 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) |
| 591 | { | 590 | { |
| 592 | struct receiver *r; | 591 | struct receiver *r; |
| 593 | struct hlist_node *n; | ||
| 594 | int matches = 0; | 592 | int matches = 0; |
| 595 | struct can_frame *cf = (struct can_frame *)skb->data; | 593 | struct can_frame *cf = (struct can_frame *)skb->data; |
| 596 | canid_t can_id = cf->can_id; | 594 | canid_t can_id = cf->can_id; |
| @@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
| 600 | 598 | ||
| 601 | if (can_id & CAN_ERR_FLAG) { | 599 | if (can_id & CAN_ERR_FLAG) { |
| 602 | /* check for error message frame entries only */ | 600 | /* check for error message frame entries only */ |
| 603 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { | 601 | hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) { |
| 604 | if (can_id & r->mask) { | 602 | if (can_id & r->mask) { |
| 605 | deliver(skb, r); | 603 | deliver(skb, r); |
| 606 | matches++; | 604 | matches++; |
| @@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
| 610 | } | 608 | } |
| 611 | 609 | ||
| 612 | /* check for unfiltered entries */ | 610 | /* check for unfiltered entries */ |
| 613 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) { | 611 | hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) { |
| 614 | deliver(skb, r); | 612 | deliver(skb, r); |
| 615 | matches++; | 613 | matches++; |
| 616 | } | 614 | } |
| 617 | 615 | ||
| 618 | /* check for can_id/mask entries */ | 616 | /* check for can_id/mask entries */ |
| 619 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) { | 617 | hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) { |
| 620 | if ((can_id & r->mask) == r->can_id) { | 618 | if ((can_id & r->mask) == r->can_id) { |
| 621 | deliver(skb, r); | 619 | deliver(skb, r); |
| 622 | matches++; | 620 | matches++; |
| @@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
| 624 | } | 622 | } |
| 625 | 623 | ||
| 626 | /* check for inverted can_id/mask entries */ | 624 | /* check for inverted can_id/mask entries */ |
| 627 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) { | 625 | hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) { |
| 628 | if ((can_id & r->mask) != r->can_id) { | 626 | if ((can_id & r->mask) != r->can_id) { |
| 629 | deliver(skb, r); | 627 | deliver(skb, r); |
| 630 | matches++; | 628 | matches++; |
| @@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
| 636 | return matches; | 634 | return matches; |
| 637 | 635 | ||
| 638 | if (can_id & CAN_EFF_FLAG) { | 636 | if (can_id & CAN_EFF_FLAG) { |
| 639 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { | 637 | hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) { |
| 640 | if (r->can_id == can_id) { | 638 | if (r->can_id == can_id) { |
| 641 | deliver(skb, r); | 639 | deliver(skb, r); |
| 642 | matches++; | 640 | matches++; |
| @@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
| 644 | } | 642 | } |
| 645 | } else { | 643 | } else { |
| 646 | can_id &= CAN_SFF_MASK; | 644 | can_id &= CAN_SFF_MASK; |
| 647 | hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) { | 645 | hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) { |
| 648 | deliver(skb, r); | 646 | deliver(skb, r); |
| 649 | matches++; | 647 | matches++; |
| 650 | } | 648 | } |
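
The af_can.c hunks above (and the gw.c and proc.c hunks below) track the tree-wide hlist iterator cleanup: the entry-based macros no longer take an explicit struct hlist_node * cursor and instead recover their position from the entry pointer itself. A minimal before/after sketch of the RCU variant, abbreviated from the receiver lists above (not compilable as one unit, since the macro has only one arity per kernel version):

    #include <linux/rculist.h>

    struct receiver {
            struct hlist_node list;
            /* ... can_id, mask, func, data ... */
    };

    /* old form: a separate cursor 'n' had to be declared and passed */
    struct hlist_node *n;
    hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list)
            deliver(skb, r);

    /* new form: the macro derives the next position from 'r' itself,
     * so the now-dead cursor declarations are simply deleted */
    hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list)
            deliver(skb, r);
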
diff --git a/net/can/gw.c b/net/can/gw.c index c185fcd5e828..2d117dc5ebea 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
| @@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb, | |||
| 457 | if (msg == NETDEV_UNREGISTER) { | 457 | if (msg == NETDEV_UNREGISTER) { |
| 458 | 458 | ||
| 459 | struct cgw_job *gwj = NULL; | 459 | struct cgw_job *gwj = NULL; |
| 460 | struct hlist_node *n, *nx; | 460 | struct hlist_node *nx; |
| 461 | 461 | ||
| 462 | ASSERT_RTNL(); | 462 | ASSERT_RTNL(); |
| 463 | 463 | ||
| 464 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 464 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
| 465 | 465 | ||
| 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
| 467 | hlist_del(&gwj->list); | 467 | hlist_del(&gwj->list); |
| @@ -575,12 +575,11 @@ cancel: | |||
| 575 | static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) | 575 | static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) |
| 576 | { | 576 | { |
| 577 | struct cgw_job *gwj = NULL; | 577 | struct cgw_job *gwj = NULL; |
| 578 | struct hlist_node *n; | ||
| 579 | int idx = 0; | 578 | int idx = 0; |
| 580 | int s_idx = cb->args[0]; | 579 | int s_idx = cb->args[0]; |
| 581 | 580 | ||
| 582 | rcu_read_lock(); | 581 | rcu_read_lock(); |
| 583 | hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) { | 582 | hlist_for_each_entry_rcu(gwj, &cgw_list, list) { |
| 584 | if (idx < s_idx) | 583 | if (idx < s_idx) |
| 585 | goto cont; | 584 | goto cont; |
| 586 | 585 | ||
| @@ -858,11 +857,11 @@ out: | |||
| 858 | static void cgw_remove_all_jobs(void) | 857 | static void cgw_remove_all_jobs(void) |
| 859 | { | 858 | { |
| 860 | struct cgw_job *gwj = NULL; | 859 | struct cgw_job *gwj = NULL; |
| 861 | struct hlist_node *n, *nx; | 860 | struct hlist_node *nx; |
| 862 | 861 | ||
| 863 | ASSERT_RTNL(); | 862 | ASSERT_RTNL(); |
| 864 | 863 | ||
| 865 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
| 866 | hlist_del(&gwj->list); | 865 | hlist_del(&gwj->list); |
| 867 | cgw_unregister_filter(gwj); | 866 | cgw_unregister_filter(gwj); |
| 868 | kfree(gwj); | 867 | kfree(gwj); |
| @@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void) | |||
| 872 | static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 871 | static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
| 873 | { | 872 | { |
| 874 | struct cgw_job *gwj = NULL; | 873 | struct cgw_job *gwj = NULL; |
| 875 | struct hlist_node *n, *nx; | 874 | struct hlist_node *nx; |
| 876 | struct rtcanmsg *r; | 875 | struct rtcanmsg *r; |
| 877 | struct cf_mod mod; | 876 | struct cf_mod mod; |
| 878 | struct can_can_gw ccgw; | 877 | struct can_can_gw ccgw; |
| @@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
| 907 | ASSERT_RTNL(); | 906 | ASSERT_RTNL(); |
| 908 | 907 | ||
| 909 | /* remove only the first matching entry */ | 908 | /* remove only the first matching entry */ |
| 910 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 909 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
| 911 | 910 | ||
| 912 | if (gwj->flags != r->flags) | 911 | if (gwj->flags != r->flags) |
| 913 | continue; | 912 | continue; |
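
The _safe variant in gw.c gets the same treatment, losing only the leading cursor; the one node pointer that remains is the lookahead that makes unlinking the current entry mid-walk legal. The resulting idiom, sketched from the removal paths above:

    struct cgw_job *gwj;
    struct hlist_node *nx;          /* lookahead kept by the new macro */

    hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
            hlist_del(&gwj->list);
            kfree(gwj);             /* safe: nx already points past gwj */
    }
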
diff --git a/net/can/proc.c b/net/can/proc.c index 497335892146..1ab8c888f102 100644 --- a/net/can/proc.c +++ b/net/can/proc.c | |||
| @@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, | |||
| 195 | struct net_device *dev) | 195 | struct net_device *dev) |
| 196 | { | 196 | { |
| 197 | struct receiver *r; | 197 | struct receiver *r; |
| 198 | struct hlist_node *n; | ||
| 199 | 198 | ||
| 200 | hlist_for_each_entry_rcu(r, n, rx_list, list) { | 199 | hlist_for_each_entry_rcu(r, rx_list, list) { |
| 201 | char *fmt = (r->can_id & CAN_EFF_FLAG)? | 200 | char *fmt = (r->can_id & CAN_EFF_FLAG)? |
| 202 | " %-5s %08x %08x %pK %pK %8ld %s\n" : | 201 | " %-5s %08x %08x %pK %pK %8ld %s\n" : |
| 203 | " %-5s %03x %08x %pK %pK %8ld %s\n"; | 202 | " %-5s %03x %08x %pK %pK %8ld %s\n"; |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index ee71ea26777a..e65e6e4be38b 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
| @@ -15,6 +15,8 @@ | |||
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/statfs.h> | 16 | #include <linux/statfs.h> |
| 17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
| 18 | #include <linux/nsproxy.h> | ||
| 19 | #include <net/net_namespace.h> | ||
| 18 | 20 | ||
| 19 | 21 | ||
| 20 | #include <linux/ceph/ceph_features.h> | 22 | #include <linux/ceph/ceph_features.h> |
| @@ -26,6 +28,22 @@ | |||
| 26 | #include "crypto.h" | 28 | #include "crypto.h" |
| 27 | 29 | ||
| 28 | 30 | ||
| 31 | /* | ||
| 32 | * Module compatibility interface. For now it doesn't do anything, | ||
| 33 | * but its existence signals a certain level of functionality. | ||
| 34 | * | ||
| 35 | * The data buffer is used to pass information both to and from | ||
| 36 | * libceph. The return value indicates whether libceph determines | ||
| 37 | * it is compatible with the caller (from another kernel module), | ||
| 38 | * given the provided data. | ||
| 39 | * | ||
| 40 | * The data pointer can be null. | ||
| 41 | */ | ||
| 42 | bool libceph_compatible(void *data) | ||
| 43 | { | ||
| 44 | return true; | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL(libceph_compatible); | ||
| 29 | 47 | ||
| 30 | /* | 48 | /* |
| 31 | * find filename portion of a path (/foo/bar/baz -> baz) | 49 | * find filename portion of a path (/foo/bar/baz -> baz) |
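
libceph_compatible() gives modules stacked on libceph (the ceph filesystem, rbd) an init-time probe point; today it always reports success, but a caller written against it would look roughly like this (the wrapper function below is illustrative, not part of this diff):

    /* hypothetical init hook in a module that depends on libceph */
    static int __init my_client_init(void)
    {
            if (!libceph_compatible(NULL)) {
                    pr_err("libceph is not compatible with this module\n");
                    return -EINVAL;
            }
            return 0;       /* rest of initialization elided */
    }
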
| @@ -292,6 +310,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
| 292 | int err = -ENOMEM; | 310 | int err = -ENOMEM; |
| 293 | substring_t argstr[MAX_OPT_ARGS]; | 311 | substring_t argstr[MAX_OPT_ARGS]; |
| 294 | 312 | ||
| 313 | if (current->nsproxy->net_ns != &init_net) | ||
| 314 | return ERR_PTR(-EINVAL); | ||
| 315 | |||
| 295 | opt = kzalloc(sizeof(*opt), GFP_KERNEL); | 316 | opt = kzalloc(sizeof(*opt), GFP_KERNEL); |
| 296 | if (!opt) | 317 | if (!opt) |
| 297 | return ERR_PTR(-ENOMEM); | 318 | return ERR_PTR(-ENOMEM); |
| @@ -585,10 +606,8 @@ static int __init init_ceph_lib(void) | |||
| 585 | if (ret < 0) | 606 | if (ret < 0) |
| 586 | goto out_crypto; | 607 | goto out_crypto; |
| 587 | 608 | ||
| 588 | pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n", | 609 | pr_info("loaded (mon/osd proto %d/%d)\n", |
| 589 | CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL, | 610 | CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL); |
| 590 | CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT, | ||
| 591 | CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT); | ||
| 592 | 611 | ||
| 593 | return 0; | 612 | return 0; |
| 594 | 613 | ||
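
The new guard in ceph_parse_options() turns missing namespace support into an explicit early failure: the messenger's sockets are created with no reference to the caller's network namespace, so any mount attempted from a non-initial netns is refused before a client is ever built. From a caller's perspective (the trailing arguments follow the function's signature; the surrounding code is illustrative):

    struct ceph_options *opt;

    opt = ceph_parse_options(options, dev_name, dev_name_end,
                             parse_extra_token, private);
    if (IS_ERR(opt))
            return PTR_ERR(opt);    /* -EINVAL inside a container netns */
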
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c index 3fbda04de29c..1348df96fe15 100644 --- a/net/ceph/ceph_strings.c +++ b/net/ceph/ceph_strings.c | |||
| @@ -21,9 +21,15 @@ const char *ceph_osd_op_name(int op) | |||
| 21 | switch (op) { | 21 | switch (op) { |
| 22 | case CEPH_OSD_OP_READ: return "read"; | 22 | case CEPH_OSD_OP_READ: return "read"; |
| 23 | case CEPH_OSD_OP_STAT: return "stat"; | 23 | case CEPH_OSD_OP_STAT: return "stat"; |
| 24 | case CEPH_OSD_OP_MAPEXT: return "mapext"; | ||
| 25 | case CEPH_OSD_OP_SPARSE_READ: return "sparse-read"; | ||
| 26 | case CEPH_OSD_OP_NOTIFY: return "notify"; | ||
| 27 | case CEPH_OSD_OP_NOTIFY_ACK: return "notify-ack"; | ||
| 28 | case CEPH_OSD_OP_ASSERT_VER: return "assert-version"; | ||
| 24 | 29 | ||
| 25 | case CEPH_OSD_OP_MASKTRUNC: return "masktrunc"; | 30 | case CEPH_OSD_OP_MASKTRUNC: return "masktrunc"; |
| 26 | 31 | ||
| 32 | case CEPH_OSD_OP_CREATE: return "create"; | ||
| 27 | case CEPH_OSD_OP_WRITE: return "write"; | 33 | case CEPH_OSD_OP_WRITE: return "write"; |
| 28 | case CEPH_OSD_OP_DELETE: return "delete"; | 34 | case CEPH_OSD_OP_DELETE: return "delete"; |
| 29 | case CEPH_OSD_OP_TRUNCATE: return "truncate"; | 35 | case CEPH_OSD_OP_TRUNCATE: return "truncate"; |
| @@ -39,6 +45,11 @@ const char *ceph_osd_op_name(int op) | |||
| 39 | case CEPH_OSD_OP_TMAPUP: return "tmapup"; | 45 | case CEPH_OSD_OP_TMAPUP: return "tmapup"; |
| 40 | case CEPH_OSD_OP_TMAPGET: return "tmapget"; | 46 | case CEPH_OSD_OP_TMAPGET: return "tmapget"; |
| 41 | case CEPH_OSD_OP_TMAPPUT: return "tmapput"; | 47 | case CEPH_OSD_OP_TMAPPUT: return "tmapput"; |
| 48 | case CEPH_OSD_OP_WATCH: return "watch"; | ||
| 49 | |||
| 50 | case CEPH_OSD_OP_CLONERANGE: return "clonerange"; | ||
| 51 | case CEPH_OSD_OP_ASSERT_SRC_VERSION: return "assert-src-version"; | ||
| 52 | case CEPH_OSD_OP_SRC_CMPXATTR: return "src-cmpxattr"; | ||
| 42 | 53 | ||
| 43 | case CEPH_OSD_OP_GETXATTR: return "getxattr"; | 54 | case CEPH_OSD_OP_GETXATTR: return "getxattr"; |
| 44 | case CEPH_OSD_OP_GETXATTRS: return "getxattrs"; | 55 | case CEPH_OSD_OP_GETXATTRS: return "getxattrs"; |
| @@ -53,6 +64,10 @@ const char *ceph_osd_op_name(int op) | |||
| 53 | case CEPH_OSD_OP_BALANCEREADS: return "balance-reads"; | 64 | case CEPH_OSD_OP_BALANCEREADS: return "balance-reads"; |
| 54 | case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads"; | 65 | case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads"; |
| 55 | case CEPH_OSD_OP_SCRUB: return "scrub"; | 66 | case CEPH_OSD_OP_SCRUB: return "scrub"; |
| 67 | case CEPH_OSD_OP_SCRUB_RESERVE: return "scrub-reserve"; | ||
| 68 | case CEPH_OSD_OP_SCRUB_UNRESERVE: return "scrub-unreserve"; | ||
| 69 | case CEPH_OSD_OP_SCRUB_STOP: return "scrub-stop"; | ||
| 70 | case CEPH_OSD_OP_SCRUB_MAP: return "scrub-map"; | ||
| 56 | 71 | ||
| 57 | case CEPH_OSD_OP_WRLOCK: return "wrlock"; | 72 | case CEPH_OSD_OP_WRLOCK: return "wrlock"; |
| 58 | case CEPH_OSD_OP_WRUNLOCK: return "wrunlock"; | 73 | case CEPH_OSD_OP_WRUNLOCK: return "wrunlock"; |
| @@ -64,10 +79,34 @@ const char *ceph_osd_op_name(int op) | |||
| 64 | case CEPH_OSD_OP_CALL: return "call"; | 79 | case CEPH_OSD_OP_CALL: return "call"; |
| 65 | 80 | ||
| 66 | case CEPH_OSD_OP_PGLS: return "pgls"; | 81 | case CEPH_OSD_OP_PGLS: return "pgls"; |
| 82 | case CEPH_OSD_OP_PGLS_FILTER: return "pgls-filter"; | ||
| 83 | case CEPH_OSD_OP_OMAPGETKEYS: return "omap-get-keys"; | ||
| 84 | case CEPH_OSD_OP_OMAPGETVALS: return "omap-get-vals"; | ||
| 85 | case CEPH_OSD_OP_OMAPGETHEADER: return "omap-get-header"; | ||
| 86 | case CEPH_OSD_OP_OMAPGETVALSBYKEYS: return "omap-get-vals-by-keys"; | ||
| 87 | case CEPH_OSD_OP_OMAPSETVALS: return "omap-set-vals"; | ||
| 88 | case CEPH_OSD_OP_OMAPSETHEADER: return "omap-set-header"; | ||
| 89 | case CEPH_OSD_OP_OMAPCLEAR: return "omap-clear"; | ||
| 90 | case CEPH_OSD_OP_OMAPRMKEYS: return "omap-rm-keys"; | ||
| 67 | } | 91 | } |
| 68 | return "???"; | 92 | return "???"; |
| 69 | } | 93 | } |
| 70 | 94 | ||
| 95 | const char *ceph_osd_state_name(int s) | ||
| 96 | { | ||
| 97 | switch (s) { | ||
| 98 | case CEPH_OSD_EXISTS: | ||
| 99 | return "exists"; | ||
| 100 | case CEPH_OSD_UP: | ||
| 101 | return "up"; | ||
| 102 | case CEPH_OSD_AUTOOUT: | ||
| 103 | return "autoout"; | ||
| 104 | case CEPH_OSD_NEW: | ||
| 105 | return "new"; | ||
| 106 | default: | ||
| 107 | return "???"; | ||
| 108 | } | ||
| 109 | } | ||
| 71 | 110 | ||
| 72 | const char *ceph_pool_op_name(int op) | 111 | const char *ceph_pool_op_name(int op) |
| 73 | { | 112 | { |
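
With the op table filled out and ceph_osd_state_name() added, debug output can name both sides of an OSD transaction instead of printing raw numbers. An illustrative use (the osd_state array is assumed from struct ceph_osdmap and is not part of this diff):

    /* illustrative debug line combining both helpers */
    pr_debug("osd%d state %s, op %s\n", osd,
             ceph_osd_state_name(map->osd_state[osd]),
             ceph_osd_op_name(le16_to_cpu(op->op)));
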
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 35fce755ce10..cbd06a91941c 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c | |||
| @@ -287,6 +287,7 @@ static int is_out(const struct crush_map *map, const __u32 *weight, int item, in | |||
| 287 | * @outpos: our position in that vector | 287 | * @outpos: our position in that vector |
| 288 | * @firstn: true if choosing "first n" items, false if choosing "indep" | 288 | * @firstn: true if choosing "first n" items, false if choosing "indep" |
| 289 | * @recurse_to_leaf: true if we want one device under each item of given type | 289 | * @recurse_to_leaf: true if we want one device under each item of given type |
| 290 | * @descend_once: true if we should only try one descent before giving up | ||
| 290 | * @out2: second output vector for leaf items (if @recurse_to_leaf) | 291 | * @out2: second output vector for leaf items (if @recurse_to_leaf) |
| 291 | */ | 292 | */ |
| 292 | static int crush_choose(const struct crush_map *map, | 293 | static int crush_choose(const struct crush_map *map, |
| @@ -295,7 +296,7 @@ static int crush_choose(const struct crush_map *map, | |||
| 295 | int x, int numrep, int type, | 296 | int x, int numrep, int type, |
| 296 | int *out, int outpos, | 297 | int *out, int outpos, |
| 297 | int firstn, int recurse_to_leaf, | 298 | int firstn, int recurse_to_leaf, |
| 298 | int *out2) | 299 | int descend_once, int *out2) |
| 299 | { | 300 | { |
| 300 | int rep; | 301 | int rep; |
| 301 | unsigned int ftotal, flocal; | 302 | unsigned int ftotal, flocal; |
| @@ -391,7 +392,7 @@ static int crush_choose(const struct crush_map *map, | |||
| 391 | } | 392 | } |
| 392 | 393 | ||
| 393 | reject = 0; | 394 | reject = 0; |
| 394 | if (recurse_to_leaf) { | 395 | if (!collide && recurse_to_leaf) { |
| 395 | if (item < 0) { | 396 | if (item < 0) { |
| 396 | if (crush_choose(map, | 397 | if (crush_choose(map, |
| 397 | map->buckets[-1-item], | 398 | map->buckets[-1-item], |
| @@ -399,6 +400,7 @@ static int crush_choose(const struct crush_map *map, | |||
| 399 | x, outpos+1, 0, | 400 | x, outpos+1, 0, |
| 400 | out2, outpos, | 401 | out2, outpos, |
| 401 | firstn, 0, | 402 | firstn, 0, |
| 403 | map->chooseleaf_descend_once, | ||
| 402 | NULL) <= outpos) | 404 | NULL) <= outpos) |
| 403 | /* didn't get leaf */ | 405 | /* didn't get leaf */ |
| 404 | reject = 1; | 406 | reject = 1; |
| @@ -422,7 +424,10 @@ reject: | |||
| 422 | ftotal++; | 424 | ftotal++; |
| 423 | flocal++; | 425 | flocal++; |
| 424 | 426 | ||
| 425 | if (collide && flocal <= map->choose_local_tries) | 427 | if (reject && descend_once) |
| 428 | /* let outer call try again */ | ||
| 429 | skip_rep = 1; | ||
| 430 | else if (collide && flocal <= map->choose_local_tries) | ||
| 426 | /* retry locally a few times */ | 431 | /* retry locally a few times */ |
| 427 | retry_bucket = 1; | 432 | retry_bucket = 1; |
| 428 | else if (map->choose_local_fallback_tries > 0 && | 433 | else if (map->choose_local_fallback_tries > 0 && |
| @@ -485,6 +490,7 @@ int crush_do_rule(const struct crush_map *map, | |||
| 485 | int i, j; | 490 | int i, j; |
| 486 | int numrep; | 491 | int numrep; |
| 487 | int firstn; | 492 | int firstn; |
| 493 | const int descend_once = 0; | ||
| 488 | 494 | ||
| 489 | if ((__u32)ruleno >= map->max_rules) { | 495 | if ((__u32)ruleno >= map->max_rules) { |
| 490 | dprintk(" bad ruleno %d\n", ruleno); | 496 | dprintk(" bad ruleno %d\n", ruleno); |
| @@ -544,7 +550,8 @@ int crush_do_rule(const struct crush_map *map, | |||
| 544 | curstep->arg2, | 550 | curstep->arg2, |
| 545 | o+osize, j, | 551 | o+osize, j, |
| 546 | firstn, | 552 | firstn, |
| 547 | recurse_to_leaf, c+osize); | 553 | recurse_to_leaf, |
| 554 | descend_once, c+osize); | ||
| 548 | } | 555 | } |
| 549 | 556 | ||
| 550 | if (recurse_to_leaf) | 557 | if (recurse_to_leaf) |
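
The mapper change threads a descend_once flag through crush_choose(): the recursive leaf descent now passes the map's chooseleaf_descend_once tunable, while the top-level call from crush_do_rule() always passes 0. When the tunable is set, a failed (or now also colliding) leaf descent gives up immediately instead of retrying within the same subtree, so the outer call reselects a different bucket and placement is not biased toward the first subtree tried. The retry ladder reduces to:

    if (reject && descend_once)
            /* inner leaf descent: fail fast, caller retries elsewhere */
            skip_rep = 1;
    else if (collide && flocal <= map->choose_local_tries)
            /* normal path: a few local retries inside this bucket */
            retry_bucket = 1;
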
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index af14cb425164..6e7a236525b6 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c | |||
| @@ -423,7 +423,8 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, | |||
| 423 | } | 423 | } |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep) | 426 | static int ceph_key_instantiate(struct key *key, |
| 427 | struct key_preparsed_payload *prep) | ||
| 427 | { | 428 | { |
| 428 | struct ceph_crypto_key *ckey; | 429 | struct ceph_crypto_key *ckey; |
| 429 | size_t datalen = prep->datalen; | 430 | size_t datalen = prep->datalen; |
| @@ -458,12 +459,12 @@ err: | |||
| 458 | return ret; | 459 | return ret; |
| 459 | } | 460 | } |
| 460 | 461 | ||
| 461 | int ceph_key_match(const struct key *key, const void *description) | 462 | static int ceph_key_match(const struct key *key, const void *description) |
| 462 | { | 463 | { |
| 463 | return strcmp(key->description, description) == 0; | 464 | return strcmp(key->description, description) == 0; |
| 464 | } | 465 | } |
| 465 | 466 | ||
| 466 | void ceph_key_destroy(struct key *key) { | 467 | static void ceph_key_destroy(struct key *key) { |
| 467 | struct ceph_crypto_key *ckey = key->payload.data; | 468 | struct ceph_crypto_key *ckey = key->payload.data; |
| 468 | 469 | ||
| 469 | ceph_crypto_key_destroy(ckey); | 470 | ceph_crypto_key_destroy(ckey); |
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 38b5dc1823d4..00d051f4894e 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c | |||
| @@ -66,9 +66,9 @@ static int osdmap_show(struct seq_file *s, void *p) | |||
| 66 | for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { | 66 | for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { |
| 67 | struct ceph_pg_pool_info *pool = | 67 | struct ceph_pg_pool_info *pool = |
| 68 | rb_entry(n, struct ceph_pg_pool_info, node); | 68 | rb_entry(n, struct ceph_pg_pool_info, node); |
| 69 | seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", | 69 | seq_printf(s, "pg_pool %llu pg_num %d / %d\n", |
| 70 | pool->id, pool->v.pg_num, pool->pg_num_mask, | 70 | (unsigned long long)pool->id, pool->pg_num, |
| 71 | pool->v.lpg_num, pool->lpg_num_mask); | 71 | pool->pg_num_mask); |
| 72 | } | 72 | } |
| 73 | for (i = 0; i < client->osdc.osdmap->max_osd; i++) { | 73 | for (i = 0; i < client->osdc.osdmap->max_osd; i++) { |
| 74 | struct ceph_entity_addr *addr = | 74 | struct ceph_entity_addr *addr = |
| @@ -123,26 +123,16 @@ static int osdc_show(struct seq_file *s, void *pp) | |||
| 123 | mutex_lock(&osdc->request_mutex); | 123 | mutex_lock(&osdc->request_mutex); |
| 124 | for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { | 124 | for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { |
| 125 | struct ceph_osd_request *req; | 125 | struct ceph_osd_request *req; |
| 126 | struct ceph_osd_request_head *head; | 126 | int opcode; |
| 127 | struct ceph_osd_op *op; | ||
| 128 | int num_ops; | ||
| 129 | int opcode, olen; | ||
| 130 | int i; | 127 | int i; |
| 131 | 128 | ||
| 132 | req = rb_entry(p, struct ceph_osd_request, r_node); | 129 | req = rb_entry(p, struct ceph_osd_request, r_node); |
| 133 | 130 | ||
| 134 | seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, | 131 | seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid, |
| 135 | req->r_osd ? req->r_osd->o_osd : -1, | 132 | req->r_osd ? req->r_osd->o_osd : -1, |
| 136 | le32_to_cpu(req->r_pgid.pool), | 133 | req->r_pgid.pool, req->r_pgid.seed); |
| 137 | le16_to_cpu(req->r_pgid.ps)); | ||
| 138 | 134 | ||
| 139 | head = req->r_request->front.iov_base; | 135 | seq_printf(s, "%.*s", req->r_oid_len, req->r_oid); |
| 140 | op = (void *)(head + 1); | ||
| 141 | |||
| 142 | num_ops = le16_to_cpu(head->num_ops); | ||
| 143 | olen = le32_to_cpu(head->object_len); | ||
| 144 | seq_printf(s, "%.*s", olen, | ||
| 145 | (const char *)(head->ops + num_ops)); | ||
| 146 | 136 | ||
| 147 | if (req->r_reassert_version.epoch) | 137 | if (req->r_reassert_version.epoch) |
| 148 | seq_printf(s, "\t%u'%llu", | 138 | seq_printf(s, "\t%u'%llu", |
| @@ -151,10 +141,9 @@ static int osdc_show(struct seq_file *s, void *pp) | |||
| 151 | else | 141 | else |
| 152 | seq_printf(s, "\t"); | 142 | seq_printf(s, "\t"); |
| 153 | 143 | ||
| 154 | for (i = 0; i < num_ops; i++) { | 144 | for (i = 0; i < req->r_num_ops; i++) { |
| 155 | opcode = le16_to_cpu(op->op); | 145 | opcode = le16_to_cpu(req->r_request_ops[i].op); |
| 156 | seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); | 146 | seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); |
| 157 | op++; | ||
| 158 | } | 147 | } |
| 159 | 148 | ||
| 160 | seq_printf(s, "\n"); | 149 | seq_printf(s, "\n"); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 5ccf87ed8d68..2c0669fb54e3 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -9,8 +9,9 @@ | |||
| 9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
| 10 | #include <linux/socket.h> | 10 | #include <linux/socket.h> |
| 11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 12 | #ifdef CONFIG_BLOCK | ||
| 12 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
| 13 | #include <linux/blkdev.h> | 14 | #endif /* CONFIG_BLOCK */ |
| 14 | #include <linux/dns_resolver.h> | 15 | #include <linux/dns_resolver.h> |
| 15 | #include <net/tcp.h> | 16 | #include <net/tcp.h> |
| 16 | 17 | ||
| @@ -97,6 +98,57 @@ | |||
| 97 | #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */ | 98 | #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */ |
| 98 | #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */ | 99 | #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */ |
| 99 | 100 | ||
| 101 | static bool con_flag_valid(unsigned long con_flag) | ||
| 102 | { | ||
| 103 | switch (con_flag) { | ||
| 104 | case CON_FLAG_LOSSYTX: | ||
| 105 | case CON_FLAG_KEEPALIVE_PENDING: | ||
| 106 | case CON_FLAG_WRITE_PENDING: | ||
| 107 | case CON_FLAG_SOCK_CLOSED: | ||
| 108 | case CON_FLAG_BACKOFF: | ||
| 109 | return true; | ||
| 110 | default: | ||
| 111 | return false; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag) | ||
| 116 | { | ||
| 117 | BUG_ON(!con_flag_valid(con_flag)); | ||
| 118 | |||
| 119 | clear_bit(con_flag, &con->flags); | ||
| 120 | } | ||
| 121 | |||
| 122 | static void con_flag_set(struct ceph_connection *con, unsigned long con_flag) | ||
| 123 | { | ||
| 124 | BUG_ON(!con_flag_valid(con_flag)); | ||
| 125 | |||
| 126 | set_bit(con_flag, &con->flags); | ||
| 127 | } | ||
| 128 | |||
| 129 | static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag) | ||
| 130 | { | ||
| 131 | BUG_ON(!con_flag_valid(con_flag)); | ||
| 132 | |||
| 133 | return test_bit(con_flag, &con->flags); | ||
| 134 | } | ||
| 135 | |||
| 136 | static bool con_flag_test_and_clear(struct ceph_connection *con, | ||
| 137 | unsigned long con_flag) | ||
| 138 | { | ||
| 139 | BUG_ON(!con_flag_valid(con_flag)); | ||
| 140 | |||
| 141 | return test_and_clear_bit(con_flag, &con->flags); | ||
| 142 | } | ||
| 143 | |||
| 144 | static bool con_flag_test_and_set(struct ceph_connection *con, | ||
| 145 | unsigned long con_flag) | ||
| 146 | { | ||
| 147 | BUG_ON(!con_flag_valid(con_flag)); | ||
| 148 | |||
| 149 | return test_and_set_bit(con_flag, &con->flags); | ||
| 150 | } | ||
| 151 | |||
| 100 | /* static tag bytes (protocol control messages) */ | 152 | /* static tag bytes (protocol control messages) */ |
| 101 | static char tag_msg = CEPH_MSGR_TAG_MSG; | 153 | static char tag_msg = CEPH_MSGR_TAG_MSG; |
| 102 | static char tag_ack = CEPH_MSGR_TAG_ACK; | 154 | static char tag_ack = CEPH_MSGR_TAG_ACK; |
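
The con_flag_*() wrappers cost nothing at runtime (they compile down to the same bitops) and buy a BUG_ON() that catches a mistyped or out-of-range flag at the call site, rather than silently flipping an unrelated bit in con->flags. Typical conversions, as applied throughout the rest of this file:

    /* before: raw bitops, any bit number accepted */
    set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
    if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags))
            queue_con(con);

    /* after: same semantics, argument validated first */
    con_flag_set(con, CON_FLAG_WRITE_PENDING);
    if (con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
            queue_con(con);
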
| @@ -114,7 +166,7 @@ static struct lock_class_key socket_class; | |||
| 114 | 166 | ||
| 115 | static void queue_con(struct ceph_connection *con); | 167 | static void queue_con(struct ceph_connection *con); |
| 116 | static void con_work(struct work_struct *); | 168 | static void con_work(struct work_struct *); |
| 117 | static void ceph_fault(struct ceph_connection *con); | 169 | static void con_fault(struct ceph_connection *con); |
| 118 | 170 | ||
| 119 | /* | 171 | /* |
| 120 | * Nicely render a sockaddr as a string. An array of formatted | 172 | * Nicely render a sockaddr as a string. An array of formatted |
| @@ -171,7 +223,7 @@ static void encode_my_addr(struct ceph_messenger *msgr) | |||
| 171 | */ | 223 | */ |
| 172 | static struct workqueue_struct *ceph_msgr_wq; | 224 | static struct workqueue_struct *ceph_msgr_wq; |
| 173 | 225 | ||
| 174 | void _ceph_msgr_exit(void) | 226 | static void _ceph_msgr_exit(void) |
| 175 | { | 227 | { |
| 176 | if (ceph_msgr_wq) { | 228 | if (ceph_msgr_wq) { |
| 177 | destroy_workqueue(ceph_msgr_wq); | 229 | destroy_workqueue(ceph_msgr_wq); |
| @@ -308,7 +360,7 @@ static void ceph_sock_write_space(struct sock *sk) | |||
| 308 | * buffer. See net/ipv4/tcp_input.c:tcp_check_space() | 360 | * buffer. See net/ipv4/tcp_input.c:tcp_check_space() |
| 309 | * and net/core/stream.c:sk_stream_write_space(). | 361 | * and net/core/stream.c:sk_stream_write_space(). |
| 310 | */ | 362 | */ |
| 311 | if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) { | 363 | if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) { |
| 312 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { | 364 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { |
| 313 | dout("%s %p queueing write work\n", __func__, con); | 365 | dout("%s %p queueing write work\n", __func__, con); |
| 314 | clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 366 | clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
| @@ -333,7 +385,7 @@ static void ceph_sock_state_change(struct sock *sk) | |||
| 333 | case TCP_CLOSE_WAIT: | 385 | case TCP_CLOSE_WAIT: |
| 334 | dout("%s TCP_CLOSE_WAIT\n", __func__); | 386 | dout("%s TCP_CLOSE_WAIT\n", __func__); |
| 335 | con_sock_state_closing(con); | 387 | con_sock_state_closing(con); |
| 336 | set_bit(CON_FLAG_SOCK_CLOSED, &con->flags); | 388 | con_flag_set(con, CON_FLAG_SOCK_CLOSED); |
| 337 | queue_con(con); | 389 | queue_con(con); |
| 338 | break; | 390 | break; |
| 339 | case TCP_ESTABLISHED: | 391 | case TCP_ESTABLISHED: |
| @@ -474,7 +526,7 @@ static int con_close_socket(struct ceph_connection *con) | |||
| 474 | * received a socket close event before we had the chance to | 526 | * received a socket close event before we had the chance to |
| 475 | * shut the socket down. | 527 | * shut the socket down. |
| 476 | */ | 528 | */ |
| 477 | clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags); | 529 | con_flag_clear(con, CON_FLAG_SOCK_CLOSED); |
| 478 | 530 | ||
| 479 | con_sock_state_closed(con); | 531 | con_sock_state_closed(con); |
| 480 | return rc; | 532 | return rc; |
| @@ -538,11 +590,10 @@ void ceph_con_close(struct ceph_connection *con) | |||
| 538 | ceph_pr_addr(&con->peer_addr.in_addr)); | 590 | ceph_pr_addr(&con->peer_addr.in_addr)); |
| 539 | con->state = CON_STATE_CLOSED; | 591 | con->state = CON_STATE_CLOSED; |
| 540 | 592 | ||
| 541 | clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */ | 593 | con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */ |
| 542 | clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags); | 594 | con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING); |
| 543 | clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 595 | con_flag_clear(con, CON_FLAG_WRITE_PENDING); |
| 544 | clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags); | 596 | con_flag_clear(con, CON_FLAG_BACKOFF); |
| 545 | clear_bit(CON_FLAG_BACKOFF, &con->flags); | ||
| 546 | 597 | ||
| 547 | reset_connection(con); | 598 | reset_connection(con); |
| 548 | con->peer_global_seq = 0; | 599 | con->peer_global_seq = 0; |
| @@ -798,7 +849,7 @@ static void prepare_write_message(struct ceph_connection *con) | |||
| 798 | /* no, queue up footer too and be done */ | 849 | /* no, queue up footer too and be done */ |
| 799 | prepare_write_message_footer(con); | 850 | prepare_write_message_footer(con); |
| 800 | 851 | ||
| 801 | set_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 852 | con_flag_set(con, CON_FLAG_WRITE_PENDING); |
| 802 | } | 853 | } |
| 803 | 854 | ||
| 804 | /* | 855 | /* |
| @@ -819,7 +870,7 @@ static void prepare_write_ack(struct ceph_connection *con) | |||
| 819 | &con->out_temp_ack); | 870 | &con->out_temp_ack); |
| 820 | 871 | ||
| 821 | con->out_more = 1; /* more will follow.. eventually.. */ | 872 | con->out_more = 1; /* more will follow.. eventually.. */ |
| 822 | set_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 873 | con_flag_set(con, CON_FLAG_WRITE_PENDING); |
| 823 | } | 874 | } |
| 824 | 875 | ||
| 825 | /* | 876 | /* |
| @@ -830,7 +881,7 @@ static void prepare_write_keepalive(struct ceph_connection *con) | |||
| 830 | dout("prepare_write_keepalive %p\n", con); | 881 | dout("prepare_write_keepalive %p\n", con); |
| 831 | con_out_kvec_reset(con); | 882 | con_out_kvec_reset(con); |
| 832 | con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); | 883 | con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); |
| 833 | set_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 884 | con_flag_set(con, CON_FLAG_WRITE_PENDING); |
| 834 | } | 885 | } |
| 835 | 886 | ||
| 836 | /* | 887 | /* |
| @@ -873,7 +924,7 @@ static void prepare_write_banner(struct ceph_connection *con) | |||
| 873 | &con->msgr->my_enc_addr); | 924 | &con->msgr->my_enc_addr); |
| 874 | 925 | ||
| 875 | con->out_more = 0; | 926 | con->out_more = 0; |
| 876 | set_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 927 | con_flag_set(con, CON_FLAG_WRITE_PENDING); |
| 877 | } | 928 | } |
| 878 | 929 | ||
| 879 | static int prepare_write_connect(struct ceph_connection *con) | 930 | static int prepare_write_connect(struct ceph_connection *con) |
| @@ -923,7 +974,7 @@ static int prepare_write_connect(struct ceph_connection *con) | |||
| 923 | auth->authorizer_buf); | 974 | auth->authorizer_buf); |
| 924 | 975 | ||
| 925 | con->out_more = 0; | 976 | con->out_more = 0; |
| 926 | set_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 977 | con_flag_set(con, CON_FLAG_WRITE_PENDING); |
| 927 | 978 | ||
| 928 | return 0; | 979 | return 0; |
| 929 | } | 980 | } |
| @@ -1643,7 +1694,7 @@ static int process_connect(struct ceph_connection *con) | |||
| 1643 | le32_to_cpu(con->in_reply.connect_seq)); | 1694 | le32_to_cpu(con->in_reply.connect_seq)); |
| 1644 | 1695 | ||
| 1645 | if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) | 1696 | if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) |
| 1646 | set_bit(CON_FLAG_LOSSYTX, &con->flags); | 1697 | con_flag_set(con, CON_FLAG_LOSSYTX); |
| 1647 | 1698 | ||
| 1648 | con->delay = 0; /* reset backoff memory */ | 1699 | con->delay = 0; /* reset backoff memory */ |
| 1649 | 1700 | ||
| @@ -2080,15 +2131,14 @@ do_next: | |||
| 2080 | prepare_write_ack(con); | 2131 | prepare_write_ack(con); |
| 2081 | goto more; | 2132 | goto more; |
| 2082 | } | 2133 | } |
| 2083 | if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING, | 2134 | if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { |
| 2084 | &con->flags)) { | ||
| 2085 | prepare_write_keepalive(con); | 2135 | prepare_write_keepalive(con); |
| 2086 | goto more; | 2136 | goto more; |
| 2087 | } | 2137 | } |
| 2088 | } | 2138 | } |
| 2089 | 2139 | ||
| 2090 | /* Nothing to do! */ | 2140 | /* Nothing to do! */ |
| 2091 | clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 2141 | con_flag_clear(con, CON_FLAG_WRITE_PENDING); |
| 2092 | dout("try_write nothing else to write.\n"); | 2142 | dout("try_write nothing else to write.\n"); |
| 2093 | ret = 0; | 2143 | ret = 0; |
| 2094 | out: | 2144 | out: |
| @@ -2268,7 +2318,7 @@ static void queue_con(struct ceph_connection *con) | |||
| 2268 | 2318 | ||
| 2269 | static bool con_sock_closed(struct ceph_connection *con) | 2319 | static bool con_sock_closed(struct ceph_connection *con) |
| 2270 | { | 2320 | { |
| 2271 | if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) | 2321 | if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) |
| 2272 | return false; | 2322 | return false; |
| 2273 | 2323 | ||
| 2274 | #define CASE(x) \ | 2324 | #define CASE(x) \ |
| @@ -2295,6 +2345,41 @@ static bool con_sock_closed(struct ceph_connection *con) | |||
| 2295 | return true; | 2345 | return true; |
| 2296 | } | 2346 | } |
| 2297 | 2347 | ||
| 2348 | static bool con_backoff(struct ceph_connection *con) | ||
| 2349 | { | ||
| 2350 | int ret; | ||
| 2351 | |||
| 2352 | if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) | ||
| 2353 | return false; | ||
| 2354 | |||
| 2355 | ret = queue_con_delay(con, round_jiffies_relative(con->delay)); | ||
| 2356 | if (ret) { | ||
| 2357 | dout("%s: con %p FAILED to back off %lu\n", __func__, | ||
| 2358 | con, con->delay); | ||
| 2359 | BUG_ON(ret == -ENOENT); | ||
| 2360 | con_flag_set(con, CON_FLAG_BACKOFF); | ||
| 2361 | } | ||
| 2362 | |||
| 2363 | return true; | ||
| 2364 | } | ||
| 2365 | |||
| 2366 | /* Finish fault handling; con->mutex must *not* be held here */ | ||
| 2367 | |||
| 2368 | static void con_fault_finish(struct ceph_connection *con) | ||
| 2369 | { | ||
| 2370 | /* | ||
| 2371 | * in case we faulted due to authentication, invalidate our | ||
| 2372 | * current tickets so that we can get new ones. | ||
| 2373 | */ | ||
| 2374 | if (con->auth_retry && con->ops->invalidate_authorizer) { | ||
| 2375 | dout("calling invalidate_authorizer()\n"); | ||
| 2376 | con->ops->invalidate_authorizer(con); | ||
| 2377 | } | ||
| 2378 | |||
| 2379 | if (con->ops->fault) | ||
| 2380 | con->ops->fault(con); | ||
| 2381 | } | ||
| 2382 | |||
| 2298 | /* | 2383 | /* |
| 2299 | * Do some work on a connection. Drop a connection ref when we're done. | 2384 | * Do some work on a connection. Drop a connection ref when we're done. |
| 2300 | */ | 2385 | */ |
| @@ -2302,73 +2387,68 @@ static void con_work(struct work_struct *work) | |||
| 2302 | { | 2387 | { |
| 2303 | struct ceph_connection *con = container_of(work, struct ceph_connection, | 2388 | struct ceph_connection *con = container_of(work, struct ceph_connection, |
| 2304 | work.work); | 2389 | work.work); |
| 2305 | int ret; | 2390 | bool fault; |
| 2306 | 2391 | ||
| 2307 | mutex_lock(&con->mutex); | 2392 | mutex_lock(&con->mutex); |
| 2308 | restart: | 2393 | while (true) { |
| 2309 | if (con_sock_closed(con)) | 2394 | int ret; |
| 2310 | goto fault; | ||
| 2311 | 2395 | ||
| 2312 | if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) { | 2396 | if ((fault = con_sock_closed(con))) { |
| 2313 | dout("con_work %p backing off\n", con); | 2397 | dout("%s: con %p SOCK_CLOSED\n", __func__, con); |
| 2314 | ret = queue_con_delay(con, round_jiffies_relative(con->delay)); | 2398 | break; |
| 2315 | if (ret) { | 2399 | } |
| 2316 | dout("con_work %p FAILED to back off %lu\n", con, | 2400 | if (con_backoff(con)) { |
| 2317 | con->delay); | 2401 | dout("%s: con %p BACKOFF\n", __func__, con); |
| 2318 | BUG_ON(ret == -ENOENT); | 2402 | break; |
| 2319 | set_bit(CON_FLAG_BACKOFF, &con->flags); | 2403 | } |
| 2404 | if (con->state == CON_STATE_STANDBY) { | ||
| 2405 | dout("%s: con %p STANDBY\n", __func__, con); | ||
| 2406 | break; | ||
| 2407 | } | ||
| 2408 | if (con->state == CON_STATE_CLOSED) { | ||
| 2409 | dout("%s: con %p CLOSED\n", __func__, con); | ||
| 2410 | BUG_ON(con->sock); | ||
| 2411 | break; | ||
| 2412 | } | ||
| 2413 | if (con->state == CON_STATE_PREOPEN) { | ||
| 2414 | dout("%s: con %p PREOPEN\n", __func__, con); | ||
| 2415 | BUG_ON(con->sock); | ||
| 2320 | } | 2416 | } |
| 2321 | goto done; | ||
| 2322 | } | ||
| 2323 | 2417 | ||
| 2324 | if (con->state == CON_STATE_STANDBY) { | 2418 | ret = try_read(con); |
| 2325 | dout("con_work %p STANDBY\n", con); | 2419 | if (ret < 0) { |
| 2326 | goto done; | 2420 | if (ret == -EAGAIN) |
| 2327 | } | 2421 | continue; |
| 2328 | if (con->state == CON_STATE_CLOSED) { | 2422 | con->error_msg = "socket error on read"; |
| 2329 | dout("con_work %p CLOSED\n", con); | 2423 | fault = true; |
| 2330 | BUG_ON(con->sock); | 2424 | break; |
| 2331 | goto done; | 2425 | } |
| 2332 | } | ||
| 2333 | if (con->state == CON_STATE_PREOPEN) { | ||
| 2334 | dout("con_work OPENING\n"); | ||
| 2335 | BUG_ON(con->sock); | ||
| 2336 | } | ||
| 2337 | 2426 | ||
| 2338 | ret = try_read(con); | 2427 | ret = try_write(con); |
| 2339 | if (ret == -EAGAIN) | 2428 | if (ret < 0) { |
| 2340 | goto restart; | 2429 | if (ret == -EAGAIN) |
| 2341 | if (ret < 0) { | 2430 | continue; |
| 2342 | con->error_msg = "socket error on read"; | 2431 | con->error_msg = "socket error on write"; |
| 2343 | goto fault; | 2432 | fault = true; |
| 2344 | } | 2433 | } |
| 2345 | 2434 | ||
| 2346 | ret = try_write(con); | 2435 | break; /* If we make it to here, we're done */ |
| 2347 | if (ret == -EAGAIN) | ||
| 2348 | goto restart; | ||
| 2349 | if (ret < 0) { | ||
| 2350 | con->error_msg = "socket error on write"; | ||
| 2351 | goto fault; | ||
| 2352 | } | 2436 | } |
| 2353 | 2437 | if (fault) | |
| 2354 | done: | 2438 | con_fault(con); |
| 2355 | mutex_unlock(&con->mutex); | 2439 | mutex_unlock(&con->mutex); |
| 2356 | done_unlocked: | ||
| 2357 | con->ops->put(con); | ||
| 2358 | return; | ||
| 2359 | 2440 | ||
| 2360 | fault: | 2441 | if (fault) |
| 2361 | ceph_fault(con); /* error/fault path */ | 2442 | con_fault_finish(con); |
| 2362 | goto done_unlocked; | ||
| 2363 | } | ||
| 2364 | 2443 | ||
| 2444 | con->ops->put(con); | ||
| 2445 | } | ||
| 2365 | 2446 | ||
| 2366 | /* | 2447 | /* |
| 2367 | * Generic error/fault handler. A retry mechanism is used with | 2448 | * Generic error/fault handler. A retry mechanism is used with |
| 2368 | * exponential backoff | 2449 | * exponential backoff |
| 2369 | */ | 2450 | */ |
| 2370 | static void ceph_fault(struct ceph_connection *con) | 2451 | static void con_fault(struct ceph_connection *con) |
| 2371 | __releases(con->mutex) | ||
| 2372 | { | 2452 | { |
| 2373 | pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), | 2453 | pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), |
| 2374 | ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); | 2454 | ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); |
| @@ -2381,10 +2461,10 @@ static void ceph_fault(struct ceph_connection *con) | |||
| 2381 | 2461 | ||
| 2382 | con_close_socket(con); | 2462 | con_close_socket(con); |
| 2383 | 2463 | ||
| 2384 | if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) { | 2464 | if (con_flag_test(con, CON_FLAG_LOSSYTX)) { |
| 2385 | dout("fault on LOSSYTX channel, marking CLOSED\n"); | 2465 | dout("fault on LOSSYTX channel, marking CLOSED\n"); |
| 2386 | con->state = CON_STATE_CLOSED; | 2466 | con->state = CON_STATE_CLOSED; |
| 2387 | goto out_unlock; | 2467 | return; |
| 2388 | } | 2468 | } |
| 2389 | 2469 | ||
| 2390 | if (con->in_msg) { | 2470 | if (con->in_msg) { |
| @@ -2401,9 +2481,9 @@ static void ceph_fault(struct ceph_connection *con) | |||
| 2401 | /* If there are no messages queued or keepalive pending, place | 2481 | /* If there are no messages queued or keepalive pending, place |
| 2402 | * the connection in a STANDBY state */ | 2482 | * the connection in a STANDBY state */ |
| 2403 | if (list_empty(&con->out_queue) && | 2483 | if (list_empty(&con->out_queue) && |
| 2404 | !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) { | 2484 | !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { |
| 2405 | dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); | 2485 | dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); |
| 2406 | clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); | 2486 | con_flag_clear(con, CON_FLAG_WRITE_PENDING); |
| 2407 | con->state = CON_STATE_STANDBY; | 2487 | con->state = CON_STATE_STANDBY; |
| 2408 | } else { | 2488 | } else { |
| 2409 | /* retry after a delay. */ | 2489 | /* retry after a delay. */ |
| @@ -2412,23 +2492,9 @@ static void ceph_fault(struct ceph_connection *con) | |||
| 2412 | con->delay = BASE_DELAY_INTERVAL; | 2492 | con->delay = BASE_DELAY_INTERVAL; |
| 2413 | else if (con->delay < MAX_DELAY_INTERVAL) | 2493 | else if (con->delay < MAX_DELAY_INTERVAL) |
| 2414 | con->delay *= 2; | 2494 | con->delay *= 2; |
| 2415 | set_bit(CON_FLAG_BACKOFF, &con->flags); | 2495 | con_flag_set(con, CON_FLAG_BACKOFF); |
| 2416 | queue_con(con); | 2496 | queue_con(con); |
| 2417 | } | 2497 | } |
| 2418 | |||
| 2419 | out_unlock: | ||
| 2420 | mutex_unlock(&con->mutex); | ||
| 2421 | /* | ||
| 2422 | * in case we faulted due to authentication, invalidate our | ||
| 2423 | * current tickets so that we can get new ones. | ||
| 2424 | */ | ||
| 2425 | if (con->auth_retry && con->ops->invalidate_authorizer) { | ||
| 2426 | dout("calling invalidate_authorizer()\n"); | ||
| 2427 | con->ops->invalidate_authorizer(con); | ||
| 2428 | } | ||
| 2429 | |||
| 2430 | if (con->ops->fault) | ||
| 2431 | con->ops->fault(con); | ||
| 2432 | } | 2498 | } |
| 2433 | 2499 | ||
| 2434 | 2500 | ||
| @@ -2469,8 +2535,8 @@ static void clear_standby(struct ceph_connection *con) | |||
| 2469 | dout("clear_standby %p and ++connect_seq\n", con); | 2535 | dout("clear_standby %p and ++connect_seq\n", con); |
| 2470 | con->state = CON_STATE_PREOPEN; | 2536 | con->state = CON_STATE_PREOPEN; |
| 2471 | con->connect_seq++; | 2537 | con->connect_seq++; |
| 2472 | WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags)); | 2538 | WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); |
| 2473 | WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)); | 2539 | WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); |
| 2474 | } | 2540 | } |
| 2475 | } | 2541 | } |
| 2476 | 2542 | ||
| @@ -2511,7 +2577,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 2511 | 2577 | ||
| 2512 | /* if there wasn't anything waiting to send before, queue | 2578 | /* if there wasn't anything waiting to send before, queue |
| 2513 | * new work */ | 2579 | * new work */ |
| 2514 | if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) | 2580 | if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) |
| 2515 | queue_con(con); | 2581 | queue_con(con); |
| 2516 | } | 2582 | } |
| 2517 | EXPORT_SYMBOL(ceph_con_send); | 2583 | EXPORT_SYMBOL(ceph_con_send); |
| @@ -2600,8 +2666,8 @@ void ceph_con_keepalive(struct ceph_connection *con) | |||
| 2600 | mutex_lock(&con->mutex); | 2666 | mutex_lock(&con->mutex); |
| 2601 | clear_standby(con); | 2667 | clear_standby(con); |
| 2602 | mutex_unlock(&con->mutex); | 2668 | mutex_unlock(&con->mutex); |
| 2603 | if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 && | 2669 | if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && |
| 2604 | test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) | 2670 | con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) |
| 2605 | queue_con(con); | 2671 | queue_con(con); |
| 2606 | } | 2672 | } |
| 2607 | EXPORT_SYMBOL(ceph_con_keepalive); | 2673 | EXPORT_SYMBOL(ceph_con_keepalive); |
| @@ -2651,9 +2717,11 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, | |||
| 2651 | m->page_alignment = 0; | 2717 | m->page_alignment = 0; |
| 2652 | m->pages = NULL; | 2718 | m->pages = NULL; |
| 2653 | m->pagelist = NULL; | 2719 | m->pagelist = NULL; |
| 2720 | #ifdef CONFIG_BLOCK | ||
| 2654 | m->bio = NULL; | 2721 | m->bio = NULL; |
| 2655 | m->bio_iter = NULL; | 2722 | m->bio_iter = NULL; |
| 2656 | m->bio_seg = 0; | 2723 | m->bio_seg = 0; |
| 2724 | #endif /* CONFIG_BLOCK */ | ||
| 2657 | m->trail = NULL; | 2725 | m->trail = NULL; |
| 2658 | 2726 | ||
| 2659 | /* front */ | 2727 | /* front */ |
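
The con_work() rewrite replaces the restart/done/fault goto web with one loop and, more importantly, splits fault handling in two: con_fault() performs the state transitions while con->mutex is still held, and con_fault_finish() invokes the invalidate_authorizer and ops->fault callbacks only after the mutex is dropped, so those callbacks can safely re-enter the messenger. The resulting control flow, reduced to its skeleton:

    mutex_lock(&con->mutex);
    while (true) {
            /* sock-closed / backoff / state checks, then try_read()
             * and try_write(); -EAGAIN continues, errors set fault */
            break;
    }
    if (fault)
            con_fault(con);         /* state change, still locked */
    mutex_unlock(&con->mutex);

    if (fault)
            con_fault_finish(con);  /* callbacks, lock dropped */

    con->ops->put(con);             /* ref taken when work was queued */
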
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 812eb3b46c1f..aef5b1062bee 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
| @@ -697,7 +697,7 @@ int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | |||
| 697 | u32 pool, u64 snapid) | 697 | u32 pool, u64 snapid) |
| 698 | { | 698 | { |
| 699 | return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, | 699 | return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, |
| 700 | pool, snapid, 0, 0); | 700 | pool, snapid, NULL, 0); |
| 701 | 701 | ||
| 702 | } | 702 | } |
| 703 | 703 | ||
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index eb9a44478764..d730dd4d8eb2 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | static const struct ceph_connection_operations osd_con_ops; | 24 | static const struct ceph_connection_operations osd_con_ops; |
| 25 | 25 | ||
| 26 | static void send_queued(struct ceph_osd_client *osdc); | 26 | static void __send_queued(struct ceph_osd_client *osdc); |
| 27 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); | 27 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); |
| 28 | static void __register_request(struct ceph_osd_client *osdc, | 28 | static void __register_request(struct ceph_osd_client *osdc, |
| 29 | struct ceph_osd_request *req); | 29 | struct ceph_osd_request *req); |
| @@ -32,64 +32,12 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, | |||
| 32 | static void __send_request(struct ceph_osd_client *osdc, | 32 | static void __send_request(struct ceph_osd_client *osdc, |
| 33 | struct ceph_osd_request *req); | 33 | struct ceph_osd_request *req); |
| 34 | 34 | ||
| 35 | static int op_needs_trail(int op) | ||
| 36 | { | ||
| 37 | switch (op) { | ||
| 38 | case CEPH_OSD_OP_GETXATTR: | ||
| 39 | case CEPH_OSD_OP_SETXATTR: | ||
| 40 | case CEPH_OSD_OP_CMPXATTR: | ||
| 41 | case CEPH_OSD_OP_CALL: | ||
| 42 | case CEPH_OSD_OP_NOTIFY: | ||
| 43 | return 1; | ||
| 44 | default: | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | } | ||
| 48 | |||
| 49 | static int op_has_extent(int op) | 35 | static int op_has_extent(int op) |
| 50 | { | 36 | { |
| 51 | return (op == CEPH_OSD_OP_READ || | 37 | return (op == CEPH_OSD_OP_READ || |
| 52 | op == CEPH_OSD_OP_WRITE); | 38 | op == CEPH_OSD_OP_WRITE); |
| 53 | } | 39 | } |
| 54 | 40 | ||
| 55 | int ceph_calc_raw_layout(struct ceph_osd_client *osdc, | ||
| 56 | struct ceph_file_layout *layout, | ||
| 57 | u64 snapid, | ||
| 58 | u64 off, u64 *plen, u64 *bno, | ||
| 59 | struct ceph_osd_request *req, | ||
| 60 | struct ceph_osd_req_op *op) | ||
| 61 | { | ||
| 62 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; | ||
| 63 | u64 orig_len = *plen; | ||
| 64 | u64 objoff, objlen; /* extent in object */ | ||
| 65 | int r; | ||
| 66 | |||
| 67 | reqhead->snapid = cpu_to_le64(snapid); | ||
| 68 | |||
| 69 | /* object extent? */ | ||
| 70 | r = ceph_calc_file_object_mapping(layout, off, plen, bno, | ||
| 71 | &objoff, &objlen); | ||
| 72 | if (r < 0) | ||
| 73 | return r; | ||
| 74 | if (*plen < orig_len) | ||
| 75 | dout(" skipping last %llu, final file extent %llu~%llu\n", | ||
| 76 | orig_len - *plen, off, *plen); | ||
| 77 | |||
| 78 | if (op_has_extent(op->op)) { | ||
| 79 | op->extent.offset = objoff; | ||
| 80 | op->extent.length = objlen; | ||
| 81 | } | ||
| 82 | req->r_num_pages = calc_pages_for(off, *plen); | ||
| 83 | req->r_page_alignment = off & ~PAGE_MASK; | ||
| 84 | if (op->op == CEPH_OSD_OP_WRITE) | ||
| 85 | op->payload_len = *plen; | ||
| 86 | |||
| 87 | dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", | ||
| 88 | *bno, objoff, objlen, req->r_num_pages); | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | EXPORT_SYMBOL(ceph_calc_raw_layout); | ||
| 92 | |||
| 93 | /* | 41 | /* |
| 94 | * Implement client access to distributed object storage cluster. | 42 | * Implement client access to distributed object storage cluster. |
| 95 | * | 43 | * |
| @@ -115,20 +63,48 @@ EXPORT_SYMBOL(ceph_calc_raw_layout); | |||
| 115 | * | 63 | * |
| 116 | * fill osd op in request message. | 64 | * fill osd op in request message. |
| 117 | */ | 65 | */ |
| 118 | static int calc_layout(struct ceph_osd_client *osdc, | 66 | static int calc_layout(struct ceph_vino vino, |
| 119 | struct ceph_vino vino, | ||
| 120 | struct ceph_file_layout *layout, | 67 | struct ceph_file_layout *layout, |
| 121 | u64 off, u64 *plen, | 68 | u64 off, u64 *plen, |
| 122 | struct ceph_osd_request *req, | 69 | struct ceph_osd_request *req, |
| 123 | struct ceph_osd_req_op *op) | 70 | struct ceph_osd_req_op *op) |
| 124 | { | 71 | { |
| 125 | u64 bno; | 72 | u64 orig_len = *plen; |
| 73 | u64 bno = 0; | ||
| 74 | u64 objoff = 0; | ||
| 75 | u64 objlen = 0; | ||
| 126 | int r; | 76 | int r; |
| 127 | 77 | ||
| 128 | r = ceph_calc_raw_layout(osdc, layout, vino.snap, off, | 78 | /* object extent? */ |
| 129 | plen, &bno, req, op); | 79 | r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno, |
| 80 | &objoff, &objlen); | ||
| 130 | if (r < 0) | 81 | if (r < 0) |
| 131 | return r; | 82 | return r; |
| 83 | if (objlen < orig_len) { | ||
| 84 | *plen = objlen; | ||
| 85 | dout(" skipping last %llu, final file extent %llu~%llu\n", | ||
| 86 | orig_len - *plen, off, *plen); | ||
| 87 | } | ||
| 88 | |||
| 89 | if (op_has_extent(op->op)) { | ||
| 90 | u32 osize = le32_to_cpu(layout->fl_object_size); | ||
| 91 | op->extent.offset = objoff; | ||
| 92 | op->extent.length = objlen; | ||
| 93 | if (op->extent.truncate_size <= off - objoff) { | ||
| 94 | op->extent.truncate_size = 0; | ||
| 95 | } else { | ||
| 96 | op->extent.truncate_size -= off - objoff; | ||
| 97 | if (op->extent.truncate_size > osize) | ||
| 98 | op->extent.truncate_size = osize; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | req->r_num_pages = calc_pages_for(off, *plen); | ||
| 102 | req->r_page_alignment = off & ~PAGE_MASK; | ||
| 103 | if (op->op == CEPH_OSD_OP_WRITE) | ||
| 104 | op->payload_len = *plen; | ||
| 105 | |||
| 106 | dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", | ||
| 107 | bno, objoff, objlen, req->r_num_pages); | ||
| 132 | 108 | ||
| 133 | snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); | 109 | snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); |
| 134 | req->r_oid_len = strlen(req->r_oid); | 110 | req->r_oid_len = strlen(req->r_oid); |
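
calc_layout() now absorbs the old ceph_calc_raw_layout() and rebases truncate_size from file coordinates into object coordinates: off - objoff is the file offset at which the target object begins. A worked example with an assumed 4 MB object size:

    /* assume fl_object_size = 4 MB; a file extent at off = 9 MB maps
     * to objoff = 1 MB inside the third object, so this object starts
     * at file offset off - objoff = 8 MB */

    if (op->extent.truncate_size <= off - objoff)
            /* e.g. truncate_size = 7 MB: entirely before this object */
            op->extent.truncate_size = 0;
    else
            /* e.g. truncate_size = 8.5 MB -> 0.5 MB object-relative,
             * then clamped to at most fl_object_size (4 MB) */
            op->extent.truncate_size -= off - objoff;
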
| @@ -148,25 +124,19 @@ void ceph_osdc_release_request(struct kref *kref) | |||
| 148 | if (req->r_request) | 124 | if (req->r_request) |
| 149 | ceph_msg_put(req->r_request); | 125 | ceph_msg_put(req->r_request); |
| 150 | if (req->r_con_filling_msg) { | 126 | if (req->r_con_filling_msg) { |
| 151 | dout("%s revoking pages %p from con %p\n", __func__, | 127 | dout("%s revoking msg %p from con %p\n", __func__, |
| 152 | req->r_pages, req->r_con_filling_msg); | 128 | req->r_reply, req->r_con_filling_msg); |
| 153 | ceph_msg_revoke_incoming(req->r_reply); | 129 | ceph_msg_revoke_incoming(req->r_reply); |
| 154 | req->r_con_filling_msg->ops->put(req->r_con_filling_msg); | 130 | req->r_con_filling_msg->ops->put(req->r_con_filling_msg); |
| 131 | req->r_con_filling_msg = NULL; | ||
| 155 | } | 132 | } |
| 156 | if (req->r_reply) | 133 | if (req->r_reply) |
| 157 | ceph_msg_put(req->r_reply); | 134 | ceph_msg_put(req->r_reply); |
| 158 | if (req->r_own_pages) | 135 | if (req->r_own_pages) |
| 159 | ceph_release_page_vector(req->r_pages, | 136 | ceph_release_page_vector(req->r_pages, |
| 160 | req->r_num_pages); | 137 | req->r_num_pages); |
| 161 | #ifdef CONFIG_BLOCK | ||
| 162 | if (req->r_bio) | ||
| 163 | bio_put(req->r_bio); | ||
| 164 | #endif | ||
| 165 | ceph_put_snap_context(req->r_snapc); | 138 | ceph_put_snap_context(req->r_snapc); |
| 166 | if (req->r_trail) { | 139 | ceph_pagelist_release(&req->r_trail); |
| 167 | ceph_pagelist_release(req->r_trail); | ||
| 168 | kfree(req->r_trail); | ||
| 169 | } | ||
| 170 | if (req->r_mempool) | 140 | if (req->r_mempool) |
| 171 | mempool_free(req, req->r_osdc->req_mempool); | 141 | mempool_free(req, req->r_osdc->req_mempool); |
| 172 | else | 142 | else |
| @@ -174,37 +144,25 @@ void ceph_osdc_release_request(struct kref *kref) | |||
| 174 | } | 144 | } |
| 175 | EXPORT_SYMBOL(ceph_osdc_release_request); | 145 | EXPORT_SYMBOL(ceph_osdc_release_request); |
| 176 | 146 | ||
| 177 | static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail) | ||
| 178 | { | ||
| 179 | int i = 0; | ||
| 180 | |||
| 181 | if (needs_trail) | ||
| 182 | *needs_trail = 0; | ||
| 183 | while (ops[i].op) { | ||
| 184 | if (needs_trail && op_needs_trail(ops[i].op)) | ||
| 185 | *needs_trail = 1; | ||
| 186 | i++; | ||
| 187 | } | ||
| 188 | |||
| 189 | return i; | ||
| 190 | } | ||
| 191 | |||
| 192 | struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | 147 | struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, |
| 193 | int flags, | ||
| 194 | struct ceph_snap_context *snapc, | 148 | struct ceph_snap_context *snapc, |
| 195 | struct ceph_osd_req_op *ops, | 149 | unsigned int num_ops, |
| 196 | bool use_mempool, | 150 | bool use_mempool, |
| 197 | gfp_t gfp_flags, | 151 | gfp_t gfp_flags) |
| 198 | struct page **pages, | ||
| 199 | struct bio *bio) | ||
| 200 | { | 152 | { |
| 201 | struct ceph_osd_request *req; | 153 | struct ceph_osd_request *req; |
| 202 | struct ceph_msg *msg; | 154 | struct ceph_msg *msg; |
| 203 | int needs_trail; | 155 | size_t msg_size; |
| 204 | int num_op = get_num_ops(ops, &needs_trail); | 156 | |
| 205 | size_t msg_size = sizeof(struct ceph_osd_request_head); | 157 | msg_size = 4 + 4 + 8 + 8 + 4+8; |
| 206 | 158 | msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ | |
| 207 | msg_size += num_op*sizeof(struct ceph_osd_op); | 159 | msg_size += 1 + 8 + 4 + 4; /* pg_t */ |
| 160 | msg_size += 4 + MAX_OBJ_NAME_SIZE; | ||
| 161 | msg_size += 2 + num_ops*sizeof(struct ceph_osd_op); | ||
| 162 | msg_size += 8; /* snapid */ | ||
| 163 | msg_size += 8; /* snap_seq */ | ||
| 164 | msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */ | ||
| 165 | msg_size += 4; | ||
| 208 | 166 | ||
| 209 | if (use_mempool) { | 167 | if (use_mempool) { |
| 210 | req = mempool_alloc(osdc->req_mempool, gfp_flags); | 168 | req = mempool_alloc(osdc->req_mempool, gfp_flags); |
| @@ -228,10 +186,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
| 228 | INIT_LIST_HEAD(&req->r_req_lru_item); | 186 | INIT_LIST_HEAD(&req->r_req_lru_item); |
| 229 | INIT_LIST_HEAD(&req->r_osd_item); | 187 | INIT_LIST_HEAD(&req->r_osd_item); |
| 230 | 188 | ||
| 231 | req->r_flags = flags; | ||
| 232 | |||
| 233 | WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); | ||
| 234 | |||
| 235 | /* create reply message */ | 189 | /* create reply message */ |
| 236 | if (use_mempool) | 190 | if (use_mempool) |
| 237 | msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); | 191 | msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); |
| @@ -244,20 +198,9 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
| 244 | } | 198 | } |
| 245 | req->r_reply = msg; | 199 | req->r_reply = msg; |
| 246 | 200 | ||
| 247 | /* allocate space for the trailing data */ | 201 | ceph_pagelist_init(&req->r_trail); |
| 248 | if (needs_trail) { | ||
| 249 | req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags); | ||
| 250 | if (!req->r_trail) { | ||
| 251 | ceph_osdc_put_request(req); | ||
| 252 | return NULL; | ||
| 253 | } | ||
| 254 | ceph_pagelist_init(req->r_trail); | ||
| 255 | } | ||
| 256 | 202 | ||
| 257 | /* create request message; allow space for oid */ | 203 | /* create request message; allow space for oid */ |
| 258 | msg_size += MAX_OBJ_NAME_SIZE; | ||
| 259 | if (snapc) | ||
| 260 | msg_size += sizeof(u64) * snapc->num_snaps; | ||
| 261 | if (use_mempool) | 204 | if (use_mempool) |
| 262 | msg = ceph_msgpool_get(&osdc->msgpool_op, 0); | 205 | msg = ceph_msgpool_get(&osdc->msgpool_op, 0); |
| 263 | else | 206 | else |
| @@ -270,13 +213,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
| 270 | memset(msg->front.iov_base, 0, msg->front.iov_len); | 213 | memset(msg->front.iov_base, 0, msg->front.iov_len); |
| 271 | 214 | ||
| 272 | req->r_request = msg; | 215 | req->r_request = msg; |
| 273 | req->r_pages = pages; | ||
| 274 | #ifdef CONFIG_BLOCK | ||
| 275 | if (bio) { | ||
| 276 | req->r_bio = bio; | ||
| 277 | bio_get(req->r_bio); | ||
| 278 | } | ||
| 279 | #endif | ||
| 280 | 216 | ||
| 281 | return req; | 217 | return req; |
| 282 | } | 218 | } |
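
ceph_osdc_alloc_request() now sizes the request message from an explicit op count plus a hand-computed encoding size rather than struct ceph_osd_request_head, and the flags/pages/bio parameters are gone: the caller (or a later request-build step) wires up flags and data buffers after allocation. A hedged sketch of a post-change caller, with the assignments shown inline for illustration (single read op, no mempool):

    struct ceph_osd_request *req;

    req = ceph_osdc_alloc_request(osdc, NULL /* snapc */, 1 /* num_ops */,
                                  false /* use_mempool */, GFP_NOFS);
    if (!req)
            return NULL;

    req->r_flags = CEPH_OSD_FLAG_READ;  /* was a WARN_ON-checked ctor arg */
    req->r_pages = pages;               /* data plumbing now caller-side */
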
| @@ -289,6 +225,8 @@ static void osd_req_encode_op(struct ceph_osd_request *req, | |||
| 289 | dst->op = cpu_to_le16(src->op); | 225 | dst->op = cpu_to_le16(src->op); |
| 290 | 226 | ||
| 291 | switch (src->op) { | 227 | switch (src->op) { |
| 228 | case CEPH_OSD_OP_STAT: | ||
| 229 | break; | ||
| 292 | case CEPH_OSD_OP_READ: | 230 | case CEPH_OSD_OP_READ: |
| 293 | case CEPH_OSD_OP_WRITE: | 231 | case CEPH_OSD_OP_WRITE: |
| 294 | dst->extent.offset = | 232 | dst->extent.offset = |
| @@ -300,52 +238,20 @@ static void osd_req_encode_op(struct ceph_osd_request *req, | |||
| 300 | dst->extent.truncate_seq = | 238 | dst->extent.truncate_seq = |
| 301 | cpu_to_le32(src->extent.truncate_seq); | 239 | cpu_to_le32(src->extent.truncate_seq); |
| 302 | break; | 240 | break; |
| 303 | |||
| 304 | case CEPH_OSD_OP_GETXATTR: | ||
| 305 | case CEPH_OSD_OP_SETXATTR: | ||
| 306 | case CEPH_OSD_OP_CMPXATTR: | ||
| 307 | BUG_ON(!req->r_trail); | ||
| 308 | |||
| 309 | dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); | ||
| 310 | dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); | ||
| 311 | dst->xattr.cmp_op = src->xattr.cmp_op; | ||
| 312 | dst->xattr.cmp_mode = src->xattr.cmp_mode; | ||
| 313 | ceph_pagelist_append(req->r_trail, src->xattr.name, | ||
| 314 | src->xattr.name_len); | ||
| 315 | ceph_pagelist_append(req->r_trail, src->xattr.val, | ||
| 316 | src->xattr.value_len); | ||
| 317 | break; | ||
| 318 | case CEPH_OSD_OP_CALL: | 241 | case CEPH_OSD_OP_CALL: |
| 319 | BUG_ON(!req->r_trail); | ||
| 320 | |||
| 321 | dst->cls.class_len = src->cls.class_len; | 242 | dst->cls.class_len = src->cls.class_len; |
| 322 | dst->cls.method_len = src->cls.method_len; | 243 | dst->cls.method_len = src->cls.method_len; |
| 323 | dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); | 244 | dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); |
| 324 | 245 | ||
| 325 | ceph_pagelist_append(req->r_trail, src->cls.class_name, | 246 | ceph_pagelist_append(&req->r_trail, src->cls.class_name, |
| 326 | src->cls.class_len); | 247 | src->cls.class_len); |
| 327 | ceph_pagelist_append(req->r_trail, src->cls.method_name, | 248 | ceph_pagelist_append(&req->r_trail, src->cls.method_name, |
| 328 | src->cls.method_len); | 249 | src->cls.method_len); |
| 329 | ceph_pagelist_append(req->r_trail, src->cls.indata, | 250 | ceph_pagelist_append(&req->r_trail, src->cls.indata, |
| 330 | src->cls.indata_len); | 251 | src->cls.indata_len); |
| 331 | break; | 252 | break; |
| 332 | case CEPH_OSD_OP_ROLLBACK: | ||
| 333 | dst->snap.snapid = cpu_to_le64(src->snap.snapid); | ||
| 334 | break; | ||
| 335 | case CEPH_OSD_OP_STARTSYNC: | 253 | case CEPH_OSD_OP_STARTSYNC: |
| 336 | break; | 254 | break; |
| 337 | case CEPH_OSD_OP_NOTIFY: | ||
| 338 | { | ||
| 339 | __le32 prot_ver = cpu_to_le32(src->watch.prot_ver); | ||
| 340 | __le32 timeout = cpu_to_le32(src->watch.timeout); | ||
| 341 | |||
| 342 | BUG_ON(!req->r_trail); | ||
| 343 | |||
| 344 | ceph_pagelist_append(req->r_trail, | ||
| 345 | &prot_ver, sizeof(prot_ver)); | ||
| 346 | ceph_pagelist_append(req->r_trail, | ||
| 347 | &timeout, sizeof(timeout)); | ||
| 348 | } | ||
| 349 | case CEPH_OSD_OP_NOTIFY_ACK: | 255 | case CEPH_OSD_OP_NOTIFY_ACK: |
| 350 | case CEPH_OSD_OP_WATCH: | 256 | case CEPH_OSD_OP_WATCH: |
| 351 | dst->watch.cookie = cpu_to_le64(src->watch.cookie); | 257 | dst->watch.cookie = cpu_to_le64(src->watch.cookie); |
| @@ -356,6 +262,64 @@ static void osd_req_encode_op(struct ceph_osd_request *req, | |||
| 356 | pr_err("unrecognized osd opcode %d\n", dst->op); | 262 | pr_err("unrecognized osd opcode %d\n", dst->op); |
| 357 | WARN_ON(1); | 263 | WARN_ON(1); |
| 358 | break; | 264 | break; |
| 265 | case CEPH_OSD_OP_MAPEXT: | ||
| 266 | case CEPH_OSD_OP_MASKTRUNC: | ||
| 267 | case CEPH_OSD_OP_SPARSE_READ: | ||
| 268 | case CEPH_OSD_OP_NOTIFY: | ||
| 269 | case CEPH_OSD_OP_ASSERT_VER: | ||
| 270 | case CEPH_OSD_OP_WRITEFULL: | ||
| 271 | case CEPH_OSD_OP_TRUNCATE: | ||
| 272 | case CEPH_OSD_OP_ZERO: | ||
| 273 | case CEPH_OSD_OP_DELETE: | ||
| 274 | case CEPH_OSD_OP_APPEND: | ||
| 275 | case CEPH_OSD_OP_SETTRUNC: | ||
| 276 | case CEPH_OSD_OP_TRIMTRUNC: | ||
| 277 | case CEPH_OSD_OP_TMAPUP: | ||
| 278 | case CEPH_OSD_OP_TMAPPUT: | ||
| 279 | case CEPH_OSD_OP_TMAPGET: | ||
| 280 | case CEPH_OSD_OP_CREATE: | ||
| 281 | case CEPH_OSD_OP_ROLLBACK: | ||
| 282 | case CEPH_OSD_OP_OMAPGETKEYS: | ||
| 283 | case CEPH_OSD_OP_OMAPGETVALS: | ||
| 284 | case CEPH_OSD_OP_OMAPGETHEADER: | ||
| 285 | case CEPH_OSD_OP_OMAPGETVALSBYKEYS: | ||
| 286 | case CEPH_OSD_OP_MODE_RD: | ||
| 287 | case CEPH_OSD_OP_OMAPSETVALS: | ||
| 288 | case CEPH_OSD_OP_OMAPSETHEADER: | ||
| 289 | case CEPH_OSD_OP_OMAPCLEAR: | ||
| 290 | case CEPH_OSD_OP_OMAPRMKEYS: | ||
| 291 | case CEPH_OSD_OP_OMAP_CMP: | ||
| 292 | case CEPH_OSD_OP_CLONERANGE: | ||
| 293 | case CEPH_OSD_OP_ASSERT_SRC_VERSION: | ||
| 294 | case CEPH_OSD_OP_SRC_CMPXATTR: | ||
| 295 | case CEPH_OSD_OP_GETXATTR: | ||
| 296 | case CEPH_OSD_OP_GETXATTRS: | ||
| 297 | case CEPH_OSD_OP_CMPXATTR: | ||
| 298 | case CEPH_OSD_OP_SETXATTR: | ||
| 299 | case CEPH_OSD_OP_SETXATTRS: | ||
| 300 | case CEPH_OSD_OP_RESETXATTRS: | ||
| 301 | case CEPH_OSD_OP_RMXATTR: | ||
| 302 | case CEPH_OSD_OP_PULL: | ||
| 303 | case CEPH_OSD_OP_PUSH: | ||
| 304 | case CEPH_OSD_OP_BALANCEREADS: | ||
| 305 | case CEPH_OSD_OP_UNBALANCEREADS: | ||
| 306 | case CEPH_OSD_OP_SCRUB: | ||
| 307 | case CEPH_OSD_OP_SCRUB_RESERVE: | ||
| 308 | case CEPH_OSD_OP_SCRUB_UNRESERVE: | ||
| 309 | case CEPH_OSD_OP_SCRUB_STOP: | ||
| 310 | case CEPH_OSD_OP_SCRUB_MAP: | ||
| 311 | case CEPH_OSD_OP_WRLOCK: | ||
| 312 | case CEPH_OSD_OP_WRUNLOCK: | ||
| 313 | case CEPH_OSD_OP_RDLOCK: | ||
| 314 | case CEPH_OSD_OP_RDUNLOCK: | ||
| 315 | case CEPH_OSD_OP_UPLOCK: | ||
| 316 | case CEPH_OSD_OP_DNLOCK: | ||
| 317 | case CEPH_OSD_OP_PGLS: | ||
| 318 | case CEPH_OSD_OP_PGLS_FILTER: | ||
| 319 | pr_err("unsupported osd opcode %s\n", | ||
| 320 | ceph_osd_op_name(dst->op)); | ||
| 321 | WARN_ON(1); | ||
| 322 | break; | ||
| 359 | } | 323 | } |
| 360 | dst->payload_len = cpu_to_le32(src->payload_len); | 324 | dst->payload_len = cpu_to_le32(src->payload_len); |
| 361 | } | 325 | } |
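Review note: the rewritten switch enumerates every opcode the protocol defines, so an opcode that is known but not implemented by the kernel client gets a by-name "unsupported" diagnostic, while a value outside the enum still hits the "unrecognized" default. The shape of the pattern, with illustrative names:

    switch (op) {
    case OP_IMPLEMENTED:
            encode_implemented(dst, src);   /* has an encoder */
            break;
    case OP_KNOWN_ONLY:                     /* defined, not implemented */
            pr_err("unsupported osd opcode %s\n", op_name(op));
            WARN_ON(1);
            break;
    default:                                /* not in the enum at all */
            pr_err("unrecognized osd opcode %d\n", op);
            WARN_ON(1);
            break;
    }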
| @@ -365,75 +329,95 @@ static void osd_req_encode_op(struct ceph_osd_request *req, | |||
| 365 | * | 329 | * |
| 366 | */ | 330 | */ |
| 367 | void ceph_osdc_build_request(struct ceph_osd_request *req, | 331 | void ceph_osdc_build_request(struct ceph_osd_request *req, |
| 368 | u64 off, u64 *plen, | 332 | u64 off, u64 len, unsigned int num_ops, |
| 369 | struct ceph_osd_req_op *src_ops, | 333 | struct ceph_osd_req_op *src_ops, |
| 370 | struct ceph_snap_context *snapc, | 334 | struct ceph_snap_context *snapc, u64 snap_id, |
| 371 | struct timespec *mtime, | 335 | struct timespec *mtime) |
| 372 | const char *oid, | ||
| 373 | int oid_len) | ||
| 374 | { | 336 | { |
| 375 | struct ceph_msg *msg = req->r_request; | 337 | struct ceph_msg *msg = req->r_request; |
| 376 | struct ceph_osd_request_head *head; | ||
| 377 | struct ceph_osd_req_op *src_op; | 338 | struct ceph_osd_req_op *src_op; |
| 378 | struct ceph_osd_op *op; | ||
| 379 | void *p; | 339 | void *p; |
| 380 | int num_op = get_num_ops(src_ops, NULL); | 340 | size_t msg_size; |
| 381 | size_t msg_size = sizeof(*head) + num_op*sizeof(*op); | ||
| 382 | int flags = req->r_flags; | 341 | int flags = req->r_flags; |
| 383 | u64 data_len = 0; | 342 | u64 data_len; |
| 384 | int i; | 343 | int i; |
| 385 | 344 | ||
| 386 | head = msg->front.iov_base; | 345 | req->r_num_ops = num_ops; |
| 387 | op = (void *)(head + 1); | 346 | req->r_snapid = snap_id; |
| 388 | p = (void *)(op + num_op); | ||
| 389 | |||
| 390 | req->r_snapc = ceph_get_snap_context(snapc); | 347 | req->r_snapc = ceph_get_snap_context(snapc); |
| 391 | 348 | ||
| 392 | head->client_inc = cpu_to_le32(1); /* always, for now. */ | 349 | /* encode request */ |
| 393 | head->flags = cpu_to_le32(flags); | 350 | msg->hdr.version = cpu_to_le16(4); |
| 394 | if (flags & CEPH_OSD_FLAG_WRITE) | ||
| 395 | ceph_encode_timespec(&head->mtime, mtime); | ||
| 396 | head->num_ops = cpu_to_le16(num_op); | ||
| 397 | |||
| 398 | |||
| 399 | /* fill in oid */ | ||
| 400 | head->object_len = cpu_to_le32(oid_len); | ||
| 401 | memcpy(p, oid, oid_len); | ||
| 402 | p += oid_len; | ||
| 403 | 351 | ||
| 352 | p = msg->front.iov_base; | ||
| 353 | ceph_encode_32(&p, 1); /* client_inc is always 1 */ | ||
| 354 | req->r_request_osdmap_epoch = p; | ||
| 355 | p += 4; | ||
| 356 | req->r_request_flags = p; | ||
| 357 | p += 4; | ||
| 358 | if (req->r_flags & CEPH_OSD_FLAG_WRITE) | ||
| 359 | ceph_encode_timespec(p, mtime); | ||
| 360 | p += sizeof(struct ceph_timespec); | ||
| 361 | req->r_request_reassert_version = p; | ||
| 362 | p += sizeof(struct ceph_eversion); /* will get filled in */ | ||
| 363 | |||
| 364 | /* oloc */ | ||
| 365 | ceph_encode_8(&p, 4); | ||
| 366 | ceph_encode_8(&p, 4); | ||
| 367 | ceph_encode_32(&p, 8 + 4 + 4); | ||
| 368 | req->r_request_pool = p; | ||
| 369 | p += 8; | ||
| 370 | ceph_encode_32(&p, -1); /* preferred */ | ||
| 371 | ceph_encode_32(&p, 0); /* key len */ | ||
| 372 | |||
| 373 | ceph_encode_8(&p, 1); | ||
| 374 | req->r_request_pgid = p; | ||
| 375 | p += 8 + 4; | ||
| 376 | ceph_encode_32(&p, -1); /* preferred */ | ||
| 377 | |||
| 378 | /* oid */ | ||
| 379 | ceph_encode_32(&p, req->r_oid_len); | ||
| 380 | memcpy(p, req->r_oid, req->r_oid_len); | ||
| 381 | dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); | ||
| 382 | p += req->r_oid_len; | ||
| 383 | |||
| 384 | /* ops */ | ||
| 385 | ceph_encode_16(&p, num_ops); | ||
| 404 | src_op = src_ops; | 386 | src_op = src_ops; |
| 405 | while (src_op->op) { | 387 | req->r_request_ops = p; |
| 406 | osd_req_encode_op(req, op, src_op); | 388 | for (i = 0; i < num_ops; i++, src_op++) { |
| 407 | src_op++; | 389 | osd_req_encode_op(req, p, src_op); |
| 408 | op++; | 390 | p += sizeof(struct ceph_osd_op); |
| 409 | } | 391 | } |
| 410 | 392 | ||
| 411 | if (req->r_trail) | 393 | /* snaps */ |
| 412 | data_len += req->r_trail->length; | 394 | ceph_encode_64(&p, req->r_snapid); |
| 413 | 395 | ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); | |
| 414 | if (snapc) { | 396 | ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); |
| 415 | head->snap_seq = cpu_to_le64(snapc->seq); | 397 | if (req->r_snapc) { |
| 416 | head->num_snaps = cpu_to_le32(snapc->num_snaps); | ||
| 417 | for (i = 0; i < snapc->num_snaps; i++) { | 398 | for (i = 0; i < snapc->num_snaps; i++) { |
| 418 | put_unaligned_le64(snapc->snaps[i], p); | 399 | ceph_encode_64(&p, req->r_snapc->snaps[i]); |
| 419 | p += sizeof(u64); | ||
| 420 | } | 400 | } |
| 421 | } | 401 | } |
| 422 | 402 | ||
| 403 | req->r_request_attempts = p; | ||
| 404 | p += 4; | ||
| 405 | |||
| 406 | data_len = req->r_trail.length; | ||
| 423 | if (flags & CEPH_OSD_FLAG_WRITE) { | 407 | if (flags & CEPH_OSD_FLAG_WRITE) { |
| 424 | req->r_request->hdr.data_off = cpu_to_le16(off); | 408 | req->r_request->hdr.data_off = cpu_to_le16(off); |
| 425 | req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len); | 409 | data_len += len; |
| 426 | } else if (data_len) { | ||
| 427 | req->r_request->hdr.data_off = 0; | ||
| 428 | req->r_request->hdr.data_len = cpu_to_le32(data_len); | ||
| 429 | } | 410 | } |
| 430 | 411 | req->r_request->hdr.data_len = cpu_to_le32(data_len); | |
| 431 | req->r_request->page_alignment = req->r_page_alignment; | 412 | req->r_request->page_alignment = req->r_page_alignment; |
| 432 | 413 | ||
| 433 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); | 414 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); |
| 434 | msg_size = p - msg->front.iov_base; | 415 | msg_size = p - msg->front.iov_base; |
| 435 | msg->front.iov_len = msg_size; | 416 | msg->front.iov_len = msg_size; |
| 436 | msg->hdr.front_len = cpu_to_le32(msg_size); | 417 | msg->hdr.front_len = cpu_to_le32(msg_size); |
| 418 | |||
| 419 | dout("build_request msg_size was %d num_ops %d\n", (int)msg_size, | ||
| 420 | num_ops); | ||
| 437 | return; | 421 | return; |
| 438 | } | 422 | } |
| 439 | EXPORT_SYMBOL(ceph_osdc_build_request); | 423 | EXPORT_SYMBOL(ceph_osdc_build_request); |
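Review note: because flags, osdmap epoch, pgid, and attempt count change between (re)sends, the builder records a pointer to each slot in the encoded front rather than re-encoding the whole message. A minimal sketch of that save-and-patch pattern — r_epoch_slot is an illustrative field name; only put_unaligned_le32() is real kernel API here:

    /* at build time: reserve the slot and remember where it is */
    req->r_epoch_slot = p;
    p += 4;

    /* at send time: patch the current value into the saved slot */
    put_unaligned_le32(osdc->osdmap->epoch, req->r_epoch_slot);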
| @@ -459,34 +443,33 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 459 | u32 truncate_seq, | 443 | u32 truncate_seq, |
| 460 | u64 truncate_size, | 444 | u64 truncate_size, |
| 461 | struct timespec *mtime, | 445 | struct timespec *mtime, |
| 462 | bool use_mempool, int num_reply, | 446 | bool use_mempool, |
| 463 | int page_align) | 447 | int page_align) |
| 464 | { | 448 | { |
| 465 | struct ceph_osd_req_op ops[3]; | 449 | struct ceph_osd_req_op ops[2]; |
| 466 | struct ceph_osd_request *req; | 450 | struct ceph_osd_request *req; |
| 451 | unsigned int num_op = 1; | ||
| 467 | int r; | 452 | int r; |
| 468 | 453 | ||
| 454 | memset(&ops, 0, sizeof ops); | ||
| 455 | |||
| 469 | ops[0].op = opcode; | 456 | ops[0].op = opcode; |
| 470 | ops[0].extent.truncate_seq = truncate_seq; | 457 | ops[0].extent.truncate_seq = truncate_seq; |
| 471 | ops[0].extent.truncate_size = truncate_size; | 458 | ops[0].extent.truncate_size = truncate_size; |
| 472 | ops[0].payload_len = 0; | ||
| 473 | 459 | ||
| 474 | if (do_sync) { | 460 | if (do_sync) { |
| 475 | ops[1].op = CEPH_OSD_OP_STARTSYNC; | 461 | ops[1].op = CEPH_OSD_OP_STARTSYNC; |
| 476 | ops[1].payload_len = 0; | 462 | num_op++; |
| 477 | ops[2].op = 0; | 463 | } |
| 478 | } else | 464 | |
| 479 | ops[1].op = 0; | 465 | req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool, |
| 480 | 466 | GFP_NOFS); | |
| 481 | req = ceph_osdc_alloc_request(osdc, flags, | ||
| 482 | snapc, ops, | ||
| 483 | use_mempool, | ||
| 484 | GFP_NOFS, NULL, NULL); | ||
| 485 | if (!req) | 467 | if (!req) |
| 486 | return ERR_PTR(-ENOMEM); | 468 | return ERR_PTR(-ENOMEM); |
| 469 | req->r_flags = flags; | ||
| 487 | 470 | ||
| 488 | /* calculate max write size */ | 471 | /* calculate max write size */ |
| 489 | r = calc_layout(osdc, vino, layout, off, plen, req, ops); | 472 | r = calc_layout(vino, layout, off, plen, req, ops); |
| 490 | if (r < 0) | 473 | if (r < 0) |
| 491 | return ERR_PTR(r); | 474 | return ERR_PTR(r); |
| 492 | req->r_file_layout = *layout; /* keep a copy */ | 475 | req->r_file_layout = *layout; /* keep a copy */ |
| @@ -496,10 +479,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 496 | req->r_num_pages = calc_pages_for(page_align, *plen); | 479 | req->r_num_pages = calc_pages_for(page_align, *plen); |
| 497 | req->r_page_alignment = page_align; | 480 | req->r_page_alignment = page_align; |
| 498 | 481 | ||
| 499 | ceph_osdc_build_request(req, off, plen, ops, | 482 | ceph_osdc_build_request(req, off, *plen, num_op, ops, |
| 500 | snapc, | 483 | snapc, vino.snap, mtime); |
| 501 | mtime, | ||
| 502 | req->r_oid, req->r_oid_len); | ||
| 503 | 484 | ||
| 504 | return req; | 485 | return req; |
| 505 | } | 486 | } |
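Review note: the op array changes shape here — the old code zero-terminated it (ops[1].op = 0 / ops[2].op = 0), while the new code passes an explicit num_op, so no terminator slot is needed. A sketch of the new assembly for a read with optional sync:

    struct ceph_osd_req_op ops[2];
    unsigned int num_op = 1;

    memset(ops, 0, sizeof(ops));
    ops[0].op = CEPH_OSD_OP_READ;
    if (do_sync)
            ops[num_op++].op = CEPH_OSD_OP_STARTSYNC;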
| @@ -623,8 +604,8 @@ static void osd_reset(struct ceph_connection *con) | |||
| 623 | down_read(&osdc->map_sem); | 604 | down_read(&osdc->map_sem); |
| 624 | mutex_lock(&osdc->request_mutex); | 605 | mutex_lock(&osdc->request_mutex); |
| 625 | __kick_osd_requests(osdc, osd); | 606 | __kick_osd_requests(osdc, osd); |
| 607 | __send_queued(osdc); | ||
| 626 | mutex_unlock(&osdc->request_mutex); | 608 | mutex_unlock(&osdc->request_mutex); |
| 627 | send_queued(osdc); | ||
| 628 | up_read(&osdc->map_sem); | 609 | up_read(&osdc->map_sem); |
| 629 | } | 610 | } |
| 630 | 611 | ||
| @@ -739,31 +720,35 @@ static void remove_old_osds(struct ceph_osd_client *osdc) | |||
| 739 | */ | 720 | */ |
| 740 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | 721 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
| 741 | { | 722 | { |
| 742 | struct ceph_osd_request *req; | 723 | struct ceph_entity_addr *peer_addr; |
| 743 | int ret = 0; | ||
| 744 | 724 | ||
| 745 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); | 725 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); |
| 746 | if (list_empty(&osd->o_requests) && | 726 | if (list_empty(&osd->o_requests) && |
| 747 | list_empty(&osd->o_linger_requests)) { | 727 | list_empty(&osd->o_linger_requests)) { |
| 748 | __remove_osd(osdc, osd); | 728 | __remove_osd(osdc, osd); |
| 749 | ret = -ENODEV; | 729 | |
| 750 | } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], | 730 | return -ENODEV; |
| 751 | &osd->o_con.peer_addr, | 731 | } |
| 752 | sizeof(osd->o_con.peer_addr)) == 0 && | 732 | |
| 753 | !ceph_con_opened(&osd->o_con)) { | 733 | peer_addr = &osdc->osdmap->osd_addr[osd->o_osd]; |
| 734 | if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && | ||
| 735 | !ceph_con_opened(&osd->o_con)) { | ||
| 736 | struct ceph_osd_request *req; | ||
| 737 | |||
| 754 | dout(" osd addr hasn't changed and connection never opened," | 738 | dout(" osd addr hasn't changed and connection never opened," |
| 755 | " letting msgr retry"); | 739 | " letting msgr retry"); |
| 756 | /* touch each r_stamp for handle_timeout()'s benefit */ | 740 | /* touch each r_stamp for handle_timeout()'s benefit */ |
| 757 | list_for_each_entry(req, &osd->o_requests, r_osd_item) | 741 | list_for_each_entry(req, &osd->o_requests, r_osd_item) |
| 758 | req->r_stamp = jiffies; | 742 | req->r_stamp = jiffies; |
| 759 | ret = -EAGAIN; | 743 | |
| 760 | } else { | 744 | return -EAGAIN; |
| 761 | ceph_con_close(&osd->o_con); | ||
| 762 | ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, | ||
| 763 | &osdc->osdmap->osd_addr[osd->o_osd]); | ||
| 764 | osd->o_incarnation++; | ||
| 765 | } | 745 | } |
| 766 | return ret; | 746 | |
| 747 | ceph_con_close(&osd->o_con); | ||
| 748 | ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); | ||
| 749 | osd->o_incarnation++; | ||
| 750 | |||
| 751 | return 0; | ||
| 767 | } | 752 | } |
| 768 | 753 | ||
| 769 | static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) | 754 | static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) |
| @@ -961,20 +946,18 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger); | |||
| 961 | static int __map_request(struct ceph_osd_client *osdc, | 946 | static int __map_request(struct ceph_osd_client *osdc, |
| 962 | struct ceph_osd_request *req, int force_resend) | 947 | struct ceph_osd_request *req, int force_resend) |
| 963 | { | 948 | { |
| 964 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; | ||
| 965 | struct ceph_pg pgid; | 949 | struct ceph_pg pgid; |
| 966 | int acting[CEPH_PG_MAX_SIZE]; | 950 | int acting[CEPH_PG_MAX_SIZE]; |
| 967 | int o = -1, num = 0; | 951 | int o = -1, num = 0; |
| 968 | int err; | 952 | int err; |
| 969 | 953 | ||
| 970 | dout("map_request %p tid %lld\n", req, req->r_tid); | 954 | dout("map_request %p tid %lld\n", req, req->r_tid); |
| 971 | err = ceph_calc_object_layout(&reqhead->layout, req->r_oid, | 955 | err = ceph_calc_object_layout(&pgid, req->r_oid, |
| 972 | &req->r_file_layout, osdc->osdmap); | 956 | &req->r_file_layout, osdc->osdmap); |
| 973 | if (err) { | 957 | if (err) { |
| 974 | list_move(&req->r_req_lru_item, &osdc->req_notarget); | 958 | list_move(&req->r_req_lru_item, &osdc->req_notarget); |
| 975 | return err; | 959 | return err; |
| 976 | } | 960 | } |
| 977 | pgid = reqhead->layout.ol_pgid; | ||
| 978 | req->r_pgid = pgid; | 961 | req->r_pgid = pgid; |
| 979 | 962 | ||
| 980 | err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting); | 963 | err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting); |
| @@ -991,8 +974,8 @@ static int __map_request(struct ceph_osd_client *osdc, | |||
| 991 | (req->r_osd == NULL && o == -1)) | 974 | (req->r_osd == NULL && o == -1)) |
| 992 | return 0; /* no change */ | 975 | return 0; /* no change */ |
| 993 | 976 | ||
| 994 | dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n", | 977 | dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n", |
| 995 | req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, | 978 | req->r_tid, pgid.pool, pgid.seed, o, |
| 996 | req->r_osd ? req->r_osd->o_osd : -1); | 979 | req->r_osd ? req->r_osd->o_osd : -1); |
| 997 | 980 | ||
| 998 | /* record full pg acting set */ | 981 | /* record full pg acting set */ |
| @@ -1041,15 +1024,22 @@ out: | |||
| 1041 | static void __send_request(struct ceph_osd_client *osdc, | 1024 | static void __send_request(struct ceph_osd_client *osdc, |
| 1042 | struct ceph_osd_request *req) | 1025 | struct ceph_osd_request *req) |
| 1043 | { | 1026 | { |
| 1044 | struct ceph_osd_request_head *reqhead; | 1027 | void *p; |
| 1045 | |||
| 1046 | dout("send_request %p tid %llu to osd%d flags %d\n", | ||
| 1047 | req, req->r_tid, req->r_osd->o_osd, req->r_flags); | ||
| 1048 | 1028 | ||
| 1049 | reqhead = req->r_request->front.iov_base; | 1029 | dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n", |
| 1050 | reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch); | 1030 | req, req->r_tid, req->r_osd->o_osd, req->r_flags, |
| 1051 | reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ | 1031 | (unsigned long long)req->r_pgid.pool, req->r_pgid.seed); |
| 1052 | reqhead->reassert_version = req->r_reassert_version; | 1032 | |
| 1033 | /* fill in message content that changes each time we send it */ | ||
| 1034 | put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch); | ||
| 1035 | put_unaligned_le32(req->r_flags, req->r_request_flags); | ||
| 1036 | put_unaligned_le64(req->r_pgid.pool, req->r_request_pool); | ||
| 1037 | p = req->r_request_pgid; | ||
| 1038 | ceph_encode_64(&p, req->r_pgid.pool); | ||
| 1039 | ceph_encode_32(&p, req->r_pgid.seed); | ||
| 1040 | put_unaligned_le64(1, req->r_request_attempts); /* FIXME */ | ||
| 1041 | memcpy(req->r_request_reassert_version, &req->r_reassert_version, | ||
| 1042 | sizeof(req->r_reassert_version)); | ||
| 1053 | 1043 | ||
| 1054 | req->r_stamp = jiffies; | 1044 | req->r_stamp = jiffies; |
| 1055 | list_move_tail(&req->r_req_lru_item, &osdc->req_lru); | 1045 | list_move_tail(&req->r_req_lru_item, &osdc->req_lru); |
| @@ -1062,16 +1052,13 @@ static void __send_request(struct ceph_osd_client *osdc, | |||
| 1062 | /* | 1052 | /* |
| 1063 | * Send any requests in the queue (req_unsent). | 1053 | * Send any requests in the queue (req_unsent). |
| 1064 | */ | 1054 | */ |
| 1065 | static void send_queued(struct ceph_osd_client *osdc) | 1055 | static void __send_queued(struct ceph_osd_client *osdc) |
| 1066 | { | 1056 | { |
| 1067 | struct ceph_osd_request *req, *tmp; | 1057 | struct ceph_osd_request *req, *tmp; |
| 1068 | 1058 | ||
| 1069 | dout("send_queued\n"); | 1059 | dout("__send_queued\n"); |
| 1070 | mutex_lock(&osdc->request_mutex); | 1060 | list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) |
| 1071 | list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) { | ||
| 1072 | __send_request(osdc, req); | 1061 | __send_request(osdc, req); |
| 1073 | } | ||
| 1074 | mutex_unlock(&osdc->request_mutex); | ||
| 1075 | } | 1062 | } |
| 1076 | 1063 | ||
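Review note: the rename from send_queued() to __send_queued() follows the kernel convention that a double-underscore helper expects its lock (here request_mutex) already held; the locking moves out to the callers, as the osd_reset() hunk above now shows:

    mutex_lock(&osdc->request_mutex);
    __kick_osd_requests(osdc, osd);
    __send_queued(osdc);            /* runs under request_mutex */
    mutex_unlock(&osdc->request_mutex);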
| 1077 | /* | 1064 | /* |
| @@ -1123,8 +1110,8 @@ static void handle_timeout(struct work_struct *work) | |||
| 1123 | } | 1110 | } |
| 1124 | 1111 | ||
| 1125 | __schedule_osd_timeout(osdc); | 1112 | __schedule_osd_timeout(osdc); |
| 1113 | __send_queued(osdc); | ||
| 1126 | mutex_unlock(&osdc->request_mutex); | 1114 | mutex_unlock(&osdc->request_mutex); |
| 1127 | send_queued(osdc); | ||
| 1128 | up_read(&osdc->map_sem); | 1115 | up_read(&osdc->map_sem); |
| 1129 | } | 1116 | } |
| 1130 | 1117 | ||
| @@ -1152,6 +1139,26 @@ static void complete_request(struct ceph_osd_request *req) | |||
| 1152 | complete_all(&req->r_safe_completion); /* fsync waiter */ | 1139 | complete_all(&req->r_safe_completion); /* fsync waiter */ |
| 1153 | } | 1140 | } |
| 1154 | 1141 | ||
| 1142 | static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid) | ||
| 1143 | { | ||
| 1144 | __u8 v; | ||
| 1145 | |||
| 1146 | ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad); | ||
| 1147 | v = ceph_decode_8(p); | ||
| 1148 | if (v > 1) { | ||
| 1149 | pr_warning("do not understand pg encoding %d > 1\n", v); | ||
| 1150 | return -EINVAL; | ||
| 1151 | } | ||
| 1152 | pgid->pool = ceph_decode_64(p); | ||
| 1153 | pgid->seed = ceph_decode_32(p); | ||
| 1154 | *p += 4; | ||
| 1155 | return 0; | ||
| 1156 | |||
| 1157 | bad: | ||
| 1158 | pr_warning("incomplete pg encoding\n"); | ||
| 1159 | return -EINVAL; | ||
| 1160 | } | ||
| 1161 | |||
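Review note: a sketch of how the new helper is meant to be called against a message front; handle_reply() below uses it exactly this way after skipping the object name:

    void *p = msg->front.iov_base;
    void *end = p + msg->front.iov_len;
    struct ceph_pg pg;

    if (__decode_pgid(&p, end, &pg))
            goto bad;
    dout("pgid %lld.%x\n", pg.pool, pg.seed);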
| 1155 | /* | 1162 | /* |
| 1156 | * handle osd op reply. either call the callback if it is specified, | 1163 | * handle osd op reply. either call the callback if it is specified, |
| 1157 | * or do the completion to wake up the waiting thread. | 1164 | * or do the completion to wake up the waiting thread. |
| @@ -1159,22 +1166,42 @@ static void complete_request(struct ceph_osd_request *req) | |||
| 1159 | static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | 1166 | static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, |
| 1160 | struct ceph_connection *con) | 1167 | struct ceph_connection *con) |
| 1161 | { | 1168 | { |
| 1162 | struct ceph_osd_reply_head *rhead = msg->front.iov_base; | 1169 | void *p, *end; |
| 1163 | struct ceph_osd_request *req; | 1170 | struct ceph_osd_request *req; |
| 1164 | u64 tid; | 1171 | u64 tid; |
| 1165 | int numops, object_len, flags; | 1172 | int object_len; |
| 1173 | int numops, payload_len, flags; | ||
| 1166 | s32 result; | 1174 | s32 result; |
| 1175 | s32 retry_attempt; | ||
| 1176 | struct ceph_pg pg; | ||
| 1177 | int err; | ||
| 1178 | u32 reassert_epoch; | ||
| 1179 | u64 reassert_version; | ||
| 1180 | u32 osdmap_epoch; | ||
| 1181 | int i; | ||
| 1167 | 1182 | ||
| 1168 | tid = le64_to_cpu(msg->hdr.tid); | 1183 | tid = le64_to_cpu(msg->hdr.tid); |
| 1169 | if (msg->front.iov_len < sizeof(*rhead)) | 1184 | dout("handle_reply %p tid %llu\n", msg, tid); |
| 1170 | goto bad; | 1185 | |
| 1171 | numops = le32_to_cpu(rhead->num_ops); | 1186 | p = msg->front.iov_base; |
| 1172 | object_len = le32_to_cpu(rhead->object_len); | 1187 | end = p + msg->front.iov_len; |
| 1173 | result = le32_to_cpu(rhead->result); | 1188 | |
| 1174 | if (msg->front.iov_len != sizeof(*rhead) + object_len + | 1189 | ceph_decode_need(&p, end, 4, bad); |
| 1175 | numops * sizeof(struct ceph_osd_op)) | 1190 | object_len = ceph_decode_32(&p); |
| 1191 | ceph_decode_need(&p, end, object_len, bad); | ||
| 1192 | p += object_len; | ||
| 1193 | |||
| 1194 | err = __decode_pgid(&p, end, &pg); | ||
| 1195 | if (err) | ||
| 1176 | goto bad; | 1196 | goto bad; |
| 1177 | dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result); | 1197 | |
| 1198 | ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad); | ||
| 1199 | flags = ceph_decode_64(&p); | ||
| 1200 | result = ceph_decode_32(&p); | ||
| 1201 | reassert_epoch = ceph_decode_32(&p); | ||
| 1202 | reassert_version = ceph_decode_64(&p); | ||
| 1203 | osdmap_epoch = ceph_decode_32(&p); | ||
| 1204 | |||
| 1178 | /* lookup */ | 1205 | /* lookup */ |
| 1179 | mutex_lock(&osdc->request_mutex); | 1206 | mutex_lock(&osdc->request_mutex); |
| 1180 | req = __lookup_request(osdc, tid); | 1207 | req = __lookup_request(osdc, tid); |
| @@ -1184,7 +1211,38 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
| 1184 | return; | 1211 | return; |
| 1185 | } | 1212 | } |
| 1186 | ceph_osdc_get_request(req); | 1213 | ceph_osdc_get_request(req); |
| 1187 | flags = le32_to_cpu(rhead->flags); | 1214 | |
| 1215 | dout("handle_reply %p tid %llu req %p result %d\n", msg, tid, | ||
| 1216 | req, result); | ||
| 1217 | |||
| 1218 | ceph_decode_need(&p, end, 4, bad); | ||
| 1219 | numops = ceph_decode_32(&p); | ||
| 1220 | if (numops > CEPH_OSD_MAX_OP) | ||
| 1221 | goto bad_put; | ||
| 1222 | if (numops != req->r_num_ops) | ||
| 1223 | goto bad_put; | ||
| 1224 | payload_len = 0; | ||
| 1225 | ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad); | ||
| 1226 | for (i = 0; i < numops; i++) { | ||
| 1227 | struct ceph_osd_op *op = p; | ||
| 1228 | int len; | ||
| 1229 | |||
| 1230 | len = le32_to_cpu(op->payload_len); | ||
| 1231 | req->r_reply_op_len[i] = len; | ||
| 1232 | dout(" op %d has %d bytes\n", i, len); | ||
| 1233 | payload_len += len; | ||
| 1234 | p += sizeof(*op); | ||
| 1235 | } | ||
| 1236 | if (payload_len != le32_to_cpu(msg->hdr.data_len)) { | ||
| 1237 | pr_warning("sum of op payload lens %d != data_len %d", | ||
| 1238 | payload_len, le32_to_cpu(msg->hdr.data_len)); | ||
| 1239 | goto bad_put; | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | ceph_decode_need(&p, end, 4 + numops * 4, bad); | ||
| 1243 | retry_attempt = ceph_decode_32(&p); | ||
| 1244 | for (i = 0; i < numops; i++) | ||
| 1245 | req->r_reply_op_result[i] = ceph_decode_32(&p); | ||
| 1188 | 1246 | ||
| 1189 | /* | 1247 | /* |
| 1190 | * if this connection filled our message, drop our reference now, to | 1248 | * if this connection filled our message, drop our reference now, to |
| @@ -1199,7 +1257,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
| 1199 | if (!req->r_got_reply) { | 1257 | if (!req->r_got_reply) { |
| 1200 | unsigned int bytes; | 1258 | unsigned int bytes; |
| 1201 | 1259 | ||
| 1202 | req->r_result = le32_to_cpu(rhead->result); | 1260 | req->r_result = result; |
| 1203 | bytes = le32_to_cpu(msg->hdr.data_len); | 1261 | bytes = le32_to_cpu(msg->hdr.data_len); |
| 1204 | dout("handle_reply result %d bytes %d\n", req->r_result, | 1262 | dout("handle_reply result %d bytes %d\n", req->r_result, |
| 1205 | bytes); | 1263 | bytes); |
| @@ -1207,7 +1265,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
| 1207 | req->r_result = bytes; | 1265 | req->r_result = bytes; |
| 1208 | 1266 | ||
| 1209 | /* in case this is a write and we need to replay, */ | 1267 | /* in case this is a write and we need to replay, */ |
| 1210 | req->r_reassert_version = rhead->reassert_version; | 1268 | req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch); |
| 1269 | req->r_reassert_version.version = cpu_to_le64(reassert_version); | ||
| 1211 | 1270 | ||
| 1212 | req->r_got_reply = 1; | 1271 | req->r_got_reply = 1; |
| 1213 | } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { | 1272 | } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { |
| @@ -1242,10 +1301,11 @@ done: | |||
| 1242 | ceph_osdc_put_request(req); | 1301 | ceph_osdc_put_request(req); |
| 1243 | return; | 1302 | return; |
| 1244 | 1303 | ||
| 1304 | bad_put: | ||
| 1305 | ceph_osdc_put_request(req); | ||
| 1245 | bad: | 1306 | bad: |
| 1246 | pr_err("corrupt osd_op_reply got %d %d expected %d\n", | 1307 | pr_err("corrupt osd_op_reply got %d %d\n", |
| 1247 | (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len), | 1308 | (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); |
| 1248 | (int)sizeof(*rhead)); | ||
| 1249 | ceph_msg_dump(msg); | 1309 | ceph_msg_dump(msg); |
| 1250 | } | 1310 | } |
| 1251 | 1311 | ||
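Review note: with the reply decode filling r_reply_op_len[] and r_reply_op_result[], a completion callback can now examine each op individually. A hypothetical callback — the field names match the hunk above, but the callback itself is illustrative:

    static void my_callback(struct ceph_osd_request *req, struct ceph_msg *msg)
    {
            int i;

            for (i = 0; i < req->r_num_ops; i++)
                    dout("op %d: result %d, payload %u bytes\n", i,
                         req->r_reply_op_result[i], req->r_reply_op_len[i]);
    }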
| @@ -1462,7 +1522,9 @@ done: | |||
| 1462 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) | 1522 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) |
| 1463 | ceph_monc_request_next_osdmap(&osdc->client->monc); | 1523 | ceph_monc_request_next_osdmap(&osdc->client->monc); |
| 1464 | 1524 | ||
| 1465 | send_queued(osdc); | 1525 | mutex_lock(&osdc->request_mutex); |
| 1526 | __send_queued(osdc); | ||
| 1527 | mutex_unlock(&osdc->request_mutex); | ||
| 1466 | up_read(&osdc->map_sem); | 1528 | up_read(&osdc->map_sem); |
| 1467 | wake_up_all(&osdc->client->auth_wq); | 1529 | wake_up_all(&osdc->client->auth_wq); |
| 1468 | return; | 1530 | return; |
| @@ -1556,8 +1618,7 @@ static void __remove_event(struct ceph_osd_event *event) | |||
| 1556 | 1618 | ||
| 1557 | int ceph_osdc_create_event(struct ceph_osd_client *osdc, | 1619 | int ceph_osdc_create_event(struct ceph_osd_client *osdc, |
| 1558 | void (*event_cb)(u64, u64, u8, void *), | 1620 | void (*event_cb)(u64, u64, u8, void *), |
| 1559 | int one_shot, void *data, | 1621 | void *data, struct ceph_osd_event **pevent) |
| 1560 | struct ceph_osd_event **pevent) | ||
| 1561 | { | 1622 | { |
| 1562 | struct ceph_osd_event *event; | 1623 | struct ceph_osd_event *event; |
| 1563 | 1624 | ||
| @@ -1567,14 +1628,13 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc, | |||
| 1567 | 1628 | ||
| 1568 | dout("create_event %p\n", event); | 1629 | dout("create_event %p\n", event); |
| 1569 | event->cb = event_cb; | 1630 | event->cb = event_cb; |
| 1570 | event->one_shot = one_shot; | 1631 | event->one_shot = 0; |
| 1571 | event->data = data; | 1632 | event->data = data; |
| 1572 | event->osdc = osdc; | 1633 | event->osdc = osdc; |
| 1573 | INIT_LIST_HEAD(&event->osd_node); | 1634 | INIT_LIST_HEAD(&event->osd_node); |
| 1574 | RB_CLEAR_NODE(&event->node); | 1635 | RB_CLEAR_NODE(&event->node); |
| 1575 | kref_init(&event->kref); /* one ref for us */ | 1636 | kref_init(&event->kref); /* one ref for us */ |
| 1576 | kref_get(&event->kref); /* one ref for the caller */ | 1637 | kref_get(&event->kref); /* one ref for the caller */ |
| 1577 | init_completion(&event->completion); | ||
| 1578 | 1638 | ||
| 1579 | spin_lock(&osdc->event_lock); | 1639 | spin_lock(&osdc->event_lock); |
| 1580 | event->cookie = ++osdc->event_count; | 1640 | event->cookie = ++osdc->event_count; |
| @@ -1610,7 +1670,6 @@ static void do_event_work(struct work_struct *work) | |||
| 1610 | 1670 | ||
| 1611 | dout("do_event_work completing %p\n", event); | 1671 | dout("do_event_work completing %p\n", event); |
| 1612 | event->cb(ver, notify_id, opcode, event->data); | 1672 | event->cb(ver, notify_id, opcode, event->data); |
| 1613 | complete(&event->completion); | ||
| 1614 | dout("do_event_work completed %p\n", event); | 1673 | dout("do_event_work completed %p\n", event); |
| 1615 | ceph_osdc_put_event(event); | 1674 | ceph_osdc_put_event(event); |
| 1616 | kfree(event_work); | 1675 | kfree(event_work); |
| @@ -1620,7 +1679,8 @@ static void do_event_work(struct work_struct *work) | |||
| 1620 | /* | 1679 | /* |
| 1621 | * Process osd watch notifications | 1680 | * Process osd watch notifications |
| 1622 | */ | 1681 | */ |
| 1623 | void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg) | 1682 | static void handle_watch_notify(struct ceph_osd_client *osdc, |
| 1683 | struct ceph_msg *msg) | ||
| 1624 | { | 1684 | { |
| 1625 | void *p, *end; | 1685 | void *p, *end; |
| 1626 | u8 proto_ver; | 1686 | u8 proto_ver; |
| @@ -1641,9 +1701,8 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |||
| 1641 | spin_lock(&osdc->event_lock); | 1701 | spin_lock(&osdc->event_lock); |
| 1642 | event = __find_event(osdc, cookie); | 1702 | event = __find_event(osdc, cookie); |
| 1643 | if (event) { | 1703 | if (event) { |
| 1704 | BUG_ON(event->one_shot); | ||
| 1644 | get_event(event); | 1705 | get_event(event); |
| 1645 | if (event->one_shot) | ||
| 1646 | __remove_event(event); | ||
| 1647 | } | 1706 | } |
| 1648 | spin_unlock(&osdc->event_lock); | 1707 | spin_unlock(&osdc->event_lock); |
| 1649 | dout("handle_watch_notify cookie %lld ver %lld event %p\n", | 1708 | dout("handle_watch_notify cookie %lld ver %lld event %p\n", |
| @@ -1668,7 +1727,6 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |||
| 1668 | return; | 1727 | return; |
| 1669 | 1728 | ||
| 1670 | done_err: | 1729 | done_err: |
| 1671 | complete(&event->completion); | ||
| 1672 | ceph_osdc_put_event(event); | 1730 | ceph_osdc_put_event(event); |
| 1673 | return; | 1731 | return; |
| 1674 | 1732 | ||
| @@ -1677,21 +1735,6 @@ bad: | |||
| 1677 | return; | 1735 | return; |
| 1678 | } | 1736 | } |
| 1679 | 1737 | ||
| 1680 | int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout) | ||
| 1681 | { | ||
| 1682 | int err; | ||
| 1683 | |||
| 1684 | dout("wait_event %p\n", event); | ||
| 1685 | err = wait_for_completion_interruptible_timeout(&event->completion, | ||
| 1686 | timeout * HZ); | ||
| 1687 | ceph_osdc_put_event(event); | ||
| 1688 | if (err > 0) | ||
| 1689 | err = 0; | ||
| 1690 | dout("wait_event %p returns %d\n", event, err); | ||
| 1691 | return err; | ||
| 1692 | } | ||
| 1693 | EXPORT_SYMBOL(ceph_osdc_wait_event); | ||
| 1694 | |||
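Review note: with the completion and one_shot flag gone, events are purely callback-driven; there is no ceph_osdc_wait_event() left to block on. A hypothetical caller against the new signature (my_watch_cb and my_data are illustrative):

    struct ceph_osd_event *event;
    int ret;

    ret = ceph_osdc_create_event(osdc, my_watch_cb, my_data, &event);
    if (ret)
            return ret;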
| 1695 | /* | 1738 | /* |
| 1696 | * Register request, send initial attempt. | 1739 | * Register request, send initial attempt. |
| 1697 | */ | 1740 | */ |
| @@ -1706,7 +1749,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |||
| 1706 | #ifdef CONFIG_BLOCK | 1749 | #ifdef CONFIG_BLOCK |
| 1707 | req->r_request->bio = req->r_bio; | 1750 | req->r_request->bio = req->r_bio; |
| 1708 | #endif | 1751 | #endif |
| 1709 | req->r_request->trail = req->r_trail; | 1752 | req->r_request->trail = &req->r_trail; |
| 1710 | 1753 | ||
| 1711 | register_request(osdc, req); | 1754 | register_request(osdc, req); |
| 1712 | 1755 | ||
| @@ -1865,7 +1908,6 @@ out_mempool: | |||
| 1865 | out: | 1908 | out: |
| 1866 | return err; | 1909 | return err; |
| 1867 | } | 1910 | } |
| 1868 | EXPORT_SYMBOL(ceph_osdc_init); | ||
| 1869 | 1911 | ||
| 1870 | void ceph_osdc_stop(struct ceph_osd_client *osdc) | 1912 | void ceph_osdc_stop(struct ceph_osd_client *osdc) |
| 1871 | { | 1913 | { |
| @@ -1882,7 +1924,6 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) | |||
| 1882 | ceph_msgpool_destroy(&osdc->msgpool_op); | 1924 | ceph_msgpool_destroy(&osdc->msgpool_op); |
| 1883 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); | 1925 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); |
| 1884 | } | 1926 | } |
| 1885 | EXPORT_SYMBOL(ceph_osdc_stop); | ||
| 1886 | 1927 | ||
| 1887 | /* | 1928 | /* |
| 1888 | * Read some contiguous pages. If we cross a stripe boundary, shorten | 1929 | * Read some contiguous pages. If we cross a stripe boundary, shorten |
| @@ -1902,7 +1943,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |||
| 1902 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, | 1943 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, |
| 1903 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, | 1944 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, |
| 1904 | NULL, 0, truncate_seq, truncate_size, NULL, | 1945 | NULL, 0, truncate_seq, truncate_size, NULL, |
| 1905 | false, 1, page_align); | 1946 | false, page_align); |
| 1906 | if (IS_ERR(req)) | 1947 | if (IS_ERR(req)) |
| 1907 | return PTR_ERR(req); | 1948 | return PTR_ERR(req); |
| 1908 | 1949 | ||
| @@ -1931,8 +1972,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
| 1931 | u64 off, u64 len, | 1972 | u64 off, u64 len, |
| 1932 | u32 truncate_seq, u64 truncate_size, | 1973 | u32 truncate_seq, u64 truncate_size, |
| 1933 | struct timespec *mtime, | 1974 | struct timespec *mtime, |
| 1934 | struct page **pages, int num_pages, | 1975 | struct page **pages, int num_pages) |
| 1935 | int flags, int do_sync, bool nofail) | ||
| 1936 | { | 1976 | { |
| 1937 | struct ceph_osd_request *req; | 1977 | struct ceph_osd_request *req; |
| 1938 | int rc = 0; | 1978 | int rc = 0; |
| @@ -1941,11 +1981,10 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
| 1941 | BUG_ON(vino.snap != CEPH_NOSNAP); | 1981 | BUG_ON(vino.snap != CEPH_NOSNAP); |
| 1942 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, | 1982 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, |
| 1943 | CEPH_OSD_OP_WRITE, | 1983 | CEPH_OSD_OP_WRITE, |
| 1944 | flags | CEPH_OSD_FLAG_ONDISK | | 1984 | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, |
| 1945 | CEPH_OSD_FLAG_WRITE, | 1985 | snapc, 0, |
| 1946 | snapc, do_sync, | ||
| 1947 | truncate_seq, truncate_size, mtime, | 1986 | truncate_seq, truncate_size, mtime, |
| 1948 | nofail, 1, page_align); | 1987 | true, page_align); |
| 1949 | if (IS_ERR(req)) | 1988 | if (IS_ERR(req)) |
| 1950 | return PTR_ERR(req); | 1989 | return PTR_ERR(req); |
| 1951 | 1990 | ||
| @@ -1954,7 +1993,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
| 1954 | dout("writepages %llu~%llu (%d pages)\n", off, len, | 1993 | dout("writepages %llu~%llu (%d pages)\n", off, len, |
| 1955 | req->r_num_pages); | 1994 | req->r_num_pages); |
| 1956 | 1995 | ||
| 1957 | rc = ceph_osdc_start_request(osdc, req, nofail); | 1996 | rc = ceph_osdc_start_request(osdc, req, true); |
| 1958 | if (!rc) | 1997 | if (!rc) |
| 1959 | rc = ceph_osdc_wait_request(osdc, req); | 1998 | rc = ceph_osdc_wait_request(osdc, req); |
| 1960 | 1999 | ||
| @@ -2047,7 +2086,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
| 2047 | if (data_len > 0) { | 2086 | if (data_len > 0) { |
| 2048 | int want = calc_pages_for(req->r_page_alignment, data_len); | 2087 | int want = calc_pages_for(req->r_page_alignment, data_len); |
| 2049 | 2088 | ||
| 2050 | if (unlikely(req->r_num_pages < want)) { | 2089 | if (req->r_pages && unlikely(req->r_num_pages < want)) { |
| 2051 | pr_warning("tid %lld reply has %d bytes %d pages, we" | 2090 | pr_warning("tid %lld reply has %d bytes %d pages, we" |
| 2052 | " had only %d pages ready\n", tid, data_len, | 2091 | " had only %d pages ready\n", tid, data_len, |
| 2053 | want, req->r_num_pages); | 2092 | want, req->r_num_pages); |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index de73214b5d26..69bc4bf89e3e 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
| @@ -13,26 +13,18 @@ | |||
| 13 | 13 | ||
| 14 | char *ceph_osdmap_state_str(char *str, int len, int state) | 14 | char *ceph_osdmap_state_str(char *str, int len, int state) |
| 15 | { | 15 | { |
| 16 | int flag = 0; | ||
| 17 | |||
| 18 | if (!len) | 16 | if (!len) |
| 19 | goto done; | 17 | return str; |
| 20 | 18 | ||
| 21 | *str = '\0'; | 19 | if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) |
| 22 | if (state) { | 20 | snprintf(str, len, "exists, up"); |
| 23 | if (state & CEPH_OSD_EXISTS) { | 21 | else if (state & CEPH_OSD_EXISTS) |
| 24 | snprintf(str, len, "exists"); | 22 | snprintf(str, len, "exists"); |
| 25 | flag = 1; | 23 | else if (state & CEPH_OSD_UP) |
| 26 | } | 24 | snprintf(str, len, "up"); |
| 27 | if (state & CEPH_OSD_UP) { | 25 | else |
| 28 | snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""), | ||
| 29 | "up"); | ||
| 30 | flag = 1; | ||
| 31 | } | ||
| 32 | } else { | ||
| 33 | snprintf(str, len, "doesn't exist"); | 26 | snprintf(str, len, "doesn't exist"); |
| 34 | } | 27 | |
| 35 | done: | ||
| 36 | return str; | 28 | return str; |
| 37 | } | 29 | } |
| 38 | 30 | ||
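Review note: with the rewrite, every state bitmask maps to exactly one of four strings, and the old self-referencing snprintf(str, len, "%s%s%s", str, ...) — undefined behavior, since the destination buffer is also passed as a source — is gone. Example call, with an illustrative buffer size:

    char buf[32];

    pr_info("osd%d state: %s\n", osd,
            ceph_osdmap_state_str(buf, sizeof(buf), state));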
| @@ -53,13 +45,8 @@ static int calc_bits_of(unsigned int t) | |||
| 53 | */ | 45 | */ |
| 54 | static void calc_pg_masks(struct ceph_pg_pool_info *pi) | 46 | static void calc_pg_masks(struct ceph_pg_pool_info *pi) |
| 55 | { | 47 | { |
| 56 | pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1; | 48 | pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; |
| 57 | pi->pgp_num_mask = | 49 | pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; |
| 58 | (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1; | ||
| 59 | pi->lpg_num_mask = | ||
| 60 | (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1; | ||
| 61 | pi->lpgp_num_mask = | ||
| 62 | (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1; | ||
| 63 | } | 50 | } |
| 64 | 51 | ||
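Review note: the mask arithmetic is unchanged; only its inputs move from the wire-endian pi->v fields to native-endian pi->pg_num/pgp_num. calc_bits_of(n - 1) yields the width of the smallest power of two >= n, so the mask spans [0, 2^k - 1]. A worked example:

    /* pg_num = 12: calc_bits_of(11) = 4, so */
    u32 mask = (1 << 4) - 1;        /* 0xf: hashes fold into [0, 15] */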
| 65 | /* | 52 | /* |
| @@ -170,6 +157,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) | |||
| 170 | c->choose_local_tries = 2; | 157 | c->choose_local_tries = 2; |
| 171 | c->choose_local_fallback_tries = 5; | 158 | c->choose_local_fallback_tries = 5; |
| 172 | c->choose_total_tries = 19; | 159 | c->choose_total_tries = 19; |
| 160 | c->chooseleaf_descend_once = 0; | ||
| 173 | 161 | ||
| 174 | ceph_decode_need(p, end, 4*sizeof(u32), bad); | 162 | ceph_decode_need(p, end, 4*sizeof(u32), bad); |
| 175 | magic = ceph_decode_32(p); | 163 | magic = ceph_decode_32(p); |
| @@ -336,6 +324,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end) | |||
| 336 | dout("crush decode tunable choose_total_tries = %d", | 324 | dout("crush decode tunable choose_total_tries = %d", |
| 337 | c->choose_total_tries); | 325 | c->choose_total_tries); |
| 338 | 326 | ||
| 327 | ceph_decode_need(p, end, sizeof(u32), done); | ||
| 328 | c->chooseleaf_descend_once = ceph_decode_32(p); | ||
| 329 | dout("crush decode tunable chooseleaf_descend_once = %d\n", | ||
| 330 | c->chooseleaf_descend_once); | ||
| 331 | |||
| 339 | done: | 332 | done: |
| 340 | dout("crush_decode success\n"); | 333 | dout("crush_decode success\n"); |
| 341 | return c; | 334 | return c; |
| @@ -354,12 +347,13 @@ bad: | |||
| 354 | */ | 347 | */ |
| 355 | static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) | 348 | static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) |
| 356 | { | 349 | { |
| 357 | u64 a = *(u64 *)&l; | 350 | if (l.pool < r.pool) |
| 358 | u64 b = *(u64 *)&r; | 351 | return -1; |
| 359 | 352 | if (l.pool > r.pool) | |
| 360 | if (a < b) | 353 | return 1; |
| 354 | if (l.seed < r.seed) | ||
| 361 | return -1; | 355 | return -1; |
| 362 | if (a > b) | 356 | if (l.seed > r.seed) |
| 363 | return 1; | 357 | return 1; |
| 364 | return 0; | 358 | return 0; |
| 365 | } | 359 | } |
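Review note: the old comparison reinterpreted struct ceph_pg as a single u64, which only worked while the struct was exactly 8 bytes; with a 64-bit pool plus a 32-bit seed that punning no longer fits (and it compared bytes in layout order, not semantic order). Field-wise comparison is the portable shape for any composite rbtree key — a sketch with illustrative names:

    static int key_cmp(struct key a, struct key b)
    {
            if (a.major != b.major)
                    return a.major < b.major ? -1 : 1;
            if (a.minor != b.minor)
                    return a.minor < b.minor ? -1 : 1;
            return 0;
    }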
| @@ -405,8 +399,8 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, | |||
| 405 | } else if (c > 0) { | 399 | } else if (c > 0) { |
| 406 | n = n->rb_right; | 400 | n = n->rb_right; |
| 407 | } else { | 401 | } else { |
| 408 | dout("__lookup_pg_mapping %llx got %p\n", | 402 | dout("__lookup_pg_mapping %lld.%x got %p\n", |
| 409 | *(u64 *)&pgid, pg); | 403 | pgid.pool, pgid.seed, pg); |
| 410 | return pg; | 404 | return pg; |
| 411 | } | 405 | } |
| 412 | } | 406 | } |
| @@ -418,12 +412,13 @@ static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) | |||
| 418 | struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); | 412 | struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); |
| 419 | 413 | ||
| 420 | if (pg) { | 414 | if (pg) { |
| 421 | dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg); | 415 | dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed, |
| 416 | pg); | ||
| 422 | rb_erase(&pg->node, root); | 417 | rb_erase(&pg->node, root); |
| 423 | kfree(pg); | 418 | kfree(pg); |
| 424 | return 0; | 419 | return 0; |
| 425 | } | 420 | } |
| 426 | dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid); | 421 | dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed); |
| 427 | return -ENOENT; | 422 | return -ENOENT; |
| 428 | } | 423 | } |
| 429 | 424 | ||
| @@ -452,7 +447,7 @@ static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) | |||
| 452 | return 0; | 447 | return 0; |
| 453 | } | 448 | } |
| 454 | 449 | ||
| 455 | static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) | 450 | static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) |
| 456 | { | 451 | { |
| 457 | struct ceph_pg_pool_info *pi; | 452 | struct ceph_pg_pool_info *pi; |
| 458 | struct rb_node *n = root->rb_node; | 453 | struct rb_node *n = root->rb_node; |
| @@ -508,24 +503,57 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) | |||
| 508 | 503 | ||
| 509 | static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) | 504 | static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) |
| 510 | { | 505 | { |
| 511 | unsigned int n, m; | 506 | u8 ev, cv; |
| 507 | unsigned len, num; | ||
| 508 | void *pool_end; | ||
| 509 | |||
| 510 | ceph_decode_need(p, end, 2 + 4, bad); | ||
| 511 | ev = ceph_decode_8(p); /* encoding version */ | ||
| 512 | cv = ceph_decode_8(p); /* compat version */ | ||
| 513 | if (ev < 5) { | ||
| 514 | pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); | ||
| 515 | return -EINVAL; | ||
| 516 | } | ||
| 517 | if (cv > 7) { | ||
| 518 | pr_warning("got v %d cv %d > 7 of ceph_pg_pool\n", ev, cv); | ||
| 519 | return -EINVAL; | ||
| 520 | } | ||
| 521 | len = ceph_decode_32(p); | ||
| 522 | ceph_decode_need(p, end, len, bad); | ||
| 523 | pool_end = *p + len; | ||
| 512 | 524 | ||
| 513 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | 525 | pi->type = ceph_decode_8(p); |
| 514 | calc_pg_masks(pi); | 526 | pi->size = ceph_decode_8(p); |
| 527 | pi->crush_ruleset = ceph_decode_8(p); | ||
| 528 | pi->object_hash = ceph_decode_8(p); | ||
| 529 | |||
| 530 | pi->pg_num = ceph_decode_32(p); | ||
| 531 | pi->pgp_num = ceph_decode_32(p); | ||
| 532 | |||
| 533 | *p += 4 + 4; /* skip lpg* */ | ||
| 534 | *p += 4; /* skip last_change */ | ||
| 535 | *p += 8 + 4; /* skip snap_seq, snap_epoch */ | ||
| 515 | 536 | ||
| 516 | /* num_snaps * snap_info_t */ | 537 | /* skip snaps */ |
| 517 | n = le32_to_cpu(pi->v.num_snaps); | 538 | num = ceph_decode_32(p); |
| 518 | while (n--) { | 539 | while (num--) { |
| 519 | ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) + | 540 | *p += 8; /* snapid key */ |
| 520 | sizeof(struct ceph_timespec), bad); | 541 | *p += 1 + 1; /* versions */ |
| 521 | *p += sizeof(u64) + /* key */ | 542 | len = ceph_decode_32(p); |
| 522 | 1 + sizeof(u64) + /* u8, snapid */ | 543 | *p += len; |
| 523 | sizeof(struct ceph_timespec); | ||
| 524 | m = ceph_decode_32(p); /* snap name */ | ||
| 525 | *p += m; | ||
| 526 | } | 544 | } |
| 527 | 545 | ||
| 528 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; | 546 | /* skip removed snaps */ |
| 547 | num = ceph_decode_32(p); | ||
| 548 | *p += num * (8 + 8); | ||
| 549 | |||
| 550 | *p += 8; /* skip auid */ | ||
| 551 | pi->flags = ceph_decode_64(p); | ||
| 552 | |||
| 553 | /* ignore the rest */ | ||
| 554 | |||
| 555 | *p = pool_end; | ||
| 556 | calc_pg_masks(pi); | ||
| 529 | return 0; | 557 | return 0; |
| 530 | 558 | ||
| 531 | bad: | 559 | bad: |
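Review note: the pool decoder now follows Ceph's standard versioned-encoding envelope — an encoding version, an oldest-compatible version, and a byte length — after which it can jump to pool_end and safely skip fields appended by newer servers. The general shape, as a sketch (the version bounds here are this decoder's, not a general rule):

    u8 ev = ceph_decode_8(p);       /* encoding version */
    u8 cv = ceph_decode_8(p);       /* oldest compatible version */
    u32 len = ceph_decode_32(p);    /* payload length */
    void *struct_end = *p + len;

    if (ev < 5 || cv > 7)           /* outside what we can parse */
            return -EINVAL;
    /* ... decode the known fields, skipping ones we ignore ... */
    *p = struct_end;                /* jump past anything newer */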
| @@ -535,14 +563,15 @@ bad: | |||
| 535 | static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) | 563 | static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) |
| 536 | { | 564 | { |
| 537 | struct ceph_pg_pool_info *pi; | 565 | struct ceph_pg_pool_info *pi; |
| 538 | u32 num, len, pool; | 566 | u32 num, len; |
| 567 | u64 pool; | ||
| 539 | 568 | ||
| 540 | ceph_decode_32_safe(p, end, num, bad); | 569 | ceph_decode_32_safe(p, end, num, bad); |
| 541 | dout(" %d pool names\n", num); | 570 | dout(" %d pool names\n", num); |
| 542 | while (num--) { | 571 | while (num--) { |
| 543 | ceph_decode_32_safe(p, end, pool, bad); | 572 | ceph_decode_64_safe(p, end, pool, bad); |
| 544 | ceph_decode_32_safe(p, end, len, bad); | 573 | ceph_decode_32_safe(p, end, len, bad); |
| 545 | dout(" pool %d len %d\n", pool, len); | 574 | dout(" pool %llu len %d\n", pool, len); |
| 546 | ceph_decode_need(p, end, len, bad); | 575 | ceph_decode_need(p, end, len, bad); |
| 547 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 576 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
| 548 | if (pi) { | 577 | if (pi) { |
| @@ -633,7 +662,6 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 633 | struct ceph_osdmap *map; | 662 | struct ceph_osdmap *map; |
| 634 | u16 version; | 663 | u16 version; |
| 635 | u32 len, max, i; | 664 | u32 len, max, i; |
| 636 | u8 ev; | ||
| 637 | int err = -EINVAL; | 665 | int err = -EINVAL; |
| 638 | void *start = *p; | 666 | void *start = *p; |
| 639 | struct ceph_pg_pool_info *pi; | 667 | struct ceph_pg_pool_info *pi; |
| @@ -646,9 +674,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 646 | map->pg_temp = RB_ROOT; | 674 | map->pg_temp = RB_ROOT; |
| 647 | 675 | ||
| 648 | ceph_decode_16_safe(p, end, version, bad); | 676 | ceph_decode_16_safe(p, end, version, bad); |
| 649 | if (version > CEPH_OSDMAP_VERSION) { | 677 | if (version > 6) { |
| 650 | pr_warning("got unknown v %d > %d of osdmap\n", version, | 678 | pr_warning("got unknown v %d > 6 of osdmap\n", version); |
| 651 | CEPH_OSDMAP_VERSION); | 679 | goto bad; |
| 680 | } | ||
| 681 | if (version < 6) { | ||
| 682 | pr_warning("got old v %d < 6 of osdmap\n", version); | ||
| 652 | goto bad; | 683 | goto bad; |
| 653 | } | 684 | } |
| 654 | 685 | ||
| @@ -660,20 +691,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 660 | 691 | ||
| 661 | ceph_decode_32_safe(p, end, max, bad); | 692 | ceph_decode_32_safe(p, end, max, bad); |
| 662 | while (max--) { | 693 | while (max--) { |
| 663 | ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); | 694 | ceph_decode_need(p, end, 8 + 2, bad); |
| 664 | err = -ENOMEM; | 695 | err = -ENOMEM; |
| 665 | pi = kzalloc(sizeof(*pi), GFP_NOFS); | 696 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
| 666 | if (!pi) | 697 | if (!pi) |
| 667 | goto bad; | 698 | goto bad; |
| 668 | pi->id = ceph_decode_32(p); | 699 | pi->id = ceph_decode_64(p); |
| 669 | err = -EINVAL; | ||
| 670 | ev = ceph_decode_8(p); /* encoding version */ | ||
| 671 | if (ev > CEPH_PG_POOL_VERSION) { | ||
| 672 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", | ||
| 673 | ev, CEPH_PG_POOL_VERSION); | ||
| 674 | kfree(pi); | ||
| 675 | goto bad; | ||
| 676 | } | ||
| 677 | err = __decode_pool(p, end, pi); | 700 | err = __decode_pool(p, end, pi); |
| 678 | if (err < 0) { | 701 | if (err < 0) { |
| 679 | kfree(pi); | 702 | kfree(pi); |
| @@ -682,12 +705,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 682 | __insert_pg_pool(&map->pg_pools, pi); | 705 | __insert_pg_pool(&map->pg_pools, pi); |
| 683 | } | 706 | } |
| 684 | 707 | ||
| 685 | if (version >= 5) { | 708 | err = __decode_pool_names(p, end, map); |
| 686 | err = __decode_pool_names(p, end, map); | 709 | if (err < 0) { |
| 687 | if (err < 0) { | 710 | dout("fail to decode pool names"); |
| 688 | dout("fail to decode pool names"); | 711 | goto bad; |
| 689 | goto bad; | ||
| 690 | } | ||
| 691 | } | 712 | } |
| 692 | 713 | ||
| 693 | ceph_decode_32_safe(p, end, map->pool_max, bad); | 714 | ceph_decode_32_safe(p, end, map->pool_max, bad); |
| @@ -724,10 +745,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 724 | for (i = 0; i < len; i++) { | 745 | for (i = 0; i < len; i++) { |
| 725 | int n, j; | 746 | int n, j; |
| 726 | struct ceph_pg pgid; | 747 | struct ceph_pg pgid; |
| 748 | struct ceph_pg_v1 pgid_v1; | ||
| 727 | struct ceph_pg_mapping *pg; | 749 | struct ceph_pg_mapping *pg; |
| 728 | 750 | ||
| 729 | ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); | 751 | ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); |
| 730 | ceph_decode_copy(p, &pgid, sizeof(pgid)); | 752 | ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); |
| 753 | pgid.pool = le32_to_cpu(pgid_v1.pool); | ||
| 754 | pgid.seed = le16_to_cpu(pgid_v1.ps); | ||
| 731 | n = ceph_decode_32(p); | 755 | n = ceph_decode_32(p); |
| 732 | err = -EINVAL; | 756 | err = -EINVAL; |
| 733 | if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) | 757 | if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) |
| @@ -745,7 +769,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 745 | err = __insert_pg_mapping(pg, &map->pg_temp); | 769 | err = __insert_pg_mapping(pg, &map->pg_temp); |
| 746 | if (err) | 770 | if (err) |
| 747 | goto bad; | 771 | goto bad; |
| 748 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len); | 772 | dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed, |
| 773 | len); | ||
| 749 | } | 774 | } |
| 750 | 775 | ||
| 751 | /* crush */ | 776 | /* crush */ |
| @@ -784,16 +809,17 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 784 | struct ceph_fsid fsid; | 809 | struct ceph_fsid fsid; |
| 785 | u32 epoch = 0; | 810 | u32 epoch = 0; |
| 786 | struct ceph_timespec modified; | 811 | struct ceph_timespec modified; |
| 787 | u32 len, pool; | 812 | s32 len; |
| 788 | __s32 new_pool_max, new_flags, max; | 813 | u64 pool; |
| 814 | __s64 new_pool_max; | ||
| 815 | __s32 new_flags, max; | ||
| 789 | void *start = *p; | 816 | void *start = *p; |
| 790 | int err = -EINVAL; | 817 | int err = -EINVAL; |
| 791 | u16 version; | 818 | u16 version; |
| 792 | 819 | ||
| 793 | ceph_decode_16_safe(p, end, version, bad); | 820 | ceph_decode_16_safe(p, end, version, bad); |
| 794 | if (version > CEPH_OSDMAP_INC_VERSION) { | 821 | if (version > 6) { |
| 795 | pr_warning("got unknown v %d > %d of inc osdmap\n", version, | 822 | pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); |
| 796 | CEPH_OSDMAP_INC_VERSION); | ||
| 797 | goto bad; | 823 | goto bad; |
| 798 | } | 824 | } |
| 799 | 825 | ||
| @@ -803,7 +829,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 803 | epoch = ceph_decode_32(p); | 829 | epoch = ceph_decode_32(p); |
| 804 | BUG_ON(epoch != map->epoch+1); | 830 | BUG_ON(epoch != map->epoch+1); |
| 805 | ceph_decode_copy(p, &modified, sizeof(modified)); | 831 | ceph_decode_copy(p, &modified, sizeof(modified)); |
| 806 | new_pool_max = ceph_decode_32(p); | 832 | new_pool_max = ceph_decode_64(p); |
| 807 | new_flags = ceph_decode_32(p); | 833 | new_flags = ceph_decode_32(p); |
| 808 | 834 | ||
| 809 | /* full map? */ | 835 | /* full map? */ |
| @@ -853,18 +879,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 853 | /* new_pool */ | 879 | /* new_pool */ |
| 854 | ceph_decode_32_safe(p, end, len, bad); | 880 | ceph_decode_32_safe(p, end, len, bad); |
| 855 | while (len--) { | 881 | while (len--) { |
| 856 | __u8 ev; | ||
| 857 | struct ceph_pg_pool_info *pi; | 882 | struct ceph_pg_pool_info *pi; |
| 858 | 883 | ||
| 859 | ceph_decode_32_safe(p, end, pool, bad); | 884 | ceph_decode_64_safe(p, end, pool, bad); |
| 860 | ceph_decode_need(p, end, 1 + sizeof(pi->v), bad); | ||
| 861 | ev = ceph_decode_8(p); /* encoding version */ | ||
| 862 | if (ev > CEPH_PG_POOL_VERSION) { | ||
| 863 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", | ||
| 864 | ev, CEPH_PG_POOL_VERSION); | ||
| 865 | err = -EINVAL; | ||
| 866 | goto bad; | ||
| 867 | } | ||
| 868 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 885 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
| 869 | if (!pi) { | 886 | if (!pi) { |
| 870 | pi = kzalloc(sizeof(*pi), GFP_NOFS); | 887 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
| @@ -890,7 +907,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 890 | while (len--) { | 907 | while (len--) { |
| 891 | struct ceph_pg_pool_info *pi; | 908 | struct ceph_pg_pool_info *pi; |
| 892 | 909 | ||
| 893 | ceph_decode_32_safe(p, end, pool, bad); | 910 | ceph_decode_64_safe(p, end, pool, bad); |
| 894 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 911 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
| 895 | if (pi) | 912 | if (pi) |
| 896 | __remove_pg_pool(&map->pg_pools, pi); | 913 | __remove_pg_pool(&map->pg_pools, pi); |
| @@ -946,10 +963,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 946 | while (len--) { | 963 | while (len--) { |
| 947 | struct ceph_pg_mapping *pg; | 964 | struct ceph_pg_mapping *pg; |
| 948 | int j; | 965 | int j; |
| 966 | struct ceph_pg_v1 pgid_v1; | ||
| 949 | struct ceph_pg pgid; | 967 | struct ceph_pg pgid; |
| 950 | u32 pglen; | 968 | u32 pglen; |
| 951 | ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); | 969 | ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); |
| 952 | ceph_decode_copy(p, &pgid, sizeof(pgid)); | 970 | ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); |
| 971 | pgid.pool = le32_to_cpu(pgid_v1.pool); | ||
| 972 | pgid.seed = le16_to_cpu(pgid_v1.ps); | ||
| 953 | pglen = ceph_decode_32(p); | 973 | pglen = ceph_decode_32(p); |
| 954 | 974 | ||
| 955 | if (pglen) { | 975 | if (pglen) { |
| @@ -975,8 +995,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 975 | kfree(pg); | 995 | kfree(pg); |
| 976 | goto bad; | 996 | goto bad; |
| 977 | } | 997 | } |
| 978 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, | 998 | dout(" added pg_temp %lld.%x len %d\n", pgid.pool, |
| 979 | pglen); | 999 | pgid.seed, pglen); |
| 980 | } else { | 1000 | } else { |
| 981 | /* remove */ | 1001 | /* remove */ |
| 982 | __remove_pg_mapping(&map->pg_temp, pgid); | 1002 | __remove_pg_mapping(&map->pg_temp, pgid); |
| @@ -1010,7 +1030,7 @@ bad: | |||
| 1010 | * pass a stride back to the caller. | 1030 | * pass a stride back to the caller. |
| 1011 | */ | 1031 | */ |
| 1012 | int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | 1032 | int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, |
| 1013 | u64 off, u64 *plen, | 1033 | u64 off, u64 len, |
| 1014 | u64 *ono, | 1034 | u64 *ono, |
| 1015 | u64 *oxoff, u64 *oxlen) | 1035 | u64 *oxoff, u64 *oxlen) |
| 1016 | { | 1036 | { |
| @@ -1021,7 +1041,7 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | |||
| 1021 | u32 su_per_object; | 1041 | u32 su_per_object; |
| 1022 | u64 t, su_offset; | 1042 | u64 t, su_offset; |
| 1023 | 1043 | ||
| 1024 | dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, | 1044 | dout("mapping %llu~%llu osize %u fl_su %u\n", off, len, |
| 1025 | osize, su); | 1045 | osize, su); |
| 1026 | if (su == 0 || sc == 0) | 1046 | if (su == 0 || sc == 0) |
| 1027 | goto invalid; | 1047 | goto invalid; |
| @@ -1054,11 +1074,10 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | |||
| 1054 | 1074 | ||
| 1055 | /* | 1075 | /* |
| 1056 | * Calculate the length of the extent being written to the selected | 1076 | * Calculate the length of the extent being written to the selected |
| 1057 | * object. This is the minimum of the full length requested (plen) or | 1077 | * object. This is the minimum of the full length requested (len) or |
| 1058 | * the remainder of the current stripe being written to. | 1078 | * the remainder of the current stripe being written to. |
| 1059 | */ | 1079 | */ |
| 1060 | *oxlen = min_t(u64, *plen, su - su_offset); | 1080 | *oxlen = min_t(u64, len, su - su_offset); |
| 1061 | *plen = *oxlen; | ||
| 1062 | 1081 | ||
| 1063 | dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); | 1082 | dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); |
| 1064 | return 0; | 1083 | return 0; |
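The signature change makes the length a plain input: the caller passes len by value and only the per-object extent comes back in *oxlen, where previously *plen was clamped in place as a side effect. For reference, the striping arithmetic itself as a standalone sketch; the geometry in main() (4 MB objects, 1 MB stripe unit, 4-wide stripes) is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Standalone version of the striping math in ceph_calc_file_object_mapping():
 * su = stripe unit, sc = stripe count, osize = object size. */
static void map_extent(uint64_t off, uint64_t len,
                       uint32_t su, uint32_t sc, uint32_t osize,
                       uint64_t *ono, uint64_t *oxoff, uint64_t *oxlen)
{
        uint64_t bl = off / su;                   /* which stripe unit overall */
        uint64_t stripeno = bl / sc;              /* which stripe */
        uint32_t stripepos = bl % sc;             /* object column in the stripe */
        uint32_t su_per_object = osize / su;
        uint64_t objsetno = stripeno / su_per_object;
        uint64_t su_offset = off % su;            /* offset inside the stripe unit */

        *ono = objsetno * sc + stripepos;
        *oxoff = su_offset + (stripeno % su_per_object) * su;
        /* one extent never crosses a stripe-unit boundary */
        *oxlen = len < su - su_offset ? len : su - su_offset;
}

int main(void)
{
        uint64_t ono, oxoff, oxlen;

        map_extent((5ULL << 20) + 123, 4096, 1 << 20, 4, 4 << 20,
                   &ono, &oxoff, &oxlen);
        printf("ono=%llu oxoff=%llu oxlen=%llu\n",
               (unsigned long long)ono, (unsigned long long)oxoff,
               (unsigned long long)oxlen);
        return 0;
}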
| @@ -1076,33 +1095,24 @@ EXPORT_SYMBOL(ceph_calc_file_object_mapping); | |||
| 1076 | * calculate an object layout (i.e. pgid) from an oid, | 1095 | * calculate an object layout (i.e. pgid) from an oid, |
| 1077 | * file_layout, and osdmap | 1096 | * file_layout, and osdmap |
| 1078 | */ | 1097 | */ |
| 1079 | int ceph_calc_object_layout(struct ceph_object_layout *ol, | 1098 | int ceph_calc_object_layout(struct ceph_pg *pg, |
| 1080 | const char *oid, | 1099 | const char *oid, |
| 1081 | struct ceph_file_layout *fl, | 1100 | struct ceph_file_layout *fl, |
| 1082 | struct ceph_osdmap *osdmap) | 1101 | struct ceph_osdmap *osdmap) |
| 1083 | { | 1102 | { |
| 1084 | unsigned int num, num_mask; | 1103 | unsigned int num, num_mask; |
| 1085 | struct ceph_pg pgid; | ||
| 1086 | int poolid = le32_to_cpu(fl->fl_pg_pool); | ||
| 1087 | struct ceph_pg_pool_info *pool; | 1104 | struct ceph_pg_pool_info *pool; |
| 1088 | unsigned int ps; | ||
| 1089 | 1105 | ||
| 1090 | BUG_ON(!osdmap); | 1106 | BUG_ON(!osdmap); |
| 1091 | 1107 | pg->pool = le32_to_cpu(fl->fl_pg_pool); | |
| 1092 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | 1108 | pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool); |
| 1093 | if (!pool) | 1109 | if (!pool) |
| 1094 | return -EIO; | 1110 | return -EIO; |
| 1095 | ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid)); | 1111 | pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid)); |
| 1096 | num = le32_to_cpu(pool->v.pg_num); | 1112 | num = pool->pg_num; |
| 1097 | num_mask = pool->pg_num_mask; | 1113 | num_mask = pool->pg_num_mask; |
| 1098 | 1114 | ||
| 1099 | pgid.ps = cpu_to_le16(ps); | 1115 | dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed); |
| 1100 | pgid.preferred = cpu_to_le16(-1); | ||
| 1101 | pgid.pool = fl->fl_pg_pool; | ||
| 1102 | dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps); | ||
| 1103 | |||
| 1104 | ol->ol_pgid = pgid; | ||
| 1105 | ol->ol_stripe_unit = fl->fl_object_stripe_unit; | ||
| 1106 | return 0; | 1116 | return 0; |
| 1107 | } | 1117 | } |
| 1108 | EXPORT_SYMBOL(ceph_calc_object_layout); | 1118 | EXPORT_SYMBOL(ceph_calc_object_layout); |
| @@ -1117,19 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
| 1117 | struct ceph_pg_mapping *pg; | 1127 | struct ceph_pg_mapping *pg; |
| 1118 | struct ceph_pg_pool_info *pool; | 1128 | struct ceph_pg_pool_info *pool; |
| 1119 | int ruleno; | 1129 | int ruleno; |
| 1120 | unsigned int poolid, ps, pps, t, r; | 1130 | int r; |
| 1121 | 1131 | u32 pps; | |
| 1122 | poolid = le32_to_cpu(pgid.pool); | ||
| 1123 | ps = le16_to_cpu(pgid.ps); | ||
| 1124 | 1132 | ||
| 1125 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | 1133 | pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool); |
| 1126 | if (!pool) | 1134 | if (!pool) |
| 1127 | return NULL; | 1135 | return NULL; |
| 1128 | 1136 | ||
| 1129 | /* pg_temp? */ | 1137 | /* pg_temp? */ |
| 1130 | t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), | 1138 | pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num, |
| 1131 | pool->pgp_num_mask); | 1139 | pool->pgp_num_mask); |
| 1132 | pgid.ps = cpu_to_le16(t); | ||
| 1133 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); | 1140 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); |
| 1134 | if (pg) { | 1141 | if (pg) { |
| 1135 | *num = pg->len; | 1142 | *num = pg->len; |
| @@ -1137,26 +1144,39 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
| 1137 | } | 1144 | } |
| 1138 | 1145 | ||
| 1139 | /* crush */ | 1146 | /* crush */ |
| 1140 | ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, | 1147 | ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset, |
| 1141 | pool->v.type, pool->v.size); | 1148 | pool->type, pool->size); |
| 1142 | if (ruleno < 0) { | 1149 | if (ruleno < 0) { |
| 1143 | pr_err("no crush rule pool %d ruleset %d type %d size %d\n", | 1150 | pr_err("no crush rule pool %lld ruleset %d type %d size %d\n", |
| 1144 | poolid, pool->v.crush_ruleset, pool->v.type, | 1151 | pgid.pool, pool->crush_ruleset, pool->type, |
| 1145 | pool->v.size); | 1152 | pool->size); |
| 1146 | return NULL; | 1153 | return NULL; |
| 1147 | } | 1154 | } |
| 1148 | 1155 | ||
| 1149 | pps = ceph_stable_mod(ps, | 1156 | if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) { |
| 1150 | le32_to_cpu(pool->v.pgp_num), | 1157 | /* hash pool id and seed so that pool PGs do not overlap */ |
| 1151 | pool->pgp_num_mask); | 1158 | pps = crush_hash32_2(CRUSH_HASH_RJENKINS1, |
| 1152 | pps += poolid; | 1159 | ceph_stable_mod(pgid.seed, pool->pgp_num, |
| 1160 | pool->pgp_num_mask), | ||
| 1161 | pgid.pool); | ||
| 1162 | } else { | ||
| 1163 | /* | ||
| 1164 | * legacy behavior: add ps and pool together. this is | ||
| 1165 | * not a great approach because the PGs from each pool | ||
| 1166 | * will overlap on top of each other: 0.5 == 1.4 == | ||
| 1167 | * 2.3 == ... | ||
| 1168 | */ | ||
| 1169 | pps = ceph_stable_mod(pgid.seed, pool->pgp_num, | ||
| 1170 | pool->pgp_num_mask) + | ||
| 1171 | (unsigned)pgid.pool; | ||
| 1172 | } | ||
| 1153 | r = crush_do_rule(osdmap->crush, ruleno, pps, osds, | 1173 | r = crush_do_rule(osdmap->crush, ruleno, pps, osds, |
| 1154 | min_t(int, pool->v.size, *num), | 1174 | min_t(int, pool->size, *num), |
| 1155 | osdmap->osd_weight); | 1175 | osdmap->osd_weight); |
| 1156 | if (r < 0) { | 1176 | if (r < 0) { |
| 1157 | pr_err("error %d from crush rule: pool %d ruleset %d type %d" | 1177 | pr_err("error %d from crush rule: pool %lld ruleset %d type %d" |
| 1158 | " size %d\n", r, poolid, pool->v.crush_ruleset, | 1178 | " size %d\n", r, pgid.pool, pool->crush_ruleset, |
| 1159 | pool->v.type, pool->v.size); | 1179 | pool->type, pool->size); |
| 1160 | return NULL; | 1180 | return NULL; |
| 1161 | } | 1181 | } |
| 1162 | *num = r; | 1182 | *num = r; |
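The pps (placement pseudo-seed) computation is the substantive change in this hunk. ceph_stable_mod() first folds the seed into pgp_num; with HASHPSPOOL set, the pool id is then mixed in with a hash rather than added, so PGs from different pools stop colliding (legacy: 0.5, 1.4, 2.3 all yield the same pps). A sketch, with a placeholder mix standing in for the real CRUSH rjenkins hash:

#include <stdint.h>

/* ceph_stable_mod(), as defined in the ceph headers: a modulo that stays
 * stable while pg_num grows toward the next power of two. */
static inline int ceph_stable_mod(int x, int b, int bmask)
{
        if ((x & bmask) < b)
                return x & bmask;
        else
                return x & (bmask >> 1);
}

/* placeholder mix -- NOT the real CRUSH rjenkins hash */
static uint32_t hash32_2(uint32_t a, uint32_t b)
{
        a ^= b + 0x9e3779b9 + (a << 6) + (a >> 2);
        return a;
}

static uint32_t pps_for(uint64_t pool, uint32_t seed,
                        uint32_t pgp_num, uint32_t pgp_num_mask,
                        int hashpspool)
{
        uint32_t t = ceph_stable_mod(seed, pgp_num, pgp_num_mask);

        if (hashpspool)
                return hash32_2(t, (uint32_t)pool); /* pools stay disjoint */
        /* legacy: 0.5, 1.4, 2.3 ... all collapse to the same value */
        return t + (uint32_t)pool;
}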
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index cd9c21df87d1..815a2249cfa9 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | /* | 12 | /* |
| 13 | * build a vector of user pages | 13 | * build a vector of user pages |
| 14 | */ | 14 | */ |
| 15 | struct page **ceph_get_direct_page_vector(const char __user *data, | 15 | struct page **ceph_get_direct_page_vector(const void __user *data, |
| 16 | int num_pages, bool write_page) | 16 | int num_pages, bool write_page) |
| 17 | { | 17 | { |
| 18 | struct page **pages; | 18 | struct page **pages; |
| @@ -93,7 +93,7 @@ EXPORT_SYMBOL(ceph_alloc_page_vector); | |||
| 93 | * copy user data into a page vector | 93 | * copy user data into a page vector |
| 94 | */ | 94 | */ |
| 95 | int ceph_copy_user_to_page_vector(struct page **pages, | 95 | int ceph_copy_user_to_page_vector(struct page **pages, |
| 96 | const char __user *data, | 96 | const void __user *data, |
| 97 | loff_t off, size_t len) | 97 | loff_t off, size_t len) |
| 98 | { | 98 | { |
| 99 | int i = 0; | 99 | int i = 0; |
| @@ -118,17 +118,17 @@ int ceph_copy_user_to_page_vector(struct page **pages, | |||
| 118 | } | 118 | } |
| 119 | EXPORT_SYMBOL(ceph_copy_user_to_page_vector); | 119 | EXPORT_SYMBOL(ceph_copy_user_to_page_vector); |
| 120 | 120 | ||
| 121 | int ceph_copy_to_page_vector(struct page **pages, | 121 | void ceph_copy_to_page_vector(struct page **pages, |
| 122 | const char *data, | 122 | const void *data, |
| 123 | loff_t off, size_t len) | 123 | loff_t off, size_t len) |
| 124 | { | 124 | { |
| 125 | int i = 0; | 125 | int i = 0; |
| 126 | size_t po = off & ~PAGE_CACHE_MASK; | 126 | size_t po = off & ~PAGE_CACHE_MASK; |
| 127 | size_t left = len; | 127 | size_t left = len; |
| 128 | size_t l; | ||
| 129 | 128 | ||
| 130 | while (left > 0) { | 129 | while (left > 0) { |
| 131 | l = min_t(size_t, PAGE_CACHE_SIZE-po, left); | 130 | size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); |
| 131 | |||
| 132 | memcpy(page_address(pages[i]) + po, data, l); | 132 | memcpy(page_address(pages[i]) + po, data, l); |
| 133 | data += l; | 133 | data += l; |
| 134 | left -= l; | 134 | left -= l; |
| @@ -138,21 +138,20 @@ int ceph_copy_to_page_vector(struct page **pages, | |||
| 138 | i++; | 138 | i++; |
| 139 | } | 139 | } |
| 140 | } | 140 | } |
| 141 | return len; | ||
| 142 | } | 141 | } |
| 143 | EXPORT_SYMBOL(ceph_copy_to_page_vector); | 142 | EXPORT_SYMBOL(ceph_copy_to_page_vector); |
| 144 | 143 | ||
| 145 | int ceph_copy_from_page_vector(struct page **pages, | 144 | void ceph_copy_from_page_vector(struct page **pages, |
| 146 | char *data, | 145 | void *data, |
| 147 | loff_t off, size_t len) | 146 | loff_t off, size_t len) |
| 148 | { | 147 | { |
| 149 | int i = 0; | 148 | int i = 0; |
| 150 | size_t po = off & ~PAGE_CACHE_MASK; | 149 | size_t po = off & ~PAGE_CACHE_MASK; |
| 151 | size_t left = len; | 150 | size_t left = len; |
| 152 | size_t l; | ||
| 153 | 151 | ||
| 154 | while (left > 0) { | 152 | while (left > 0) { |
| 155 | l = min_t(size_t, PAGE_CACHE_SIZE-po, left); | 153 | size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); |
| 154 | |||
| 156 | memcpy(data, page_address(pages[i]) + po, l); | 155 | memcpy(data, page_address(pages[i]) + po, l); |
| 157 | data += l; | 156 | data += l; |
| 158 | left -= l; | 157 | left -= l; |
| @@ -162,7 +161,6 @@ int ceph_copy_from_page_vector(struct page **pages, | |||
| 162 | i++; | 161 | i++; |
| 163 | } | 162 | } |
| 164 | } | 163 | } |
| 165 | return len; | ||
| 166 | } | 164 | } |
| 167 | EXPORT_SYMBOL(ceph_copy_from_page_vector); | 165 | EXPORT_SYMBOL(ceph_copy_from_page_vector); |
| 168 | 166 | ||
| @@ -170,7 +168,7 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector); | |||
| 170 | * copy user data from a page vector into a user pointer | 168 | * copy user data from a page vector into a user pointer |
| 171 | */ | 169 | */ |
| 172 | int ceph_copy_page_vector_to_user(struct page **pages, | 170 | int ceph_copy_page_vector_to_user(struct page **pages, |
| 173 | char __user *data, | 171 | void __user *data, |
| 174 | loff_t off, size_t len) | 172 | loff_t off, size_t len) |
| 175 | { | 173 | { |
| 176 | int i = 0; | 174 | int i = 0; |
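The two plain-memory copy helpers above now return void (the int return was always just len, which no caller needed) and scope the chunk length inside the loop. The loop clamps each memcpy to the current page boundary; a userspace sketch of the same walk, with flat buffers standing in for struct page and PAGE_SIZE playing the role of PAGE_CACHE_SIZE:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* 'pages' is a vector of PAGE_SIZE buffers; the first one is assumed to
 * hold the byte at offset 'off', as in ceph_copy_to_page_vector(). */
static void copy_to_pagevec(char **pages, const void *data,
                            size_t off, size_t len)
{
        int i = 0;
        size_t po = off & (PAGE_SIZE - 1);      /* offset within first page */

        while (len > 0) {
                size_t l = PAGE_SIZE - po < len ? PAGE_SIZE - po : len;

                memcpy(pages[i] + po, data, l); /* never crosses a page */
                data = (const char *)data + l;
                len -= l;
                po += l;
                if (po == PAGE_SIZE) {          /* roll over to next page */
                        po = 0;
                        i++;
                }
        }
}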
diff --git a/net/core/dev.c b/net/core/dev.c index 17bc535115d3..a06a7a58dd11 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup); | |||
| 658 | 658 | ||
| 659 | struct net_device *__dev_get_by_name(struct net *net, const char *name) | 659 | struct net_device *__dev_get_by_name(struct net *net, const char *name) |
| 660 | { | 660 | { |
| 661 | struct hlist_node *p; | ||
| 662 | struct net_device *dev; | 661 | struct net_device *dev; |
| 663 | struct hlist_head *head = dev_name_hash(net, name); | 662 | struct hlist_head *head = dev_name_hash(net, name); |
| 664 | 663 | ||
| 665 | hlist_for_each_entry(dev, p, head, name_hlist) | 664 | hlist_for_each_entry(dev, head, name_hlist) |
| 666 | if (!strncmp(dev->name, name, IFNAMSIZ)) | 665 | if (!strncmp(dev->name, name, IFNAMSIZ)) |
| 667 | return dev; | 666 | return dev; |
| 668 | 667 | ||
| @@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name); | |||
| 684 | 683 | ||
| 685 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) | 684 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) |
| 686 | { | 685 | { |
| 687 | struct hlist_node *p; | ||
| 688 | struct net_device *dev; | 686 | struct net_device *dev; |
| 689 | struct hlist_head *head = dev_name_hash(net, name); | 687 | struct hlist_head *head = dev_name_hash(net, name); |
| 690 | 688 | ||
| 691 | hlist_for_each_entry_rcu(dev, p, head, name_hlist) | 689 | hlist_for_each_entry_rcu(dev, head, name_hlist) |
| 692 | if (!strncmp(dev->name, name, IFNAMSIZ)) | 690 | if (!strncmp(dev->name, name, IFNAMSIZ)) |
| 693 | return dev; | 691 | return dev; |
| 694 | 692 | ||
| @@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name); | |||
| 735 | 733 | ||
| 736 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) | 734 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
| 737 | { | 735 | { |
| 738 | struct hlist_node *p; | ||
| 739 | struct net_device *dev; | 736 | struct net_device *dev; |
| 740 | struct hlist_head *head = dev_index_hash(net, ifindex); | 737 | struct hlist_head *head = dev_index_hash(net, ifindex); |
| 741 | 738 | ||
| 742 | hlist_for_each_entry(dev, p, head, index_hlist) | 739 | hlist_for_each_entry(dev, head, index_hlist) |
| 743 | if (dev->ifindex == ifindex) | 740 | if (dev->ifindex == ifindex) |
| 744 | return dev; | 741 | return dev; |
| 745 | 742 | ||
| @@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index); | |||
| 760 | 757 | ||
| 761 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) | 758 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) |
| 762 | { | 759 | { |
| 763 | struct hlist_node *p; | ||
| 764 | struct net_device *dev; | 760 | struct net_device *dev; |
| 765 | struct hlist_head *head = dev_index_hash(net, ifindex); | 761 | struct hlist_head *head = dev_index_hash(net, ifindex); |
| 766 | 762 | ||
| 767 | hlist_for_each_entry_rcu(dev, p, head, index_hlist) | 763 | hlist_for_each_entry_rcu(dev, head, index_hlist) |
| 768 | if (dev->ifindex == ifindex) | 764 | if (dev->ifindex == ifindex) |
| 769 | return dev; | 765 | return dev; |
| 770 | 766 | ||
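Every hunk in this file, and the net/core, decnet, and ipv4 conversions that follow, is the same mechanical change: the hlist iterators dropped their separate struct hlist_node * cursor, so the per-call node/p locals disappear. The cursor is now derived from the entry pointer itself via hlist_entry_safe(). A self-contained sketch of the new-style macro (minimal hlist, pprev omitted; the real definitions live in <linux/list.h>):

#include <stddef.h>

/* minimal intrusive hlist in the style of <linux/list.h> */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

/* the new helper: maps a possibly-NULL node back to its entry */
#define hlist_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? hlist_entry(____ptr, type, member) : NULL; })

/* new-style iterator: 'pos' is the entry itself, no node cursor needed */
#define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

With this shape the loop terminates when hlist_entry_safe() maps the NULL end-of-chain node to a NULL entry, which is why callers no longer declare anything besides the entry pointer.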
| @@ -1882,8 +1878,10 @@ int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) | |||
| 1882 | 1878 | ||
| 1883 | if (!new_dev_maps) | 1879 | if (!new_dev_maps) |
| 1884 | new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); | 1880 | new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); |
| 1885 | if (!new_dev_maps) | 1881 | if (!new_dev_maps) { |
| 1882 | mutex_unlock(&xps_map_mutex); | ||
| 1886 | return -ENOMEM; | 1883 | return -ENOMEM; |
| 1884 | } | ||
| 1887 | 1885 | ||
| 1888 | map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : | 1886 | map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : |
| 1889 | NULL; | 1887 | NULL; |
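The xps hunk above plugs a lock leak: the early -ENOMEM return used to leave xps_map_mutex held. A userspace sketch of the equivalent fix in single-exit form, with a pthread mutex standing in for the kernel mutex; routing every exit through one unlock site makes this class of leak hard to reintroduce:

#include <pthread.h>
#include <stdlib.h>
#include <errno.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_queue_map(size_t maps_sz, void **out)
{
        void *new_map;
        int ret = 0;

        pthread_mutex_lock(&map_mutex);
        new_map = calloc(1, maps_sz);
        if (!new_map) {
                ret = -ENOMEM;
                goto out;       /* single exit keeps lock/unlock paired */
        }
        *out = new_map;
out:
        pthread_mutex_unlock(&map_mutex);
        return ret;
}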
diff --git a/net/core/flow.c b/net/core/flow.c index 43f7495df27a..c56ea6f7f6c7 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
| @@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc, | |||
| 132 | int shrink_to) | 132 | int shrink_to) |
| 133 | { | 133 | { |
| 134 | struct flow_cache_entry *fle; | 134 | struct flow_cache_entry *fle; |
| 135 | struct hlist_node *entry, *tmp; | 135 | struct hlist_node *tmp; |
| 136 | LIST_HEAD(gc_list); | 136 | LIST_HEAD(gc_list); |
| 137 | int i, deleted = 0; | 137 | int i, deleted = 0; |
| 138 | 138 | ||
| 139 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | 139 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
| 140 | int saved = 0; | 140 | int saved = 0; |
| 141 | 141 | ||
| 142 | hlist_for_each_entry_safe(fle, entry, tmp, | 142 | hlist_for_each_entry_safe(fle, tmp, |
| 143 | &fcp->hash_table[i], u.hlist) { | 143 | &fcp->hash_table[i], u.hlist) { |
| 144 | if (saved < shrink_to && | 144 | if (saved < shrink_to && |
| 145 | flow_entry_valid(fle)) { | 145 | flow_entry_valid(fle)) { |
| @@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, | |||
| 211 | struct flow_cache *fc = &flow_cache_global; | 211 | struct flow_cache *fc = &flow_cache_global; |
| 212 | struct flow_cache_percpu *fcp; | 212 | struct flow_cache_percpu *fcp; |
| 213 | struct flow_cache_entry *fle, *tfle; | 213 | struct flow_cache_entry *fle, *tfle; |
| 214 | struct hlist_node *entry; | ||
| 215 | struct flow_cache_object *flo; | 214 | struct flow_cache_object *flo; |
| 216 | size_t keysize; | 215 | size_t keysize; |
| 217 | unsigned int hash; | 216 | unsigned int hash; |
| @@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, | |||
| 235 | flow_new_hash_rnd(fc, fcp); | 234 | flow_new_hash_rnd(fc, fcp); |
| 236 | 235 | ||
| 237 | hash = flow_hash_code(fc, fcp, key, keysize); | 236 | hash = flow_hash_code(fc, fcp, key, keysize); |
| 238 | hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { | 237 | hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) { |
| 239 | if (tfle->net == net && | 238 | if (tfle->net == net && |
| 240 | tfle->family == family && | 239 | tfle->family == family && |
| 241 | tfle->dir == dir && | 240 | tfle->dir == dir && |
| @@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data) | |||
| 301 | struct flow_cache *fc = info->cache; | 300 | struct flow_cache *fc = info->cache; |
| 302 | struct flow_cache_percpu *fcp; | 301 | struct flow_cache_percpu *fcp; |
| 303 | struct flow_cache_entry *fle; | 302 | struct flow_cache_entry *fle; |
| 304 | struct hlist_node *entry, *tmp; | 303 | struct hlist_node *tmp; |
| 305 | LIST_HEAD(gc_list); | 304 | LIST_HEAD(gc_list); |
| 306 | int i, deleted = 0; | 305 | int i, deleted = 0; |
| 307 | 306 | ||
| 308 | fcp = this_cpu_ptr(fc->percpu); | 307 | fcp = this_cpu_ptr(fc->percpu); |
| 309 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | 308 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
| 310 | hlist_for_each_entry_safe(fle, entry, tmp, | 309 | hlist_for_each_entry_safe(fle, tmp, |
| 311 | &fcp->hash_table[i], u.hlist) { | 310 | &fcp->hash_table[i], u.hlist) { |
| 312 | if (flow_entry_valid(fle)) | 311 | if (flow_entry_valid(fle)) |
| 313 | continue; | 312 | continue; |
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 0f6bb6f8d391..3174f1998ee6 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c | |||
| @@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff | |||
| 16 | { | 16 | { |
| 17 | struct net *net = seq_file_net(seq); | 17 | struct net *net = seq_file_net(seq); |
| 18 | struct net_device *dev; | 18 | struct net_device *dev; |
| 19 | struct hlist_node *p; | ||
| 20 | struct hlist_head *h; | 19 | struct hlist_head *h; |
| 21 | unsigned int count = 0, offset = get_offset(*pos); | 20 | unsigned int count = 0, offset = get_offset(*pos); |
| 22 | 21 | ||
| 23 | h = &net->dev_name_head[get_bucket(*pos)]; | 22 | h = &net->dev_name_head[get_bucket(*pos)]; |
| 24 | hlist_for_each_entry_rcu(dev, p, h, name_hlist) { | 23 | hlist_for_each_entry_rcu(dev, h, name_hlist) { |
| 25 | if (++count == offset) | 24 | if (++count == offset) |
| 26 | return dev; | 25 | return dev; |
| 27 | } | 26 | } |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 8acce01b6dab..80e271d9e64b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -344,7 +344,7 @@ struct net *get_net_ns_by_fd(int fd) | |||
| 344 | if (IS_ERR(file)) | 344 | if (IS_ERR(file)) |
| 345 | return ERR_CAST(file); | 345 | return ERR_CAST(file); |
| 346 | 346 | ||
| 347 | ei = PROC_I(file->f_dentry->d_inode); | 347 | ei = PROC_I(file_inode(file)); |
| 348 | if (ei->ns_ops == &netns_operations) | 348 | if (ei->ns_ops == &netns_operations) |
| 349 | net = get_net(ei->ns); | 349 | net = get_net(ei->ns); |
| 350 | else | 350 | else |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d8aa20f6a46e..b376410ff259 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1060 | int idx = 0, s_idx; | 1060 | int idx = 0, s_idx; |
| 1061 | struct net_device *dev; | 1061 | struct net_device *dev; |
| 1062 | struct hlist_head *head; | 1062 | struct hlist_head *head; |
| 1063 | struct hlist_node *node; | ||
| 1064 | struct nlattr *tb[IFLA_MAX+1]; | 1063 | struct nlattr *tb[IFLA_MAX+1]; |
| 1065 | u32 ext_filter_mask = 0; | 1064 | u32 ext_filter_mask = 0; |
| 1066 | 1065 | ||
| @@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1080 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { | 1079 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
| 1081 | idx = 0; | 1080 | idx = 0; |
| 1082 | head = &net->dev_index_head[h]; | 1081 | head = &net->dev_index_head[h]; |
| 1083 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 1082 | hlist_for_each_entry_rcu(dev, head, index_hlist) { |
| 1084 | if (idx < s_idx) | 1083 | if (idx < s_idx) |
| 1085 | goto cont; | 1084 | goto cont; |
| 1086 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 1085 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
diff --git a/net/core/sock.c b/net/core/sock.c index fe96c5d34299..b261a7977746 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -186,8 +186,10 @@ void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) | |||
| 186 | static struct lock_class_key af_family_keys[AF_MAX]; | 186 | static struct lock_class_key af_family_keys[AF_MAX]; |
| 187 | static struct lock_class_key af_family_slock_keys[AF_MAX]; | 187 | static struct lock_class_key af_family_slock_keys[AF_MAX]; |
| 188 | 188 | ||
| 189 | #if defined(CONFIG_MEMCG_KMEM) | ||
| 189 | struct static_key memcg_socket_limit_enabled; | 190 | struct static_key memcg_socket_limit_enabled; |
| 190 | EXPORT_SYMBOL(memcg_socket_limit_enabled); | 191 | EXPORT_SYMBOL(memcg_socket_limit_enabled); |
| 192 | #endif | ||
| 191 | 193 | ||
| 192 | /* | 194 | /* |
| 193 | * Make lock validator output more readable. (we pre-construct these | 195 | * Make lock validator output more readable. (we pre-construct these |
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 602cd637182e..a29e90cf36b7 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c | |||
| @@ -97,21 +97,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld) | |||
| 97 | } | 97 | } |
| 98 | EXPORT_SYMBOL_GPL(sock_diag_unregister); | 98 | EXPORT_SYMBOL_GPL(sock_diag_unregister); |
| 99 | 99 | ||
| 100 | static const inline struct sock_diag_handler *sock_diag_lock_handler(int family) | ||
| 101 | { | ||
| 102 | if (sock_diag_handlers[family] == NULL) | ||
| 103 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | ||
| 104 | NETLINK_SOCK_DIAG, family); | ||
| 105 | |||
| 106 | mutex_lock(&sock_diag_table_mutex); | ||
| 107 | return sock_diag_handlers[family]; | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h) | ||
| 111 | { | ||
| 112 | mutex_unlock(&sock_diag_table_mutex); | ||
| 113 | } | ||
| 114 | |||
| 115 | static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 100 | static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
| 116 | { | 101 | { |
| 117 | int err; | 102 | int err; |
| @@ -121,12 +106,20 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 121 | if (nlmsg_len(nlh) < sizeof(*req)) | 106 | if (nlmsg_len(nlh) < sizeof(*req)) |
| 122 | return -EINVAL; | 107 | return -EINVAL; |
| 123 | 108 | ||
| 124 | hndl = sock_diag_lock_handler(req->sdiag_family); | 109 | if (req->sdiag_family >= AF_MAX) |
| 110 | return -EINVAL; | ||
| 111 | |||
| 112 | if (sock_diag_handlers[req->sdiag_family] == NULL) | ||
| 113 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | ||
| 114 | NETLINK_SOCK_DIAG, req->sdiag_family); | ||
| 115 | |||
| 116 | mutex_lock(&sock_diag_table_mutex); | ||
| 117 | hndl = sock_diag_handlers[req->sdiag_family]; | ||
| 125 | if (hndl == NULL) | 118 | if (hndl == NULL) |
| 126 | err = -ENOENT; | 119 | err = -ENOENT; |
| 127 | else | 120 | else |
| 128 | err = hndl->dump(skb, nlh); | 121 | err = hndl->dump(skb, nlh); |
| 129 | sock_diag_unlock_handler(hndl); | 122 | mutex_unlock(&sock_diag_table_mutex); |
| 130 | 123 | ||
| 131 | return err; | 124 | return err; |
| 132 | } | 125 | } |
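Besides folding the lock/unlock helpers back inline, the new code rejects sdiag_family >= AF_MAX before using it to index the fixed-size handler table, closing an out-of-bounds read reachable from a crafted netlink request. The hardened dispatch in miniature; the types and the AF_MAX value here are illustrative:

#include <stddef.h>
#include <errno.h>

#define AF_MAX 46               /* illustrative bound, not the real value */

struct diag_handler {
        int (*dump)(void *msg);
};

static const struct diag_handler *handlers[AF_MAX];

static int dispatch(unsigned int family, void *msg)
{
        const struct diag_handler *h;

        if (family >= AF_MAX)   /* validate before indexing the table */
                return -EINVAL;

        h = handlers[family];
        if (!h)
                return -ENOENT;
        return h->dump(msg);
}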
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index c4a2def5b7bd..c21f200eed93 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
| @@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk) | |||
| 175 | static int check_port(__le16 port) | 175 | static int check_port(__le16 port) |
| 176 | { | 176 | { |
| 177 | struct sock *sk; | 177 | struct sock *sk; |
| 178 | struct hlist_node *node; | ||
| 179 | 178 | ||
| 180 | if (port == 0) | 179 | if (port == 0) |
| 181 | return -1; | 180 | return -1; |
| 182 | 181 | ||
| 183 | sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { | 182 | sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { |
| 184 | struct dn_scp *scp = DN_SK(sk); | 183 | struct dn_scp *scp = DN_SK(sk); |
| 185 | if (scp->addrloc == port) | 184 | if (scp->addrloc == port) |
| 186 | return -1; | 185 | return -1; |
| @@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, | |||
| 374 | struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) | 373 | struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) |
| 375 | { | 374 | { |
| 376 | struct hlist_head *list = listen_hash(addr); | 375 | struct hlist_head *list = listen_hash(addr); |
| 377 | struct hlist_node *node; | ||
| 378 | struct sock *sk; | 376 | struct sock *sk; |
| 379 | 377 | ||
| 380 | read_lock(&dn_hash_lock); | 378 | read_lock(&dn_hash_lock); |
| 381 | sk_for_each(sk, node, list) { | 379 | sk_for_each(sk, list) { |
| 382 | struct dn_scp *scp = DN_SK(sk); | 380 | struct dn_scp *scp = DN_SK(sk); |
| 383 | if (sk->sk_state != TCP_LISTEN) | 381 | if (sk->sk_state != TCP_LISTEN) |
| 384 | continue; | 382 | continue; |
| @@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb) | |||
| 414 | { | 412 | { |
| 415 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 413 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
| 416 | struct sock *sk; | 414 | struct sock *sk; |
| 417 | struct hlist_node *node; | ||
| 418 | struct dn_scp *scp; | 415 | struct dn_scp *scp; |
| 419 | 416 | ||
| 420 | read_lock(&dn_hash_lock); | 417 | read_lock(&dn_hash_lock); |
| 421 | sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { | 418 | sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { |
| 422 | scp = DN_SK(sk); | 419 | scp = DN_SK(sk); |
| 423 | if (cb->src != dn_saddr2dn(&scp->peer)) | 420 | if (cb->src != dn_saddr2dn(&scp->peer)) |
| 424 | continue; | 421 | continue; |
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index f968c1b58f47..6c2445bcaba1 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c | |||
| @@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 483 | unsigned int h, s_h; | 483 | unsigned int h, s_h; |
| 484 | unsigned int e = 0, s_e; | 484 | unsigned int e = 0, s_e; |
| 485 | struct dn_fib_table *tb; | 485 | struct dn_fib_table *tb; |
| 486 | struct hlist_node *node; | ||
| 487 | int dumped = 0; | 486 | int dumped = 0; |
| 488 | 487 | ||
| 489 | if (!net_eq(net, &init_net)) | 488 | if (!net_eq(net, &init_net)) |
| @@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 498 | 497 | ||
| 499 | for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { | 498 | for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { |
| 500 | e = 0; | 499 | e = 0; |
| 501 | hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) { | 500 | hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { |
| 502 | if (e < s_e) | 501 | if (e < s_e) |
| 503 | goto next; | 502 | goto next; |
| 504 | if (dumped) | 503 | if (dumped) |
| @@ -828,7 +827,6 @@ out: | |||
| 828 | struct dn_fib_table *dn_fib_get_table(u32 n, int create) | 827 | struct dn_fib_table *dn_fib_get_table(u32 n, int create) |
| 829 | { | 828 | { |
| 830 | struct dn_fib_table *t; | 829 | struct dn_fib_table *t; |
| 831 | struct hlist_node *node; | ||
| 832 | unsigned int h; | 830 | unsigned int h; |
| 833 | 831 | ||
| 834 | if (n < RT_TABLE_MIN) | 832 | if (n < RT_TABLE_MIN) |
| @@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) | |||
| 839 | 837 | ||
| 840 | h = n & (DN_FIB_TABLE_HASHSZ - 1); | 838 | h = n & (DN_FIB_TABLE_HASHSZ - 1); |
| 841 | rcu_read_lock(); | 839 | rcu_read_lock(); |
| 842 | hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) { | 840 | hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) { |
| 843 | if (t->n == n) { | 841 | if (t->n == n) { |
| 844 | rcu_read_unlock(); | 842 | rcu_read_unlock(); |
| 845 | return t; | 843 | return t; |
| @@ -885,11 +883,10 @@ void dn_fib_flush(void) | |||
| 885 | { | 883 | { |
| 886 | int flushed = 0; | 884 | int flushed = 0; |
| 887 | struct dn_fib_table *tb; | 885 | struct dn_fib_table *tb; |
| 888 | struct hlist_node *node; | ||
| 889 | unsigned int h; | 886 | unsigned int h; |
| 890 | 887 | ||
| 891 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { | 888 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { |
| 892 | hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) | 889 | hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) |
| 893 | flushed += tb->flush(tb); | 890 | flushed += tb->flush(tb); |
| 894 | } | 891 | } |
| 895 | 892 | ||
| @@ -908,12 +905,12 @@ void __init dn_fib_table_init(void) | |||
| 908 | void __exit dn_fib_table_cleanup(void) | 905 | void __exit dn_fib_table_cleanup(void) |
| 909 | { | 906 | { |
| 910 | struct dn_fib_table *t; | 907 | struct dn_fib_table *t; |
| 911 | struct hlist_node *node, *next; | 908 | struct hlist_node *next; |
| 912 | unsigned int h; | 909 | unsigned int h; |
| 913 | 910 | ||
| 914 | write_lock(&dn_fib_tables_lock); | 911 | write_lock(&dn_fib_tables_lock); |
| 915 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { | 912 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { |
| 916 | hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h], | 913 | hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h], |
| 917 | hlist) { | 914 | hlist) { |
| 918 | hlist_del(&t->hlist); | 915 | hlist_del(&t->hlist); |
| 919 | kfree(t); | 916 | kfree(t); |
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 16705611589a..e0da175f8e5b 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c | |||
| @@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id, | |||
| 350 | int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) | 350 | int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) |
| 351 | { | 351 | { |
| 352 | struct sock *sk, *prev = NULL; | 352 | struct sock *sk, *prev = NULL; |
| 353 | struct hlist_node *node; | ||
| 354 | int ret = NET_RX_SUCCESS; | 353 | int ret = NET_RX_SUCCESS; |
| 355 | u16 pan_id, short_addr; | 354 | u16 pan_id, short_addr; |
| 356 | 355 | ||
| @@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) | |||
| 361 | short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); | 360 | short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); |
| 362 | 361 | ||
| 363 | read_lock(&dgram_lock); | 362 | read_lock(&dgram_lock); |
| 364 | sk_for_each(sk, node, &dgram_head) { | 363 | sk_for_each(sk, &dgram_head) { |
| 365 | if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, | 364 | if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, |
| 366 | dgram_sk(sk))) { | 365 | dgram_sk(sk))) { |
| 367 | if (prev) { | 366 | if (prev) { |
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 50e823927d49..41f538b8e59c 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c | |||
| @@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 221 | void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) | 221 | void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) |
| 222 | { | 222 | { |
| 223 | struct sock *sk; | 223 | struct sock *sk; |
| 224 | struct hlist_node *node; | ||
| 225 | 224 | ||
| 226 | read_lock(&raw_lock); | 225 | read_lock(&raw_lock); |
| 227 | sk_for_each(sk, node, &raw_head) { | 226 | sk_for_each(sk, &raw_head) { |
| 228 | bh_lock_sock(sk); | 227 | bh_lock_sock(sk); |
| 229 | if (!sk->sk_bound_dev_if || | 228 | if (!sk->sk_bound_dev_if || |
| 230 | sk->sk_bound_dev_if == dev->ifindex) { | 229 | sk->sk_bound_dev_if == dev->ifindex) { |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e225a4e5b572..68f6a94f7661 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen); | |||
| 248 | u32 inet_ehash_secret __read_mostly; | 248 | u32 inet_ehash_secret __read_mostly; |
| 249 | EXPORT_SYMBOL(inet_ehash_secret); | 249 | EXPORT_SYMBOL(inet_ehash_secret); |
| 250 | 250 | ||
| 251 | u32 ipv6_hash_secret __read_mostly; | ||
| 252 | EXPORT_SYMBOL(ipv6_hash_secret); | ||
| 253 | |||
| 251 | /* | 254 | /* |
| 252 | * inet_ehash_secret must be set exactly once | 255 | * inet_ehash_secret must be set exactly once, and to a non-null value |
| 256 | * ipv6_hash_secret must be set exactly once. | ||
| 253 | */ | 257 | */ |
| 254 | void build_ehash_secret(void) | 258 | void build_ehash_secret(void) |
| 255 | { | 259 | { |
| @@ -259,7 +263,8 @@ void build_ehash_secret(void) | |||
| 259 | get_random_bytes(&rnd, sizeof(rnd)); | 263 | get_random_bytes(&rnd, sizeof(rnd)); |
| 260 | } while (rnd == 0); | 264 | } while (rnd == 0); |
| 261 | 265 | ||
| 262 | cmpxchg(&inet_ehash_secret, 0, rnd); | 266 | if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) |
| 267 | get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); | ||
| 263 | } | 268 | } |
| 264 | EXPORT_SYMBOL(build_ehash_secret); | 269 | EXPORT_SYMBOL(build_ehash_secret); |
| 265 | 270 | ||
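Checking the cmpxchg result means exactly one caller, the one that wins the 0 -> rnd transition, also seeds ipv6_hash_secret; every later caller sees a non-zero inet_ehash_secret and writes nothing. The same once-only pattern sketched with C11 atomics, with a local stand-in for the kernel's get_random_bytes():

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static _Atomic uint32_t ehash_secret;
static uint32_t v6_secret;

/* stand-in for get_random_bytes() */
static void get_random_u32(uint32_t *out)
{
        *out = (uint32_t)rand() << 16 ^ (uint32_t)rand();
}

/* whichever caller wins the 0 -> rnd CAS also seeds the second secret,
 * so both values are written exactly once */
static void build_secrets(void)
{
        uint32_t rnd, expected = 0;

        do {
                get_random_u32(&rnd);
        } while (rnd == 0);             /* 0 doubles as "uninitialized" */

        if (atomic_compare_exchange_strong(&ehash_secret, &expected, rnd))
                get_random_u32(&v6_secret);
}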
| @@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
| 1327 | if (skb->next != NULL) | 1332 | if (skb->next != NULL) |
| 1328 | iph->frag_off |= htons(IP_MF); | 1333 | iph->frag_off |= htons(IP_MF); |
| 1329 | offset += (skb->len - skb->mac_len - iph->ihl * 4); | 1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); |
| 1330 | } else | 1335 | } else { |
| 1331 | iph->id = htons(id++); | 1336 | if (!(iph->frag_off & htons(IP_DF))) |
| 1337 | iph->id = htons(id++); | ||
| 1338 | } | ||
| 1332 | iph->tot_len = htons(skb->len - skb->mac_len); | 1339 | iph->tot_len = htons(skb->len - skb->mac_len); |
| 1333 | iph->check = 0; | 1340 | iph->check = 0; |
| 1334 | iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); | 1341 | iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); |
| @@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = { | |||
| 1572 | 1579 | ||
| 1573 | static const struct net_protocol icmp_protocol = { | 1580 | static const struct net_protocol icmp_protocol = { |
| 1574 | .handler = icmp_rcv, | 1581 | .handler = icmp_rcv, |
| 1575 | .err_handler = ping_err, | 1582 | .err_handler = icmp_err, |
| 1576 | .no_policy = 1, | 1583 | .no_policy = 1, |
| 1577 | .netns_ok = 1, | 1584 | .netns_ok = 1, |
| 1578 | }; | 1585 | }; |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5281314886c1..f678507bc829 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) | |||
| 139 | u32 hash = inet_addr_hash(net, addr); | 139 | u32 hash = inet_addr_hash(net, addr); |
| 140 | struct net_device *result = NULL; | 140 | struct net_device *result = NULL; |
| 141 | struct in_ifaddr *ifa; | 141 | struct in_ifaddr *ifa; |
| 142 | struct hlist_node *node; | ||
| 143 | 142 | ||
| 144 | rcu_read_lock(); | 143 | rcu_read_lock(); |
| 145 | hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { | 144 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) { |
| 146 | if (ifa->ifa_local == addr) { | 145 | if (ifa->ifa_local == addr) { |
| 147 | struct net_device *dev = ifa->ifa_dev->dev; | 146 | struct net_device *dev = ifa->ifa_dev->dev; |
| 148 | 147 | ||
| @@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work) | |||
| 588 | { | 587 | { |
| 589 | unsigned long now, next, next_sec, next_sched; | 588 | unsigned long now, next, next_sec, next_sched; |
| 590 | struct in_ifaddr *ifa; | 589 | struct in_ifaddr *ifa; |
| 591 | struct hlist_node *node; | ||
| 592 | int i; | 590 | int i; |
| 593 | 591 | ||
| 594 | now = jiffies; | 592 | now = jiffies; |
| @@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work) | |||
| 596 | 594 | ||
| 597 | rcu_read_lock(); | 595 | rcu_read_lock(); |
| 598 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { | 596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { |
| 599 | hlist_for_each_entry_rcu(ifa, node, | 597 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { |
| 600 | &inet_addr_lst[i], hash) { | ||
| 601 | unsigned long age; | 598 | unsigned long age; |
| 602 | 599 | ||
| 603 | if (ifa->ifa_flags & IFA_F_PERMANENT) | 600 | if (ifa->ifa_flags & IFA_F_PERMANENT) |
| @@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1493 | struct in_device *in_dev; | 1490 | struct in_device *in_dev; |
| 1494 | struct in_ifaddr *ifa; | 1491 | struct in_ifaddr *ifa; |
| 1495 | struct hlist_head *head; | 1492 | struct hlist_head *head; |
| 1496 | struct hlist_node *node; | ||
| 1497 | 1493 | ||
| 1498 | s_h = cb->args[0]; | 1494 | s_h = cb->args[0]; |
| 1499 | s_idx = idx = cb->args[1]; | 1495 | s_idx = idx = cb->args[1]; |
| @@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1503 | idx = 0; | 1499 | idx = 0; |
| 1504 | head = &net->dev_index_head[h]; | 1500 | head = &net->dev_index_head[h]; |
| 1505 | rcu_read_lock(); | 1501 | rcu_read_lock(); |
| 1506 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 1502 | hlist_for_each_entry_rcu(dev, head, index_hlist) { |
| 1507 | if (idx < s_idx) | 1503 | if (idx < s_idx) |
| 1508 | goto cont; | 1504 | goto cont; |
| 1509 | if (h > s_h || idx > s_idx) | 1505 | if (h > s_h || idx > s_idx) |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 99f00d39d10b..eb4bb12b3eb4 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id) | |||
| 112 | struct fib_table *fib_get_table(struct net *net, u32 id) | 112 | struct fib_table *fib_get_table(struct net *net, u32 id) |
| 113 | { | 113 | { |
| 114 | struct fib_table *tb; | 114 | struct fib_table *tb; |
| 115 | struct hlist_node *node; | ||
| 116 | struct hlist_head *head; | 115 | struct hlist_head *head; |
| 117 | unsigned int h; | 116 | unsigned int h; |
| 118 | 117 | ||
| @@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id) | |||
| 122 | 121 | ||
| 123 | rcu_read_lock(); | 122 | rcu_read_lock(); |
| 124 | head = &net->ipv4.fib_table_hash[h]; | 123 | head = &net->ipv4.fib_table_hash[h]; |
| 125 | hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { | 124 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
| 126 | if (tb->tb_id == id) { | 125 | if (tb->tb_id == id) { |
| 127 | rcu_read_unlock(); | 126 | rcu_read_unlock(); |
| 128 | return tb; | 127 | return tb; |
| @@ -137,13 +136,12 @@ static void fib_flush(struct net *net) | |||
| 137 | { | 136 | { |
| 138 | int flushed = 0; | 137 | int flushed = 0; |
| 139 | struct fib_table *tb; | 138 | struct fib_table *tb; |
| 140 | struct hlist_node *node; | ||
| 141 | struct hlist_head *head; | 139 | struct hlist_head *head; |
| 142 | unsigned int h; | 140 | unsigned int h; |
| 143 | 141 | ||
| 144 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 142 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
| 145 | head = &net->ipv4.fib_table_hash[h]; | 143 | head = &net->ipv4.fib_table_hash[h]; |
| 146 | hlist_for_each_entry(tb, node, head, tb_hlist) | 144 | hlist_for_each_entry(tb, head, tb_hlist) |
| 147 | flushed += fib_table_flush(tb); | 145 | flushed += fib_table_flush(tb); |
| 148 | } | 146 | } |
| 149 | 147 | ||
| @@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 656 | unsigned int h, s_h; | 654 | unsigned int h, s_h; |
| 657 | unsigned int e = 0, s_e; | 655 | unsigned int e = 0, s_e; |
| 658 | struct fib_table *tb; | 656 | struct fib_table *tb; |
| 659 | struct hlist_node *node; | ||
| 660 | struct hlist_head *head; | 657 | struct hlist_head *head; |
| 661 | int dumped = 0; | 658 | int dumped = 0; |
| 662 | 659 | ||
| @@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 670 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { | 667 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { |
| 671 | e = 0; | 668 | e = 0; |
| 672 | head = &net->ipv4.fib_table_hash[h]; | 669 | head = &net->ipv4.fib_table_hash[h]; |
| 673 | hlist_for_each_entry(tb, node, head, tb_hlist) { | 670 | hlist_for_each_entry(tb, head, tb_hlist) { |
| 674 | if (e < s_e) | 671 | if (e < s_e) |
| 675 | goto next; | 672 | goto next; |
| 676 | if (dumped) | 673 | if (dumped) |
| @@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net) | |||
| 1117 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { | 1114 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { |
| 1118 | struct fib_table *tb; | 1115 | struct fib_table *tb; |
| 1119 | struct hlist_head *head; | 1116 | struct hlist_head *head; |
| 1120 | struct hlist_node *node, *tmp; | 1117 | struct hlist_node *tmp; |
| 1121 | 1118 | ||
| 1122 | head = &net->ipv4.fib_table_hash[i]; | 1119 | head = &net->ipv4.fib_table_hash[i]; |
| 1123 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { | 1120 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { |
| 1124 | hlist_del(node); | 1121 | hlist_del(&tb->tb_hlist); |
| 1125 | fib_table_flush(tb); | 1122 | fib_table_flush(tb); |
| 1126 | fib_free_table(tb); | 1123 | fib_free_table(tb); |
| 1127 | } | 1124 | } |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 4797a800faf8..8f6cb7a87cd6 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
| @@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi) | |||
| 298 | static struct fib_info *fib_find_info(const struct fib_info *nfi) | 298 | static struct fib_info *fib_find_info(const struct fib_info *nfi) |
| 299 | { | 299 | { |
| 300 | struct hlist_head *head; | 300 | struct hlist_head *head; |
| 301 | struct hlist_node *node; | ||
| 302 | struct fib_info *fi; | 301 | struct fib_info *fi; |
| 303 | unsigned int hash; | 302 | unsigned int hash; |
| 304 | 303 | ||
| 305 | hash = fib_info_hashfn(nfi); | 304 | hash = fib_info_hashfn(nfi); |
| 306 | head = &fib_info_hash[hash]; | 305 | head = &fib_info_hash[hash]; |
| 307 | 306 | ||
| 308 | hlist_for_each_entry(fi, node, head, fib_hash) { | 307 | hlist_for_each_entry(fi, head, fib_hash) { |
| 309 | if (!net_eq(fi->fib_net, nfi->fib_net)) | 308 | if (!net_eq(fi->fib_net, nfi->fib_net)) |
| 310 | continue; | 309 | continue; |
| 311 | if (fi->fib_nhs != nfi->fib_nhs) | 310 | if (fi->fib_nhs != nfi->fib_nhs) |
| @@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi) | |||
| 331 | int ip_fib_check_default(__be32 gw, struct net_device *dev) | 330 | int ip_fib_check_default(__be32 gw, struct net_device *dev) |
| 332 | { | 331 | { |
| 333 | struct hlist_head *head; | 332 | struct hlist_head *head; |
| 334 | struct hlist_node *node; | ||
| 335 | struct fib_nh *nh; | 333 | struct fib_nh *nh; |
| 336 | unsigned int hash; | 334 | unsigned int hash; |
| 337 | 335 | ||
| @@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev) | |||
| 339 | 337 | ||
| 340 | hash = fib_devindex_hashfn(dev->ifindex); | 338 | hash = fib_devindex_hashfn(dev->ifindex); |
| 341 | head = &fib_info_devhash[hash]; | 339 | head = &fib_info_devhash[hash]; |
| 342 | hlist_for_each_entry(nh, node, head, nh_hash) { | 340 | hlist_for_each_entry(nh, head, nh_hash) { |
| 343 | if (nh->nh_dev == dev && | 341 | if (nh->nh_dev == dev && |
| 344 | nh->nh_gw == gw && | 342 | nh->nh_gw == gw && |
| 345 | !(nh->nh_flags & RTNH_F_DEAD)) { | 343 | !(nh->nh_flags & RTNH_F_DEAD)) { |
| @@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash, | |||
| 721 | 719 | ||
| 722 | for (i = 0; i < old_size; i++) { | 720 | for (i = 0; i < old_size; i++) { |
| 723 | struct hlist_head *head = &fib_info_hash[i]; | 721 | struct hlist_head *head = &fib_info_hash[i]; |
| 724 | struct hlist_node *node, *n; | 722 | struct hlist_node *n; |
| 725 | struct fib_info *fi; | 723 | struct fib_info *fi; |
| 726 | 724 | ||
| 727 | hlist_for_each_entry_safe(fi, node, n, head, fib_hash) { | 725 | hlist_for_each_entry_safe(fi, n, head, fib_hash) { |
| 728 | struct hlist_head *dest; | 726 | struct hlist_head *dest; |
| 729 | unsigned int new_hash; | 727 | unsigned int new_hash; |
| 730 | 728 | ||
| @@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash, | |||
| 739 | 737 | ||
| 740 | for (i = 0; i < old_size; i++) { | 738 | for (i = 0; i < old_size; i++) { |
| 741 | struct hlist_head *lhead = &fib_info_laddrhash[i]; | 739 | struct hlist_head *lhead = &fib_info_laddrhash[i]; |
| 742 | struct hlist_node *node, *n; | 740 | struct hlist_node *n; |
| 743 | struct fib_info *fi; | 741 | struct fib_info *fi; |
| 744 | 742 | ||
| 745 | hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) { | 743 | hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) { |
| 746 | struct hlist_head *ldest; | 744 | struct hlist_head *ldest; |
| 747 | unsigned int new_hash; | 745 | unsigned int new_hash; |
| 748 | 746 | ||
| @@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local) | |||
| 1096 | int ret = 0; | 1094 | int ret = 0; |
| 1097 | unsigned int hash = fib_laddr_hashfn(local); | 1095 | unsigned int hash = fib_laddr_hashfn(local); |
| 1098 | struct hlist_head *head = &fib_info_laddrhash[hash]; | 1096 | struct hlist_head *head = &fib_info_laddrhash[hash]; |
| 1099 | struct hlist_node *node; | ||
| 1100 | struct fib_info *fi; | 1097 | struct fib_info *fi; |
| 1101 | 1098 | ||
| 1102 | if (fib_info_laddrhash == NULL || local == 0) | 1099 | if (fib_info_laddrhash == NULL || local == 0) |
| 1103 | return 0; | 1100 | return 0; |
| 1104 | 1101 | ||
| 1105 | hlist_for_each_entry(fi, node, head, fib_lhash) { | 1102 | hlist_for_each_entry(fi, head, fib_lhash) { |
| 1106 | if (!net_eq(fi->fib_net, net)) | 1103 | if (!net_eq(fi->fib_net, net)) |
| 1107 | continue; | 1104 | continue; |
| 1108 | if (fi->fib_prefsrc == local) { | 1105 | if (fi->fib_prefsrc == local) { |
| @@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force) | |||
| 1120 | struct fib_info *prev_fi = NULL; | 1117 | struct fib_info *prev_fi = NULL; |
| 1121 | unsigned int hash = fib_devindex_hashfn(dev->ifindex); | 1118 | unsigned int hash = fib_devindex_hashfn(dev->ifindex); |
| 1122 | struct hlist_head *head = &fib_info_devhash[hash]; | 1119 | struct hlist_head *head = &fib_info_devhash[hash]; |
| 1123 | struct hlist_node *node; | ||
| 1124 | struct fib_nh *nh; | 1120 | struct fib_nh *nh; |
| 1125 | 1121 | ||
| 1126 | if (force) | 1122 | if (force) |
| 1127 | scope = -1; | 1123 | scope = -1; |
| 1128 | 1124 | ||
| 1129 | hlist_for_each_entry(nh, node, head, nh_hash) { | 1125 | hlist_for_each_entry(nh, head, nh_hash) { |
| 1130 | struct fib_info *fi = nh->nh_parent; | 1126 | struct fib_info *fi = nh->nh_parent; |
| 1131 | int dead; | 1127 | int dead; |
| 1132 | 1128 | ||
| @@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev) | |||
| 1232 | struct fib_info *prev_fi; | 1228 | struct fib_info *prev_fi; |
| 1233 | unsigned int hash; | 1229 | unsigned int hash; |
| 1234 | struct hlist_head *head; | 1230 | struct hlist_head *head; |
| 1235 | struct hlist_node *node; | ||
| 1236 | struct fib_nh *nh; | 1231 | struct fib_nh *nh; |
| 1237 | int ret; | 1232 | int ret; |
| 1238 | 1233 | ||
| @@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev) | |||
| 1244 | head = &fib_info_devhash[hash]; | 1239 | head = &fib_info_devhash[hash]; |
| 1245 | ret = 0; | 1240 | ret = 0; |
| 1246 | 1241 | ||
| 1247 | hlist_for_each_entry(nh, node, head, nh_hash) { | 1242 | hlist_for_each_entry(nh, head, nh_hash) { |
| 1248 | struct fib_info *fi = nh->nh_parent; | 1243 | struct fib_info *fi = nh->nh_parent; |
| 1249 | int alive; | 1244 | int alive; |
| 1250 | 1245 | ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 61e03da3e1f5..ff06b7543d9f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -920,10 +920,9 @@ nomem: | |||
| 920 | static struct leaf_info *find_leaf_info(struct leaf *l, int plen) | 920 | static struct leaf_info *find_leaf_info(struct leaf *l, int plen) |
| 921 | { | 921 | { |
| 922 | struct hlist_head *head = &l->list; | 922 | struct hlist_head *head = &l->list; |
| 923 | struct hlist_node *node; | ||
| 924 | struct leaf_info *li; | 923 | struct leaf_info *li; |
| 925 | 924 | ||
| 926 | hlist_for_each_entry_rcu(li, node, head, hlist) | 925 | hlist_for_each_entry_rcu(li, head, hlist) |
| 927 | if (li->plen == plen) | 926 | if (li->plen == plen) |
| 928 | return li; | 927 | return li; |
| 929 | 928 | ||
| @@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen) | |||
| 943 | static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) | 942 | static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) |
| 944 | { | 943 | { |
| 945 | struct leaf_info *li = NULL, *last = NULL; | 944 | struct leaf_info *li = NULL, *last = NULL; |
| 946 | struct hlist_node *node; | ||
| 947 | 945 | ||
| 948 | if (hlist_empty(head)) { | 946 | if (hlist_empty(head)) { |
| 949 | hlist_add_head_rcu(&new->hlist, head); | 947 | hlist_add_head_rcu(&new->hlist, head); |
| 950 | } else { | 948 | } else { |
| 951 | hlist_for_each_entry(li, node, head, hlist) { | 949 | hlist_for_each_entry(li, head, hlist) { |
| 952 | if (new->plen > li->plen) | 950 | if (new->plen > li->plen) |
| 953 | break; | 951 | break; |
| 954 | 952 | ||
| @@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l, | |||
| 1354 | { | 1352 | { |
| 1355 | struct leaf_info *li; | 1353 | struct leaf_info *li; |
| 1356 | struct hlist_head *hhead = &l->list; | 1354 | struct hlist_head *hhead = &l->list; |
| 1357 | struct hlist_node *node; | ||
| 1358 | 1355 | ||
| 1359 | hlist_for_each_entry_rcu(li, node, hhead, hlist) { | 1356 | hlist_for_each_entry_rcu(li, hhead, hlist) { |
| 1360 | struct fib_alias *fa; | 1357 | struct fib_alias *fa; |
| 1361 | 1358 | ||
| 1362 | if (l->key != (key & li->mask_plen)) | 1359 | if (l->key != (key & li->mask_plen)) |
| @@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l) | |||
| 1740 | { | 1737 | { |
| 1741 | int found = 0; | 1738 | int found = 0; |
| 1742 | struct hlist_head *lih = &l->list; | 1739 | struct hlist_head *lih = &l->list; |
| 1743 | struct hlist_node *node, *tmp; | 1740 | struct hlist_node *tmp; |
| 1744 | struct leaf_info *li = NULL; | 1741 | struct leaf_info *li = NULL; |
| 1745 | 1742 | ||
| 1746 | hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { | 1743 | hlist_for_each_entry_safe(li, tmp, lih, hlist) { |
| 1747 | found += trie_flush_list(&li->falh); | 1744 | found += trie_flush_list(&li->falh); |
| 1748 | 1745 | ||
| 1749 | if (list_empty(&li->falh)) { | 1746 | if (list_empty(&li->falh)) { |
| @@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb, | |||
| 1895 | struct sk_buff *skb, struct netlink_callback *cb) | 1892 | struct sk_buff *skb, struct netlink_callback *cb) |
| 1896 | { | 1893 | { |
| 1897 | struct leaf_info *li; | 1894 | struct leaf_info *li; |
| 1898 | struct hlist_node *node; | ||
| 1899 | int i, s_i; | 1895 | int i, s_i; |
| 1900 | 1896 | ||
| 1901 | s_i = cb->args[4]; | 1897 | s_i = cb->args[4]; |
| 1902 | i = 0; | 1898 | i = 0; |
| 1903 | 1899 | ||
| 1904 | /* rcu_read_lock is held by caller */ | 1900 | /* rcu_read_lock is held by caller */ |
| 1905 | hlist_for_each_entry_rcu(li, node, &l->list, hlist) { | 1901 | hlist_for_each_entry_rcu(li, &l->list, hlist) { |
| 1906 | if (i < s_i) { | 1902 | if (i < s_i) { |
| 1907 | i++; | 1903 | i++; |
| 1908 | continue; | 1904 | continue; |
| @@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s) | |||
| 2092 | if (IS_LEAF(n)) { | 2088 | if (IS_LEAF(n)) { |
| 2093 | struct leaf *l = (struct leaf *)n; | 2089 | struct leaf *l = (struct leaf *)n; |
| 2094 | struct leaf_info *li; | 2090 | struct leaf_info *li; |
| 2095 | struct hlist_node *tmp; | ||
| 2096 | 2091 | ||
| 2097 | s->leaves++; | 2092 | s->leaves++; |
| 2098 | s->totdepth += iter.depth; | 2093 | s->totdepth += iter.depth; |
| 2099 | if (iter.depth > s->maxdepth) | 2094 | if (iter.depth > s->maxdepth) |
| 2100 | s->maxdepth = iter.depth; | 2095 | s->maxdepth = iter.depth; |
| 2101 | 2096 | ||
| 2102 | hlist_for_each_entry_rcu(li, tmp, &l->list, hlist) | 2097 | hlist_for_each_entry_rcu(li, &l->list, hlist) |
| 2103 | ++s->prefixes; | 2098 | ++s->prefixes; |
| 2104 | } else { | 2099 | } else { |
| 2105 | const struct tnode *tn = (const struct tnode *) n; | 2100 | const struct tnode *tn = (const struct tnode *) n; |
| @@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) | |||
| 2200 | 2195 | ||
| 2201 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 2196 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
| 2202 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; | 2197 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
| 2203 | struct hlist_node *node; | ||
| 2204 | struct fib_table *tb; | 2198 | struct fib_table *tb; |
| 2205 | 2199 | ||
| 2206 | hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { | 2200 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
| 2207 | struct trie *t = (struct trie *) tb->tb_data; | 2201 | struct trie *t = (struct trie *) tb->tb_data; |
| 2208 | struct trie_stat stat; | 2202 | struct trie_stat stat; |
| 2209 | 2203 | ||
| @@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos) | |||
| 2245 | 2239 | ||
| 2246 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 2240 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
| 2247 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; | 2241 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
| 2248 | struct hlist_node *node; | ||
| 2249 | struct fib_table *tb; | 2242 | struct fib_table *tb; |
| 2250 | 2243 | ||
| 2251 | hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { | 2244 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
| 2252 | struct rt_trie_node *n; | 2245 | struct rt_trie_node *n; |
| 2253 | 2246 | ||
| 2254 | for (n = fib_trie_get_first(iter, | 2247 | for (n = fib_trie_get_first(iter, |
| @@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2298 | /* new hash chain */ | 2291 | /* new hash chain */ |
| 2299 | while (++h < FIB_TABLE_HASHSZ) { | 2292 | while (++h < FIB_TABLE_HASHSZ) { |
| 2300 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; | 2293 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
| 2301 | hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) { | 2294 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
| 2302 | n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); | 2295 | n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); |
| 2303 | if (n) | 2296 | if (n) |
| 2304 | goto found; | 2297 | goto found; |
| @@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) | |||
| 2381 | } else { | 2374 | } else { |
| 2382 | struct leaf *l = (struct leaf *) n; | 2375 | struct leaf *l = (struct leaf *) n; |
| 2383 | struct leaf_info *li; | 2376 | struct leaf_info *li; |
| 2384 | struct hlist_node *node; | ||
| 2385 | __be32 val = htonl(l->key); | 2377 | __be32 val = htonl(l->key); |
| 2386 | 2378 | ||
| 2387 | seq_indent(seq, iter->depth); | 2379 | seq_indent(seq, iter->depth); |
| 2388 | seq_printf(seq, " |-- %pI4\n", &val); | 2380 | seq_printf(seq, " |-- %pI4\n", &val); |
| 2389 | 2381 | ||
| 2390 | hlist_for_each_entry_rcu(li, node, &l->list, hlist) { | 2382 | hlist_for_each_entry_rcu(li, &l->list, hlist) { |
| 2391 | struct fib_alias *fa; | 2383 | struct fib_alias *fa; |
| 2392 | 2384 | ||
| 2393 | list_for_each_entry_rcu(fa, &li->falh, fa_list) { | 2385 | list_for_each_entry_rcu(fa, &li->falh, fa_list) { |
| @@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) | |||
| 2532 | { | 2524 | { |
| 2533 | struct leaf *l = v; | 2525 | struct leaf *l = v; |
| 2534 | struct leaf_info *li; | 2526 | struct leaf_info *li; |
| 2535 | struct hlist_node *node; | ||
| 2536 | 2527 | ||
| 2537 | if (v == SEQ_START_TOKEN) { | 2528 | if (v == SEQ_START_TOKEN) { |
| 2538 | seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " | 2529 | seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " |
| @@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) | |||
| 2541 | return 0; | 2532 | return 0; |
| 2542 | } | 2533 | } |
| 2543 | 2534 | ||
| 2544 | hlist_for_each_entry_rcu(li, node, &l->list, hlist) { | 2535 | hlist_for_each_entry_rcu(li, &l->list, hlist) { |
| 2545 | struct fib_alias *fa; | 2536 | struct fib_alias *fa; |
| 2546 | __be32 mask, prefix; | 2537 | __be32 mask, prefix; |
| 2547 | 2538 | ||
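Every fib_trie.c hunk above is the same mechanical conversion: the hlist iteration macros lost their separate struct hlist_node * cursor, and the typed entry pointer itself now walks the list. A minimal before/after sketch of the pattern (illustrative only, using the find_leaf_info() body from the first hunk):

    /* old API: a dedicated node cursor had to be declared and threaded through */
    struct hlist_node *node;
    struct leaf_info *li;

    hlist_for_each_entry_rcu(li, node, head, hlist)
            if (li->plen == plen)
                    return li;

    /* new API: the entry pointer doubles as the cursor */
    struct leaf_info *li;

    hlist_for_each_entry_rcu(li, head, hlist)
            if (li->plen == plen)
                    return li;

The _safe variants keep one struct hlist_node * (the lookahead) so the current entry can be freed mid-walk; only the redundant cursor argument went away.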
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 17ff9fd7cdda..3ac5dff79627 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
| @@ -934,6 +934,29 @@ error: | |||
| 934 | goto drop; | 934 | goto drop; |
| 935 | } | 935 | } |
| 936 | 936 | ||
| 937 | void icmp_err(struct sk_buff *skb, u32 info) | ||
| 938 | { | ||
| 939 | struct iphdr *iph = (struct iphdr *)skb->data; | ||
| 940 | struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); | ||
| 941 | int type = icmp_hdr(skb)->type; | ||
| 942 | int code = icmp_hdr(skb)->code; | ||
| 943 | struct net *net = dev_net(skb->dev); | ||
| 944 | |||
| 945 | /* | ||
| 946 | * Use ping_err to handle all ICMP errors except those | ||
| 947 | * triggered by ICMP_ECHOREPLY, which is sent by the kernel. | ||
| 948 | */ | ||
| 949 | if (icmph->type != ICMP_ECHOREPLY) { | ||
| 950 | ping_err(skb, info); | ||
| 951 | return; | ||
| 952 | } | ||
| 953 | |||
| 954 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) | ||
| 955 | ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0); | ||
| 956 | else if (type == ICMP_REDIRECT) | ||
| 957 | ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0); | ||
| 958 | } | ||
| 959 | |||
| 937 | /* | 960 | /* |
| 938 | * This table is the definition of how we handle ICMP. | 961 | * This table is the definition of how we handle ICMP. |
| 939 | */ | 962 | */ |
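The new icmp_err() only runs once it is registered as the ICMP error callback. That registration is outside this diff; a sketch of what the hookup would look like in the af_inet protocol table (the surrounding field values are assumptions, not part of this change):

    /* sketch: wiring icmp_err in as the net_protocol error handler */
    static const struct net_protocol icmp_protocol = {
            .handler     = icmp_rcv,
            .err_handler = icmp_err,        /* new callback added above */
            .no_policy   = 1,
    };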
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 11cb4979a465..7d1874be1df3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
| @@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
| 57 | const struct inet_bind_bucket *tb, bool relax) | 57 | const struct inet_bind_bucket *tb, bool relax) |
| 58 | { | 58 | { |
| 59 | struct sock *sk2; | 59 | struct sock *sk2; |
| 60 | struct hlist_node *node; | ||
| 61 | int reuse = sk->sk_reuse; | 60 | int reuse = sk->sk_reuse; |
| 62 | int reuseport = sk->sk_reuseport; | 61 | int reuseport = sk->sk_reuseport; |
| 63 | kuid_t uid = sock_i_uid((struct sock *)sk); | 62 | kuid_t uid = sock_i_uid((struct sock *)sk); |
| @@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
| 69 | * one this bucket belongs to. | 68 | * one this bucket belongs to. |
| 70 | */ | 69 | */ |
| 71 | 70 | ||
| 72 | sk_for_each_bound(sk2, node, &tb->owners) { | 71 | sk_for_each_bound(sk2, &tb->owners) { |
| 73 | if (sk != sk2 && | 72 | if (sk != sk2 && |
| 74 | !inet_v6_ipv6only(sk2) && | 73 | !inet_v6_ipv6only(sk2) && |
| 75 | (!sk->sk_bound_dev_if || | 74 | (!sk->sk_bound_dev_if || |
| @@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
| 95 | } | 94 | } |
| 96 | } | 95 | } |
| 97 | } | 96 | } |
| 98 | return node != NULL; | 97 | return sk2 != NULL; |
| 99 | } | 98 | } |
| 100 | EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); | 99 | EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); |
| 101 | 100 | ||
| @@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
| 106 | { | 105 | { |
| 107 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | 106 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
| 108 | struct inet_bind_hashbucket *head; | 107 | struct inet_bind_hashbucket *head; |
| 109 | struct hlist_node *node; | ||
| 110 | struct inet_bind_bucket *tb; | 108 | struct inet_bind_bucket *tb; |
| 111 | int ret, attempts = 5; | 109 | int ret, attempts = 5; |
| 112 | struct net *net = sock_net(sk); | 110 | struct net *net = sock_net(sk); |
| @@ -129,7 +127,7 @@ again: | |||
| 129 | head = &hashinfo->bhash[inet_bhashfn(net, rover, | 127 | head = &hashinfo->bhash[inet_bhashfn(net, rover, |
| 130 | hashinfo->bhash_size)]; | 128 | hashinfo->bhash_size)]; |
| 131 | spin_lock(&head->lock); | 129 | spin_lock(&head->lock); |
| 132 | inet_bind_bucket_for_each(tb, node, &head->chain) | 130 | inet_bind_bucket_for_each(tb, &head->chain) |
| 133 | if (net_eq(ib_net(tb), net) && tb->port == rover) { | 131 | if (net_eq(ib_net(tb), net) && tb->port == rover) { |
| 134 | if (((tb->fastreuse > 0 && | 132 | if (((tb->fastreuse > 0 && |
| 135 | sk->sk_reuse && | 133 | sk->sk_reuse && |
| @@ -183,7 +181,7 @@ have_snum: | |||
| 183 | head = &hashinfo->bhash[inet_bhashfn(net, snum, | 181 | head = &hashinfo->bhash[inet_bhashfn(net, snum, |
| 184 | hashinfo->bhash_size)]; | 182 | hashinfo->bhash_size)]; |
| 185 | spin_lock(&head->lock); | 183 | spin_lock(&head->lock); |
| 186 | inet_bind_bucket_for_each(tb, node, &head->chain) | 184 | inet_bind_bucket_for_each(tb, &head->chain) |
| 187 | if (net_eq(ib_net(tb), net) && tb->port == snum) | 185 | if (net_eq(ib_net(tb), net) && tb->port == snum) |
| 188 | goto tb_found; | 186 | goto tb_found; |
| 189 | } | 187 | } |
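The return-statement change in inet_csk_bind_conflict() is the subtle part of this conversion: the old macros left `node` non-NULL only when the loop body broke out early, and the new macros give the entry pointer the same post-loop convention. A sketch of the idiom (bind_conflicts() is a hypothetical predicate standing in for the checks above):

    sk_for_each_bound(sk2, &tb->owners) {
            if (bind_conflicts(sk, sk2))    /* hypothetical predicate */
                    break;
    }
    /* sk2 is NULL if the walk ran off the end, non-NULL on break */
    return sk2 != NULL;

Code that tests the cursor after the loop, as here and in __inet_inherit_port() below, has to be converted by hand; the purely mechanical hunks do not.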
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 2e453bde6992..245ae078a07f 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy) | |||
| 33 | get_random_bytes(&f->rnd, sizeof(u32)); | 33 | get_random_bytes(&f->rnd, sizeof(u32)); |
| 34 | for (i = 0; i < INETFRAGS_HASHSZ; i++) { | 34 | for (i = 0; i < INETFRAGS_HASHSZ; i++) { |
| 35 | struct inet_frag_queue *q; | 35 | struct inet_frag_queue *q; |
| 36 | struct hlist_node *p, *n; | 36 | struct hlist_node *n; |
| 37 | 37 | ||
| 38 | hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) { | 38 | hlist_for_each_entry_safe(q, n, &f->hash[i], list) { |
| 39 | unsigned int hval = f->hashfn(q); | 39 | unsigned int hval = f->hashfn(q); |
| 40 | 40 | ||
| 41 | if (hval != i) { | 41 | if (hval != i) { |
| @@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, | |||
| 203 | { | 203 | { |
| 204 | struct inet_frag_queue *qp; | 204 | struct inet_frag_queue *qp; |
| 205 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
| 206 | struct hlist_node *n; | ||
| 207 | #endif | 206 | #endif |
| 208 | unsigned int hash; | 207 | unsigned int hash; |
| 209 | 208 | ||
| @@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, | |||
| 219 | * such an entry could be created on another CPU while we | 218 | * such an entry could be created on another CPU while we |
| 220 | * promoted the read lock to a write lock. | 219 | * promoted the read lock to a write lock. |
| 221 | */ | 220 | */ |
| 222 | hlist_for_each_entry(qp, n, &f->hash[hash], list) { | 221 | hlist_for_each_entry(qp, &f->hash[hash], list) { |
| 223 | if (qp->net == nf && f->match(qp, arg)) { | 222 | if (qp->net == nf && f->match(qp, arg)) { |
| 224 | atomic_inc(&qp->refcnt); | 223 | atomic_inc(&qp->refcnt); |
| 225 | write_unlock(&f->lock); | 224 | write_unlock(&f->lock); |
| @@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
| 278 | __releases(&f->lock) | 277 | __releases(&f->lock) |
| 279 | { | 278 | { |
| 280 | struct inet_frag_queue *q; | 279 | struct inet_frag_queue *q; |
| 281 | struct hlist_node *n; | ||
| 282 | 280 | ||
| 283 | hlist_for_each_entry(q, n, &f->hash[hash], list) { | 281 | hlist_for_each_entry(q, &f->hash[hash], list) { |
| 284 | if (q->net == nf && f->match(q, key)) { | 282 | if (q->net == nf && f->match(q, key)) { |
| 285 | atomic_inc(&q->refcnt); | 283 | atomic_inc(&q->refcnt); |
| 286 | read_unlock(&f->lock); | 284 | read_unlock(&f->lock); |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 0ce0595d9861..6af375afeeef 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
| @@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child) | |||
| 120 | * that the listener socket's icsk_bind_hash is the same | 120 | * that the listener socket's icsk_bind_hash is the same |
| 121 | * as that of the child socket. We have to look up or | 121 | * as that of the child socket. We have to look up or |
| 122 | * create a new bind bucket for the child here. */ | 122 | * create a new bind bucket for the child here. */ |
| 123 | struct hlist_node *node; | 123 | inet_bind_bucket_for_each(tb, &head->chain) { |
| 124 | inet_bind_bucket_for_each(tb, node, &head->chain) { | ||
| 125 | if (net_eq(ib_net(tb), sock_net(sk)) && | 124 | if (net_eq(ib_net(tb), sock_net(sk)) && |
| 126 | tb->port == port) | 125 | tb->port == port) |
| 127 | break; | 126 | break; |
| 128 | } | 127 | } |
| 129 | if (!node) { | 128 | if (!tb) { |
| 130 | tb = inet_bind_bucket_create(table->bind_bucket_cachep, | 129 | tb = inet_bind_bucket_create(table->bind_bucket_cachep, |
| 131 | sock_net(sk), head, port); | 130 | sock_net(sk), head, port); |
| 132 | if (!tb) { | 131 | if (!tb) { |
| @@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
| 493 | int i, remaining, low, high, port; | 492 | int i, remaining, low, high, port; |
| 494 | static u32 hint; | 493 | static u32 hint; |
| 495 | u32 offset = hint + port_offset; | 494 | u32 offset = hint + port_offset; |
| 496 | struct hlist_node *node; | ||
| 497 | struct inet_timewait_sock *tw = NULL; | 495 | struct inet_timewait_sock *tw = NULL; |
| 498 | 496 | ||
| 499 | inet_get_local_port_range(&low, &high); | 497 | inet_get_local_port_range(&low, &high); |
| @@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
| 512 | * because the established check is already | 510 | * because the established check is already |
| 513 | * unique enough. | 511 | * unique enough. |
| 514 | */ | 512 | */ |
| 515 | inet_bind_bucket_for_each(tb, node, &head->chain) { | 513 | inet_bind_bucket_for_each(tb, &head->chain) { |
| 516 | if (net_eq(ib_net(tb), net) && | 514 | if (net_eq(ib_net(tb), net) && |
| 517 | tb->port == port) { | 515 | tb->port == port) { |
| 518 | if (tb->fastreuse >= 0 || | 516 | if (tb->fastreuse >= 0 || |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 2784db3155fb..1f27c9f4afd0 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
| @@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr, | |||
| 216 | const int slot) | 216 | const int slot) |
| 217 | { | 217 | { |
| 218 | struct inet_timewait_sock *tw; | 218 | struct inet_timewait_sock *tw; |
| 219 | struct hlist_node *node; | ||
| 220 | unsigned int killed; | 219 | unsigned int killed; |
| 221 | int ret; | 220 | int ret; |
| 222 | 221 | ||
| @@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr, | |||
| 229 | killed = 0; | 228 | killed = 0; |
| 230 | ret = 0; | 229 | ret = 0; |
| 231 | rescan: | 230 | rescan: |
| 232 | inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) { | 231 | inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) { |
| 233 | __inet_twsk_del_dead_node(tw); | 232 | __inet_twsk_del_dead_node(tw); |
| 234 | spin_unlock(&twdr->death_lock); | 233 | spin_unlock(&twdr->death_lock); |
| 235 | __inet_twsk_kill(tw, twdr->hashinfo); | 234 | __inet_twsk_kill(tw, twdr->hashinfo); |
| @@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data) | |||
| 438 | 437 | ||
| 439 | for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) { | 438 | for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) { |
| 440 | if (time_before_eq(j, now)) { | 439 | if (time_before_eq(j, now)) { |
| 441 | struct hlist_node *node, *safe; | 440 | struct hlist_node *safe; |
| 442 | struct inet_timewait_sock *tw; | 441 | struct inet_timewait_sock *tw; |
| 443 | 442 | ||
| 444 | inet_twsk_for_each_inmate_safe(tw, node, safe, | 443 | inet_twsk_for_each_inmate_safe(tw, safe, |
| 445 | &twdr->twcal_row[slot]) { | 444 | &twdr->twcal_row[slot]) { |
| 446 | __inet_twsk_del_dead_node(tw); | 445 | __inet_twsk_del_dead_node(tw); |
| 447 | __inet_twsk_kill(tw, twdr->hashinfo); | 446 | __inet_twsk_kill(tw, twdr->hashinfo); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5ef4da780ac1..d0ef0e674ec5 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -735,7 +735,7 @@ drop: | |||
| 735 | return 0; | 735 | return 0; |
| 736 | } | 736 | } |
| 737 | 737 | ||
| 738 | static struct sk_buff *handle_offloads(struct sk_buff *skb) | 738 | static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb) |
| 739 | { | 739 | { |
| 740 | int err; | 740 | int err; |
| 741 | 741 | ||
| @@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb) | |||
| 745 | goto error; | 745 | goto error; |
| 746 | skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; | 746 | skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; |
| 747 | return skb; | 747 | return skb; |
| 748 | } | 748 | } else if (skb->ip_summed == CHECKSUM_PARTIAL && |
| 749 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 749 | tunnel->parms.o_flags & GRE_CSUM) { |
| 750 | err = skb_checksum_help(skb); | ||
| 751 | if (unlikely(err)) | ||
| 752 | goto error; | ||
| 753 | } else if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
| 750 | skb->ip_summed = CHECKSUM_NONE; | 754 | skb->ip_summed = CHECKSUM_NONE; |
| 751 | 755 | ||
| 752 | return skb; | 756 | return skb; |
| @@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
| 776 | int err; | 780 | int err; |
| 777 | int pkt_len; | 781 | int pkt_len; |
| 778 | 782 | ||
| 779 | skb = handle_offloads(skb); | 783 | skb = handle_offloads(tunnel, skb); |
| 780 | if (IS_ERR(skb)) { | 784 | if (IS_ERR(skb)) { |
| 781 | dev->stats.tx_dropped++; | 785 | dev->stats.tx_dropped++; |
| 782 | return NETDEV_TX_OK; | 786 | return NETDEV_TX_OK; |
| @@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
| 970 | iph->daddr = fl4.daddr; | 974 | iph->daddr = fl4.daddr; |
| 971 | iph->saddr = fl4.saddr; | 975 | iph->saddr = fl4.saddr; |
| 972 | iph->ttl = ttl; | 976 | iph->ttl = ttl; |
| 973 | iph->id = 0; | 977 | |
| 978 | tunnel_ip_select_ident(skb, old_iph, &rt->dst); | ||
| 974 | 979 | ||
| 975 | if (ttl == 0) { | 980 | if (ttl == 0) { |
| 976 | if (skb->protocol == htons(ETH_P_IP)) | 981 | if (skb->protocol == htons(ETH_P_IP)) |
| @@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev) | |||
| 1101 | tunnel->hlen = addend; | 1106 | tunnel->hlen = addend; |
| 1102 | /* TCP offload with GRE SEQ is not supported. */ | 1107 | /* TCP offload with GRE SEQ is not supported. */ |
| 1103 | if (!(tunnel->parms.o_flags & GRE_SEQ)) { | 1108 | if (!(tunnel->parms.o_flags & GRE_SEQ)) { |
| 1104 | /* device supports enc gso offload*/ | 1109 | dev->features |= NETIF_F_GSO_SOFTWARE; |
| 1105 | if (tdev->hw_enc_features & NETIF_F_GRE_GSO) { | 1110 | dev->hw_features |= NETIF_F_GSO_SOFTWARE; |
| 1106 | dev->features |= NETIF_F_TSO; | ||
| 1107 | dev->hw_features |= NETIF_F_TSO; | ||
| 1108 | } else { | ||
| 1109 | dev->features |= NETIF_F_GSO_SOFTWARE; | ||
| 1110 | dev->hw_features |= NETIF_F_GSO_SOFTWARE; | ||
| 1111 | } | ||
| 1112 | } | 1111 | } |
| 1113 | 1112 | ||
| 1114 | return mtu; | 1113 | return mtu; |
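handle_offloads() now takes the tunnel so it can consult o_flags: GSO packets are tagged SKB_GSO_GRE as before, but a CHECKSUM_PARTIAL packet on a tunnel that emits a GRE checksum must be resolved in software first, since the pending partial checksum cannot coexist with a GRE checksum computed over the payload. The resulting decision tree, condensed (a sketch; the GSO setup and error paths above are trimmed):

    /* sketch: three-way checksum decision after this change */
    if (skb_is_gso(skb)) {
            skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;       /* segment later */
    } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
               (tunnel->parms.o_flags & GRE_CSUM)) {
            if (skb_checksum_help(skb))                     /* finish csum now */
                    goto error;
    } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
            skb->ip_summed = CHECKSUM_NONE;
    }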
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 75e33a7048f8..5852b249054f 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
| @@ -657,7 +657,7 @@ static int clusterip_proc_release(struct inode *inode, struct file *file) | |||
| 657 | static ssize_t clusterip_proc_write(struct file *file, const char __user *input, | 657 | static ssize_t clusterip_proc_write(struct file *file, const char __user *input, |
| 658 | size_t size, loff_t *ofs) | 658 | size_t size, loff_t *ofs) |
| 659 | { | 659 | { |
| 660 | struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data; | 660 | struct clusterip_config *c = PDE(file_inode(file))->data; |
| 661 | #define PROC_WRITELEN 10 | 661 | #define PROC_WRITELEN 10 |
| 662 | char buffer[PROC_WRITELEN+1]; | 662 | char buffer[PROC_WRITELEN+1]; |
| 663 | unsigned long nodenum; | 663 | unsigned long nodenum; |
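The CLUSTERIP hunk is a one-line switch to the file_inode() accessor, which replaces the open-coded dentry chase. The two spellings are equivalent:

    /* old: */ struct inode *inode = file->f_path.dentry->d_inode;
    /* new: */ struct inode *inode = file_inode(file);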
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 55c4ee1bba06..2e91006d6076 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
| @@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info) | |||
| 322 | struct iphdr *iph = (struct iphdr *)skb->data; | 322 | struct iphdr *iph = (struct iphdr *)skb->data; |
| 323 | struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); | 323 | struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); |
| 324 | struct inet_sock *inet_sock; | 324 | struct inet_sock *inet_sock; |
| 325 | int type = icmph->type; | 325 | int type = icmp_hdr(skb)->type; |
| 326 | int code = icmph->code; | 326 | int code = icmp_hdr(skb)->code; |
| 327 | struct net *net = dev_net(skb->dev); | 327 | struct net *net = dev_net(skb->dev); |
| 328 | struct sock *sk; | 328 | struct sock *sk; |
| 329 | int harderr; | 329 | int harderr; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 53ddebc292b6..dd44e0ab600c 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
| @@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk); | |||
| 111 | static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, | 111 | static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, |
| 112 | unsigned short num, __be32 raddr, __be32 laddr, int dif) | 112 | unsigned short num, __be32 raddr, __be32 laddr, int dif) |
| 113 | { | 113 | { |
| 114 | struct hlist_node *node; | 114 | sk_for_each_from(sk) { |
| 115 | |||
| 116 | sk_for_each_from(sk, node) { | ||
| 117 | struct inet_sock *inet = inet_sk(sk); | 115 | struct inet_sock *inet = inet_sk(sk); |
| 118 | 116 | ||
| 119 | if (net_eq(sock_net(sk), net) && inet->inet_num == num && | 117 | if (net_eq(sock_net(sk), net) && inet->inet_num == num && |
| @@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq) | |||
| 914 | 912 | ||
| 915 | for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; | 913 | for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; |
| 916 | ++state->bucket) { | 914 | ++state->bucket) { |
| 917 | struct hlist_node *node; | 915 | sk_for_each(sk, &state->h->ht[state->bucket]) |
| 918 | |||
| 919 | sk_for_each(sk, node, &state->h->ht[state->bucket]) | ||
| 920 | if (sock_net(sk) == seq_file_net(seq)) | 916 | if (sock_net(sk) == seq_file_net(seq)) |
| 921 | goto found; | 917 | goto found; |
| 922 | } | 918 | } |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7a5ba48c2cc9..47e854fcae24 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1409,10 +1409,10 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) | |||
| 1409 | return; | 1409 | return; |
| 1410 | 1410 | ||
| 1411 | last_issued = tp->ucopy.dma_cookie; | 1411 | last_issued = tp->ucopy.dma_cookie; |
| 1412 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1412 | dma_async_issue_pending(tp->ucopy.dma_chan); |
| 1413 | 1413 | ||
| 1414 | do { | 1414 | do { |
| 1415 | if (dma_async_memcpy_complete(tp->ucopy.dma_chan, | 1415 | if (dma_async_is_tx_complete(tp->ucopy.dma_chan, |
| 1416 | last_issued, &done, | 1416 | last_issued, &done, |
| 1417 | &used) == DMA_SUCCESS) { | 1417 | &used) == DMA_SUCCESS) { |
| 1418 | /* Safe to free early-copied skbs now */ | 1418 | /* Safe to free early-copied skbs now */ |
| @@ -1754,7 +1754,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
| 1754 | tcp_service_net_dma(sk, true); | 1754 | tcp_service_net_dma(sk, true); |
| 1755 | tcp_cleanup_rbuf(sk, copied); | 1755 | tcp_cleanup_rbuf(sk, copied); |
| 1756 | } else | 1756 | } else |
| 1757 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1757 | dma_async_issue_pending(tp->ucopy.dma_chan); |
| 1758 | } | 1758 | } |
| 1759 | #endif | 1759 | #endif |
| 1760 | if (copied >= target) { | 1760 | if (copied >= target) { |
| @@ -1847,7 +1847,7 @@ do_prequeue: | |||
| 1847 | break; | 1847 | break; |
| 1848 | } | 1848 | } |
| 1849 | 1849 | ||
| 1850 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1850 | dma_async_issue_pending(tp->ucopy.dma_chan); |
| 1851 | 1851 | ||
| 1852 | if ((offset + used) == skb->len) | 1852 | if ((offset + used) == skb->len) |
| 1853 | copied_early = true; | 1853 | copied_early = true; |
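The tcp.c hunks are a pure rename to the generic dmaengine API; arguments and semantics are unchanged. The mapping, as used at the call sites above:

    dma_async_memcpy_issue_pending(chan);
            /* becomes */
    dma_async_issue_pending(chan);

    dma_async_memcpy_complete(chan, last_issued, &done, &used);
            /* becomes */
    dma_async_is_tx_complete(chan, last_issued, &done, &used);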
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 145d3bf8df86..4a8ec457310f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, | |||
| 954 | { | 954 | { |
| 955 | struct tcp_sock *tp = tcp_sk(sk); | 955 | struct tcp_sock *tp = tcp_sk(sk); |
| 956 | struct tcp_md5sig_key *key; | 956 | struct tcp_md5sig_key *key; |
| 957 | struct hlist_node *pos; | ||
| 958 | unsigned int size = sizeof(struct in_addr); | 957 | unsigned int size = sizeof(struct in_addr); |
| 959 | struct tcp_md5sig_info *md5sig; | 958 | struct tcp_md5sig_info *md5sig; |
| 960 | 959 | ||
| @@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, | |||
| 968 | if (family == AF_INET6) | 967 | if (family == AF_INET6) |
| 969 | size = sizeof(struct in6_addr); | 968 | size = sizeof(struct in6_addr); |
| 970 | #endif | 969 | #endif |
| 971 | hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) { | 970 | hlist_for_each_entry_rcu(key, &md5sig->head, node) { |
| 972 | if (key->family != family) | 971 | if (key->family != family) |
| 973 | continue; | 972 | continue; |
| 974 | if (!memcmp(&key->addr, addr, size)) | 973 | if (!memcmp(&key->addr, addr, size)) |
| @@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk) | |||
| 1069 | { | 1068 | { |
| 1070 | struct tcp_sock *tp = tcp_sk(sk); | 1069 | struct tcp_sock *tp = tcp_sk(sk); |
| 1071 | struct tcp_md5sig_key *key; | 1070 | struct tcp_md5sig_key *key; |
| 1072 | struct hlist_node *pos, *n; | 1071 | struct hlist_node *n; |
| 1073 | struct tcp_md5sig_info *md5sig; | 1072 | struct tcp_md5sig_info *md5sig; |
| 1074 | 1073 | ||
| 1075 | md5sig = rcu_dereference_protected(tp->md5sig_info, 1); | 1074 | md5sig = rcu_dereference_protected(tp->md5sig_info, 1); |
| 1076 | 1075 | ||
| 1077 | if (!hlist_empty(&md5sig->head)) | 1076 | if (!hlist_empty(&md5sig->head)) |
| 1078 | tcp_free_md5sig_pool(); | 1077 | tcp_free_md5sig_pool(); |
| 1079 | hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) { | 1078 | hlist_for_each_entry_safe(key, n, &md5sig->head, node) { |
| 1080 | hlist_del_rcu(&key->node); | 1079 | hlist_del_rcu(&key->node); |
| 1081 | atomic_sub(sizeof(*key), &sk->sk_omem_alloc); | 1080 | atomic_sub(sizeof(*key), &sk->sk_omem_alloc); |
| 1082 | kfree_rcu(key, rcu); | 1081 | kfree_rcu(key, rcu); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fd0cea114b5d..e2b4461074da 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | |||
| 1351 | return 0; | 1351 | return 0; |
| 1352 | } | 1352 | } |
| 1353 | 1353 | ||
| 1354 | /* Calculate MSS. Not accounting for SACKs here. */ | 1354 | /* Calculate MSS, not accounting for any TCP options. */ |
| 1355 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) | 1355 | static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) |
| 1356 | { | 1356 | { |
| 1357 | const struct tcp_sock *tp = tcp_sk(sk); | 1357 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1358 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1358 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| @@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu) | |||
| 1381 | /* Then reserve room for full set of TCP options and 8 bytes of data */ | 1381 | /* Then reserve room for full set of TCP options and 8 bytes of data */ |
| 1382 | if (mss_now < 48) | 1382 | if (mss_now < 48) |
| 1383 | mss_now = 48; | 1383 | mss_now = 48; |
| 1384 | |||
| 1385 | /* Now subtract TCP options size, not including SACKs */ | ||
| 1386 | mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); | ||
| 1387 | |||
| 1388 | return mss_now; | 1384 | return mss_now; |
| 1389 | } | 1385 | } |
| 1390 | 1386 | ||
| 1387 | /* Calculate MSS. Not accounting for SACKs here. */ | ||
| 1388 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) | ||
| 1389 | { | ||
| 1390 | /* Subtract TCP options size, not including SACKs */ | ||
| 1391 | return __tcp_mtu_to_mss(sk, pmtu) - | ||
| 1392 | (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); | ||
| 1393 | } | ||
| 1394 | |||
| 1391 | /* Inverse of above */ | 1395 | /* Inverse of above */ |
| 1392 | int tcp_mss_to_mtu(struct sock *sk, int mss) | 1396 | int tcp_mss_to_mtu(struct sock *sk, int mss) |
| 1393 | { | 1397 | { |
| @@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
| 2930 | */ | 2934 | */ |
| 2931 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) | 2935 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) |
| 2932 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; | 2936 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; |
| 2933 | space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - | 2937 | space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - |
| 2934 | MAX_TCP_OPTION_SPACE; | 2938 | MAX_TCP_OPTION_SPACE; |
| 2935 | 2939 | ||
| 2936 | syn_data = skb_copy_expand(syn, skb_headroom(syn), space, | 2940 | syn_data = skb_copy_expand(syn, skb_headroom(syn), space, |
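Splitting __tcp_mtu_to_mss() out lets tcp_send_syn_data() budget its Fast Open payload from the option-free MSS and then reserve the full MAX_TCP_OPTION_SPACE (40 bytes), instead of deducting the currently negotiated options twice. A worked example (a sketch, assuming IPv4 with no IP options, no extension headers, an mss_clamp of at least 1460, and timestamps enabled so tcp_header_len = 32):

    __tcp_mtu_to_mss(sk, 1500)  =  1500 - 20 - 20               =  1460
    tcp_mtu_to_mss(sk, 1500)    =  1460 - (32 - 20)             =  1448
    syn_data space              =  1460 - MAX_TCP_OPTION_SPACE  =  1420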
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4dc0d44a5d31..f2c7e615f902 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, | |||
| 1419 | struct net_device *dev, int strict) | 1419 | struct net_device *dev, int strict) |
| 1420 | { | 1420 | { |
| 1421 | struct inet6_ifaddr *ifp; | 1421 | struct inet6_ifaddr *ifp; |
| 1422 | struct hlist_node *node; | ||
| 1423 | unsigned int hash = inet6_addr_hash(addr); | 1422 | unsigned int hash = inet6_addr_hash(addr); |
| 1424 | 1423 | ||
| 1425 | rcu_read_lock_bh(); | 1424 | rcu_read_lock_bh(); |
| 1426 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { | 1425 | hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { |
| 1427 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1426 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
| 1428 | continue; | 1427 | continue; |
| 1429 | if (ipv6_addr_equal(&ifp->addr, addr) && | 1428 | if (ipv6_addr_equal(&ifp->addr, addr) && |
| @@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | |||
| 1445 | { | 1444 | { |
| 1446 | unsigned int hash = inet6_addr_hash(addr); | 1445 | unsigned int hash = inet6_addr_hash(addr); |
| 1447 | struct inet6_ifaddr *ifp; | 1446 | struct inet6_ifaddr *ifp; |
| 1448 | struct hlist_node *node; | ||
| 1449 | 1447 | ||
| 1450 | hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) { | 1448 | hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) { |
| 1451 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1449 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
| 1452 | continue; | 1450 | continue; |
| 1453 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1451 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
| @@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add | |||
| 1487 | { | 1485 | { |
| 1488 | struct inet6_ifaddr *ifp, *result = NULL; | 1486 | struct inet6_ifaddr *ifp, *result = NULL; |
| 1489 | unsigned int hash = inet6_addr_hash(addr); | 1487 | unsigned int hash = inet6_addr_hash(addr); |
| 1490 | struct hlist_node *node; | ||
| 1491 | 1488 | ||
| 1492 | rcu_read_lock_bh(); | 1489 | rcu_read_lock_bh(); |
| 1493 | hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) { | 1490 | hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) { |
| 1494 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1491 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
| 1495 | continue; | 1492 | continue; |
| 1496 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1493 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
| @@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
| 2907 | /* Step 2: clear hash table */ | 2904 | /* Step 2: clear hash table */ |
| 2908 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 2905 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
| 2909 | struct hlist_head *h = &inet6_addr_lst[i]; | 2906 | struct hlist_head *h = &inet6_addr_lst[i]; |
| 2910 | struct hlist_node *n; | ||
| 2911 | 2907 | ||
| 2912 | spin_lock_bh(&addrconf_hash_lock); | 2908 | spin_lock_bh(&addrconf_hash_lock); |
| 2913 | restart: | 2909 | restart: |
| 2914 | hlist_for_each_entry_rcu(ifa, n, h, addr_lst) { | 2910 | hlist_for_each_entry_rcu(ifa, h, addr_lst) { |
| 2915 | if (ifa->idev == idev) { | 2911 | if (ifa->idev == idev) { |
| 2916 | hlist_del_init_rcu(&ifa->addr_lst); | 2912 | hlist_del_init_rcu(&ifa->addr_lst); |
| 2917 | addrconf_del_timer(ifa); | 2913 | addrconf_del_timer(ifa); |
| @@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) | |||
| 3218 | } | 3214 | } |
| 3219 | 3215 | ||
| 3220 | for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { | 3216 | for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { |
| 3221 | struct hlist_node *n; | 3217 | hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket], |
| 3222 | hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], | ||
| 3223 | addr_lst) { | 3218 | addr_lst) { |
| 3224 | if (!net_eq(dev_net(ifa->idev->dev), net)) | 3219 | if (!net_eq(dev_net(ifa->idev->dev), net)) |
| 3225 | continue; | 3220 | continue; |
| @@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
| 3244 | { | 3239 | { |
| 3245 | struct if6_iter_state *state = seq->private; | 3240 | struct if6_iter_state *state = seq->private; |
| 3246 | struct net *net = seq_file_net(seq); | 3241 | struct net *net = seq_file_net(seq); |
| 3247 | struct hlist_node *n = &ifa->addr_lst; | ||
| 3248 | 3242 | ||
| 3249 | hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { | 3243 | hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) { |
| 3250 | if (!net_eq(dev_net(ifa->idev->dev), net)) | 3244 | if (!net_eq(dev_net(ifa->idev->dev), net)) |
| 3251 | continue; | 3245 | continue; |
| 3252 | state->offset++; | 3246 | state->offset++; |
| @@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
| 3255 | 3249 | ||
| 3256 | while (++state->bucket < IN6_ADDR_HSIZE) { | 3250 | while (++state->bucket < IN6_ADDR_HSIZE) { |
| 3257 | state->offset = 0; | 3251 | state->offset = 0; |
| 3258 | hlist_for_each_entry_rcu_bh(ifa, n, | 3252 | hlist_for_each_entry_rcu_bh(ifa, |
| 3259 | &inet6_addr_lst[state->bucket], addr_lst) { | 3253 | &inet6_addr_lst[state->bucket], addr_lst) { |
| 3260 | if (!net_eq(dev_net(ifa->idev->dev), net)) | 3254 | if (!net_eq(dev_net(ifa->idev->dev), net)) |
| 3261 | continue; | 3255 | continue; |
| @@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) | |||
| 3357 | { | 3351 | { |
| 3358 | int ret = 0; | 3352 | int ret = 0; |
| 3359 | struct inet6_ifaddr *ifp = NULL; | 3353 | struct inet6_ifaddr *ifp = NULL; |
| 3360 | struct hlist_node *n; | ||
| 3361 | unsigned int hash = inet6_addr_hash(addr); | 3354 | unsigned int hash = inet6_addr_hash(addr); |
| 3362 | 3355 | ||
| 3363 | rcu_read_lock_bh(); | 3356 | rcu_read_lock_bh(); |
| 3364 | hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) { | 3357 | hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) { |
| 3365 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 3358 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
| 3366 | continue; | 3359 | continue; |
| 3367 | if (ipv6_addr_equal(&ifp->addr, addr) && | 3360 | if (ipv6_addr_equal(&ifp->addr, addr) && |
| @@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo) | |||
| 3383 | { | 3376 | { |
| 3384 | unsigned long now, next, next_sec, next_sched; | 3377 | unsigned long now, next, next_sec, next_sched; |
| 3385 | struct inet6_ifaddr *ifp; | 3378 | struct inet6_ifaddr *ifp; |
| 3386 | struct hlist_node *node; | ||
| 3387 | int i; | 3379 | int i; |
| 3388 | 3380 | ||
| 3389 | rcu_read_lock_bh(); | 3381 | rcu_read_lock_bh(); |
| @@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo) | |||
| 3395 | 3387 | ||
| 3396 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 3388 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
| 3397 | restart: | 3389 | restart: |
| 3398 | hlist_for_each_entry_rcu_bh(ifp, node, | 3390 | hlist_for_each_entry_rcu_bh(ifp, |
| 3399 | &inet6_addr_lst[i], addr_lst) { | 3391 | &inet6_addr_lst[i], addr_lst) { |
| 3400 | unsigned long age; | 3392 | unsigned long age; |
| 3401 | 3393 | ||
| @@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 3866 | struct net_device *dev; | 3858 | struct net_device *dev; |
| 3867 | struct inet6_dev *idev; | 3859 | struct inet6_dev *idev; |
| 3868 | struct hlist_head *head; | 3860 | struct hlist_head *head; |
| 3869 | struct hlist_node *node; | ||
| 3870 | 3861 | ||
| 3871 | s_h = cb->args[0]; | 3862 | s_h = cb->args[0]; |
| 3872 | s_idx = idx = cb->args[1]; | 3863 | s_idx = idx = cb->args[1]; |
| @@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 3876 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { | 3867 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
| 3877 | idx = 0; | 3868 | idx = 0; |
| 3878 | head = &net->dev_index_head[h]; | 3869 | head = &net->dev_index_head[h]; |
| 3879 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 3870 | hlist_for_each_entry_rcu(dev, head, index_hlist) { |
| 3880 | if (idx < s_idx) | 3871 | if (idx < s_idx) |
| 3881 | goto cont; | 3872 | goto cont; |
| 3882 | if (h > s_h || idx > s_idx) | 3873 | if (h > s_h || idx > s_idx) |
| @@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 4222 | struct net_device *dev; | 4213 | struct net_device *dev; |
| 4223 | struct inet6_dev *idev; | 4214 | struct inet6_dev *idev; |
| 4224 | struct hlist_head *head; | 4215 | struct hlist_head *head; |
| 4225 | struct hlist_node *node; | ||
| 4226 | 4216 | ||
| 4227 | s_h = cb->args[0]; | 4217 | s_h = cb->args[0]; |
| 4228 | s_idx = cb->args[1]; | 4218 | s_idx = cb->args[1]; |
| @@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 4231 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { | 4221 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
| 4232 | idx = 0; | 4222 | idx = 0; |
| 4233 | head = &net->dev_index_head[h]; | 4223 | head = &net->dev_index_head[h]; |
| 4234 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 4224 | hlist_for_each_entry_rcu(dev, head, index_hlist) { |
| 4235 | if (idx < s_idx) | 4225 | if (idx < s_idx) |
| 4236 | goto cont; | 4226 | goto cont; |
| 4237 | idev = __in6_dev_get(dev); | 4227 | idev = __in6_dev_get(dev); |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index ff76eecfd622..aad64352cb60 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
| @@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, | |||
| 173 | const struct in6_addr *addr, | 173 | const struct in6_addr *addr, |
| 174 | int type, int ifindex) | 174 | int type, int ifindex) |
| 175 | { | 175 | { |
| 176 | struct hlist_node *pos; | ||
| 177 | struct ip6addrlbl_entry *p; | 176 | struct ip6addrlbl_entry *p; |
| 178 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { | 177 | hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) { |
| 179 | if (__ip6addrlbl_match(net, p, addr, type, ifindex)) | 178 | if (__ip6addrlbl_match(net, p, addr, type, ifindex)) |
| 180 | return p; | 179 | return p; |
| 181 | } | 180 | } |
| @@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) | |||
| 261 | if (hlist_empty(&ip6addrlbl_table.head)) { | 260 | if (hlist_empty(&ip6addrlbl_table.head)) { |
| 262 | hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); | 261 | hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); |
| 263 | } else { | 262 | } else { |
| 264 | struct hlist_node *pos, *n; | 263 | struct hlist_node *n; |
| 265 | struct ip6addrlbl_entry *p = NULL; | 264 | struct ip6addrlbl_entry *p = NULL; |
| 266 | hlist_for_each_entry_safe(p, pos, n, | 265 | hlist_for_each_entry_safe(p, n, |
| 267 | &ip6addrlbl_table.head, list) { | 266 | &ip6addrlbl_table.head, list) { |
| 268 | if (p->prefixlen == newp->prefixlen && | 267 | if (p->prefixlen == newp->prefixlen && |
| 269 | net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && | 268 | net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && |
| @@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net, | |||
| 319 | int ifindex) | 318 | int ifindex) |
| 320 | { | 319 | { |
| 321 | struct ip6addrlbl_entry *p = NULL; | 320 | struct ip6addrlbl_entry *p = NULL; |
| 322 | struct hlist_node *pos, *n; | 321 | struct hlist_node *n; |
| 323 | int ret = -ESRCH; | 322 | int ret = -ESRCH; |
| 324 | 323 | ||
| 325 | ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", | 324 | ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", |
| 326 | __func__, prefix, prefixlen, ifindex); | 325 | __func__, prefix, prefixlen, ifindex); |
| 327 | 326 | ||
| 328 | hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { | 327 | hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) { |
| 329 | if (p->prefixlen == prefixlen && | 328 | if (p->prefixlen == prefixlen && |
| 330 | net_eq(ip6addrlbl_net(p), net) && | 329 | net_eq(ip6addrlbl_net(p), net) && |
| 331 | p->ifindex == ifindex && | 330 | p->ifindex == ifindex && |
| @@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net) | |||
| 380 | static void __net_exit ip6addrlbl_net_exit(struct net *net) | 379 | static void __net_exit ip6addrlbl_net_exit(struct net *net) |
| 381 | { | 380 | { |
| 382 | struct ip6addrlbl_entry *p = NULL; | 381 | struct ip6addrlbl_entry *p = NULL; |
| 383 | struct hlist_node *pos, *n; | 382 | struct hlist_node *n; |
| 384 | 383 | ||
| 385 | /* Remove all labels belonging to the exiting net */ | 384 | /* Remove all labels belonging to the exiting net */ |
| 386 | spin_lock(&ip6addrlbl_table.lock); | 385 | spin_lock(&ip6addrlbl_table.lock); |
| 387 | hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { | 386 | hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) { |
| 388 | if (net_eq(ip6addrlbl_net(p), net)) { | 387 | if (net_eq(ip6addrlbl_net(p), net)) { |
| 389 | hlist_del_rcu(&p->list); | 388 | hlist_del_rcu(&p->list); |
| 390 | ip6addrlbl_put(p); | 389 | ip6addrlbl_put(p); |
| @@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 505 | { | 504 | { |
| 506 | struct net *net = sock_net(skb->sk); | 505 | struct net *net = sock_net(skb->sk); |
| 507 | struct ip6addrlbl_entry *p; | 506 | struct ip6addrlbl_entry *p; |
| 508 | struct hlist_node *pos; | ||
| 509 | int idx = 0, s_idx = cb->args[0]; | 507 | int idx = 0, s_idx = cb->args[0]; |
| 510 | int err; | 508 | int err; |
| 511 | 509 | ||
| 512 | rcu_read_lock(); | 510 | rcu_read_lock(); |
| 513 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { | 511 | hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) { |
| 514 | if (idx >= s_idx && | 512 | if (idx >= s_idx && |
| 515 | net_eq(ip6addrlbl_net(p), net)) { | 513 | net_eq(ip6addrlbl_net(p), net)) { |
| 516 | if ((err = ip6addrlbl_fill(skb, p, | 514 | if ((err = ip6addrlbl_fill(skb, p, |
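The _safe conversions in addrlabel.c show why one struct hlist_node * survives: hlist_for_each_entry_safe() caches the next node before the body runs, so the current entry may be unlinked and freed mid-walk. The new-style deletion loop, reduced to its shape (a sketch based on ip6addrlbl_net_exit() above):

    struct ip6addrlbl_entry *p;
    struct hlist_node *n;           /* lookahead; survives deletion of p */

    hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
            hlist_del_rcu(&p->list);
            ip6addrlbl_put(p);
    }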
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index b386a2ce4c6f..9bfab19ff3c0 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
| @@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
| 31 | const struct inet_bind_bucket *tb, bool relax) | 31 | const struct inet_bind_bucket *tb, bool relax) |
| 32 | { | 32 | { |
| 33 | const struct sock *sk2; | 33 | const struct sock *sk2; |
| 34 | const struct hlist_node *node; | ||
| 35 | int reuse = sk->sk_reuse; | 34 | int reuse = sk->sk_reuse; |
| 36 | int reuseport = sk->sk_reuseport; | 35 | int reuseport = sk->sk_reuseport; |
| 37 | kuid_t uid = sock_i_uid((struct sock *)sk); | 36 | kuid_t uid = sock_i_uid((struct sock *)sk); |
| @@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
| 41 | * See comment in inet_csk_bind_conflict about sock lookup | 40 | * See comment in inet_csk_bind_conflict about sock lookup |
| 42 | * vs net namespaces issues. | 41 | * vs net namespaces issues. |
| 43 | */ | 42 | */ |
| 44 | sk_for_each_bound(sk2, node, &tb->owners) { | 43 | sk_for_each_bound(sk2, &tb->owners) { |
| 45 | if (sk != sk2 && | 44 | if (sk != sk2 && |
| 46 | (!sk->sk_bound_dev_if || | 45 | (!sk->sk_bound_dev_if || |
| 47 | !sk2->sk_bound_dev_if || | 46 | !sk2->sk_bound_dev_if || |
| @@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
| 58 | } | 57 | } |
| 59 | } | 58 | } |
| 60 | 59 | ||
| 61 | return node != NULL; | 60 | return sk2 != NULL; |
| 62 | } | 61 | } |
| 63 | 62 | ||
| 64 | EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); | 63 | EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 710cafd2e1a9..192dd1a0e188 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id) | |||
| 224 | { | 224 | { |
| 225 | struct fib6_table *tb; | 225 | struct fib6_table *tb; |
| 226 | struct hlist_head *head; | 226 | struct hlist_head *head; |
| 227 | struct hlist_node *node; | ||
| 228 | unsigned int h; | 227 | unsigned int h; |
| 229 | 228 | ||
| 230 | if (id == 0) | 229 | if (id == 0) |
| @@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id) | |||
| 232 | h = id & (FIB6_TABLE_HASHSZ - 1); | 231 | h = id & (FIB6_TABLE_HASHSZ - 1); |
| 233 | rcu_read_lock(); | 232 | rcu_read_lock(); |
| 234 | head = &net->ipv6.fib_table_hash[h]; | 233 | head = &net->ipv6.fib_table_hash[h]; |
| 235 | hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { | 234 | hlist_for_each_entry_rcu(tb, head, tb6_hlist) { |
| 236 | if (tb->tb6_id == id) { | 235 | if (tb->tb6_id == id) { |
| 237 | rcu_read_unlock(); | 236 | rcu_read_unlock(); |
| 238 | return tb; | 237 | return tb; |
| @@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 363 | struct rt6_rtnl_dump_arg arg; | 362 | struct rt6_rtnl_dump_arg arg; |
| 364 | struct fib6_walker_t *w; | 363 | struct fib6_walker_t *w; |
| 365 | struct fib6_table *tb; | 364 | struct fib6_table *tb; |
| 366 | struct hlist_node *node; | ||
| 367 | struct hlist_head *head; | 365 | struct hlist_head *head; |
| 368 | int res = 0; | 366 | int res = 0; |
| 369 | 367 | ||
| @@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 398 | for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { | 396 | for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { |
| 399 | e = 0; | 397 | e = 0; |
| 400 | head = &net->ipv6.fib_table_hash[h]; | 398 | head = &net->ipv6.fib_table_hash[h]; |
| 401 | hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { | 399 | hlist_for_each_entry_rcu(tb, head, tb6_hlist) { |
| 402 | if (e < s_e) | 400 | if (e < s_e) |
| 403 | goto next; | 401 | goto next; |
| 404 | res = fib6_dump_table(tb, skb, cb); | 402 | res = fib6_dump_table(tb, skb, cb); |
| @@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg | |||
| 1520 | int prune, void *arg) | 1518 | int prune, void *arg) |
| 1521 | { | 1519 | { |
| 1522 | struct fib6_table *table; | 1520 | struct fib6_table *table; |
| 1523 | struct hlist_node *node; | ||
| 1524 | struct hlist_head *head; | 1521 | struct hlist_head *head; |
| 1525 | unsigned int h; | 1522 | unsigned int h; |
| 1526 | 1523 | ||
| 1527 | rcu_read_lock(); | 1524 | rcu_read_lock(); |
| 1528 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { | 1525 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { |
| 1529 | head = &net->ipv6.fib_table_hash[h]; | 1526 | head = &net->ipv6.fib_table_hash[h]; |
| 1530 | hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { | 1527 | hlist_for_each_entry_rcu(table, head, tb6_hlist) { |
| 1531 | read_lock_bh(&table->tb6_lock); | 1528 | read_lock_bh(&table->tb6_lock); |
| 1532 | fib6_clean_tree(net, &table->tb6_root, | 1529 | fib6_clean_tree(net, &table->tb6_root, |
| 1533 | func, prune, arg); | 1530 | func, prune, arg); |
| @@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), | |||
| 1540 | int prune, void *arg) | 1537 | int prune, void *arg) |
| 1541 | { | 1538 | { |
| 1542 | struct fib6_table *table; | 1539 | struct fib6_table *table; |
| 1543 | struct hlist_node *node; | ||
| 1544 | struct hlist_head *head; | 1540 | struct hlist_head *head; |
| 1545 | unsigned int h; | 1541 | unsigned int h; |
| 1546 | 1542 | ||
| 1547 | rcu_read_lock(); | 1543 | rcu_read_lock(); |
| 1548 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { | 1544 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { |
| 1549 | head = &net->ipv6.fib_table_hash[h]; | 1545 | head = &net->ipv6.fib_table_hash[h]; |
| 1550 | hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { | 1546 | hlist_for_each_entry_rcu(table, head, tb6_hlist) { |
| 1551 | write_lock_bh(&table->tb6_lock); | 1547 | write_lock_bh(&table->tb6_lock); |
| 1552 | fib6_clean_tree(net, &table->tb6_root, | 1548 | fib6_clean_tree(net, &table->tb6_root, |
| 1553 | func, prune, arg); | 1549 | func, prune, arg); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c65907db8c44..330b5e7b7df6 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
| @@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, | |||
| 71 | unsigned short num, const struct in6_addr *loc_addr, | 71 | unsigned short num, const struct in6_addr *loc_addr, |
| 72 | const struct in6_addr *rmt_addr, int dif) | 72 | const struct in6_addr *rmt_addr, int dif) |
| 73 | { | 73 | { |
| 74 | struct hlist_node *node; | ||
| 75 | bool is_multicast = ipv6_addr_is_multicast(loc_addr); | 74 | bool is_multicast = ipv6_addr_is_multicast(loc_addr); |
| 76 | 75 | ||
| 77 | sk_for_each_from(sk, node) | 76 | sk_for_each_from(sk) |
| 78 | if (inet_sk(sk)->inet_num == num) { | 77 | if (inet_sk(sk)->inet_num == num) { |
| 79 | struct ipv6_pinfo *np = inet6_sk(sk); | 78 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 80 | 79 | ||
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 6cc48012b730..de2bcfaaf759 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
| @@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const | |||
| 89 | { | 89 | { |
| 90 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 90 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
| 91 | struct xfrm6_tunnel_spi *x6spi; | 91 | struct xfrm6_tunnel_spi *x6spi; |
| 92 | struct hlist_node *pos; | ||
| 93 | 92 | ||
| 94 | hlist_for_each_entry_rcu(x6spi, pos, | 93 | hlist_for_each_entry_rcu(x6spi, |
| 95 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 94 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
| 96 | list_byaddr) { | 95 | list_byaddr) { |
| 97 | if (xfrm6_addr_equal(&x6spi->addr, saddr)) | 96 | if (xfrm6_addr_equal(&x6spi->addr, saddr)) |
| @@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) | |||
| 120 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 119 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
| 121 | struct xfrm6_tunnel_spi *x6spi; | 120 | struct xfrm6_tunnel_spi *x6spi; |
| 122 | int index = xfrm6_tunnel_spi_hash_byspi(spi); | 121 | int index = xfrm6_tunnel_spi_hash_byspi(spi); |
| 123 | struct hlist_node *pos; | ||
| 124 | 122 | ||
| 125 | hlist_for_each_entry(x6spi, pos, | 123 | hlist_for_each_entry(x6spi, |
| 126 | &xfrm6_tn->spi_byspi[index], | 124 | &xfrm6_tn->spi_byspi[index], |
| 127 | list_byspi) { | 125 | list_byspi) { |
| 128 | if (x6spi->spi == spi) | 126 | if (x6spi->spi == spi) |
| @@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr) | |||
| 203 | { | 201 | { |
| 204 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 202 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
| 205 | struct xfrm6_tunnel_spi *x6spi; | 203 | struct xfrm6_tunnel_spi *x6spi; |
| 206 | struct hlist_node *pos, *n; | 204 | struct hlist_node *n; |
| 207 | 205 | ||
| 208 | spin_lock_bh(&xfrm6_tunnel_spi_lock); | 206 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
| 209 | 207 | ||
| 210 | hlist_for_each_entry_safe(x6spi, pos, n, | 208 | hlist_for_each_entry_safe(x6spi, n, |
| 211 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 209 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
| 212 | list_byaddr) | 210 | list_byaddr) |
| 213 | { | 211 | { |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index dfd6faaf0ea7..f547a47d381c 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
| @@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc, | |||
| 228 | __be16 port) | 228 | __be16 port) |
| 229 | { | 229 | { |
| 230 | struct sock *s; | 230 | struct sock *s; |
| 231 | struct hlist_node *node; | ||
| 232 | 231 | ||
| 233 | sk_for_each(s, node, &intrfc->if_sklist) | 232 | sk_for_each(s, &intrfc->if_sklist) |
| 234 | if (ipx_sk(s)->port == port) | 233 | if (ipx_sk(s)->port == port) |
| 235 | goto found; | 234 | goto found; |
| 236 | s = NULL; | 235 | s = NULL; |
| @@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc, | |||
| 259 | __be16 port) | 258 | __be16 port) |
| 260 | { | 259 | { |
| 261 | struct sock *s; | 260 | struct sock *s; |
| 262 | struct hlist_node *node; | ||
| 263 | 261 | ||
| 264 | ipxitf_hold(intrfc); | 262 | ipxitf_hold(intrfc); |
| 265 | spin_lock_bh(&intrfc->if_sklist_lock); | 263 | spin_lock_bh(&intrfc->if_sklist_lock); |
| 266 | 264 | ||
| 267 | sk_for_each(s, node, &intrfc->if_sklist) { | 265 | sk_for_each(s, &intrfc->if_sklist) { |
| 268 | struct ipx_sock *ipxs = ipx_sk(s); | 266 | struct ipx_sock *ipxs = ipx_sk(s); |
| 269 | 267 | ||
| 270 | if (ipxs->port == port && | 268 | if (ipxs->port == port && |
| @@ -282,14 +280,14 @@ found: | |||
| 282 | static void __ipxitf_down(struct ipx_interface *intrfc) | 280 | static void __ipxitf_down(struct ipx_interface *intrfc) |
| 283 | { | 281 | { |
| 284 | struct sock *s; | 282 | struct sock *s; |
| 285 | struct hlist_node *node, *t; | 283 | struct hlist_node *t; |
| 286 | 284 | ||
| 287 | /* Delete all routes associated with this interface */ | 285 | /* Delete all routes associated with this interface */ |
| 288 | ipxrtr_del_routes(intrfc); | 286 | ipxrtr_del_routes(intrfc); |
| 289 | 287 | ||
| 290 | spin_lock_bh(&intrfc->if_sklist_lock); | 288 | spin_lock_bh(&intrfc->if_sklist_lock); |
| 291 | /* error sockets */ | 289 | /* error sockets */ |
| 292 | sk_for_each_safe(s, node, t, &intrfc->if_sklist) { | 290 | sk_for_each_safe(s, t, &intrfc->if_sklist) { |
| 293 | struct ipx_sock *ipxs = ipx_sk(s); | 291 | struct ipx_sock *ipxs = ipx_sk(s); |
| 294 | 292 | ||
| 295 | s->sk_err = ENOLINK; | 293 | s->sk_err = ENOLINK; |
| @@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc, | |||
| 385 | int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, | 383 | int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, |
| 386 | IPX_NODE_LEN); | 384 | IPX_NODE_LEN); |
| 387 | struct sock *s; | 385 | struct sock *s; |
| 388 | struct hlist_node *node; | ||
| 389 | int rc; | 386 | int rc; |
| 390 | 387 | ||
| 391 | spin_lock_bh(&intrfc->if_sklist_lock); | 388 | spin_lock_bh(&intrfc->if_sklist_lock); |
| 392 | 389 | ||
| 393 | sk_for_each(s, node, &intrfc->if_sklist) { | 390 | sk_for_each(s, &intrfc->if_sklist) { |
| 394 | struct ipx_sock *ipxs = ipx_sk(s); | 391 | struct ipx_sock *ipxs = ipx_sk(s); |
| 395 | 392 | ||
| 396 | if (ipxs->port == ipx->ipx_dest.sock && | 393 | if (ipxs->port == ipx->ipx_dest.sock && |
| @@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc, | |||
| 446 | connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); | 443 | connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); |
| 447 | 444 | ||
| 448 | if (connection) { | 445 | if (connection) { |
| 449 | struct hlist_node *node; | ||
| 450 | /* Now we have to look for a special NCP connection handling | 446 | /* Now we have to look for a special NCP connection handling |
| 451 | * socket. Only these sockets have ipx_ncp_conn != 0, set by | 447 | * socket. Only these sockets have ipx_ncp_conn != 0, set by |
| 452 | * SIOCIPXNCPCONN. */ | 448 | * SIOCIPXNCPCONN. */ |
| 453 | spin_lock_bh(&intrfc->if_sklist_lock); | 449 | spin_lock_bh(&intrfc->if_sklist_lock); |
| 454 | sk_for_each(sk, node, &intrfc->if_sklist) | 450 | sk_for_each(sk, &intrfc->if_sklist) |
| 455 | if (ipx_sk(sk)->ipx_ncp_conn == connection) { | 451 | if (ipx_sk(sk)->ipx_ncp_conn == connection) { |
| 456 | sock_hold(sk); | 452 | sock_hold(sk); |
| 457 | goto found; | 453 | goto found; |
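The sk_for_each() family used throughout af_ipx.c is a thin socket-list wrapper over the same hlist macros, so it shrinks the same way: sk_for_each(s, node, head) becomes sk_for_each(s, head), and sk_for_each_safe(s, node, t, head) becomes sk_for_each_safe(s, t, head), keeping only the scratch pointer for the next element. A hedged sketch (head, process() and maybe_unlink() are hypothetical):

    struct sock *s;
    struct hlist_node *tmp;

    sk_for_each(s, &head)           /* was: sk_for_each(s, node, &head) */
            process(s);

    sk_for_each_safe(s, tmp, &head) /* was: sk_for_each_safe(s, node, tmp, &head) */
            maybe_unlink(s);        /* s may be unlinked; tmp holds the next node */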
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index 02ff7f2f60d4..65e8833a2510 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c | |||
| @@ -103,19 +103,18 @@ out: | |||
| 103 | static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) | 103 | static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) |
| 104 | { | 104 | { |
| 105 | struct sock *s = NULL; | 105 | struct sock *s = NULL; |
| 106 | struct hlist_node *node; | ||
| 107 | struct ipx_interface *i; | 106 | struct ipx_interface *i; |
| 108 | 107 | ||
| 109 | list_for_each_entry(i, &ipx_interfaces, node) { | 108 | list_for_each_entry(i, &ipx_interfaces, node) { |
| 110 | spin_lock_bh(&i->if_sklist_lock); | 109 | spin_lock_bh(&i->if_sklist_lock); |
| 111 | sk_for_each(s, node, &i->if_sklist) { | 110 | sk_for_each(s, &i->if_sklist) { |
| 112 | if (!pos) | 111 | if (!pos) |
| 113 | break; | 112 | break; |
| 114 | --pos; | 113 | --pos; |
| 115 | } | 114 | } |
| 116 | spin_unlock_bh(&i->if_sklist_lock); | 115 | spin_unlock_bh(&i->if_sklist_lock); |
| 117 | if (!pos) { | 116 | if (!pos) { |
| 118 | if (node) | 117 | if (s) |
| 119 | goto found; | 118 | goto found; |
| 120 | break; | 119 | break; |
| 121 | } | 120 | } |
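The if (node) to if (s) change in ipx_get_socket_idx() is the one behavioural subtlety in this conversion. With the old macros, the struct hlist_node * cursor was what ended up NULL when a bucket was exhausted, so it doubled as a found/not-found flag after the loop. The new macro NULLs the entry pointer itself on normal termination, so the post-loop test moves to s: NULL means the list ran out, non-NULL means the break was taken. The new macro is approximately (paraphrased from <linux/list.h>, shown for illustration rather than quoted verbatim):

    #define hlist_for_each_entry(pos, head, member)                              \
            for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
                 pos;                                                            \
                 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))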
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index cd6f7a991d80..a7d11ffe4284 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev) | |||
| 156 | { | 156 | { |
| 157 | struct iucv_sock *iucv; | 157 | struct iucv_sock *iucv; |
| 158 | struct sock *sk; | 158 | struct sock *sk; |
| 159 | struct hlist_node *node; | ||
| 160 | int err = 0; | 159 | int err = 0; |
| 161 | 160 | ||
| 162 | #ifdef CONFIG_PM_DEBUG | 161 | #ifdef CONFIG_PM_DEBUG |
| 163 | printk(KERN_WARNING "afiucv_pm_freeze\n"); | 162 | printk(KERN_WARNING "afiucv_pm_freeze\n"); |
| 164 | #endif | 163 | #endif |
| 165 | read_lock(&iucv_sk_list.lock); | 164 | read_lock(&iucv_sk_list.lock); |
| 166 | sk_for_each(sk, node, &iucv_sk_list.head) { | 165 | sk_for_each(sk, &iucv_sk_list.head) { |
| 167 | iucv = iucv_sk(sk); | 166 | iucv = iucv_sk(sk); |
| 168 | switch (sk->sk_state) { | 167 | switch (sk->sk_state) { |
| 169 | case IUCV_DISCONN: | 168 | case IUCV_DISCONN: |
| @@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev) | |||
| 194 | static int afiucv_pm_restore_thaw(struct device *dev) | 193 | static int afiucv_pm_restore_thaw(struct device *dev) |
| 195 | { | 194 | { |
| 196 | struct sock *sk; | 195 | struct sock *sk; |
| 197 | struct hlist_node *node; | ||
| 198 | 196 | ||
| 199 | #ifdef CONFIG_PM_DEBUG | 197 | #ifdef CONFIG_PM_DEBUG |
| 200 | printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); | 198 | printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); |
| 201 | #endif | 199 | #endif |
| 202 | read_lock(&iucv_sk_list.lock); | 200 | read_lock(&iucv_sk_list.lock); |
| 203 | sk_for_each(sk, node, &iucv_sk_list.head) { | 201 | sk_for_each(sk, &iucv_sk_list.head) { |
| 204 | switch (sk->sk_state) { | 202 | switch (sk->sk_state) { |
| 205 | case IUCV_CONNECTED: | 203 | case IUCV_CONNECTED: |
| 206 | sk->sk_err = EPIPE; | 204 | sk->sk_err = EPIPE; |
| @@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, | |||
| 390 | static struct sock *__iucv_get_sock_by_name(char *nm) | 388 | static struct sock *__iucv_get_sock_by_name(char *nm) |
| 391 | { | 389 | { |
| 392 | struct sock *sk; | 390 | struct sock *sk; |
| 393 | struct hlist_node *node; | ||
| 394 | 391 | ||
| 395 | sk_for_each(sk, node, &iucv_sk_list.head) | 392 | sk_for_each(sk, &iucv_sk_list.head) |
| 396 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) | 393 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) |
| 397 | return sk; | 394 | return sk; |
| 398 | 395 | ||
| @@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
| 1678 | unsigned char user_data[16]; | 1675 | unsigned char user_data[16]; |
| 1679 | unsigned char nuser_data[16]; | 1676 | unsigned char nuser_data[16]; |
| 1680 | unsigned char src_name[8]; | 1677 | unsigned char src_name[8]; |
| 1681 | struct hlist_node *node; | ||
| 1682 | struct sock *sk, *nsk; | 1678 | struct sock *sk, *nsk; |
| 1683 | struct iucv_sock *iucv, *niucv; | 1679 | struct iucv_sock *iucv, *niucv; |
| 1684 | int err; | 1680 | int err; |
| @@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
| 1689 | read_lock(&iucv_sk_list.lock); | 1685 | read_lock(&iucv_sk_list.lock); |
| 1690 | iucv = NULL; | 1686 | iucv = NULL; |
| 1691 | sk = NULL; | 1687 | sk = NULL; |
| 1692 | sk_for_each(sk, node, &iucv_sk_list.head) | 1688 | sk_for_each(sk, &iucv_sk_list.head) |
| 1693 | if (sk->sk_state == IUCV_LISTEN && | 1689 | if (sk->sk_state == IUCV_LISTEN && |
| 1694 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { | 1690 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { |
| 1695 | /* | 1691 | /* |
| @@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | |||
| 2115 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | 2111 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, |
| 2116 | struct packet_type *pt, struct net_device *orig_dev) | 2112 | struct packet_type *pt, struct net_device *orig_dev) |
| 2117 | { | 2113 | { |
| 2118 | struct hlist_node *node; | ||
| 2119 | struct sock *sk; | 2114 | struct sock *sk; |
| 2120 | struct iucv_sock *iucv; | 2115 | struct iucv_sock *iucv; |
| 2121 | struct af_iucv_trans_hdr *trans_hdr; | 2116 | struct af_iucv_trans_hdr *trans_hdr; |
| @@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 2132 | iucv = NULL; | 2127 | iucv = NULL; |
| 2133 | sk = NULL; | 2128 | sk = NULL; |
| 2134 | read_lock(&iucv_sk_list.lock); | 2129 | read_lock(&iucv_sk_list.lock); |
| 2135 | sk_for_each(sk, node, &iucv_sk_list.head) { | 2130 | sk_for_each(sk, &iucv_sk_list.head) { |
| 2136 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { | 2131 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { |
| 2137 | if ((!memcmp(&iucv_sk(sk)->src_name, | 2132 | if ((!memcmp(&iucv_sk(sk)->src_name, |
| 2138 | trans_hdr->destAppName, 8)) && | 2133 | trans_hdr->destAppName, 8)) && |
| @@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, | |||
| 2225 | struct sk_buff *list_skb; | 2220 | struct sk_buff *list_skb; |
| 2226 | struct sk_buff *nskb; | 2221 | struct sk_buff *nskb; |
| 2227 | unsigned long flags; | 2222 | unsigned long flags; |
| 2228 | struct hlist_node *node; | ||
| 2229 | 2223 | ||
| 2230 | read_lock_irqsave(&iucv_sk_list.lock, flags); | 2224 | read_lock_irqsave(&iucv_sk_list.lock, flags); |
| 2231 | sk_for_each(sk, node, &iucv_sk_list.head) | 2225 | sk_for_each(sk, &iucv_sk_list.head) |
| 2232 | if (sk == isk) { | 2226 | if (sk == isk) { |
| 2233 | iucv = iucv_sk(sk); | 2227 | iucv = iucv_sk(sk); |
| 2234 | break; | 2228 | break; |
| @@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this, | |||
| 2299 | unsigned long event, void *ptr) | 2293 | unsigned long event, void *ptr) |
| 2300 | { | 2294 | { |
| 2301 | struct net_device *event_dev = (struct net_device *)ptr; | 2295 | struct net_device *event_dev = (struct net_device *)ptr; |
| 2302 | struct hlist_node *node; | ||
| 2303 | struct sock *sk; | 2296 | struct sock *sk; |
| 2304 | struct iucv_sock *iucv; | 2297 | struct iucv_sock *iucv; |
| 2305 | 2298 | ||
| 2306 | switch (event) { | 2299 | switch (event) { |
| 2307 | case NETDEV_REBOOT: | 2300 | case NETDEV_REBOOT: |
| 2308 | case NETDEV_GOING_DOWN: | 2301 | case NETDEV_GOING_DOWN: |
| 2309 | sk_for_each(sk, node, &iucv_sk_list.head) { | 2302 | sk_for_each(sk, &iucv_sk_list.head) { |
| 2310 | iucv = iucv_sk(sk); | 2303 | iucv = iucv_sk(sk); |
| 2311 | if ((iucv->hs_dev == event_dev) && | 2304 | if ((iucv->hs_dev == event_dev) && |
| 2312 | (sk->sk_state == IUCV_CONNECTED)) { | 2305 | (sk->sk_state == IUCV_CONNECTED)) { |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 9ef79851f297..556fdafdd1ea 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
| 225 | { | 225 | { |
| 226 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 226 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
| 227 | struct sock *sk; | 227 | struct sock *sk; |
| 228 | struct hlist_node *node; | ||
| 229 | struct sk_buff *skb2 = NULL; | 228 | struct sk_buff *skb2 = NULL; |
| 230 | int err = -ESRCH; | 229 | int err = -ESRCH; |
| 231 | 230 | ||
| @@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
| 236 | return -ENOMEM; | 235 | return -ENOMEM; |
| 237 | 236 | ||
| 238 | rcu_read_lock(); | 237 | rcu_read_lock(); |
| 239 | sk_for_each_rcu(sk, node, &net_pfkey->table) { | 238 | sk_for_each_rcu(sk, &net_pfkey->table) { |
| 240 | struct pfkey_sock *pfk = pfkey_sk(sk); | 239 | struct pfkey_sock *pfk = pfkey_sk(sk); |
| 241 | int err2; | 240 | int err2; |
| 242 | 241 | ||
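The _rcu variants lose the node argument as well, and their locking contract is unchanged: the traversal must still sit inside rcu_read_lock()/rcu_read_unlock(), exactly as pfkey_broadcast() does above:

    rcu_read_lock();
    sk_for_each_rcu(sk, &net_pfkey->table) {
            /* read-side access to sk; must not sleep here */
    }
    rcu_read_unlock();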
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index dcfd64e83ab7..d36875f3427e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | |||
| 221 | struct hlist_head *session_list = | 221 | struct hlist_head *session_list = |
| 222 | l2tp_session_id_hash_2(pn, session_id); | 222 | l2tp_session_id_hash_2(pn, session_id); |
| 223 | struct l2tp_session *session; | 223 | struct l2tp_session *session; |
| 224 | struct hlist_node *walk; | ||
| 225 | 224 | ||
| 226 | rcu_read_lock_bh(); | 225 | rcu_read_lock_bh(); |
| 227 | hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) { | 226 | hlist_for_each_entry_rcu(session, session_list, global_hlist) { |
| 228 | if (session->session_id == session_id) { | 227 | if (session->session_id == session_id) { |
| 229 | rcu_read_unlock_bh(); | 228 | rcu_read_unlock_bh(); |
| 230 | return session; | 229 | return session; |
| @@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn | |||
| 253 | { | 252 | { |
| 254 | struct hlist_head *session_list; | 253 | struct hlist_head *session_list; |
| 255 | struct l2tp_session *session; | 254 | struct l2tp_session *session; |
| 256 | struct hlist_node *walk; | ||
| 257 | 255 | ||
| 258 | /* In L2TPv3, session_ids are unique over all tunnels and we | 256 | /* In L2TPv3, session_ids are unique over all tunnels and we |
| 259 | * sometimes need to look them up before we know the | 257 | * sometimes need to look them up before we know the |
| @@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn | |||
| 264 | 262 | ||
| 265 | session_list = l2tp_session_id_hash(tunnel, session_id); | 263 | session_list = l2tp_session_id_hash(tunnel, session_id); |
| 266 | read_lock_bh(&tunnel->hlist_lock); | 264 | read_lock_bh(&tunnel->hlist_lock); |
| 267 | hlist_for_each_entry(session, walk, session_list, hlist) { | 265 | hlist_for_each_entry(session, session_list, hlist) { |
| 268 | if (session->session_id == session_id) { | 266 | if (session->session_id == session_id) { |
| 269 | read_unlock_bh(&tunnel->hlist_lock); | 267 | read_unlock_bh(&tunnel->hlist_lock); |
| 270 | return session; | 268 | return session; |
| @@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find); | |||
| 279 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) | 277 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) |
| 280 | { | 278 | { |
| 281 | int hash; | 279 | int hash; |
| 282 | struct hlist_node *walk; | ||
| 283 | struct l2tp_session *session; | 280 | struct l2tp_session *session; |
| 284 | int count = 0; | 281 | int count = 0; |
| 285 | 282 | ||
| 286 | read_lock_bh(&tunnel->hlist_lock); | 283 | read_lock_bh(&tunnel->hlist_lock); |
| 287 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | 284 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { |
| 288 | hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) { | 285 | hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { |
| 289 | if (++count > nth) { | 286 | if (++count > nth) { |
| 290 | read_unlock_bh(&tunnel->hlist_lock); | 287 | read_unlock_bh(&tunnel->hlist_lock); |
| 291 | return session; | 288 | return session; |
| @@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) | |||
| 306 | { | 303 | { |
| 307 | struct l2tp_net *pn = l2tp_pernet(net); | 304 | struct l2tp_net *pn = l2tp_pernet(net); |
| 308 | int hash; | 305 | int hash; |
| 309 | struct hlist_node *walk; | ||
| 310 | struct l2tp_session *session; | 306 | struct l2tp_session *session; |
| 311 | 307 | ||
| 312 | rcu_read_lock_bh(); | 308 | rcu_read_lock_bh(); |
| 313 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { | 309 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { |
| 314 | hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) { | 310 | hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { |
| 315 | if (!strcmp(session->ifname, ifname)) { | 311 | if (!strcmp(session->ifname, ifname)) { |
| 316 | rcu_read_unlock_bh(); | 312 | rcu_read_unlock_bh(); |
| 317 | return session; | 313 | return session; |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index f7ac8f42fee2..7f41b7051269 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
| @@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) | |||
| 49 | 49 | ||
| 50 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | 50 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) |
| 51 | { | 51 | { |
| 52 | struct hlist_node *node; | ||
| 53 | struct sock *sk; | 52 | struct sock *sk; |
| 54 | 53 | ||
| 55 | sk_for_each_bound(sk, node, &l2tp_ip_bind_table) { | 54 | sk_for_each_bound(sk, &l2tp_ip_bind_table) { |
| 56 | struct inet_sock *inet = inet_sk(sk); | 55 | struct inet_sock *inet = inet_sk(sk); |
| 57 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); | 56 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); |
| 58 | 57 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 8ee4a86ae996..41f2f8126ebc 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
| @@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, | |||
| 60 | struct in6_addr *laddr, | 60 | struct in6_addr *laddr, |
| 61 | int dif, u32 tunnel_id) | 61 | int dif, u32 tunnel_id) |
| 62 | { | 62 | { |
| 63 | struct hlist_node *node; | ||
| 64 | struct sock *sk; | 63 | struct sock *sk; |
| 65 | 64 | ||
| 66 | sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) { | 65 | sk_for_each_bound(sk, &l2tp_ip6_bind_table) { |
| 67 | struct in6_addr *addr = inet6_rcv_saddr(sk); | 66 | struct in6_addr *addr = inet6_rcv_saddr(sk); |
| 68 | struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); | 67 | struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); |
| 69 | 68 | ||
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 7c5073badc73..78be45cda5c1 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c | |||
| @@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap, | |||
| 393 | { | 393 | { |
| 394 | int i = 0, count = 256 / sizeof(struct sock *); | 394 | int i = 0, count = 256 / sizeof(struct sock *); |
| 395 | struct sock *sk, *stack[count]; | 395 | struct sock *sk, *stack[count]; |
| 396 | struct hlist_node *node; | ||
| 397 | struct llc_sock *llc; | 396 | struct llc_sock *llc; |
| 398 | struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex); | 397 | struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex); |
| 399 | 398 | ||
| 400 | spin_lock_bh(&sap->sk_lock); | 399 | spin_lock_bh(&sap->sk_lock); |
| 401 | hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) { | 400 | hlist_for_each_entry(llc, dev_hb, dev_hash_node) { |
| 402 | 401 | ||
| 403 | sk = &llc->sk; | 402 | sk = &llc->sk; |
| 404 | 403 | ||
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d0dd11153a6c..1a8591b77a13 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
| @@ -647,8 +647,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
| 647 | 647 | ||
| 648 | spin_lock_init(&local->ack_status_lock); | 648 | spin_lock_init(&local->ack_status_lock); |
| 649 | idr_init(&local->ack_status_frames); | 649 | idr_init(&local->ack_status_frames); |
| 650 | /* preallocate at least one entry */ | ||
| 651 | idr_pre_get(&local->ack_status_frames, GFP_KERNEL); | ||
| 652 | 650 | ||
| 653 | sta_info_init(local); | 651 | sta_info_init(local); |
| 654 | 652 | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 6b3c4e119c63..dc7c8df40c2c 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
| @@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void) | |||
| 72 | * it's used twice. So it is illegal to do | 72 | * it's used twice. So it is illegal to do |
| 73 | * for_each_mesh_entry(rcu_dereference(...), ...) | 73 | * for_each_mesh_entry(rcu_dereference(...), ...) |
| 74 | */ | 74 | */ |
| 75 | #define for_each_mesh_entry(tbl, p, node, i) \ | 75 | #define for_each_mesh_entry(tbl, node, i) \ |
| 76 | for (i = 0; i <= tbl->hash_mask; i++) \ | 76 | for (i = 0; i <= tbl->hash_mask; i++) \ |
| 77 | hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) | 77 | hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list) |
| 78 | 78 | ||
| 79 | 79 | ||
| 80 | static struct mesh_table *mesh_table_alloc(int size_order) | 80 | static struct mesh_table *mesh_table_alloc(int size_order) |
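Locally defined iteration wrappers shrink along with the core macros: for_each_mesh_entry() drops its struct hlist_node * parameter because the underlying hlist_for_each_entry_rcu() no longer wants one, and every caller in the hunks below deletes the now-unused local. A sketch of a caller after the change, mirroring the pattern used later in this file:

    struct mpath_node *node;
    int i;

    rcu_read_lock();
    for_each_mesh_entry(tbl, node, i) {
            /* node->mpath is valid here; no separate cursor to carry */
    }
    rcu_read_unlock();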
| @@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | |||
| 139 | } | 139 | } |
| 140 | if (free_leafs) { | 140 | if (free_leafs) { |
| 141 | spin_lock_bh(&tbl->gates_lock); | 141 | spin_lock_bh(&tbl->gates_lock); |
| 142 | hlist_for_each_entry_safe(gate, p, q, | 142 | hlist_for_each_entry_safe(gate, q, |
| 143 | tbl->known_gates, list) { | 143 | tbl->known_gates, list) { |
| 144 | hlist_del(&gate->list); | 144 | hlist_del(&gate->list); |
| 145 | kfree(gate); | 145 | kfree(gate); |
| @@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, | |||
| 333 | struct ieee80211_sub_if_data *sdata) | 333 | struct ieee80211_sub_if_data *sdata) |
| 334 | { | 334 | { |
| 335 | struct mesh_path *mpath; | 335 | struct mesh_path *mpath; |
| 336 | struct hlist_node *n; | ||
| 337 | struct hlist_head *bucket; | 336 | struct hlist_head *bucket; |
| 338 | struct mpath_node *node; | 337 | struct mpath_node *node; |
| 339 | 338 | ||
| 340 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 339 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
| 341 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 340 | hlist_for_each_entry_rcu(node, bucket, list) { |
| 342 | mpath = node->mpath; | 341 | mpath = node->mpath; |
| 343 | if (mpath->sdata == sdata && | 342 | if (mpath->sdata == sdata && |
| 344 | ether_addr_equal(dst, mpath->dst)) { | 343 | ether_addr_equal(dst, mpath->dst)) { |
| @@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) | |||
| 389 | { | 388 | { |
| 390 | struct mesh_table *tbl = rcu_dereference(mesh_paths); | 389 | struct mesh_table *tbl = rcu_dereference(mesh_paths); |
| 391 | struct mpath_node *node; | 390 | struct mpath_node *node; |
| 392 | struct hlist_node *p; | ||
| 393 | int i; | 391 | int i; |
| 394 | int j = 0; | 392 | int j = 0; |
| 395 | 393 | ||
| 396 | for_each_mesh_entry(tbl, p, node, i) { | 394 | for_each_mesh_entry(tbl, node, i) { |
| 397 | if (sdata && node->mpath->sdata != sdata) | 395 | if (sdata && node->mpath->sdata != sdata) |
| 398 | continue; | 396 | continue; |
| 399 | if (j++ == idx) { | 397 | if (j++ == idx) { |
| @@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath) | |||
| 417 | { | 415 | { |
| 418 | struct mesh_table *tbl; | 416 | struct mesh_table *tbl; |
| 419 | struct mpath_node *gate, *new_gate; | 417 | struct mpath_node *gate, *new_gate; |
| 420 | struct hlist_node *n; | ||
| 421 | int err; | 418 | int err; |
| 422 | 419 | ||
| 423 | rcu_read_lock(); | 420 | rcu_read_lock(); |
| 424 | tbl = rcu_dereference(mesh_paths); | 421 | tbl = rcu_dereference(mesh_paths); |
| 425 | 422 | ||
| 426 | hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) | 423 | hlist_for_each_entry_rcu(gate, tbl->known_gates, list) |
| 427 | if (gate->mpath == mpath) { | 424 | if (gate->mpath == mpath) { |
| 428 | err = -EEXIST; | 425 | err = -EEXIST; |
| 429 | goto err_rcu; | 426 | goto err_rcu; |
| @@ -460,9 +457,9 @@ err_rcu: | |||
| 460 | static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) | 457 | static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) |
| 461 | { | 458 | { |
| 462 | struct mpath_node *gate; | 459 | struct mpath_node *gate; |
| 463 | struct hlist_node *p, *q; | 460 | struct hlist_node *q; |
| 464 | 461 | ||
| 465 | hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) { | 462 | hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) { |
| 466 | if (gate->mpath != mpath) | 463 | if (gate->mpath != mpath) |
| 467 | continue; | 464 | continue; |
| 468 | spin_lock_bh(&tbl->gates_lock); | 465 | spin_lock_bh(&tbl->gates_lock); |
| @@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
| 504 | struct mesh_path *mpath, *new_mpath; | 501 | struct mesh_path *mpath, *new_mpath; |
| 505 | struct mpath_node *node, *new_node; | 502 | struct mpath_node *node, *new_node; |
| 506 | struct hlist_head *bucket; | 503 | struct hlist_head *bucket; |
| 507 | struct hlist_node *n; | ||
| 508 | int grow = 0; | 504 | int grow = 0; |
| 509 | int err = 0; | 505 | int err = 0; |
| 510 | u32 hash_idx; | 506 | u32 hash_idx; |
| @@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
| 550 | spin_lock(&tbl->hashwlock[hash_idx]); | 546 | spin_lock(&tbl->hashwlock[hash_idx]); |
| 551 | 547 | ||
| 552 | err = -EEXIST; | 548 | err = -EEXIST; |
| 553 | hlist_for_each_entry(node, n, bucket, list) { | 549 | hlist_for_each_entry(node, bucket, list) { |
| 554 | mpath = node->mpath; | 550 | mpath = node->mpath; |
| 555 | if (mpath->sdata == sdata && | 551 | if (mpath->sdata == sdata && |
| 556 | ether_addr_equal(dst, mpath->dst)) | 552 | ether_addr_equal(dst, mpath->dst)) |
| @@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
| 640 | struct mesh_path *mpath, *new_mpath; | 636 | struct mesh_path *mpath, *new_mpath; |
| 641 | struct mpath_node *node, *new_node; | 637 | struct mpath_node *node, *new_node; |
| 642 | struct hlist_head *bucket; | 638 | struct hlist_head *bucket; |
| 643 | struct hlist_node *n; | ||
| 644 | int grow = 0; | 639 | int grow = 0; |
| 645 | int err = 0; | 640 | int err = 0; |
| 646 | u32 hash_idx; | 641 | u32 hash_idx; |
| @@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
| 680 | spin_lock(&tbl->hashwlock[hash_idx]); | 675 | spin_lock(&tbl->hashwlock[hash_idx]); |
| 681 | 676 | ||
| 682 | err = -EEXIST; | 677 | err = -EEXIST; |
| 683 | hlist_for_each_entry(node, n, bucket, list) { | 678 | hlist_for_each_entry(node, bucket, list) { |
| 684 | mpath = node->mpath; | 679 | mpath = node->mpath; |
| 685 | if (mpath->sdata == sdata && | 680 | if (mpath->sdata == sdata && |
| 686 | ether_addr_equal(dst, mpath->dst)) | 681 | ether_addr_equal(dst, mpath->dst)) |
| @@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta) | |||
| 725 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 720 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
| 726 | struct mesh_path *mpath; | 721 | struct mesh_path *mpath; |
| 727 | struct mpath_node *node; | 722 | struct mpath_node *node; |
| 728 | struct hlist_node *p; | ||
| 729 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 723 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
| 730 | int i; | 724 | int i; |
| 731 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); | 725 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); |
| 732 | 726 | ||
| 733 | rcu_read_lock(); | 727 | rcu_read_lock(); |
| 734 | tbl = rcu_dereference(mesh_paths); | 728 | tbl = rcu_dereference(mesh_paths); |
| 735 | for_each_mesh_entry(tbl, p, node, i) { | 729 | for_each_mesh_entry(tbl, node, i) { |
| 736 | mpath = node->mpath; | 730 | mpath = node->mpath; |
| 737 | if (rcu_dereference(mpath->next_hop) == sta && | 731 | if (rcu_dereference(mpath->next_hop) == sta && |
| 738 | mpath->flags & MESH_PATH_ACTIVE && | 732 | mpath->flags & MESH_PATH_ACTIVE && |
| @@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
| 792 | struct mesh_table *tbl; | 786 | struct mesh_table *tbl; |
| 793 | struct mesh_path *mpath; | 787 | struct mesh_path *mpath; |
| 794 | struct mpath_node *node; | 788 | struct mpath_node *node; |
| 795 | struct hlist_node *p; | ||
| 796 | int i; | 789 | int i; |
| 797 | 790 | ||
| 798 | rcu_read_lock(); | 791 | rcu_read_lock(); |
| 799 | read_lock_bh(&pathtbl_resize_lock); | 792 | read_lock_bh(&pathtbl_resize_lock); |
| 800 | tbl = resize_dereference_mesh_paths(); | 793 | tbl = resize_dereference_mesh_paths(); |
| 801 | for_each_mesh_entry(tbl, p, node, i) { | 794 | for_each_mesh_entry(tbl, node, i) { |
| 802 | mpath = node->mpath; | 795 | mpath = node->mpath; |
| 803 | if (rcu_dereference(mpath->next_hop) == sta) { | 796 | if (rcu_dereference(mpath->next_hop) == sta) { |
| 804 | spin_lock(&tbl->hashwlock[i]); | 797 | spin_lock(&tbl->hashwlock[i]); |
| @@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl, | |||
| 815 | { | 808 | { |
| 816 | struct mesh_path *mpath; | 809 | struct mesh_path *mpath; |
| 817 | struct mpath_node *node; | 810 | struct mpath_node *node; |
| 818 | struct hlist_node *p; | ||
| 819 | int i; | 811 | int i; |
| 820 | 812 | ||
| 821 | WARN_ON(!rcu_read_lock_held()); | 813 | WARN_ON(!rcu_read_lock_held()); |
| 822 | for_each_mesh_entry(tbl, p, node, i) { | 814 | for_each_mesh_entry(tbl, node, i) { |
| 823 | mpath = node->mpath; | 815 | mpath = node->mpath; |
| 824 | if (mpath->sdata != sdata) | 816 | if (mpath->sdata != sdata) |
| 825 | continue; | 817 | continue; |
| @@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
| 865 | struct mesh_path *mpath; | 857 | struct mesh_path *mpath; |
| 866 | struct mpath_node *node; | 858 | struct mpath_node *node; |
| 867 | struct hlist_head *bucket; | 859 | struct hlist_head *bucket; |
| 868 | struct hlist_node *n; | ||
| 869 | int hash_idx; | 860 | int hash_idx; |
| 870 | int err = 0; | 861 | int err = 0; |
| 871 | 862 | ||
| @@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
| 875 | bucket = &tbl->hash_buckets[hash_idx]; | 866 | bucket = &tbl->hash_buckets[hash_idx]; |
| 876 | 867 | ||
| 877 | spin_lock(&tbl->hashwlock[hash_idx]); | 868 | spin_lock(&tbl->hashwlock[hash_idx]); |
| 878 | hlist_for_each_entry(node, n, bucket, list) { | 869 | hlist_for_each_entry(node, bucket, list) { |
| 879 | mpath = node->mpath; | 870 | mpath = node->mpath; |
| 880 | if (mpath->sdata == sdata && | 871 | if (mpath->sdata == sdata && |
| 881 | ether_addr_equal(addr, mpath->dst)) { | 872 | ether_addr_equal(addr, mpath->dst)) { |
| @@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath) | |||
| 920 | int mesh_path_send_to_gates(struct mesh_path *mpath) | 911 | int mesh_path_send_to_gates(struct mesh_path *mpath) |
| 921 | { | 912 | { |
| 922 | struct ieee80211_sub_if_data *sdata = mpath->sdata; | 913 | struct ieee80211_sub_if_data *sdata = mpath->sdata; |
| 923 | struct hlist_node *n; | ||
| 924 | struct mesh_table *tbl; | 914 | struct mesh_table *tbl; |
| 925 | struct mesh_path *from_mpath = mpath; | 915 | struct mesh_path *from_mpath = mpath; |
| 926 | struct mpath_node *gate = NULL; | 916 | struct mpath_node *gate = NULL; |
| @@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
| 935 | if (!known_gates) | 925 | if (!known_gates) |
| 936 | return -EHOSTUNREACH; | 926 | return -EHOSTUNREACH; |
| 937 | 927 | ||
| 938 | hlist_for_each_entry_rcu(gate, n, known_gates, list) { | 928 | hlist_for_each_entry_rcu(gate, known_gates, list) { |
| 939 | if (gate->mpath->sdata != sdata) | 929 | if (gate->mpath->sdata != sdata) |
| 940 | continue; | 930 | continue; |
| 941 | 931 | ||
| @@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
| 951 | } | 941 | } |
| 952 | } | 942 | } |
| 953 | 943 | ||
| 954 | hlist_for_each_entry_rcu(gate, n, known_gates, list) | 944 | hlist_for_each_entry_rcu(gate, known_gates, list) |
| 955 | if (gate->mpath->sdata == sdata) { | 945 | if (gate->mpath->sdata == sdata) { |
| 956 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); | 946 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); |
| 957 | mesh_path_tx_pending(gate->mpath); | 947 | mesh_path_tx_pending(gate->mpath); |
| @@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | |||
| 1096 | struct mesh_table *tbl; | 1086 | struct mesh_table *tbl; |
| 1097 | struct mesh_path *mpath; | 1087 | struct mesh_path *mpath; |
| 1098 | struct mpath_node *node; | 1088 | struct mpath_node *node; |
| 1099 | struct hlist_node *p; | ||
| 1100 | int i; | 1089 | int i; |
| 1101 | 1090 | ||
| 1102 | rcu_read_lock(); | 1091 | rcu_read_lock(); |
| 1103 | tbl = rcu_dereference(mesh_paths); | 1092 | tbl = rcu_dereference(mesh_paths); |
| 1104 | for_each_mesh_entry(tbl, p, node, i) { | 1093 | for_each_mesh_entry(tbl, node, i) { |
| 1105 | if (node->mpath->sdata != sdata) | 1094 | if (node->mpath->sdata != sdata) |
| 1106 | continue; | 1095 | continue; |
| 1107 | mpath = node->mpath; | 1096 | mpath = node->mpath; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 5b9602b62405..de8548bf0a7f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -2017,24 +2017,14 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
| 2017 | skb = skb_clone(skb, GFP_ATOMIC); | 2017 | skb = skb_clone(skb, GFP_ATOMIC); |
| 2018 | if (skb) { | 2018 | if (skb) { |
| 2019 | unsigned long flags; | 2019 | unsigned long flags; |
| 2020 | int id, r; | 2020 | int id; |
| 2021 | 2021 | ||
| 2022 | spin_lock_irqsave(&local->ack_status_lock, flags); | 2022 | spin_lock_irqsave(&local->ack_status_lock, flags); |
| 2023 | r = idr_get_new_above(&local->ack_status_frames, | 2023 | id = idr_alloc(&local->ack_status_frames, orig_skb, |
| 2024 | orig_skb, 1, &id); | 2024 | 1, 0x10000, GFP_ATOMIC); |
| 2025 | if (r == -EAGAIN) { | ||
| 2026 | idr_pre_get(&local->ack_status_frames, | ||
| 2027 | GFP_ATOMIC); | ||
| 2028 | r = idr_get_new_above(&local->ack_status_frames, | ||
| 2029 | orig_skb, 1, &id); | ||
| 2030 | } | ||
| 2031 | if (WARN_ON(!id) || id > 0xffff) { | ||
| 2032 | idr_remove(&local->ack_status_frames, id); | ||
| 2033 | r = -ERANGE; | ||
| 2034 | } | ||
| 2035 | spin_unlock_irqrestore(&local->ack_status_lock, flags); | 2025 | spin_unlock_irqrestore(&local->ack_status_lock, flags); |
| 2036 | 2026 | ||
| 2037 | if (!r) { | 2027 | if (id >= 0) { |
| 2038 | info_id = id; | 2028 | info_id = id; |
| 2039 | info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | 2029 | info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; |
| 2040 | } else if (skb_shared(skb)) { | 2030 | } else if (skb_shared(skb)) { |
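Separate from the hlist work, the two mac80211 hunks (main.c above and tx.c here) move the ack-status frame table to the then-new idr_alloc() API. idr_alloc(idr, ptr, start, end, gfp) allocates an unused id in [start, end) and returns it, or a negative errno such as -ENOMEM or -ENOSPC, so both the idr_pre_get()/idr_get_new_above() retry dance and the manual id > 0xffff range check fall away; passing end = 0x10000 enforces the same 16-bit limit at allocation time, which is also why main.c no longer preallocates an entry. A minimal sketch of the pattern (my_idr, lock, obj and the two helpers are hypothetical):

    int id;

    spin_lock_irqsave(&lock, flags);
    /* ids come from [1, 0x10000), i.e. at most 0xffff, matching the old check */
    id = idr_alloc(&my_idr, obj, 1, 0x10000, GFP_ATOMIC);
    spin_unlock_irqrestore(&lock, flags);

    if (id >= 0)
            use_id(id);             /* success: id is the handle for obj */
    else
            handle_error(id);       /* allocation failed */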
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 9f00db7e03f2..704e514e02ab 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
| @@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p) | |||
| 259 | { | 259 | { |
| 260 | unsigned int hash; | 260 | unsigned int hash; |
| 261 | struct ip_vs_conn *cp; | 261 | struct ip_vs_conn *cp; |
| 262 | struct hlist_node *n; | ||
| 263 | 262 | ||
| 264 | hash = ip_vs_conn_hashkey_param(p, false); | 263 | hash = ip_vs_conn_hashkey_param(p, false); |
| 265 | 264 | ||
| 266 | ct_read_lock(hash); | 265 | ct_read_lock(hash); |
| 267 | 266 | ||
| 268 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 267 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
| 269 | if (cp->af == p->af && | 268 | if (cp->af == p->af && |
| 270 | p->cport == cp->cport && p->vport == cp->vport && | 269 | p->cport == cp->cport && p->vport == cp->vport && |
| 271 | ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && | 270 | ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && |
| @@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) | |||
| 344 | { | 343 | { |
| 345 | unsigned int hash; | 344 | unsigned int hash; |
| 346 | struct ip_vs_conn *cp; | 345 | struct ip_vs_conn *cp; |
| 347 | struct hlist_node *n; | ||
| 348 | 346 | ||
| 349 | hash = ip_vs_conn_hashkey_param(p, false); | 347 | hash = ip_vs_conn_hashkey_param(p, false); |
| 350 | 348 | ||
| 351 | ct_read_lock(hash); | 349 | ct_read_lock(hash); |
| 352 | 350 | ||
| 353 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 351 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
| 354 | if (!ip_vs_conn_net_eq(cp, p->net)) | 352 | if (!ip_vs_conn_net_eq(cp, p->net)) |
| 355 | continue; | 353 | continue; |
| 356 | if (p->pe_data && p->pe->ct_match) { | 354 | if (p->pe_data && p->pe->ct_match) { |
| @@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) | |||
| 394 | { | 392 | { |
| 395 | unsigned int hash; | 393 | unsigned int hash; |
| 396 | struct ip_vs_conn *cp, *ret=NULL; | 394 | struct ip_vs_conn *cp, *ret=NULL; |
| 397 | struct hlist_node *n; | ||
| 398 | 395 | ||
| 399 | /* | 396 | /* |
| 400 | * Check for "full" addressed entries | 397 | * Check for "full" addressed entries |
| @@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) | |||
| 403 | 400 | ||
| 404 | ct_read_lock(hash); | 401 | ct_read_lock(hash); |
| 405 | 402 | ||
| 406 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 403 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
| 407 | if (cp->af == p->af && | 404 | if (cp->af == p->af && |
| 408 | p->vport == cp->cport && p->cport == cp->dport && | 405 | p->vport == cp->cport && p->cport == cp->dport && |
| 409 | ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && | 406 | ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && |
| @@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) | |||
| 953 | int idx; | 950 | int idx; |
| 954 | struct ip_vs_conn *cp; | 951 | struct ip_vs_conn *cp; |
| 955 | struct ip_vs_iter_state *iter = seq->private; | 952 | struct ip_vs_iter_state *iter = seq->private; |
| 956 | struct hlist_node *n; | ||
| 957 | 953 | ||
| 958 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { | 954 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { |
| 959 | ct_read_lock_bh(idx); | 955 | ct_read_lock_bh(idx); |
| 960 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) { | 956 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
| 961 | if (pos-- == 0) { | 957 | if (pos-- == 0) { |
| 962 | iter->l = &ip_vs_conn_tab[idx]; | 958 | iter->l = &ip_vs_conn_tab[idx]; |
| 963 | return cp; | 959 | return cp; |
| @@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 981 | { | 977 | { |
| 982 | struct ip_vs_conn *cp = v; | 978 | struct ip_vs_conn *cp = v; |
| 983 | struct ip_vs_iter_state *iter = seq->private; | 979 | struct ip_vs_iter_state *iter = seq->private; |
| 984 | struct hlist_node *e; | ||
| 985 | struct hlist_head *l = iter->l; | 980 | struct hlist_head *l = iter->l; |
| 986 | int idx; | 981 | int idx; |
| 987 | 982 | ||
| @@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 990 | return ip_vs_conn_array(seq, 0); | 985 | return ip_vs_conn_array(seq, 0); |
| 991 | 986 | ||
| 992 | /* more on same hash chain? */ | 987 | /* more on same hash chain? */ |
| 993 | if ((e = cp->c_list.next)) | 988 | if (cp->c_list.next) |
| 994 | return hlist_entry(e, struct ip_vs_conn, c_list); | 989 | return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list); |
| 995 | 990 | ||
| 996 | idx = l - ip_vs_conn_tab; | 991 | idx = l - ip_vs_conn_tab; |
| 997 | ct_read_unlock_bh(idx); | 992 | ct_read_unlock_bh(idx); |
| 998 | 993 | ||
| 999 | while (++idx < ip_vs_conn_tab_size) { | 994 | while (++idx < ip_vs_conn_tab_size) { |
| 1000 | ct_read_lock_bh(idx); | 995 | ct_read_lock_bh(idx); |
| 1001 | hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) { | 996 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
| 1002 | iter->l = &ip_vs_conn_tab[idx]; | 997 | iter->l = &ip_vs_conn_tab[idx]; |
| 1003 | return cp; | 998 | return cp; |
| 1004 | } | 999 | } |
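Where code single-steps through a chain outside the iteration macros, as ip_vs_conn_seq_next() does, the deleted scratch pointer is replaced by calling hlist_entry() on the raw ->next pointer directly, which also removes an assignment-in-condition:

    /* was: if ((e = cp->c_list.next)) return hlist_entry(e, ...); */
    if (cp->c_list.next)
            return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list);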
| @@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net) | |||
| 1200 | */ | 1195 | */ |
| 1201 | for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { | 1196 | for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { |
| 1202 | unsigned int hash = net_random() & ip_vs_conn_tab_mask; | 1197 | unsigned int hash = net_random() & ip_vs_conn_tab_mask; |
| 1203 | struct hlist_node *n; | ||
| 1204 | 1198 | ||
| 1205 | /* | 1199 | /* |
| 1206 | * Lock is actually needed in this loop. | 1200 | * Lock is actually needed in this loop. |
| 1207 | */ | 1201 | */ |
| 1208 | ct_write_lock_bh(hash); | 1202 | ct_write_lock_bh(hash); |
| 1209 | 1203 | ||
| 1210 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 1204 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
| 1211 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) | 1205 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) |
| 1212 | /* connection template */ | 1206 | /* connection template */ |
| 1213 | continue; | 1207 | continue; |
| @@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net) | |||
| 1255 | 1249 | ||
| 1256 | flush_again: | 1250 | flush_again: |
| 1257 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { | 1251 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { |
| 1258 | struct hlist_node *n; | ||
| 1259 | |||
| 1260 | /* | 1252 | /* |
| 1261 | * Lock is actually needed in this loop. | 1253 | * Lock is actually needed in this loop. |
| 1262 | */ | 1254 | */ |
| 1263 | ct_write_lock_bh(idx); | 1255 | ct_write_lock_bh(idx); |
| 1264 | 1256 | ||
| 1265 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) { | 1257 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
| 1266 | if (!ip_vs_conn_net_eq(cp, net)) | 1258 | if (!ip_vs_conn_net_eq(cp, net)) |
| 1267 | continue; | 1259 | continue; |
| 1268 | IP_VS_DBG(4, "del connection\n"); | 1260 | IP_VS_DBG(4, "del connection\n"); |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 3921e5bc1235..8c10e3db3d9b 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
| @@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone, | |||
| 90 | const struct nf_conntrack_tuple *tuple) | 90 | const struct nf_conntrack_tuple *tuple) |
| 91 | { | 91 | { |
| 92 | struct nf_conntrack_expect *i; | 92 | struct nf_conntrack_expect *i; |
| 93 | struct hlist_node *n; | ||
| 94 | unsigned int h; | 93 | unsigned int h; |
| 95 | 94 | ||
| 96 | if (!net->ct.expect_count) | 95 | if (!net->ct.expect_count) |
| 97 | return NULL; | 96 | return NULL; |
| 98 | 97 | ||
| 99 | h = nf_ct_expect_dst_hash(tuple); | 98 | h = nf_ct_expect_dst_hash(tuple); |
| 100 | hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { | 99 | hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { |
| 101 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && | 100 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && |
| 102 | nf_ct_zone(i->master) == zone) | 101 | nf_ct_zone(i->master) == zone) |
| 103 | return i; | 102 | return i; |
| @@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone, | |||
| 130 | const struct nf_conntrack_tuple *tuple) | 129 | const struct nf_conntrack_tuple *tuple) |
| 131 | { | 130 | { |
| 132 | struct nf_conntrack_expect *i, *exp = NULL; | 131 | struct nf_conntrack_expect *i, *exp = NULL; |
| 133 | struct hlist_node *n; | ||
| 134 | unsigned int h; | 132 | unsigned int h; |
| 135 | 133 | ||
| 136 | if (!net->ct.expect_count) | 134 | if (!net->ct.expect_count) |
| 137 | return NULL; | 135 | return NULL; |
| 138 | 136 | ||
| 139 | h = nf_ct_expect_dst_hash(tuple); | 137 | h = nf_ct_expect_dst_hash(tuple); |
| 140 | hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { | 138 | hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { |
| 141 | if (!(i->flags & NF_CT_EXPECT_INACTIVE) && | 139 | if (!(i->flags & NF_CT_EXPECT_INACTIVE) && |
| 142 | nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && | 140 | nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && |
| 143 | nf_ct_zone(i->master) == zone) { | 141 | nf_ct_zone(i->master) == zone) { |
| @@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct) | |||
| 172 | { | 170 | { |
| 173 | struct nf_conn_help *help = nfct_help(ct); | 171 | struct nf_conn_help *help = nfct_help(ct); |
| 174 | struct nf_conntrack_expect *exp; | 172 | struct nf_conntrack_expect *exp; |
| 175 | struct hlist_node *n, *next; | 173 | struct hlist_node *next; |
| 176 | 174 | ||
| 177 | /* Optimization: most connection never expect any others. */ | 175 | /* Optimization: most connection never expect any others. */ |
| 178 | if (!help) | 176 | if (!help) |
| 179 | return; | 177 | return; |
| 180 | 178 | ||
| 181 | hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { | 179 | hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { |
| 182 | if (del_timer(&exp->timeout)) { | 180 | if (del_timer(&exp->timeout)) { |
| 183 | nf_ct_unlink_expect(exp); | 181 | nf_ct_unlink_expect(exp); |
| 184 | nf_ct_expect_put(exp); | 182 | nf_ct_expect_put(exp); |
| @@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master, | |||
| 348 | { | 346 | { |
| 349 | struct nf_conn_help *master_help = nfct_help(master); | 347 | struct nf_conn_help *master_help = nfct_help(master); |
| 350 | struct nf_conntrack_expect *exp, *last = NULL; | 348 | struct nf_conntrack_expect *exp, *last = NULL; |
| 351 | struct hlist_node *n; | ||
| 352 | 349 | ||
| 353 | hlist_for_each_entry(exp, n, &master_help->expectations, lnode) { | 350 | hlist_for_each_entry(exp, &master_help->expectations, lnode) { |
| 354 | if (exp->class == new->class) | 351 | if (exp->class == new->class) |
| 355 | last = exp; | 352 | last = exp; |
| 356 | } | 353 | } |
| @@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
| 369 | struct nf_conn_help *master_help = nfct_help(master); | 366 | struct nf_conn_help *master_help = nfct_help(master); |
| 370 | struct nf_conntrack_helper *helper; | 367 | struct nf_conntrack_helper *helper; |
| 371 | struct net *net = nf_ct_exp_net(expect); | 368 | struct net *net = nf_ct_exp_net(expect); |
| 372 | struct hlist_node *n, *next; | 369 | struct hlist_node *next; |
| 373 | unsigned int h; | 370 | unsigned int h; |
| 374 | int ret = 1; | 371 | int ret = 1; |
| 375 | 372 | ||
| @@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
| 378 | goto out; | 375 | goto out; |
| 379 | } | 376 | } |
| 380 | h = nf_ct_expect_dst_hash(&expect->tuple); | 377 | h = nf_ct_expect_dst_hash(&expect->tuple); |
| 381 | hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { | 378 | hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) { |
| 382 | if (expect_matches(i, expect)) { | 379 | if (expect_matches(i, expect)) { |
| 383 | if (del_timer(&i->timeout)) { | 380 | if (del_timer(&i->timeout)) { |
| 384 | nf_ct_unlink_expect(i); | 381 | nf_ct_unlink_expect(i); |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 013cdf69fe29..a9740bd6fe54 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
| @@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) | |||
| 116 | { | 116 | { |
| 117 | struct nf_conntrack_helper *helper; | 117 | struct nf_conntrack_helper *helper; |
| 118 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; | 118 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; |
| 119 | struct hlist_node *n; | ||
| 120 | unsigned int h; | 119 | unsigned int h; |
| 121 | 120 | ||
| 122 | if (!nf_ct_helper_count) | 121 | if (!nf_ct_helper_count) |
| 123 | return NULL; | 122 | return NULL; |
| 124 | 123 | ||
| 125 | h = helper_hash(tuple); | 124 | h = helper_hash(tuple); |
| 126 | hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) { | 125 | hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) { |
| 127 | if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) | 126 | if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) |
| 128 | return helper; | 127 | return helper; |
| 129 | } | 128 | } |
| @@ -134,11 +133,10 @@ struct nf_conntrack_helper * | |||
| 134 | __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) | 133 | __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) |
| 135 | { | 134 | { |
| 136 | struct nf_conntrack_helper *h; | 135 | struct nf_conntrack_helper *h; |
| 137 | struct hlist_node *n; | ||
| 138 | unsigned int i; | 136 | unsigned int i; |
| 139 | 137 | ||
| 140 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 138 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
| 141 | hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { | 139 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { |
| 142 | if (!strcmp(h->name, name) && | 140 | if (!strcmp(h->name, name) && |
| 143 | h->tuple.src.l3num == l3num && | 141 | h->tuple.src.l3num == l3num && |
| 144 | h->tuple.dst.protonum == protonum) | 142 | h->tuple.dst.protonum == protonum) |
| @@ -357,7 +355,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
| 357 | { | 355 | { |
| 358 | int ret = 0; | 356 | int ret = 0; |
| 359 | struct nf_conntrack_helper *cur; | 357 | struct nf_conntrack_helper *cur; |
| 360 | struct hlist_node *n; | ||
| 361 | unsigned int h = helper_hash(&me->tuple); | 358 | unsigned int h = helper_hash(&me->tuple); |
| 362 | 359 | ||
| 363 | BUG_ON(me->expect_policy == NULL); | 360 | BUG_ON(me->expect_policy == NULL); |
| @@ -365,7 +362,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
| 365 | BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); | 362 | BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); |
| 366 | 363 | ||
| 367 | mutex_lock(&nf_ct_helper_mutex); | 364 | mutex_lock(&nf_ct_helper_mutex); |
| 368 | hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) { | 365 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { |
| 369 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && | 366 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && |
| 370 | cur->tuple.src.l3num == me->tuple.src.l3num && | 367 | cur->tuple.src.l3num == me->tuple.src.l3num && |
| 371 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { | 368 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { |
| @@ -386,13 +383,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, | |||
| 386 | { | 383 | { |
| 387 | struct nf_conntrack_tuple_hash *h; | 384 | struct nf_conntrack_tuple_hash *h; |
| 388 | struct nf_conntrack_expect *exp; | 385 | struct nf_conntrack_expect *exp; |
| 389 | const struct hlist_node *n, *next; | 386 | const struct hlist_node *next; |
| 390 | const struct hlist_nulls_node *nn; | 387 | const struct hlist_nulls_node *nn; |
| 391 | unsigned int i; | 388 | unsigned int i; |
| 392 | 389 | ||
| 393 | /* Get rid of expectations */ | 390 | /* Get rid of expectations */ |
| 394 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 391 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
| 395 | hlist_for_each_entry_safe(exp, n, next, | 392 | hlist_for_each_entry_safe(exp, next, |
| 396 | &net->ct.expect_hash[i], hnode) { | 393 | &net->ct.expect_hash[i], hnode) { |
| 397 | struct nf_conn_help *help = nfct_help(exp->master); | 394 | struct nf_conn_help *help = nfct_help(exp->master); |
| 398 | if ((rcu_dereference_protected( | 395 | if ((rcu_dereference_protected( |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 5d60e04f9679..9904b15f600e 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
| @@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2370 | struct net *net = sock_net(skb->sk); | 2370 | struct net *net = sock_net(skb->sk); |
| 2371 | struct nf_conntrack_expect *exp, *last; | 2371 | struct nf_conntrack_expect *exp, *last; |
| 2372 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); | 2372 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); |
| 2373 | struct hlist_node *n; | ||
| 2374 | u_int8_t l3proto = nfmsg->nfgen_family; | 2373 | u_int8_t l3proto = nfmsg->nfgen_family; |
| 2375 | 2374 | ||
| 2376 | rcu_read_lock(); | 2375 | rcu_read_lock(); |
| 2377 | last = (struct nf_conntrack_expect *)cb->args[1]; | 2376 | last = (struct nf_conntrack_expect *)cb->args[1]; |
| 2378 | for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { | 2377 | for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { |
| 2379 | restart: | 2378 | restart: |
| 2380 | hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]], | 2379 | hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]], |
| 2381 | hnode) { | 2380 | hnode) { |
| 2382 | if (l3proto && exp->tuple.src.l3num != l3proto) | 2381 | if (l3proto && exp->tuple.src.l3num != l3proto) |
| 2383 | continue; | 2382 | continue; |
| @@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
| 2510 | struct nf_conntrack_expect *exp; | 2509 | struct nf_conntrack_expect *exp; |
| 2511 | struct nf_conntrack_tuple tuple; | 2510 | struct nf_conntrack_tuple tuple; |
| 2512 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2511 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 2513 | struct hlist_node *n, *next; | 2512 | struct hlist_node *next; |
| 2514 | u_int8_t u3 = nfmsg->nfgen_family; | 2513 | u_int8_t u3 = nfmsg->nfgen_family; |
| 2515 | unsigned int i; | 2514 | unsigned int i; |
| 2516 | u16 zone; | 2515 | u16 zone; |
| @@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
| 2557 | /* delete all expectations for this helper */ | 2556 | /* delete all expectations for this helper */ |
| 2558 | spin_lock_bh(&nf_conntrack_lock); | 2557 | spin_lock_bh(&nf_conntrack_lock); |
| 2559 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 2558 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
| 2560 | hlist_for_each_entry_safe(exp, n, next, | 2559 | hlist_for_each_entry_safe(exp, next, |
| 2561 | &net->ct.expect_hash[i], | 2560 | &net->ct.expect_hash[i], |
| 2562 | hnode) { | 2561 | hnode) { |
| 2563 | m_help = nfct_help(exp->master); | 2562 | m_help = nfct_help(exp->master); |
| @@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
| 2575 | /* This basically means we have to flush everything*/ | 2574 | /* This basically means we have to flush everything*/ |
| 2576 | spin_lock_bh(&nf_conntrack_lock); | 2575 | spin_lock_bh(&nf_conntrack_lock); |
| 2577 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 2576 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
| 2578 | hlist_for_each_entry_safe(exp, n, next, | 2577 | hlist_for_each_entry_safe(exp, next, |
| 2579 | &net->ct.expect_hash[i], | 2578 | &net->ct.expect_hash[i], |
| 2580 | hnode) { | 2579 | hnode) { |
| 2581 | if (del_timer(&exp->timeout)) { | 2580 | if (del_timer(&exp->timeout)) { |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 069229d919b6..0e7d423324c3 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct, | |||
| 855 | { | 855 | { |
| 856 | struct nf_conn_help *help = nfct_help(ct); | 856 | struct nf_conn_help *help = nfct_help(ct); |
| 857 | struct nf_conntrack_expect *exp; | 857 | struct nf_conntrack_expect *exp; |
| 858 | struct hlist_node *n, *next; | 858 | struct hlist_node *next; |
| 859 | int found = 0; | 859 | int found = 0; |
| 860 | 860 | ||
| 861 | spin_lock_bh(&nf_conntrack_lock); | 861 | spin_lock_bh(&nf_conntrack_lock); |
| 862 | hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { | 862 | hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { |
| 863 | if (exp->class != SIP_EXPECT_SIGNALLING || | 863 | if (exp->class != SIP_EXPECT_SIGNALLING || |
| 864 | !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || | 864 | !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || |
| 865 | exp->tuple.dst.protonum != proto || | 865 | exp->tuple.dst.protonum != proto || |
| @@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media) | |||
| 881 | { | 881 | { |
| 882 | struct nf_conn_help *help = nfct_help(ct); | 882 | struct nf_conn_help *help = nfct_help(ct); |
| 883 | struct nf_conntrack_expect *exp; | 883 | struct nf_conntrack_expect *exp; |
| 884 | struct hlist_node *n, *next; | 884 | struct hlist_node *next; |
| 885 | 885 | ||
| 886 | spin_lock_bh(&nf_conntrack_lock); | 886 | spin_lock_bh(&nf_conntrack_lock); |
| 887 | hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { | 887 | hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { |
| 888 | if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media) | 888 | if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media) |
| 889 | continue; | 889 | continue; |
| 890 | if (!del_timer(&exp->timeout)) | 890 | if (!del_timer(&exp->timeout)) |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 5f2f9109f461..8d5769c6d16e 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
| @@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone, | |||
| 191 | unsigned int h = hash_by_src(net, zone, tuple); | 191 | unsigned int h = hash_by_src(net, zone, tuple); |
| 192 | const struct nf_conn_nat *nat; | 192 | const struct nf_conn_nat *nat; |
| 193 | const struct nf_conn *ct; | 193 | const struct nf_conn *ct; |
| 194 | const struct hlist_node *n; | ||
| 195 | 194 | ||
| 196 | hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) { | 195 | hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) { |
| 197 | ct = nat->ct; | 196 | ct = nat->ct; |
| 198 | if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { | 197 | if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { |
| 199 | /* Copy source part from reply tuple. */ | 198 | /* Copy source part from reply tuple. */ |
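find_appropriate_src() above is the RCU flavour of the same change: hlist_for_each_entry_rcu() now rcu_dereference()s each link itself and needs no node cursor either. A hedged sketch of the resulting lookup shape (the demo_* names are illustrative, not from the patch):

struct demo_obj {
	u32 key;
	struct hlist_node node;		/* added with hlist_add_head_rcu() */
};

static struct hlist_head demo_hash[256];

/* Caller holds rcu_read_lock(); the returned pointer is only stable
 * inside that read-side section (or until a reference is taken). */
static struct demo_obj *demo_lookup_rcu(u32 key)
{
	struct demo_obj *obj;

	hlist_for_each_entry_rcu(obj, &demo_hash[key & 255], node)
		if (obj->key == key)
			return obj;
	return NULL;		/* bucket exhausted */
}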
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 945950a8b1f1..a191b6db657e 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
| @@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, | |||
| 282 | const char *helper_name; | 282 | const char *helper_name; |
| 283 | struct nf_conntrack_helper *cur, *helper = NULL; | 283 | struct nf_conntrack_helper *cur, *helper = NULL; |
| 284 | struct nf_conntrack_tuple tuple; | 284 | struct nf_conntrack_tuple tuple; |
| 285 | struct hlist_node *n; | ||
| 286 | int ret = 0, i; | 285 | int ret = 0, i; |
| 287 | 286 | ||
| 288 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) | 287 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) |
| @@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, | |||
| 296 | 295 | ||
| 297 | rcu_read_lock(); | 296 | rcu_read_lock(); |
| 298 | for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { | 297 | for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { |
| 299 | hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) { | 298 | hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { |
| 300 | 299 | ||
| 301 | /* skip non-userspace conntrack helpers. */ | 300 | /* skip non-userspace conntrack helpers. */ |
| 302 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 301 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) |
| @@ -452,13 +451,12 @@ static int | |||
| 452 | nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | 451 | nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb) |
| 453 | { | 452 | { |
| 454 | struct nf_conntrack_helper *cur, *last; | 453 | struct nf_conntrack_helper *cur, *last; |
| 455 | struct hlist_node *n; | ||
| 456 | 454 | ||
| 457 | rcu_read_lock(); | 455 | rcu_read_lock(); |
| 458 | last = (struct nf_conntrack_helper *)cb->args[1]; | 456 | last = (struct nf_conntrack_helper *)cb->args[1]; |
| 459 | for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) { | 457 | for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) { |
| 460 | restart: | 458 | restart: |
| 461 | hlist_for_each_entry_rcu(cur, n, | 459 | hlist_for_each_entry_rcu(cur, |
| 462 | &nf_ct_helper_hash[cb->args[0]], hnode) { | 460 | &nf_ct_helper_hash[cb->args[0]], hnode) { |
| 463 | 461 | ||
| 464 | /* skip non-userspace conntrack helpers. */ | 462 | /* skip non-userspace conntrack helpers. */ |
| @@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, | |||
| 495 | { | 493 | { |
| 496 | int ret = -ENOENT, i; | 494 | int ret = -ENOENT, i; |
| 497 | struct nf_conntrack_helper *cur; | 495 | struct nf_conntrack_helper *cur; |
| 498 | struct hlist_node *n; | ||
| 499 | struct sk_buff *skb2; | 496 | struct sk_buff *skb2; |
| 500 | char *helper_name = NULL; | 497 | char *helper_name = NULL; |
| 501 | struct nf_conntrack_tuple tuple; | 498 | struct nf_conntrack_tuple tuple; |
| @@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, | |||
| 520 | } | 517 | } |
| 521 | 518 | ||
| 522 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 519 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
| 523 | hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) { | 520 | hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { |
| 524 | 521 | ||
| 525 | /* skip non-userspace conntrack helpers. */ | 522 | /* skip non-userspace conntrack helpers. */ |
| 526 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 523 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) |
| @@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, | |||
| 568 | { | 565 | { |
| 569 | char *helper_name = NULL; | 566 | char *helper_name = NULL; |
| 570 | struct nf_conntrack_helper *cur; | 567 | struct nf_conntrack_helper *cur; |
| 571 | struct hlist_node *n, *tmp; | 568 | struct hlist_node *tmp; |
| 572 | struct nf_conntrack_tuple tuple; | 569 | struct nf_conntrack_tuple tuple; |
| 573 | bool tuple_set = false, found = false; | 570 | bool tuple_set = false, found = false; |
| 574 | int i, j = 0, ret; | 571 | int i, j = 0, ret; |
| @@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, | |||
| 585 | } | 582 | } |
| 586 | 583 | ||
| 587 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 584 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
| 588 | hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i], | 585 | hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], |
| 589 | hnode) { | 586 | hnode) { |
| 590 | /* skip non-userspace conntrack helpers. */ | 587 | /* skip non-userspace conntrack helpers. */ |
| 591 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 588 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) |
| @@ -654,13 +651,13 @@ err_out: | |||
| 654 | static void __exit nfnl_cthelper_exit(void) | 651 | static void __exit nfnl_cthelper_exit(void) |
| 655 | { | 652 | { |
| 656 | struct nf_conntrack_helper *cur; | 653 | struct nf_conntrack_helper *cur; |
| 657 | struct hlist_node *n, *tmp; | 654 | struct hlist_node *tmp; |
| 658 | int i; | 655 | int i; |
| 659 | 656 | ||
| 660 | nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); | 657 | nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); |
| 661 | 658 | ||
| 662 | for (i=0; i<nf_ct_helper_hsize; i++) { | 659 | for (i=0; i<nf_ct_helper_hsize; i++) { |
| 663 | hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i], | 660 | hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], |
| 664 | hnode) { | 661 | hnode) { |
| 665 | /* skip non-userspace conntrack helpers. */ | 662 | /* skip non-userspace conntrack helpers. */ |
| 666 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 663 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) |
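nfnl_cthelper_dump_table() above also shows the standard netlink dump-resume bookkeeping: cb->args[0] remembers the bucket and cb->args[1] the last entry emitted, so the next ->dump() call can pick up mid-bucket once the skb fills. A hedged distillation (demo_* names, demo_fill_info() and DEMO_HSIZE are illustrative):

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct demo_obj *cur, *last = (struct demo_obj *)cb->args[1];

	rcu_read_lock();
	for (; cb->args[0] < DEMO_HSIZE; cb->args[0]++) {
restart:
		hlist_for_each_entry_rcu(cur, &demo_hash[cb->args[0]], node) {
			if (cb->args[1]) {
				if (cur != last)
					continue;	/* already dumped */
				cb->args[1] = 0;	/* resume point reached */
			}
			if (demo_fill_info(skb, cur) < 0) {
				cb->args[1] = (unsigned long)cur;
				goto out;	/* skb full: resume here */
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;	/* resume entry vanished */
			goto restart;		/* rescan this bucket */
		}
	}
out:
	rcu_read_unlock();
	return skb->len;
}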
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 92fd8eca0d31..f248db572972 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -87,11 +87,10 @@ static struct nfulnl_instance * | |||
| 87 | __instance_lookup(u_int16_t group_num) | 87 | __instance_lookup(u_int16_t group_num) |
| 88 | { | 88 | { |
| 89 | struct hlist_head *head; | 89 | struct hlist_head *head; |
| 90 | struct hlist_node *pos; | ||
| 91 | struct nfulnl_instance *inst; | 90 | struct nfulnl_instance *inst; |
| 92 | 91 | ||
| 93 | head = &instance_table[instance_hashfn(group_num)]; | 92 | head = &instance_table[instance_hashfn(group_num)]; |
| 94 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { | 93 | hlist_for_each_entry_rcu(inst, head, hlist) { |
| 95 | if (inst->group_num == group_num) | 94 | if (inst->group_num == group_num) |
| 96 | return inst; | 95 | return inst; |
| 97 | } | 96 | } |
| @@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this, | |||
| 717 | /* destroy all instances for this portid */ | 716 | /* destroy all instances for this portid */ |
| 718 | spin_lock_bh(&instances_lock); | 717 | spin_lock_bh(&instances_lock); |
| 719 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 718 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
| 720 | struct hlist_node *tmp, *t2; | 719 | struct hlist_node *t2; |
| 721 | struct nfulnl_instance *inst; | 720 | struct nfulnl_instance *inst; |
| 722 | struct hlist_head *head = &instance_table[i]; | 721 | struct hlist_head *head = &instance_table[i]; |
| 723 | 722 | ||
| 724 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { | 723 | hlist_for_each_entry_safe(inst, t2, head, hlist) { |
| 725 | if ((net_eq(n->net, &init_net)) && | 724 | if ((net_eq(n->net, &init_net)) && |
| 726 | (n->portid == inst->peer_portid)) | 725 | (n->portid == inst->peer_portid)) |
| 727 | __instance_destroy(inst); | 726 | __instance_destroy(inst); |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 3158d87b56a8..858fd52c1040 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
| @@ -80,11 +80,10 @@ static struct nfqnl_instance * | |||
| 80 | instance_lookup(u_int16_t queue_num) | 80 | instance_lookup(u_int16_t queue_num) |
| 81 | { | 81 | { |
| 82 | struct hlist_head *head; | 82 | struct hlist_head *head; |
| 83 | struct hlist_node *pos; | ||
| 84 | struct nfqnl_instance *inst; | 83 | struct nfqnl_instance *inst; |
| 85 | 84 | ||
| 86 | head = &instance_table[instance_hashfn(queue_num)]; | 85 | head = &instance_table[instance_hashfn(queue_num)]; |
| 87 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { | 86 | hlist_for_each_entry_rcu(inst, head, hlist) { |
| 88 | if (inst->queue_num == queue_num) | 87 | if (inst->queue_num == queue_num) |
| 89 | return inst; | 88 | return inst; |
| 90 | } | 89 | } |
| @@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex) | |||
| 583 | rcu_read_lock(); | 582 | rcu_read_lock(); |
| 584 | 583 | ||
| 585 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 584 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
| 586 | struct hlist_node *tmp; | ||
| 587 | struct nfqnl_instance *inst; | 585 | struct nfqnl_instance *inst; |
| 588 | struct hlist_head *head = &instance_table[i]; | 586 | struct hlist_head *head = &instance_table[i]; |
| 589 | 587 | ||
| 590 | hlist_for_each_entry_rcu(inst, tmp, head, hlist) | 588 | hlist_for_each_entry_rcu(inst, head, hlist) |
| 591 | nfqnl_flush(inst, dev_cmp, ifindex); | 589 | nfqnl_flush(inst, dev_cmp, ifindex); |
| 592 | } | 590 | } |
| 593 | 591 | ||
| @@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this, | |||
| 627 | /* destroy all instances for this portid */ | 625 | /* destroy all instances for this portid */ |
| 628 | spin_lock(&instances_lock); | 626 | spin_lock(&instances_lock); |
| 629 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 627 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
| 630 | struct hlist_node *tmp, *t2; | 628 | struct hlist_node *t2; |
| 631 | struct nfqnl_instance *inst; | 629 | struct nfqnl_instance *inst; |
| 632 | struct hlist_head *head = &instance_table[i]; | 630 | struct hlist_head *head = &instance_table[i]; |
| 633 | 631 | ||
| 634 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { | 632 | hlist_for_each_entry_safe(inst, t2, head, hlist) { |
| 635 | if ((n->net == &init_net) && | 633 | if ((n->net == &init_net) && |
| 636 | (n->portid == inst->peer_portid)) | 634 | (n->portid == inst->peer_portid)) |
| 637 | __instance_destroy(inst); | 635 | __instance_destroy(inst); |
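Both nfnetlink_log and nfnetlink_queue register a netlink notifier so that when a userspace listener closes its socket, every instance bound to that portid is torn down; the _safe variant is mandatory here because __instance_destroy() unlinks the node mid-walk. A hedged sketch of the common shape (demo_instance and demo_rcv_nl_event are illustrative; the other names appear in the hunks above):

static int demo_rcv_nl_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		struct hlist_node *tmp;
		struct demo_instance *inst;
		int i;

		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++)
			hlist_for_each_entry_safe(inst, tmp,
						  &instance_table[i], hlist)
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}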
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index f264032b8c56..370adf622cef 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c | |||
| @@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est) | |||
| 43 | struct xt_rateest *xt_rateest_lookup(const char *name) | 43 | struct xt_rateest *xt_rateest_lookup(const char *name) |
| 44 | { | 44 | { |
| 45 | struct xt_rateest *est; | 45 | struct xt_rateest *est; |
| 46 | struct hlist_node *n; | ||
| 47 | unsigned int h; | 46 | unsigned int h; |
| 48 | 47 | ||
| 49 | h = xt_rateest_hash(name); | 48 | h = xt_rateest_hash(name); |
| 50 | mutex_lock(&xt_rateest_mutex); | 49 | mutex_lock(&xt_rateest_mutex); |
| 51 | hlist_for_each_entry(est, n, &rateest_hash[h], list) { | 50 | hlist_for_each_entry(est, &rateest_hash[h], list) { |
| 52 | if (strcmp(est->name, name) == 0) { | 51 | if (strcmp(est->name, name) == 0) { |
| 53 | est->refcnt++; | 52 | est->refcnt++; |
| 54 | mutex_unlock(&xt_rateest_mutex); | 53 | mutex_unlock(&xt_rateest_mutex); |
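xt_rateest_lookup() keeps its pattern across the conversion: walk the bucket under xt_rateest_mutex and pin the estimator with a reference before dropping the lock, so it cannot be freed between lookup and use. With the miss path, which the hunk truncates, reconstructed (hedged; it may differ in detail from the tree):

struct xt_rateest *xt_rateest_lookup(const char *name)
{
	struct xt_rateest *est;
	unsigned int h;

	h = xt_rateest_hash(name);
	mutex_lock(&xt_rateest_mutex);
	hlist_for_each_entry(est, &rateest_hash[h], list) {
		if (strcmp(est->name, name) == 0) {
			est->refcnt++;		/* pin before unlocking */
			mutex_unlock(&xt_rateest_mutex);
			return est;
		}
	}
	mutex_unlock(&xt_rateest_mutex);
	return NULL;
}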
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 70b5591a2586..c40b2695633b 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c | |||
| @@ -101,7 +101,7 @@ static int count_them(struct net *net, | |||
| 101 | { | 101 | { |
| 102 | const struct nf_conntrack_tuple_hash *found; | 102 | const struct nf_conntrack_tuple_hash *found; |
| 103 | struct xt_connlimit_conn *conn; | 103 | struct xt_connlimit_conn *conn; |
| 104 | struct hlist_node *pos, *n; | 104 | struct hlist_node *n; |
| 105 | struct nf_conn *found_ct; | 105 | struct nf_conn *found_ct; |
| 106 | struct hlist_head *hash; | 106 | struct hlist_head *hash; |
| 107 | bool addit = true; | 107 | bool addit = true; |
| @@ -115,7 +115,7 @@ static int count_them(struct net *net, | |||
| 115 | rcu_read_lock(); | 115 | rcu_read_lock(); |
| 116 | 116 | ||
| 117 | /* check the saved connections */ | 117 | /* check the saved connections */ |
| 118 | hlist_for_each_entry_safe(conn, pos, n, hash, node) { | 118 | hlist_for_each_entry_safe(conn, n, hash, node) { |
| 119 | found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, | 119 | found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, |
| 120 | &conn->tuple); | 120 | &conn->tuple); |
| 121 | found_ct = NULL; | 121 | found_ct = NULL; |
| @@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) | |||
| 258 | { | 258 | { |
| 259 | const struct xt_connlimit_info *info = par->matchinfo; | 259 | const struct xt_connlimit_info *info = par->matchinfo; |
| 260 | struct xt_connlimit_conn *conn; | 260 | struct xt_connlimit_conn *conn; |
| 261 | struct hlist_node *pos, *n; | 261 | struct hlist_node *n; |
| 262 | struct hlist_head *hash = info->data->iphash; | 262 | struct hlist_head *hash = info->data->iphash; |
| 263 | unsigned int i; | 263 | unsigned int i; |
| 264 | 264 | ||
| 265 | nf_ct_l3proto_module_put(par->family); | 265 | nf_ct_l3proto_module_put(par->family); |
| 266 | 266 | ||
| 267 | for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) { | 267 | for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) { |
| 268 | hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) { | 268 | hlist_for_each_entry_safe(conn, n, &hash[i], node) { |
| 269 | hlist_del(&conn->node); | 269 | hlist_del(&conn->node); |
| 270 | kfree(conn); | 270 | kfree(conn); |
| 271 | } | 271 | } |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 98218c896d2e..f330e8beaf69 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
| @@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht, | |||
| 141 | const struct dsthash_dst *dst) | 141 | const struct dsthash_dst *dst) |
| 142 | { | 142 | { |
| 143 | struct dsthash_ent *ent; | 143 | struct dsthash_ent *ent; |
| 144 | struct hlist_node *pos; | ||
| 145 | u_int32_t hash = hash_dst(ht, dst); | 144 | u_int32_t hash = hash_dst(ht, dst); |
| 146 | 145 | ||
| 147 | if (!hlist_empty(&ht->hash[hash])) { | 146 | if (!hlist_empty(&ht->hash[hash])) { |
| 148 | hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node) | 147 | hlist_for_each_entry_rcu(ent, &ht->hash[hash], node) |
| 149 | if (dst_cmp(ent, dst)) { | 148 | if (dst_cmp(ent, dst)) { |
| 150 | spin_lock(&ent->lock); | 149 | spin_lock(&ent->lock); |
| 151 | return ent; | 150 | return ent; |
| @@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, | |||
| 297 | spin_lock_bh(&ht->lock); | 296 | spin_lock_bh(&ht->lock); |
| 298 | for (i = 0; i < ht->cfg.size; i++) { | 297 | for (i = 0; i < ht->cfg.size; i++) { |
| 299 | struct dsthash_ent *dh; | 298 | struct dsthash_ent *dh; |
| 300 | struct hlist_node *pos, *n; | 299 | struct hlist_node *n; |
| 301 | hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) { | 300 | hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { |
| 302 | if ((*select)(ht, dh)) | 301 | if ((*select)(ht, dh)) |
| 303 | dsthash_free(ht, dh); | 302 | dsthash_free(ht, dh); |
| 304 | } | 303 | } |
| @@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net, | |||
| 343 | { | 342 | { |
| 344 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); | 343 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); |
| 345 | struct xt_hashlimit_htable *hinfo; | 344 | struct xt_hashlimit_htable *hinfo; |
| 346 | struct hlist_node *pos; | ||
| 347 | 345 | ||
| 348 | hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) { | 346 | hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) { |
| 349 | if (!strcmp(name, hinfo->pde->name) && | 347 | if (!strcmp(name, hinfo->pde->name) && |
| 350 | hinfo->family == family) { | 348 | hinfo->family == family) { |
| 351 | hinfo->use++; | 349 | hinfo->use++; |
| @@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v) | |||
| 821 | struct xt_hashlimit_htable *htable = s->private; | 819 | struct xt_hashlimit_htable *htable = s->private; |
| 822 | unsigned int *bucket = (unsigned int *)v; | 820 | unsigned int *bucket = (unsigned int *)v; |
| 823 | struct dsthash_ent *ent; | 821 | struct dsthash_ent *ent; |
| 824 | struct hlist_node *pos; | ||
| 825 | 822 | ||
| 826 | if (!hlist_empty(&htable->hash[*bucket])) { | 823 | if (!hlist_empty(&htable->hash[*bucket])) { |
| 827 | hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) | 824 | hlist_for_each_entry(ent, &htable->hash[*bucket], node) |
| 828 | if (dl_seq_real_show(ent, htable->family, s)) | 825 | if (dl_seq_real_show(ent, htable->family, s)) |
| 829 | return -1; | 826 | return -1; |
| 830 | } | 827 | } |
| @@ -877,7 +874,6 @@ static int __net_init hashlimit_proc_net_init(struct net *net) | |||
| 877 | static void __net_exit hashlimit_proc_net_exit(struct net *net) | 874 | static void __net_exit hashlimit_proc_net_exit(struct net *net) |
| 878 | { | 875 | { |
| 879 | struct xt_hashlimit_htable *hinfo; | 876 | struct xt_hashlimit_htable *hinfo; |
| 880 | struct hlist_node *pos; | ||
| 881 | struct proc_dir_entry *pde; | 877 | struct proc_dir_entry *pde; |
| 882 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); | 878 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); |
| 883 | 879 | ||
| @@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net) | |||
| 890 | if (pde == NULL) | 886 | if (pde == NULL) |
| 891 | pde = hashlimit_net->ip6t_hashlimit; | 887 | pde = hashlimit_net->ip6t_hashlimit; |
| 892 | 888 | ||
| 893 | hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) | 889 | hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) |
| 894 | remove_proc_entry(hinfo->pde->name, pde); | 890 | remove_proc_entry(hinfo->pde->name, pde); |
| 895 | 891 | ||
| 896 | hashlimit_net->ipt_hashlimit = NULL; | 892 | hashlimit_net->ipt_hashlimit = NULL; |
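Note the lock hand-off in dsthash_find() above: on a match it returns with ent->lock held, and the caller releases it after updating the entry's token-bucket state. Reduced to its shape (demo_ent and demo_find_locked are illustrative):

static struct demo_ent *demo_find_locked(struct hlist_head *bucket, u32 key)
{
	struct demo_ent *ent;

	hlist_for_each_entry_rcu(ent, bucket, node)
		if (ent->key == key) {
			spin_lock(&ent->lock);
			return ent;	/* caller must spin_unlock() */
		}
	return NULL;
}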
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 31bf233dae97..d9cad315229d 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
| @@ -540,7 +540,7 @@ static ssize_t | |||
| 540 | recent_mt_proc_write(struct file *file, const char __user *input, | 540 | recent_mt_proc_write(struct file *file, const char __user *input, |
| 541 | size_t size, loff_t *loff) | 541 | size_t size, loff_t *loff) |
| 542 | { | 542 | { |
| 543 | const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); | 543 | const struct proc_dir_entry *pde = PDE(file_inode(file)); |
| 544 | struct recent_table *t = pde->data; | 544 | struct recent_table *t = pde->data; |
| 545 | struct recent_entry *e; | 545 | struct recent_entry *e; |
| 546 | char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; | 546 | char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; |
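The xt_recent hunk rides a different cleanup in the same pull: open-coded file->f_path.dentry->d_inode chains become file_inode(file). From memory of the 3.9-era helper (an assumption; later kernels cache the inode in file->f_inode at open time) it is roughly:

static inline struct inode *file_inode(struct file *f)
{
	return f->f_path.dentry->d_inode;
}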
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 3d55e0c713e2..1e3fd5bfcd86 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) | |||
| 248 | struct nl_portid_hash *hash = &nl_table[protocol].hash; | 248 | struct nl_portid_hash *hash = &nl_table[protocol].hash; |
| 249 | struct hlist_head *head; | 249 | struct hlist_head *head; |
| 250 | struct sock *sk; | 250 | struct sock *sk; |
| 251 | struct hlist_node *node; | ||
| 252 | 251 | ||
| 253 | read_lock(&nl_table_lock); | 252 | read_lock(&nl_table_lock); |
| 254 | head = nl_portid_hashfn(hash, portid); | 253 | head = nl_portid_hashfn(hash, portid); |
| 255 | sk_for_each(sk, node, head) { | 254 | sk_for_each(sk, head) { |
| 256 | if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { | 255 | if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { |
| 257 | sock_hold(sk); | 256 | sock_hold(sk); |
| 258 | goto found; | 257 | goto found; |
| @@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow) | |||
| 312 | 311 | ||
| 313 | for (i = 0; i <= omask; i++) { | 312 | for (i = 0; i <= omask; i++) { |
| 314 | struct sock *sk; | 313 | struct sock *sk; |
| 315 | struct hlist_node *node, *tmp; | 314 | struct hlist_node *tmp; |
| 316 | 315 | ||
| 317 | sk_for_each_safe(sk, node, tmp, &otable[i]) | 316 | sk_for_each_safe(sk, tmp, &otable[i]) |
| 318 | __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid)); | 317 | __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid)); |
| 319 | } | 318 | } |
| 320 | 319 | ||
| @@ -344,7 +343,6 @@ static void | |||
| 344 | netlink_update_listeners(struct sock *sk) | 343 | netlink_update_listeners(struct sock *sk) |
| 345 | { | 344 | { |
| 346 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; | 345 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; |
| 347 | struct hlist_node *node; | ||
| 348 | unsigned long mask; | 346 | unsigned long mask; |
| 349 | unsigned int i; | 347 | unsigned int i; |
| 350 | struct listeners *listeners; | 348 | struct listeners *listeners; |
| @@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk) | |||
| 355 | 353 | ||
| 356 | for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { | 354 | for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { |
| 357 | mask = 0; | 355 | mask = 0; |
| 358 | sk_for_each_bound(sk, node, &tbl->mc_list) { | 356 | sk_for_each_bound(sk, &tbl->mc_list) { |
| 359 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) | 357 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) |
| 360 | mask |= nlk_sk(sk)->groups[i]; | 358 | mask |= nlk_sk(sk)->groups[i]; |
| 361 | } | 359 | } |
| @@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) | |||
| 371 | struct hlist_head *head; | 369 | struct hlist_head *head; |
| 372 | int err = -EADDRINUSE; | 370 | int err = -EADDRINUSE; |
| 373 | struct sock *osk; | 371 | struct sock *osk; |
| 374 | struct hlist_node *node; | ||
| 375 | int len; | 372 | int len; |
| 376 | 373 | ||
| 377 | netlink_table_grab(); | 374 | netlink_table_grab(); |
| 378 | head = nl_portid_hashfn(hash, portid); | 375 | head = nl_portid_hashfn(hash, portid); |
| 379 | len = 0; | 376 | len = 0; |
| 380 | sk_for_each(osk, node, head) { | 377 | sk_for_each(osk, head) { |
| 381 | if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) | 378 | if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) |
| 382 | break; | 379 | break; |
| 383 | len++; | 380 | len++; |
| 384 | } | 381 | } |
| 385 | if (node) | 382 | if (osk) |
| 386 | goto err; | 383 | goto err; |
| 387 | 384 | ||
| 388 | err = -EBUSY; | 385 | err = -EBUSY; |
| @@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock) | |||
| 575 | struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; | 572 | struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; |
| 576 | struct hlist_head *head; | 573 | struct hlist_head *head; |
| 577 | struct sock *osk; | 574 | struct sock *osk; |
| 578 | struct hlist_node *node; | ||
| 579 | s32 portid = task_tgid_vnr(current); | 575 | s32 portid = task_tgid_vnr(current); |
| 580 | int err; | 576 | int err; |
| 581 | static s32 rover = -4097; | 577 | static s32 rover = -4097; |
| @@ -584,7 +580,7 @@ retry: | |||
| 584 | cond_resched(); | 580 | cond_resched(); |
| 585 | netlink_table_grab(); | 581 | netlink_table_grab(); |
| 586 | head = nl_portid_hashfn(hash, portid); | 582 | head = nl_portid_hashfn(hash, portid); |
| 587 | sk_for_each(osk, node, head) { | 583 | sk_for_each(osk, head) { |
| 588 | if (!net_eq(sock_net(osk), net)) | 584 | if (!net_eq(sock_net(osk), net)) |
| 589 | continue; | 585 | continue; |
| 590 | if (nlk_sk(osk)->portid == portid) { | 586 | if (nlk_sk(osk)->portid == portid) { |
| @@ -809,7 +805,7 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid) | |||
| 809 | 805 | ||
| 810 | struct sock *netlink_getsockbyfilp(struct file *filp) | 806 | struct sock *netlink_getsockbyfilp(struct file *filp) |
| 811 | { | 807 | { |
| 812 | struct inode *inode = filp->f_path.dentry->d_inode; | 808 | struct inode *inode = file_inode(filp); |
| 813 | struct sock *sock; | 809 | struct sock *sock; |
| 814 | 810 | ||
| 815 | if (!S_ISSOCK(inode->i_mode)) | 811 | if (!S_ISSOCK(inode->i_mode)) |
| @@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid | |||
| 1101 | { | 1097 | { |
| 1102 | struct net *net = sock_net(ssk); | 1098 | struct net *net = sock_net(ssk); |
| 1103 | struct netlink_broadcast_data info; | 1099 | struct netlink_broadcast_data info; |
| 1104 | struct hlist_node *node; | ||
| 1105 | struct sock *sk; | 1100 | struct sock *sk; |
| 1106 | 1101 | ||
| 1107 | skb = netlink_trim(skb, allocation); | 1102 | skb = netlink_trim(skb, allocation); |
| @@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid | |||
| 1124 | 1119 | ||
| 1125 | netlink_lock_table(); | 1120 | netlink_lock_table(); |
| 1126 | 1121 | ||
| 1127 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | 1122 | sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) |
| 1128 | do_one_broadcast(sk, &info); | 1123 | do_one_broadcast(sk, &info); |
| 1129 | 1124 | ||
| 1130 | consume_skb(skb); | 1125 | consume_skb(skb); |
| @@ -1200,7 +1195,6 @@ out: | |||
| 1200 | int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) | 1195 | int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) |
| 1201 | { | 1196 | { |
| 1202 | struct netlink_set_err_data info; | 1197 | struct netlink_set_err_data info; |
| 1203 | struct hlist_node *node; | ||
| 1204 | struct sock *sk; | 1198 | struct sock *sk; |
| 1205 | int ret = 0; | 1199 | int ret = 0; |
| 1206 | 1200 | ||
| @@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) | |||
| 1212 | 1206 | ||
| 1213 | read_lock(&nl_table_lock); | 1207 | read_lock(&nl_table_lock); |
| 1214 | 1208 | ||
| 1215 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | 1209 | sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) |
| 1216 | ret += do_one_set_err(sk, &info); | 1210 | ret += do_one_set_err(sk, &info); |
| 1217 | 1211 | ||
| 1218 | read_unlock(&nl_table_lock); | 1212 | read_unlock(&nl_table_lock); |
| @@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups) | |||
| 1676 | void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) | 1670 | void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) |
| 1677 | { | 1671 | { |
| 1678 | struct sock *sk; | 1672 | struct sock *sk; |
| 1679 | struct hlist_node *node; | ||
| 1680 | struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; | 1673 | struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; |
| 1681 | 1674 | ||
| 1682 | sk_for_each_bound(sk, node, &tbl->mc_list) | 1675 | sk_for_each_bound(sk, &tbl->mc_list) |
| 1683 | netlink_update_socket_mc(nlk_sk(sk), group, 0); | 1676 | netlink_update_socket_mc(nlk_sk(sk), group, 0); |
| 1684 | } | 1677 | } |
| 1685 | 1678 | ||
| @@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | |||
| 1974 | struct nl_seq_iter *iter = seq->private; | 1967 | struct nl_seq_iter *iter = seq->private; |
| 1975 | int i, j; | 1968 | int i, j; |
| 1976 | struct sock *s; | 1969 | struct sock *s; |
| 1977 | struct hlist_node *node; | ||
| 1978 | loff_t off = 0; | 1970 | loff_t off = 0; |
| 1979 | 1971 | ||
| 1980 | for (i = 0; i < MAX_LINKS; i++) { | 1972 | for (i = 0; i < MAX_LINKS; i++) { |
| 1981 | struct nl_portid_hash *hash = &nl_table[i].hash; | 1973 | struct nl_portid_hash *hash = &nl_table[i].hash; |
| 1982 | 1974 | ||
| 1983 | for (j = 0; j <= hash->mask; j++) { | 1975 | for (j = 0; j <= hash->mask; j++) { |
| 1984 | sk_for_each(s, node, &hash->table[j]) { | 1976 | sk_for_each(s, &hash->table[j]) { |
| 1985 | if (sock_net(s) != seq_file_net(seq)) | 1977 | if (sock_net(s) != seq_file_net(seq)) |
| 1986 | continue; | 1978 | continue; |
| 1987 | if (off == pos) { | 1979 | if (off == pos) { |
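The netlink_insert() hunk above is the one place in this file where the conversion is more than mechanical: the old code tested the hlist_node cursor after the loop to tell "found a clash" from "walked off the end", and the new macros leave the entry pointer itself NULL after a full traversal, so osk can be tested directly. The duplicate-bind check reduces to (demo_portid_in_use is an illustrative name):

static bool demo_portid_in_use(struct net *net, struct hlist_head *head,
			       u32 portid)
{
	struct sock *osk;

	sk_for_each(osk, head)
		if (net_eq(sock_net(osk), net) &&
		    nlk_sk(osk)->portid == portid)
			break;		/* osk still points at the clash */
	return osk != NULL;		/* NULL only after a complete walk */
}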
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 297b07a029de..d1fa1d9ffd2e 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk) | |||
| 104 | static void nr_kill_by_device(struct net_device *dev) | 104 | static void nr_kill_by_device(struct net_device *dev) |
| 105 | { | 105 | { |
| 106 | struct sock *s; | 106 | struct sock *s; |
| 107 | struct hlist_node *node; | ||
| 108 | 107 | ||
| 109 | spin_lock_bh(&nr_list_lock); | 108 | spin_lock_bh(&nr_list_lock); |
| 110 | sk_for_each(s, node, &nr_list) | 109 | sk_for_each(s, &nr_list) |
| 111 | if (nr_sk(s)->device == dev) | 110 | if (nr_sk(s)->device == dev) |
| 112 | nr_disconnect(s, ENETUNREACH); | 111 | nr_disconnect(s, ENETUNREACH); |
| 113 | spin_unlock_bh(&nr_list_lock); | 112 | spin_unlock_bh(&nr_list_lock); |
| @@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk) | |||
| 149 | static struct sock *nr_find_listener(ax25_address *addr) | 148 | static struct sock *nr_find_listener(ax25_address *addr) |
| 150 | { | 149 | { |
| 151 | struct sock *s; | 150 | struct sock *s; |
| 152 | struct hlist_node *node; | ||
| 153 | 151 | ||
| 154 | spin_lock_bh(&nr_list_lock); | 152 | spin_lock_bh(&nr_list_lock); |
| 155 | sk_for_each(s, node, &nr_list) | 153 | sk_for_each(s, &nr_list) |
| 156 | if (!ax25cmp(&nr_sk(s)->source_addr, addr) && | 154 | if (!ax25cmp(&nr_sk(s)->source_addr, addr) && |
| 157 | s->sk_state == TCP_LISTEN) { | 155 | s->sk_state == TCP_LISTEN) { |
| 158 | bh_lock_sock(s); | 156 | bh_lock_sock(s); |
| @@ -170,10 +168,9 @@ found: | |||
| 170 | static struct sock *nr_find_socket(unsigned char index, unsigned char id) | 168 | static struct sock *nr_find_socket(unsigned char index, unsigned char id) |
| 171 | { | 169 | { |
| 172 | struct sock *s; | 170 | struct sock *s; |
| 173 | struct hlist_node *node; | ||
| 174 | 171 | ||
| 175 | spin_lock_bh(&nr_list_lock); | 172 | spin_lock_bh(&nr_list_lock); |
| 176 | sk_for_each(s, node, &nr_list) { | 173 | sk_for_each(s, &nr_list) { |
| 177 | struct nr_sock *nr = nr_sk(s); | 174 | struct nr_sock *nr = nr_sk(s); |
| 178 | 175 | ||
| 179 | if (nr->my_index == index && nr->my_id == id) { | 176 | if (nr->my_index == index && nr->my_id == id) { |
| @@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id, | |||
| 194 | ax25_address *dest) | 191 | ax25_address *dest) |
| 195 | { | 192 | { |
| 196 | struct sock *s; | 193 | struct sock *s; |
| 197 | struct hlist_node *node; | ||
| 198 | 194 | ||
| 199 | spin_lock_bh(&nr_list_lock); | 195 | spin_lock_bh(&nr_list_lock); |
| 200 | sk_for_each(s, node, &nr_list) { | 196 | sk_for_each(s, &nr_list) { |
| 201 | struct nr_sock *nr = nr_sk(s); | 197 | struct nr_sock *nr = nr_sk(s); |
| 202 | 198 | ||
| 203 | if (nr->your_index == index && nr->your_id == id && | 199 | if (nr->your_index == index && nr->your_id == id && |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 70ffff76a967..b976d5eff2de 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
| @@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign) | |||
| 49 | { | 49 | { |
| 50 | struct nr_node *found = NULL; | 50 | struct nr_node *found = NULL; |
| 51 | struct nr_node *nr_node; | 51 | struct nr_node *nr_node; |
| 52 | struct hlist_node *node; | ||
| 53 | 52 | ||
| 54 | spin_lock_bh(&nr_node_list_lock); | 53 | spin_lock_bh(&nr_node_list_lock); |
| 55 | nr_node_for_each(nr_node, node, &nr_node_list) | 54 | nr_node_for_each(nr_node, &nr_node_list) |
| 56 | if (ax25cmp(callsign, &nr_node->callsign) == 0) { | 55 | if (ax25cmp(callsign, &nr_node->callsign) == 0) { |
| 57 | nr_node_hold(nr_node); | 56 | nr_node_hold(nr_node); |
| 58 | found = nr_node; | 57 | found = nr_node; |
| @@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign, | |||
| 67 | { | 66 | { |
| 68 | struct nr_neigh *found = NULL; | 67 | struct nr_neigh *found = NULL; |
| 69 | struct nr_neigh *nr_neigh; | 68 | struct nr_neigh *nr_neigh; |
| 70 | struct hlist_node *node; | ||
| 71 | 69 | ||
| 72 | spin_lock_bh(&nr_neigh_list_lock); | 70 | spin_lock_bh(&nr_neigh_list_lock); |
| 73 | nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) | 71 | nr_neigh_for_each(nr_neigh, &nr_neigh_list) |
| 74 | if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && | 72 | if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && |
| 75 | nr_neigh->dev == dev) { | 73 | nr_neigh->dev == dev) { |
| 76 | nr_neigh_hold(nr_neigh); | 74 | nr_neigh_hold(nr_neigh); |
| @@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, | |||
| 114 | */ | 112 | */ |
| 115 | if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { | 113 | if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { |
| 116 | struct nr_node *nr_nodet; | 114 | struct nr_node *nr_nodet; |
| 117 | struct hlist_node *node; | ||
| 118 | 115 | ||
| 119 | spin_lock_bh(&nr_node_list_lock); | 116 | spin_lock_bh(&nr_node_list_lock); |
| 120 | nr_node_for_each(nr_nodet, node, &nr_node_list) { | 117 | nr_node_for_each(nr_nodet, &nr_node_list) { |
| 121 | nr_node_lock(nr_nodet); | 118 | nr_node_lock(nr_nodet); |
| 122 | for (i = 0; i < nr_nodet->count; i++) | 119 | for (i = 0; i < nr_nodet->count; i++) |
| 123 | if (nr_nodet->routes[i].neighbour == nr_neigh) | 120 | if (nr_nodet->routes[i].neighbour == nr_neigh) |
| @@ -485,11 +482,11 @@ static int nr_dec_obs(void) | |||
| 485 | { | 482 | { |
| 486 | struct nr_neigh *nr_neigh; | 483 | struct nr_neigh *nr_neigh; |
| 487 | struct nr_node *s; | 484 | struct nr_node *s; |
| 488 | struct hlist_node *node, *nodet; | 485 | struct hlist_node *nodet; |
| 489 | int i; | 486 | int i; |
| 490 | 487 | ||
| 491 | spin_lock_bh(&nr_node_list_lock); | 488 | spin_lock_bh(&nr_node_list_lock); |
| 492 | nr_node_for_each_safe(s, node, nodet, &nr_node_list) { | 489 | nr_node_for_each_safe(s, nodet, &nr_node_list) { |
| 493 | nr_node_lock(s); | 490 | nr_node_lock(s); |
| 494 | for (i = 0; i < s->count; i++) { | 491 | for (i = 0; i < s->count; i++) { |
| 495 | switch (s->routes[i].obs_count) { | 492 | switch (s->routes[i].obs_count) { |
| @@ -540,15 +537,15 @@ static int nr_dec_obs(void) | |||
| 540 | void nr_rt_device_down(struct net_device *dev) | 537 | void nr_rt_device_down(struct net_device *dev) |
| 541 | { | 538 | { |
| 542 | struct nr_neigh *s; | 539 | struct nr_neigh *s; |
| 543 | struct hlist_node *node, *nodet, *node2, *node2t; | 540 | struct hlist_node *nodet, *node2t; |
| 544 | struct nr_node *t; | 541 | struct nr_node *t; |
| 545 | int i; | 542 | int i; |
| 546 | 543 | ||
| 547 | spin_lock_bh(&nr_neigh_list_lock); | 544 | spin_lock_bh(&nr_neigh_list_lock); |
| 548 | nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { | 545 | nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { |
| 549 | if (s->dev == dev) { | 546 | if (s->dev == dev) { |
| 550 | spin_lock_bh(&nr_node_list_lock); | 547 | spin_lock_bh(&nr_node_list_lock); |
| 551 | nr_node_for_each_safe(t, node2, node2t, &nr_node_list) { | 548 | nr_node_for_each_safe(t, node2t, &nr_node_list) { |
| 552 | nr_node_lock(t); | 549 | nr_node_lock(t); |
| 553 | for (i = 0; i < t->count; i++) { | 550 | for (i = 0; i < t->count; i++) { |
| 554 | if (t->routes[i].neighbour == s) { | 551 | if (t->routes[i].neighbour == s) { |
| @@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg) | |||
| 737 | void nr_link_failed(ax25_cb *ax25, int reason) | 734 | void nr_link_failed(ax25_cb *ax25, int reason) |
| 738 | { | 735 | { |
| 739 | struct nr_neigh *s, *nr_neigh = NULL; | 736 | struct nr_neigh *s, *nr_neigh = NULL; |
| 740 | struct hlist_node *node; | ||
| 741 | struct nr_node *nr_node = NULL; | 737 | struct nr_node *nr_node = NULL; |
| 742 | 738 | ||
| 743 | spin_lock_bh(&nr_neigh_list_lock); | 739 | spin_lock_bh(&nr_neigh_list_lock); |
| 744 | nr_neigh_for_each(s, node, &nr_neigh_list) { | 740 | nr_neigh_for_each(s, &nr_neigh_list) { |
| 745 | if (s->ax25 == ax25) { | 741 | if (s->ax25 == ax25) { |
| 746 | nr_neigh_hold(s); | 742 | nr_neigh_hold(s); |
| 747 | nr_neigh = s; | 743 | nr_neigh = s; |
| @@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason) | |||
| 761 | return; | 757 | return; |
| 762 | } | 758 | } |
| 763 | spin_lock_bh(&nr_node_list_lock); | 759 | spin_lock_bh(&nr_node_list_lock); |
| 764 | nr_node_for_each(nr_node, node, &nr_node_list) { | 760 | nr_node_for_each(nr_node, &nr_node_list) { |
| 765 | nr_node_lock(nr_node); | 761 | nr_node_lock(nr_node); |
| 766 | if (nr_node->which < nr_node->count && | 762 | if (nr_node->which < nr_node->count && |
| 767 | nr_node->routes[nr_node->which].neighbour == nr_neigh) | 763 | nr_node->routes[nr_node->which].neighbour == nr_neigh) |
| @@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void) | |||
| 1013 | { | 1009 | { |
| 1014 | struct nr_neigh *s = NULL; | 1010 | struct nr_neigh *s = NULL; |
| 1015 | struct nr_node *t = NULL; | 1011 | struct nr_node *t = NULL; |
| 1016 | struct hlist_node *node, *nodet; | 1012 | struct hlist_node *nodet; |
| 1017 | 1013 | ||
| 1018 | spin_lock_bh(&nr_neigh_list_lock); | 1014 | spin_lock_bh(&nr_neigh_list_lock); |
| 1019 | spin_lock_bh(&nr_node_list_lock); | 1015 | spin_lock_bh(&nr_node_list_lock); |
| 1020 | nr_node_for_each_safe(t, node, nodet, &nr_node_list) { | 1016 | nr_node_for_each_safe(t, nodet, &nr_node_list) { |
| 1021 | nr_node_lock(t); | 1017 | nr_node_lock(t); |
| 1022 | nr_remove_node_locked(t); | 1018 | nr_remove_node_locked(t); |
| 1023 | nr_node_unlock(t); | 1019 | nr_node_unlock(t); |
| 1024 | } | 1020 | } |
| 1025 | nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { | 1021 | nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { |
| 1026 | while(s->count) { | 1022 | while(s->count) { |
| 1027 | s->count--; | 1023 | s->count--; |
| 1028 | nr_neigh_put(s); | 1024 | nr_neigh_put(s); |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 746f5a2f9804..7f8266dd14cb 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
| @@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) | |||
| 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) |
| 72 | { | 72 | { |
| 73 | struct sock *sk; | 73 | struct sock *sk; |
| 74 | struct hlist_node *node, *tmp; | 74 | struct hlist_node *tmp; |
| 75 | struct nfc_llcp_sock *llcp_sock; | 75 | struct nfc_llcp_sock *llcp_sock; |
| 76 | 76 | ||
| 77 | skb_queue_purge(&local->tx_queue); | 77 | skb_queue_purge(&local->tx_queue); |
| 78 | 78 | ||
| 79 | write_lock(&local->sockets.lock); | 79 | write_lock(&local->sockets.lock); |
| 80 | 80 | ||
| 81 | sk_for_each_safe(sk, node, tmp, &local->sockets.head) { | 81 | sk_for_each_safe(sk, tmp, &local->sockets.head) { |
| 82 | llcp_sock = nfc_llcp_sock(sk); | 82 | llcp_sock = nfc_llcp_sock(sk); |
| 83 | 83 | ||
| 84 | bh_lock_sock(sk); | 84 | bh_lock_sock(sk); |
| @@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, | |||
| 171 | u8 ssap, u8 dsap) | 171 | u8 ssap, u8 dsap) |
| 172 | { | 172 | { |
| 173 | struct sock *sk; | 173 | struct sock *sk; |
| 174 | struct hlist_node *node; | ||
| 175 | struct nfc_llcp_sock *llcp_sock, *tmp_sock; | 174 | struct nfc_llcp_sock *llcp_sock, *tmp_sock; |
| 176 | 175 | ||
| 177 | pr_debug("ssap dsap %d %d\n", ssap, dsap); | 176 | pr_debug("ssap dsap %d %d\n", ssap, dsap); |
| @@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, | |||
| 183 | 182 | ||
| 184 | llcp_sock = NULL; | 183 | llcp_sock = NULL; |
| 185 | 184 | ||
| 186 | sk_for_each(sk, node, &local->sockets.head) { | 185 | sk_for_each(sk, &local->sockets.head) { |
| 187 | tmp_sock = nfc_llcp_sock(sk); | 186 | tmp_sock = nfc_llcp_sock(sk); |
| 188 | 187 | ||
| 189 | if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { | 188 | if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { |
| @@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, | |||
| 272 | u8 *sn, size_t sn_len) | 271 | u8 *sn, size_t sn_len) |
| 273 | { | 272 | { |
| 274 | struct sock *sk; | 273 | struct sock *sk; |
| 275 | struct hlist_node *node; | ||
| 276 | struct nfc_llcp_sock *llcp_sock, *tmp_sock; | 274 | struct nfc_llcp_sock *llcp_sock, *tmp_sock; |
| 277 | 275 | ||
| 278 | pr_debug("sn %zd %p\n", sn_len, sn); | 276 | pr_debug("sn %zd %p\n", sn_len, sn); |
| @@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, | |||
| 284 | 282 | ||
| 285 | llcp_sock = NULL; | 283 | llcp_sock = NULL; |
| 286 | 284 | ||
| 287 | sk_for_each(sk, node, &local->sockets.head) { | 285 | sk_for_each(sk, &local->sockets.head) { |
| 288 | tmp_sock = nfc_llcp_sock(sk); | 286 | tmp_sock = nfc_llcp_sock(sk); |
| 289 | 287 | ||
| 290 | pr_debug("llcp sock %p\n", tmp_sock); | 288 | pr_debug("llcp sock %p\n", tmp_sock); |
| @@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) | |||
| 601 | void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, | 599 | void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, |
| 602 | struct sk_buff *skb, u8 direction) | 600 | struct sk_buff *skb, u8 direction) |
| 603 | { | 601 | { |
| 604 | struct hlist_node *node; | ||
| 605 | struct sk_buff *skb_copy = NULL, *nskb; | 602 | struct sk_buff *skb_copy = NULL, *nskb; |
| 606 | struct sock *sk; | 603 | struct sock *sk; |
| 607 | u8 *data; | 604 | u8 *data; |
| 608 | 605 | ||
| 609 | read_lock(&local->raw_sockets.lock); | 606 | read_lock(&local->raw_sockets.lock); |
| 610 | 607 | ||
| 611 | sk_for_each(sk, node, &local->raw_sockets.head) { | 608 | sk_for_each(sk, &local->raw_sockets.head) { |
| 612 | if (sk->sk_state != LLCP_BOUND) | 609 | if (sk->sk_state != LLCP_BOUND) |
| 613 | continue; | 610 | continue; |
| 614 | 611 | ||
| @@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local | |||
| 697 | { | 694 | { |
| 698 | struct sock *sk; | 695 | struct sock *sk; |
| 699 | struct nfc_llcp_sock *llcp_sock; | 696 | struct nfc_llcp_sock *llcp_sock; |
| 700 | struct hlist_node *node; | ||
| 701 | 697 | ||
| 702 | read_lock(&local->connecting_sockets.lock); | 698 | read_lock(&local->connecting_sockets.lock); |
| 703 | 699 | ||
| 704 | sk_for_each(sk, node, &local->connecting_sockets.head) { | 700 | sk_for_each(sk, &local->connecting_sockets.head) { |
| 705 | llcp_sock = nfc_llcp_sock(sk); | 701 | llcp_sock = nfc_llcp_sock(sk); |
| 706 | 702 | ||
| 707 | if (llcp_sock->ssap == ssap) { | 703 | if (llcp_sock->ssap == ssap) { |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 9dc537df46c4..e87a26506dba 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp, | |||
| 158 | struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) | 158 | struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) |
| 159 | { | 159 | { |
| 160 | struct vport *vport; | 160 | struct vport *vport; |
| 161 | struct hlist_node *n; | ||
| 162 | struct hlist_head *head; | 161 | struct hlist_head *head; |
| 163 | 162 | ||
| 164 | head = vport_hash_bucket(dp, port_no); | 163 | head = vport_hash_bucket(dp, port_no); |
| 165 | hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) { | 164 | hlist_for_each_entry_rcu(vport, head, dp_hash_node) { |
| 166 | if (vport->port_no == port_no) | 165 | if (vport->port_no == port_no) |
| 167 | return vport; | 166 | return vport; |
| 168 | } | 167 | } |
| @@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp) | |||
| 1386 | 1385 | ||
| 1387 | for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { | 1386 | for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { |
| 1388 | struct vport *vport; | 1387 | struct vport *vport; |
| 1389 | struct hlist_node *node, *n; | 1388 | struct hlist_node *n; |
| 1390 | 1389 | ||
| 1391 | hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node) | 1390 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) |
| 1392 | if (vport->port_no != OVSP_LOCAL) | 1391 | if (vport->port_no != OVSP_LOCAL) |
| 1393 | ovs_dp_detach_port(vport); | 1392 | ovs_dp_detach_port(vport); |
| 1394 | } | 1393 | } |
| @@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1825 | rcu_read_lock(); | 1824 | rcu_read_lock(); |
| 1826 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { | 1825 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { |
| 1827 | struct vport *vport; | 1826 | struct vport *vport; |
| 1828 | struct hlist_node *n; | ||
| 1829 | 1827 | ||
| 1830 | j = 0; | 1828 | j = 0; |
| 1831 | hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) { | 1829 | hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) { |
| 1832 | if (j >= skip && | 1830 | if (j >= skip && |
| 1833 | ovs_vport_cmd_fill_info(vport, skb, | 1831 | ovs_vport_cmd_fill_info(vport, skb, |
| 1834 | NETLINK_CB(cb->skb).portid, | 1832 | NETLINK_CB(cb->skb).portid, |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index c3294cebc4f2..20605ecf100b 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table) | |||
| 299 | for (i = 0; i < table->n_buckets; i++) { | 299 | for (i = 0; i < table->n_buckets; i++) { |
| 300 | struct sw_flow *flow; | 300 | struct sw_flow *flow; |
| 301 | struct hlist_head *head = flex_array_get(table->buckets, i); | 301 | struct hlist_head *head = flex_array_get(table->buckets, i); |
| 302 | struct hlist_node *node, *n; | 302 | struct hlist_node *n; |
| 303 | int ver = table->node_ver; | 303 | int ver = table->node_ver; |
| 304 | 304 | ||
| 305 | hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { | 305 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { |
| 306 | hlist_del_rcu(&flow->hash_node[ver]); | 306 | hlist_del_rcu(&flow->hash_node[ver]); |
| 307 | ovs_flow_free(flow); | 307 | ovs_flow_free(flow); |
| 308 | } | 308 | } |
| @@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la | |||
| 332 | { | 332 | { |
| 333 | struct sw_flow *flow; | 333 | struct sw_flow *flow; |
| 334 | struct hlist_head *head; | 334 | struct hlist_head *head; |
| 335 | struct hlist_node *n; | ||
| 336 | int ver; | 335 | int ver; |
| 337 | int i; | 336 | int i; |
| 338 | 337 | ||
| @@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la | |||
| 340 | while (*bucket < table->n_buckets) { | 339 | while (*bucket < table->n_buckets) { |
| 341 | i = 0; | 340 | i = 0; |
| 342 | head = flex_array_get(table->buckets, *bucket); | 341 | head = flex_array_get(table->buckets, *bucket); |
| 343 | hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { | 342 | hlist_for_each_entry_rcu(flow, head, hash_node[ver]) { |
| 344 | if (i < *last) { | 343 | if (i < *last) { |
| 345 | i++; | 344 | i++; |
| 346 | continue; | 345 | continue; |
| @@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new | |||
| 367 | for (i = 0; i < old->n_buckets; i++) { | 366 | for (i = 0; i < old->n_buckets; i++) { |
| 368 | struct sw_flow *flow; | 367 | struct sw_flow *flow; |
| 369 | struct hlist_head *head; | 368 | struct hlist_head *head; |
| 370 | struct hlist_node *n; | ||
| 371 | 369 | ||
| 372 | head = flex_array_get(old->buckets, i); | 370 | head = flex_array_get(old->buckets, i); |
| 373 | 371 | ||
| 374 | hlist_for_each_entry(flow, n, head, hash_node[old_ver]) | 372 | hlist_for_each_entry(flow, head, hash_node[old_ver]) |
| 375 | ovs_flow_tbl_insert(new, flow); | 373 | ovs_flow_tbl_insert(new, flow); |
| 376 | } | 374 | } |
| 377 | old->keep_flows = true; | 375 | old->keep_flows = true; |
| @@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, | |||
| 766 | struct sw_flow_key *key, int key_len) | 764 | struct sw_flow_key *key, int key_len) |
| 767 | { | 765 | { |
| 768 | struct sw_flow *flow; | 766 | struct sw_flow *flow; |
| 769 | struct hlist_node *n; | ||
| 770 | struct hlist_head *head; | 767 | struct hlist_head *head; |
| 771 | u32 hash; | 768 | u32 hash; |
| 772 | 769 | ||
| 773 | hash = ovs_flow_hash(key, key_len); | 770 | hash = ovs_flow_hash(key, key_len); |
| 774 | 771 | ||
| 775 | head = find_bucket(table, hash); | 772 | head = find_bucket(table, hash); |
| 776 | hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { | 773 | hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { |
| 777 | 774 | ||
| 778 | if (flow->hash == hash && | 775 | if (flow->hash == hash && |
| 779 | !memcmp(&flow->key, key, key_len)) { | 776 | !memcmp(&flow->key, key, key_len)) { |
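The Open vSwitch flow-table hunks deserve a remark: each sw_flow embeds hash_node[2], and the table's node_ver selects the live slot, so a resize can link every flow into the new table through the other slot while RCU readers still traverse the old one. A hedged sketch of the copy step (simplified from flow_table_copy_flows() above; demo_copy_flows is an illustrative name):

static void demo_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver = old->node_ver;
	int i;

	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(old->buckets, i);

		/* Walk the old table's slot; ovs_flow_tbl_insert() links
		 * each flow into 'new' through the other hash_node slot. */
		hlist_for_each_entry(flow, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;	/* the flows now belong to 'new' */
}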
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 70af0bedbac4..ba717cc038b3 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
| @@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name) | |||
| 86 | { | 86 | { |
| 87 | struct hlist_head *bucket = hash_bucket(net, name); | 87 | struct hlist_head *bucket = hash_bucket(net, name); |
| 88 | struct vport *vport; | 88 | struct vport *vport; |
| 89 | struct hlist_node *node; | ||
| 90 | 89 | ||
| 91 | hlist_for_each_entry_rcu(vport, node, bucket, hash_node) | 90 | hlist_for_each_entry_rcu(vport, bucket, hash_node) |
| 92 | if (!strcmp(name, vport->ops->get_name(vport)) && | 91 | if (!strcmp(name, vport->ops->get_name(vport)) && |
| 93 | net_eq(ovs_dp_get_net(vport->dp), net)) | 92 | net_eq(ovs_dp_get_net(vport->dp), net)) |
| 94 | return vport; | 93 | return vport; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c7bfeff10767..1d6793dbfbae 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
| 3263 | static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) | 3263 | static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) |
| 3264 | { | 3264 | { |
| 3265 | struct sock *sk; | 3265 | struct sock *sk; |
| 3266 | struct hlist_node *node; | ||
| 3267 | struct net_device *dev = data; | 3266 | struct net_device *dev = data; |
| 3268 | struct net *net = dev_net(dev); | 3267 | struct net *net = dev_net(dev); |
| 3269 | 3268 | ||
| 3270 | rcu_read_lock(); | 3269 | rcu_read_lock(); |
| 3271 | sk_for_each_rcu(sk, node, &net->packet.sklist) { | 3270 | sk_for_each_rcu(sk, &net->packet.sklist) { |
| 3272 | struct packet_sock *po = pkt_sk(sk); | 3271 | struct packet_sock *po = pkt_sk(sk); |
| 3273 | 3272 | ||
| 3274 | switch (msg) { | 3273 | switch (msg) { |
diff --git a/net/packet/diag.c b/net/packet/diag.c index 8db6e21c46bd..d3fcd1ebef7e 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c | |||
| @@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 172 | struct packet_diag_req *req; | 172 | struct packet_diag_req *req; |
| 173 | struct net *net; | 173 | struct net *net; |
| 174 | struct sock *sk; | 174 | struct sock *sk; |
| 175 | struct hlist_node *node; | ||
| 176 | 175 | ||
| 177 | net = sock_net(skb->sk); | 176 | net = sock_net(skb->sk); |
| 178 | req = nlmsg_data(cb->nlh); | 177 | req = nlmsg_data(cb->nlh); |
| 179 | 178 | ||
| 180 | mutex_lock(&net->packet.sklist_lock); | 179 | mutex_lock(&net->packet.sklist_lock); |
| 181 | sk_for_each(sk, node, &net->packet.sklist) { | 180 | sk_for_each(sk, &net->packet.sklist) { |
| 182 | if (!net_eq(sock_net(sk), net)) | 181 | if (!net_eq(sock_net(sk), net)) |
| 183 | continue; | 182 | continue; |
| 184 | if (num < s_num) | 183 | if (num < s_num) |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 576f22c9c76e..e77411735de8 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
| @@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist, | |||
| 640 | const struct sockaddr_pn *dst, | 640 | const struct sockaddr_pn *dst, |
| 641 | u8 pipe_handle) | 641 | u8 pipe_handle) |
| 642 | { | 642 | { |
| 643 | struct hlist_node *node; | ||
| 644 | struct sock *sknode; | 643 | struct sock *sknode; |
| 645 | u16 dobj = pn_sockaddr_get_object(dst); | 644 | u16 dobj = pn_sockaddr_get_object(dst); |
| 646 | 645 | ||
| 647 | sk_for_each(sknode, node, hlist) { | 646 | sk_for_each(sknode, hlist) { |
| 648 | struct pep_sock *pnnode = pep_sk(sknode); | 647 | struct pep_sock *pnnode = pep_sk(sknode); |
| 649 | 648 | ||
| 650 | /* Ports match, but addresses might not: */ | 649 | /* Ports match, but addresses might not: */ |
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index b7e982782255..1afd1381cdc7 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
| @@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj) | |||
| 76 | */ | 76 | */ |
| 77 | struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | 77 | struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) |
| 78 | { | 78 | { |
| 79 | struct hlist_node *node; | ||
| 80 | struct sock *sknode; | 79 | struct sock *sknode; |
| 81 | struct sock *rval = NULL; | 80 | struct sock *rval = NULL; |
| 82 | u16 obj = pn_sockaddr_get_object(spn); | 81 | u16 obj = pn_sockaddr_get_object(spn); |
| @@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | |||
| 84 | struct hlist_head *hlist = pn_hash_list(obj); | 83 | struct hlist_head *hlist = pn_hash_list(obj); |
| 85 | 84 | ||
| 86 | rcu_read_lock(); | 85 | rcu_read_lock(); |
| 87 | sk_for_each_rcu(sknode, node, hlist) { | 86 | sk_for_each_rcu(sknode, hlist) { |
| 88 | struct pn_sock *pn = pn_sk(sknode); | 87 | struct pn_sock *pn = pn_sk(sknode); |
| 89 | BUG_ON(!pn->sobject); /* unbound socket */ | 88 | BUG_ON(!pn->sobject); /* unbound socket */ |
| 90 | 89 | ||
| @@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) | |||
| 120 | 119 | ||
| 121 | rcu_read_lock(); | 120 | rcu_read_lock(); |
| 122 | for (h = 0; h < PN_HASHSIZE; h++) { | 121 | for (h = 0; h < PN_HASHSIZE; h++) { |
| 123 | struct hlist_node *node; | ||
| 124 | struct sock *sknode; | 122 | struct sock *sknode; |
| 125 | 123 | ||
| 126 | sk_for_each(sknode, node, hlist) { | 124 | sk_for_each(sknode, hlist) { |
| 127 | struct sk_buff *clone; | 125 | struct sk_buff *clone; |
| 128 | 126 | ||
| 129 | if (!net_eq(sock_net(sknode), net)) | 127 | if (!net_eq(sock_net(sknode), net)) |
| @@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) | |||
| 543 | { | 541 | { |
| 544 | struct net *net = seq_file_net(seq); | 542 | struct net *net = seq_file_net(seq); |
| 545 | struct hlist_head *hlist = pnsocks.hlist; | 543 | struct hlist_head *hlist = pnsocks.hlist; |
| 546 | struct hlist_node *node; | ||
| 547 | struct sock *sknode; | 544 | struct sock *sknode; |
| 548 | unsigned int h; | 545 | unsigned int h; |
| 549 | 546 | ||
| 550 | for (h = 0; h < PN_HASHSIZE; h++) { | 547 | for (h = 0; h < PN_HASHSIZE; h++) { |
| 551 | sk_for_each_rcu(sknode, node, hlist) { | 548 | sk_for_each_rcu(sknode, hlist) { |
| 552 | if (!net_eq(net, sock_net(sknode))) | 549 | if (!net_eq(net, sock_net(sknode))) |
| 553 | continue; | 550 | continue; |
| 554 | if (!pos) | 551 | if (!pos) |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 637bde56c9db..b5ad65a0067e 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
| @@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port, | |||
| 52 | struct rds_sock *insert) | 52 | struct rds_sock *insert) |
| 53 | { | 53 | { |
| 54 | struct rds_sock *rs; | 54 | struct rds_sock *rs; |
| 55 | struct hlist_node *node; | ||
| 56 | struct hlist_head *head = hash_to_bucket(addr, port); | 55 | struct hlist_head *head = hash_to_bucket(addr, port); |
| 57 | u64 cmp; | 56 | u64 cmp; |
| 58 | u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); | 57 | u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); |
| 59 | 58 | ||
| 60 | rcu_read_lock(); | 59 | rcu_read_lock(); |
| 61 | hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) { | 60 | hlist_for_each_entry_rcu(rs, head, rs_bound_node) { |
| 62 | cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | | 61 | cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | |
| 63 | be16_to_cpu(rs->rs_bound_port); | 62 | be16_to_cpu(rs->rs_bound_port); |
| 64 | 63 | ||
diff --git a/net/rds/connection.c b/net/rds/connection.c index 9e07c756d1f9..642ad42c416b 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
| @@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head, | |||
| 69 | struct rds_transport *trans) | 69 | struct rds_transport *trans) |
| 70 | { | 70 | { |
| 71 | struct rds_connection *conn, *ret = NULL; | 71 | struct rds_connection *conn, *ret = NULL; |
| 72 | struct hlist_node *pos; | ||
| 73 | 72 | ||
| 74 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 73 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
| 75 | if (conn->c_faddr == faddr && conn->c_laddr == laddr && | 74 | if (conn->c_faddr == faddr && conn->c_laddr == laddr && |
| 76 | conn->c_trans == trans) { | 75 | conn->c_trans == trans) { |
| 77 | ret = conn; | 76 | ret = conn; |
| @@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, | |||
| 376 | int want_send) | 375 | int want_send) |
| 377 | { | 376 | { |
| 378 | struct hlist_head *head; | 377 | struct hlist_head *head; |
| 379 | struct hlist_node *pos; | ||
| 380 | struct list_head *list; | 378 | struct list_head *list; |
| 381 | struct rds_connection *conn; | 379 | struct rds_connection *conn; |
| 382 | struct rds_message *rm; | 380 | struct rds_message *rm; |
| @@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, | |||
| 390 | 388 | ||
| 391 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); | 389 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); |
| 392 | i++, head++) { | 390 | i++, head++) { |
| 393 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 391 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
| 394 | if (want_send) | 392 | if (want_send) |
| 395 | list = &conn->c_send_queue; | 393 | list = &conn->c_send_queue; |
| 396 | else | 394 | else |
| @@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, | |||
| 439 | { | 437 | { |
| 440 | uint64_t buffer[(item_len + 7) / 8]; | 438 | uint64_t buffer[(item_len + 7) / 8]; |
| 441 | struct hlist_head *head; | 439 | struct hlist_head *head; |
| 442 | struct hlist_node *pos; | ||
| 443 | struct rds_connection *conn; | 440 | struct rds_connection *conn; |
| 444 | size_t i; | 441 | size_t i; |
| 445 | 442 | ||
| @@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, | |||
| 450 | 447 | ||
| 451 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); | 448 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); |
| 452 | i++, head++) { | 449 | i++, head++) { |
| 453 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 450 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
| 454 | 451 | ||
| 455 | /* XXX no c_lock usage.. */ | 452 | /* XXX no c_lock usage.. */ |
| 456 | if (!visitor(conn, buffer)) | 453 | if (!visitor(conn, buffer)) |
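The rds hunks show the RCU flavor of the same conversion. The walk itself is unchanged: the reader holds rcu_read_lock(), iterates with hlist_for_each_entry_rcu() (now without the node cursor), and writers still publish through the _rcu add/del helpers. A hedged sketch of the lookup pattern above; find_conn() is an illustrative wrapper name, not part of the patch:

    static struct rds_connection *find_conn(struct hlist_head *head,
                                            __be32 laddr, __be32 faddr,
                                            struct rds_transport *trans)
    {
            struct rds_connection *conn;

            /* caller holds rcu_read_lock() */
            hlist_for_each_entry_rcu(conn, head, c_hash_node)
                    if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
                        conn->c_trans == trans)
                            return conn;
            return NULL;
    }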
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index b768fe9d5e7a..cf68e6e4054a 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk) | |||
| 165 | void rose_kill_by_neigh(struct rose_neigh *neigh) | 165 | void rose_kill_by_neigh(struct rose_neigh *neigh) |
| 166 | { | 166 | { |
| 167 | struct sock *s; | 167 | struct sock *s; |
| 168 | struct hlist_node *node; | ||
| 169 | 168 | ||
| 170 | spin_lock_bh(&rose_list_lock); | 169 | spin_lock_bh(&rose_list_lock); |
| 171 | sk_for_each(s, node, &rose_list) { | 170 | sk_for_each(s, &rose_list) { |
| 172 | struct rose_sock *rose = rose_sk(s); | 171 | struct rose_sock *rose = rose_sk(s); |
| 173 | 172 | ||
| 174 | if (rose->neighbour == neigh) { | 173 | if (rose->neighbour == neigh) { |
| @@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh) | |||
| 186 | static void rose_kill_by_device(struct net_device *dev) | 185 | static void rose_kill_by_device(struct net_device *dev) |
| 187 | { | 186 | { |
| 188 | struct sock *s; | 187 | struct sock *s; |
| 189 | struct hlist_node *node; | ||
| 190 | 188 | ||
| 191 | spin_lock_bh(&rose_list_lock); | 189 | spin_lock_bh(&rose_list_lock); |
| 192 | sk_for_each(s, node, &rose_list) { | 190 | sk_for_each(s, &rose_list) { |
| 193 | struct rose_sock *rose = rose_sk(s); | 191 | struct rose_sock *rose = rose_sk(s); |
| 194 | 192 | ||
| 195 | if (rose->device == dev) { | 193 | if (rose->device == dev) { |
| @@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk) | |||
| 246 | static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) | 244 | static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) |
| 247 | { | 245 | { |
| 248 | struct sock *s; | 246 | struct sock *s; |
| 249 | struct hlist_node *node; | ||
| 250 | 247 | ||
| 251 | spin_lock_bh(&rose_list_lock); | 248 | spin_lock_bh(&rose_list_lock); |
| 252 | sk_for_each(s, node, &rose_list) { | 249 | sk_for_each(s, &rose_list) { |
| 253 | struct rose_sock *rose = rose_sk(s); | 250 | struct rose_sock *rose = rose_sk(s); |
| 254 | 251 | ||
| 255 | if (!rosecmp(&rose->source_addr, addr) && | 252 | if (!rosecmp(&rose->source_addr, addr) && |
| @@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) | |||
| 258 | goto found; | 255 | goto found; |
| 259 | } | 256 | } |
| 260 | 257 | ||
| 261 | sk_for_each(s, node, &rose_list) { | 258 | sk_for_each(s, &rose_list) { |
| 262 | struct rose_sock *rose = rose_sk(s); | 259 | struct rose_sock *rose = rose_sk(s); |
| 263 | 260 | ||
| 264 | if (!rosecmp(&rose->source_addr, addr) && | 261 | if (!rosecmp(&rose->source_addr, addr) && |
| @@ -278,10 +275,9 @@ found: | |||
| 278 | struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) | 275 | struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) |
| 279 | { | 276 | { |
| 280 | struct sock *s; | 277 | struct sock *s; |
| 281 | struct hlist_node *node; | ||
| 282 | 278 | ||
| 283 | spin_lock_bh(&rose_list_lock); | 279 | spin_lock_bh(&rose_list_lock); |
| 284 | sk_for_each(s, node, &rose_list) { | 280 | sk_for_each(s, &rose_list) { |
| 285 | struct rose_sock *rose = rose_sk(s); | 281 | struct rose_sock *rose = rose_sk(s); |
| 286 | 282 | ||
| 287 | if (rose->lci == lci && rose->neighbour == neigh) | 283 | if (rose->lci == lci && rose->neighbour == neigh) |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a181b484812a..c297e2a8e2a1 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n) | |||
| 545 | void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) | 545 | void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) |
| 546 | { | 546 | { |
| 547 | struct Qdisc_class_common *cl; | 547 | struct Qdisc_class_common *cl; |
| 548 | struct hlist_node *n, *next; | 548 | struct hlist_node *next; |
| 549 | struct hlist_head *nhash, *ohash; | 549 | struct hlist_head *nhash, *ohash; |
| 550 | unsigned int nsize, nmask, osize; | 550 | unsigned int nsize, nmask, osize; |
| 551 | unsigned int i, h; | 551 | unsigned int i, h; |
| @@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) | |||
| 564 | 564 | ||
| 565 | sch_tree_lock(sch); | 565 | sch_tree_lock(sch); |
| 566 | for (i = 0; i < osize; i++) { | 566 | for (i = 0; i < osize; i++) { |
| 567 | hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { | 567 | hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) { |
| 568 | h = qdisc_class_hash(cl->classid, nmask); | 568 | h = qdisc_class_hash(cl->classid, nmask); |
| 569 | hlist_add_head(&cl->hnode, &nhash[h]); | 569 | hlist_add_head(&cl->hnode, &nhash[h]); |
| 570 | } | 570 | } |
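qdisc_class_hash_grow() keeps one cursor, next, because it uses the _safe variant: hlist_add_head() moves cl onto the new table mid-walk, so the macro must have read the following node before the body runs. A sketch of why the lookahead survives:

    struct Qdisc_class_common *cl;
    struct hlist_node *next;    /* lookahead, fetched before cl is moved */

    hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
            h = qdisc_class_hash(cl->classid, nmask);
            hlist_add_head(&cl->hnode, &nhash[h]); /* unlinks cl from ohash[i] */
    }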
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 0e19948470b8..13aa47aa2ffb 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this) | |||
| 1041 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) | 1041 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) |
| 1042 | { | 1042 | { |
| 1043 | struct cbq_class *cl; | 1043 | struct cbq_class *cl; |
| 1044 | struct hlist_node *n; | ||
| 1045 | unsigned int h; | 1044 | unsigned int h; |
| 1046 | 1045 | ||
| 1047 | if (q->quanta[prio] == 0) | 1046 | if (q->quanta[prio] == 0) |
| 1048 | return; | 1047 | return; |
| 1049 | 1048 | ||
| 1050 | for (h = 0; h < q->clhash.hashsize; h++) { | 1049 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 1051 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 1050 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
| 1052 | /* BUGGGG... Beware! This expression suffer of | 1051 | /* BUGGGG... Beware! This expression suffer of |
| 1053 | * arithmetic overflows! | 1052 | * arithmetic overflows! |
| 1054 | */ | 1053 | */ |
| @@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl) | |||
| 1087 | continue; | 1086 | continue; |
| 1088 | 1087 | ||
| 1089 | for (h = 0; h < q->clhash.hashsize; h++) { | 1088 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 1090 | struct hlist_node *n; | ||
| 1091 | struct cbq_class *c; | 1089 | struct cbq_class *c; |
| 1092 | 1090 | ||
| 1093 | hlist_for_each_entry(c, n, &q->clhash.hash[h], | 1091 | hlist_for_each_entry(c, &q->clhash.hash[h], |
| 1094 | common.hnode) { | 1092 | common.hnode) { |
| 1095 | if (c->split == split && c->level < level && | 1093 | if (c->split == split && c->level < level && |
| 1096 | c->defmap & (1<<i)) { | 1094 | c->defmap & (1<<i)) { |
| @@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch) | |||
| 1210 | { | 1208 | { |
| 1211 | struct cbq_sched_data *q = qdisc_priv(sch); | 1209 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1212 | struct cbq_class *cl; | 1210 | struct cbq_class *cl; |
| 1213 | struct hlist_node *n; | ||
| 1214 | int prio; | 1211 | int prio; |
| 1215 | unsigned int h; | 1212 | unsigned int h; |
| 1216 | 1213 | ||
| @@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch) | |||
| 1228 | q->active[prio] = NULL; | 1225 | q->active[prio] = NULL; |
| 1229 | 1226 | ||
| 1230 | for (h = 0; h < q->clhash.hashsize; h++) { | 1227 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 1231 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 1228 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
| 1232 | qdisc_reset(cl->q); | 1229 | qdisc_reset(cl->q); |
| 1233 | 1230 | ||
| 1234 | cl->next_alive = NULL; | 1231 | cl->next_alive = NULL; |
| @@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) | |||
| 1697 | static void cbq_destroy(struct Qdisc *sch) | 1694 | static void cbq_destroy(struct Qdisc *sch) |
| 1698 | { | 1695 | { |
| 1699 | struct cbq_sched_data *q = qdisc_priv(sch); | 1696 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1700 | struct hlist_node *n, *next; | 1697 | struct hlist_node *next; |
| 1701 | struct cbq_class *cl; | 1698 | struct cbq_class *cl; |
| 1702 | unsigned int h; | 1699 | unsigned int h; |
| 1703 | 1700 | ||
| @@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch) | |||
| 1710 | * be bound to classes which have been destroyed already. --TGR '04 | 1707 | * be bound to classes which have been destroyed already. --TGR '04 |
| 1711 | */ | 1708 | */ |
| 1712 | for (h = 0; h < q->clhash.hashsize; h++) { | 1709 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 1713 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) | 1710 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) |
| 1714 | tcf_destroy_chain(&cl->filter_list); | 1711 | tcf_destroy_chain(&cl->filter_list); |
| 1715 | } | 1712 | } |
| 1716 | for (h = 0; h < q->clhash.hashsize; h++) { | 1713 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 1717 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], | 1714 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], |
| 1718 | common.hnode) | 1715 | common.hnode) |
| 1719 | cbq_destroy_class(sch, cl); | 1716 | cbq_destroy_class(sch, cl); |
| 1720 | } | 1717 | } |
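The cbq, drr, hfsc, htb and qfq teardown paths below all share this two-pass shape: a plain hlist_for_each_entry() pass that only touches each class (nothing is unlinked, so no lookahead is needed), then an hlist_for_each_entry_safe() pass that frees entries while walking. Condensed sketch of the pair:

    for (h = 0; h < q->clhash.hashsize; h++)
            hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
                    tcf_destroy_chain(&cl->filter_list);  /* read-only walk */

    for (h = 0; h < q->clhash.hashsize; h++)
            hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
                                      common.hnode)
                    cbq_destroy_class(sch, cl);           /* frees cl */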
| @@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
| 2013 | { | 2010 | { |
| 2014 | struct cbq_sched_data *q = qdisc_priv(sch); | 2011 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 2015 | struct cbq_class *cl; | 2012 | struct cbq_class *cl; |
| 2016 | struct hlist_node *n; | ||
| 2017 | unsigned int h; | 2013 | unsigned int h; |
| 2018 | 2014 | ||
| 2019 | if (arg->stop) | 2015 | if (arg->stop) |
| 2020 | return; | 2016 | return; |
| 2021 | 2017 | ||
| 2022 | for (h = 0; h < q->clhash.hashsize; h++) { | 2018 | for (h = 0; h < q->clhash.hashsize; h++) { |
| 2023 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 2019 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
| 2024 | if (arg->count < arg->skip) { | 2020 | if (arg->count < arg->skip) { |
| 2025 | arg->count++; | 2021 | arg->count++; |
| 2026 | continue; | 2022 | continue; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 71e50c80315f..759b308d1a8d 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
| 293 | { | 293 | { |
| 294 | struct drr_sched *q = qdisc_priv(sch); | 294 | struct drr_sched *q = qdisc_priv(sch); |
| 295 | struct drr_class *cl; | 295 | struct drr_class *cl; |
| 296 | struct hlist_node *n; | ||
| 297 | unsigned int i; | 296 | unsigned int i; |
| 298 | 297 | ||
| 299 | if (arg->stop) | 298 | if (arg->stop) |
| 300 | return; | 299 | return; |
| 301 | 300 | ||
| 302 | for (i = 0; i < q->clhash.hashsize; i++) { | 301 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 303 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 302 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 304 | if (arg->count < arg->skip) { | 303 | if (arg->count < arg->skip) { |
| 305 | arg->count++; | 304 | arg->count++; |
| 306 | continue; | 305 | continue; |
| @@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch) | |||
| 451 | { | 450 | { |
| 452 | struct drr_sched *q = qdisc_priv(sch); | 451 | struct drr_sched *q = qdisc_priv(sch); |
| 453 | struct drr_class *cl; | 452 | struct drr_class *cl; |
| 454 | struct hlist_node *n; | ||
| 455 | unsigned int i; | 453 | unsigned int i; |
| 456 | 454 | ||
| 457 | for (i = 0; i < q->clhash.hashsize; i++) { | 455 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 458 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 456 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 459 | if (cl->qdisc->q.qlen) | 457 | if (cl->qdisc->q.qlen) |
| 460 | list_del(&cl->alist); | 458 | list_del(&cl->alist); |
| 461 | qdisc_reset(cl->qdisc); | 459 | qdisc_reset(cl->qdisc); |
| @@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch) | |||
| 468 | { | 466 | { |
| 469 | struct drr_sched *q = qdisc_priv(sch); | 467 | struct drr_sched *q = qdisc_priv(sch); |
| 470 | struct drr_class *cl; | 468 | struct drr_class *cl; |
| 471 | struct hlist_node *n, *next; | 469 | struct hlist_node *next; |
| 472 | unsigned int i; | 470 | unsigned int i; |
| 473 | 471 | ||
| 474 | tcf_destroy_chain(&q->filter_list); | 472 | tcf_destroy_chain(&q->filter_list); |
| 475 | 473 | ||
| 476 | for (i = 0; i < q->clhash.hashsize; i++) { | 474 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 477 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 475 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
| 478 | common.hnode) | 476 | common.hnode) |
| 479 | drr_destroy_class(sch, cl); | 477 | drr_destroy_class(sch, cl); |
| 480 | } | 478 | } |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6c2ec4510540..9facea03faeb 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -1389,7 +1389,6 @@ static void | |||
| 1389 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 1389 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
| 1390 | { | 1390 | { |
| 1391 | struct hfsc_sched *q = qdisc_priv(sch); | 1391 | struct hfsc_sched *q = qdisc_priv(sch); |
| 1392 | struct hlist_node *n; | ||
| 1393 | struct hfsc_class *cl; | 1392 | struct hfsc_class *cl; |
| 1394 | unsigned int i; | 1393 | unsigned int i; |
| 1395 | 1394 | ||
| @@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
| 1397 | return; | 1396 | return; |
| 1398 | 1397 | ||
| 1399 | for (i = 0; i < q->clhash.hashsize; i++) { | 1398 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1400 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], | 1399 | hlist_for_each_entry(cl, &q->clhash.hash[i], |
| 1401 | cl_common.hnode) { | 1400 | cl_common.hnode) { |
| 1402 | if (arg->count < arg->skip) { | 1401 | if (arg->count < arg->skip) { |
| 1403 | arg->count++; | 1402 | arg->count++; |
| @@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch) | |||
| 1523 | { | 1522 | { |
| 1524 | struct hfsc_sched *q = qdisc_priv(sch); | 1523 | struct hfsc_sched *q = qdisc_priv(sch); |
| 1525 | struct hfsc_class *cl; | 1524 | struct hfsc_class *cl; |
| 1526 | struct hlist_node *n; | ||
| 1527 | unsigned int i; | 1525 | unsigned int i; |
| 1528 | 1526 | ||
| 1529 | for (i = 0; i < q->clhash.hashsize; i++) { | 1527 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1530 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1528 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
| 1531 | hfsc_reset_class(cl); | 1529 | hfsc_reset_class(cl); |
| 1532 | } | 1530 | } |
| 1533 | q->eligible = RB_ROOT; | 1531 | q->eligible = RB_ROOT; |
| @@ -1540,16 +1538,16 @@ static void | |||
| 1540 | hfsc_destroy_qdisc(struct Qdisc *sch) | 1538 | hfsc_destroy_qdisc(struct Qdisc *sch) |
| 1541 | { | 1539 | { |
| 1542 | struct hfsc_sched *q = qdisc_priv(sch); | 1540 | struct hfsc_sched *q = qdisc_priv(sch); |
| 1543 | struct hlist_node *n, *next; | 1541 | struct hlist_node *next; |
| 1544 | struct hfsc_class *cl; | 1542 | struct hfsc_class *cl; |
| 1545 | unsigned int i; | 1543 | unsigned int i; |
| 1546 | 1544 | ||
| 1547 | for (i = 0; i < q->clhash.hashsize; i++) { | 1545 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1548 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1546 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
| 1549 | tcf_destroy_chain(&cl->filter_list); | 1547 | tcf_destroy_chain(&cl->filter_list); |
| 1550 | } | 1548 | } |
| 1551 | for (i = 0; i < q->clhash.hashsize; i++) { | 1549 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1552 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1550 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
| 1553 | cl_common.hnode) | 1551 | cl_common.hnode) |
| 1554 | hfsc_destroy_class(sch, cl); | 1552 | hfsc_destroy_class(sch, cl); |
| 1555 | } | 1553 | } |
| @@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
| 1564 | unsigned char *b = skb_tail_pointer(skb); | 1562 | unsigned char *b = skb_tail_pointer(skb); |
| 1565 | struct tc_hfsc_qopt qopt; | 1563 | struct tc_hfsc_qopt qopt; |
| 1566 | struct hfsc_class *cl; | 1564 | struct hfsc_class *cl; |
| 1567 | struct hlist_node *n; | ||
| 1568 | unsigned int i; | 1565 | unsigned int i; |
| 1569 | 1566 | ||
| 1570 | sch->qstats.backlog = 0; | 1567 | sch->qstats.backlog = 0; |
| 1571 | for (i = 0; i < q->clhash.hashsize; i++) { | 1568 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1572 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1569 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
| 1573 | sch->qstats.backlog += cl->qdisc->qstats.backlog; | 1570 | sch->qstats.backlog += cl->qdisc->qstats.backlog; |
| 1574 | } | 1571 | } |
| 1575 | 1572 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 03c2692ca01e..571f1d211f4d 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch) | |||
| 949 | { | 949 | { |
| 950 | struct htb_sched *q = qdisc_priv(sch); | 950 | struct htb_sched *q = qdisc_priv(sch); |
| 951 | struct htb_class *cl; | 951 | struct htb_class *cl; |
| 952 | struct hlist_node *n; | ||
| 953 | unsigned int i; | 952 | unsigned int i; |
| 954 | 953 | ||
| 955 | for (i = 0; i < q->clhash.hashsize; i++) { | 954 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 956 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 955 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 957 | if (cl->level) | 956 | if (cl->level) |
| 958 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); | 957 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); |
| 959 | else { | 958 | else { |
| @@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
| 1218 | static void htb_destroy(struct Qdisc *sch) | 1217 | static void htb_destroy(struct Qdisc *sch) |
| 1219 | { | 1218 | { |
| 1220 | struct htb_sched *q = qdisc_priv(sch); | 1219 | struct htb_sched *q = qdisc_priv(sch); |
| 1221 | struct hlist_node *n, *next; | 1220 | struct hlist_node *next; |
| 1222 | struct htb_class *cl; | 1221 | struct htb_class *cl; |
| 1223 | unsigned int i; | 1222 | unsigned int i; |
| 1224 | 1223 | ||
| @@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch) | |||
| 1232 | tcf_destroy_chain(&q->filter_list); | 1231 | tcf_destroy_chain(&q->filter_list); |
| 1233 | 1232 | ||
| 1234 | for (i = 0; i < q->clhash.hashsize; i++) { | 1233 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1235 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) | 1234 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) |
| 1236 | tcf_destroy_chain(&cl->filter_list); | 1235 | tcf_destroy_chain(&cl->filter_list); |
| 1237 | } | 1236 | } |
| 1238 | for (i = 0; i < q->clhash.hashsize; i++) { | 1237 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1239 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1238 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
| 1240 | common.hnode) | 1239 | common.hnode) |
| 1241 | htb_destroy_class(sch, cl); | 1240 | htb_destroy_class(sch, cl); |
| 1242 | } | 1241 | } |
| @@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
| 1516 | { | 1515 | { |
| 1517 | struct htb_sched *q = qdisc_priv(sch); | 1516 | struct htb_sched *q = qdisc_priv(sch); |
| 1518 | struct htb_class *cl; | 1517 | struct htb_class *cl; |
| 1519 | struct hlist_node *n; | ||
| 1520 | unsigned int i; | 1518 | unsigned int i; |
| 1521 | 1519 | ||
| 1522 | if (arg->stop) | 1520 | if (arg->stop) |
| 1523 | return; | 1521 | return; |
| 1524 | 1522 | ||
| 1525 | for (i = 0; i < q->clhash.hashsize; i++) { | 1523 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1526 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 1524 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 1527 | if (arg->count < arg->skip) { | 1525 | if (arg->count < arg->skip) { |
| 1528 | arg->count++; | 1526 | arg->count++; |
| 1529 | continue; | 1527 | continue; |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 6ed37652a4c3..e9a77f621c3d 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, | |||
| 276 | u32 lmax, u32 weight) | 276 | u32 lmax, u32 weight) |
| 277 | { | 277 | { |
| 278 | struct qfq_aggregate *agg; | 278 | struct qfq_aggregate *agg; |
| 279 | struct hlist_node *n; | ||
| 280 | 279 | ||
| 281 | hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next) | 280 | hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) |
| 282 | if (agg->lmax == lmax && agg->class_weight == weight) | 281 | if (agg->lmax == lmax && agg->class_weight == weight) |
| 283 | return agg; | 282 | return agg; |
| 284 | 283 | ||
| @@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
| 670 | { | 669 | { |
| 671 | struct qfq_sched *q = qdisc_priv(sch); | 670 | struct qfq_sched *q = qdisc_priv(sch); |
| 672 | struct qfq_class *cl; | 671 | struct qfq_class *cl; |
| 673 | struct hlist_node *n; | ||
| 674 | unsigned int i; | 672 | unsigned int i; |
| 675 | 673 | ||
| 676 | if (arg->stop) | 674 | if (arg->stop) |
| 677 | return; | 675 | return; |
| 678 | 676 | ||
| 679 | for (i = 0; i < q->clhash.hashsize; i++) { | 677 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 680 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 678 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 681 | if (arg->count < arg->skip) { | 679 | if (arg->count < arg->skip) { |
| 682 | arg->count++; | 680 | arg->count++; |
| 683 | continue; | 681 | continue; |
| @@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q, | |||
| 1376 | struct hlist_head *slot) | 1374 | struct hlist_head *slot) |
| 1377 | { | 1375 | { |
| 1378 | struct qfq_aggregate *agg; | 1376 | struct qfq_aggregate *agg; |
| 1379 | struct hlist_node *n; | ||
| 1380 | struct qfq_class *cl; | 1377 | struct qfq_class *cl; |
| 1381 | unsigned int len; | 1378 | unsigned int len; |
| 1382 | 1379 | ||
| 1383 | hlist_for_each_entry(agg, n, slot, next) { | 1380 | hlist_for_each_entry(agg, slot, next) { |
| 1384 | list_for_each_entry(cl, &agg->active, alist) { | 1381 | list_for_each_entry(cl, &agg->active, alist) { |
| 1385 | 1382 | ||
| 1386 | if (!cl->qdisc->ops->drop) | 1383 | if (!cl->qdisc->ops->drop) |
| @@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch) | |||
| 1459 | { | 1456 | { |
| 1460 | struct qfq_sched *q = qdisc_priv(sch); | 1457 | struct qfq_sched *q = qdisc_priv(sch); |
| 1461 | struct qfq_class *cl; | 1458 | struct qfq_class *cl; |
| 1462 | struct hlist_node *n; | ||
| 1463 | unsigned int i; | 1459 | unsigned int i; |
| 1464 | 1460 | ||
| 1465 | for (i = 0; i < q->clhash.hashsize; i++) { | 1461 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1466 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 1462 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
| 1467 | if (cl->qdisc->q.qlen > 0) | 1463 | if (cl->qdisc->q.qlen > 0) |
| 1468 | qfq_deactivate_class(q, cl); | 1464 | qfq_deactivate_class(q, cl); |
| 1469 | 1465 | ||
| @@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch) | |||
| 1477 | { | 1473 | { |
| 1478 | struct qfq_sched *q = qdisc_priv(sch); | 1474 | struct qfq_sched *q = qdisc_priv(sch); |
| 1479 | struct qfq_class *cl; | 1475 | struct qfq_class *cl; |
| 1480 | struct hlist_node *n, *next; | 1476 | struct hlist_node *next; |
| 1481 | unsigned int i; | 1477 | unsigned int i; |
| 1482 | 1478 | ||
| 1483 | tcf_destroy_chain(&q->filter_list); | 1479 | tcf_destroy_chain(&q->filter_list); |
| 1484 | 1480 | ||
| 1485 | for (i = 0; i < q->clhash.hashsize; i++) { | 1481 | for (i = 0; i < q->clhash.hashsize; i++) { |
| 1486 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1482 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
| 1487 | common.hnode) { | 1483 | common.hnode) { |
| 1488 | qfq_destroy_class(sch, cl); | 1484 | qfq_destroy_class(sch, cl); |
| 1489 | } | 1485 | } |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 2f95f5a5145d..43cd0dd9149d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -1591,32 +1591,31 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, | |||
| 1591 | /* Set an association id for a given association */ | 1591 | /* Set an association id for a given association */ |
| 1592 | int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) | 1592 | int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) |
| 1593 | { | 1593 | { |
| 1594 | int assoc_id; | 1594 | bool preload = gfp & __GFP_WAIT; |
| 1595 | int error = 0; | 1595 | int ret; |
| 1596 | 1596 | ||
| 1597 | /* If the id is already assigned, keep it. */ | 1597 | /* If the id is already assigned, keep it. */ |
| 1598 | if (asoc->assoc_id) | 1598 | if (asoc->assoc_id) |
| 1599 | return error; | 1599 | return 0; |
| 1600 | retry: | ||
| 1601 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) | ||
| 1602 | return -ENOMEM; | ||
| 1603 | 1600 | ||
| 1601 | if (preload) | ||
| 1602 | idr_preload(gfp); | ||
| 1604 | spin_lock_bh(&sctp_assocs_id_lock); | 1603 | spin_lock_bh(&sctp_assocs_id_lock); |
| 1605 | error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, | 1604 | /* 0 is not a valid id, idr_low is always >= 1 */ |
| 1606 | idr_low, &assoc_id); | 1605 | ret = idr_alloc(&sctp_assocs_id, asoc, idr_low, 0, GFP_NOWAIT); |
| 1607 | if (!error) { | 1606 | if (ret >= 0) { |
| 1608 | idr_low = assoc_id + 1; | 1607 | idr_low = ret + 1; |
| 1609 | if (idr_low == INT_MAX) | 1608 | if (idr_low == INT_MAX) |
| 1610 | idr_low = 1; | 1609 | idr_low = 1; |
| 1611 | } | 1610 | } |
| 1612 | spin_unlock_bh(&sctp_assocs_id_lock); | 1611 | spin_unlock_bh(&sctp_assocs_id_lock); |
| 1613 | if (error == -EAGAIN) | 1612 | if (preload) |
| 1614 | goto retry; | 1613 | idr_preload_end(); |
| 1615 | else if (error) | 1614 | if (ret < 0) |
| 1616 | return error; | 1615 | return ret; |
| 1617 | 1616 | ||
| 1618 | asoc->assoc_id = (sctp_assoc_t) assoc_id; | 1617 | asoc->assoc_id = (sctp_assoc_t)ret; |
| 1619 | return error; | 1618 | return 0; |
| 1620 | } | 1619 | } |
| 1621 | 1620 | ||
| 1622 | /* Free the ASCONF queue */ | 1621 | /* Free the ASCONF queue */ |
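sctp_assoc_set_id() is converted from the deprecated idr_pre_get()/idr_get_new_above() retry loop to the idr_preload()/idr_alloc() API. idr_alloc(idr, ptr, start, end, gfp) returns the new id (at least start; end == 0 means no upper bound) or a negative errno, and the preload step is only taken when the caller's gfp mask allows sleeping. A minimal sketch of the idiom, with placeholder lock, idr and object names:

    int id;

    idr_preload(GFP_KERNEL);            /* stock per-cpu nodes; may sleep */
    spin_lock_bh(&my_lock);
    id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT); /* ids >= 1, no cap */
    spin_unlock_bh(&my_lock);
    idr_preload_end();

    if (id < 0)
            return id;                  /* -ENOMEM or -ENOSPC */
    obj->id = id;
    return 0;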
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 73aad3d16a45..2b3ef03c6098 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
| @@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( | |||
| 332 | struct sctp_transport *t = NULL; | 332 | struct sctp_transport *t = NULL; |
| 333 | struct sctp_hashbucket *head; | 333 | struct sctp_hashbucket *head; |
| 334 | struct sctp_ep_common *epb; | 334 | struct sctp_ep_common *epb; |
| 335 | struct hlist_node *node; | ||
| 336 | int hash; | 335 | int hash; |
| 337 | int rport; | 336 | int rport; |
| 338 | 337 | ||
| @@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( | |||
| 350 | rport); | 349 | rport); |
| 351 | head = &sctp_assoc_hashtable[hash]; | 350 | head = &sctp_assoc_hashtable[hash]; |
| 352 | read_lock(&head->lock); | 351 | read_lock(&head->lock); |
| 353 | sctp_for_each_hentry(epb, node, &head->chain) { | 352 | sctp_for_each_hentry(epb, &head->chain) { |
| 354 | tmp = sctp_assoc(epb); | 353 | tmp = sctp_assoc(epb); |
| 355 | if (tmp->ep != ep || rport != tmp->peer.port) | 354 | if (tmp->ep != ep || rport != tmp->peer.port) |
| 356 | continue; | 355 | continue; |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 965bbbbe48d4..4b2c83146aa7 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
| @@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, | |||
| 784 | struct sctp_hashbucket *head; | 784 | struct sctp_hashbucket *head; |
| 785 | struct sctp_ep_common *epb; | 785 | struct sctp_ep_common *epb; |
| 786 | struct sctp_endpoint *ep; | 786 | struct sctp_endpoint *ep; |
| 787 | struct hlist_node *node; | ||
| 788 | int hash; | 787 | int hash; |
| 789 | 788 | ||
| 790 | hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); | 789 | hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); |
| 791 | head = &sctp_ep_hashtable[hash]; | 790 | head = &sctp_ep_hashtable[hash]; |
| 792 | read_lock(&head->lock); | 791 | read_lock(&head->lock); |
| 793 | sctp_for_each_hentry(epb, node, &head->chain) { | 792 | sctp_for_each_hentry(epb, &head->chain) { |
| 794 | ep = sctp_ep(epb); | 793 | ep = sctp_ep(epb); |
| 795 | if (sctp_endpoint_is_match(ep, net, laddr)) | 794 | if (sctp_endpoint_is_match(ep, net, laddr)) |
| 796 | goto hit; | 795 | goto hit; |
| @@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association( | |||
| 876 | struct sctp_ep_common *epb; | 875 | struct sctp_ep_common *epb; |
| 877 | struct sctp_association *asoc; | 876 | struct sctp_association *asoc; |
| 878 | struct sctp_transport *transport; | 877 | struct sctp_transport *transport; |
| 879 | struct hlist_node *node; | ||
| 880 | int hash; | 878 | int hash; |
| 881 | 879 | ||
| 882 | /* Optimize here for direct hit, only listening connections can | 880 | /* Optimize here for direct hit, only listening connections can |
| @@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association( | |||
| 886 | ntohs(peer->v4.sin_port)); | 884 | ntohs(peer->v4.sin_port)); |
| 887 | head = &sctp_assoc_hashtable[hash]; | 885 | head = &sctp_assoc_hashtable[hash]; |
| 888 | read_lock(&head->lock); | 886 | read_lock(&head->lock); |
| 889 | sctp_for_each_hentry(epb, node, &head->chain) { | 887 | sctp_for_each_hentry(epb, &head->chain) { |
| 890 | asoc = sctp_assoc(epb); | 888 | asoc = sctp_assoc(epb); |
| 891 | transport = sctp_assoc_is_match(asoc, net, local, peer); | 889 | transport = sctp_assoc_is_match(asoc, net, local, peer); |
| 892 | if (transport) | 890 | if (transport) |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 8c19e97262ca..ab3bba8cb0a8 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
| @@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) | |||
| 213 | struct sctp_ep_common *epb; | 213 | struct sctp_ep_common *epb; |
| 214 | struct sctp_endpoint *ep; | 214 | struct sctp_endpoint *ep; |
| 215 | struct sock *sk; | 215 | struct sock *sk; |
| 216 | struct hlist_node *node; | ||
| 217 | int hash = *(loff_t *)v; | 216 | int hash = *(loff_t *)v; |
| 218 | 217 | ||
| 219 | if (hash >= sctp_ep_hashsize) | 218 | if (hash >= sctp_ep_hashsize) |
| @@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) | |||
| 222 | head = &sctp_ep_hashtable[hash]; | 221 | head = &sctp_ep_hashtable[hash]; |
| 223 | sctp_local_bh_disable(); | 222 | sctp_local_bh_disable(); |
| 224 | read_lock(&head->lock); | 223 | read_lock(&head->lock); |
| 225 | sctp_for_each_hentry(epb, node, &head->chain) { | 224 | sctp_for_each_hentry(epb, &head->chain) { |
| 226 | ep = sctp_ep(epb); | 225 | ep = sctp_ep(epb); |
| 227 | sk = epb->sk; | 226 | sk = epb->sk; |
| 228 | if (!net_eq(sock_net(sk), seq_file_net(seq))) | 227 | if (!net_eq(sock_net(sk), seq_file_net(seq))) |
| @@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
| 321 | struct sctp_ep_common *epb; | 320 | struct sctp_ep_common *epb; |
| 322 | struct sctp_association *assoc; | 321 | struct sctp_association *assoc; |
| 323 | struct sock *sk; | 322 | struct sock *sk; |
| 324 | struct hlist_node *node; | ||
| 325 | int hash = *(loff_t *)v; | 323 | int hash = *(loff_t *)v; |
| 326 | 324 | ||
| 327 | if (hash >= sctp_assoc_hashsize) | 325 | if (hash >= sctp_assoc_hashsize) |
| @@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
| 330 | head = &sctp_assoc_hashtable[hash]; | 328 | head = &sctp_assoc_hashtable[hash]; |
| 331 | sctp_local_bh_disable(); | 329 | sctp_local_bh_disable(); |
| 332 | read_lock(&head->lock); | 330 | read_lock(&head->lock); |
| 333 | sctp_for_each_hentry(epb, node, &head->chain) { | 331 | sctp_for_each_hentry(epb, &head->chain) { |
| 334 | assoc = sctp_assoc(epb); | 332 | assoc = sctp_assoc(epb); |
| 335 | sk = epb->sk; | 333 | sk = epb->sk; |
| 336 | if (!net_eq(sock_net(sk), seq_file_net(seq))) | 334 | if (!net_eq(sock_net(sk), seq_file_net(seq))) |
| @@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
| 436 | struct sctp_hashbucket *head; | 434 | struct sctp_hashbucket *head; |
| 437 | struct sctp_ep_common *epb; | 435 | struct sctp_ep_common *epb; |
| 438 | struct sctp_association *assoc; | 436 | struct sctp_association *assoc; |
| 439 | struct hlist_node *node; | ||
| 440 | struct sctp_transport *tsp; | 437 | struct sctp_transport *tsp; |
| 441 | int hash = *(loff_t *)v; | 438 | int hash = *(loff_t *)v; |
| 442 | 439 | ||
| @@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
| 447 | sctp_local_bh_disable(); | 444 | sctp_local_bh_disable(); |
| 448 | read_lock(&head->lock); | 445 | read_lock(&head->lock); |
| 449 | rcu_read_lock(); | 446 | rcu_read_lock(); |
| 450 | sctp_for_each_hentry(epb, node, &head->chain) { | 447 | sctp_for_each_hentry(epb, &head->chain) { |
| 451 | if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) | 448 | if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) |
| 452 | continue; | 449 | continue; |
| 453 | assoc = sctp_assoc(epb); | 450 | assoc = sctp_assoc(epb); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cedd9bf67b8c..c99458df3f3f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( | |||
| 5882 | static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | 5882 | static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) |
| 5883 | { | 5883 | { |
| 5884 | struct sctp_bind_hashbucket *head; /* hash list */ | 5884 | struct sctp_bind_hashbucket *head; /* hash list */ |
| 5885 | struct sctp_bind_bucket *pp; /* hash list port iterator */ | 5885 | struct sctp_bind_bucket *pp; |
| 5886 | struct hlist_node *node; | ||
| 5887 | unsigned short snum; | 5886 | unsigned short snum; |
| 5888 | int ret; | 5887 | int ret; |
| 5889 | 5888 | ||
| @@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
| 5910 | index = sctp_phashfn(sock_net(sk), rover); | 5909 | index = sctp_phashfn(sock_net(sk), rover); |
| 5911 | head = &sctp_port_hashtable[index]; | 5910 | head = &sctp_port_hashtable[index]; |
| 5912 | sctp_spin_lock(&head->lock); | 5911 | sctp_spin_lock(&head->lock); |
| 5913 | sctp_for_each_hentry(pp, node, &head->chain) | 5912 | sctp_for_each_hentry(pp, &head->chain) |
| 5914 | if ((pp->port == rover) && | 5913 | if ((pp->port == rover) && |
| 5915 | net_eq(sock_net(sk), pp->net)) | 5914 | net_eq(sock_net(sk), pp->net)) |
| 5916 | goto next; | 5915 | goto next; |
| @@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
| 5938 | */ | 5937 | */ |
| 5939 | head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; | 5938 | head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; |
| 5940 | sctp_spin_lock(&head->lock); | 5939 | sctp_spin_lock(&head->lock); |
| 5941 | sctp_for_each_hentry(pp, node, &head->chain) { | 5940 | sctp_for_each_hentry(pp, &head->chain) { |
| 5942 | if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) | 5941 | if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) |
| 5943 | goto pp_found; | 5942 | goto pp_found; |
| 5944 | } | 5943 | } |
| @@ -5970,7 +5969,7 @@ pp_found: | |||
| 5970 | * that this port/socket (sk) combination are already | 5969 | * that this port/socket (sk) combination are already |
| 5971 | * in an endpoint. | 5970 | * in an endpoint. |
| 5972 | */ | 5971 | */ |
| 5973 | sk_for_each_bound(sk2, node, &pp->owner) { | 5972 | sk_for_each_bound(sk2, &pp->owner) { |
| 5974 | struct sctp_endpoint *ep2; | 5973 | struct sctp_endpoint *ep2; |
| 5975 | ep2 = sctp_sk(sk2)->ep; | 5974 | ep2 = sctp_sk(sk2)->ep; |
| 5976 | 5975 | ||
diff --git a/net/socket.c b/net/socket.c index ee0d029e5130..88f759adf3af 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -369,16 +369,15 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) | |||
| 369 | 369 | ||
| 370 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, | 370 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, |
| 371 | &socket_file_ops); | 371 | &socket_file_ops); |
| 372 | if (unlikely(!file)) { | 372 | if (unlikely(IS_ERR(file))) { |
| 373 | /* drop dentry, keep inode */ | 373 | /* drop dentry, keep inode */ |
| 374 | ihold(path.dentry->d_inode); | 374 | ihold(path.dentry->d_inode); |
| 375 | path_put(&path); | 375 | path_put(&path); |
| 376 | return ERR_PTR(-ENFILE); | 376 | return file; |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | sock->file = file; | 379 | sock->file = file; |
| 380 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); | 380 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); |
| 381 | file->f_pos = 0; | ||
| 382 | file->private_data = sock; | 381 | file->private_data = sock; |
| 383 | return file; | 382 | return file; |
| 384 | } | 383 | } |
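The sock_alloc_file() fix tracks a change in alloc_file(): it now reports failure as an ERR_PTR-encoded pointer rather than NULL, so testing with !file misses real failures, and substituting -ENFILE discards the actual errno. The calling convention in one sketch, mirroring the hunk:

    struct file *file;

    file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops);
    if (IS_ERR(file)) {
            ihold(path.dentry->d_inode);    /* drop dentry, keep inode */
            path_put(&path);
            return file;                    /* propagate, not ERR_PTR(-ENFILE) */
    }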
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index d11418f97f1f..a622ad64acd8 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c | |||
| @@ -17,7 +17,8 @@ | |||
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <net/ipv6.h> | 19 | #include <net/ipv6.h> |
| 20 | #include <linux/sunrpc/clnt.h> | 20 | #include <linux/sunrpc/addr.h> |
| 21 | #include <linux/sunrpc/msg_prot.h> | ||
| 21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 22 | #include <linux/export.h> | 23 | #include <linux/export.h> |
| 23 | 24 | ||
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index b5c067bccc45..f5294047df77 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -407,15 +407,14 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
| 407 | { | 407 | { |
| 408 | LIST_HEAD(free); | 408 | LIST_HEAD(free); |
| 409 | struct rpc_cred_cache *cache = auth->au_credcache; | 409 | struct rpc_cred_cache *cache = auth->au_credcache; |
| 410 | struct hlist_node *pos; | ||
| 411 | struct rpc_cred *cred = NULL, | 410 | struct rpc_cred *cred = NULL, |
| 412 | *entry, *new; | 411 | *entry, *new; |
| 413 | unsigned int nr; | 412 | unsigned int nr; |
| 414 | 413 | ||
| 415 | nr = hash_long(acred->uid, cache->hashbits); | 414 | nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); |
| 416 | 415 | ||
| 417 | rcu_read_lock(); | 416 | rcu_read_lock(); |
| 418 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { | 417 | hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) { |
| 419 | if (!entry->cr_ops->crmatch(acred, entry, flags)) | 418 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
| 420 | continue; | 419 | continue; |
| 421 | spin_lock(&cache->lock); | 420 | spin_lock(&cache->lock); |
| @@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
| 439 | } | 438 | } |
| 440 | 439 | ||
| 441 | spin_lock(&cache->lock); | 440 | spin_lock(&cache->lock); |
| 442 | hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { | 441 | hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) { |
| 443 | if (!entry->cr_ops->crmatch(acred, entry, flags)) | 442 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
| 444 | continue; | 443 | continue; |
| 445 | cred = get_rpccred(entry); | 444 | cred = get_rpccred(entry); |
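Note the credcache key above: a kuid_t is an opaque struct, so it cannot be fed to hash_long() directly; it is first converted back to a plain uid in the namespace the cache is scoped to:

    nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);

The double walk itself is unchanged: a lockless hlist_for_each_entry_rcu() probe first, then a re-check under cache->lock before the newly allocated cred is inserted.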
| @@ -519,8 +518,8 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) | |||
| 519 | { | 518 | { |
| 520 | struct rpc_auth *auth = task->tk_client->cl_auth; | 519 | struct rpc_auth *auth = task->tk_client->cl_auth; |
| 521 | struct auth_cred acred = { | 520 | struct auth_cred acred = { |
| 522 | .uid = 0, | 521 | .uid = GLOBAL_ROOT_UID, |
| 523 | .gid = 0, | 522 | .gid = GLOBAL_ROOT_GID, |
| 524 | }; | 523 | }; |
| 525 | 524 | ||
| 526 | dprintk("RPC: %5u looking up %s cred\n", | 525 | dprintk("RPC: %5u looking up %s cred\n", |
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 6ed6f201b022..b6badafc6494 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c | |||
| @@ -18,8 +18,8 @@ | |||
| 18 | # define RPCDBG_FACILITY RPCDBG_AUTH | 18 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | #define RPC_MACHINE_CRED_USERID ((uid_t)0) | 21 | #define RPC_MACHINE_CRED_USERID GLOBAL_ROOT_UID |
| 22 | #define RPC_MACHINE_CRED_GROUPID ((gid_t)0) | 22 | #define RPC_MACHINE_CRED_GROUPID GLOBAL_ROOT_GID |
| 23 | 23 | ||
| 24 | struct generic_cred { | 24 | struct generic_cred { |
| 25 | struct rpc_cred gc_base; | 25 | struct rpc_cred gc_base; |
| @@ -96,7 +96,9 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 96 | 96 | ||
| 97 | dprintk("RPC: allocated %s cred %p for uid %d gid %d\n", | 97 | dprintk("RPC: allocated %s cred %p for uid %d gid %d\n", |
| 98 | gcred->acred.machine_cred ? "machine" : "generic", | 98 | gcred->acred.machine_cred ? "machine" : "generic", |
| 99 | gcred, acred->uid, acred->gid); | 99 | gcred, |
| 100 | from_kuid(&init_user_ns, acred->uid), | ||
| 101 | from_kgid(&init_user_ns, acred->gid)); | ||
| 100 | return &gcred->gc_base; | 102 | return &gcred->gc_base; |
| 101 | } | 103 | } |
| 102 | 104 | ||
| @@ -129,8 +131,8 @@ machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flag | |||
| 129 | { | 131 | { |
| 130 | if (!gcred->acred.machine_cred || | 132 | if (!gcred->acred.machine_cred || |
| 131 | gcred->acred.principal != acred->principal || | 133 | gcred->acred.principal != acred->principal || |
| 132 | gcred->acred.uid != acred->uid || | 134 | !uid_eq(gcred->acred.uid, acred->uid) || |
| 133 | gcred->acred.gid != acred->gid) | 135 | !gid_eq(gcred->acred.gid, acred->gid)) |
| 134 | return 0; | 136 | return 0; |
| 135 | return 1; | 137 | return 1; |
| 136 | } | 138 | } |
| @@ -147,8 +149,8 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) | |||
| 147 | if (acred->machine_cred) | 149 | if (acred->machine_cred) |
| 148 | return machine_cred_match(acred, gcred, flags); | 150 | return machine_cred_match(acred, gcred, flags); |
| 149 | 151 | ||
| 150 | if (gcred->acred.uid != acred->uid || | 152 | if (!uid_eq(gcred->acred.uid, acred->uid) || |
| 151 | gcred->acred.gid != acred->gid || | 153 | !gid_eq(gcred->acred.gid, acred->gid) || |
| 152 | gcred->acred.machine_cred != 0) | 154 | gcred->acred.machine_cred != 0) |
| 153 | goto out_nomatch; | 155 | goto out_nomatch; |
| 154 | 156 | ||
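auth_generic shows the comparison side of the kuid_t conversion: raw == and != on ids no longer compile, so matches go through uid_eq()/gid_eq(), and root becomes the GLOBAL_ROOT_UID/GLOBAL_ROOT_GID constants. A hedged sketch of a match helper in the same style; cred_match() is illustrative, not from the patch:

    static int cred_match(const struct auth_cred *a, const struct auth_cred *b)
    {
            return uid_eq(a->uid, b->uid) &&    /* was: a->uid == b->uid */
                   gid_eq(a->gid, b->gid);      /* was: a->gid == b->gid */
    }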
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 911ef008b701..5257d2982ba5 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -255,7 +255,7 @@ err: | |||
| 255 | 255 | ||
| 256 | struct gss_upcall_msg { | 256 | struct gss_upcall_msg { |
| 257 | atomic_t count; | 257 | atomic_t count; |
| 258 | uid_t uid; | 258 | kuid_t uid; |
| 259 | struct rpc_pipe_msg msg; | 259 | struct rpc_pipe_msg msg; |
| 260 | struct list_head list; | 260 | struct list_head list; |
| 261 | struct gss_auth *auth; | 261 | struct gss_auth *auth; |
| @@ -302,11 +302,11 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | static struct gss_upcall_msg * | 304 | static struct gss_upcall_msg * |
| 305 | __gss_find_upcall(struct rpc_pipe *pipe, uid_t uid) | 305 | __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) |
| 306 | { | 306 | { |
| 307 | struct gss_upcall_msg *pos; | 307 | struct gss_upcall_msg *pos; |
| 308 | list_for_each_entry(pos, &pipe->in_downcall, list) { | 308 | list_for_each_entry(pos, &pipe->in_downcall, list) { |
| 309 | if (pos->uid != uid) | 309 | if (!uid_eq(pos->uid, uid)) |
| 310 | continue; | 310 | continue; |
| 311 | atomic_inc(&pos->count); | 311 | atomic_inc(&pos->count); |
| 312 | dprintk("RPC: %s found msg %p\n", __func__, pos); | 312 | dprintk("RPC: %s found msg %p\n", __func__, pos); |
| @@ -394,8 +394,11 @@ gss_upcall_callback(struct rpc_task *task) | |||
| 394 | 394 | ||
| 395 | static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) | 395 | static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) |
| 396 | { | 396 | { |
| 397 | gss_msg->msg.data = &gss_msg->uid; | 397 | uid_t uid = from_kuid(&init_user_ns, gss_msg->uid); |
| 398 | gss_msg->msg.len = sizeof(gss_msg->uid); | 398 | memcpy(gss_msg->databuf, &uid, sizeof(uid)); |
| 399 | gss_msg->msg.data = gss_msg->databuf; | ||
| 400 | gss_msg->msg.len = sizeof(uid); | ||
| 401 | BUG_ON(sizeof(uid) > UPCALL_BUF_LEN); | ||
| 399 | } | 402 | } |
| 400 | 403 | ||
| 401 | static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, | 404 | static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, |
| @@ -408,7 +411,7 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, | |||
| 408 | 411 | ||
| 409 | gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", | 412 | gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", |
| 410 | mech->gm_name, | 413 | mech->gm_name, |
| 411 | gss_msg->uid); | 414 | from_kuid(&init_user_ns, gss_msg->uid)); |
| 412 | p += gss_msg->msg.len; | 415 | p += gss_msg->msg.len; |
| 413 | if (clnt->cl_principal) { | 416 | if (clnt->cl_principal) { |
| 414 | len = sprintf(p, "target=%s ", clnt->cl_principal); | 417 | len = sprintf(p, "target=%s ", clnt->cl_principal); |
| @@ -444,7 +447,7 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg, | |||
| 444 | 447 | ||
| 445 | static struct gss_upcall_msg * | 448 | static struct gss_upcall_msg * |
| 446 | gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt, | 449 | gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt, |
| 447 | uid_t uid, const char *service_name) | 450 | kuid_t uid, const char *service_name) |
| 448 | { | 451 | { |
| 449 | struct gss_upcall_msg *gss_msg; | 452 | struct gss_upcall_msg *gss_msg; |
| 450 | int vers; | 453 | int vers; |
| @@ -474,7 +477,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr | |||
| 474 | struct gss_cred *gss_cred = container_of(cred, | 477 | struct gss_cred *gss_cred = container_of(cred, |
| 475 | struct gss_cred, gc_base); | 478 | struct gss_cred, gc_base); |
| 476 | struct gss_upcall_msg *gss_new, *gss_msg; | 479 | struct gss_upcall_msg *gss_new, *gss_msg; |
| 477 | uid_t uid = cred->cr_uid; | 480 | kuid_t uid = cred->cr_uid; |
| 478 | 481 | ||
| 479 | gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal); | 482 | gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal); |
| 480 | if (IS_ERR(gss_new)) | 483 | if (IS_ERR(gss_new)) |
| @@ -516,7 +519,7 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 516 | int err = 0; | 519 | int err = 0; |
| 517 | 520 | ||
| 518 | dprintk("RPC: %5u %s for uid %u\n", | 521 | dprintk("RPC: %5u %s for uid %u\n", |
| 519 | task->tk_pid, __func__, cred->cr_uid); | 522 | task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid)); |
| 520 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 523 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
| 521 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 524 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
| 522 | /* XXX: warning on the first, under the assumption we | 525 | /* XXX: warning on the first, under the assumption we |
| @@ -548,7 +551,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 548 | gss_release_msg(gss_msg); | 551 | gss_release_msg(gss_msg); |
| 549 | out: | 552 | out: |
| 550 | dprintk("RPC: %5u %s for uid %u result %d\n", | 553 | dprintk("RPC: %5u %s for uid %u result %d\n", |
| 551 | task->tk_pid, __func__, cred->cr_uid, err); | 554 | task->tk_pid, __func__, |
| 555 | from_kuid(&init_user_ns, cred->cr_uid), err); | ||
| 552 | return err; | 556 | return err; |
| 553 | } | 557 | } |
| 554 | 558 | ||
| @@ -561,7 +565,8 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
| 561 | DEFINE_WAIT(wait); | 565 | DEFINE_WAIT(wait); |
| 562 | int err = 0; | 566 | int err = 0; |
| 563 | 567 | ||
| 564 | dprintk("RPC: %s for uid %u\n", __func__, cred->cr_uid); | 568 | dprintk("RPC: %s for uid %u\n", |
| 569 | __func__, from_kuid(&init_user_ns, cred->cr_uid)); | ||
| 565 | retry: | 570 | retry: |
| 566 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); | 571 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); |
| 567 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 572 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
| @@ -603,7 +608,7 @@ out_intr: | |||
| 603 | gss_release_msg(gss_msg); | 608 | gss_release_msg(gss_msg); |
| 604 | out: | 609 | out: |
| 605 | dprintk("RPC: %s for uid %u result %d\n", | 610 | dprintk("RPC: %s for uid %u result %d\n", |
| 606 | __func__, cred->cr_uid, err); | 611 | __func__, from_kuid(&init_user_ns, cred->cr_uid), err); |
| 607 | return err; | 612 | return err; |
| 608 | } | 613 | } |
| 609 | 614 | ||
| @@ -615,9 +620,10 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 615 | const void *p, *end; | 620 | const void *p, *end; |
| 616 | void *buf; | 621 | void *buf; |
| 617 | struct gss_upcall_msg *gss_msg; | 622 | struct gss_upcall_msg *gss_msg; |
| 618 | struct rpc_pipe *pipe = RPC_I(filp->f_dentry->d_inode)->pipe; | 623 | struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe; |
| 619 | struct gss_cl_ctx *ctx; | 624 | struct gss_cl_ctx *ctx; |
| 620 | uid_t uid; | 625 | uid_t id; |
| 626 | kuid_t uid; | ||
| 621 | ssize_t err = -EFBIG; | 627 | ssize_t err = -EFBIG; |
| 622 | 628 | ||
| 623 | if (mlen > MSG_BUF_MAXSIZE) | 629 | if (mlen > MSG_BUF_MAXSIZE) |
| @@ -632,12 +638,18 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 632 | goto err; | 638 | goto err; |
| 633 | 639 | ||
| 634 | end = (const void *)((char *)buf + mlen); | 640 | end = (const void *)((char *)buf + mlen); |
| 635 | p = simple_get_bytes(buf, end, &uid, sizeof(uid)); | 641 | p = simple_get_bytes(buf, end, &id, sizeof(id)); |
| 636 | if (IS_ERR(p)) { | 642 | if (IS_ERR(p)) { |
| 637 | err = PTR_ERR(p); | 643 | err = PTR_ERR(p); |
| 638 | goto err; | 644 | goto err; |
| 639 | } | 645 | } |
| 640 | 646 | ||
| 647 | uid = make_kuid(&init_user_ns, id); | ||
| 648 | if (!uid_valid(uid)) { | ||
| 649 | err = -EINVAL; | ||
| 650 | goto err; | ||
| 651 | } | ||
| 652 | |||
| 641 | err = -ENOMEM; | 653 | err = -ENOMEM; |
| 642 | ctx = gss_alloc_context(); | 654 | ctx = gss_alloc_context(); |
| 643 | if (ctx == NULL) | 655 | if (ctx == NULL) |
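gss_pipe_downcall() reads a uid out of the rpc.gssd message, so the wire value stays a plain uid_t and is mapped into kernel form explicitly; an id with no mapping in the namespace must be rejected rather than stored. The idiom, as in the hunk above:

    uid_t id;                   /* raw value from the userspace buffer */
    kuid_t uid;

    uid = make_kuid(&init_user_ns, id);
    if (!uid_valid(uid))
            return -EINVAL;     /* unmappable id: refuse the downcall */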
| @@ -1058,7 +1070,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 1058 | int err = -ENOMEM; | 1070 | int err = -ENOMEM; |
| 1059 | 1071 | ||
| 1060 | dprintk("RPC: %s for uid %d, flavor %d\n", | 1072 | dprintk("RPC: %s for uid %d, flavor %d\n", |
| 1061 | __func__, acred->uid, auth->au_flavor); | 1073 | __func__, from_kuid(&init_user_ns, acred->uid), |
| 1074 | auth->au_flavor); | ||
| 1062 | 1075 | ||
| 1063 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) | 1076 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) |
| 1064 | goto out_err; | 1077 | goto out_err; |
| @@ -1114,7 +1127,7 @@ out: | |||
| 1114 | } | 1127 | } |
| 1115 | if (gss_cred->gc_principal != NULL) | 1128 | if (gss_cred->gc_principal != NULL) |
| 1116 | return 0; | 1129 | return 0; |
| 1117 | return rc->cr_uid == acred->uid; | 1130 | return uid_eq(rc->cr_uid, acred->uid); |
| 1118 | } | 1131 | } |
| 1119 | 1132 | ||
| 1120 | /* | 1133 | /* |
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 107c4528654f..88edec929d73 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
| @@ -574,6 +574,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) | |||
| 574 | buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; | 574 | buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; |
| 575 | buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; | 575 | buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; |
| 576 | 576 | ||
| 577 | /* Trim off the checksum blob */ | ||
| 578 | xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip); | ||
| 577 | return GSS_S_COMPLETE; | 579 | return GSS_S_COMPLETE; |
| 578 | } | 580 | } |
| 579 | 581 | ||
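The gss_krb5 unwrap fix addresses the tail of the buffer: the head-side token header was already subtracted from buf->len, but the trailing checksum blob stayed counted, so callers saw checksum bytes as payload. The added call shortens the buffer from the end; a condensed view of the accounting:

    buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;  /* head side */
    buf->len             -= GSS_KRB5_TOK_HDR_LEN + headskip;
    xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);       /* tail side */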
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 73e957386600..f7d34e7b6f81 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -182,12 +182,6 @@ static void rsi_request(struct cache_detail *cd, | |||
| 182 | (*bpp)[-1] = '\n'; | 182 | (*bpp)[-1] = '\n'; |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | static int rsi_upcall(struct cache_detail *cd, struct cache_head *h) | ||
| 186 | { | ||
| 187 | return sunrpc_cache_pipe_upcall(cd, h, rsi_request); | ||
| 188 | } | ||
| 189 | |||
| 190 | |||
| 191 | static int rsi_parse(struct cache_detail *cd, | 185 | static int rsi_parse(struct cache_detail *cd, |
| 192 | char *mesg, int mlen) | 186 | char *mesg, int mlen) |
| 193 | { | 187 | { |
| @@ -275,7 +269,7 @@ static struct cache_detail rsi_cache_template = { | |||
| 275 | .hash_size = RSI_HASHMAX, | 269 | .hash_size = RSI_HASHMAX, |
| 276 | .name = "auth.rpcsec.init", | 270 | .name = "auth.rpcsec.init", |
| 277 | .cache_put = rsi_put, | 271 | .cache_put = rsi_put, |
| 278 | .cache_upcall = rsi_upcall, | 272 | .cache_request = rsi_request, |
| 279 | .cache_parse = rsi_parse, | 273 | .cache_parse = rsi_parse, |
| 280 | .match = rsi_match, | 274 | .match = rsi_match, |
| 281 | .init = rsi_init, | 275 | .init = rsi_init, |
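
Note: with rsi_upcall() gone, a cache that wants pipe upcalls only supplies a .cache_request formatter, and the generic code (see the cache.c hunks below) queues and formats the upcall itself. An illustrative template, with placeholder names:

    static void foo_request(struct cache_detail *cd, struct cache_head *h,
                            char **bpp, int *blen)
    {
            /* qword_add() the lookup key into *bpp, then (*bpp)[-1] = '\n'; */
    }

    static struct cache_detail foo_cache_template = {
            .name          = "auth.example.foo",  /* placeholder */
            .cache_request = foo_request,         /* was: a .cache_upcall wrapper */
            /* .cache_parse, .cache_show, .match, .init, ... unchanged */
    };
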
| @@ -418,6 +412,7 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 418 | { | 412 | { |
| 419 | /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */ | 413 | /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */ |
| 420 | char *buf = mesg; | 414 | char *buf = mesg; |
| 415 | int id; | ||
| 421 | int len, rv; | 416 | int len, rv; |
| 422 | struct rsc rsci, *rscp = NULL; | 417 | struct rsc rsci, *rscp = NULL; |
| 423 | time_t expiry; | 418 | time_t expiry; |
| @@ -444,7 +439,7 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 444 | goto out; | 439 | goto out; |
| 445 | 440 | ||
| 446 | /* uid, or NEGATIVE */ | 441 | /* uid, or NEGATIVE */ |
| 447 | rv = get_int(&mesg, &rsci.cred.cr_uid); | 442 | rv = get_int(&mesg, &id); |
| 448 | if (rv == -EINVAL) | 443 | if (rv == -EINVAL) |
| 449 | goto out; | 444 | goto out; |
| 450 | if (rv == -ENOENT) | 445 | if (rv == -ENOENT) |
| @@ -452,8 +447,16 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 452 | else { | 447 | else { |
| 453 | int N, i; | 448 | int N, i; |
| 454 | 449 | ||
| 450 | /* uid */ | ||
| 451 | rsci.cred.cr_uid = make_kuid(&init_user_ns, id); | ||
| 452 | if (!uid_valid(rsci.cred.cr_uid)) | ||
| 453 | goto out; | ||
| 454 | |||
| 455 | /* gid */ | 455 | /* gid */ |
| 456 | if (get_int(&mesg, &rsci.cred.cr_gid)) | 456 | if (get_int(&mesg, &id)) |
| 457 | goto out; | ||
| 458 | rsci.cred.cr_gid = make_kgid(&init_user_ns, id); | ||
| 459 | if (!gid_valid(rsci.cred.cr_gid)) | ||
| 457 | goto out; | 460 | goto out; |
| 458 | 461 | ||
| 459 | /* number of additional gid's */ | 462 | /* number of additional gid's */ |
| @@ -467,11 +470,10 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 467 | /* gid's */ | 470 | /* gid's */ |
| 468 | status = -EINVAL; | 471 | status = -EINVAL; |
| 469 | for (i=0; i<N; i++) { | 472 | for (i=0; i<N; i++) { |
| 470 | gid_t gid; | ||
| 471 | kgid_t kgid; | 473 | kgid_t kgid; |
| 472 | if (get_int(&mesg, &gid)) | 474 | if (get_int(&mesg, &id)) |
| 473 | goto out; | 475 | goto out; |
| 474 | kgid = make_kgid(&init_user_ns, gid); | 476 | kgid = make_kgid(&init_user_ns, id); |
| 475 | if (!gid_valid(kgid)) | 477 | if (!gid_valid(kgid)) |
| 476 | goto out; | 478 | goto out; |
| 477 | GROUP_AT(rsci.cred.cr_group_info, i) = kgid; | 479 | GROUP_AT(rsci.cred.cr_group_info, i) = kgid; |
| @@ -817,13 +819,17 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) | |||
| 817 | * The server uses base of head iovec as read pointer, while the | 819 | * The server uses base of head iovec as read pointer, while the |
| 818 | * client uses separate pointer. */ | 820 | * client uses separate pointer. */ |
| 819 | static int | 821 | static int |
| 820 | unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) | 822 | unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) |
| 821 | { | 823 | { |
| 822 | int stat = -EINVAL; | 824 | int stat = -EINVAL; |
| 823 | u32 integ_len, maj_stat; | 825 | u32 integ_len, maj_stat; |
| 824 | struct xdr_netobj mic; | 826 | struct xdr_netobj mic; |
| 825 | struct xdr_buf integ_buf; | 827 | struct xdr_buf integ_buf; |
| 826 | 828 | ||
| 829 | /* Did we already verify the signature on the original pass through? */ | ||
| 830 | if (rqstp->rq_deferred) | ||
| 831 | return 0; | ||
| 832 | |||
| 827 | integ_len = svc_getnl(&buf->head[0]); | 833 | integ_len = svc_getnl(&buf->head[0]); |
| 828 | if (integ_len & 3) | 834 | if (integ_len & 3) |
| 829 | return stat; | 835 | return stat; |
| @@ -846,6 +852,8 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) | |||
| 846 | goto out; | 852 | goto out; |
| 847 | if (svc_getnl(&buf->head[0]) != seq) | 853 | if (svc_getnl(&buf->head[0]) != seq) |
| 848 | goto out; | 854 | goto out; |
| 855 | /* trim off the mic at the end before returning */ | ||
| 856 | xdr_buf_trim(buf, mic.len + 4); | ||
| 849 | stat = 0; | 857 | stat = 0; |
| 850 | out: | 858 | out: |
| 851 | kfree(mic.data); | 859 | kfree(mic.data); |
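
Note: the rq_deferred test matters because a deferred request is replayed with the same rq_arg it had on the first pass, after this function has already consumed the length and sequence words and trimmed the MIC; verifying a second time would operate on the modified buffer and fail. The guard reduces to this shape (verify_and_trim() is an illustrative stand-in for the body above):

    static int unwrap_once(struct svc_rqst *rqstp, struct xdr_buf *buf)
    {
            /* Replayed deferral: verified and trimmed on the first pass. */
            if (rqstp->rq_deferred)
                    return 0;
            return verify_and_trim(buf);
    }
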
| @@ -1190,7 +1198,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 1190 | /* placeholders for length and seq. number: */ | 1198 | /* placeholders for length and seq. number: */ |
| 1191 | svc_putnl(resv, 0); | 1199 | svc_putnl(resv, 0); |
| 1192 | svc_putnl(resv, 0); | 1200 | svc_putnl(resv, 0); |
| 1193 | if (unwrap_integ_data(&rqstp->rq_arg, | 1201 | if (unwrap_integ_data(rqstp, &rqstp->rq_arg, |
| 1194 | gc->gc_seq, rsci->mechctx)) | 1202 | gc->gc_seq, rsci->mechctx)) |
| 1195 | goto garbage_args; | 1203 | goto garbage_args; |
| 1196 | break; | 1204 | break; |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 52c5abdee211..dc37021fc3e5 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
| @@ -18,8 +18,8 @@ | |||
| 18 | 18 | ||
| 19 | struct unx_cred { | 19 | struct unx_cred { |
| 20 | struct rpc_cred uc_base; | 20 | struct rpc_cred uc_base; |
| 21 | gid_t uc_gid; | 21 | kgid_t uc_gid; |
| 22 | gid_t uc_gids[NFS_NGROUPS]; | 22 | kgid_t uc_gids[NFS_NGROUPS]; |
| 23 | }; | 23 | }; |
| 24 | #define uc_uid uc_base.cr_uid | 24 | #define uc_uid uc_base.cr_uid |
| 25 | 25 | ||
| @@ -65,7 +65,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 65 | unsigned int i; | 65 | unsigned int i; |
| 66 | 66 | ||
| 67 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", | 67 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", |
| 68 | acred->uid, acred->gid); | 68 | from_kuid(&init_user_ns, acred->uid), |
| 69 | from_kgid(&init_user_ns, acred->gid)); | ||
| 69 | 70 | ||
| 70 | if (!(cred = kmalloc(sizeof(*cred), GFP_NOFS))) | 71 | if (!(cred = kmalloc(sizeof(*cred), GFP_NOFS))) |
| 71 | return ERR_PTR(-ENOMEM); | 72 | return ERR_PTR(-ENOMEM); |
| @@ -79,13 +80,10 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 79 | groups = NFS_NGROUPS; | 80 | groups = NFS_NGROUPS; |
| 80 | 81 | ||
| 81 | cred->uc_gid = acred->gid; | 82 | cred->uc_gid = acred->gid; |
| 82 | for (i = 0; i < groups; i++) { | 83 | for (i = 0; i < groups; i++) |
| 83 | gid_t gid; | 84 | cred->uc_gids[i] = GROUP_AT(acred->group_info, i); |
| 84 | gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i)); | ||
| 85 | cred->uc_gids[i] = gid; | ||
| 86 | } | ||
| 87 | if (i < NFS_NGROUPS) | 85 | if (i < NFS_NGROUPS) |
| 88 | cred->uc_gids[i] = NOGROUP; | 86 | cred->uc_gids[i] = INVALID_GID; |
| 89 | 87 | ||
| 90 | return &cred->uc_base; | 88 | return &cred->uc_base; |
| 91 | } | 89 | } |
| @@ -123,21 +121,17 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) | |||
| 123 | unsigned int i; | 121 | unsigned int i; |
| 124 | 122 | ||
| 125 | 123 | ||
| 126 | if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid) | 124 | if (!uid_eq(cred->uc_uid, acred->uid) || !gid_eq(cred->uc_gid, acred->gid)) |
| 127 | return 0; | 125 | return 0; |
| 128 | 126 | ||
| 129 | if (acred->group_info != NULL) | 127 | if (acred->group_info != NULL) |
| 130 | groups = acred->group_info->ngroups; | 128 | groups = acred->group_info->ngroups; |
| 131 | if (groups > NFS_NGROUPS) | 129 | if (groups > NFS_NGROUPS) |
| 132 | groups = NFS_NGROUPS; | 130 | groups = NFS_NGROUPS; |
| 133 | for (i = 0; i < groups ; i++) { | 131 | for (i = 0; i < groups ; i++) |
| 134 | gid_t gid; | 132 | if (!gid_eq(cred->uc_gids[i], GROUP_AT(acred->group_info, i))) |
| 135 | gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i)); | ||
| 136 | if (cred->uc_gids[i] != gid) | ||
| 137 | return 0; | 133 | return 0; |
| 138 | } | 134 | if (groups < NFS_NGROUPS && gid_valid(cred->uc_gids[groups])) |
| 139 | if (groups < NFS_NGROUPS && | ||
| 140 | cred->uc_gids[groups] != NOGROUP) | ||
| 141 | return 0; | 135 | return 0; |
| 142 | return 1; | 136 | return 1; |
| 143 | } | 137 | } |
| @@ -163,11 +157,11 @@ unx_marshal(struct rpc_task *task, __be32 *p) | |||
| 163 | */ | 157 | */ |
| 164 | p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen); | 158 | p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen); |
| 165 | 159 | ||
| 166 | *p++ = htonl((u32) cred->uc_uid); | 160 | *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid)); |
| 167 | *p++ = htonl((u32) cred->uc_gid); | 161 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid)); |
| 168 | hold = p++; | 162 | hold = p++; |
| 169 | for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++) | 163 | for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++) |
| 170 | *p++ = htonl((u32) cred->uc_gids[i]); | 164 | *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i])); |
| 171 | *hold = htonl(p - hold - 1); /* gid array length */ | 165 | *hold = htonl(p - hold - 1); /* gid array length */ |
| 172 | *base = htonl((p - base - 1) << 2); /* cred length */ | 166 | *base = htonl((p - base - 1) << 2); /* cred length */ |
| 173 | 167 | ||
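
Note: since uc_gids[] now holds opaque kgid_t values, INVALID_GID replaces NOGROUP as the end-of-array sentinel, and conversion back to wire ids happens only at marshal time. A self-contained sketch of the walk in unx_marshal(), assuming NFS_NGROUPS == 16 as in include/linux/sunrpc/auth.h:

    #include <linux/uidgid.h>
    #include <linux/user_namespace.h>

    #define NFS_NGROUPS 16

    /* Encode a sentinel-terminated kgid_t array; returns the advanced
     * XDR write pointer. */
    static __be32 *encode_gids(__be32 *p, const kgid_t *gids)
    {
            __be32 *hold = p++;             /* reserve the count slot */
            int i;

            for (i = 0; i < NFS_NGROUPS && gid_valid(gids[i]); i++)
                    *p++ = htonl((u32)from_kgid(&init_user_ns, gids[i]));
            *hold = htonl(p - hold - 1);    /* back-fill the gid count */
            return p;
    }
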
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 9afa4393c217..25d58e766014 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -196,9 +196,9 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_update); | |||
| 196 | 196 | ||
| 197 | static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) | 197 | static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) |
| 198 | { | 198 | { |
| 199 | if (!cd->cache_upcall) | 199 | if (cd->cache_upcall) |
| 200 | return -EINVAL; | 200 | return cd->cache_upcall(cd, h); |
| 201 | return cd->cache_upcall(cd, h); | 201 | return sunrpc_cache_pipe_upcall(cd, h); |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) | 204 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) |
| @@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item) | |||
| 670 | { | 670 | { |
| 671 | struct cache_deferred_req *dreq; | 671 | struct cache_deferred_req *dreq; |
| 672 | struct list_head pending; | 672 | struct list_head pending; |
| 673 | struct hlist_node *lp, *tmp; | 673 | struct hlist_node *tmp; |
| 674 | int hash = DFR_HASH(item); | 674 | int hash = DFR_HASH(item); |
| 675 | 675 | ||
| 676 | INIT_LIST_HEAD(&pending); | 676 | INIT_LIST_HEAD(&pending); |
| 677 | spin_lock(&cache_defer_lock); | 677 | spin_lock(&cache_defer_lock); |
| 678 | 678 | ||
| 679 | hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash) | 679 | hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash) |
| 680 | if (dreq->item == item) { | 680 | if (dreq->item == item) { |
| 681 | __unhash_deferred_req(dreq); | 681 | __unhash_deferred_req(dreq); |
| 682 | list_add(&dreq->recent, &pending); | 682 | list_add(&dreq->recent, &pending); |
| @@ -750,12 +750,24 @@ struct cache_reader { | |||
| 750 | int offset; /* if non-0, we have a refcnt on next request */ | 750 | int offset; /* if non-0, we have a refcnt on next request */ |
| 751 | }; | 751 | }; |
| 752 | 752 | ||
| 753 | static int cache_request(struct cache_detail *detail, | ||
| 754 | struct cache_request *crq) | ||
| 755 | { | ||
| 756 | char *bp = crq->buf; | ||
| 757 | int len = PAGE_SIZE; | ||
| 758 | |||
| 759 | detail->cache_request(detail, crq->item, &bp, &len); | ||
| 760 | if (len < 0) | ||
| 761 | return -EAGAIN; | ||
| 762 | return PAGE_SIZE - len; | ||
| 763 | } | ||
| 764 | |||
| 753 | static ssize_t cache_read(struct file *filp, char __user *buf, size_t count, | 765 | static ssize_t cache_read(struct file *filp, char __user *buf, size_t count, |
| 754 | loff_t *ppos, struct cache_detail *cd) | 766 | loff_t *ppos, struct cache_detail *cd) |
| 755 | { | 767 | { |
| 756 | struct cache_reader *rp = filp->private_data; | 768 | struct cache_reader *rp = filp->private_data; |
| 757 | struct cache_request *rq; | 769 | struct cache_request *rq; |
| 758 | struct inode *inode = filp->f_path.dentry->d_inode; | 770 | struct inode *inode = file_inode(filp); |
| 759 | int err; | 771 | int err; |
| 760 | 772 | ||
| 761 | if (count == 0) | 773 | if (count == 0) |
| @@ -784,6 +796,13 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count, | |||
| 784 | rq->readers++; | 796 | rq->readers++; |
| 785 | spin_unlock(&queue_lock); | 797 | spin_unlock(&queue_lock); |
| 786 | 798 | ||
| 799 | if (rq->len == 0) { | ||
| 800 | err = cache_request(cd, rq); | ||
| 801 | if (err < 0) | ||
| 802 | goto out; | ||
| 803 | rq->len = err; | ||
| 804 | } | ||
| 805 | |||
| 787 | if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { | 806 | if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { |
| 788 | err = -EAGAIN; | 807 | err = -EAGAIN; |
| 789 | spin_lock(&queue_lock); | 808 | spin_lock(&queue_lock); |
| @@ -886,7 +905,7 @@ static ssize_t cache_write(struct file *filp, const char __user *buf, | |||
| 886 | struct cache_detail *cd) | 905 | struct cache_detail *cd) |
| 887 | { | 906 | { |
| 888 | struct address_space *mapping = filp->f_mapping; | 907 | struct address_space *mapping = filp->f_mapping; |
| 889 | struct inode *inode = filp->f_path.dentry->d_inode; | 908 | struct inode *inode = file_inode(filp); |
| 890 | ssize_t ret = -EINVAL; | 909 | ssize_t ret = -EINVAL; |
| 891 | 910 | ||
| 892 | if (!cd->cache_parse) | 911 | if (!cd->cache_parse) |
| @@ -1140,17 +1159,14 @@ static bool cache_listeners_exist(struct cache_detail *detail) | |||
| 1140 | * | 1159 | * |
| 1141 | * Each request is at most one page long. | 1160 | * Each request is at most one page long. |
| 1142 | */ | 1161 | */ |
| 1143 | int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, | 1162 | int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) |
| 1144 | void (*cache_request)(struct cache_detail *, | ||
| 1145 | struct cache_head *, | ||
| 1146 | char **, | ||
| 1147 | int *)) | ||
| 1148 | { | 1163 | { |
| 1149 | 1164 | ||
| 1150 | char *buf; | 1165 | char *buf; |
| 1151 | struct cache_request *crq; | 1166 | struct cache_request *crq; |
| 1152 | char *bp; | 1167 | |
| 1153 | int len; | 1168 | if (!detail->cache_request) |
| 1169 | return -EINVAL; | ||
| 1154 | 1170 | ||
| 1155 | if (!cache_listeners_exist(detail)) { | 1171 | if (!cache_listeners_exist(detail)) { |
| 1156 | warn_no_listener(detail); | 1172 | warn_no_listener(detail); |
| @@ -1167,19 +1183,10 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, | |||
| 1167 | return -EAGAIN; | 1183 | return -EAGAIN; |
| 1168 | } | 1184 | } |
| 1169 | 1185 | ||
| 1170 | bp = buf; len = PAGE_SIZE; | ||
| 1171 | |||
| 1172 | cache_request(detail, h, &bp, &len); | ||
| 1173 | |||
| 1174 | if (len < 0) { | ||
| 1175 | kfree(buf); | ||
| 1176 | kfree(crq); | ||
| 1177 | return -EAGAIN; | ||
| 1178 | } | ||
| 1179 | crq->q.reader = 0; | 1186 | crq->q.reader = 0; |
| 1180 | crq->item = cache_get(h); | 1187 | crq->item = cache_get(h); |
| 1181 | crq->buf = buf; | 1188 | crq->buf = buf; |
| 1182 | crq->len = PAGE_SIZE - len; | 1189 | crq->len = 0; |
| 1183 | crq->readers = 0; | 1190 | crq->readers = 0; |
| 1184 | spin_lock(&queue_lock); | 1191 | spin_lock(&queue_lock); |
| 1185 | list_add_tail(&crq->q.list, &detail->queue); | 1192 | list_add_tail(&crq->q.list, &detail->queue); |
| @@ -1454,7 +1461,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf, | |||
| 1454 | static ssize_t cache_read_procfs(struct file *filp, char __user *buf, | 1461 | static ssize_t cache_read_procfs(struct file *filp, char __user *buf, |
| 1455 | size_t count, loff_t *ppos) | 1462 | size_t count, loff_t *ppos) |
| 1456 | { | 1463 | { |
| 1457 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 1464 | struct cache_detail *cd = PDE(file_inode(filp))->data; |
| 1458 | 1465 | ||
| 1459 | return cache_read(filp, buf, count, ppos, cd); | 1466 | return cache_read(filp, buf, count, ppos, cd); |
| 1460 | } | 1467 | } |
| @@ -1462,14 +1469,14 @@ static ssize_t cache_read_procfs(struct file *filp, char __user *buf, | |||
| 1462 | static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, | 1469 | static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, |
| 1463 | size_t count, loff_t *ppos) | 1470 | size_t count, loff_t *ppos) |
| 1464 | { | 1471 | { |
| 1465 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 1472 | struct cache_detail *cd = PDE(file_inode(filp))->data; |
| 1466 | 1473 | ||
| 1467 | return cache_write(filp, buf, count, ppos, cd); | 1474 | return cache_write(filp, buf, count, ppos, cd); |
| 1468 | } | 1475 | } |
| 1469 | 1476 | ||
| 1470 | static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) | 1477 | static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) |
| 1471 | { | 1478 | { |
| 1472 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 1479 | struct cache_detail *cd = PDE(file_inode(filp))->data; |
| 1473 | 1480 | ||
| 1474 | return cache_poll(filp, wait, cd); | 1481 | return cache_poll(filp, wait, cd); |
| 1475 | } | 1482 | } |
| @@ -1477,7 +1484,7 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) | |||
| 1477 | static long cache_ioctl_procfs(struct file *filp, | 1484 | static long cache_ioctl_procfs(struct file *filp, |
| 1478 | unsigned int cmd, unsigned long arg) | 1485 | unsigned int cmd, unsigned long arg) |
| 1479 | { | 1486 | { |
| 1480 | struct inode *inode = filp->f_path.dentry->d_inode; | 1487 | struct inode *inode = file_inode(filp); |
| 1481 | struct cache_detail *cd = PDE(inode)->data; | 1488 | struct cache_detail *cd = PDE(inode)->data; |
| 1482 | 1489 | ||
| 1483 | return cache_ioctl(inode, filp, cmd, arg, cd); | 1490 | return cache_ioctl(inode, filp, cmd, arg, cd); |
| @@ -1546,7 +1553,7 @@ static int release_flush_procfs(struct inode *inode, struct file *filp) | |||
| 1546 | static ssize_t read_flush_procfs(struct file *filp, char __user *buf, | 1553 | static ssize_t read_flush_procfs(struct file *filp, char __user *buf, |
| 1547 | size_t count, loff_t *ppos) | 1554 | size_t count, loff_t *ppos) |
| 1548 | { | 1555 | { |
| 1549 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 1556 | struct cache_detail *cd = PDE(file_inode(filp))->data; |
| 1550 | 1557 | ||
| 1551 | return read_flush(filp, buf, count, ppos, cd); | 1558 | return read_flush(filp, buf, count, ppos, cd); |
| 1552 | } | 1559 | } |
| @@ -1555,7 +1562,7 @@ static ssize_t write_flush_procfs(struct file *filp, | |||
| 1555 | const char __user *buf, | 1562 | const char __user *buf, |
| 1556 | size_t count, loff_t *ppos) | 1563 | size_t count, loff_t *ppos) |
| 1557 | { | 1564 | { |
| 1558 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 1565 | struct cache_detail *cd = PDE(file_inode(filp))->data; |
| 1559 | 1566 | ||
| 1560 | return write_flush(filp, buf, count, ppos, cd); | 1567 | return write_flush(filp, buf, count, ppos, cd); |
| 1561 | } | 1568 | } |
| @@ -1605,7 +1612,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | |||
| 1605 | if (p == NULL) | 1612 | if (p == NULL) |
| 1606 | goto out_nomem; | 1613 | goto out_nomem; |
| 1607 | 1614 | ||
| 1608 | if (cd->cache_upcall || cd->cache_parse) { | 1615 | if (cd->cache_request || cd->cache_parse) { |
| 1609 | p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, | 1616 | p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, |
| 1610 | cd->u.procfs.proc_ent, | 1617 | cd->u.procfs.proc_ent, |
| 1611 | &cache_file_operations_procfs, cd); | 1618 | &cache_file_operations_procfs, cd); |
| @@ -1614,7 +1621,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | |||
| 1614 | goto out_nomem; | 1621 | goto out_nomem; |
| 1615 | } | 1622 | } |
| 1616 | if (cd->cache_show) { | 1623 | if (cd->cache_show) { |
| 1617 | p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR, | 1624 | p = proc_create_data("content", S_IFREG|S_IRUSR, |
| 1618 | cd->u.procfs.proc_ent, | 1625 | cd->u.procfs.proc_ent, |
| 1619 | &content_file_operations_procfs, cd); | 1626 | &content_file_operations_procfs, cd); |
| 1620 | cd->u.procfs.content_ent = p; | 1627 | cd->u.procfs.content_ent = p; |
| @@ -1686,7 +1693,7 @@ EXPORT_SYMBOL_GPL(cache_destroy_net); | |||
| 1686 | static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, | 1693 | static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, |
| 1687 | size_t count, loff_t *ppos) | 1694 | size_t count, loff_t *ppos) |
| 1688 | { | 1695 | { |
| 1689 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 1696 | struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
| 1690 | 1697 | ||
| 1691 | return cache_read(filp, buf, count, ppos, cd); | 1698 | return cache_read(filp, buf, count, ppos, cd); |
| 1692 | } | 1699 | } |
| @@ -1694,14 +1701,14 @@ static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, | |||
| 1694 | static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, | 1701 | static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, |
| 1695 | size_t count, loff_t *ppos) | 1702 | size_t count, loff_t *ppos) |
| 1696 | { | 1703 | { |
| 1697 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 1704 | struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
| 1698 | 1705 | ||
| 1699 | return cache_write(filp, buf, count, ppos, cd); | 1706 | return cache_write(filp, buf, count, ppos, cd); |
| 1700 | } | 1707 | } |
| 1701 | 1708 | ||
| 1702 | static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) | 1709 | static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) |
| 1703 | { | 1710 | { |
| 1704 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 1711 | struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
| 1705 | 1712 | ||
| 1706 | return cache_poll(filp, wait, cd); | 1713 | return cache_poll(filp, wait, cd); |
| 1707 | } | 1714 | } |
| @@ -1709,7 +1716,7 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) | |||
| 1709 | static long cache_ioctl_pipefs(struct file *filp, | 1716 | static long cache_ioctl_pipefs(struct file *filp, |
| 1710 | unsigned int cmd, unsigned long arg) | 1717 | unsigned int cmd, unsigned long arg) |
| 1711 | { | 1718 | { |
| 1712 | struct inode *inode = filp->f_dentry->d_inode; | 1719 | struct inode *inode = file_inode(filp); |
| 1713 | struct cache_detail *cd = RPC_I(inode)->private; | 1720 | struct cache_detail *cd = RPC_I(inode)->private; |
| 1714 | 1721 | ||
| 1715 | return cache_ioctl(inode, filp, cmd, arg, cd); | 1722 | return cache_ioctl(inode, filp, cmd, arg, cd); |
| @@ -1778,7 +1785,7 @@ static int release_flush_pipefs(struct inode *inode, struct file *filp) | |||
| 1778 | static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, | 1785 | static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, |
| 1779 | size_t count, loff_t *ppos) | 1786 | size_t count, loff_t *ppos) |
| 1780 | { | 1787 | { |
| 1781 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 1788 | struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
| 1782 | 1789 | ||
| 1783 | return read_flush(filp, buf, count, ppos, cd); | 1790 | return read_flush(filp, buf, count, ppos, cd); |
| 1784 | } | 1791 | } |
| @@ -1787,7 +1794,7 @@ static ssize_t write_flush_pipefs(struct file *filp, | |||
| 1787 | const char __user *buf, | 1794 | const char __user *buf, |
| 1788 | size_t count, loff_t *ppos) | 1795 | size_t count, loff_t *ppos) |
| 1789 | { | 1796 | { |
| 1790 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 1797 | struct cache_detail *cd = RPC_I(file_inode(filp))->private; |
| 1791 | 1798 | ||
| 1792 | return write_flush(filp, buf, count, ppos, cd); | 1799 | return write_flush(filp, buf, count, ppos, cd); |
| 1793 | } | 1800 | } |
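
Note: the net effect in cache.c is that queueing an upcall and formatting it are decoupled. Roughly:

    /*
     * sunrpc_cache_pipe_upcall(cd, h)    queues crq with crq->len == 0
     * cache_read()                       the first reader formats on demand:
     *   cache_request(cd, crq)           runs cd->cache_request() into
     *                                    crq->buf, -EAGAIN if the formatter
     *                                    overran the one-page budget
     *
     * so a request that is never read is never formatted.
     */
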
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index a9f7906c1a6a..dcc446e7fbf6 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/rcupdate.h> | 33 | #include <linux/rcupdate.h> |
| 34 | 34 | ||
| 35 | #include <linux/sunrpc/clnt.h> | 35 | #include <linux/sunrpc/clnt.h> |
| 36 | #include <linux/sunrpc/addr.h> | ||
| 36 | #include <linux/sunrpc/rpc_pipe_fs.h> | 37 | #include <linux/sunrpc/rpc_pipe_fs.h> |
| 37 | #include <linux/sunrpc/metrics.h> | 38 | #include <linux/sunrpc/metrics.h> |
| 38 | #include <linux/sunrpc/bc_xprt.h> | 39 | #include <linux/sunrpc/bc_xprt.h> |
| @@ -1196,6 +1197,21 @@ size_t rpc_max_payload(struct rpc_clnt *clnt) | |||
| 1196 | EXPORT_SYMBOL_GPL(rpc_max_payload); | 1197 | EXPORT_SYMBOL_GPL(rpc_max_payload); |
| 1197 | 1198 | ||
| 1198 | /** | 1199 | /** |
| 1200 | * rpc_get_timeout - Get timeout for transport in units of HZ | ||
| 1201 | * @clnt: RPC client to query | ||
| 1202 | */ | ||
| 1203 | unsigned long rpc_get_timeout(struct rpc_clnt *clnt) | ||
| 1204 | { | ||
| 1205 | unsigned long ret; | ||
| 1206 | |||
| 1207 | rcu_read_lock(); | ||
| 1208 | ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval; | ||
| 1209 | rcu_read_unlock(); | ||
| 1210 | return ret; | ||
| 1211 | } | ||
| 1212 | EXPORT_SYMBOL_GPL(rpc_get_timeout); | ||
| 1213 | |||
| 1214 | /** | ||
| 1199 | * rpc_force_rebind - force transport to check that remote port is unchanged | 1215 | * rpc_force_rebind - force transport to check that remote port is unchanged |
| 1200 | * @clnt: client to rebind | 1216 | * @clnt: client to rebind |
| 1201 | * | 1217 | * |
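
A sketch of how the new accessor is meant to be used (the caller below is illustrative; the real consumers in this series live on the NFS side):

    #include <linux/sunrpc/clnt.h>

    /* Derive a retry interval from the transport's initial timeout;
     * rpc_get_timeout() returns it in jiffies. */
    static unsigned long foo_retry_interval(struct rpc_clnt *clnt)
    {
            return rpc_get_timeout(clnt) / 2;
    }
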
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index fd10981ea792..7b9b40224a27 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -284,7 +284,7 @@ out: | |||
| 284 | static ssize_t | 284 | static ssize_t |
| 285 | rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | 285 | rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) |
| 286 | { | 286 | { |
| 287 | struct inode *inode = filp->f_path.dentry->d_inode; | 287 | struct inode *inode = file_inode(filp); |
| 288 | struct rpc_pipe *pipe; | 288 | struct rpc_pipe *pipe; |
| 289 | struct rpc_pipe_msg *msg; | 289 | struct rpc_pipe_msg *msg; |
| 290 | int res = 0; | 290 | int res = 0; |
| @@ -328,7 +328,7 @@ out_unlock: | |||
| 328 | static ssize_t | 328 | static ssize_t |
| 329 | rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) | 329 | rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) |
| 330 | { | 330 | { |
| 331 | struct inode *inode = filp->f_path.dentry->d_inode; | 331 | struct inode *inode = file_inode(filp); |
| 332 | int res; | 332 | int res; |
| 333 | 333 | ||
| 334 | mutex_lock(&inode->i_mutex); | 334 | mutex_lock(&inode->i_mutex); |
| @@ -342,7 +342,7 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of | |||
| 342 | static unsigned int | 342 | static unsigned int |
| 343 | rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) | 343 | rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) |
| 344 | { | 344 | { |
| 345 | struct inode *inode = filp->f_path.dentry->d_inode; | 345 | struct inode *inode = file_inode(filp); |
| 346 | struct rpc_inode *rpci = RPC_I(inode); | 346 | struct rpc_inode *rpci = RPC_I(inode); |
| 347 | unsigned int mask = POLLOUT | POLLWRNORM; | 347 | unsigned int mask = POLLOUT | POLLWRNORM; |
| 348 | 348 | ||
| @@ -360,7 +360,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) | |||
| 360 | static long | 360 | static long |
| 361 | rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 361 | rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 362 | { | 362 | { |
| 363 | struct inode *inode = filp->f_path.dentry->d_inode; | 363 | struct inode *inode = file_inode(filp); |
| 364 | struct rpc_pipe *pipe; | 364 | struct rpc_pipe *pipe; |
| 365 | int len; | 365 | int len; |
| 366 | 366 | ||
| @@ -830,7 +830,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry, | |||
| 830 | * responses to upcalls. They will result in calls to @msg->downcall. | 830 | * responses to upcalls. They will result in calls to @msg->downcall. |
| 831 | * | 831 | * |
| 832 | * The @private argument passed here will be available to all these methods | 832 | * The @private argument passed here will be available to all these methods |
| 833 | * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private. | 833 | * from the file pointer, via RPC_I(file_inode(file))->private. |
| 834 | */ | 834 | */ |
| 835 | struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name, | 835 | struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name, |
| 836 | void *private, struct rpc_pipe *pipe) | 836 | void *private, struct rpc_pipe *pipe) |
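
Note: the filp->f_path.dentry->d_inode and filp->f_dentry->d_inode chains throughout these files are replaced by the file_inode() helper from <linux/fs.h>, which began as shorthand for the open-coded chain (the inode pointer was subsequently cached in struct file); roughly:

    static inline struct inode *file_inode(struct file *f)
    {
            return f->f_path.dentry->d_inode;
    }
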
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 795a0f4e920b..3df764dc330c 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <net/ipv6.h> | 26 | #include <net/ipv6.h> |
| 27 | 27 | ||
| 28 | #include <linux/sunrpc/clnt.h> | 28 | #include <linux/sunrpc/clnt.h> |
| 29 | #include <linux/sunrpc/addr.h> | ||
| 29 | #include <linux/sunrpc/sched.h> | 30 | #include <linux/sunrpc/sched.h> |
| 30 | #include <linux/sunrpc/xprtsock.h> | 31 | #include <linux/sunrpc/xprtsock.h> |
| 31 | 32 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index dbf12ac5ecb7..89a588b4478b 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled); | |||
| 515 | 515 | ||
| 516 | void svc_shutdown_net(struct svc_serv *serv, struct net *net) | 516 | void svc_shutdown_net(struct svc_serv *serv, struct net *net) |
| 517 | { | 517 | { |
| 518 | /* | ||
| 519 | * The set of xprts (contained in the sv_tempsocks and | ||
| 520 | * sv_permsocks lists) is now constant, since it is modified | ||
| 521 | * only by accepting new sockets (done by service threads in | ||
| 522 | * svc_recv) or aging old ones (done by sv_temptimer), or | ||
| 523 | * configuration changes (excluded by whatever locking the | ||
| 524 | * caller is using--nfsd_mutex in the case of nfsd). So it's | ||
| 525 | * safe to traverse those lists and shut everything down: | ||
| 526 | */ | ||
| 527 | svc_close_net(serv, net); | 518 | svc_close_net(serv, net); |
| 528 | 519 | ||
| 529 | if (serv->sv_shutdown) | 520 | if (serv->sv_shutdown) |
| @@ -1042,6 +1033,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) | |||
| 1042 | /* | 1033 | /* |
| 1043 | * dprintk the given error with the address of the client that caused it. | 1034 | * dprintk the given error with the address of the client that caused it. |
| 1044 | */ | 1035 | */ |
| 1036 | #ifdef RPC_DEBUG | ||
| 1045 | static __printf(2, 3) | 1037 | static __printf(2, 3) |
| 1046 | void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) | 1038 | void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) |
| 1047 | { | 1039 | { |
| @@ -1058,6 +1050,9 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) | |||
| 1058 | 1050 | ||
| 1059 | va_end(args); | 1051 | va_end(args); |
| 1060 | } | 1052 | } |
| 1053 | #else | ||
| 1054 | static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} | ||
| 1055 | #endif | ||
| 1061 | 1056 | ||
| 1062 | /* | 1057 | /* |
| 1063 | * Common routine for processing the RPC request. | 1058 | * Common routine for processing the RPC request. |
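
Note: the empty !RPC_DEBUG stub keeps every call site compiling while generating no output, and __printf(2,3) still type-checks the format arguments at build time, e.g.:

    /* Call site of the shape used in svc_process(); compiles to nothing
     * without RPC_DEBUG, but a mismatched format would still warn. */
    svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
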
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b8e47fac7315..80a6640f329b 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -499,7 +499,8 @@ void svc_wake_up(struct svc_serv *serv) | |||
| 499 | rqstp->rq_xprt = NULL; | 499 | rqstp->rq_xprt = NULL; |
| 500 | */ | 500 | */ |
| 501 | wake_up(&rqstp->rq_wait); | 501 | wake_up(&rqstp->rq_wait); |
| 502 | } | 502 | } else |
| 503 | pool->sp_task_pending = 1; | ||
| 503 | spin_unlock_bh(&pool->sp_lock); | 504 | spin_unlock_bh(&pool->sp_lock); |
| 504 | } | 505 | } |
| 505 | } | 506 | } |
| @@ -634,7 +635,13 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) | |||
| 634 | * long for cache updates. | 635 | * long for cache updates. |
| 635 | */ | 636 | */ |
| 636 | rqstp->rq_chandle.thread_wait = 1*HZ; | 637 | rqstp->rq_chandle.thread_wait = 1*HZ; |
| 638 | pool->sp_task_pending = 0; | ||
| 637 | } else { | 639 | } else { |
| 640 | if (pool->sp_task_pending) { | ||
| 641 | pool->sp_task_pending = 0; | ||
| 642 | spin_unlock_bh(&pool->sp_lock); | ||
| 643 | return ERR_PTR(-EAGAIN); | ||
| 644 | } | ||
| 638 | /* No data pending. Go to sleep */ | 645 | /* No data pending. Go to sleep */ |
| 639 | svc_thread_enqueue(pool, rqstp); | 646 | svc_thread_enqueue(pool, rqstp); |
| 640 | 647 | ||
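
Note: sp_task_pending closes a lost-wakeup window: when svc_wake_up() finds no idle thread it records the fact, and the next thread entering svc_get_next_xprt() consumes the flag and returns -EAGAIN instead of sleeping through the event. Condensed:

    /* Wakeup side (no idle thread):   pool->sp_task_pending = 1;
     *
     * Sleep side (before enqueuing):  if (pool->sp_task_pending) {
     *                                         pool->sp_task_pending = 0;
     *                                         return ERR_PTR(-EAGAIN);
     *                                 }
     */
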
| @@ -856,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
| 856 | struct svc_serv *serv = (struct svc_serv *)closure; | 863 | struct svc_serv *serv = (struct svc_serv *)closure; |
| 857 | struct svc_xprt *xprt; | 864 | struct svc_xprt *xprt; |
| 858 | struct list_head *le, *next; | 865 | struct list_head *le, *next; |
| 859 | LIST_HEAD(to_be_aged); | ||
| 860 | 866 | ||
| 861 | dprintk("svc_age_temp_xprts\n"); | 867 | dprintk("svc_age_temp_xprts\n"); |
| 862 | 868 | ||
| @@ -877,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
| 877 | if (atomic_read(&xprt->xpt_ref.refcount) > 1 || | 883 | if (atomic_read(&xprt->xpt_ref.refcount) > 1 || |
| 878 | test_bit(XPT_BUSY, &xprt->xpt_flags)) | 884 | test_bit(XPT_BUSY, &xprt->xpt_flags)) |
| 879 | continue; | 885 | continue; |
| 880 | svc_xprt_get(xprt); | 886 | list_del_init(le); |
| 881 | list_move(le, &to_be_aged); | ||
| 882 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 887 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
| 883 | set_bit(XPT_DETACHED, &xprt->xpt_flags); | 888 | set_bit(XPT_DETACHED, &xprt->xpt_flags); |
| 884 | } | ||
| 885 | spin_unlock_bh(&serv->sv_lock); | ||
| 886 | |||
| 887 | while (!list_empty(&to_be_aged)) { | ||
| 888 | le = to_be_aged.next; | ||
| 889 | /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */ | ||
| 890 | list_del_init(le); | ||
| 891 | xprt = list_entry(le, struct svc_xprt, xpt_list); | ||
| 892 | |||
| 893 | dprintk("queuing xprt %p for closing\n", xprt); | 889 | dprintk("queuing xprt %p for closing\n", xprt); |
| 894 | 890 | ||
| 895 | /* a thread will dequeue and close it soon */ | 891 | /* a thread will dequeue and close it soon */ |
| 896 | svc_xprt_enqueue(xprt); | 892 | svc_xprt_enqueue(xprt); |
| 897 | svc_xprt_put(xprt); | ||
| 898 | } | 893 | } |
| 894 | spin_unlock_bh(&serv->sv_lock); | ||
| 899 | 895 | ||
| 900 | mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); | 896 | mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); |
| 901 | } | 897 | } |
| @@ -959,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
| 959 | } | 955 | } |
| 960 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 956 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
| 961 | 957 | ||
| 962 | static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) | 958 | static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) |
| 963 | { | 959 | { |
| 964 | struct svc_xprt *xprt; | 960 | struct svc_xprt *xprt; |
| 961 | int ret = 0; | ||
| 965 | 962 | ||
| 966 | spin_lock(&serv->sv_lock); | 963 | spin_lock(&serv->sv_lock); |
| 967 | list_for_each_entry(xprt, xprt_list, xpt_list) { | 964 | list_for_each_entry(xprt, xprt_list, xpt_list) { |
| 968 | if (xprt->xpt_net != net) | 965 | if (xprt->xpt_net != net) |
| 969 | continue; | 966 | continue; |
| 967 | ret++; | ||
| 970 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 968 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
| 971 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 969 | svc_xprt_enqueue(xprt); |
| 972 | } | 970 | } |
| 973 | spin_unlock(&serv->sv_lock); | 971 | spin_unlock(&serv->sv_lock); |
| 972 | return ret; | ||
| 974 | } | 973 | } |
| 975 | 974 | ||
| 976 | static void svc_clear_pools(struct svc_serv *serv, struct net *net) | 975 | static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net) |
| 977 | { | 976 | { |
| 978 | struct svc_pool *pool; | 977 | struct svc_pool *pool; |
| 979 | struct svc_xprt *xprt; | 978 | struct svc_xprt *xprt; |
| @@ -988,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net) | |||
| 988 | if (xprt->xpt_net != net) | 987 | if (xprt->xpt_net != net) |
| 989 | continue; | 988 | continue; |
| 990 | list_del_init(&xprt->xpt_ready); | 989 | list_del_init(&xprt->xpt_ready); |
| 990 | spin_unlock_bh(&pool->sp_lock); | ||
| 991 | return xprt; | ||
| 991 | } | 992 | } |
| 992 | spin_unlock_bh(&pool->sp_lock); | 993 | spin_unlock_bh(&pool->sp_lock); |
| 993 | } | 994 | } |
| 995 | return NULL; | ||
| 994 | } | 996 | } |
| 995 | 997 | ||
| 996 | static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) | 998 | static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net) |
| 997 | { | 999 | { |
| 998 | struct svc_xprt *xprt; | 1000 | struct svc_xprt *xprt; |
| 999 | struct svc_xprt *tmp; | ||
| 1000 | LIST_HEAD(victims); | ||
| 1001 | 1001 | ||
| 1002 | spin_lock(&serv->sv_lock); | 1002 | while ((xprt = svc_dequeue_net(serv, net))) { |
| 1003 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 1003 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
| 1004 | if (xprt->xpt_net != net) | ||
| 1005 | continue; | ||
| 1006 | list_move(&xprt->xpt_list, &victims); | ||
| 1007 | } | ||
| 1008 | spin_unlock(&serv->sv_lock); | ||
| 1009 | |||
| 1010 | list_for_each_entry_safe(xprt, tmp, &victims, xpt_list) | ||
| 1011 | svc_delete_xprt(xprt); | 1004 | svc_delete_xprt(xprt); |
| 1005 | } | ||
| 1012 | } | 1006 | } |
| 1013 | 1007 | ||
| 1008 | /* | ||
| 1009 | * Server threads may still be running (especially in the case where the | ||
| 1010 | * service is still running in other network namespaces). | ||
| 1011 | * | ||
| 1012 | * So we shut down sockets the same way we would on a running server, by | ||
| 1013 | * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do | ||
| 1014 | * the close. In the case there are no such other threads, | ||
| 1015 | * svc_clean_up_xprts() does a simple version of a | ||
| 1016 | * server's main event loop, and in the case where there are other | ||
| 1017 | * threads, we may need to wait a little while and then check again to | ||
| 1018 | * see if they're done. | ||
| 1019 | */ | ||
| 1014 | void svc_close_net(struct svc_serv *serv, struct net *net) | 1020 | void svc_close_net(struct svc_serv *serv, struct net *net) |
| 1015 | { | 1021 | { |
| 1016 | svc_close_list(serv, &serv->sv_tempsocks, net); | 1022 | int delay = 0; |
| 1017 | svc_close_list(serv, &serv->sv_permsocks, net); | ||
| 1018 | 1023 | ||
| 1019 | svc_clear_pools(serv, net); | 1024 | while (svc_close_list(serv, &serv->sv_permsocks, net) + |
| 1020 | /* | 1025 | svc_close_list(serv, &serv->sv_tempsocks, net)) { |
| 1021 | * At this point the sp_sockets lists will stay empty, since | 1026 | |
| 1022 | * svc_xprt_enqueue will not add new entries without taking the | 1027 | svc_clean_up_xprts(serv, net); |
| 1023 | * sp_lock and checking XPT_BUSY. | 1028 | msleep(delay++); |
| 1024 | */ | 1029 | } |
| 1025 | svc_clear_list(serv, &serv->sv_tempsocks, net); | ||
| 1026 | svc_clear_list(serv, &serv->sv_permsocks, net); | ||
| 1027 | } | 1030 | } |
| 1028 | 1031 | ||
| 1029 | /* | 1032 | /* |
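
The resulting shutdown loop is small enough to restate; the helpers below are illustrative stand-ins for svc_close_list() and svc_clean_up_xprts():

    #include <linux/delay.h>

    /* Keep flagging this net's transports until none remain, backing
     * off slightly longer each pass so running service threads can
     * finish the deletes themselves. */
    static void close_net_sketch(struct svc_serv *serv, struct net *net)
    {
            int delay = 0;

            while (flag_for_close(serv, net) > 0) {     /* illustrative */
                    reap_unowned_xprts(serv, net);      /* illustrative */
                    msleep(delay++);
            }
    }
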
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 7963569fc04f..2af7b0cba43a 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
| @@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new) | |||
| 138 | { | 138 | { |
| 139 | struct auth_domain *hp; | 139 | struct auth_domain *hp; |
| 140 | struct hlist_head *head; | 140 | struct hlist_head *head; |
| 141 | struct hlist_node *np; | ||
| 142 | 141 | ||
| 143 | head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; | 142 | head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; |
| 144 | 143 | ||
| 145 | spin_lock(&auth_domain_lock); | 144 | spin_lock(&auth_domain_lock); |
| 146 | 145 | ||
| 147 | hlist_for_each_entry(hp, np, head, hash) { | 146 | hlist_for_each_entry(hp, head, hash) { |
| 148 | if (strcmp(hp->name, name)==0) { | 147 | if (strcmp(hp->name, name)==0) { |
| 149 | kref_get(&hp->ref); | 148 | kref_get(&hp->ref); |
| 150 | spin_unlock(&auth_domain_lock); | 149 | spin_unlock(&auth_domain_lock); |
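
Note on the iterator change used here and in cache.c: the hlist rework in this merge window derives the macro's cursor from the entry itself, so the separate struct hlist_node * argument goes away.

    /*
     *   old: hlist_for_each_entry(tpos, pos, head, member)
     *   new: hlist_for_each_entry(pos, head, member)
     *
     * and for the lookahead variant:
     *
     *   old: hlist_for_each_entry_safe(tpos, pos, n, head, member)
     *   new: hlist_for_each_entry_safe(pos, n, head, member)
     */
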
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 4d0129203733..c3f9e1ef7f53 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <linux/sunrpc/svcsock.h> | 6 | #include <linux/sunrpc/svcsock.h> |
| 7 | #include <linux/sunrpc/svcauth.h> | 7 | #include <linux/sunrpc/svcauth.h> |
| 8 | #include <linux/sunrpc/gss_api.h> | 8 | #include <linux/sunrpc/gss_api.h> |
| 9 | #include <linux/sunrpc/addr.h> | ||
| 9 | #include <linux/err.h> | 10 | #include <linux/err.h> |
| 10 | #include <linux/seq_file.h> | 11 | #include <linux/seq_file.h> |
| 11 | #include <linux/hash.h> | 12 | #include <linux/hash.h> |
| @@ -17,7 +18,6 @@ | |||
| 17 | #include <linux/user_namespace.h> | 18 | #include <linux/user_namespace.h> |
| 18 | #define RPCDBG_FACILITY RPCDBG_AUTH | 19 | #define RPCDBG_FACILITY RPCDBG_AUTH |
| 19 | 20 | ||
| 20 | #include <linux/sunrpc/clnt.h> | ||
| 21 | 21 | ||
| 22 | #include "netns.h" | 22 | #include "netns.h" |
| 23 | 23 | ||
| @@ -157,11 +157,6 @@ static void ip_map_request(struct cache_detail *cd, | |||
| 157 | (*bpp)[-1] = '\n'; | 157 | (*bpp)[-1] = '\n'; |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h) | ||
| 161 | { | ||
| 162 | return sunrpc_cache_pipe_upcall(cd, h, ip_map_request); | ||
| 163 | } | ||
| 164 | |||
| 165 | static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); | 160 | static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); |
| 166 | static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry); | 161 | static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry); |
| 167 | 162 | ||
| @@ -415,10 +410,15 @@ svcauth_unix_info_release(struct svc_xprt *xpt) | |||
| 415 | 410 | ||
| 416 | struct unix_gid { | 411 | struct unix_gid { |
| 417 | struct cache_head h; | 412 | struct cache_head h; |
| 418 | uid_t uid; | 413 | kuid_t uid; |
| 419 | struct group_info *gi; | 414 | struct group_info *gi; |
| 420 | }; | 415 | }; |
| 421 | 416 | ||
| 417 | static int unix_gid_hash(kuid_t uid) | ||
| 418 | { | ||
| 419 | return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS); | ||
| 420 | } | ||
| 421 | |||
| 422 | static void unix_gid_put(struct kref *kref) | 422 | static void unix_gid_put(struct kref *kref) |
| 423 | { | 423 | { |
| 424 | struct cache_head *item = container_of(kref, struct cache_head, ref); | 424 | struct cache_head *item = container_of(kref, struct cache_head, ref); |
| @@ -433,7 +433,7 @@ static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew) | |||
| 433 | { | 433 | { |
| 434 | struct unix_gid *orig = container_of(corig, struct unix_gid, h); | 434 | struct unix_gid *orig = container_of(corig, struct unix_gid, h); |
| 435 | struct unix_gid *new = container_of(cnew, struct unix_gid, h); | 435 | struct unix_gid *new = container_of(cnew, struct unix_gid, h); |
| 436 | return orig->uid == new->uid; | 436 | return uid_eq(orig->uid, new->uid); |
| 437 | } | 437 | } |
| 438 | static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem) | 438 | static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem) |
| 439 | { | 439 | { |
| @@ -465,23 +465,19 @@ static void unix_gid_request(struct cache_detail *cd, | |||
| 465 | char tuid[20]; | 465 | char tuid[20]; |
| 466 | struct unix_gid *ug = container_of(h, struct unix_gid, h); | 466 | struct unix_gid *ug = container_of(h, struct unix_gid, h); |
| 467 | 467 | ||
| 468 | snprintf(tuid, 20, "%u", ug->uid); | 468 | snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid)); |
| 469 | qword_add(bpp, blen, tuid); | 469 | qword_add(bpp, blen, tuid); |
| 470 | (*bpp)[-1] = '\n'; | 470 | (*bpp)[-1] = '\n'; |
| 471 | } | 471 | } |
| 472 | 472 | ||
| 473 | static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h) | 473 | static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid); |
| 474 | { | ||
| 475 | return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request); | ||
| 476 | } | ||
| 477 | |||
| 478 | static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid); | ||
| 479 | 474 | ||
| 480 | static int unix_gid_parse(struct cache_detail *cd, | 475 | static int unix_gid_parse(struct cache_detail *cd, |
| 481 | char *mesg, int mlen) | 476 | char *mesg, int mlen) |
| 482 | { | 477 | { |
| 483 | /* uid expiry Ngid gid0 gid1 ... gidN-1 */ | 478 | /* uid expiry Ngid gid0 gid1 ... gidN-1 */ |
| 484 | int uid; | 479 | int id; |
| 480 | kuid_t uid; | ||
| 485 | int gids; | 481 | int gids; |
| 486 | int rv; | 482 | int rv; |
| 487 | int i; | 483 | int i; |
| @@ -493,9 +489,12 @@ static int unix_gid_parse(struct cache_detail *cd, | |||
| 493 | return -EINVAL; | 489 | return -EINVAL; |
| 494 | mesg[mlen-1] = 0; | 490 | mesg[mlen-1] = 0; |
| 495 | 491 | ||
| 496 | rv = get_int(&mesg, &uid); | 492 | rv = get_int(&mesg, &id); |
| 497 | if (rv) | 493 | if (rv) |
| 498 | return -EINVAL; | 494 | return -EINVAL; |
| 495 | uid = make_kuid(&init_user_ns, id); | ||
| 496 | if (!uid_valid(uid)) | ||
| 497 | return -EINVAL; | ||
| 499 | ug.uid = uid; | 498 | ug.uid = uid; |
| 500 | 499 | ||
| 501 | expiry = get_expiry(&mesg); | 500 | expiry = get_expiry(&mesg); |
| @@ -530,7 +529,7 @@ static int unix_gid_parse(struct cache_detail *cd, | |||
| 530 | ug.h.expiry_time = expiry; | 529 | ug.h.expiry_time = expiry; |
| 531 | ch = sunrpc_cache_update(cd, | 530 | ch = sunrpc_cache_update(cd, |
| 532 | &ug.h, &ugp->h, | 531 | &ug.h, &ugp->h, |
| 533 | hash_long(uid, GID_HASHBITS)); | 532 | unix_gid_hash(uid)); |
| 534 | if (!ch) | 533 | if (!ch) |
| 535 | err = -ENOMEM; | 534 | err = -ENOMEM; |
| 536 | else { | 535 | else { |
| @@ -549,7 +548,7 @@ static int unix_gid_show(struct seq_file *m, | |||
| 549 | struct cache_detail *cd, | 548 | struct cache_detail *cd, |
| 550 | struct cache_head *h) | 549 | struct cache_head *h) |
| 551 | { | 550 | { |
| 552 | struct user_namespace *user_ns = current_user_ns(); | 551 | struct user_namespace *user_ns = &init_user_ns; |
| 553 | struct unix_gid *ug; | 552 | struct unix_gid *ug; |
| 554 | int i; | 553 | int i; |
| 555 | int glen; | 554 | int glen; |
| @@ -565,7 +564,7 @@ static int unix_gid_show(struct seq_file *m, | |||
| 565 | else | 564 | else |
| 566 | glen = 0; | 565 | glen = 0; |
| 567 | 566 | ||
| 568 | seq_printf(m, "%u %d:", ug->uid, glen); | 567 | seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen); |
| 569 | for (i = 0; i < glen; i++) | 568 | for (i = 0; i < glen; i++) |
| 570 | seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i))); | 569 | seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i))); |
| 571 | seq_printf(m, "\n"); | 570 | seq_printf(m, "\n"); |
| @@ -577,7 +576,7 @@ static struct cache_detail unix_gid_cache_template = { | |||
| 577 | .hash_size = GID_HASHMAX, | 576 | .hash_size = GID_HASHMAX, |
| 578 | .name = "auth.unix.gid", | 577 | .name = "auth.unix.gid", |
| 579 | .cache_put = unix_gid_put, | 578 | .cache_put = unix_gid_put, |
| 580 | .cache_upcall = unix_gid_upcall, | 579 | .cache_request = unix_gid_request, |
| 581 | .cache_parse = unix_gid_parse, | 580 | .cache_parse = unix_gid_parse, |
| 582 | .cache_show = unix_gid_show, | 581 | .cache_show = unix_gid_show, |
| 583 | .match = unix_gid_match, | 582 | .match = unix_gid_match, |
| @@ -615,20 +614,20 @@ void unix_gid_cache_destroy(struct net *net) | |||
| 615 | cache_destroy_net(cd, net); | 614 | cache_destroy_net(cd, net); |
| 616 | } | 615 | } |
| 617 | 616 | ||
| 618 | static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid) | 617 | static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid) |
| 619 | { | 618 | { |
| 620 | struct unix_gid ug; | 619 | struct unix_gid ug; |
| 621 | struct cache_head *ch; | 620 | struct cache_head *ch; |
| 622 | 621 | ||
| 623 | ug.uid = uid; | 622 | ug.uid = uid; |
| 624 | ch = sunrpc_cache_lookup(cd, &ug.h, hash_long(uid, GID_HASHBITS)); | 623 | ch = sunrpc_cache_lookup(cd, &ug.h, unix_gid_hash(uid)); |
| 625 | if (ch) | 624 | if (ch) |
| 626 | return container_of(ch, struct unix_gid, h); | 625 | return container_of(ch, struct unix_gid, h); |
| 627 | else | 626 | else |
| 628 | return NULL; | 627 | return NULL; |
| 629 | } | 628 | } |
| 630 | 629 | ||
| 631 | static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) | 630 | static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp) |
| 632 | { | 631 | { |
| 633 | struct unix_gid *ug; | 632 | struct unix_gid *ug; |
| 634 | struct group_info *gi; | 633 | struct group_info *gi; |
| @@ -750,8 +749,8 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 750 | } | 749 | } |
| 751 | 750 | ||
| 752 | /* Signal that mapping to nobody uid/gid is required */ | 751 | /* Signal that mapping to nobody uid/gid is required */ |
| 753 | cred->cr_uid = (uid_t) -1; | 752 | cred->cr_uid = INVALID_UID; |
| 754 | cred->cr_gid = (gid_t) -1; | 753 | cred->cr_gid = INVALID_GID; |
| 755 | cred->cr_group_info = groups_alloc(0); | 754 | cred->cr_group_info = groups_alloc(0); |
| 756 | if (cred->cr_group_info == NULL) | 755 | if (cred->cr_group_info == NULL) |
| 757 | return SVC_CLOSE; /* kmalloc failure - client must retry */ | 756 | return SVC_CLOSE; /* kmalloc failure - client must retry */ |
| @@ -812,8 +811,10 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 812 | argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ | 811 | argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ |
| 813 | argv->iov_len -= slen*4; | 812 | argv->iov_len -= slen*4; |
| 814 | 813 | ||
| 815 | cred->cr_uid = svc_getnl(argv); /* uid */ | 814 | cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ |
| 816 | cred->cr_gid = svc_getnl(argv); /* gid */ | 815 | cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ |
| 816 | if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid)) | ||
| 817 | goto badcred; | ||
| 817 | slen = svc_getnl(argv); /* gids length */ | 818 | slen = svc_getnl(argv); /* gids length */ |
| 818 | if (slen > 16 || (len -= (slen + 2)*4) < 0) | 819 | if (slen > 16 || (len -= (slen + 2)*4) < 0) |
| 819 | goto badcred; | 820 | goto badcred; |
| @@ -874,7 +875,7 @@ static struct cache_detail ip_map_cache_template = { | |||
| 874 | .hash_size = IP_HASHMAX, | 875 | .hash_size = IP_HASHMAX, |
| 875 | .name = "auth.unix.ip", | 876 | .name = "auth.unix.ip", |
| 876 | .cache_put = ip_map_put, | 877 | .cache_put = ip_map_put, |
| 877 | .cache_upcall = ip_map_upcall, | 878 | .cache_request = ip_map_request, |
| 878 | .cache_parse = ip_map_parse, | 879 | .cache_parse = ip_map_parse, |
| 879 | .cache_show = ip_map_show, | 880 | .cache_show = ip_map_show, |
| 880 | .match = ip_map_match, | 881 | .match = ip_map_match, |
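
Note: unix_gid_hash() shows the standard way to hash an opaque kuid_t: map it back to a plain id in &init_user_ns first, then hash that. Sketch:

    #include <linux/hash.h>
    #include <linux/uidgid.h>
    #include <linux/user_namespace.h>

    static int hash_kuid(kuid_t uid, unsigned int bits)
    {
            return hash_long(from_kuid(&init_user_ns, uid), bits);
    }
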
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 56055632f151..75edcfad6e26 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -879,6 +879,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
| 879 | } | 879 | } |
| 880 | EXPORT_SYMBOL_GPL(xdr_buf_subsegment); | 880 | EXPORT_SYMBOL_GPL(xdr_buf_subsegment); |
| 881 | 881 | ||
| 882 | /** | ||
| 883 | * xdr_buf_trim - lop at most "len" bytes off the end of "buf" | ||
| 884 | * @buf: buf to be trimmed | ||
| 885 | * @len: number of bytes to reduce "buf" by | ||
| 886 | * | ||
| 887 | * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note | ||
| 888 | * that it's possible that we'll trim less than that amount if the xdr_buf is | ||
| 889 | * too small, or if (for instance) it's all in the head and the parser has | ||
| 890 | * already read too far into it. | ||
| 891 | */ | ||
| 892 | void xdr_buf_trim(struct xdr_buf *buf, unsigned int len) | ||
| 893 | { | ||
| 894 | size_t cur; | ||
| 895 | unsigned int trim = len; | ||
| 896 | |||
| 897 | if (buf->tail[0].iov_len) { | ||
| 898 | cur = min_t(size_t, buf->tail[0].iov_len, trim); | ||
| 899 | buf->tail[0].iov_len -= cur; | ||
| 900 | trim -= cur; | ||
| 901 | if (!trim) | ||
| 902 | goto fix_len; | ||
| 903 | } | ||
| 904 | |||
| 905 | if (buf->page_len) { | ||
| 906 | cur = min_t(unsigned int, buf->page_len, trim); | ||
| 907 | buf->page_len -= cur; | ||
| 908 | trim -= cur; | ||
| 909 | if (!trim) | ||
| 910 | goto fix_len; | ||
| 911 | } | ||
| 912 | |||
| 913 | if (buf->head[0].iov_len) { | ||
| 914 | cur = min_t(size_t, buf->head[0].iov_len, trim); | ||
| 915 | buf->head[0].iov_len -= cur; | ||
| 916 | trim -= cur; | ||
| 917 | } | ||
| 918 | fix_len: | ||
| 919 | buf->len -= (len - trim); | ||
| 920 | } | ||
| 921 | EXPORT_SYMBOL_GPL(xdr_buf_trim); | ||
| 922 | |||
| 882 | static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) | 923 | static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) |
| 883 | { | 924 | { |
| 884 | unsigned int this_len; | 925 | unsigned int this_len; |
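
For reference, the usage in the GSS paths above: after integrity verification the buffer still carries the four-byte sequence copy plus the MIC at its end, so

    /* Leave the dispatcher a payload-only buffer; the trim consumes
     * from the tail, then the pages, then the head. */
    xdr_buf_trim(buf, mic.len + 4);
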
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 846c34fdee9f..b7478d5e7ffd 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -487,13 +487,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks); | |||
| 487 | * xprt_wait_for_buffer_space - wait for transport output buffer to clear | 487 | * xprt_wait_for_buffer_space - wait for transport output buffer to clear |
| 488 | * @task: task to be put to sleep | 488 | * @task: task to be put to sleep |
| 489 | * @action: function pointer to be executed after wait | 489 | * @action: function pointer to be executed after wait |
| 490 | * | ||
| 491 | * Note that we only set the timer for the case of RPC_IS_SOFT(), since | ||
| 492 | * we don't in general want to force a socket disconnection due to | ||
| 493 | * an incomplete RPC call transmission. | ||
| 490 | */ | 494 | */ |
| 491 | void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action) | 495 | void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action) |
| 492 | { | 496 | { |
| 493 | struct rpc_rqst *req = task->tk_rqstp; | 497 | struct rpc_rqst *req = task->tk_rqstp; |
| 494 | struct rpc_xprt *xprt = req->rq_xprt; | 498 | struct rpc_xprt *xprt = req->rq_xprt; |
| 495 | 499 | ||
| 496 | task->tk_timeout = req->rq_timeout; | 500 | task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0; |
| 497 | rpc_sleep_on(&xprt->pending, task, action); | 501 | rpc_sleep_on(&xprt->pending, task, action); |
| 498 | } | 502 | } |
| 499 | EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); | 503 | EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); |
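
Note: rpc_sleep_on() treats tk_timeout == 0 as "no timer at all", so after this change only RPC_IS_SOFT() tasks can time out while waiting for socket buffer space; hard tasks wait until the transport itself makes progress or disconnects.

    /* The relevant line, annotated: soft tasks keep their per-request
     * timeout, hard tasks sleep untimed. */
    task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
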
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index d0074289708e..794312f22b9b 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <linux/init.h> | 51 | #include <linux/init.h> |
| 52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
| 53 | #include <linux/seq_file.h> | 53 | #include <linux/seq_file.h> |
| 54 | #include <linux/sunrpc/addr.h> | ||
| 54 | 55 | ||
| 55 | #include "xprt_rdma.h" | 56 | #include "xprt_rdma.h" |
| 56 | 57 | ||
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 745973b729af..93726560eaa8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -1086,7 +1086,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, | |||
| 1086 | case RPCRDMA_MEMWINDOWS: | 1086 | case RPCRDMA_MEMWINDOWS: |
| 1087 | /* Allocate one extra request's worth, for full cycling */ | 1087 | /* Allocate one extra request's worth, for full cycling */ |
| 1088 | for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { | 1088 | for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { |
| 1089 | r->r.mw = ib_alloc_mw(ia->ri_pd); | 1089 | r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1); |
| 1090 | if (IS_ERR(r->r.mw)) { | 1090 | if (IS_ERR(r->r.mw)) { |
| 1091 | rc = PTR_ERR(r->r.mw); | 1091 | rc = PTR_ERR(r->r.mw); |
| 1092 | dprintk("RPC: %s: ib_alloc_mw" | 1092 | dprintk("RPC: %s: ib_alloc_mw" |
| @@ -1673,12 +1673,12 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg, | |||
| 1673 | 1673 | ||
| 1674 | *nsegs = 1; | 1674 | *nsegs = 1; |
| 1675 | rpcrdma_map_one(ia, seg, writing); | 1675 | rpcrdma_map_one(ia, seg, writing); |
| 1676 | param.mr = ia->ri_bind_mem; | 1676 | param.bind_info.mr = ia->ri_bind_mem; |
| 1677 | param.wr_id = 0ULL; /* no send cookie */ | 1677 | param.wr_id = 0ULL; /* no send cookie */ |
| 1678 | param.addr = seg->mr_dma; | 1678 | param.bind_info.addr = seg->mr_dma; |
| 1679 | param.length = seg->mr_len; | 1679 | param.bind_info.length = seg->mr_len; |
| 1680 | param.send_flags = 0; | 1680 | param.send_flags = 0; |
| 1681 | param.mw_access_flags = mem_priv; | 1681 | param.bind_info.mw_access_flags = mem_priv; |
| 1682 | 1682 | ||
| 1683 | DECR_CQCOUNT(&r_xprt->rx_ep); | 1683 | DECR_CQCOUNT(&r_xprt->rx_ep); |
| 1684 | rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, ¶m); | 1684 | rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, ¶m); |
| @@ -1690,7 +1690,7 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg, | |||
| 1690 | rpcrdma_unmap_one(ia, seg); | 1690 | rpcrdma_unmap_one(ia, seg); |
| 1691 | } else { | 1691 | } else { |
| 1692 | seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey; | 1692 | seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey; |
| 1693 | seg->mr_base = param.addr; | 1693 | seg->mr_base = param.bind_info.addr; |
| 1694 | seg->mr_nsegs = 1; | 1694 | seg->mr_nsegs = 1; |
| 1695 | } | 1695 | } |
| 1696 | return rc; | 1696 | return rc; |
| @@ -1706,10 +1706,10 @@ rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg, | |||
| 1706 | int rc; | 1706 | int rc; |
| 1707 | 1707 | ||
| 1708 | BUG_ON(seg->mr_nsegs != 1); | 1708 | BUG_ON(seg->mr_nsegs != 1); |
| 1709 | param.mr = ia->ri_bind_mem; | 1709 | param.bind_info.mr = ia->ri_bind_mem; |
| 1710 | param.addr = 0ULL; /* unbind */ | 1710 | param.bind_info.addr = 0ULL; /* unbind */ |
| 1711 | param.length = 0; | 1711 | param.bind_info.length = 0; |
| 1712 | param.mw_access_flags = 0; | 1712 | param.bind_info.mw_access_flags = 0; |
| 1713 | if (*r) { | 1713 | if (*r) { |
| 1714 | param.wr_id = (u64) (unsigned long) *r; | 1714 | param.wr_id = (u64) (unsigned long) *r; |
| 1715 | param.send_flags = IB_SEND_SIGNALED; | 1715 | param.send_flags = IB_SEND_SIGNALED; |
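Both verbs.c changes above track the ib_verbs API rework: ib_alloc_mw() now takes an explicit memory-window type, and the memory description in struct ib_mw_bind moved into an embedded bind_info. A condensed sketch under the new API; pd, qp, mr, dma_addr and len are assumed to exist in the caller:

	struct ib_mw *mw = ib_alloc_mw(pd, IB_MW_TYPE_1);  /* type now explicit */
	struct ib_mw_bind param = {
		.wr_id                     = 0ULL,
		.send_flags                = 0,
		.bind_info.mr              = mr,        /* fields that used to */
		.bind_info.addr            = dma_addr,  /* live directly in    */
		.bind_info.length          = len,       /* struct ib_mw_bind   */
		.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE,
	};
	int rc = IS_ERR(mw) ? PTR_ERR(mw) : ib_bind_mw(qp, mw, &param);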
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 37cbda63f45c..c1d8476b7692 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/udp.h> | 33 | #include <linux/udp.h> |
| 34 | #include <linux/tcp.h> | 34 | #include <linux/tcp.h> |
| 35 | #include <linux/sunrpc/clnt.h> | 35 | #include <linux/sunrpc/clnt.h> |
| 36 | #include <linux/sunrpc/addr.h> | ||
| 36 | #include <linux/sunrpc/sched.h> | 37 | #include <linux/sunrpc/sched.h> |
| 37 | #include <linux/sunrpc/svcsock.h> | 38 | #include <linux/sunrpc/svcsock.h> |
| 38 | #include <linux/sunrpc/xprtsock.h> | 39 | #include <linux/sunrpc/xprtsock.h> |
| @@ -1867,13 +1868,9 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, | |||
| 1867 | * @xprt: RPC transport to connect | 1868 | * @xprt: RPC transport to connect |
| 1868 | * @transport: socket transport to connect | 1869 | * @transport: socket transport to connect |
| 1869 | * @create_sock: function to create a socket of the correct type | 1870 | * @create_sock: function to create a socket of the correct type |
| 1870 | * | ||
| 1871 | * Invoked by a work queue tasklet. | ||
| 1872 | */ | 1871 | */ |
| 1873 | static void xs_local_setup_socket(struct work_struct *work) | 1872 | static int xs_local_setup_socket(struct sock_xprt *transport) |
| 1874 | { | 1873 | { |
| 1875 | struct sock_xprt *transport = | ||
| 1876 | container_of(work, struct sock_xprt, connect_worker.work); | ||
| 1877 | struct rpc_xprt *xprt = &transport->xprt; | 1874 | struct rpc_xprt *xprt = &transport->xprt; |
| 1878 | struct socket *sock; | 1875 | struct socket *sock; |
| 1879 | int status = -EIO; | 1876 | int status = -EIO; |
| @@ -1918,6 +1915,30 @@ out: | |||
| 1918 | xprt_clear_connecting(xprt); | 1915 | xprt_clear_connecting(xprt); |
| 1919 | xprt_wake_pending_tasks(xprt, status); | 1916 | xprt_wake_pending_tasks(xprt, status); |
| 1920 | current->flags &= ~PF_FSTRANS; | 1917 | current->flags &= ~PF_FSTRANS; |
| 1918 | return status; | ||
| 1919 | } | ||
| 1920 | |||
| 1921 | static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) | ||
| 1922 | { | ||
| 1923 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
| 1924 | int ret; | ||
| 1925 | |||
| 1926 | if (RPC_IS_ASYNC(task)) { | ||
| 1927 | /* | ||
| 1928 | * We want the AF_LOCAL connect to be resolved in the | ||
| 1929 | * filesystem namespace of the process making the rpc | ||
| 1930 | * call. Thus we connect synchronously. | ||
| 1931 | * | ||
| 1932 | * If we want to support asynchronous AF_LOCAL calls, | ||
| 1933 | * we'll need to figure out how to pass a namespace to | ||
| 1934 | * connect. | ||
| 1935 | */ | ||
| 1936 | rpc_exit(task, -ENOTCONN); | ||
| 1937 | return; | ||
| 1938 | } | ||
| 1939 | ret = xs_local_setup_socket(transport); | ||
| 1940 | if (ret && !RPC_IS_SOFTCONN(task)) | ||
| 1941 | msleep_interruptible(15000); | ||
| 1921 | } | 1942 | } |
| 1922 | 1943 | ||
| 1923 | #ifdef CONFIG_SUNRPC_SWAP | 1944 | #ifdef CONFIG_SUNRPC_SWAP |
| @@ -2455,7 +2476,7 @@ static struct rpc_xprt_ops xs_local_ops = { | |||
| 2455 | .alloc_slot = xprt_alloc_slot, | 2476 | .alloc_slot = xprt_alloc_slot, |
| 2456 | .rpcbind = xs_local_rpcbind, | 2477 | .rpcbind = xs_local_rpcbind, |
| 2457 | .set_port = xs_local_set_port, | 2478 | .set_port = xs_local_set_port, |
| 2458 | .connect = xs_connect, | 2479 | .connect = xs_local_connect, |
| 2459 | .buf_alloc = rpc_malloc, | 2480 | .buf_alloc = rpc_malloc, |
| 2460 | .buf_free = rpc_free, | 2481 | .buf_free = rpc_free, |
| 2461 | .send_request = xs_local_send_request, | 2482 | .send_request = xs_local_send_request, |
| @@ -2628,8 +2649,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) | |||
| 2628 | goto out_err; | 2649 | goto out_err; |
| 2629 | } | 2650 | } |
| 2630 | xprt_set_bound(xprt); | 2651 | xprt_set_bound(xprt); |
| 2631 | INIT_DELAYED_WORK(&transport->connect_worker, | ||
| 2632 | xs_local_setup_socket); | ||
| 2633 | xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); | 2652 | xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); |
| 2634 | break; | 2653 | break; |
| 2635 | default: | 2654 | default: |
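The ops-table hunk above is what actually reroutes connection setup: the generic layer only ever invokes the transport's connect op, so AF_LOCAL transports now reach xs_local_connect() instead of the generic xs_connect(). Roughly:

	/* Illustrative dispatch only -- not a quoted kernel line. For an
	 * AF_LOCAL transport this now lands in xs_local_connect(), which
	 * rejects async tasks with -ENOTCONN and otherwise connects
	 * synchronously in the caller's filesystem namespace. */
	xprt->ops->connect(xprt, task);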
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 46754779fd3d..24b167914311 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, | |||
| 473 | static struct name_seq *nametbl_find_seq(u32 type) | 473 | static struct name_seq *nametbl_find_seq(u32 type) |
| 474 | { | 474 | { |
| 475 | struct hlist_head *seq_head; | 475 | struct hlist_head *seq_head; |
| 476 | struct hlist_node *seq_node; | ||
| 477 | struct name_seq *ns; | 476 | struct name_seq *ns; |
| 478 | 477 | ||
| 479 | seq_head = &table.types[hash(type)]; | 478 | seq_head = &table.types[hash(type)]; |
| 480 | hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { | 479 | hlist_for_each_entry(ns, seq_head, ns_list) { |
| 481 | if (ns->type == type) | 480 | if (ns->type == type) |
| 482 | return ns; | 481 | return ns; |
| 483 | } | 482 | } |
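This hunk is the first of many mechanical conversions in this series: hlist_for_each_entry() (and wrappers built on it, such as sk_for_each()) lost their separate struct hlist_node cursor, since the macro can now derive the node from the typed entry itself. Side by side, using this function's own variables:

	/* Old form needed an extra cursor variable:
	 *     struct hlist_node *seq_node;
	 *     hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { ... }
	 * New form -- the entry pointer is the only loop variable: */
	struct name_seq *ns;

	hlist_for_each_entry(ns, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}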
| @@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
| 853 | u32 type, u32 lowbound, u32 upbound) | 852 | u32 type, u32 lowbound, u32 upbound) |
| 854 | { | 853 | { |
| 855 | struct hlist_head *seq_head; | 854 | struct hlist_head *seq_head; |
| 856 | struct hlist_node *seq_node; | ||
| 857 | struct name_seq *seq; | 855 | struct name_seq *seq; |
| 858 | int all_types; | 856 | int all_types; |
| 859 | int ret = 0; | 857 | int ret = 0; |
| @@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
| 873 | upbound = ~0; | 871 | upbound = ~0; |
| 874 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 872 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
| 875 | seq_head = &table.types[i]; | 873 | seq_head = &table.types[i]; |
| 876 | hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { | 874 | hlist_for_each_entry(seq, seq_head, ns_list) { |
| 877 | ret += nameseq_list(seq, buf + ret, len - ret, | 875 | ret += nameseq_list(seq, buf + ret, len - ret, |
| 878 | depth, seq->type, | 876 | depth, seq->type, |
| 879 | lowbound, upbound, i); | 877 | lowbound, upbound, i); |
| @@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
| 889 | ret += nametbl_header(buf + ret, len - ret, depth); | 887 | ret += nametbl_header(buf + ret, len - ret, depth); |
| 890 | i = hash(type); | 888 | i = hash(type); |
| 891 | seq_head = &table.types[i]; | 889 | seq_head = &table.types[i]; |
| 892 | hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { | 890 | hlist_for_each_entry(seq, seq_head, ns_list) { |
| 893 | if (seq->type == type) { | 891 | if (seq->type == type) { |
| 894 | ret += nameseq_list(seq, buf + ret, len - ret, | 892 | ret += nameseq_list(seq, buf + ret, len - ret, |
| 895 | depth, type, | 893 | depth, type, |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 48f39dd3eae8..6e6c434872e8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr) | |||
| 69 | struct tipc_node *tipc_node_find(u32 addr) | 69 | struct tipc_node *tipc_node_find(u32 addr) |
| 70 | { | 70 | { |
| 71 | struct tipc_node *node; | 71 | struct tipc_node *node; |
| 72 | struct hlist_node *pos; | ||
| 73 | 72 | ||
| 74 | if (unlikely(!in_own_cluster_exact(addr))) | 73 | if (unlikely(!in_own_cluster_exact(addr))) |
| 75 | return NULL; | 74 | return NULL; |
| 76 | 75 | ||
| 77 | hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { | 76 | hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) { |
| 78 | if (node->addr == addr) | 77 | if (node->addr == addr) |
| 79 | return node; | 78 | return node; |
| 80 | } | 79 | } |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 87d284289012..51be64f163ec 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net, | |||
| 263 | int len, int type, unsigned int hash) | 263 | int len, int type, unsigned int hash) |
| 264 | { | 264 | { |
| 265 | struct sock *s; | 265 | struct sock *s; |
| 266 | struct hlist_node *node; | ||
| 267 | 266 | ||
| 268 | sk_for_each(s, node, &unix_socket_table[hash ^ type]) { | 267 | sk_for_each(s, &unix_socket_table[hash ^ type]) { |
| 269 | struct unix_sock *u = unix_sk(s); | 268 | struct unix_sock *u = unix_sk(s); |
| 270 | 269 | ||
| 271 | if (!net_eq(sock_net(s), net)) | 270 | if (!net_eq(sock_net(s), net)) |
| @@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net, | |||
| 298 | static struct sock *unix_find_socket_byinode(struct inode *i) | 297 | static struct sock *unix_find_socket_byinode(struct inode *i) |
| 299 | { | 298 | { |
| 300 | struct sock *s; | 299 | struct sock *s; |
| 301 | struct hlist_node *node; | ||
| 302 | 300 | ||
| 303 | spin_lock(&unix_table_lock); | 301 | spin_lock(&unix_table_lock); |
| 304 | sk_for_each(s, node, | 302 | sk_for_each(s, |
| 305 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { | 303 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { |
| 306 | struct dentry *dentry = unix_sk(s)->path.dentry; | 304 | struct dentry *dentry = unix_sk(s)->path.dentry; |
| 307 | 305 | ||
diff --git a/net/unix/diag.c b/net/unix/diag.c index 5ac19dc1d5e4..d591091603bf 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
| @@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 192 | slot < ARRAY_SIZE(unix_socket_table); | 192 | slot < ARRAY_SIZE(unix_socket_table); |
| 193 | s_num = 0, slot++) { | 193 | s_num = 0, slot++) { |
| 194 | struct sock *sk; | 194 | struct sock *sk; |
| 195 | struct hlist_node *node; | ||
| 196 | 195 | ||
| 197 | num = 0; | 196 | num = 0; |
| 198 | sk_for_each(sk, node, &unix_socket_table[slot]) { | 197 | sk_for_each(sk, &unix_socket_table[slot]) { |
| 199 | if (!net_eq(sock_net(sk), net)) | 198 | if (!net_eq(sock_net(sk), net)) |
| 200 | continue; | 199 | continue; |
| 201 | if (num < s_num) | 200 | if (num < s_num) |
| @@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino) | |||
| 226 | 225 | ||
| 227 | spin_lock(&unix_table_lock); | 226 | spin_lock(&unix_table_lock); |
| 228 | for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { | 227 | for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { |
| 229 | struct hlist_node *node; | 228 | sk_for_each(sk, &unix_socket_table[i]) |
| 230 | |||
| 231 | sk_for_each(sk, node, &unix_socket_table[i]) | ||
| 232 | if (ino == sock_i_ino(sk)) { | 229 | if (ino == sock_i_ino(sk)) { |
| 233 | sock_hold(sk); | 230 | sock_hold(sk); |
| 234 | spin_unlock(&unix_table_lock); | 231 | spin_unlock(&unix_table_lock); |
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index b6f4b994eb35..d0f6545b0010 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
| @@ -99,7 +99,7 @@ unsigned int unix_tot_inflight; | |||
| 99 | struct sock *unix_get_socket(struct file *filp) | 99 | struct sock *unix_get_socket(struct file *filp) |
| 100 | { | 100 | { |
| 101 | struct sock *u_sock = NULL; | 101 | struct sock *u_sock = NULL; |
| 102 | struct inode *inode = filp->f_path.dentry->d_inode; | 102 | struct inode *inode = file_inode(filp); |
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * Socket ? | 105 | * Socket ? |
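file_inode() is the then-new accessor for the inode backing a struct file, so this conversion is purely cosmetic. A sketch of the equivalence; the definition is paraphrased from that era's <linux/fs.h> and worth double-checking against the exact tree:

	/* Era-appropriate definition (assumed, not quoted from this patch): */
	static inline struct inode *file_inode(struct file *f)
	{
		return f->f_path.dentry->d_inode;
	}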
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index a306bc66000e..37ca9694aabe 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk) | |||
| 208 | static void x25_kill_by_device(struct net_device *dev) | 208 | static void x25_kill_by_device(struct net_device *dev) |
| 209 | { | 209 | { |
| 210 | struct sock *s; | 210 | struct sock *s; |
| 211 | struct hlist_node *node; | ||
| 212 | 211 | ||
| 213 | write_lock_bh(&x25_list_lock); | 212 | write_lock_bh(&x25_list_lock); |
| 214 | 213 | ||
| 215 | sk_for_each(s, node, &x25_list) | 214 | sk_for_each(s, &x25_list) |
| 216 | if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) | 215 | if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) |
| 217 | x25_disconnect(s, ENETUNREACH, 0, 0); | 216 | x25_disconnect(s, ENETUNREACH, 0, 0); |
| 218 | 217 | ||
| @@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr, | |||
| 280 | { | 279 | { |
| 281 | struct sock *s; | 280 | struct sock *s; |
| 282 | struct sock *next_best; | 281 | struct sock *next_best; |
| 283 | struct hlist_node *node; | ||
| 284 | 282 | ||
| 285 | read_lock_bh(&x25_list_lock); | 283 | read_lock_bh(&x25_list_lock); |
| 286 | next_best = NULL; | 284 | next_best = NULL; |
| 287 | 285 | ||
| 288 | sk_for_each(s, node, &x25_list) | 286 | sk_for_each(s, &x25_list) |
| 289 | if ((!strcmp(addr->x25_addr, | 287 | if ((!strcmp(addr->x25_addr, |
| 290 | x25_sk(s)->source_addr.x25_addr) || | 288 | x25_sk(s)->source_addr.x25_addr) || |
| 291 | !strcmp(addr->x25_addr, | 289 | !strcmp(addr->x25_addr, |
| @@ -323,9 +321,8 @@ found: | |||
| 323 | static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) | 321 | static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) |
| 324 | { | 322 | { |
| 325 | struct sock *s; | 323 | struct sock *s; |
| 326 | struct hlist_node *node; | ||
| 327 | 324 | ||
| 328 | sk_for_each(s, node, &x25_list) | 325 | sk_for_each(s, &x25_list) |
| 329 | if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { | 326 | if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { |
| 330 | sock_hold(s); | 327 | sock_hold(s); |
| 331 | goto found; | 328 | goto found; |
| @@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = { | |||
| 1782 | void x25_kill_by_neigh(struct x25_neigh *nb) | 1779 | void x25_kill_by_neigh(struct x25_neigh *nb) |
| 1783 | { | 1780 | { |
| 1784 | struct sock *s; | 1781 | struct sock *s; |
| 1785 | struct hlist_node *node; | ||
| 1786 | 1782 | ||
| 1787 | write_lock_bh(&x25_list_lock); | 1783 | write_lock_bh(&x25_list_lock); |
| 1788 | 1784 | ||
| 1789 | sk_for_each(s, node, &x25_list) | 1785 | sk_for_each(s, &x25_list) |
| 1790 | if (x25_sk(s)->neighbour == nb) | 1786 | if (x25_sk(s)->neighbour == nb) |
| 1791 | x25_disconnect(s, ENETUNREACH, 0, 0); | 1787 | x25_disconnect(s, ENETUNREACH, 0, 0); |
| 1792 | 1788 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5b47180986f8..167c67d46c6a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list, | |||
| 379 | struct hlist_head *ndsttable, | 379 | struct hlist_head *ndsttable, |
| 380 | unsigned int nhashmask) | 380 | unsigned int nhashmask) |
| 381 | { | 381 | { |
| 382 | struct hlist_node *entry, *tmp, *entry0 = NULL; | 382 | struct hlist_node *tmp, *entry0 = NULL; |
| 383 | struct xfrm_policy *pol; | 383 | struct xfrm_policy *pol; |
| 384 | unsigned int h0 = 0; | 384 | unsigned int h0 = 0; |
| 385 | 385 | ||
| 386 | redo: | 386 | redo: |
| 387 | hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { | 387 | hlist_for_each_entry_safe(pol, tmp, list, bydst) { |
| 388 | unsigned int h; | 388 | unsigned int h; |
| 389 | 389 | ||
| 390 | h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, | 390 | h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, |
| 391 | pol->family, nhashmask); | 391 | pol->family, nhashmask); |
| 392 | if (!entry0) { | 392 | if (!entry0) { |
| 393 | hlist_del(entry); | 393 | hlist_del(&pol->bydst); |
| 394 | hlist_add_head(&pol->bydst, ndsttable+h); | 394 | hlist_add_head(&pol->bydst, ndsttable+h); |
| 395 | h0 = h; | 395 | h0 = h; |
| 396 | } else { | 396 | } else { |
| 397 | if (h != h0) | 397 | if (h != h0) |
| 398 | continue; | 398 | continue; |
| 399 | hlist_del(entry); | 399 | hlist_del(&pol->bydst); |
| 400 | hlist_add_after(entry0, &pol->bydst); | 400 | hlist_add_after(entry0, &pol->bydst); |
| 401 | } | 401 | } |
| 402 | entry0 = entry; | 402 | entry0 = &pol->bydst; |
| 403 | } | 403 | } |
| 404 | if (!hlist_empty(list)) { | 404 | if (!hlist_empty(list)) { |
| 405 | entry0 = NULL; | 405 | entry0 = NULL; |
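The _safe variant changed the same way, and deletions that previously went through the now-removed cursor must name the entry's own list node instead. A condensed sketch of the rehash pattern above; new_head stands in for the freshly computed bucket, and the real code additionally chains same-bucket entries with hlist_add_after():

	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		hlist_del(&pol->bydst);           /* was: hlist_del(entry) */
		hlist_add_head(&pol->bydst, new_head);
	}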
| @@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list, | |||
| 411 | struct hlist_head *nidxtable, | 411 | struct hlist_head *nidxtable, |
| 412 | unsigned int nhashmask) | 412 | unsigned int nhashmask) |
| 413 | { | 413 | { |
| 414 | struct hlist_node *entry, *tmp; | 414 | struct hlist_node *tmp; |
| 415 | struct xfrm_policy *pol; | 415 | struct xfrm_policy *pol; |
| 416 | 416 | ||
| 417 | hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { | 417 | hlist_for_each_entry_safe(pol, tmp, list, byidx) { |
| 418 | unsigned int h; | 418 | unsigned int h; |
| 419 | 419 | ||
| 420 | h = __idx_hash(pol->index, nhashmask); | 420 | h = __idx_hash(pol->index, nhashmask); |
| @@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir) | |||
| 544 | static u32 idx_generator; | 544 | static u32 idx_generator; |
| 545 | 545 | ||
| 546 | for (;;) { | 546 | for (;;) { |
| 547 | struct hlist_node *entry; | ||
| 548 | struct hlist_head *list; | 547 | struct hlist_head *list; |
| 549 | struct xfrm_policy *p; | 548 | struct xfrm_policy *p; |
| 550 | u32 idx; | 549 | u32 idx; |
| @@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir) | |||
| 556 | idx = 8; | 555 | idx = 8; |
| 557 | list = net->xfrm.policy_byidx + idx_hash(net, idx); | 556 | list = net->xfrm.policy_byidx + idx_hash(net, idx); |
| 558 | found = 0; | 557 | found = 0; |
| 559 | hlist_for_each_entry(p, entry, list, byidx) { | 558 | hlist_for_each_entry(p, list, byidx) { |
| 560 | if (p->index == idx) { | 559 | if (p->index == idx) { |
| 561 | found = 1; | 560 | found = 1; |
| 562 | break; | 561 | break; |
| @@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
| 628 | struct xfrm_policy *pol; | 627 | struct xfrm_policy *pol; |
| 629 | struct xfrm_policy *delpol; | 628 | struct xfrm_policy *delpol; |
| 630 | struct hlist_head *chain; | 629 | struct hlist_head *chain; |
| 631 | struct hlist_node *entry, *newpos; | 630 | struct hlist_node *newpos; |
| 632 | 631 | ||
| 633 | write_lock_bh(&xfrm_policy_lock); | 632 | write_lock_bh(&xfrm_policy_lock); |
| 634 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); | 633 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); |
| 635 | delpol = NULL; | 634 | delpol = NULL; |
| 636 | newpos = NULL; | 635 | newpos = NULL; |
| 637 | hlist_for_each_entry(pol, entry, chain, bydst) { | 636 | hlist_for_each_entry(pol, chain, bydst) { |
| 638 | if (pol->type == policy->type && | 637 | if (pol->type == policy->type && |
| 639 | !selector_cmp(&pol->selector, &policy->selector) && | 638 | !selector_cmp(&pol->selector, &policy->selector) && |
| 640 | xfrm_policy_mark_match(policy, pol) && | 639 | xfrm_policy_mark_match(policy, pol) && |
| @@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
| 691 | { | 690 | { |
| 692 | struct xfrm_policy *pol, *ret; | 691 | struct xfrm_policy *pol, *ret; |
| 693 | struct hlist_head *chain; | 692 | struct hlist_head *chain; |
| 694 | struct hlist_node *entry; | ||
| 695 | 693 | ||
| 696 | *err = 0; | 694 | *err = 0; |
| 697 | write_lock_bh(&xfrm_policy_lock); | 695 | write_lock_bh(&xfrm_policy_lock); |
| 698 | chain = policy_hash_bysel(net, sel, sel->family, dir); | 696 | chain = policy_hash_bysel(net, sel, sel->family, dir); |
| 699 | ret = NULL; | 697 | ret = NULL; |
| 700 | hlist_for_each_entry(pol, entry, chain, bydst) { | 698 | hlist_for_each_entry(pol, chain, bydst) { |
| 701 | if (pol->type == type && | 699 | if (pol->type == type && |
| 702 | (mark & pol->mark.m) == pol->mark.v && | 700 | (mark & pol->mark.m) == pol->mark.v && |
| 703 | !selector_cmp(sel, &pol->selector) && | 701 | !selector_cmp(sel, &pol->selector) && |
| @@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
| 729 | { | 727 | { |
| 730 | struct xfrm_policy *pol, *ret; | 728 | struct xfrm_policy *pol, *ret; |
| 731 | struct hlist_head *chain; | 729 | struct hlist_head *chain; |
| 732 | struct hlist_node *entry; | ||
| 733 | 730 | ||
| 734 | *err = -ENOENT; | 731 | *err = -ENOENT; |
| 735 | if (xfrm_policy_id2dir(id) != dir) | 732 | if (xfrm_policy_id2dir(id) != dir) |
| @@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
| 739 | write_lock_bh(&xfrm_policy_lock); | 736 | write_lock_bh(&xfrm_policy_lock); |
| 740 | chain = net->xfrm.policy_byidx + idx_hash(net, id); | 737 | chain = net->xfrm.policy_byidx + idx_hash(net, id); |
| 741 | ret = NULL; | 738 | ret = NULL; |
| 742 | hlist_for_each_entry(pol, entry, chain, byidx) { | 739 | hlist_for_each_entry(pol, chain, byidx) { |
| 743 | if (pol->type == type && pol->index == id && | 740 | if (pol->type == type && pol->index == id && |
| 744 | (mark & pol->mark.m) == pol->mark.v) { | 741 | (mark & pol->mark.m) == pol->mark.v) { |
| 745 | xfrm_pol_hold(pol); | 742 | xfrm_pol_hold(pol); |
| @@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi | |||
| 772 | 769 | ||
| 773 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { | 770 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { |
| 774 | struct xfrm_policy *pol; | 771 | struct xfrm_policy *pol; |
| 775 | struct hlist_node *entry; | ||
| 776 | int i; | 772 | int i; |
| 777 | 773 | ||
| 778 | hlist_for_each_entry(pol, entry, | 774 | hlist_for_each_entry(pol, |
| 779 | &net->xfrm.policy_inexact[dir], bydst) { | 775 | &net->xfrm.policy_inexact[dir], bydst) { |
| 780 | if (pol->type != type) | 776 | if (pol->type != type) |
| 781 | continue; | 777 | continue; |
| @@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi | |||
| 789 | } | 785 | } |
| 790 | } | 786 | } |
| 791 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { | 787 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { |
| 792 | hlist_for_each_entry(pol, entry, | 788 | hlist_for_each_entry(pol, |
| 793 | net->xfrm.policy_bydst[dir].table + i, | 789 | net->xfrm.policy_bydst[dir].table + i, |
| 794 | bydst) { | 790 | bydst) { |
| 795 | if (pol->type != type) | 791 | if (pol->type != type) |
| @@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
| 828 | 824 | ||
| 829 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { | 825 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { |
| 830 | struct xfrm_policy *pol; | 826 | struct xfrm_policy *pol; |
| 831 | struct hlist_node *entry; | ||
| 832 | int i; | 827 | int i; |
| 833 | 828 | ||
| 834 | again1: | 829 | again1: |
| 835 | hlist_for_each_entry(pol, entry, | 830 | hlist_for_each_entry(pol, |
| 836 | &net->xfrm.policy_inexact[dir], bydst) { | 831 | &net->xfrm.policy_inexact[dir], bydst) { |
| 837 | if (pol->type != type) | 832 | if (pol->type != type) |
| 838 | continue; | 833 | continue; |
| @@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
| 852 | 847 | ||
| 853 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { | 848 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { |
| 854 | again2: | 849 | again2: |
| 855 | hlist_for_each_entry(pol, entry, | 850 | hlist_for_each_entry(pol, |
| 856 | net->xfrm.policy_bydst[dir].table + i, | 851 | net->xfrm.policy_bydst[dir].table + i, |
| 857 | bydst) { | 852 | bydst) { |
| 858 | if (pol->type != type) | 853 | if (pol->type != type) |
| @@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, | |||
| 980 | int err; | 975 | int err; |
| 981 | struct xfrm_policy *pol, *ret; | 976 | struct xfrm_policy *pol, *ret; |
| 982 | const xfrm_address_t *daddr, *saddr; | 977 | const xfrm_address_t *daddr, *saddr; |
| 983 | struct hlist_node *entry; | ||
| 984 | struct hlist_head *chain; | 978 | struct hlist_head *chain; |
| 985 | u32 priority = ~0U; | 979 | u32 priority = ~0U; |
| 986 | 980 | ||
| @@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, | |||
| 992 | read_lock_bh(&xfrm_policy_lock); | 986 | read_lock_bh(&xfrm_policy_lock); |
| 993 | chain = policy_hash_direct(net, daddr, saddr, family, dir); | 987 | chain = policy_hash_direct(net, daddr, saddr, family, dir); |
| 994 | ret = NULL; | 988 | ret = NULL; |
| 995 | hlist_for_each_entry(pol, entry, chain, bydst) { | 989 | hlist_for_each_entry(pol, chain, bydst) { |
| 996 | err = xfrm_policy_match(pol, fl, type, family, dir); | 990 | err = xfrm_policy_match(pol, fl, type, family, dir); |
| 997 | if (err) { | 991 | if (err) { |
| 998 | if (err == -ESRCH) | 992 | if (err == -ESRCH) |
| @@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, | |||
| 1008 | } | 1002 | } |
| 1009 | } | 1003 | } |
| 1010 | chain = &net->xfrm.policy_inexact[dir]; | 1004 | chain = &net->xfrm.policy_inexact[dir]; |
| 1011 | hlist_for_each_entry(pol, entry, chain, bydst) { | 1005 | hlist_for_each_entry(pol, chain, bydst) { |
| 1012 | err = xfrm_policy_match(pol, fl, type, family, dir); | 1006 | err = xfrm_policy_match(pol, fl, type, family, dir); |
| 1013 | if (err) { | 1007 | if (err) { |
| 1014 | if (err == -ESRCH) | 1008 | if (err == -ESRCH) |
| @@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector | |||
| 3041 | u8 dir, u8 type) | 3035 | u8 dir, u8 type) |
| 3042 | { | 3036 | { |
| 3043 | struct xfrm_policy *pol, *ret = NULL; | 3037 | struct xfrm_policy *pol, *ret = NULL; |
| 3044 | struct hlist_node *entry; | ||
| 3045 | struct hlist_head *chain; | 3038 | struct hlist_head *chain; |
| 3046 | u32 priority = ~0U; | 3039 | u32 priority = ~0U; |
| 3047 | 3040 | ||
| 3048 | read_lock_bh(&xfrm_policy_lock); | 3041 | read_lock_bh(&xfrm_policy_lock); |
| 3049 | chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); | 3042 | chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); |
| 3050 | hlist_for_each_entry(pol, entry, chain, bydst) { | 3043 | hlist_for_each_entry(pol, chain, bydst) { |
| 3051 | if (xfrm_migrate_selector_match(sel, &pol->selector) && | 3044 | if (xfrm_migrate_selector_match(sel, &pol->selector) && |
| 3052 | pol->type == type) { | 3045 | pol->type == type) { |
| 3053 | ret = pol; | 3046 | ret = pol; |
| @@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector | |||
| 3056 | } | 3049 | } |
| 3057 | } | 3050 | } |
| 3058 | chain = &init_net.xfrm.policy_inexact[dir]; | 3051 | chain = &init_net.xfrm.policy_inexact[dir]; |
| 3059 | hlist_for_each_entry(pol, entry, chain, bydst) { | 3052 | hlist_for_each_entry(pol, chain, bydst) { |
| 3060 | if (xfrm_migrate_selector_match(sel, &pol->selector) && | 3053 | if (xfrm_migrate_selector_match(sel, &pol->selector) && |
| 3061 | pol->type == type && | 3054 | pol->type == type && |
| 3062 | pol->priority < priority) { | 3055 | pol->priority < priority) { |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index ae01bdbcb294..2c341bdaf47c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
| @@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list, | |||
| 72 | struct hlist_head *nspitable, | 72 | struct hlist_head *nspitable, |
| 73 | unsigned int nhashmask) | 73 | unsigned int nhashmask) |
| 74 | { | 74 | { |
| 75 | struct hlist_node *entry, *tmp; | 75 | struct hlist_node *tmp; |
| 76 | struct xfrm_state *x; | 76 | struct xfrm_state *x; |
| 77 | 77 | ||
| 78 | hlist_for_each_entry_safe(x, entry, tmp, list, bydst) { | 78 | hlist_for_each_entry_safe(x, tmp, list, bydst) { |
| 79 | unsigned int h; | 79 | unsigned int h; |
| 80 | 80 | ||
| 81 | h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, | 81 | h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, |
| @@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work) | |||
| 368 | { | 368 | { |
| 369 | struct net *net = container_of(work, struct net, xfrm.state_gc_work); | 369 | struct net *net = container_of(work, struct net, xfrm.state_gc_work); |
| 370 | struct xfrm_state *x; | 370 | struct xfrm_state *x; |
| 371 | struct hlist_node *entry, *tmp; | 371 | struct hlist_node *tmp; |
| 372 | struct hlist_head gc_list; | 372 | struct hlist_head gc_list; |
| 373 | 373 | ||
| 374 | spin_lock_bh(&xfrm_state_gc_lock); | 374 | spin_lock_bh(&xfrm_state_gc_lock); |
| 375 | hlist_move_list(&net->xfrm.state_gc_list, &gc_list); | 375 | hlist_move_list(&net->xfrm.state_gc_list, &gc_list); |
| 376 | spin_unlock_bh(&xfrm_state_gc_lock); | 376 | spin_unlock_bh(&xfrm_state_gc_lock); |
| 377 | 377 | ||
| 378 | hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist) | 378 | hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) |
| 379 | xfrm_state_gc_destroy(x); | 379 | xfrm_state_gc_destroy(x); |
| 380 | 380 | ||
| 381 | wake_up(&net->xfrm.km_waitq); | 381 | wake_up(&net->xfrm.km_waitq); |
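xfrm_state_gc_task() above shows the splice-and-reap idiom that the cursorless macro tidies up: steal the whole pending list under the lock, then destroy entries without holding it. The same calls, restated with descriptive comments (only the comments are mine):

	struct hlist_head gc_list;
	struct hlist_node *tmp;
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&net->xfrm.state_gc_list, &gc_list); /* steal the list */
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)  /* reap unlocked */
		xfrm_state_gc_destroy(x);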
| @@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi | |||
| 577 | int i, err = 0; | 577 | int i, err = 0; |
| 578 | 578 | ||
| 579 | for (i = 0; i <= net->xfrm.state_hmask; i++) { | 579 | for (i = 0; i <= net->xfrm.state_hmask; i++) { |
| 580 | struct hlist_node *entry; | ||
| 581 | struct xfrm_state *x; | 580 | struct xfrm_state *x; |
| 582 | 581 | ||
| 583 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { | 582 | hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { |
| 584 | if (xfrm_id_proto_match(x->id.proto, proto) && | 583 | if (xfrm_id_proto_match(x->id.proto, proto) && |
| 585 | (err = security_xfrm_state_delete(x)) != 0) { | 584 | (err = security_xfrm_state_delete(x)) != 0) { |
| 586 | xfrm_audit_state_delete(x, 0, | 585 | xfrm_audit_state_delete(x, 0, |
| @@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) | |||
| 613 | 612 | ||
| 614 | err = -ESRCH; | 613 | err = -ESRCH; |
| 615 | for (i = 0; i <= net->xfrm.state_hmask; i++) { | 614 | for (i = 0; i <= net->xfrm.state_hmask; i++) { |
| 616 | struct hlist_node *entry; | ||
| 617 | struct xfrm_state *x; | 615 | struct xfrm_state *x; |
| 618 | restart: | 616 | restart: |
| 619 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { | 617 | hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { |
| 620 | if (!xfrm_state_kern(x) && | 618 | if (!xfrm_state_kern(x) && |
| 621 | xfrm_id_proto_match(x->id.proto, proto)) { | 619 | xfrm_id_proto_match(x->id.proto, proto)) { |
| 622 | xfrm_state_hold(x); | 620 | xfrm_state_hold(x); |
| @@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, | |||
| 685 | { | 683 | { |
| 686 | unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); | 684 | unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); |
| 687 | struct xfrm_state *x; | 685 | struct xfrm_state *x; |
| 688 | struct hlist_node *entry; | ||
| 689 | 686 | ||
| 690 | hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { | 687 | hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) { |
| 691 | if (x->props.family != family || | 688 | if (x->props.family != family || |
| 692 | x->id.spi != spi || | 689 | x->id.spi != spi || |
| 693 | x->id.proto != proto || | 690 | x->id.proto != proto || |
| @@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, | |||
| 710 | { | 707 | { |
| 711 | unsigned int h = xfrm_src_hash(net, daddr, saddr, family); | 708 | unsigned int h = xfrm_src_hash(net, daddr, saddr, family); |
| 712 | struct xfrm_state *x; | 709 | struct xfrm_state *x; |
| 713 | struct hlist_node *entry; | ||
| 714 | 710 | ||
| 715 | hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { | 711 | hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) { |
| 716 | if (x->props.family != family || | 712 | if (x->props.family != family || |
| 717 | x->id.proto != proto || | 713 | x->id.proto != proto || |
| 718 | !xfrm_addr_equal(&x->id.daddr, daddr, family) || | 714 | !xfrm_addr_equal(&x->id.daddr, daddr, family) || |
| @@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, | |||
| 798 | static xfrm_address_t saddr_wildcard = { }; | 794 | static xfrm_address_t saddr_wildcard = { }; |
| 799 | struct net *net = xp_net(pol); | 795 | struct net *net = xp_net(pol); |
| 800 | unsigned int h, h_wildcard; | 796 | unsigned int h, h_wildcard; |
| 801 | struct hlist_node *entry; | ||
| 802 | struct xfrm_state *x, *x0, *to_put; | 797 | struct xfrm_state *x, *x0, *to_put; |
| 803 | int acquire_in_progress = 0; | 798 | int acquire_in_progress = 0; |
| 804 | int error = 0; | 799 | int error = 0; |
| @@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, | |||
| 810 | 805 | ||
| 811 | spin_lock_bh(&xfrm_state_lock); | 806 | spin_lock_bh(&xfrm_state_lock); |
| 812 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); | 807 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); |
| 813 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 808 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
| 814 | if (x->props.family == encap_family && | 809 | if (x->props.family == encap_family && |
| 815 | x->props.reqid == tmpl->reqid && | 810 | x->props.reqid == tmpl->reqid && |
| 816 | (mark & x->mark.m) == x->mark.v && | 811 | (mark & x->mark.m) == x->mark.v && |
| @@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, | |||
| 826 | goto found; | 821 | goto found; |
| 827 | 822 | ||
| 828 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); | 823 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); |
| 829 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { | 824 | hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) { |
| 830 | if (x->props.family == encap_family && | 825 | if (x->props.family == encap_family && |
| 831 | x->props.reqid == tmpl->reqid && | 826 | x->props.reqid == tmpl->reqid && |
| 832 | (mark & x->mark.m) == x->mark.v && | 827 | (mark & x->mark.m) == x->mark.v && |
| @@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark, | |||
| 906 | { | 901 | { |
| 907 | unsigned int h; | 902 | unsigned int h; |
| 908 | struct xfrm_state *rx = NULL, *x = NULL; | 903 | struct xfrm_state *rx = NULL, *x = NULL; |
| 909 | struct hlist_node *entry; | ||
| 910 | 904 | ||
| 911 | spin_lock(&xfrm_state_lock); | 905 | spin_lock(&xfrm_state_lock); |
| 912 | h = xfrm_dst_hash(net, daddr, saddr, reqid, family); | 906 | h = xfrm_dst_hash(net, daddr, saddr, reqid, family); |
| 913 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 907 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
| 914 | if (x->props.family == family && | 908 | if (x->props.family == family && |
| 915 | x->props.reqid == reqid && | 909 | x->props.reqid == reqid && |
| 916 | (mark & x->mark.m) == x->mark.v && | 910 | (mark & x->mark.m) == x->mark.v && |
| @@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | |||
| 972 | unsigned short family = xnew->props.family; | 966 | unsigned short family = xnew->props.family; |
| 973 | u32 reqid = xnew->props.reqid; | 967 | u32 reqid = xnew->props.reqid; |
| 974 | struct xfrm_state *x; | 968 | struct xfrm_state *x; |
| 975 | struct hlist_node *entry; | ||
| 976 | unsigned int h; | 969 | unsigned int h; |
| 977 | u32 mark = xnew->mark.v & xnew->mark.m; | 970 | u32 mark = xnew->mark.v & xnew->mark.m; |
| 978 | 971 | ||
| 979 | h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); | 972 | h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); |
| 980 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 973 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
| 981 | if (x->props.family == family && | 974 | if (x->props.family == family && |
| 982 | x->props.reqid == reqid && | 975 | x->props.reqid == reqid && |
| 983 | (mark & x->mark.m) == x->mark.v && | 976 | (mark & x->mark.m) == x->mark.v && |
| @@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, | |||
| 1004 | const xfrm_address_t *saddr, int create) | 997 | const xfrm_address_t *saddr, int create) |
| 1005 | { | 998 | { |
| 1006 | unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); | 999 | unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); |
| 1007 | struct hlist_node *entry; | ||
| 1008 | struct xfrm_state *x; | 1000 | struct xfrm_state *x; |
| 1009 | u32 mark = m->v & m->m; | 1001 | u32 mark = m->v & m->m; |
| 1010 | 1002 | ||
| 1011 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 1003 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
| 1012 | if (x->props.reqid != reqid || | 1004 | if (x->props.reqid != reqid || |
| 1013 | x->props.mode != mode || | 1005 | x->props.mode != mode || |
| 1014 | x->props.family != family || | 1006 | x->props.family != family || |
| @@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) | |||
| 1215 | { | 1207 | { |
| 1216 | unsigned int h; | 1208 | unsigned int h; |
| 1217 | struct xfrm_state *x; | 1209 | struct xfrm_state *x; |
| 1218 | struct hlist_node *entry; | ||
| 1219 | 1210 | ||
| 1220 | if (m->reqid) { | 1211 | if (m->reqid) { |
| 1221 | h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, | 1212 | h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, |
| 1222 | m->reqid, m->old_family); | 1213 | m->reqid, m->old_family); |
| 1223 | hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) { | 1214 | hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) { |
| 1224 | if (x->props.mode != m->mode || | 1215 | if (x->props.mode != m->mode || |
| 1225 | x->id.proto != m->proto) | 1216 | x->id.proto != m->proto) |
| 1226 | continue; | 1217 | continue; |
| @@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) | |||
| 1237 | } else { | 1228 | } else { |
| 1238 | h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, | 1229 | h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, |
| 1239 | m->old_family); | 1230 | m->old_family); |
| 1240 | hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) { | 1231 | hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) { |
| 1241 | if (x->props.mode != m->mode || | 1232 | if (x->props.mode != m->mode || |
| 1242 | x->id.proto != m->proto) | 1233 | x->id.proto != m->proto) |
| 1243 | continue; | 1234 | continue; |
| @@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s | |||
| 1466 | int i; | 1457 | int i; |
| 1467 | 1458 | ||
| 1468 | for (i = 0; i <= net->xfrm.state_hmask; i++) { | 1459 | for (i = 0; i <= net->xfrm.state_hmask; i++) { |
| 1469 | struct hlist_node *entry; | ||
| 1470 | struct xfrm_state *x; | 1460 | struct xfrm_state *x; |
| 1471 | 1461 | ||
| 1472 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { | 1462 | hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { |
| 1473 | if (x->km.seq == seq && | 1463 | if (x->km.seq == seq && |
| 1474 | (mark & x->mark.m) == x->mark.v && | 1464 | (mark & x->mark.m) == x->mark.v && |
| 1475 | x->km.state == XFRM_STATE_ACQ) { | 1465 | x->km.state == XFRM_STATE_ACQ) { |
