Diffstat (limited to 'net')
-rw-r--r--  net/core/skbuff.c                     | 16
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c  | 11
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c   |  2
-rw-r--r--  net/ipv4/esp4.c                       |  2
-rw-r--r--  net/ipv4/proc.c                       |  8
-rw-r--r--  net/ipv4/tcp_input.c                  | 16
-rw-r--r--  net/ipv4/tcp_ipv4.c                   |  5
-rw-r--r--  net/ipv4/udp.c                        |  2
-rw-r--r--  net/ipv6/esp6.c                       |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                   |  4
-rw-r--r--  net/mac80211/ieee80211_sta.c          |  5
-rw-r--r--  net/sched/act_mirred.c                |  2
-rw-r--r--  net/sctp/auth.c                       |  4
-rw-r--r--  net/sctp/sm_make_chunk.c              |  8
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 14
-rw-r--r--  net/sunrpc/xdr.c                      |  4
-rw-r--r--  net/xfrm/xfrm_algo.c                  | 10
17 files changed, 47 insertions, 68 deletions
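Most of the hunks in the diff below are one mechanical conversion: open-coded scatterlist setup (a bare sg_set_page() call followed by manual .offset/.length assignments, often via virt_to_page()/offset_in_page() arithmetic) is replaced by the chained-scatterlist helpers. The fragment that follows is an illustrative sketch only, not part of the patch; it assumes the helper signatures used throughout this diff (sg_init_table(), sg_set_buf(), and the four-argument sg_set_page()).

#include <linux/scatterlist.h>

/*
 * Sketch of the conversion pattern seen in this diff (not patch code).
 * The helpers replace manual offset/length bookkeeping on each entry.
 */
static void sg_helpers_example(struct scatterlist *sg, void *buf,
                               unsigned int buflen, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg_init_table(sg, 2);                   /* zero the entries, terminate the table */

        /* was: sg_set_page(&sg[0], virt_to_page(buf));
         *      sg[0].offset = offset_in_page(buf);
         *      sg[0].length = buflen;
         */
        sg_set_buf(&sg[0], buf, buflen);        /* kernel-virtual buffer */

        /* was: sg_set_page(&sg[1], page);
         *      sg[1].offset = offset;
         *      sg[1].length = len;
         */
        sg_set_page(&sg[1], page, len, offset); /* page, length and offset in one call */
}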
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e2c84f..573e172 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
         n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
         n->nohdr = 0;
         n->destructor = NULL;
-#ifdef CONFIG_NET_CLS_ACT
-        /* FIXME What is this and why don't we do it in copy_skb_header? */
-        n->tc_verd = SET_TC_VERD(n->tc_verd,0);
-        n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-        n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-        C(iif);
-#endif
         C(truesize);
         atomic_set(&n->users, 1);
         C(head);
@@ -2045,9 +2038,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
         if (copy > 0) {
                 if (copy > len)
                         copy = len;
-                sg_set_page(&sg[elt], virt_to_page(skb->data + offset));
-                sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
-                sg[elt].length = copy;
+                sg_set_buf(sg, skb->data + offset, copy);
                 elt++;
                 if ((len -= copy) == 0)
                         return elt;
@@ -2065,9 +2056,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
                         if (copy > len)
                                 copy = len;
-                        sg_set_page(&sg[elt], frag->page);
-                        sg[elt].offset = frag->page_offset+offset-start;
-                        sg[elt].length = copy;
+                        sg_set_page(&sg[elt], frag->page, copy,
+                                    frag->page_offset+offset-start);
                         elt++;
                         if (!(len -= copy))
                                 return elt;
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 8117776..4cce353 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -25,7 +25,7 @@
 #include <net/ieee80211.h>
 
 #include <linux/crypto.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
 MODULE_AUTHOR("Jouni Malinen");
@@ -537,13 +537,8 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
                 return -1;
         }
         sg_init_table(sg, 2);
-        sg_set_page(&sg[0], virt_to_page(hdr));
-        sg[0].offset = offset_in_page(hdr);
-        sg[0].length = 16;
-
-        sg_set_page(&sg[1], virt_to_page(data));
-        sg[1].offset = offset_in_page(data);
-        sg[1].length = data_len;
+        sg_set_buf(&sg[0], hdr, 16);
+        sg_set_buf(&sg[1], data, data_len);
 
         if (crypto_hash_setkey(tfm_michael, key, 8))
                 return -1;
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 9693429..866fc04 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -22,7 +22,7 @@
 #include <net/ieee80211.h>
 
 #include <linux/crypto.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
 MODULE_AUTHOR("Jouni Malinen");
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6b1a31a..ba98401 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -110,6 +110,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                         if (!sg)
                                 goto unlock;
                 }
+                sg_init_table(sg, nfrags);
                 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
                 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
                 if (unlikely(sg != &esp->sgbuf[0]))
@@ -201,6 +202,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
                         if (!sg)
                                 goto out;
                 }
+                sg_init_table(sg, nfrags);
                 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
                 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
                 if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index fd16cb8..9be0daa 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
         SNMP_MIB_SENTINEL
 };
 
-static const struct snmp_mib snmp4_icmp_list[] = {
-        SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
-        SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
-        SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
-        SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
-        SNMP_MIB_SENTINEL
-};
-
 static struct {
         char *name;
         int index;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3dbbb44..47f4f35 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SLOWPATH           0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED   0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED   0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
-#define FLAG_DSACKING_ACK       0x800 /* SACK blocks contained DSACK info */
+#define FLAG_DSACKING_ACK       0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED      0x1000 /* Non-head rexmitted data was ACKed */
 
 #define FLAG_ACKED              (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
         tp->rx_opt.sack_ok &= ~2;
 }
 
-/* Take a notice that peer is sending DSACKs */
+/* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
         tp->rx_opt.sack_ok |= 4;
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
  *
  * With D-SACK the lower bound is extended to cover sequence space below
  * SND.UNA down to undo_marker, which is the last point of interest. Yet
- * again, DSACK block must not to go across snd_una (for the same reason as
+ * again, D-SACK block must not to go across snd_una (for the same reason as
  * for the normal SACK blocks, explained above). But there all simplicity
  * ends, TCP might receive valid D-SACKs below that. As long as they reside
  * fully below undo_marker they do not affect behavior in anyway and can
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
         if (!before(start_seq, tp->snd_nxt))
                 return 0;
 
-        /* In outstanding window? ...This is valid exit for DSACKs too.
+        /* In outstanding window? ...This is valid exit for D-SACKs too.
          * start_seq == snd_una is non-sensical (see comments above)
          */
         if (after(start_seq, tp->snd_una))
@@ -1615,7 +1615,7 @@ void tcp_enter_frto(struct sock *sk)
               !icsk->icsk_retransmits)) {
                 tp->prior_ssthresh = tcp_current_ssthresh(sk);
                 /* Our state is too optimistic in ssthresh() call because cwnd
-                 * is not reduced until tcp_enter_frto_loss() when previous FRTO
+                 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
                  * recovery has not yet completed. Pattern would be this: RTO,
                  * Cumulative ACK, RTO (2xRTO for the same segment does not end
                  * up here twice).
@@ -1801,7 +1801,7 @@ void tcp_enter_loss(struct sock *sk, int how)
                 tcp_set_ca_state(sk, TCP_CA_Loss);
         tp->high_seq = tp->snd_nxt;
         TCP_ECN_queue_cwr(tp);
-        /* Abort FRTO algorithm if one is in progress */
+        /* Abort F-RTO algorithm if one is in progress */
         tp->frto_counter = 0;
 }
 
@@ -1946,7 +1946,7 @@ static int tcp_time_to_recover(struct sock *sk)
         struct tcp_sock *tp = tcp_sk(sk);
         __u32 packets_out;
 
-        /* Do not perform any recovery during FRTO algorithm */
+        /* Do not perform any recovery during F-RTO algorithm */
         if (tp->frto_counter)
                 return 0;
 
@@ -2962,7 +2962,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
         }
 
         if (tp->frto_counter == 1) {
-                /* Sending of the next skb must be allowed or no FRTO */
+                /* Sending of the next skb must be allowed or no F-RTO */
                 if (!tcp_send_head(sk) ||
                     after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                           tp->snd_una + tp->snd_wnd)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 38cf73a..ad759f1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
         bp->pad = 0;
         bp->protocol = protocol;
         bp->len = htons(tcplen);
+
+        sg_init_table(sg, 4);
+
         sg_set_buf(&sg[block++], bp, sizeof(*bp));
         nbytes += sizeof(*bp);
 
@@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
         sg_set_buf(&sg[block++], key->key, key->keylen);
         nbytes += key->keylen;
 
+        sg_mark_end(sg, block);
+
         /* Now store the Hash into the packet */
         err = crypto_hash_init(desc);
         if (err)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 35d2b0e..4bc25b4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
                 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
         sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
-                               skb->dev->ifindex, udptable        );
+                               inet_iif(skb), udptable);
 
         if (sk != NULL) {
                 int ret = udp_queue_rcv_skb(sk, skb);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 72a6598..f67d51a 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -109,6 +109,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                         if (!sg)
                                 goto unlock;
                 }
+                sg_init_table(sg, nfrags);
                 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
                 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
                 if (unlikely(sg != &esp->sgbuf[0]))
@@ -205,6 +206,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
                                 goto out;
                         }
                 }
+                sg_init_table(sg, nfrags);
                 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
                 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
                 if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 737b755..32dc3297 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -757,6 +757,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
         bp->len = htonl(tcplen);
         bp->protocol = htonl(protocol);
 
+        sg_init_table(sg, 4);
+
         sg_set_buf(&sg[block++], bp, sizeof(*bp));
         nbytes += sizeof(*bp);
 
@@ -778,6 +780,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
         sg_set_buf(&sg[block++], key->key, key->keylen);
         nbytes += key->keylen;
 
+        sg_mark_end(sg, block);
+
         /* Now store the hash into the packet */
         err = crypto_hash_init(desc);
         if (err) {
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index f7ffeec..fda0e06 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
         printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
                "status=%d aid=%d)\n",
                dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
-               capab_info, status_code, aid & ~(BIT(15) | BIT(14)));
+               capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
 
         if (status_code != WLAN_STATUS_SUCCESS) {
                 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
@@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
 {
         int tmp, hidden_ssid;
 
-        if (!memcmp(ifsta->ssid, ssid, ssid_len))
+        if (ssid_len == ifsta->ssid_len &&
+            !memcmp(ifsta->ssid, ssid, ssid_len))
                 return 1;
 
         if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fd7bca4..c3fde91 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -166,7 +166,7 @@ bad_mirred:
                 return TC_ACT_SHOT;
         }
 
-        skb2 = skb_clone(skb, GFP_ATOMIC);
+        skb2 = skb_act_clone(skb, GFP_ATOMIC);
         if (skb2 == NULL)
                 goto bad_mirred;
         if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index cbd64b2..621113a 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -727,9 +727,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
         /* set up scatter list */
         end = skb_tail_pointer(skb);
         sg_init_table(&sg, 1);
-        sg_set_page(&sg, virt_to_page(auth));
-        sg.offset = (unsigned long)(auth) % PAGE_SIZE;
-        sg.length = end - (unsigned char *)auth;
+        sg_set_buf(&sg, auth, end - (unsigned char *)auth);
 
         desc.tfm = asoc->ep->auth_hmacs[hmac_id];
         desc.flags = 0;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 658476c..c055212 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1514,9 +1514,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 
                 /* Sign the message.  */
                 sg_init_table(&sg, 1);
-                sg_set_page(&sg, virt_to_page(&cookie->c));
-                sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
-                sg.length = bodysize;
+                sg_set_buf(&sg, &cookie->c, bodysize);
                 keylen = SCTP_SECRET_SIZE;
                 key = (char *)ep->secret_key[ep->current_key];
                 desc.tfm = sctp_sk(ep->base.sk)->hmac;
@@ -1587,9 +1585,7 @@ struct sctp_association *sctp_unpack_cookie(
         /* Check the signature.  */
         keylen = SCTP_SECRET_SIZE;
         sg_init_table(&sg, 1);
-        sg_set_page(&sg, virt_to_page(bear_cookie));
-        sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
-        sg.length = bodysize;
+        sg_set_buf(&sg, bear_cookie, bodysize);
         key = (char *)ep->secret_key[ep->current_key];
         desc.tfm = sctp_sk(ep->base.sk)->hmac;
         desc.flags = 0;
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 32be431..24711be 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -199,7 +199,7 @@ encryptor(struct scatterlist *sg, void *data)
         } else {
                 in_page = sg_page(sg);
         }
-        sg_set_page(&desc->infrags[desc->fragno], in_page);
+        sg_assign_page(&desc->infrags[desc->fragno], in_page);
         desc->fragno++;
         desc->fraglen += sg->length;
         desc->pos += sg->length;
@@ -215,11 +215,10 @@ encryptor(struct scatterlist *sg, void *data)
         if (ret)
                 return ret;
         if (fraglen) {
-                sg_set_page(&desc->outfrags[0], sg_page(sg));
-                desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
-                desc->outfrags[0].length = fraglen;
+                sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
+                            sg->offset + sg->length - fraglen);
                 desc->infrags[0] = desc->outfrags[0];
-                sg_set_page(&desc->infrags[0], in_page);
+                sg_assign_page(&desc->infrags[0], in_page);
                 desc->fragno = 1;
                 desc->fraglen = fraglen;
         } else {
@@ -287,9 +286,8 @@ decryptor(struct scatterlist *sg, void *data)
         if (ret)
                 return ret;
         if (fraglen) {
-                sg_set_page(&desc->frags[0], sg_page(sg));
-                desc->frags[0].offset = sg->offset + sg->length - fraglen;
-                desc->frags[0].length = fraglen;
+                sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
+                            sg->offset + sg->length - fraglen);
                 desc->fragno = 1;
                 desc->fraglen = fraglen;
         } else {
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 3d1f7cd..f38dac3 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1059,9 +1059,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                 do {
                         if (thislen > page_len)
                                 thislen = page_len;
-                        sg_set_page(sg, buf->pages[i]);
-                        sg->offset = page_offset;
-                        sg->length = thislen;
+                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                         ret = actor(sg, data);
                         if (ret)
                                 goto out;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 313d4be..0426388 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -553,9 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                 if (copy > len)
                         copy = len;
 
-                sg_set_page(&sg, virt_to_page(skb->data + offset));
-                sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
-                sg.length = copy;
+                sg_init_one(&sg, skb->data + offset, copy);
 
                 err = icv_update(desc, &sg, copy);
                 if (unlikely(err))
@@ -578,9 +576,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                         if (copy > len)
                                 copy = len;
 
-                        sg_set_page(&sg, frag->page);
-                        sg.offset = frag->page_offset + offset-start;
-                        sg.length = copy;
+                        sg_init_table(&sg, 1);
+                        sg_set_page(&sg, frag->page, copy,
+                                    frag->page_offset + offset-start);
 
                         err = icv_update(desc, &sg, copy);
                         if (unlikely(err))
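The TCP MD5 hunks in net/ipv4/tcp_ipv4.c and net/ipv6/tcp_ipv6.c above add a second, related pattern: size the scatterlist for its maximum number of entries up front, fill it incrementally, then terminate it at the count actually used. A minimal sketch of that pattern follows; it is not code from the patch, the buffer names are made up, and it assumes the two-argument sg_mark_end() of this kernel generation (later kernels reduced the helper to a single scatterlist argument).

#include <linux/scatterlist.h>

/*
 * Illustrative only: build a scatterlist over two hypothetical buffers
 * the way the tcp_v{4,6}_do_calc_md5_hash() hunks above do, then mark
 * how many entries were actually filled in.
 */
static void build_hash_sglist(struct scatterlist *sg,
                              void *pseudo_hdr, unsigned int hdr_len,
                              void *md5_key, unsigned int key_len)
{
        int block = 0;

        sg_init_table(sg, 4);                           /* room for up to four segments */
        sg_set_buf(&sg[block++], pseudo_hdr, hdr_len);  /* pseudo-header */
        sg_set_buf(&sg[block++], md5_key, key_len);     /* MD5 key */
        sg_mark_end(sg, block);                         /* terminate after 'block' entries */
}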