Diffstat (limited to 'net')
187 files changed, 4983 insertions, 1663 deletions
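A note on the first vlan_dev.c hunks below: dev_queue_xmit() can return NET_XMIT_CN ("congestion notification"), which does not necessarily mean the submitted frame was dropped — a qdisc may instead have discarded a different, previously queued packet — so treating it as a failure under-counts transmitted frames. A minimal sketch of the corrected accounting (illustrative only, condensed from the hunks that follow):

	/* Count the frame as sent unless the qdisc hard-failed this skb;
	 * NET_XMIT_CN signals congestion, not loss of this packet. */
	len = skb->len;			/* skb may be freed by the xmit path */
	ret = dev_queue_xmit(skb);
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else
		txq->tx_dropped++;

Both the accelerated and non-accelerated vlan transmit paths get the same fix.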
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index c584a0a..bd537fc 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -61,7 +61,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) dev->dev_addr)) skb->pkt_type = PACKET_HOST; break; - }; + } return 0; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index b5249c5..55be908 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -327,7 +327,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, len = skb->len; ret = dev_queue_xmit(skb); - if (likely(ret == NET_XMIT_SUCCESS)) { + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { txq->tx_packets++; txq->tx_bytes += len; } else @@ -353,7 +353,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, len = skb->len; ret = dev_queue_xmit(skb); - if (likely(ret == NET_XMIT_SUCCESS)) { + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { txq->tx_packets++; txq->tx_bytes += len; } else diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 041101a..0ea20c3 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -308,7 +308,6 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, req, err, status); rdma->state = P9_RDMA_FLUSHING; client->status = Disconnected; - return; } static void diff --git a/net/atm/br2684.c b/net/atm/br2684.c index d6c7cea..6719af6 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -446,7 +446,6 @@ error: net_dev->stats.rx_errors++; free_skb: dev_kfree_skb(skb); - return; } /* diff --git a/net/atm/lec.c b/net/atm/lec.c index feeaf57..d98bde1 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -161,8 +161,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) skb_queue_tail(&sk->sk_receive_queue, skb2); sk->sk_data_ready(sk, skb2->len); } - - return; } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ @@ -640,7 +638,6 @@ static void lec_set_multicast_list(struct net_device *dev) * by default, all multicast frames arrive over the bus. 
* eventually support selective multicast service */ - return; } static const struct net_device_ops lec_netdev_ops = { @@ -1199,8 +1196,6 @@ static void __exit lane_module_cleanup(void) dev_lec[i] = NULL; } } - - return; } module_init(lane_module_init); @@ -1334,7 +1329,6 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, priv->lane2_ops->associate_indicator(dev, mac_addr, tlvs, sizeoftlvs); } - return; } /* diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 436f2e1..622b471 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -455,7 +455,6 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr, if (end_of_tlvs - tlvs != 0) pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", dev->name, end_of_tlvs - tlvs); - return; } /* @@ -684,8 +683,6 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev) if (in_entry == NULL && eg_entry == NULL) dprintk("(%s) unused vcc closed\n", dev->name); - - return; } static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) @@ -783,8 +780,6 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(new_skb); - - return; } static struct atmdev_ops mpc_ops = { /* only send is required */ @@ -873,8 +868,6 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc) mesg.type = SET_MPS_CTRL_ADDR; memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); msg_to_mpoad(&mesg, mpc); - - return; } static void mpoad_close(struct atm_vcc *vcc) @@ -911,8 +904,6 @@ static void mpoad_close(struct atm_vcc *vcc) pr_info("(%s) going down\n", (mpc->dev) ? mpc->dev->name : "<unknown>"); module_put(THIS_MODULE); - - return; } /* @@ -1122,7 +1113,6 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc) pr_info("(%s) entry already in resolving state\n", (mpc->dev) ? 
mpc->dev->name : "<unknown>"); mpc->in_ops->put(entry); - return; } /* @@ -1166,7 +1156,6 @@ static void check_qos_and_open_shortcut(struct k_message *msg, } else memset(&msg->qos, 0, sizeof(struct atm_qos)); msg_to_mpoad(msg, client); - return; } static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) @@ -1240,8 +1229,6 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) mpc->in_ops->put(entry); entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); } while (entry != NULL); - - return; } static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) @@ -1260,8 +1247,6 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); - - return; } static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) @@ -1295,8 +1280,6 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); dprintk("exiting\n"); - - return; } /* @@ -1325,8 +1308,6 @@ static void mps_death(struct k_message *msg, struct mpoa_client *mpc) mpc->in_ops->destroy_cache(mpc); mpc->eg_ops->destroy_cache(mpc); - - return; } static void MPOA_cache_impos_rcvd(struct k_message *msg, @@ -1353,8 +1334,6 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); - - return; } static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, @@ -1392,8 +1371,6 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, pr_info("(%s) targetless LE_ARP request failed\n", mpc->dev->name); } - - return; } static void set_mps_mac_addr_rcvd(struct k_message *msg, @@ -1409,8 +1386,6 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg, return; } client->number_of_mps_macs = 1; - - return; } /* @@ -1436,7 +1411,6 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action) msg->type = action; msg_to_mpoad(msg, mpc); - return; } static void mpc_timer_refresh(void) @@ -1445,8 +1419,6 @@ static void mpc_timer_refresh(void) mpc_timer.data = mpc_timer.expires; mpc_timer.function = mpc_cache_check; add_timer(&mpc_timer); - - return; } static void mpc_cache_check(unsigned long checking_time) @@ -1471,8 +1443,6 @@ static void mpc_cache_check(unsigned long checking_time) mpc = mpc->next; } mpc_timer_refresh(); - - return; } static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, @@ -1561,8 +1531,6 @@ static void __exit atm_mpoa_cleanup(void) kfree(qos); qos = nextqos; } - - return; } module_init(atm_mpoa_init); diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index e773d83..d1b2d9a 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c @@ -182,8 +182,6 @@ static void in_cache_put(in_cache_entry *entry) memset(entry, 0, sizeof(in_cache_entry)); kfree(entry); } - - return; } /* @@ -221,8 +219,6 @@ static void in_cache_remove_entry(in_cache_entry *entry, } vcc_release_async(vcc, -EPIPE); } - - return; } /* Call this every MPC-p2 seconds... Not exactly correct solution, @@ -248,8 +244,6 @@ static void clear_count_and_expired(struct mpoa_client *client) entry = next_entry; } write_unlock_bh(&client->ingress_lock); - - return; } /* Call this every MPC-p4 seconds. 
*/ @@ -334,8 +328,6 @@ static void in_destroy_cache(struct mpoa_client *mpc) while (mpc->in_cache != NULL) mpc->in_ops->remove_entry(mpc->in_cache, mpc); write_unlock_irq(&mpc->ingress_lock); - - return; } static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, @@ -427,8 +419,6 @@ static void eg_cache_put(eg_cache_entry *entry) memset(entry, 0, sizeof(eg_cache_entry)); kfree(entry); } - - return; } /* @@ -463,8 +453,6 @@ static void eg_cache_remove_entry(eg_cache_entry *entry, } vcc_release_async(vcc, -EPIPE); } - - return; } static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, @@ -509,8 +497,6 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time) do_gettimeofday(&(entry->tv)); entry->entry_state = EGRESS_RESOLVED; entry->ctrl_info.holding_time = holding_time; - - return; } static void clear_expired(struct mpoa_client *client) @@ -537,8 +523,6 @@ static void clear_expired(struct mpoa_client *client) entry = next_entry; } write_unlock_irq(&client->egress_lock); - - return; } static void eg_destroy_cache(struct mpoa_client *mpc) @@ -547,8 +531,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc) while (mpc->eg_cache != NULL) mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); write_unlock_irq(&mpc->egress_lock); - - return; } @@ -584,6 +566,4 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc) { mpc->in_ops = &ingress_ops; mpc->eg_ops = &egress_ops; - - return; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 5e83f8e..2f768de 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1316,8 +1316,6 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) } tasklet_schedule(&hdev->tx_task); - - return; } EXPORT_SYMBOL(hci_send_acl); diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 673a368..1b682a5 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1322,8 +1322,6 @@ static void l2cap_drop_acked_frames(struct sock *sk) if (!l2cap_pi(sk)->unacked_frames) del_timer(&l2cap_pi(sk)->retrans_timer); - - return; } static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) @@ -4667,7 +4665,6 @@ void l2cap_load(void) /* Dummy function to trigger automatic L2CAP module loading by * other modules that use L2CAP sockets but don't use any other * symbols from it. 
*/ - return; } EXPORT_SYMBOL(l2cap_load); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cab71ea..309b6c2 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -1014,8 +1014,6 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, data_bits, stop_bits, parity, RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); - - return; } static void rfcomm_tty_throttle(struct tty_struct *tty) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 4767928a..d0927d1 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -273,7 +273,6 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) drop: kfree_skb(skb); - return; } /* -------- Socket interface ---------- */ diff --git a/net/bridge/br.c b/net/bridge/br.c index e1241c7..76357b5 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -38,7 +38,7 @@ static int __init br_init(void) err = stp_proto_register(&br_stp_proto); if (err < 0) { - printk(KERN_ERR "bridge: can't register sap for STP\n"); + pr_err("bridge: can't register sap for STP\n"); return err; } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index f15f9c4..eedf2c9 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -199,7 +199,7 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) } #ifdef CONFIG_NET_POLL_CONTROLLER -bool br_devices_support_netpoll(struct net_bridge *br) +static bool br_devices_support_netpoll(struct net_bridge *br) { struct net_bridge_port *p; bool ret = true; @@ -225,9 +225,9 @@ static void br_poll_controller(struct net_device *br_dev) netpoll_poll_dev(np->real_dev); } -void br_netpoll_cleanup(struct net_device *br_dev) +void br_netpoll_cleanup(struct net_device *dev) { - struct net_bridge *br = netdev_priv(br_dev); + struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; const struct net_device_ops *ops; @@ -243,10 +243,29 @@ void br_netpoll_cleanup(struct net_device *br_dev) } } -#else +void br_netpoll_disable(struct net_bridge *br, + struct net_device *dev) +{ + if (br_devices_support_netpoll(br)) + br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; + if (dev->netdev_ops->ndo_netpoll_cleanup) + dev->netdev_ops->ndo_netpoll_cleanup(dev); + else + dev->npinfo = NULL; +} -void br_netpoll_cleanup(struct net_device *br_dev) +void br_netpoll_enable(struct net_bridge *br, + struct net_device *dev) { + if (br_devices_support_netpoll(br)) { + br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; + if (br->dev->npinfo) + dev->npinfo = br->dev->npinfo; + } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { + br->dev->priv_flags |= IFF_DISABLE_NETPOLL; + br_info(br,"new device %s does not support netpoll (disabling)", + dev->name); + } } #endif diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 9101a4e..2663743 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -353,8 +353,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, */ if (fdb->is_local) return 0; - - printk(KERN_WARNING "%s adding interface with same address " + br_warn(br, "adding interface %s with same address " "as a received packet\n", source->dev->name); fdb_delete(fdb); @@ -397,9 +396,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, /* attempt to update an entry for a local interface */ if (unlikely(fdb->is_local)) { if (net_ratelimit()) - printk(KERN_WARNING "%s: received packet with " - "own address as source address\n", 
- source->dev->name); + br_warn(br, "received packet on %s with " + "own address as source address\n", + source->dev->name); } else { /* fastpath: update of existing entry */ fdb->dst = source; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 537bdd6..18b245e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -133,7 +133,7 @@ static void del_nbp(struct net_bridge_port *p) struct net_bridge *br = p->br; struct net_device *dev = p->dev; - sysfs_remove_link(br->ifobj, dev->name); + sysfs_remove_link(br->ifobj, p->dev->name); dev_set_promiscuity(dev, -1); @@ -154,14 +154,7 @@ static void del_nbp(struct net_bridge_port *p) kobject_uevent(&p->kobj, KOBJ_REMOVE); kobject_del(&p->kobj); -#ifdef CONFIG_NET_POLL_CONTROLLER - if (br_devices_support_netpoll(br)) - br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (dev->netdev_ops->ndo_netpoll_cleanup) - dev->netdev_ops->ndo_netpoll_cleanup(dev); - else - dev->npinfo = NULL; -#endif + br_netpoll_disable(br, dev); call_rcu(&p->rcu, destroy_nbp_rcu); } @@ -455,19 +448,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) kobject_uevent(&p->kobj, KOBJ_ADD); -#ifdef CONFIG_NET_POLL_CONTROLLER - if (br_devices_support_netpoll(br)) { - br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (br->dev->npinfo) - dev->npinfo = br->dev->npinfo; - } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { - br->dev->priv_flags |= IFF_DISABLE_NETPOLL; - printk(KERN_INFO "New device %s does not support netpoll\n", - dev->name); - printk(KERN_INFO "Disabling netpoll for %s\n", - br->dev->name); - } -#endif + br_netpoll_enable(br, dev); return 0; err2: diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 995afc4b..cb43312 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -412,6 +412,6 @@ int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } - pr_debug("Bridge does not support ioctl 0x%x\n", cmd); + br_debug(br, "Bridge does not support ioctl 0x%x\n", cmd); return -EOPNOTSUPP; } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index c8419e2..9d21d98 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -585,10 +585,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( if (unlikely(count > br->hash_elasticity && count)) { if (net_ratelimit()) - printk(KERN_INFO "%s: Multicast hash table " - "chain limit reached: %s\n", - br->dev->name, port ? port->dev->name : - br->dev->name); + br_info(br, "Multicast hash table " + "chain limit reached: %s\n", + port ? port->dev->name : br->dev->name); elasticity = br->hash_elasticity; } @@ -596,11 +595,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( if (mdb->size >= max) { max *= 2; if (unlikely(max >= br->hash_max)) { - printk(KERN_WARNING "%s: Multicast hash table maximum " - "reached, disabling snooping: %s, %d\n", - br->dev->name, port ? port->dev->name : - br->dev->name, - max); + br_warn(br, "Multicast hash table maximum " + "reached, disabling snooping: %s, %d\n", + port ? port->dev->name : br->dev->name, max); err = -E2BIG; disable: br->multicast_disabled = 1; @@ -611,22 +608,19 @@ disable: if (max > mdb->max || elasticity) { if (mdb->old) { if (net_ratelimit()) - printk(KERN_INFO "%s: Multicast hash table " - "on fire: %s\n", - br->dev->name, port ? port->dev->name : - br->dev->name); + br_info(br, "Multicast hash table " + "on fire: %s\n", + port ? 
port->dev->name : br->dev->name); err = -EEXIST; goto err; } err = br_mdb_rehash(&br->mdb, max, elasticity); if (err) { - printk(KERN_WARNING "%s: Cannot rehash multicast " - "hash table, disabling snooping: " - "%s, %d, %d\n", - br->dev->name, port ? port->dev->name : - br->dev->name, - mdb->size, err); + br_warn(br, "Cannot rehash multicast " + "hash table, disabling snooping: %s, %d, %d\n", + port ? port->dev->name : br->dev->name, + mdb->size, err); goto disable; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index aa56ac2..fe0a790 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -42,8 +42,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por struct nlmsghdr *nlh; u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; - pr_debug("br_fill_info event %d port %s master %s\n", - event, dev->name, br->dev->name); + br_debug(br, "br_fill_info event %d port %s master %s\n", + event, dev->name, br->dev->name); nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); if (nlh == NULL) @@ -87,7 +87,9 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) struct sk_buff *skb; int err = -ENOBUFS; - pr_debug("bridge notify event=%d\n", event); + br_debug(port->br, "port %u(%s) event %d\n", + (unsigned)port->port_no, port->dev->name, event); + skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); if (skb == NULL) goto errout; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 1413b72..717e1fd 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v struct net_device *dev = ptr; struct net_bridge_port *p = dev->br_port; struct net_bridge *br; + int err; /* not a port of a bridge */ if (p == NULL) @@ -83,6 +84,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v br_del_if(br, dev); break; + case NETDEV_CHANGENAME: + err = br_sysfs_renameif(p); + if (err) + return notifier_from_errno(err); + break; + case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ return NOTIFY_BAD; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3d2d3fe..0f4a74b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -139,6 +139,10 @@ struct net_bridge_port struct hlist_head mglist; struct hlist_node rlist; #endif + +#ifdef CONFIG_SYSFS + char sysfs_name[IFNAMSIZ]; +#endif }; struct br_cpu_netstats { @@ -240,6 +244,21 @@ struct br_input_skb_cb { # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0) #endif +#define br_printk(level, br, format, args...) \ + printk(level "%s: " format, (br)->dev->name, ##args) + +#define br_err(__br, format, args...) \ + br_printk(KERN_ERR, __br, format, ##args) +#define br_warn(__br, format, args...) \ + br_printk(KERN_WARNING, __br, format, ##args) +#define br_notice(__br, format, args...) \ + br_printk(KERN_NOTICE, __br, format, ##args) +#define br_info(__br, format, args...) \ + br_printk(KERN_INFO, __br, format, ##args) + +#define br_debug(br, format, args...) 
\ + pr_debug("%s: " format, (br)->dev->name, ##args) + extern struct notifier_block br_device_notifier; extern const u8 br_group_address[ETH_ALEN]; @@ -253,8 +272,18 @@ static inline int br_is_root_bridge(const struct net_bridge *br) extern void br_dev_setup(struct net_device *dev); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); -extern bool br_devices_support_netpoll(struct net_bridge *br); -extern void br_netpoll_cleanup(struct net_device *br_dev); +#ifdef CONFIG_NET_POLL_CONTROLLER +extern void br_netpoll_cleanup(struct net_device *dev); +extern void br_netpoll_enable(struct net_bridge *br, + struct net_device *dev); +extern void br_netpoll_disable(struct net_bridge *br, + struct net_device *dev); +#else +#define br_netpoll_cleanup(br) +#define br_netpoll_enable(br, dev) +#define br_netpoll_disable(br, dev) + +#endif /* br_fdb.c */ extern int br_fdb_init(void); @@ -455,6 +484,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port); /* br_sysfs_if.c */ extern const struct sysfs_ops brport_sysfs_ops; extern int br_sysfs_addif(struct net_bridge_port *p); +extern int br_sysfs_renameif(struct net_bridge_port *p); /* br_sysfs_br.c */ extern int br_sysfs_addbr(struct net_device *dev); @@ -463,6 +493,7 @@ extern void br_sysfs_delbr(struct net_device *dev); #else #define br_sysfs_addif(p) (0) +#define br_sysfs_renameif(p) (0) #define br_sysfs_addbr(dev) (0) #define br_sysfs_delbr(dev) do { } while(0) #endif /* CONFIG_SYSFS */ diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index edcf14b..57186d8 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -31,10 +31,9 @@ static const char *const br_port_state_names[] = { void br_log_state(const struct net_bridge_port *p) { - pr_info("%s: port %d(%s) entering %s state\n", - p->br->dev->name, p->port_no, p->dev->name, + br_info(p->br, "port %u(%s) entering %s state\n", + (unsigned) p->port_no, p->dev->name, br_port_state_names[p->state]); - } /* called under bridge lock */ @@ -300,7 +299,7 @@ void br_topology_change_detection(struct net_bridge *br) if (br->stp_enabled != BR_KERNEL_STP) return; - pr_info("%s: topology change detected, %s\n", br->dev->name, + br_info(br, "topology change detected, %s\n", isroot ? 
"propagating" : "sending tcn bpdu"); if (isroot) { @@ -469,8 +468,8 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b void br_received_tcn_bpdu(struct net_bridge_port *p) { if (br_is_designated_port(p)) { - pr_info("%s: received tcn bpdu on port %i(%s)\n", - p->br->dev->name, p->port_no, p->dev->name); + br_info(p->br, "port %u(%s) received tcn bpdu\n", + (unsigned) p->port_no, p->dev->name); br_topology_change_detection(p->br); br_topology_change_acknowledge(p); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index d527119..1d88269 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -85,17 +85,16 @@ void br_stp_enable_port(struct net_bridge_port *p) { br_init_port(p); br_port_state_selection(p->br); + br_log_state(p); } /* called under bridge lock */ void br_stp_disable_port(struct net_bridge_port *p) { - struct net_bridge *br; + struct net_bridge *br = p->br; int wasroot; - br = p->br; - printk(KERN_INFO "%s: port %i(%s) entering %s state\n", - br->dev->name, p->port_no, p->dev->name, "disabled"); + br_log_state(p); wasroot = br_is_root_bridge(br); br_become_designated_port(p); @@ -127,11 +126,10 @@ static void br_stp_start(struct net_bridge *br) r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); if (r == 0) { br->stp_enabled = BR_USER_STP; - printk(KERN_INFO "%s: userspace STP started\n", br->dev->name); + br_debug(br, "userspace STP started\n"); } else { br->stp_enabled = BR_KERNEL_STP; - printk(KERN_INFO "%s: starting userspace STP failed, " - "starting kernel STP\n", br->dev->name); + br_debug(br, "using kernel STP\n"); /* To start timers on any ports left in blocking */ spin_lock_bh(&br->lock); @@ -148,9 +146,7 @@ static void br_stp_stop(struct net_bridge *br) if (br->stp_enabled == BR_USER_STP) { r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); - printk(KERN_INFO "%s: userspace STP stopped, return code %d\n", - br->dev->name, r); - + br_info(br, "userspace STP stopped, return code %d\n", r); /* To start timers on any ports left in blocking */ spin_lock_bh(&br->lock); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 772a140..7b22456 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -35,7 +35,7 @@ static void br_hello_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *)arg; - pr_debug("%s: hello timer expired\n", br->dev->name); + br_debug(br, "hello timer expired\n"); spin_lock(&br->lock); if (br->dev->flags & IFF_UP) { br_config_bpdu_generation(br); @@ -55,13 +55,9 @@ static void br_message_age_timer_expired(unsigned long arg) if (p->state == BR_STATE_DISABLED) return; - - pr_info("%s: neighbor %.2x%.2x.%.2x:%.2x:%.2x:%.2x:%.2x:%.2x lost on port %d(%s)\n", - br->dev->name, - id->prio[0], id->prio[1], - id->addr[0], id->addr[1], id->addr[2], - id->addr[3], id->addr[4], id->addr[5], - p->port_no, p->dev->name); + br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", + (unsigned) p->port_no, p->dev->name, + id->prio[0], id->prio[1], &id->addr); /* * According to the spec, the message age timer cannot be @@ -87,8 +83,8 @@ static void br_forward_delay_timer_expired(unsigned long arg) struct net_bridge_port *p = (struct net_bridge_port *) arg; struct net_bridge *br = p->br; - pr_debug("%s: %d(%s) forward delay timer\n", - br->dev->name, p->port_no, p->dev->name); + br_debug(br, "port %u(%s) forward delay timer\n", + (unsigned) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { p->state = 
BR_STATE_LEARNING; @@ -107,7 +103,7 @@ static void br_tcn_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *) arg; - pr_debug("%s: tcn timer expired\n", br->dev->name); + br_debug(br, "tcn timer expired\n"); spin_lock(&br->lock); if (br->dev->flags & IFF_UP) { br_transmit_tcn(br); @@ -121,7 +117,7 @@ static void br_topology_change_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *) arg; - pr_debug("%s: topo change timer expired\n", br->dev->name); + br_debug(br, "topo change timer expired\n"); spin_lock(&br->lock); br->topology_change_detected = 0; br->topology_change = 0; @@ -132,8 +128,8 @@ static void br_hold_timer_expired(unsigned long arg) { struct net_bridge_port *p = (struct net_bridge_port *) arg; - pr_debug("%s: %d(%s) hold timer expired\n", - p->br->dev->name, p->port_no, p->dev->name); + br_debug(p->br, "port %u(%s) hold timer expired\n", + (unsigned) p->port_no, p->dev->name); spin_lock(&p->br->lock); if (p->config_pending) diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 0b99164..fd5799c 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -246,7 +246,7 @@ const struct sysfs_ops brport_sysfs_ops = { /* * Add sysfs entries to ethernet device added to a bridge. * Creates a brport subdirectory with bridge attributes. - * Puts symlink in bridge's brport subdirectory + * Puts symlink in bridge's brif subdirectory */ int br_sysfs_addif(struct net_bridge_port *p) { @@ -257,15 +257,37 @@ int br_sysfs_addif(struct net_bridge_port *p) err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj, SYSFS_BRIDGE_PORT_LINK); if (err) - goto out2; + return err; for (a = brport_attrs; *a; ++a) { err = sysfs_create_file(&p->kobj, &((*a)->attr)); if (err) - goto out2; + return err; } - err = sysfs_create_link(br->ifobj, &p->kobj, p->dev->name); -out2: + strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); + return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name); +} + +/* Rename bridge's brif symlink */ +int br_sysfs_renameif(struct net_bridge_port *p) +{ + struct net_bridge *br = p->br; + int err; + + /* If a rename fails, the rollback will cause another + * rename call with the existing name. 
+ */ + if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ)) + return 0; + + err = sysfs_rename_link(br->ifobj, &p->kobj, + p->sysfs_name, p->dev->name); + if (err) + netdev_notice(br->dev, "unable to rename link %s to %s", + p->sysfs_name, p->dev->name); + else + strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); + return err; } diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 024fd5b..e2b86f1 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -112,7 +112,6 @@ static void caif_device_destroy(struct net_device *dev) spin_unlock_bh(&caifdevs->lock); kfree(caifd); - return; } static int transmit(struct cflayer *layer, struct cfpkt *pkt) diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 471c629..df43f26 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -65,12 +65,11 @@ struct cfcnfg *cfcnfg_create(void) struct cfcnfg *this; struct cfctrl_rsp *resp; /* Initiate this layer */ - this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC); + this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); if (!this) { pr_warning("CAIF: %s(): Out of memory\n", __func__); return NULL; } - memset(this, 0, sizeof(struct cfcnfg)); this->mux = cfmuxl_create(); if (!this->mux) goto out_of_mem; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index a521d32..0ffe1e1 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -284,12 +284,11 @@ int cfctrl_linkup_request(struct cflayer *layer, __func__, param->linktype); return -EINVAL; } - req = kmalloc(sizeof(*req), GFP_KERNEL); + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) { pr_warning("CAIF: %s(): Out of memory\n", __func__); return -ENOMEM; } - memset(req, 0, sizeof(*req)); req->client_layer = user_layer; req->cmd = CFCTRL_CMD_LINK_SETUP; req->param = *param; diff --git a/net/can/bcm.c b/net/can/bcm.c index 907dc87..9c65e9d 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -713,8 +713,6 @@ static void bcm_remove_op(struct bcm_op *op) kfree(op->last_frames); kfree(op); - - return; } static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) diff --git a/net/core/dev.c b/net/core/dev.c index 32611c8..6c82065 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1454,7 +1454,7 @@ void net_disable_timestamp(void) } EXPORT_SYMBOL(net_disable_timestamp); -static inline void net_timestamp(struct sk_buff *skb) +static inline void net_timestamp_set(struct sk_buff *skb) { if (atomic_read(&netstamp_needed)) __net_timestamp(skb); @@ -1462,6 +1462,12 @@ static inline void net_timestamp(struct sk_buff *skb) skb->tstamp.tv64 = 0; } +static inline void net_timestamp_check(struct sk_buff *skb) +{ + if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) + __net_timestamp(skb); +} + /** * dev_forward_skb - loopback an skb to another netif * @@ -1470,7 +1476,7 @@ static inline void net_timestamp(struct sk_buff *skb) * * return values: * NET_RX_SUCCESS (no congestion) - * NET_RX_DROP (packet was dropped) + * NET_RX_DROP (packet was dropped, but freed) * * dev_forward_skb can be used for injecting an skb from the * start_xmit function of one device into the receive queue @@ -1484,12 +1490,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { skb_orphan(skb); - if (!(dev->flags & IFF_UP)) - return NET_RX_DROP; - - if (skb->len > (dev->mtu + dev->hard_header_len)) + if (!(dev->flags & IFF_UP) || + (skb->len > (dev->mtu + dev->hard_header_len))) { + kfree_skb(skb); return NET_RX_DROP; - + } skb_set_dev(skb, dev); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; @@ -1509,9 +1514,9 @@ static void dev_queue_xmit_nit(struct sk_buff 
*skb, struct net_device *dev) #ifdef CONFIG_NET_CLS_ACT if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS))) - net_timestamp(skb); + net_timestamp_set(skb); #else - net_timestamp(skb); + net_timestamp_set(skb); #endif rcu_read_lock(); @@ -2047,6 +2052,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * waiting to be sent out; and the qdisc is not running - * xmit the skb directly. */ + if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) + skb_dst_force(skb); __qdisc_update_bstats(q, skb->len); if (sch_direct_xmit(skb, q, dev, txq, root_lock)) __qdisc_run(q); @@ -2055,6 +2062,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, rc = NET_XMIT_SUCCESS; } else { + skb_dst_force(skb); rc = qdisc_enqueue_root(skb, q); qdisc_run(q); } @@ -2202,6 +2210,7 @@ EXPORT_SYMBOL(dev_queue_xmit); =======================================================================*/ int netdev_max_backlog __read_mostly = 1000; +int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; int weight_p __read_mostly = 64; /* old backlog weight */ @@ -2426,8 +2435,10 @@ enqueue: return NET_RX_SUCCESS; } - /* Schedule NAPI for backlog device */ - if (napi_schedule_prep(&sd->backlog)) { + /* Schedule NAPI for backlog device + * We can use non atomic operation since we own the queue lock + */ + if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { if (!rps_ipi_queued(sd)) ____napi_schedule(sd, &sd->backlog); } @@ -2466,8 +2477,8 @@ int netif_rx(struct sk_buff *skb) if (netpoll_rx(skb)) return NET_RX_DROP; - if (!skb->tstamp.tv64) - net_timestamp(skb); + if (netdev_tstamp_prequeue) + net_timestamp_check(skb); #ifdef CONFIG_RPS { @@ -2613,7 +2624,8 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb, #endif #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) -struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly; +struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p, + struct sk_buff *skb) __read_mostly; EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, @@ -2621,14 +2633,17 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, int *ret, struct net_device *orig_dev) { - if (skb->dev->macvlan_port == NULL) + struct macvlan_port *port; + + port = rcu_dereference(skb->dev->macvlan_port); + if (!port) return skb; if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } - return macvlan_handle_frame_hook(skb); + return macvlan_handle_frame_hook(port, skb); } #else #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) @@ -2788,8 +2803,8 @@ static int __netif_receive_skb(struct sk_buff *skb) int ret = NET_RX_DROP; __be16 type; - if (!skb->tstamp.tv64) - net_timestamp(skb); + if (!netdev_tstamp_prequeue) + net_timestamp_check(skb); if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) return NET_RX_SUCCESS; @@ -2907,23 +2922,28 @@ out: */ int netif_receive_skb(struct sk_buff *skb) { + if (netdev_tstamp_prequeue) + net_timestamp_check(skb); + #ifdef CONFIG_RPS - struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu, ret; + { + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu, ret; - rcu_read_lock(); + rcu_read_lock(); - cpu = get_rps_cpu(skb->dev, skb, &rflow); + cpu = get_rps_cpu(skb->dev, skb, &rflow); - if (cpu >= 0) { - ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); - rcu_read_unlock(); - } else { - rcu_read_unlock(); - ret = 
__netif_receive_skb(skb); - } + if (cpu >= 0) { + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + rcu_read_unlock(); + } else { + rcu_read_unlock(); + ret = __netif_receive_skb(skb); + } - return ret; + return ret; + } #else return __netif_receive_skb(skb); #endif diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1a7db92..a0f4964 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -522,7 +522,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) p += ETH_GSTRING_LEN; num_strings++; goto unknown_filter; - }; + } /* now the rest of the filters */ switch (fsc->fs.flow_type) { @@ -646,7 +646,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) p += ETH_GSTRING_LEN; num_strings++; break; - }; + } sprintf(p, "\tVLAN: %d, mask: 0x%x\n", fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); p += ETH_GSTRING_LEN; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 23a71cb..e4b9870 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -644,15 +644,47 @@ static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) memcpy(v, &a, sizeof(a)); } +/* All VF info */ static inline int rtnl_vfinfo_size(const struct net_device *dev) { - if (dev->dev.parent && dev_is_pci(dev->dev.parent)) - return dev_num_vf(dev->dev.parent) * - sizeof(struct ifla_vf_info); - else + if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { + + int num_vfs = dev_num_vf(dev->dev.parent); + size_t size = nlmsg_total_size(sizeof(struct nlattr)); + size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); + size += num_vfs * (sizeof(struct ifla_vf_mac) + + sizeof(struct ifla_vf_vlan) + + sizeof(struct ifla_vf_tx_rate)); + return size; + } else return 0; } +static size_t rtnl_port_size(const struct net_device *dev) +{ + size_t port_size = nla_total_size(4) /* PORT_VF */ + + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ + + nla_total_size(sizeof(struct ifla_port_vsi)) + /* PORT_VSI_TYPE */ + + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ + + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ + + nla_total_size(1) /* PROT_VDP_REQUEST */ + + nla_total_size(2); /* PORT_VDP_RESPONSE */ + size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); + size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) + + port_size; + size_t port_self_size = nla_total_size(sizeof(struct nlattr)) + + port_size; + + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) + return 0; + if (dev_num_vf(dev->dev.parent)) + return port_self_size + vf_ports_size + + vf_port_size * dev_num_vf(dev->dev.parent); + else + return port_self_size; +} + static inline size_t if_nlmsg_size(const struct net_device *dev) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) @@ -672,10 +704,83 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_NUM_VF */ - + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ + + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ + + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ } +static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) +{ + struct nlattr *vf_ports; + struct nlattr *vf_port; + int vf; + int err; + + vf_ports = nla_nest_start(skb, IFLA_VF_PORTS); + if (!vf_ports) + return -EMSGSIZE; + + for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { + vf_port = nla_nest_start(skb, IFLA_VF_PORT); + if 
(!vf_port) { + nla_nest_cancel(skb, vf_ports); + return -EMSGSIZE; + } + NLA_PUT_U32(skb, IFLA_PORT_VF, vf); + err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); + if (err) { +nla_put_failure: + nla_nest_cancel(skb, vf_port); + continue; + } + nla_nest_end(skb, vf_port); + } + + nla_nest_end(skb, vf_ports); + + return 0; +} + +static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) +{ + struct nlattr *port_self; + int err; + + port_self = nla_nest_start(skb, IFLA_PORT_SELF); + if (!port_self) + return -EMSGSIZE; + + err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); + if (err) { + nla_nest_cancel(skb, port_self); + return err; + } + + nla_nest_end(skb, port_self); + + return 0; +} + +static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) +{ + int err; + + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) + return 0; + + err = rtnl_port_self_fill(skb, dev); + if (err) + return err; + + if (dev_num_vf(dev->dev.parent)) { + err = rtnl_vf_ports_fill(skb, dev); + if (err) + return err; + } + + return 0; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags) @@ -747,17 +852,46 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); + if (dev->dev.parent) + NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); + if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { int i; - struct ifla_vf_info ivi; - NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); - for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { + struct nlattr *vfinfo, *vf; + int num_vfs = dev_num_vf(dev->dev.parent); + + vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); + if (!vfinfo) + goto nla_put_failure; + for (i = 0; i < num_vfs; i++) { + struct ifla_vf_info ivi; + struct ifla_vf_mac vf_mac; + struct ifla_vf_vlan vf_vlan; + struct ifla_vf_tx_rate vf_tx_rate; if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; - NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); + vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; + memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); + vf_vlan.vlan = ivi.vlan; + vf_vlan.qos = ivi.qos; + vf_tx_rate.rate = ivi.tx_rate; + vf = nla_nest_start(skb, IFLA_VF_INFO); + if (!vf) { + nla_nest_cancel(skb, vfinfo); + goto nla_put_failure; + } + NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); + NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); + NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); + nla_nest_end(skb, vf); } + nla_nest_end(skb, vfinfo); } + + if (rtnl_port_fill(skb, dev)) + goto nla_put_failure; + if (dev->rtnl_link_ops) { if (rtnl_link_fill(skb, dev) < 0) goto nla_put_failure; @@ -818,6 +952,22 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINKINFO] = { .type = NLA_NESTED }, [IFLA_NET_NS_PID] = { .type = NLA_U32 }, [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, + [IFLA_VFINFO_LIST] = {. 
type = NLA_NESTED }, + [IFLA_VF_PORTS] = { .type = NLA_NESTED }, + [IFLA_PORT_SELF] = { .type = NLA_NESTED }, +}; +EXPORT_SYMBOL(ifla_policy); + +static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { + [IFLA_INFO_KIND] = { .type = NLA_STRING }, + [IFLA_INFO_DATA] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { + [IFLA_VF_INFO] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_MAC] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_mac) }, [IFLA_VF_VLAN] = { .type = NLA_BINARY, @@ -825,11 +975,19 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_tx_rate) }, }; -EXPORT_SYMBOL(ifla_policy); -static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { - [IFLA_INFO_KIND] = { .type = NLA_STRING }, - [IFLA_INFO_DATA] = { .type = NLA_NESTED }, +static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { + [IFLA_PORT_VF] = { .type = NLA_U32 }, + [IFLA_PORT_PROFILE] = { .type = NLA_STRING, + .len = PORT_PROFILE_MAX }, + [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_port_vsi)}, + [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, + .len = PORT_UUID_MAX }, + [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, + .len = PORT_UUID_MAX }, + [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, + [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, }; struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) @@ -861,6 +1019,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) return 0; } +static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) +{ + int rem, err = -EINVAL; + struct nlattr *vf; + const struct net_device_ops *ops = dev->netdev_ops; + + nla_for_each_nested(vf, attr, rem) { + switch (nla_type(vf)) { + case IFLA_VF_MAC: { + struct ifla_vf_mac *ivm; + ivm = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_mac) + err = ops->ndo_set_vf_mac(dev, ivm->vf, + ivm->mac); + break; + } + case IFLA_VF_VLAN: { + struct ifla_vf_vlan *ivv; + ivv = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_vlan) + err = ops->ndo_set_vf_vlan(dev, ivv->vf, + ivv->vlan, + ivv->qos); + break; + } + case IFLA_VF_TX_RATE: { + struct ifla_vf_tx_rate *ivt; + ivt = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_tx_rate) + err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, + ivt->rate); + break; + } + default: + err = -EINVAL; + break; + } + if (err) + break; + } + return err; +} + static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int modified) { @@ -991,37 +1195,61 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, write_unlock_bh(&dev_base_lock); } - if (tb[IFLA_VF_MAC]) { - struct ifla_vf_mac *ivm; - ivm = nla_data(tb[IFLA_VF_MAC]); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_mac) - err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); - if (err < 0) - goto errout; - modified = 1; + if (tb[IFLA_VFINFO_LIST]) { + struct nlattr *attr; + int rem; + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { + if (nla_type(attr) != IFLA_VF_INFO) + goto errout; + err = do_setvfinfo(dev, attr); + if (err < 0) + goto errout; + modified = 1; + } } + err = 0; + + if (tb[IFLA_VF_PORTS]) { + struct nlattr *port[IFLA_PORT_MAX+1]; + struct nlattr *attr; + int vf; + int rem; - if (tb[IFLA_VF_VLAN]) { - struct ifla_vf_vlan *ivv; - ivv = nla_data(tb[IFLA_VF_VLAN]); err = -EOPNOTSUPP; 
- if (ops->ndo_set_vf_vlan) - err = ops->ndo_set_vf_vlan(dev, ivv->vf, - ivv->vlan, - ivv->qos); - if (err < 0) + if (!ops->ndo_set_vf_port) goto errout; - modified = 1; + + nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { + if (nla_type(attr) != IFLA_VF_PORT) + continue; + err = nla_parse_nested(port, IFLA_PORT_MAX, + attr, ifla_port_policy); + if (err < 0) + goto errout; + if (!port[IFLA_PORT_VF]) { + err = -EOPNOTSUPP; + goto errout; + } + vf = nla_get_u32(port[IFLA_PORT_VF]); + err = ops->ndo_set_vf_port(dev, vf, port); + if (err < 0) + goto errout; + modified = 1; + } } err = 0; - if (tb[IFLA_VF_TX_RATE]) { - struct ifla_vf_tx_rate *ivt; - ivt = nla_data(tb[IFLA_VF_TX_RATE]); + if (tb[IFLA_PORT_SELF]) { + struct nlattr *port[IFLA_PORT_MAX+1]; + + err = nla_parse_nested(port, IFLA_PORT_MAX, + tb[IFLA_PORT_SELF], ifla_port_policy); + if (err < 0) + goto errout; + err = -EOPNOTSUPP; - if (ops->ndo_set_vf_tx_rate) - err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); + if (ops->ndo_set_vf_port) + err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); if (err < 0) goto errout; modified = 1; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a9b0e1f..4c11000 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -520,7 +520,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->transport_header = old->transport_header; new->network_header = old->network_header; new->mac_header = old->mac_header; - skb_dst_set(new, dst_clone(skb_dst(old))); + skb_dst_copy(new, old); new->rxhash = old->rxhash; #ifdef CONFIG_XFRM new->sp = secpath_get(old->sp); @@ -2718,6 +2718,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); skb_shinfo(nskb)->frag_list = p; skb_shinfo(nskb)->gso_size = pinfo->gso_size; + pinfo->gso_size = 0; skb_header_release(p); nskb->prev = p; diff --git a/net/core/sock.c b/net/core/sock.c index 94c4aff..bf88a16 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -307,6 +307,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) */ skb_len = skb->len; + /* we escape from rcu protected region, make sure we dont leak + * a norefcounted dst + */ + skb_dst_force(skb); + spin_lock_irqsave(&list->lock, flags); skb->dropcount = atomic_read(&sk->sk_drops); __skb_queue_tail(list, skb); @@ -1231,6 +1236,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; + sk->sk_route_caps &= ~sk->sk_route_nocaps; if (sk_can_gso(sk)) { if (dst->header_len) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; @@ -1535,6 +1541,7 @@ static void __release_sock(struct sock *sk) do { struct sk_buff *next = skb->next; + WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index dcc7d25..01eee5d 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -122,6 +122,13 @@ static struct ctl_table net_core_table[] = { .proc_handler = proc_dointvec }, { + .procname = "netdev_tstamp_prequeue", + .data = &netdev_tstamp_prequeue, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { .procname = "message_cost", .data = &net_ratelimit_state.interval, .maxlen = sizeof(int), diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 615dbe3..4c409b4 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1220,17 +1220,14 @@ void dn_dev_down(struct 
net_device *dev) void dn_dev_init_pkt(struct sk_buff *skb) { - return; } void dn_dev_veri_pkt(struct sk_buff *skb) { - return; } void dn_dev_hello(struct sk_buff *skb) { - return; } void dn_dev_devices_off(void) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index a8432e3..812e6df 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -264,7 +264,6 @@ static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) static void dn_dst_link_failure(struct sk_buff *skb) { - return; } static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c6c43bc..551ce56 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1573,9 +1573,13 @@ static int __init inet_init(void) BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb)); + sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); + if (!sysctl_local_reserved_ports) + goto out; + rc = proto_register(&tcp_prot, 1); if (rc) - goto out; + goto out_free_reserved_ports; rc = proto_register(&udp_prot, 1); if (rc) @@ -1674,6 +1678,8 @@ out_unregister_udp_proto: proto_unregister(&udp_prot); out_unregister_tcp_proto: proto_unregister(&tcp_prot); +out_free_reserved_ports: + kfree(sysctl_local_reserved_ports); goto out; } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 6e74706..f094b75 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -661,13 +661,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, #endif #endif -#ifdef CONFIG_FDDI +#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) case ARPHRD_FDDI: arp->ar_hrd = htons(ARPHRD_ETHER); arp->ar_pro = htons(ETH_P_IP); break; #endif -#ifdef CONFIG_TR +#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) case ARPHRD_IEEE802_TR: arp->ar_hrd = htons(ARPHRD_IEEE802); arp->ar_pro = htons(ETH_P_IP); @@ -854,7 +854,7 @@ static int arp_process(struct sk_buff *skb) } if (arp->ar_op == htons(ARPOP_REQUEST) && - ip_route_input(skb, tip, sip, 0, dev) == 0) { + ip_route_input_noref(skb, tip, sip, 0, dev) == 0) { rt = skb_rtable(skb); addr_type = rt->rt_type; @@ -1051,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, return -EINVAL; } switch (dev->type) { -#ifdef CONFIG_FDDI +#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) case ARPHRD_FDDI: /* * According to RFC 1390, FDDI devices should accept ARP diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index c97cd9f..3a92a76 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -290,8 +290,6 @@ void cipso_v4_cache_invalidate(void) cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } - - return; } /** diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index c98f115..79d057a 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1022,8 +1022,6 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) rcu_assign_pointer(t->trie, (struct node *)tn); tnode_free_flush(); - - return; } /* only used from updater-side */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index f3d339f..d65e9215 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -587,20 +587,20 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) err = __ip_route_output_key(net, &rt2, &fl); else { struct flowi fl2 = {}; - struct dst_entry *odst; + unsigned long orefdst; fl2.fl4_dst = fl.fl4_src; if (ip_route_output_key(net, &rt2, &fl2)) goto relookup_failed; /* Ugh! 
*/ - odst = skb_dst(skb_in); + orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, RT_TOS(tos), rt2->u.dst.dev); dst_release(&rt2->u.dst); rt2 = skb_rtable(skb_in); - skb_dst_set(skb_in, odst); + skb_in->_skb_refdst = orefdst; /* restore old refdst */ } if (err) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index e0a3e35..70eb350 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -37,6 +37,9 @@ struct local_ports sysctl_local_ports __read_mostly = { .range = { 32768, 61000 }, }; +unsigned long *sysctl_local_reserved_ports; +EXPORT_SYMBOL(sysctl_local_reserved_ports); + void inet_get_local_port_range(int *low, int *high) { unsigned seq; @@ -108,6 +111,8 @@ again: smallest_size = -1; do { + if (inet_is_reserved_local_port(rover)) + goto next_nolock; head = &hashinfo->bhash[inet_bhashfn(net, rover, hashinfo->bhash_size)]; spin_lock(&head->lock); @@ -130,6 +135,7 @@ again: break; next: spin_unlock(&head->lock); + next_nolock: if (++rover > high) rover = low; } while (--remaining > 0); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2b79377..d3e160a 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -456,6 +456,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, local_bh_disable(); for (i = 1; i <= remaining; i++) { port = low + (i + offset) % remaining; + if (inet_is_reserved_local_port(port)) + continue; head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock(&head->lock); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index fe381d1..32618e1 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -502,7 +502,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info) t->err_time = jiffies; out: rcu_read_unlock(); - return; } static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) @@ -538,7 +537,6 @@ static int ipgre_rcv(struct sk_buff *skb) struct ip_tunnel *tunnel; int offset = 4; __be16 gre_proto; - unsigned int len; if (!pskb_may_pull(skb, 16)) goto drop_nolock; @@ -629,8 +627,6 @@ static int ipgre_rcv(struct sk_buff *skb) tunnel->i_seqno = seqno + 1; } - len = skb->len; - /* Warning: All skb pointers will be invalidated! */ if (tunnel->dev->type == ARPHRD_ETHER) { if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -644,11 +640,7 @@ static int ipgre_rcv(struct sk_buff *skb) skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } - stats->rx_packets++; - stats->rx_bytes += len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + skb_tunnel_rx(skb, tunnel->dev); skb_reset_network_header(skb); ipgre_ecn_decapsulate(iph, skb); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index af76de5..d930dc5 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -331,8 +331,8 @@ static int ip_rcv_finish(struct sk_buff *skb) * how the packet travels inside Linux networking. 
*/ if (skb_dst(skb) == NULL) { - int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, - skb->dev); + int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, + iph->tos, skb->dev); if (unlikely(err)) { if (err == -EHOSTUNREACH) IP_INC_STATS_BH(dev_net(skb->dev), diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 4c09a31..ba9836c 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -238,7 +238,6 @@ void ip_options_fragment(struct sk_buff * skb) opt->rr_needaddr = 0; opt->ts_needaddr = 0; opt->ts_needtime = 0; - return; } /* @@ -601,6 +600,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = skb_rtable(skb); struct rtable *rt2; + unsigned long orefdst; int err; if (!opt->srr) @@ -624,16 +624,16 @@ int ip_options_rcv_srr(struct sk_buff *skb) } memcpy(&nexthop, &optptr[srrptr-1], 4); - rt = skb_rtable(skb); + orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { - ip_rt_put(rt2); - skb_dst_set(skb, &rt->u.dst); + skb_dst_drop(skb); + skb->_skb_refdst = orefdst; return -EINVAL; } - ip_rt_put(rt); + refdst_drop(orefdst); if (rt2->rt_type != RTN_LOCAL) break; /* Superfast 8) loopback forward */ diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 2528974..9a4a6c9 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -318,10 +318,12 @@ int ip_queue_xmit(struct sk_buff *skb) struct ip_options *opt = inet->opt; struct rtable *rt; struct iphdr *iph; + int res; /* Skip all of this if the packet is already routed, * f.e. by something like SCTP. */ + rcu_read_lock(); rt = skb_rtable(skb); if (rt != NULL) goto packet_routed; @@ -359,7 +361,7 @@ int ip_queue_xmit(struct sk_buff *skb) } sk_setup_caps(sk, &rt->u.dst); } - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set_noref(skb, &rt->u.dst); packet_routed: if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) @@ -391,9 +393,12 @@ packet_routed: skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - return ip_local_out(skb); + res = ip_local_out(skb); + rcu_read_unlock(); + return res; no_route: + rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EHOSTUNREACH; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 0b27b14..7fd6367 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -374,11 +374,8 @@ static int ipip_rcv(struct sk_buff *skb) skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; - tunnel->dev->stats.rx_packets++; - tunnel->dev->stats.rx_bytes += skb->len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + skb_tunnel_rx(skb, tunnel->dev); + ipip_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index f3f1c6b..4588910 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -22,7 +22,7 @@ * overflow. * Carlos Picoto : PIMv1 Support * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header - * Relax this requrement to work with older peers. + * Relax this requirement to work with older peers. 
* */ @@ -998,7 +998,8 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) atomic_inc(&mrt->cache_resolve_queue_len); list_add(&c->list, &mrt->mfc_unres_queue); - mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); + if (atomic_read(&mrt->cache_resolve_queue_len) == 1) + mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); } /* @@ -1605,7 +1606,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, out_free: kfree_skb(skb); - return; } static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) @@ -1830,14 +1830,12 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, skb->mac_header = skb->network_header; skb_pull(skb, (u8*)encap - skb->data); skb_reset_network_header(skb); - skb->dev = reg_dev; skb->protocol = htons(ETH_P_IP); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; - skb_dst_drop(skb); - reg_dev->stats.rx_bytes += skb->len; - reg_dev->stats.rx_packets++; - nf_reset(skb); + + skb_tunnel_rx(skb, reg_dev); + netif_rx(skb); dev_put(reg_dev); diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 82fb43c..07de855 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -17,7 +17,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi fl = {}; - struct dst_entry *odst; + unsigned long orefdst; unsigned int hh_len; unsigned int type; @@ -51,14 +51,14 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) if (ip_route_output_key(net, &rt, &fl) != 0) return -1; - odst = skb_dst(skb); + orefdst = skb->_skb_refdst; if (ip_route_input(skb, iph->daddr, iph->saddr, RT_TOS(iph->tos), rt->u.dst.dev) != 0) { dst_release(&rt->u.dst); return -1; } dst_release(&rt->u.dst); - dst_release(odst); + refdst_drop(orefdst); } if (skb_dst(skb)->error) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index dea3f92..560acc6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2277,8 +2277,8 @@ martian_source: goto e_inval; } -int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev) +int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, + u8 tos, struct net_device *dev, bool noref) { struct rtable * rth; unsigned hash; @@ -2304,10 +2304,15 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.mark == skb->mark && net_eq(dev_net(rth->u.dst.dev), net) && !rt_is_expired(rth)) { - dst_use(&rth->u.dst, jiffies); + if (noref) { + dst_use_noref(&rth->u.dst, jiffies); + skb_dst_set_noref(skb, &rth->u.dst); + } else { + dst_use(&rth->u.dst, jiffies); + skb_dst_set(skb, &rth->u.dst); + } RT_CACHE_STAT_INC(in_hit); rcu_read_unlock(); - skb_dst_set(skb, &rth->u.dst); return 0; } RT_CACHE_STAT_INC(in_hlist_search); @@ -2350,6 +2355,7 @@ skip_cache: } return ip_route_input_slow(skb, daddr, saddr, tos, dev); } +EXPORT_SYMBOL(ip_route_input_common); static int __mkroute_output(struct rtable **result, struct fib_result *res, @@ -3033,7 +3039,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) continue; if (rt_is_expired(rt)) continue; - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set_noref(skb, &rt->u.dst); if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1, NLM_F_MULTI) <= 0) { @@ -3361,5 +3367,4 @@ void __init ip_static_sysctl_init(void) #endif EXPORT_SYMBOL(__ip_select_ident); -EXPORT_SYMBOL(ip_route_input); EXPORT_SYMBOL(ip_route_output_key); diff --git 
a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1cd5c15..d96c1da 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -299,6 +299,13 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = ipv4_local_port_range, }, + { + .procname = "ip_local_reserved_ports", + .data = NULL, /* initialized in sysctl_ipv4_init */ + .maxlen = 65536, + .mode = 0644, + .proc_handler = proc_do_large_bitmap, + }, #ifdef CONFIG_IP_MULTICAST { .procname = "igmp_max_memberships", @@ -736,6 +743,16 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = { static __init int sysctl_ipv4_init(void) { struct ctl_table_header *hdr; + struct ctl_table *i; + + for (i = ipv4_table; i->procname; i++) { + if (strcmp(i->procname, "ip_local_reserved_ports") == 0) { + i->data = sysctl_local_reserved_ports; + break; + } + } + if (!i->procname) + return -EINVAL; hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); if (hdr == NULL) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8ce2974..6596b4f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2215,7 +2215,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, default: /* fallthru */ break; - }; + } if (optlen < sizeof(int)) return -EINVAL; @@ -2840,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) if (p->md5_desc.tfm) crypto_free_hash(p->md5_desc.tfm); kfree(p); - p = NULL; } } free_percpu(pool); @@ -2938,25 +2937,40 @@ retry: EXPORT_SYMBOL(tcp_alloc_md5sig_pool); -struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) + +/** + * tcp_get_md5sig_pool - get md5sig_pool for this user + * + * We use percpu structure, so if we succeed, we exit with preemption + * and BH disabled, to make sure another thread or softirq handling + * wont try to get same context. + */ +struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { struct tcp_md5sig_pool * __percpu *p; - spin_lock_bh(&tcp_md5sig_pool_lock); + + local_bh_disable(); + + spin_lock(&tcp_md5sig_pool_lock); p = tcp_md5sig_pool; if (p) tcp_md5sig_users++; - spin_unlock_bh(&tcp_md5sig_pool_lock); - return (p ? 
*per_cpu_ptr(p, cpu) : NULL); -} + spin_unlock(&tcp_md5sig_pool_lock); -EXPORT_SYMBOL(__tcp_get_md5sig_pool); + if (p) + return *per_cpu_ptr(p, smp_processor_id()); -void __tcp_put_md5sig_pool(void) + local_bh_enable(); + return NULL; +} +EXPORT_SYMBOL(tcp_get_md5sig_pool); + +void tcp_put_md5sig_pool(void) { + local_bh_enable(); tcp_free_md5sig_pool(); } - -EXPORT_SYMBOL(__tcp_put_md5sig_pool); +EXPORT_SYMBOL(tcp_put_md5sig_pool); int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, struct tcphdr *th) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e82162c..3e6dafc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3845,12 +3845,13 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, /* 16-bit multiple */ opt_rx->cookie_plus = opsize; *hvpp = ptr; + break; default: /* ignore option */ break; - }; + } break; - }; + } ptr += opsize-2; length -= opsize; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 771f814..202cf09 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -891,7 +891,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, kfree(newkey); return -ENOMEM; } - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(sk, NETIF_F_GSO_MASK); } if (tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); @@ -1021,7 +1021,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, return -EINVAL; tp->md5sig_info = p; - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(sk, NETIF_F_GSO_MASK); } newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation); @@ -1462,7 +1462,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, if (newkey != NULL) tcp_v4_md5_do_add(newsk, newinet->inet_daddr, newkey, key->keylen); - newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(newsk, NETIF_F_GSO_MASK); } #endif diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5db3a2c..b4ed957 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -668,7 +668,6 @@ static unsigned tcp_synack_options(struct sock *sk, u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? xvp->cookie_plus : 0; - bool doing_ts = ireq->tstamp_ok; #ifdef CONFIG_TCP_MD5SIG *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); @@ -681,7 +680,7 @@ static unsigned tcp_synack_options(struct sock *sk, * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. */ - doing_ts &= !ireq->sack_ok; + ireq->tstamp_ok &= !ireq->sack_ok; } #else *md5 = NULL; @@ -696,7 +695,7 @@ static unsigned tcp_synack_options(struct sock *sk, opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } - if (likely(doing_ts)) { + if (likely(ireq->tstamp_ok)) { opts->options |= OPTION_TS; opts->tsval = TCP_SKB_CB(skb)->when; opts->tsecr = req->ts_recent; @@ -704,7 +703,7 @@ static unsigned tcp_synack_options(struct sock *sk, } if (likely(ireq->sack_ok)) { opts->options |= OPTION_SACK_ADVERTISE; - if (unlikely(!doing_ts)) + if (unlikely(!ireq->tstamp_ok)) remaining -= TCPOLEN_SACKPERM_ALIGNED; } @@ -712,7 +711,7 @@ static unsigned tcp_synack_options(struct sock *sk, * If the <SYN> options fit, the same options should fit now! 
*/ if (*md5 == NULL && - doing_ts && + ireq->tstamp_ok && cookie_plus > TCPOLEN_COOKIE_BASE) { int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ @@ -873,7 +872,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ if (md5) { - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(sk, NETIF_F_GSO_MASK); tp->af_specific->calc_md5_hash(opts.hash_location, md5, sk, NULL, skb); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4560b29..9de6a69 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -233,7 +233,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, */ do { if (low <= snum && snum <= high && - !test_bit(snum >> udptable->log, bitmap)) + !test_bit(snum >> udptable->log, bitmap) && + !inet_is_reserved_local_port(snum)) goto found; snum += rand; } while (snum != first); @@ -1536,6 +1537,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, uh = udp_hdr(skb); ulen = ntohs(uh->len); + saddr = ip_hdr(skb)->saddr; + daddr = ip_hdr(skb)->daddr; + if (ulen > skb->len) goto short_packet; @@ -1549,9 +1553,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp4_csum_init(skb, uh, proto)) goto csum_error; - saddr = ip_hdr(skb)->saddr; - daddr = ip_hdr(skb)->daddr; - if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index abcd7ed..ad8fbb8 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -27,8 +27,8 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) if (skb_dst(skb) == NULL) { const struct iphdr *iph = ip_hdr(skb); - if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, - skb->dev)) + if (ip_route_input_noref(skb, iph->daddr, iph->saddr, + iph->tos, skb->dev)) goto drop; } return dst_input(skb); diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index a578096..36d7437 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -229,6 +229,20 @@ config IPV6_MROUTE Experimental support for IPv6 multicast forwarding. If unsure, say N. +config IPV6_MROUTE_MULTIPLE_TABLES + bool "IPv6: multicast policy routing" + depends on IPV6_MROUTE + select FIB_RULES + help + Normally, a multicast router runs a userspace daemon and decides + what to do with a multicast packet based on the source and + destination addresses. If you say Y here, the multicast router + will also be able to take interfaces and packet marks into + account and run multiple instances of userspace daemons + simultaneously, each one handling a single table. + + If unsure, say N. 
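A note on driving the new per-table API described by this help text: the MRT6_TABLE setsockopt case added to ip6_mroute_setsockopt further down in this patch takes a u32 table id (optlen must be exactly sizeof(u32)), creates the table through ip6mr_new_table() if it does not exist yet, and fails with -EBUSY once the socket is already a table's mroute6_sk, so it has to be issued before MRT6_INIT. What follows is a minimal userspace sketch of that ordering, not part of the patch; it assumes the series' linux/mroute6.h exports MRT6_TABLE, and the table id 42 is arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int main(void)
{
	/* The mroute6 control socket is raw ICMPv6; creating it needs
	 * CAP_NET_RAW, and MRT6_TABLE itself needs CAP_NET_ADMIN. */
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	uint32_t table = 42;	/* arbitrary id, for illustration only */
	int v = 1;

	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* Select (and, if absent, create) the multicast routing table.
	 * Must come first: once MRT6_INIT has made this socket the
	 * table's mroute6_sk, MRT6_TABLE returns -EBUSY. */
	if (setsockopt(s, IPPROTO_IPV6, MRT6_TABLE, &table, sizeof(table)) < 0)
		perror("MRT6_TABLE");

	/* Register as the multicast routing daemon for that table. */
	if (setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v)) < 0)
		perror("MRT6_INIT");

	/* ... MRT6_ADD_MIF / MRT6_ADD_MFC configuration would follow ... */

	close(s);
	return 0;
}

Packets are then matched to a daemon's table through the fib rules registered with RTNL_FAMILY_IP6MR in the ip6mr.c hunks below, which is how interfaces and packet marks enter the lookup (see the flowi built in pim6_rcv and ip6mr_fib_lookup).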
+ config IPV6_PIMSM_V2 bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" depends on IPV6_MROUTE diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3984f52..e1a698d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -553,7 +553,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) if (del_timer(&ifp->timer)) pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); - if (!ifp->dead) { + if (ifp->state != INET6_IFADDR_STATE_DEAD) { pr_warning("Freeing alive inet6 address %p\n", ifp); return; } @@ -648,6 +648,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, ipv6_addr_copy(&ifa->addr, addr); spin_lock_init(&ifa->lock); + spin_lock_init(&ifa->state_lock); init_timer(&ifa->timer); INIT_HLIST_NODE(&ifa->addr_lst); ifa->timer.data = (unsigned long) ifa; @@ -714,13 +715,20 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) { struct inet6_ifaddr *ifa, *ifn; struct inet6_dev *idev = ifp->idev; + int state; int hash; int deleted = 0, onlink = 0; unsigned long expires = jiffies; hash = ipv6_addr_hash(&ifp->addr); - ifp->dead = 1; + spin_lock_bh(&ifp->state_lock); + state = ifp->state; + ifp->state = INET6_IFADDR_STATE_DEAD; + spin_unlock_bh(&ifp->state_lock); + + if (state == INET6_IFADDR_STATE_DEAD) + goto out; spin_lock_bh(&addrconf_hash_lock); hlist_del_init_rcu(&ifp->addr_lst); @@ -818,6 +826,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) dst_release(&rt->u.dst); } +out: in6_ifa_put(ifp); } @@ -1274,7 +1283,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) int ipv6_chk_addr(struct net *net, struct in6_addr *addr, struct net_device *dev, int strict) { - struct inet6_ifaddr *ifp = NULL; + struct inet6_ifaddr *ifp; struct hlist_node *node; unsigned int hash = ipv6_addr_hash(addr); @@ -1283,15 +1292,16 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr, if (!net_eq(dev_net(ifp->idev->dev), net)) continue; if (ipv6_addr_equal(&ifp->addr, addr) && - !(ifp->flags&IFA_F_TENTATIVE)) { - if (dev == NULL || ifp->idev->dev == dev || - !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) - break; + !(ifp->flags&IFA_F_TENTATIVE) && + (dev == NULL || ifp->idev->dev == dev || + !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { + rcu_read_unlock_bh(); + return 1; } } - rcu_read_unlock_bh(); - return ifp != NULL; + rcu_read_unlock_bh(); + return 0; } EXPORT_SYMBOL(ipv6_chk_addr); @@ -1396,10 +1406,27 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) ipv6_del_addr(ifp); } +static int addrconf_dad_end(struct inet6_ifaddr *ifp) +{ + int err = -ENOENT; + + spin_lock(&ifp->state_lock); + if (ifp->state == INET6_IFADDR_STATE_DAD) { + ifp->state = INET6_IFADDR_STATE_POSTDAD; + err = 0; + } + spin_unlock(&ifp->state_lock); + + return err; +} + void addrconf_dad_failure(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; + if (addrconf_dad_end(ifp)) + return; + if (net_ratelimit()) printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", ifp->idev->dev->name, &ifp->addr); @@ -2624,6 +2651,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) struct inet6_dev *idev; struct inet6_ifaddr *ifa; LIST_HEAD(keep_list); + int state; ASSERT_RTNL(); @@ -2664,7 +2692,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) ifa = list_first_entry(&idev->tempaddr_list, struct inet6_ifaddr, tmp_list); list_del(&ifa->tmp_list); - ifa->dead = 1; write_unlock_bh(&idev->lock); spin_lock_bh(&ifa->lock); @@ -2702,23 +2729,35 @@ static int addrconf_ifdown(struct net_device *dev, int how) 
/* Flag it for later restoration when link comes up */ ifa->flags |= IFA_F_TENTATIVE; - in6_ifa_hold(ifa); + ifa->state = INET6_IFADDR_STATE_DAD; + write_unlock_bh(&idev->lock); + + in6_ifa_hold(ifa); } else { list_del(&ifa->if_list); - ifa->dead = 1; - write_unlock_bh(&idev->lock); /* clear hash table */ spin_lock_bh(&addrconf_hash_lock); hlist_del_init_rcu(&ifa->addr_lst); spin_unlock_bh(&addrconf_hash_lock); + + write_unlock_bh(&idev->lock); + spin_lock_bh(&ifa->state_lock); + state = ifa->state; + ifa->state = INET6_IFADDR_STATE_DEAD; + spin_unlock_bh(&ifa->state_lock); + + if (state == INET6_IFADDR_STATE_DEAD) + goto put_ifa; } __ipv6_ifa_notify(RTM_DELADDR, ifa); - if (ifa->dead) + if (ifa->state == INET6_IFADDR_STATE_DEAD) atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); + +put_ifa: in6_ifa_put(ifa); write_lock_bh(&idev->lock); @@ -2814,10 +2853,10 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) net_srandom(ifp->addr.s6_addr32[3]); read_lock_bh(&idev->lock); - if (ifp->dead) + spin_lock(&ifp->lock); + if (ifp->state == INET6_IFADDR_STATE_DEAD) goto out; - spin_lock(&ifp->lock); if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || idev->cnf.accept_dad < 1 || !(ifp->flags&IFA_F_TENTATIVE) || @@ -2851,8 +2890,8 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) ip6_ins_rt(ifp->rt); addrconf_dad_kick(ifp); - spin_unlock(&ifp->lock); out: + spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); } @@ -2862,6 +2901,9 @@ static void addrconf_dad_timer(unsigned long data) struct inet6_dev *idev = ifp->idev; struct in6_addr mcaddr; + if (!ifp->probes && addrconf_dad_end(ifp)) + goto out; + read_lock(&idev->lock); if (idev->dead || !(idev->if_flags & IF_READY)) { read_unlock(&idev->lock); @@ -2869,6 +2911,12 @@ static void addrconf_dad_timer(unsigned long data) } spin_lock(&ifp->lock); + if (ifp->state == INET6_IFADDR_STATE_DEAD) { + spin_unlock(&ifp->lock); + read_unlock(&idev->lock); + goto out; + } + if (ifp->probes == 0) { /* * DAD was successful @@ -2935,12 +2983,10 @@ static void addrconf_dad_run(struct inet6_dev *idev) read_lock_bh(&idev->lock); list_for_each_entry(ifp, &idev->addr_list, if_list) { spin_lock(&ifp->lock); - if (!(ifp->flags & IFA_F_TENTATIVE)) { - spin_unlock(&ifp->lock); - continue; - } + if (ifp->flags & IFA_F_TENTATIVE && + ifp->state == INET6_IFADDR_STATE_DAD) + addrconf_dad_kick(ifp); spin_unlock(&ifp->lock); - addrconf_dad_kick(ifp); } read_unlock_bh(&idev->lock); } @@ -4049,7 +4095,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) addrconf_leave_solict(ifp->idev, &ifp->addr); dst_hold(&ifp->rt->u.dst); - if (ifp->dead && ip6_del_rt(ifp->rt)) + if (ifp->state == INET6_IFADDR_STATE_DEAD && + ip6_del_rt(ifp->rt)) dst_free(&ifp->rt->u.dst); break; } diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index ae404c9..8c4348c 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -422,10 +422,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, ifal->ifal_prefixlen > 128) return -EINVAL; - if (ifal->ifal_index && - !__dev_get_by_index(net, ifal->ifal_index)) - return -EINVAL; - if (!tb[IFAL_ADDRESS]) return -EINVAL; @@ -441,6 +437,10 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, switch(nlh->nlmsg_type) { case RTM_NEWADDRLABEL: + if (ifal->ifal_index && + !__dev_get_by_index(net, ifal->ifal_index)) + return -EINVAL; + err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, ifal->ifal_index, label, nlh->nlmsg_flags & NLM_F_REPLACE); diff 
--git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d2df314..e733942 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -200,7 +200,7 @@ lookup_protocol: inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); np->hop_limit = -1; - np->mcast_hops = -1; + np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; np->ipv6only = net->ipv6.sysctl.bindv6only; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 5959230..7126846 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -222,6 +222,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, if (!skb) return; + skb->protocol = htons(ETH_P_IPV6); + serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; @@ -255,6 +257,8 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) if (!skb) return; + skb->protocol = htons(ETH_P_IPV6); + skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); @@ -358,7 +362,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_port = serr->port; sin->sin6_scope_id = 0; - if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { + if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_addr_copy(&sin->sin6_addr, (struct in6_addr *)(nh + serr->addr_offset)); if (np->sndflow) @@ -380,7 +384,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; - if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { + if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 5173aca..cd963f6 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -108,7 +108,7 @@ static int ip6_finish_output2(struct sk_buff *skb) struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && - ((mroute6_socket(dev_net(dev)) && + ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2599870..8f39893 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -723,14 +723,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); - skb->dev = t->dev; - skb_dst_drop(skb); - nf_reset(skb); - dscp_ecn_decapsulate(t, ipv6h, skb); + skb_tunnel_rx(skb, t->dev); - t->dev->stats.rx_packets++; - t->dev->stats.rx_bytes += skb->len; + dscp_ecn_decapsulate(t, ipv6h, skb); netif_rx(skb); rcu_read_unlock(); return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e0b530c..bd9e7d3 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -42,6 +42,7 @@ #include <linux/if_arp.h> #include <net/checksum.h> #include <net/netlink.h> +#include <net/fib_rules.h> #include <net/ipv6.h> #include <net/ip6_route.h> @@ -51,6 +52,34 @@ #include <linux/netfilter_ipv6.h> #include <net/ip6_checksum.h> +struct mr6_table { + struct list_head list; +#ifdef CONFIG_NET_NS + struct net *net; +#endif + u32 id; + struct sock *mroute6_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc6_unres_queue; + struct list_head mfc6_cache_array[MFC6_LINES]; + struct mif_device vif6_table[MAXMIFS]; + 
int maxvif; + atomic_t cache_resolve_queue_len; + int mroute_do_assert; + int mroute_do_pim; +#ifdef CONFIG_IPV6_PIMSM_V2 + int mroute_reg_vif_num; +#endif +}; + +struct ip6mr_rule { + struct fib_rule common; +}; + +struct ip6mr_result { + struct mr6_table *mrt; +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. */ @@ -61,9 +90,7 @@ static DEFINE_RWLOCK(mrt_lock); * Multicast router control variables */ -#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) - -static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */ +#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -78,20 +105,233 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; -static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache); -static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, +static struct mr6_table *ip6mr_new_table(struct net *net, u32 id); +static void ip6mr_free_table(struct mr6_table *mrt); + +static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache); +static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, mifi_t mifi, int assert); -static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); -static void mroute_clean_tables(struct net *net); +static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm); +static int ip6mr_rtm_dumproute(struct sk_buff *skb, + struct netlink_callback *cb); +static void mroute_clean_tables(struct mr6_table *mrt); +static void ipmr_expire_process(unsigned long arg); + +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES +#define ip6mr_for_each_table(mrt, met) \ + list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) + +static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) +{ + struct mr6_table *mrt; + + ip6mr_for_each_table(mrt, net) { + if (mrt->id == id) + return mrt; + } + return NULL; +} + +static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, + struct mr6_table **mrt) +{ + struct ip6mr_result res; + struct fib_lookup_arg arg = { .result = &res, }; + int err; + + err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg); + if (err < 0) + return err; + *mrt = res.mrt; + return 0; +} + +static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, + int flags, struct fib_lookup_arg *arg) +{ + struct ip6mr_result *res = arg->result; + struct mr6_table *mrt; + + switch (rule->action) { + case FR_ACT_TO_TBL: + break; + case FR_ACT_UNREACHABLE: + return -ENETUNREACH; + case FR_ACT_PROHIBIT: + return -EACCES; + case FR_ACT_BLACKHOLE: + default: + return -EINVAL; + } + + mrt = ip6mr_get_table(rule->fr_net, rule->table); + if (mrt == NULL) + return -EAGAIN; + res->mrt = mrt; + return 0; +} + +static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) +{ + return 1; +} + +static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { + FRA_GENERIC_POLICY, +}; + +static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh, struct nlattr **tb) +{ + return 0; +} + +static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, + struct nlattr **tb) +{ + return 1; +} + +static int ip6mr_rule_fill(struct fib_rule *rule, 
struct sk_buff *skb, + struct fib_rule_hdr *frh) +{ + frh->dst_len = 0; + frh->src_len = 0; + frh->tos = 0; + return 0; +} + +static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = { + .family = RTNL_FAMILY_IP6MR, + .rule_size = sizeof(struct ip6mr_rule), + .addr_size = sizeof(struct in6_addr), + .action = ip6mr_rule_action, + .match = ip6mr_rule_match, + .configure = ip6mr_rule_configure, + .compare = ip6mr_rule_compare, + .default_pref = fib_default_rule_pref, + .fill = ip6mr_rule_fill, + .nlgroup = RTNLGRP_IPV6_RULE, + .policy = ip6mr_rule_policy, + .owner = THIS_MODULE, +}; + +static int __net_init ip6mr_rules_init(struct net *net) +{ + struct fib_rules_ops *ops; + struct mr6_table *mrt; + int err; + + ops = fib_rules_register(&ip6mr_rules_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + + INIT_LIST_HEAD(&net->ipv6.mr6_tables); + + mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) { + err = -ENOMEM; + goto err1; + } + + err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0); + if (err < 0) + goto err2; + + net->ipv6.mr6_rules_ops = ops; + return 0; + +err2: + kfree(mrt); +err1: + fib_rules_unregister(ops); + return err; +} + +static void __net_exit ip6mr_rules_exit(struct net *net) +{ + struct mr6_table *mrt, *next; + + list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) + ip6mr_free_table(mrt); + fib_rules_unregister(net->ipv6.mr6_rules_ops); +} +#else +#define ip6mr_for_each_table(mrt, net) \ + for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) + +static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) +{ + return net->ipv6.mrt6; +} + +static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, + struct mr6_table **mrt) +{ + *mrt = net->ipv6.mrt6; + return 0; +} + +static int __net_init ip6mr_rules_init(struct net *net) +{ + net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT); + return net->ipv6.mrt6 ? 
0 : -ENOMEM; +} -static struct timer_list ipmr_expire_timer; +static void __net_exit ip6mr_rules_exit(struct net *net) +{ + ip6mr_free_table(net->ipv6.mrt6); +} +#endif + +static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) +{ + struct mr6_table *mrt; + unsigned int i; + + mrt = ip6mr_get_table(net, id); + if (mrt != NULL) + return mrt; + + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) + return NULL; + mrt->id = id; + write_pnet(&mrt->net, net); + + /* Forwarding cache */ + for (i = 0; i < MFC6_LINES; i++) + INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); + + INIT_LIST_HEAD(&mrt->mfc6_unres_queue); + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, + (unsigned long)mrt); + +#ifdef CONFIG_IPV6_PIMSM_V2 + mrt->mroute_reg_vif_num = -1; +#endif +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables); +#endif + return mrt; +} + +static void ip6mr_free_table(struct mr6_table *mrt) +{ + del_timer(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} #ifdef CONFIG_PROC_FS struct ipmr_mfc_iter { struct seq_net_private p; - struct mfc6_cache **cache; + struct mr6_table *mrt; + struct list_head *cache; int ct; }; @@ -99,22 +339,22 @@ struct ipmr_mfc_iter { static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { + struct mr6_table *mrt = it->mrt; struct mfc6_cache *mfc; - it->cache = net->ipv6.mfc6_cache_array; read_lock(&mrt_lock); - for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) - for (mfc = net->ipv6.mfc6_cache_array[it->ct]; - mfc; mfc = mfc->next) + for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { + it->cache = &mrt->mfc6_cache_array[it->ct]; + list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; + } read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; spin_lock_bh(&mfc_unres_lock); - for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) - if (net_eq(mfc6_net(mfc), net) && - pos-- == 0) + it->cache = &mrt->mfc6_unres_queue; + list_for_each_entry(mfc, it->cache, list) + if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -122,15 +362,13 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, return NULL; } - - - /* * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif */ struct ipmr_vif_iter { struct seq_net_private p; + struct mr6_table *mrt; int ct; }; @@ -138,11 +376,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { - for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) { - if (!MIF_EXISTS(net, iter->ct)) + struct mr6_table *mrt = iter->mrt; + + for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { + if (!MIF_EXISTS(mrt, iter->ct)) continue; if (pos-- == 0) - return &net->ipv6.vif6_table[iter->ct]; + return &mrt->vif6_table[iter->ct]; } return NULL; } @@ -150,7 +390,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { + struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + + iter->mrt = mrt; read_lock(&mrt_lock); return *pos ? 
ip6mr_vif_seq_idx(net, seq->private, *pos - 1) @@ -161,15 +409,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = iter->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ip6mr_vif_seq_idx(net, iter, 0); - while (++iter->ct < net->ipv6.maxvif) { - if (!MIF_EXISTS(net, iter->ct)) + while (++iter->ct < mrt->maxvif) { + if (!MIF_EXISTS(mrt, iter->ct)) continue; - return &net->ipv6.vif6_table[iter->ct]; + return &mrt->vif6_table[iter->ct]; } return NULL; } @@ -182,7 +431,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) { - struct net *net = seq_file_net(seq); + struct ipmr_vif_iter *iter = seq->private; + struct mr6_table *mrt = iter->mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -193,7 +443,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", - vif - net->ipv6.vif6_table, + vif - mrt->vif6_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags); @@ -224,8 +474,15 @@ static const struct file_operations ip6mr_vif_fops = { static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { + struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt; + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + + it->mrt = mrt; return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } @@ -235,35 +492,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct mfc6_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = it->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_mfc_seq_idx(net, seq->private, 0); - if (mfc->next) - return mfc->next; + if (mfc->list.next != it->cache) + return list_entry(mfc->list.next, struct mfc6_cache, list); - if (it->cache == &mfc_unres_queue) + if (it->cache == &mrt->mfc6_unres_queue) goto end_of_list; - BUG_ON(it->cache != net->ipv6.mfc6_cache_array); + BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]); while (++it->ct < MFC6_LINES) { - mfc = net->ipv6.mfc6_cache_array[it->ct]; - if (mfc) - return mfc; + it->cache = &mrt->mfc6_cache_array[it->ct]; + if (list_empty(it->cache)) + continue; + return list_first_entry(it->cache, struct mfc6_cache, list); } /* exhausted cache_array, show unresolved */ read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; + it->cache = &mrt->mfc6_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); - mfc = mfc_unres_queue; - if (mfc) - return mfc; + if (!list_empty(it->cache)) + return list_first_entry(it->cache, struct mfc6_cache, list); end_of_list: spin_unlock_bh(&mfc_unres_lock); @@ -275,18 +533,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; - struct net *net = seq_file_net(seq); + struct mr6_table *mrt = it->mrt; - if (it->cache == &mfc_unres_queue) + if (it->cache == &mrt->mfc6_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == net->ipv6.mfc6_cache_array) + else if (it->cache == mrt->mfc6_cache_array) read_unlock(&mrt_lock); } static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; - struct net *net = seq_file_net(seq); if (v == SEQ_START_TOKEN) { seq_puts(seq, 
@@ -296,19 +553,20 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) } else { const struct mfc6_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; + struct mr6_table *mrt = it->mrt; seq_printf(seq, "%pI6 %pI6 %-3hd", &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, mfc->mf6c_parent); - if (it->cache != &mfc_unres_queue) { + if (it->cache != &mrt->mfc6_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++) { - if (MIF_EXISTS(net, n) && + if (MIF_EXISTS(mrt, n) && mfc->mfc_un.res.ttls[n] < 255) seq_printf(seq, " %2d:%-3d", @@ -355,7 +613,12 @@ static int pim6_rcv(struct sk_buff *skb) struct ipv6hdr *encap; struct net_device *reg_dev = NULL; struct net *net = dev_net(skb->dev); - int reg_vif_num = net->ipv6.mroute_reg_vif_num; + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->dev->ifindex, + .mark = skb->mark, + }; + int reg_vif_num; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) goto drop; @@ -378,9 +641,13 @@ static int pim6_rcv(struct sk_buff *skb) ntohs(encap->payload_len) + sizeof(*pim) > skb->len) goto drop; + if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) + goto drop; + reg_vif_num = mrt->mroute_reg_vif_num; + read_lock(&mrt_lock); if (reg_vif_num >= 0) - reg_dev = net->ipv6.vif6_table[reg_vif_num].dev; + reg_dev = mrt->vif6_table[reg_vif_num].dev; if (reg_dev) dev_hold(reg_dev); read_unlock(&mrt_lock); @@ -391,14 +658,12 @@ static int pim6_rcv(struct sk_buff *skb) skb->mac_header = skb->network_header; skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); - skb->dev = reg_dev; skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; - skb_dst_drop(skb); - reg_dev->stats.rx_bytes += skb->len; - reg_dev->stats.rx_packets++; - nf_reset(skb); + + skb_tunnel_rx(skb, reg_dev); + netif_rx(skb); dev_put(reg_dev); return 0; @@ -417,12 +682,22 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); + struct mr6_table *mrt; + struct flowi fl = { + .oif = dev->ifindex, + .iif = skb->skb_iif, + .mark = skb->mark, + }; + int err; + + err = ip6mr_fib_lookup(net, &fl, &mrt); + if (err < 0) + return err; read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; - ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num, - MRT6MSG_WHOLEPKT); + ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; @@ -442,11 +717,17 @@ static void reg_vif_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; } -static struct net_device *ip6mr_reg_vif(struct net *net) +static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) { struct net_device *dev; + char name[IFNAMSIZ]; + + if (mrt->id == RT6_TABLE_DFLT) + sprintf(name, "pim6reg"); + else + sprintf(name, "pim6reg%u", mrt->id); - dev = alloc_netdev(0, "pim6reg", reg_vif_setup); + dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; @@ -478,15 +759,16 @@ failure: * Delete a VIF entry */ -static int mif6_delete(struct net *net, int vifi, struct list_head *head) +static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) { struct mif_device *v; struct net_device *dev; struct inet6_dev *in6_dev; - if (vifi < 0 || vifi >= net->ipv6.maxvif) + + if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; - v = 
&net->ipv6.vif6_table[vifi]; + v = &mrt->vif6_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; @@ -498,17 +780,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) } #ifdef CONFIG_IPV6_PIMSM_V2 - if (vifi == net->ipv6.mroute_reg_vif_num) - net->ipv6.mroute_reg_vif_num = -1; + if (vifi == mrt->mroute_reg_vif_num) + mrt->mroute_reg_vif_num = -1; #endif - if (vifi + 1 == net->ipv6.maxvif) { + if (vifi + 1 == mrt->maxvif) { int tmp; for (tmp = vifi - 1; tmp >= 0; tmp--) { - if (MIF_EXISTS(net, tmp)) + if (MIF_EXISTS(mrt, tmp)) break; } - net->ipv6.maxvif = tmp + 1; + mrt->maxvif = tmp + 1; } write_unlock_bh(&mrt_lock); @@ -528,7 +810,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) static inline void ip6mr_cache_free(struct mfc6_cache *c) { - release_net(mfc6_net(c)); kmem_cache_free(mrt_cachep, c); } @@ -536,12 +817,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c) and reporting error to netlink readers. */ -static void ip6mr_destroy_unres(struct mfc6_cache *c) +static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) { + struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; - struct net *net = mfc6_net(c); - atomic_dec(&net->ipv6.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { if (ipv6_hdr(skb)->version == 0) { @@ -559,60 +840,59 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c) } -/* Single timer process for all the unresolved queue. */ +/* Timer process for all the unresolved queue. */ -static void ipmr_do_expire_process(unsigned long dummy) +static void ipmr_do_expire_process(struct mr6_table *mrt) { unsigned long now = jiffies; unsigned long expires = 10 * HZ; - struct mfc6_cache *c, **cp; - - cp = &mfc_unres_queue; + struct mfc6_cache *c, *next; - while ((c = *cp) != NULL) { + list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { /* not yet... */ unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; - cp = &c->next; continue; } - *cp = c->next; - ip6mr_destroy_unres(c); + list_del(&c->list); + ip6mr_destroy_unres(mrt, c); } - if (mfc_unres_queue != NULL) - mod_timer(&ipmr_expire_timer, jiffies + expires); + if (!list_empty(&mrt->mfc6_unres_queue)) + mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); } -static void ipmr_expire_process(unsigned long dummy) +static void ipmr_expire_process(unsigned long arg) { + struct mr6_table *mrt = (struct mr6_table *)arg; + if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&ipmr_expire_timer, jiffies + 1); + mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); return; } - if (mfc_unres_queue != NULL) - ipmr_do_expire_process(dummy); + if (!list_empty(&mrt->mfc6_unres_queue)) + ipmr_do_expire_process(mrt); spin_unlock(&mfc_unres_lock); } /* Fill oifs list. It is called under write locked mrt_lock. 
*/ -static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls) +static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache, + unsigned char *ttls) { int vifi; - struct net *net = mfc6_net(cache); cache->mfc_un.res.minvif = MAXMIFS; cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXMIFS); - for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) { - if (MIF_EXISTS(net, vifi) && + for (vifi = 0; vifi < mrt->maxvif; vifi++) { + if (MIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) @@ -623,16 +903,17 @@ static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttl } } -static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) +static int mif6_add(struct net *net, struct mr6_table *mrt, + struct mif6ctl *vifc, int mrtsock) { int vifi = vifc->mif6c_mifi; - struct mif_device *v = &net->ipv6.vif6_table[vifi]; + struct mif_device *v = &mrt->vif6_table[vifi]; struct net_device *dev; struct inet6_dev *in6_dev; int err; /* Is vif busy ? */ - if (MIF_EXISTS(net, vifi)) + if (MIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->mif6c_flags) { @@ -642,9 +923,9 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ - if (net->ipv6.mroute_reg_vif_num >= 0) + if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; - dev = ip6mr_reg_vif(net); + dev = ip6mr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); @@ -694,50 +975,48 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) v->dev = dev; #ifdef CONFIG_IPV6_PIMSM_V2 if (v->flags & MIFF_REGISTER) - net->ipv6.mroute_reg_vif_num = vifi; + mrt->mroute_reg_vif_num = vifi; #endif - if (vifi + 1 > net->ipv6.maxvif) - net->ipv6.maxvif = vifi + 1; + if (vifi + 1 > mrt->maxvif) + mrt->maxvif = vifi + 1; write_unlock_bh(&mrt_lock); return 0; } -static struct mfc6_cache *ip6mr_cache_find(struct net *net, +static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, struct in6_addr *origin, struct in6_addr *mcastgrp) { int line = MFC6_HASH(mcastgrp, origin); struct mfc6_cache *c; - for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) { + list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, origin) && ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) - break; + return c; } - return c; + return NULL; } /* * Allocate a multicast cache entry */ -static struct mfc6_cache *ip6mr_cache_alloc(struct net *net) +static struct mfc6_cache *ip6mr_cache_alloc(void) { struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c == NULL) return NULL; c->mfc_un.res.minvif = MAXMIFS; - mfc6_net_set(c, net); return c; } -static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) +static struct mfc6_cache *ip6mr_cache_alloc_unres(void) { struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c == NULL) return NULL; skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10 * HZ; - mfc6_net_set(c, net); return c; } @@ -745,7 +1024,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) * A cache entry has gone into a resolved state from queued */ -static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) +static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, + struct mfc6_cache *uc, struct mfc6_cache 
*c) { struct sk_buff *skb; @@ -758,7 +1038,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) int err; struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); - if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { + if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { nlh->nlmsg_type = NLMSG_ERROR; @@ -766,9 +1046,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) skb_trim(skb, nlh->nlmsg_len); ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; } - err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else - ip6_mr_forward(skb, c); + ip6_mr_forward(net, mrt, skb, c); } } @@ -779,8 +1059,8 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) * Called under mrt_lock. */ -static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, - int assert) +static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, + mifi_t mifi, int assert) { struct sk_buff *skb; struct mrt6msg *msg; @@ -816,7 +1096,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, msg = (struct mrt6msg *)skb_transport_header(skb); msg->im6_mbz = 0; msg->im6_msgtype = MRT6MSG_WHOLEPKT; - msg->im6_mif = net->ipv6.mroute_reg_vif_num; + msg->im6_mif = mrt->mroute_reg_vif_num; msg->im6_pad = 0; ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); @@ -851,7 +1131,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (net->ipv6.mroute6_sk == NULL) { + if (mrt->mroute6_sk == NULL) { kfree_skb(skb); return -EINVAL; } @@ -859,7 +1139,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, /* * Deliver to user space multicast routing algorithms */ - ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb); + ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); if (ret < 0) { if (net_ratelimit()) printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); @@ -874,26 +1154,28 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, */ static int -ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) +ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb) { + bool found = false; int err; struct mfc6_cache *c; spin_lock_bh(&mfc_unres_lock); - for (c = mfc_unres_queue; c; c = c->next) { - if (net_eq(mfc6_net(c), net) && - ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && - ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) + list_for_each_entry(c, &mrt->mfc6_unres_queue, list) { + if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && + ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { + found = true; break; + } } - if (c == NULL) { + if (!found) { /* * Create a new entry if allowable */ - if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 || - (c = ip6mr_cache_alloc_unres(net)) == NULL) { + if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || + (c = ip6mr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); @@ -910,7 +1192,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) /* * Reflect first query at pim6sd */ - err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE); + err = 
ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker @@ -922,11 +1204,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) return err; } - atomic_inc(&net->ipv6.cache_resolve_queue_len); - c->next = mfc_unres_queue; - mfc_unres_queue = c; + atomic_inc(&mrt->cache_resolve_queue_len); + list_add(&c->list, &mrt->mfc6_unres_queue); - ipmr_do_expire_process(1); + ipmr_do_expire_process(mrt); } /* @@ -948,19 +1229,18 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) * MFC6 cache manipulation by user space */ -static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc) +static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc) { int line; - struct mfc6_cache *c, **cp; + struct mfc6_cache *c, *next; line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - for (cp = &net->ipv6.mfc6_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ip6mr_cache_free(c); @@ -975,6 +1255,7 @@ static int ip6mr_device_event(struct notifier_block *this, { struct net_device *dev = ptr; struct net *net = dev_net(dev); + struct mr6_table *mrt; struct mif_device *v; int ct; LIST_HEAD(list); @@ -982,10 +1263,12 @@ static int ip6mr_device_event(struct notifier_block *this, if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v = &net->ipv6.vif6_table[0]; - for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { - if (v->dev == dev) - mif6_delete(net, ct, &list); + ip6mr_for_each_table(mrt, net) { + v = &mrt->vif6_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) + mif6_delete(mrt, ct, &list); + } } unregister_netdevice_many(&list); @@ -1002,26 +1285,11 @@ static struct notifier_block ip6_mr_notifier = { static int __net_init ip6mr_net_init(struct net *net) { - int err = 0; - net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device), - GFP_KERNEL); - if (!net->ipv6.vif6_table) { - err = -ENOMEM; - goto fail; - } - - /* Forwarding cache */ - net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES, - sizeof(struct mfc6_cache *), - GFP_KERNEL); - if (!net->ipv6.mfc6_cache_array) { - err = -ENOMEM; - goto fail_mfc6_cache; - } + int err; -#ifdef CONFIG_IPV6_PIMSM_V2 - net->ipv6.mroute_reg_vif_num = -1; -#endif + err = ip6mr_rules_init(net); + if (err < 0) + goto fail; #ifdef CONFIG_PROC_FS err = -ENOMEM; @@ -1030,16 +1298,15 @@ static int __net_init ip6mr_net_init(struct net *net) if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) goto proc_cache_fail; #endif + return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip6_mr_vif"); proc_vif_fail: - kfree(net->ipv6.mfc6_cache_array); + ip6mr_rules_exit(net); #endif -fail_mfc6_cache: - kfree(net->ipv6.vif6_table); fail: return err; } @@ -1050,9 +1317,7 @@ static void __net_exit ip6mr_net_exit(struct net *net) proc_net_remove(net, "ip6_mr_cache"); proc_net_remove(net, "ip6_mr_vif"); #endif - mroute_clean_tables(net); - kfree(net->ipv6.mfc6_cache_array); - kfree(net->ipv6.vif6_table); + ip6mr_rules_exit(net); } static struct pernet_operations ip6mr_net_ops = { @@ -1075,7 +1340,6 @@ int __init ip6_mr_init(void) if (err) goto reg_pernet_fail; - 
setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); err = register_netdevice_notifier(&ip6_mr_notifier); if (err) goto reg_notif_fail; @@ -1086,13 +1350,13 @@ int __init ip6_mr_init(void) goto add_proto_fail; } #endif + rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute); return 0; #ifdef CONFIG_IPV6_PIMSM_V2 add_proto_fail: unregister_netdevice_notifier(&ip6_mr_notifier); #endif reg_notif_fail: - del_timer(&ipmr_expire_timer); unregister_pernet_subsys(&ip6mr_net_ops); reg_pernet_fail: kmem_cache_destroy(mrt_cachep); @@ -1102,15 +1366,16 @@ reg_pernet_fail: void ip6_mr_cleanup(void) { unregister_netdevice_notifier(&ip6_mr_notifier); - del_timer(&ipmr_expire_timer); unregister_pernet_subsys(&ip6mr_net_ops); kmem_cache_destroy(mrt_cachep); } -static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) +static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, + struct mf6cctl *mfc, int mrtsock) { + bool found = false; int line; - struct mfc6_cache *uc, *c, **cp; + struct mfc6_cache *uc, *c; unsigned char ttls[MAXMIFS]; int i; @@ -1126,17 +1391,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - for (cp = &net->ipv6.mfc6_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && - ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) + ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { + found = true; break; + } } - if (c != NULL) { + if (found) { write_lock_bh(&mrt_lock); c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(c, ttls); + ip6mr_update_thresholds(mrt, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); @@ -1146,43 +1412,42 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) return -EINVAL; - c = ip6mr_cache_alloc(net); + c = ip6mr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(c, ttls); + ip6mr_update_thresholds(mrt, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - c->next = net->ipv6.mfc6_cache_array[line]; - net->ipv6.mfc6_cache_array[line] = c; + list_add(&c->list, &mrt->mfc6_cache_array[line]); write_unlock_bh(&mrt_lock); /* * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. 
*/ + found = false; spin_lock_bh(&mfc_unres_lock); - for (cp = &mfc_unres_queue; (uc = *cp) != NULL; - cp = &uc->next) { - if (net_eq(mfc6_net(uc), net) && - ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && + list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) { + if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { - *cp = uc->next; - atomic_dec(&net->ipv6.cache_resolve_queue_len); + list_del(&uc->list); + atomic_dec(&mrt->cache_resolve_queue_len); + found = true; break; } } - if (mfc_unres_queue == NULL) - del_timer(&ipmr_expire_timer); + if (list_empty(&mrt->mfc6_unres_queue)) + del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); - if (uc) { - ip6mr_cache_resolve(uc, c); + if (found) { + ip6mr_cache_resolve(net, mrt, uc, c); ip6mr_cache_free(uc); } return 0; @@ -1192,17 +1457,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct net *net) +static void mroute_clean_tables(struct mr6_table *mrt) { int i; LIST_HEAD(list); + struct mfc6_cache *c, *next; /* * Shut down all active vif entries */ - for (i = 0; i < net->ipv6.maxvif; i++) { - if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) - mif6_delete(net, i, &list); + for (i = 0; i < mrt->maxvif; i++) { + if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) + mif6_delete(mrt, i, &list); } unregister_netdevice_many(&list); @@ -1210,48 +1476,36 @@ static void mroute_clean_tables(struct net *net) * Wipe the cache */ for (i = 0; i < MFC6_LINES; i++) { - struct mfc6_cache *c, **cp; - - cp = &net->ipv6.mfc6_cache_array[i]; - while ((c = *cp) != NULL) { - if (c->mfc_flags & MFC_STATIC) { - cp = &c->next; + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { + if (c->mfc_flags & MFC_STATIC) continue; - } write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ip6mr_cache_free(c); } } - if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) { - struct mfc6_cache *c, **cp; - + if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); - cp = &mfc_unres_queue; - while ((c = *cp) != NULL) { - if (!net_eq(mfc6_net(c), net)) { - cp = &c->next; - continue; - } - *cp = c->next; - ip6mr_destroy_unres(c); + list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { + list_del(&c->list); + ip6mr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } } -static int ip6mr_sk_init(struct sock *sk) +static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) { int err = 0; struct net *net = sock_net(sk); rtnl_lock(); write_lock_bh(&mrt_lock); - if (likely(net->ipv6.mroute6_sk == NULL)) { - net->ipv6.mroute6_sk = sk; + if (likely(mrt->mroute6_sk == NULL)) { + mrt->mroute6_sk = sk; net->ipv6.devconf_all->mc_forwarding++; } else @@ -1265,24 +1519,43 @@ static int ip6mr_sk_init(struct sock *sk) int ip6mr_sk_done(struct sock *sk) { - int err = 0; + int err = -EACCES; struct net *net = sock_net(sk); + struct mr6_table *mrt; rtnl_lock(); - if (sk == net->ipv6.mroute6_sk) { - write_lock_bh(&mrt_lock); - net->ipv6.mroute6_sk = NULL; - net->ipv6.devconf_all->mc_forwarding--; - write_unlock_bh(&mrt_lock); + ip6mr_for_each_table(mrt, net) { + if (sk == mrt->mroute6_sk) { + write_lock_bh(&mrt_lock); + mrt->mroute6_sk = NULL; + net->ipv6.devconf_all->mc_forwarding--; + write_unlock_bh(&mrt_lock); - mroute_clean_tables(net); - } else - err = -EACCES; + 
mroute_clean_tables(mrt); + err = 0; + break; + } + } rtnl_unlock(); return err; } +struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) +{ + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->skb_iif, + .oif = skb->dev->ifindex, + .mark = skb->mark, + }; + + if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) + return NULL; + + return mrt->mroute6_sk; +} + /* * Socket options and virtual interface manipulation. The whole * virtual interface system is a complete heap, but unfortunately @@ -1297,9 +1570,14 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns struct mf6cctl mfc; mifi_t mifi; struct net *net = sock_net(sk); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT6_INIT) { - if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN)) + if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) return -EACCES; } @@ -1311,7 +1589,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen < sizeof(int)) return -EINVAL; - return ip6mr_sk_init(sk); + return ip6mr_sk_init(mrt, sk); case MRT6_DONE: return ip6mr_sk_done(sk); @@ -1324,7 +1602,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (vif.mif6c_mifi >= MAXMIFS) return -ENFILE; rtnl_lock(); - ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk); + ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk); rtnl_unlock(); return ret; @@ -1334,7 +1612,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (copy_from_user(&mifi, optval, sizeof(mifi_t))) return -EFAULT; rtnl_lock(); - ret = mif6_delete(net, mifi, NULL); + ret = mif6_delete(mrt, mifi, NULL); rtnl_unlock(); return ret; @@ -1350,10 +1628,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns return -EFAULT; rtnl_lock(); if (optname == MRT6_DEL_MFC) - ret = ip6mr_mfc_delete(net, &mfc); + ret = ip6mr_mfc_delete(mrt, &mfc); else - ret = ip6mr_mfc_add(net, &mfc, - sk == net->ipv6.mroute6_sk); + ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk); rtnl_unlock(); return ret; @@ -1365,7 +1642,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns int v; if (get_user(v, (int __user *)optval)) return -EFAULT; - net->ipv6.mroute_do_assert = !!v; + mrt->mroute_do_assert = !!v; return 0; } @@ -1378,15 +1655,36 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns v = !!v; rtnl_lock(); ret = 0; - if (v != net->ipv6.mroute_do_pim) { - net->ipv6.mroute_do_pim = v; - net->ipv6.mroute_do_assert = v; + if (v != mrt->mroute_do_pim) { + mrt->mroute_do_pim = v; + mrt->mroute_do_assert = v; } rtnl_unlock(); return ret; } #endif +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + case MRT6_TABLE: + { + u32 v; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(v, (u32 __user *)optval)) + return -EFAULT; + if (sk == mrt->mroute6_sk) + return -EBUSY; + + rtnl_lock(); + ret = 0; + if (!ip6mr_new_table(net, v)) + ret = -ENOMEM; + else raw6_sk(sk)->ip6mr_table = v; + rtnl_unlock(); + return ret; + } +#endif /* * Spurious command, or MRT6_VERSION which you cannot * set.
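The MRT6_TABLE case just above has to be issued before MRT6_INIT: once the socket has become mrt->mroute6_sk the option returns -EBUSY, so a daemon cannot move a live socket to another table without an intervening MRT6_DONE. A minimal userspace sketch of the resulting call order; the helper name is made up, and the errno handling assumes that a kernel built without CONFIG_IPV6_MROUTE_MULTIPLE_TABLES rejects the option with ENOPROTOOPT through the default case shown above:

#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

static int open_mrt6_socket(unsigned int table)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int one = 1;

	if (fd < 0)
		return -1;
	/* Select the table first; after MRT6_INIT this socket is
	 * mrt->mroute6_sk and MRT6_TABLE fails with EBUSY. */
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE,
		       &table, sizeof(table)) < 0 &&
	    errno != ENOPROTOOPT)	/* no multi-table support: use default table */
		goto err;
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}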
@@ -1406,6 +1704,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int olr; int val; struct net *net = sock_net(sk); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; switch (optname) { case MRT6_VERSION: @@ -1413,11 +1716,11 @@ break; #ifdef CONFIG_IPV6_PIMSM_V2 case MRT6_PIM: - val = net->ipv6.mroute_do_pim; + val = mrt->mroute_do_pim; break; #endif case MRT6_ASSERT: - val = net->ipv6.mroute_do_assert; + val = mrt->mroute_do_assert; break; default: return -ENOPROTOOPT; @@ -1448,16 +1751,21 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) struct mif_device *vif; struct mfc6_cache *c; struct net *net = sock_net(sk); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; switch (cmd) { case SIOCGETMIFCNT_IN6: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.mifi >= net->ipv6.maxvif) + if (vr.mifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); - vif = &net->ipv6.vif6_table[vr.mifi]; - if (MIF_EXISTS(net, vr.mifi)) { + vif = &mrt->vif6_table[vr.mifi]; + if (MIF_EXISTS(mrt, vr.mifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; @@ -1475,7 +1783,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) return -EFAULT; read_lock(&mrt_lock); - c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr); + c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; @@ -1505,11 +1813,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) * Processing handlers for ip6mr_forward */ -static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) +static int ip6mr_forward2(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *c, int vifi) { struct ipv6hdr *ipv6h; - struct net *net = mfc6_net(c); - struct mif_device *vif = &net->ipv6.vif6_table[vifi]; + struct mif_device *vif = &mrt->vif6_table[vifi]; struct net_device *dev; struct dst_entry *dst; struct flowi fl; @@ -1523,7 +1831,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT); + ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); goto out_free; } #endif @@ -1578,22 +1886,22 @@ out_free: return 0; } -static int ip6mr_find_vif(struct net_device *dev) +static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev) { - struct net *net = dev_net(dev); int ct; - for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) { - if (net->ipv6.vif6_table[ct].dev == dev) + + for (ct = mrt->maxvif - 1; ct >= 0; ct--) { + if (mrt->vif6_table[ct].dev == dev) break; } return ct; } -static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) +static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache) { int psend = -1; int vif, ct; - struct net *net = mfc6_net(cache); vif = cache->mf6c_parent; cache->mfc_un.res.pkt++; @@ -1602,30 +1910,30 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) /* * Wrong interface: drop packet and (maybe) send PIM assert.
*/ - if (net->ipv6.vif6_table[vif].dev != skb->dev) { + if (mrt->vif6_table[vif].dev != skb->dev) { int true_vifi; cache->mfc_un.res.wrong_if++; - true_vifi = ip6mr_find_vif(skb->dev); + true_vifi = ip6mr_find_vif(mrt, skb->dev); - if (true_vifi >= 0 && net->ipv6.mroute_do_assert && + if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, so that we cannot check that packet arrived on an oif. It is bad, but otherwise we would need to move pretty large chunk of pimd to kernel. Ough... --ANK */ - (net->ipv6.mroute_do_pim || + (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; - ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF); + ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); } goto dont_forward; } - net->ipv6.vif6_table[vif].pkt_in++; - net->ipv6.vif6_table[vif].bytes_in += skb->len; + mrt->vif6_table[vif].pkt_in++; + mrt->vif6_table[vif].bytes_in += skb->len; /* * Forward the frame @@ -1635,13 +1943,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ip6mr_forward2(skb2, cache, psend); + ip6mr_forward2(net, mrt, skb2, cache, psend); } psend = ct; } } if (psend != -1) { - ip6mr_forward2(skb, cache, psend); + ip6mr_forward2(net, mrt, skb, cache, psend); return 0; } @@ -1659,9 +1967,19 @@ int ip6_mr_input(struct sk_buff *skb) { struct mfc6_cache *cache; struct net *net = dev_net(skb->dev); + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->dev->ifindex, + .mark = skb->mark, + }; + int err; + + err = ip6mr_fib_lookup(net, &fl, &mrt); + if (err < 0) + return err; read_lock(&mrt_lock); - cache = ip6mr_cache_find(net, + cache = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); /* @@ -1670,9 +1988,9 @@ int ip6_mr_input(struct sk_buff *skb) if (cache == NULL) { int vif; - vif = ip6mr_find_vif(skb->dev); + vif = ip6mr_find_vif(mrt, skb->dev); if (vif >= 0) { - int err = ip6mr_cache_unresolved(net, vif, skb); + int err = ip6mr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); return err; @@ -1682,7 +2000,7 @@ int ip6_mr_input(struct sk_buff *skb) return -ENODEV; } - ip6_mr_forward(skb, cache); + ip6_mr_forward(net, mrt, skb, cache); read_unlock(&mrt_lock); @@ -1690,12 +2008,11 @@ int ip6_mr_input(struct sk_buff *skb) } -static int -ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) +static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; - struct net *net = mfc6_net(c); u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; @@ -1703,19 +2020,19 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) if (c->mf6c_parent > MAXMIFS) return -ENOENT; - if (MIF_EXISTS(net, c->mf6c_parent)) - RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); + if (MIF_EXISTS(mrt, c->mf6c_parent)) + RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { - if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { + if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct 
rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; - nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex; + nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } @@ -1733,11 +2050,16 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; + struct mr6_table *mrt; struct mfc6_cache *cache; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; + read_lock(&mrt_lock); - cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); + cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); if (!cache) { struct sk_buff *skb2; @@ -1751,7 +2073,7 @@ int ip6mr_get_route(struct net *net, } dev = skb->dev; - if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) { + if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) { read_unlock(&mrt_lock); return -ENODEV; } @@ -1780,7 +2102,7 @@ int ip6mr_get_route(struct net *net, ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); - err = ip6mr_cache_unresolved(net, vif, skb2); + err = ip6mr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); return err; @@ -1789,8 +2111,88 @@ int ip6mr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ip6mr_fill_mroute(skb, cache, rtm); + err = __ip6mr_fill_mroute(mrt, skb, cache, rtm); read_unlock(&mrt_lock); return err; } +static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + u32 pid, u32 seq, struct mfc6_cache *c) +{ + struct nlmsghdr *nlh; + struct rtmsg *rtm; + + nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); + if (nlh == NULL) + return -EMSGSIZE; + + rtm = nlmsg_data(nlh); + rtm->rtm_family = RTNL_FAMILY_IP6MR; + rtm->rtm_dst_len = 128; + rtm->rtm_src_len = 128; + rtm->rtm_tos = 0; + rtm->rtm_table = mrt->id; + NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + rtm->rtm_scope = RT_SCOPE_UNIVERSE; + rtm->rtm_protocol = RTPROT_UNSPEC; + rtm->rtm_flags = 0; + + NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); + NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); + + if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct mr6_table *mrt; + struct mfc6_cache *mfc; + unsigned int t = 0, s_t; + unsigned int h = 0, s_h; + unsigned int e = 0, s_e; + + s_t = cb->args[0]; + s_h = cb->args[1]; + s_e = cb->args[2]; + + read_lock(&mrt_lock); + ip6mr_for_each_table(mrt, net) { + if (t < s_t) + goto next_table; + if (t > s_t) + s_h = 0; + for (h = s_h; h < MFC6_LINES; h++) { + list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) { + if (e < s_e) + goto next_entry; + if (ip6mr_fill_mroute(mrt, skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + mfc) < 0) + goto done; +next_entry: + e++; + } + e = s_e = 0; + } + s_h = 0; +next_table: + t++; + } +done: + read_unlock(&mrt_lock); + + cb->args[2] = e; + cb->args[1] = h; + cb->args[0] = t; + + return skb->len; +}
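Note that ip6mr_rtm_dumproute() above is re-entered once per recvmsg() on the netlink socket, so it must resume exactly where the previous skb filled up; the three cursors in cb->args[] record table index, hash line, and entry within the line. A stand-alone sketch of that resume discipline, with made-up table shapes (the real code walks a list of tables, hence its extra t < s_t skip):

#include <stdio.h>

#define TABLES 2
#define LINES 3

/* entries[t][h]: number of cache entries on hash line h of table t */
static const unsigned int entries[TABLES][LINES] = { { 2, 0, 1 }, { 1, 3, 0 } };

/* Emit up to 'budget' entries, resuming from the cursors in args[],
 * and save the position reached so the next call continues there. */
static int dump(unsigned long args[3], int budget)
{
	unsigned int s_t = args[0], s_h = args[1], s_e = args[2];
	unsigned int t, h = s_h, e = s_e;
	int n = 0;

	for (t = s_t; t < TABLES; t++, s_h = 0) {
		for (h = s_h; h < LINES; h++, s_e = 0) {
			for (e = s_e; e < entries[t][h]; e++) {
				if (n == budget)
					goto done;
				printf("table %u line %u entry %u\n", t, h, e);
				n++;
			}
		}
	}
done:
	args[0] = t;
	args[1] = h;
	args[2] = e;
	return n;
}

int main(void)
{
	unsigned long args[3] = { 0, 0, 0 };

	/* each call plays the role of one netlink dump continuation */
	while (dump(args, 2) > 0)
		puts("--");
	return 0;
}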
diff --git a/net/ipv6/ndisc.c index 3f7c12b..0abdc24 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -890,8 +890,6 @@ out: in6_ifa_put(ifp); else in6_dev_put(idev); - - return; } static void ndisc_recv_na(struct sk_buff *skb) diff --git a/net/ipv6/proc.c index 458eabf..566798d 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -168,7 +168,6 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) i & 0x100 ? "Out" : "In", i & 0xff); seq_printf(seq, "%-32s\t%lu\n", name, val); } - return; } static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, diff --git a/net/ipv6/route.c index 05ebd78..294cbe8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -316,7 +316,6 @@ static void rt6_probe(struct rt6_info *rt) #else static inline void rt6_probe(struct rt6_info *rt) { - return; } #endif @@ -1553,7 +1552,6 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, out: dst_release(&rt->u.dst); - return; } /* diff --git a/net/ipv6/sit.c index 5abae10..e51e650 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -566,11 +566,9 @@ static int ipip6_rcv(struct sk_buff *skb) kfree_skb(skb); return 0; } - tunnel->dev->stats.rx_packets++; - tunnel->dev->stats.rx_bytes += skb->len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + + skb_tunnel_rx(skb, tunnel->dev); + ipip6_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); diff --git a/net/ipv6/tcp_ipv6.c index 6603511..2b7c3a1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -604,7 +604,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, kfree(newkey); return -ENOMEM; } - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(sk, NETIF_F_GSO_MASK); } if (tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); @@ -741,7 +741,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, return -ENOMEM; tp->md5sig_info = p; - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + sk_nocaps_add(sk, NETIF_F_GSO_MASK); } newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); diff --git a/net/irda/iriap.c index 79a1e5a..fce364c 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -685,8 +685,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, /* We have a match; send the value.
*/ iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS, attrib->value); - - return; } /* diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index df18ab4..e98e40d 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c @@ -678,7 +678,6 @@ irda_irnet_destroy(irnet_socket * self) self->stsap_sel = 0; DEXIT(IRDA_SOCK_TRACE, "\n"); - return; } @@ -928,7 +927,6 @@ irnet_disconnect_server(irnet_socket * self, irttp_listen(self->tsap); DEXIT(IRDA_SERV_TRACE, "\n"); - return; } /*------------------------------------------------------------------*/ @@ -1013,7 +1011,6 @@ irnet_destroy_server(void) irda_irnet_destroy(&irnet_server.s); DEXIT(IRDA_SERV_TRACE, "\n"); - return; } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 8be324f..c8b4599 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -136,7 +136,6 @@ static void afiucv_pm_complete(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_complete\n"); #endif - return; } /** diff --git a/net/key/af_key.c b/net/key/af_key.c index ba9a3fc..43040e9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -99,7 +99,7 @@ static void pfkey_sock_destruct(struct sock *sk) skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { - printk("Attempt to release alive pfkey socket: %p\n", sk); + pr_err("Attempt to release alive pfkey socket: %p\n", sk); return; } @@ -1402,7 +1402,7 @@ static inline int event2poltype(int event) case XFRM_MSG_POLEXPIRE: // return SADB_X_SPDEXPIRE; default: - printk("pfkey: Unknown policy event %d\n", event); + pr_err("pfkey: Unknown policy event %d\n", event); break; } @@ -1421,7 +1421,7 @@ static inline int event2keytype(int event) case XFRM_MSG_EXPIRE: return SADB_EXPIRE; default: - printk("pfkey: Unknown SA event %d\n", event); + pr_err("pfkey: Unknown SA event %d\n", event); break; } @@ -2969,7 +2969,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c) case XFRM_MSG_NEWAE: /* not yet supported */ break; default: - printk("pfkey: Unknown SA event %d\n", c->event); + pr_err("pfkey: Unknown SA event %d\n", c->event); break; } @@ -2993,7 +2993,7 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e break; return key_notify_policy_flush(c); default: - printk("pfkey: Unknown policy event %d\n", c->event); + pr_err("pfkey: Unknown policy event %d\n", c->event); break; } diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index a432f0e..94e7fca 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -31,7 +31,7 @@ static int llc_mac_header_len(unsigned short devtype) case ARPHRD_ETHER: case ARPHRD_LOOPBACK: return sizeof(struct ethhdr); -#ifdef CONFIG_TR +#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) case ARPHRD_IEEE802_TR: return sizeof(struct trh_hdr); #endif diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 0442029..84b48ba 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -23,7 +23,8 @@ mac80211-y := \ key.o \ util.o \ wme.o \ - event.o + event.o \ + chan.o mac80211-$(CONFIG_MAC80211_LEDS) += led.o mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ae37270..c7000a6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1162,15 +1162,39 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, } static int ieee80211_set_channel(struct wiphy *wiphy, + struct net_device *netdev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { struct ieee80211_local *local = 
wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = NULL; + + if (netdev) + sdata = IEEE80211_DEV_TO_SUB_IF(netdev); + + switch (ieee80211_get_channel_mode(local, NULL)) { + case CHAN_MODE_HOPPING: + return -EBUSY; + case CHAN_MODE_FIXED: + if (local->oper_channel != chan) + return -EBUSY; + if (!sdata && local->_oper_channel_type == channel_type) + return 0; + break; + case CHAN_MODE_UNDEFINED: + break; + } local->oper_channel = chan; - local->oper_channel_type = channel_type; - return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + if (!ieee80211_set_channel_type(local, sdata, channel_type)) + return -EBUSY; + + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); + + return 0; } #ifdef CONFIG_PM @@ -1214,6 +1238,20 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req) { + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + switch (ieee80211_get_channel_mode(local, sdata)) { + case CHAN_MODE_HOPPING: + return -EBUSY; + case CHAN_MODE_FIXED: + if (local->oper_channel == req->bss->channel) + break; + return -EBUSY; + case CHAN_MODE_UNDEFINED: + break; + } + return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); } @@ -1236,8 +1274,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { + struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + switch (ieee80211_get_channel_mode(local, sdata)) { + case CHAN_MODE_HOPPING: + return -EBUSY; + case CHAN_MODE_FIXED: + if (!params->channel_fixed) + return -EBUSY; + if (local->oper_channel == params->channel) + break; + return -EBUSY; + case CHAN_MODE_UNDEFINED: + break; + } + return ieee80211_ibss_join(sdata, params); } @@ -1366,7 +1418,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, * association, there's no need to send an action frame. 
*/ if (!sdata->u.mgd.associated || - sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) { + sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { mutex_lock(&sdata->local->iflist_mtx); ieee80211_recalc_smps(sdata->local, sdata); mutex_unlock(&sdata->local->iflist_mtx); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c new file mode 100644 index 0000000..5d218c5 --- /dev/null +++ b/net/mac80211/chan.c @@ -0,0 +1,127 @@ +/* + * mac80211 - channel management + */ + +#include <linux/nl80211.h> +#include "ieee80211_i.h" + +enum ieee80211_chan_mode +__ieee80211_get_channel_mode(struct ieee80211_local *local, + struct ieee80211_sub_if_data *ignore) +{ + struct ieee80211_sub_if_data *sdata; + + WARN_ON(!mutex_is_locked(&local->iflist_mtx)); + + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata == ignore) + continue; + + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !sdata->u.mgd.associated) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + if (!sdata->u.ibss.ssid_len) + continue; + if (!sdata->u.ibss.fixed_channel) + return CHAN_MODE_HOPPING; + } + + if (sdata->vif.type == NL80211_IFTYPE_AP && + !sdata->u.ap.beacon) + continue; + + return CHAN_MODE_FIXED; + } + + return CHAN_MODE_UNDEFINED; +} + +enum ieee80211_chan_mode +ieee80211_get_channel_mode(struct ieee80211_local *local, + struct ieee80211_sub_if_data *ignore) +{ + enum ieee80211_chan_mode mode; + + mutex_lock(&local->iflist_mtx); + mode = __ieee80211_get_channel_mode(local, ignore); + mutex_unlock(&local->iflist_mtx); + + return mode; +} + +bool ieee80211_set_channel_type(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_channel_type chantype) +{ + struct ieee80211_sub_if_data *tmp; + enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT; + bool result; + + mutex_lock(&local->iflist_mtx); + + list_for_each_entry(tmp, &local->interfaces, list) { + if (tmp == sdata) + continue; + + if (!ieee80211_sdata_running(tmp)) + continue; + + switch (tmp->vif.bss_conf.channel_type) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + superchan = tmp->vif.bss_conf.channel_type; + break; + case NL80211_CHAN_HT40PLUS: + WARN_ON(superchan == NL80211_CHAN_HT40MINUS); + superchan = NL80211_CHAN_HT40PLUS; + break; + case NL80211_CHAN_HT40MINUS: + WARN_ON(superchan == NL80211_CHAN_HT40PLUS); + superchan = NL80211_CHAN_HT40MINUS; + break; + } + } + + switch (superchan) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + /* + * allow any change that doesn't go to no-HT + * (if it already is no-HT no change is needed) + */ + if (chantype == NL80211_CHAN_NO_HT) + break; + superchan = chantype; + break; + case NL80211_CHAN_HT40PLUS: + case NL80211_CHAN_HT40MINUS: + /* allow smaller bandwidth and same */ + if (chantype == NL80211_CHAN_NO_HT) + break; + if (chantype == NL80211_CHAN_HT20) + break; + if (superchan == chantype) + break; + result = false; + goto out; + } + + local->_oper_channel_type = superchan; + + if (sdata) + sdata->vif.bss_conf.channel_type = chantype; + + result = true; + out: + mutex_unlock(&local->iflist_mtx); + + return result; +} diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index 68e6a20..09cc9be 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h @@ -7,7 +7,6 @@ extern int mac80211_open_file_generic(struct inode *inode, struct file *file); #else static inline void debugfs_hw_add(struct ieee80211_local 
*local) { - return; } #endif diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index ee8b63f..4f22713 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -371,4 +371,15 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop) if (local->ops->flush) local->ops->flush(&local->hw, drop); } + +static inline void drv_channel_switch(struct ieee80211_local *local, + struct ieee80211_channel_switch *ch_switch) +{ + might_sleep(); + + local->ops->channel_switch(&local->hw, ch_switch); + + trace_drv_channel_switch(local, ch_switch); +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index ce734b5..6a9b234 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -774,6 +774,34 @@ TRACE_EVENT(drv_flush, ) ); +TRACE_EVENT(drv_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_channel_switch *ch_switch), + + TP_ARGS(local, ch_switch), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u64, timestamp) + __field(bool, block_tx) + __field(u16, freq) + __field(u8, count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->timestamp = ch_switch->timestamp; + __entry->block_tx = ch_switch->block_tx; + __entry->freq = ch_switch->channel->center_freq; + __entry->count = ch_switch->count; + ), + + TP_printk( + LOCAL_PR_FMT " new freq:%u count:%d", + LOCAL_PR_ARG, __entry->freq, __entry->count + ) +); + /* * Tracing for API calls that drivers call. */ @@ -992,6 +1020,27 @@ TRACE_EVENT(api_sta_block_awake, ) ); +TRACE_EVENT(api_chswitch_done, + TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success), + + TP_ARGS(sdata, success), + + TP_STRUCT__entry( + VIF_ENTRY + __field(bool, success) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->success = success; + ), + + TP_printk( + VIF_PR_FMT " success=%d", + VIF_PR_ARG, __entry->success + ) +); + /* * Tracing for internal functions * (which may also be called in response to driver calls) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b72ee64..b2cc1fd 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -103,7 +103,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; local->oper_channel = chan; - local->oper_channel_type = NL80211_CHAN_NO_HT; + WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); sband = local->hw.wiphy->bands[chan->band]; @@ -911,7 +911,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, /* fix ourselves to that channel now already */ if (params->channel_fixed) { sdata->local->oper_channel = params->channel; - sdata->local->oper_channel_type = NL80211_CHAN_NO_HT; + WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata, + NL80211_CHAN_NO_HT)); } if (params->ie) { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index cbaf498..1a9e2da 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -767,7 +767,7 @@ struct ieee80211_local { enum mac80211_scan_state next_scan_state; struct delayed_work scan_work; struct ieee80211_sub_if_data *scan_sdata; - enum nl80211_channel_type oper_channel_type; + enum nl80211_channel_type _oper_channel_type; struct ieee80211_channel *oper_channel, *csa_channel; /* Temporary remain-on-channel for off-channel operations */ @@ -998,7 +998,8 @@ int ieee80211_max_network_latency(struct notifier_block *nb, unsigned long data, void *dummy); void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_sw_ie *sw_elem, - struct ieee80211_bss *bss); + struct ieee80211_bss *bss, + u64 timestamp); void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); @@ -1228,6 +1229,20 @@ int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, int ieee80211_wk_cancel_remain_on_channel( struct ieee80211_sub_if_data *sdata, u64 cookie); +/* channel management */ +enum ieee80211_chan_mode { + CHAN_MODE_UNDEFINED, + CHAN_MODE_HOPPING, + CHAN_MODE_FIXED, +}; + +enum ieee80211_chan_mode +ieee80211_get_channel_mode(struct ieee80211_local *local, + struct ieee80211_sub_if_data *ignore); +bool ieee80211_set_channel_type(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_channel_type chantype); + #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline #else diff --git a/net/mac80211/main.c b/net/mac80211/main.c index bd632e1..22a384d 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -111,7 +111,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) channel_type = local->tmp_channel_type; } else { chan = local->oper_channel; - channel_type = local->oper_channel_type; + channel_type = local->_oper_channel_type; } if (chan != local->hw.conf.channel || diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 7e93524..bde8103 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -287,8 +287,6 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) *pos++ |= sdata->u.mesh.accepting_plinks ? 
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; *pos++ = 0x00; - - return; } u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d89ed7f..0705018 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -624,7 +624,6 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, fail: rcu_read_unlock(); sdata->u.mesh.mshstats.dropped_frames_no_route++; - return; } static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 358226f..0839c4e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -137,11 +137,14 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; u32 changed = 0; u16 ht_opmode; - bool enable_ht = true, ht_changed; + bool enable_ht = true; + enum nl80211_channel_type prev_chantype; enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + prev_chantype = sdata->vif.bss_conf.channel_type; + /* HT is not supported */ if (!sband->ht_cap.ht_supported) enable_ht = false; @@ -172,38 +175,37 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, } } - ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || - channel_type != local->hw.conf.channel_type; - if (local->tmp_channel) local->tmp_channel_type = channel_type; - local->oper_channel_type = channel_type; - if (ht_changed) { - /* channel_type change automatically detected */ - ieee80211_hw_config(local, 0); + if (!ieee80211_set_channel_type(local, sdata, channel_type)) { + /* can only fail due to HT40+/- mismatch */ + channel_type = NL80211_CHAN_HT20; + WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); + } + /* channel_type change automatically detected */ + ieee80211_hw_config(local, 0); + + if (prev_chantype != channel_type) { rcu_read_lock(); sta = sta_info_get(sdata, bssid); if (sta) rate_control_rate_update(local, sband, sta, IEEE80211_RC_HT_CHANGED, - local->oper_channel_type); + channel_type); rcu_read_unlock(); - } - - /* disable HT */ - if (!enable_ht) - return 0; + } ht_opmode = le16_to_cpu(hti->operation_mode); /* if bss configuration changed store the new one */ - if (!sdata->ht_opmode_valid || - sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { + if (sdata->ht_opmode_valid != enable_ht || + sdata->vif.bss_conf.ht_operation_mode != ht_opmode || + prev_chantype != channel_type) { changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; - sdata->ht_opmode_valid = true; + sdata->ht_opmode_valid = enable_ht; } return changed; @@ -340,7 +342,11 @@ static void ieee80211_chswitch_work(struct work_struct *work) goto out; sdata->local->oper_channel = sdata->local->csa_channel; - ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); + if (!sdata->local->ops->channel_switch) { + /* call "hw_config" only if doing sw channel switch */ + ieee80211_hw_config(sdata->local, + IEEE80211_CONF_CHANGE_CHANNEL); + } /* XXX: shouldn't really modify cfg80211-owned data! 
*/ ifmgd->associated->channel = sdata->local->oper_channel; @@ -352,6 +358,29 @@ static void ieee80211_chswitch_work(struct work_struct *work) mutex_unlock(&ifmgd->mtx); } +void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_managed *ifmgd; + + sdata = vif_to_sdata(vif); + ifmgd = &sdata->u.mgd; + + trace_api_chswitch_done(sdata, success); + if (!success) { + /* + * If the channel switch was not successful, stay + * around on the old channel. We currently lack + * good handling of this situation, possibly we + * should just drop the association. + */ + sdata->local->csa_channel = sdata->local->oper_channel; + } + + ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); +} +EXPORT_SYMBOL(ieee80211_chswitch_done); + static void ieee80211_chswitch_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = @@ -368,7 +397,8 @@ static void ieee80211_chswitch_timer(unsigned long data) void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_sw_ie *sw_elem, - struct ieee80211_bss *bss) + struct ieee80211_bss *bss, + u64 timestamp) { struct cfg80211_bss *cbss = container_of((void *)bss, struct cfg80211_bss, priv); @@ -396,10 +426,29 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->local->csa_channel = new_ch; + if (sdata->local->ops->channel_switch) { + /* use driver's channel switch callback */ + struct ieee80211_channel_switch ch_switch; + memset(&ch_switch, 0, sizeof(ch_switch)); + ch_switch.timestamp = timestamp; + if (sw_elem->mode) { + ch_switch.block_tx = true; + ieee80211_stop_queues_by_reason(&sdata->local->hw, + IEEE80211_QUEUE_STOP_REASON_CSA); + } + ch_switch.channel = new_ch; + ch_switch.count = sw_elem->count; + ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; + drv_channel_switch(sdata->local, &ch_switch); + return; + } + + /* channel switch handled in software */ if (sw_elem->count <= 1) { ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); } else { - ieee80211_stop_queues_by_reason(&sdata->local->hw, + if (sw_elem->mode) + ieee80211_stop_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CSA); ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; mod_timer(&ifmgd->chswitch_timer, @@ -507,7 +556,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) s32 beaconint_us; if (latency < 0) - latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY); + latency = pm_qos_request(PM_QOS_NETWORK_LATENCY); beaconint_us = ieee80211_tu_to_usec( found->vif.bss_conf.beacon_int); @@ -866,7 +915,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata); /* channel(_type) changes are handled by ieee80211_hw_config */ - local->oper_channel_type = NL80211_CHAN_NO_HT; + WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); /* on the next assoc, re-program HT parameters */ sdata->ht_opmode_valid = false; @@ -883,8 +932,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_hw_config(local, config_changed); - /* And the BSSID changed -- not very interesting here */ - changed |= BSS_CHANGED_BSSID; + /* The BSSID (not really interesting) and HT changed */ + changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; ieee80211_bss_info_change_notify(sdata, changed); if (remove_sta) @@ -1315,7 +1364,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, ETH_ALEN) == 0)) { struct ieee80211_channel_sw_ie 
*sw_elem = (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; - ieee80211_sta_process_chanswitch(sdata, sw_elem, bss); + ieee80211_sta_process_chanswitch(sdata, sw_elem, + bss, rx_status->mactime); } } @@ -1647,7 +1697,8 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_sta_process_chanswitch(sdata, &mgmt->u.action.u.chan_switch.sw_elem, - (void *)ifmgd->associated->priv); + (void *)ifmgd->associated->priv, + rx_status->mactime); break; } mutex_unlock(&ifmgd->mtx); @@ -2176,7 +2227,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, continue; if (wk->type != IEEE80211_WORK_DIRECT_PROBE && - wk->type != IEEE80211_WORK_AUTH) + wk->type != IEEE80211_WORK_AUTH && + wk->type != IEEE80211_WORK_ASSOC) continue; if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) @@ -2266,7 +2318,7 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, if ((chan != local->tmp_channel || channel_type != local->tmp_channel_type) && (chan != local->oper_channel || - channel_type != local->oper_channel_type)) + channel_type != local->_oper_channel_type)) return -EBUSY; skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9a08f2c..6e2a7bc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1253,6 +1253,12 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; + /* + * skb_linearize() might change the skb->data and + * previously cached variables (in this case, hdr) need to + * be refreshed with the new data. + */ + hdr = (struct ieee80211_hdr *)rx->skb->data; seq = (sc & IEEE80211_SCTL_SEQ) >> 4; if (frag == 0) { diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index e14c441..e1b0be7 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -510,7 +510,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, bad_latency = time_after(jiffies + ieee80211_scan_get_channel_time(next_chan), local->leave_oper_channel_time + - usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY))); + usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); listen_int_exceeded = time_after(jiffies + ieee80211_scan_get_channel_time(next_chan), diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f3841f4..680bcb7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2251,8 +2251,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, info->control.vif = vif; - info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_ASSIGN_SEQ | + IEEE80211_TX_CTL_FIRST_FRAGMENT; out: rcu_read_unlock(); return skb; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2b75b4f..5b79d55 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1160,18 +1160,33 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* Finally also reconfigure all the BSS information */ list_for_each_entry(sdata, &local->interfaces, list) { - u32 changed = ~0; + u32 changed; + if (!ieee80211_sdata_running(sdata)) continue; + + /* common change flags for all interface types */ + changed = BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT | + BSS_CHANGED_BSSID | + BSS_CHANGED_CQM; + switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: - /* disable beacon change bits */ - changed &= ~(BSS_CHANGED_BEACON | - 
BSS_CHANGED_BEACON_ENABLED); - /* fall through */ + changed |= BSS_CHANGED_ASSOC; + ieee80211_bss_info_change_notify(sdata, changed); + break; case NL80211_IFTYPE_ADHOC: + changed |= BSS_CHANGED_IBSS; + /* fall through */ case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: + changed |= BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED; ieee80211_bss_info_change_notify(sdata, changed); break; case NL80211_IFTYPE_WDS: diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 3dd0760..be3d4a6 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -33,6 +33,7 @@ #define IEEE80211_MAX_PROBE_TRIES 5 enum work_action { + WORK_ACT_MISMATCH, WORK_ACT_NONE, WORK_ACT_TIMEOUT, WORK_ACT_DONE, @@ -585,7 +586,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_work *wk, u16 auth_alg, auth_transaction, status_code; if (wk->type != IEEE80211_WORK_AUTH) - return WORK_ACT_NONE; + return WORK_ACT_MISMATCH; if (len < 24 + 6) return WORK_ACT_NONE; @@ -636,6 +637,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk, struct ieee802_11_elems elems; u8 *pos; + if (wk->type != IEEE80211_WORK_ASSOC) + return WORK_ACT_MISMATCH; + /* * AssocResp and ReassocResp have identical structure, so process both * of them in this function. @@ -691,6 +695,12 @@ ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk, ASSERT_WORK_MTX(local); + if (wk->type != IEEE80211_WORK_DIRECT_PROBE) + return WORK_ACT_MISMATCH; + + if (len < 24 + 12) + return WORK_ACT_NONE; + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return WORK_ACT_NONE; @@ -705,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; struct ieee80211_work *wk; - enum work_action rma = WORK_ACT_NONE; + enum work_action rma; u16 fc; rx_status = (struct ieee80211_rx_status *) skb->cb; @@ -752,7 +762,17 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, break; default: WARN_ON(1); + rma = WORK_ACT_NONE; } + + /* + * We've either received an unexpected frame, or we have + * multiple work items and need to match the frame to the + * right one. + */ + if (rma == WORK_ACT_MISMATCH) + continue; + /* * We've processed this frame for that work, so it can't * belong to another work struct. @@ -762,6 +782,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, } switch (rma) { + case WORK_ACT_MISMATCH: + /* ignore this unmatched frame */ + break; case WORK_ACT_NONE: break; case WORK_ACT_DONE: diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 673a6c8..8593a77 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -505,6 +505,8 @@ config NETFILTER_XT_TARGET_RATEEST config NETFILTER_XT_TARGET_TEE tristate '"TEE" - packet cloning to alternate destiantion' depends on NETFILTER_ADVANCED + depends on (IPV6 || IPV6=n) + depends on !NF_CONNTRACK || NF_CONNTRACK ---help--- This option adds a "TEE" target with which a packet can be cloned and this clone be rerouted to another nexthop. 
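The nf_queue.c hunk below, like the dev_requeue_skb() and qdisc_restart() changes in sch_generic.c further down, deals with the new noref dst scheme: a dst attached with only an RCU-borrowed reference stays valid no longer than the current rcu_read_lock() section, so any path that parks an skb for later processing must first upgrade it to a refcounted reference with skb_dst_force(); qdisc_restart() gains a WARN_ON_ONCE(skb_dst_is_noref(skb)) to catch paths that forget. A kernel-style sketch of the rule (the deferral helper itself is hypothetical):

#include <linux/skbuff.h>
#include <net/dst.h>

/* Hypothetical deferral point: the skb is handed off here and is
 * processed again only after rcu_read_unlock(). */
static void park_skb(struct sk_buff_head *q, struct sk_buff *skb)
{
	/* Turn a noref (RCU-borrowed) dst into a real reference;
	 * this is a no-op if the skb already holds one. */
	skb_dst_force(skb);
	skb_queue_tail(q, skb);
}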
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 0b1103c..78b3cf9 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -9,6 +9,7 @@ #include <linux/rcupdate.h> #include <net/protocol.h> #include <net/netfilter/nf_queue.h> +#include <net/dst.h> #include "nf_internals.h" @@ -170,6 +171,7 @@ static int __nf_queue(struct sk_buff *skb, dev_hold(physoutdev); } #endif + skb_dst_force(skb); afinfo->saveroute(skb, entry); status = qh->outfn(entry, queuenum); diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h index 07ae7fd..1c1c093 100644 --- a/net/netlabel/netlabel_addrlist.h +++ b/net/netlabel/netlabel_addrlist.h @@ -130,7 +130,6 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, __be32 addr, __be32 mask) { - return; } #endif @@ -203,7 +202,6 @@ static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, const struct in6_addr *addr, const struct in6_addr *mask) { - return; } #endif #endif /* IPV6 */ diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index a3d64aa..e2b0a68 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -670,7 +670,6 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) unlhsh_condremove_failure: spin_unlock(&netlbl_unlhsh_lock); - return; } /** diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 0562562..c397524 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -141,7 +141,7 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn) release_sock(sock->sk); sock_release(sock); - }; + } if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 0190451..972378f 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -153,7 +153,7 @@ int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, } else if (type == RTM_GETACTION) { return tcf_dump_walker(skb, cb, a, hinfo); } else { - printk("tcf_generic_walker: unknown action %d\n", type); + WARN(1, "tcf_generic_walker: unknown action %d\n", type); return -EINVAL; } } @@ -403,8 +403,9 @@ void tcf_action_destroy(struct tc_action *act, int bind) module_put(a->ops->owner); act = act->next; kfree(a); - } else { /*FIXME: Remove later - catch insertion bugs*/ - printk("tcf_action_destroy: BUG? destroying NULL ops\n"); + } else { + /*FIXME: Remove later - catch insertion bugs*/ + WARN(1, "tcf_action_destroy: BUG? 
destroying NULL ops\n"); act = act->next; kfree(a); } @@ -744,7 +745,7 @@ static struct tc_action *create_a(int i) act = kzalloc(sizeof(*act), GFP_KERNEL); if (act == NULL) { - printk("create_a: failed to alloc!\n"); + pr_debug("create_a: failed to alloc!\n"); return NULL; } act->order = i; @@ -766,13 +767,13 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, int err = -ENOMEM; if (a == NULL) { - printk("tca_action_flush: couldnt create tc_action\n"); + pr_debug("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { - printk("tca_action_flush: failed skb alloc\n"); + pr_debug("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } @@ -979,7 +980,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return ret; if (tca[TCA_ACT_TAB] == NULL) { - printk("tc_ctl_action: received NO action attribs\n"); + pr_notice("tc_ctl_action: received NO action attribs\n"); return -EINVAL; } @@ -1056,7 +1057,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) struct nlattr *kind = find_dump_kind(cb->nlh); if (kind == NULL) { - printk("tc_dump_action: action bad kind\n"); + pr_info("tc_dump_action: action bad kind\n"); return 0; } @@ -1069,7 +1070,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) a.ops = a_o; if (a_o->walk == NULL) { - printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind); + WARN(1, "tc_dump_action: %s !capable of dumping table\n", + a_o->kind); goto nla_put_failure; } diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index e7f796a..8406c66 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -202,9 +202,9 @@ MODULE_LICENSE("GPL"); static int __init gact_init_module(void) { #ifdef CONFIG_GACT_PROB - printk("GACT probability on\n"); + printk(KERN_INFO "GACT probability on\n"); #else - printk("GACT probability NOT on\n"); + printk(KERN_INFO "GACT probability NOT on\n"); #endif return tcf_register_action(&act_gact_ops); } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 1f95954..c7e59e6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -235,7 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, break; default: if (net_ratelimit()) - printk("Bogus netfilter code %d assume ACCEPT\n", ret); + pr_notice("tc filter: Bogus netfilter code" + " %d assume ACCEPT\n", ret); result = TC_POLICE_OK; break; } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index c046682..c0b6863 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -164,8 +164,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, dev = m->tcfm_dev; if (!(dev->flags & IFF_UP)) { if (net_ratelimit()) - printk("mirred to Houston: device %s is gone!\n", - dev->name); + pr_notice("tc mirred to Houston: device %s is gone!\n", + dev->name); goto out; } @@ -252,7 +252,7 @@ MODULE_LICENSE("GPL"); static int __init mirred_init_module(void) { - printk("Mirror/redirect action on\n"); + pr_info("Mirror/redirect action on\n"); return tcf_register_action(&act_mirred_ops); } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b7dcfed..fdbd0b7 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -158,11 +158,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, } if (offset % 4) { - printk("offset must be on 32 bit boundaries\n"); + pr_info("tc filter pedit" + " offset must be on 32 bit boundaries\n"); goto bad; } if (offset > 0 && offset > skb->len) 
{ - printk("offset %d cant exceed pkt length %d\n", + pr_info("tc filter pedit" + " offset %d cant exceed pkt length %d\n", offset, skb->len); goto bad; } @@ -176,9 +178,8 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, if (munged) skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); goto done; - } else { - printk("pedit BUG: index %d\n", p->tcf_index); - } + } else + WARN(1, "pedit BUG: index %d\n", p->tcf_index); bad: p->tcf_qstats.overlimits++; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 622ca80..1b4bc69 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -49,7 +49,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result * Example if this was the 3rd packet and the string was "hello" * then it would look like "hello_3" (without quotes) **/ - printk("simple: %s_%d\n", + pr_info("simple: %s_%d\n", (char *)d->tcfd_defdata, d->tcf_bstats.packets); spin_unlock(&d->tcf_lock); return d->tcf_action; @@ -205,7 +205,7 @@ static int __init simp_init_module(void) { int ret = tcf_register_action(&act_simp_ops); if (!ret) - printk("Simple TC action Loaded\n"); + pr_info("Simple TC action Loaded\n"); return ret; } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 6ed61b1..f73542d 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -602,7 +602,6 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle) static void flow_put(struct tcf_proto *tp, unsigned long f) { - return; } static int flow_dump(struct tcf_proto *tp, unsigned long fh, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 593eac0..9627542 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -211,7 +211,7 @@ check_terminal: deadloop: if (net_ratelimit()) - printk("cls_u32: dead loop\n"); + printk(KERN_WARNING "cls_u32: dead loop\n"); return -1; } @@ -768,15 +768,15 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = { static int __init init_u32(void) { - printk("u32 classifier\n"); + pr_info("u32 classifier\n"); #ifdef CONFIG_CLS_U32_PERF - printk(" Performance counters on\n"); + pr_info(" Performance counters on\n"); #endif #ifdef CONFIG_NET_CLS_IND - printk(" input device check on\n"); + pr_info(" input device check on\n"); #endif #ifdef CONFIG_NET_CLS_ACT - printk(" Actions configured\n"); + pr_info(" Actions configured\n"); #endif return register_tcf_proto_ops(&cls_u32_ops); } diff --git a/net/sched/ematch.c b/net/sched/ematch.c index e782bde..5e37da9 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -527,7 +527,8 @@ pop_stack: stack_overflow: if (net_ratelimit()) - printk("Local stack overflow, increase NET_EMATCH_STACK\n"); + printk(KERN_WARNING "tc ematch: local stack overflow," + " increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 9839b26..fe35c1f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1637,9 +1637,12 @@ reclassify: tp = otp; if (verd++ >= MAX_REC_LOOP) { - printk("rule prio %u protocol %02x reclassify loop, " - "packet dropped\n", - tp->prio&0xffff, ntohs(tp->protocol)); + if (net_ratelimit()) + printk(KERN_NOTICE + "%s: packet reclassify loop" + " rule prio %u protocol %02x\n", + tp->q->ops->id, + tp->prio & 0xffff, ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a969b11..a63029e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -26,6 +26,7 @@ 
#include <linux/list.h> #include <linux/slab.h> #include <net/pkt_sched.h> +#include <net/dst.h> /* Main transmission queue. */ @@ -40,6 +41,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { + skb_dst_force(skb); q->gso_skb = skb; q->qstats.requeues++; q->q.qlen++; /* it's still part of the queue */ @@ -179,7 +181,7 @@ static inline int qdisc_restart(struct Qdisc *q) skb = dequeue_skb(q); if (unlikely(!skb)) return 0; - + WARN_ON_ONCE(skb_dst_is_noref(skb)); root_lock = qdisc_lock(q); dev = qdisc_dev(q); txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b38b39c..abd904b 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -617,7 +617,6 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) rtsc->y = y; rtsc->dx = dx; rtsc->dy = dy; - return; } static void @@ -1155,7 +1154,7 @@ static struct hfsc_class * hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct hfsc_sched *q = qdisc_priv(sch); - struct hfsc_class *cl; + struct hfsc_class *head, *cl; struct tcf_result res; struct tcf_proto *tcf; int result; @@ -1166,6 +1165,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return cl; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + head = &q->root; tcf = q->root.filter_list; while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT @@ -1180,6 +1180,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) if ((cl = (struct hfsc_class *)res.class) == NULL) { if ((cl = hfsc_find_class(res.classid, sch)) == NULL) break; /* filter selected invalid classid */ + if (cl->level >= head->level) + break; /* filter may only point downwards */ } if (cl->level == 0) @@ -1187,6 +1189,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) /* apply inner filter chain */ tcf = cl->filter_list; + head = cl; } /* classification failed, try default class */ diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index a9e646b..f10e34a 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -44,7 +44,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl) static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { - return; } static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index b2aba3f..fe91e50 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -174,7 +174,6 @@ static unsigned long mq_get(struct Qdisc *sch, u32 classid) static void mq_put(struct Qdisc *sch, unsigned long cl) { - return; } static int mq_dump_class(struct Qdisc *sch, unsigned long cl, diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index c50876c..6ae2512 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -340,7 +340,6 @@ static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, static void multiq_put(struct Qdisc *q, unsigned long cl) { - return; } static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 81672e0..0748fb1 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -303,7 +303,6 @@ static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 clas static void prio_put(struct Qdisc *q, unsigned long cl) { - return; } static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, diff --git a/net/sched/sch_red.c 
b/net/sched/sch_red.c index 072cdf4..8d42bb3 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -303,7 +303,6 @@ static unsigned long red_get(struct Qdisc *sch, u32 classid) static void red_put(struct Qdisc *sch, unsigned long arg) { - return; } static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 8fb8107..0991c64 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -273,7 +273,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) if (max_size < 0) goto done; - if (qopt->limit > 0) { + if (q->qdisc != &noop_qdisc) { + err = fifo_set_limit(q->qdisc, qopt->limit); + if (err) + goto done; + } else if (qopt->limit > 0) { child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); if (IS_ERR(child)) { err = PTR_ERR(child); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 3912420..e41feff 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -816,8 +816,6 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, if (t != primary) sctp_assoc_rm_peer(asoc, t); } - - return; } /* Engage in transport control operations. diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a57018..ea21924 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk, { SCTP_DEBUG_PRINTK("%s\n", __func__); - sctp_do_sm(SCTP_EVENT_T_OTHER, - SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), - asoc->state, asoc->ep, asoc, t, - GFP_ATOMIC); + if (sock_owned_by_user(sk)) { + if (timer_pending(&t->proto_unreach_timer)) + return; + else { + if (!mod_timer(&t->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + } + + } else { + if (timer_pending(&t->proto_unreach_timer) && + del_timer(&t->proto_unreach_timer)) + sctp_association_put(asoc); + sctp_do_sm(SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, t, + GFP_ATOMIC); + } } /* Common lookup code for icmp/icmpv6 error handler. */ diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 5d05717..c04b2eb 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -80,7 +80,6 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, { list_add(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; - return; } /* Take data from the front of the queue. 
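/*
 * Userspace toy of the refcounting rule in the sctp/input.c hunk
 * above: when the socket is user-locked the work is deferred to
 * proto_unreach_timer (about HZ/20, i.e. ~50ms out), and an
 * association reference is taken only when mod_timer() reports the
 * timer was previously inactive -- re-arming a pending timer must not
 * take a second reference. A boolean stands in for the timer state.
 */
#include <assert.h>
#include <stdbool.h>

struct toy_assoc { int refs; bool timer_pending; };

/* like mod_timer(): returns nonzero if the timer was already pending */
static int toy_mod_timer(struct toy_assoc *a)
{
	bool was_pending = a->timer_pending;

	a->timer_pending = true;
	return was_pending;
}

static void toy_defer(struct toy_assoc *a)
{
	if (!toy_mod_timer(a))
		a->refs++;	/* the pending timer now owns one reference */
}

int main(void)
{
	struct toy_assoc a = { 1, false };

	toy_defer(&a);
	toy_defer(&a);	/* second defer only re-arms, must not leak a ref */
	assert(a.refs == 2);
	return 0;
}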
*/ @@ -103,7 +102,6 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q, { list_add_tail(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; - return; } /* diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 784bcc9..61aacfb 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -181,7 +181,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) static void sctp_eps_seq_stop(struct seq_file *seq, void *v) { - return; } @@ -286,7 +285,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) { - return; } @@ -409,7 +407,6 @@ static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) { - return; } static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index d8261f3..bd2a50b 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -141,7 +141,7 @@ int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); - if (skb_tailroom(chunk->skb) > len) + if (skb_tailroom(chunk->skb) < len) return -ENOSPC; chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(sctp_errhdr_t), @@ -1415,7 +1415,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, int len, const void *data) { - if (skb_tailroom(chunk->skb) > len) + if (skb_tailroom(chunk->skb) >= len) return sctp_addto_chunk(chunk, len, data); else return NULL; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 3b7230e..f5e5e27 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -397,6 +397,41 @@ out_unlock: sctp_transport_put(transport); } +/* Handle the timeout of the ICMP protocol unreachable timer. Trigger + * the correct state machine transition that will close the association. + */ +void sctp_generate_proto_unreach_event(unsigned long data) +{ + struct sctp_transport *transport = (struct sctp_transport *) data; + struct sctp_association *asoc = transport->asoc; + + sctp_bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + goto out_unlock; + } + + /* Is this structure just waiting around for us to actually + * get destroyed? + */ + if (asoc->base.dead) + goto out_unlock; + + sctp_do_sm(SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); + +out_unlock: + sctp_bh_unlock_sock(asoc->base.sk); + sctp_association_put(asoc); +} + + /* Inject a SACK Timeout event into the state machine. */ static void sctp_generate_sack_event(unsigned long data) { @@ -857,8 +892,6 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, sctp_walk_fwdtsn(skip, chunk) { sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); } - - return; } /* Helper function to remove the association non-primary peer @@ -877,8 +910,6 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc) sctp_assoc_del_peer(asoc, &t->ipaddr); } } - - return; } /* Helper function to set sk_err on a 1-1 style socket. 
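/*
 * Userspace toy of the boundary the two sm_make_chunk.c hunks above
 * correct: "room for len bytes" means skb_tailroom() >= len, so the
 * error path must fire on "< len" and the success path on ">= len";
 * the old tests mishandled (or outright inverted) the exact-fit case.
 */
#include <assert.h>

static int fits(unsigned int tailroom, unsigned int len)
{
	return tailroom >= len;		/* an exact fit is still a fit */
}

int main(void)
{
	assert(fits(16, 16));		/* the boundary the old '>' test rejected */
	assert(fits(17, 16));
	assert(!fits(15, 16));
	return 0;
}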
*/ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ba1add0..ca44917 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5433,6 +5433,8 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) rover++; if ((rover < low) || (rover > high)) rover = low; + if (inet_is_reserved_local_port(rover)) + continue; index = sctp_phashfn(rover); head = &sctp_port_hashtable[index]; sctp_spin_lock(&head->lock); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index fccf494..132046c 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -92,6 +92,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, (unsigned long)peer); setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, (unsigned long)peer); + setup_timer(&peer->proto_unreach_timer, + sctp_generate_proto_unreach_event, (unsigned long)peer); /* Initialize the 64-bit random nonce sent with heartbeat. */ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); @@ -146,6 +148,10 @@ void sctp_transport_free(struct sctp_transport *transport) del_timer(&transport->T3_rtx_timer)) sctp_transport_put(transport); + /* Delete the ICMP proto unreachable timer if it's active. */ + if (timer_pending(&transport->proto_unreach_timer) && + del_timer(&transport->proto_unreach_timer)) + sctp_association_put(transport->asoc); sctp_transport_put(transport); } diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 3a44853..c7f7e49 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -955,7 +955,6 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) * ordering and deliver them if needed. */ sctp_ulpq_reap_ordered(ulpq, sid); - return; } static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, @@ -1064,7 +1063,6 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, } sk_mem_reclaim(asoc->base.sk); - return; } diff --git a/net/socket.c b/net/socket.c index dae8c6b..f9f7d08 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2615,7 +2615,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, return dev_ioctl(net, cmd, uifr); default: return -EINVAL; - }; + } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index f394fc1..73affb8 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -236,10 +236,15 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { - /* Enforce a 60 second garbage collection moratorium */ - if (time_in_range_open(cred->cr_expire, expired, jiffies) && + if (nr_to_scan-- == 0) + break; + /* + * Enforce a 60 second garbage collection moratorium + * Note that the cred_unused list must be time-ordered. + */ + if (time_in_range(cred->cr_expire, expired, jiffies) && test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) - continue; + return 0; list_del_init(&cred->cr_lru); number_cred_unused--; @@ -252,13 +257,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) get_rpccred(cred); list_add_tail(&cred->cr_lru, free); rpcauth_unhash_cred_locked(cred); - nr_to_scan--; } spin_unlock(cache_lock); - if (nr_to_scan == 0) - break; } - return nr_to_scan; + return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; } /* @@ -270,11 +272,12 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) LIST_HEAD(free); int res; + if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) + return (nr_to_scan == 0) ? 
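/*
 * Toy of the rover loop in the sctp_get_port_local() hunk above:
 * candidate ports wrap within [low, high] and administratively
 * reserved ports are skipped. toy_reserved() stands in for
 * inet_is_reserved_local_port(); the sketch assumes at least one
 * non-reserved port exists in the range.
 */
#include <assert.h>
#include <stdbool.h>

static bool toy_reserved(int port)
{
	return port == 50001;	/* pretend one port is reserved */
}

static int next_candidate_port(int rover, int low, int high)
{
	do {
		rover++;
		if (rover < low || rover > high)
			rover = low;	/* wrap back into the range */
	} while (toy_reserved(rover));
	return rover;
}

int main(void)
{
	assert(next_candidate_port(50000, 50000, 50002) == 50002); /* skips 50001 */
	assert(next_candidate_port(50002, 50000, 50002) == 50000); /* wraps */
	return 0;
}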
0 : -1; if (list_empty(&cred_unused)) return 0; spin_lock(&rpc_credcache_lock); - nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); - res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; + res = rpcauth_prune_expired(&free, nr_to_scan); spin_unlock(&rpc_credcache_lock); rpcauth_destroy_credlist(&free); return res; diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index 4de8bcf..74a2317 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile @@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o + gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index c389ccf..8da2a0e 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -57,11 +57,14 @@ static const struct rpc_authops authgss_ops; static const struct rpc_credops gss_credops; static const struct rpc_credops gss_nullops; +#define GSS_RETRY_EXPIRED 5 +static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED; + #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif -#define GSS_CRED_SLACK 1024 +#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2) /* length of a krb5 verifier (48), plus data added before arguments when * using integrity (two 4-byte integers): */ #define GSS_VERF_SLACK 100 @@ -229,7 +232,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct p = ERR_PTR(-EFAULT); goto err; } - ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx); + ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS); if (ret < 0) { p = ERR_PTR(ret); goto err; @@ -350,6 +353,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg) } static void +gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg) +{ + switch (gss_msg->msg.errno) { + case 0: + if (gss_msg->ctx == NULL) + break; + clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); + gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx); + break; + case -EKEYEXPIRED: + set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); + } + gss_cred->gc_upcall_timestamp = jiffies; + gss_cred->gc_upcall = NULL; + rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); +} + +static void gss_upcall_callback(struct rpc_task *task) { struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, @@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task) struct inode *inode = &gss_msg->inode->vfs_inode; spin_lock(&inode->i_lock); - if (gss_msg->ctx) - gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); - else - task->tk_status = gss_msg->msg.errno; - gss_cred->gc_upcall = NULL; - rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); + gss_handle_downcall_result(gss_cred, gss_msg); spin_unlock(&inode->i_lock); + task->tk_status = gss_msg->msg.errno; gss_release_msg(gss_msg); } @@ -377,11 +394,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, struct rpc_clnt *clnt, int machine_cred) { + struct gss_api_mech *mech = gss_msg->auth->mech; char *p = gss_msg->databuf; int len = 0; gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", - gss_msg->auth->mech->gm_name, + mech->gm_name, 
gss_msg->uid); p += gss_msg->msg.len; if (clnt->cl_principal) { @@ -398,6 +416,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, p += len; gss_msg->msg.len += len; } + if (mech->gm_upcall_enctypes) { + len = sprintf(p, mech->gm_upcall_enctypes); + p += len; + gss_msg->msg.len += len; + } len = sprintf(p, "\n"); gss_msg->msg.len += len; @@ -507,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task) spin_lock(&inode->i_lock); if (gss_cred->gc_upcall != NULL) rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); - else if (gss_msg->ctx != NULL) { - gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); - gss_cred->gc_upcall = NULL; - rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); - } else if (gss_msg->msg.errno >= 0) { + else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { task->tk_timeout = 0; gss_cred->gc_upcall = gss_msg; /* gss_upcall_callback will release the reference to gss_upcall_msg */ atomic_inc(&gss_msg->count); rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); - } else + } else { + gss_handle_downcall_result(gss_cred, gss_msg); err = gss_msg->msg.errno; + } spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); out: @@ -1117,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task) return 0; } +static int gss_cred_is_negative_entry(struct rpc_cred *cred) +{ + if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { + unsigned long now = jiffies; + unsigned long begin, expire; + struct gss_cred *gss_cred; + + gss_cred = container_of(cred, struct gss_cred, gc_base); + begin = gss_cred->gc_upcall_timestamp; + expire = begin + gss_expired_cred_retry_delay * HZ; + + if (time_in_range_open(now, begin, expire)) + return 1; + } + return 0; +} + /* * Refresh credentials. XXX - finish */ @@ -1126,6 +1164,9 @@ gss_refresh(struct rpc_task *task) struct rpc_cred *cred = task->tk_msg.rpc_cred; int ret = 0; + if (gss_cred_is_negative_entry(cred)) + return -EKEYEXPIRED; + if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { ret = gss_renew_cred(task); @@ -1316,15 +1357,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, inpages = snd_buf->pages + first; snd_buf->pages = rqstp->rq_enc_pages; snd_buf->page_base -= first << PAGE_CACHE_SHIFT; - /* Give the tail its own page, in case we need extra space in the - * head when wrapping: */ + /* + * Give the tail its own page, in case we need extra space in the + * head when wrapping: + * + * call_allocate() allocates twice the slack space required + * by the authentication flavor to rq_callsize. + * For GSS, slack is GSS_CRED_SLACK. 
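/*
 * Userspace toy of the expiry window in gss_cred_is_negative_entry()
 * above: a credential is treated as known-expired only within
 * gss_expired_cred_retry_delay seconds of the upcall that returned
 * -EKEYEXPIRED, and time_in_range_open() makes that test safe across
 * jiffies wraparound. The same window reduces to one wrap-safe
 * unsigned subtraction:
 */
#include <assert.h>

static int in_window(unsigned long now, unsigned long begin,
		     unsigned long len)
{
	return now - begin < len;	/* unsigned math survives wraparound */
}

int main(void)
{
	assert(in_window(105, 100, 10));
	assert(!in_window(110, 100, 10));		/* open upper bound */
	assert(in_window(2, (unsigned long)-4, 10));	/* across the wrap */
	return 0;
}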
+ */ if (snd_buf->page_len || snd_buf->tail[0].iov_len) { tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); snd_buf->tail[0].iov_base = tmp; } maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); - /* RPC_SLACK_SPACE should prevent this ever happening: */ + /* slack space should prevent this ever happening: */ BUG_ON(snd_buf->len > snd_buf->buflen); status = -EIO; /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was @@ -1573,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void) } MODULE_LICENSE("GPL"); +module_param_named(expired_cred_retry_delay, + gss_expired_cred_retry_delay, + uint, 0644); +MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until " + "the RPC engine retries an expired credential"); + module_init(init_rpcsec_gss) module_exit(exit_rpcsec_gss) diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e9b6361..75ee993 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -1,7 +1,7 @@ /* * linux/net/sunrpc/gss_krb5_crypto.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> @@ -41,6 +41,7 @@ #include <linux/crypto.h> #include <linux/highmem.h> #include <linux/pagemap.h> +#include <linux/random.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/sunrpc/xdr.h> @@ -58,13 +59,13 @@ krb5_encrypt( { u32 ret = -EINVAL; struct scatterlist sg[1]; - u8 local_iv[16] = {0}; + u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; - if (crypto_blkcipher_ivsize(tfm) > 16) { + if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; @@ -92,13 +93,13 @@ krb5_decrypt( { u32 ret = -EINVAL; struct scatterlist sg[1]; - u8 local_iv[16] = {0}; + u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; - if (crypto_blkcipher_ivsize(tfm) > 16) { + if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; @@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data) return crypto_hash_update(desc, sg, sg->length); } -/* checksum the plaintext data and hdrlen bytes of the token header */ -s32 -make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, - int body_offset, struct xdr_netobj *cksum) +static int +arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) +{ + unsigned int ms_usage; + + switch (usage) { + case KG_USAGE_SIGN: + ms_usage = 15; + break; + case KG_USAGE_SEAL: + ms_usage = 13; + break; + default: + return EINVAL;; + } + salt[0] = (ms_usage >> 0) & 0xff; + salt[1] = (ms_usage >> 8) & 0xff; + salt[2] = (ms_usage >> 16) & 0xff; + salt[3] = (ms_usage >> 24) & 0xff; + + return 0; +} + +static u32 +make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout) { - struct hash_desc desc; /* XXX add to ctx? 
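/*
 * Userspace toy of arcfour_hmac_md5_usage_to_salt() above: the GSS
 * usage is mapped to the Microsoft usage numbers (15 for sign, 13 for
 * seal) and packed as the 4-byte little-endian salt that RC4-HMAC
 * checksums are computed over.
 */
#include <assert.h>
#include <stdint.h>

static void pack_le32(uint8_t salt[4], uint32_t ms_usage)
{
	salt[0] = ms_usage & 0xff;		/* low byte first */
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;
}

int main(void)
{
	uint8_t salt[4];

	pack_le32(salt, 15);	/* KG_USAGE_SIGN -> ms_usage 15 */
	assert(salt[0] == 15 && salt[1] == 0 && salt[2] == 0 && salt[3] == 0);
	return 0;
}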
*/ + struct hash_desc desc; struct scatterlist sg[1]; int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + u8 rc4salt[4]; + struct crypto_hash *md5; + struct crypto_hash *hmac_md5; + + if (cksumkey == NULL) + return GSS_S_FAILURE; + + if (cksumout->len < kctx->gk5e->cksumlength) { + dprintk("%s: checksum buffer length, %u, too small for %s\n", + __func__, cksumout->len, kctx->gk5e->name); + return GSS_S_FAILURE; + } + + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { + dprintk("%s: invalid usage value %u\n", __func__, usage); + return GSS_S_FAILURE; + } + + md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(md5)) + return GSS_S_FAILURE; + + hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac_md5)) { + crypto_free_hash(md5); + return GSS_S_FAILURE; + } + + desc.tfm = md5; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_init(&desc); + if (err) + goto out; + sg_init_one(sg, rc4salt, 4); + err = crypto_hash_update(&desc, sg, 4); + if (err) + goto out; + + sg_init_one(sg, header, hdrlen); + err = crypto_hash_update(&desc, sg, hdrlen); + if (err) + goto out; + err = xdr_process_buf(body, body_offset, body->len - body_offset, + checksummer, &desc); + if (err) + goto out; + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + + desc.tfm = hmac_md5; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_init(&desc); + if (err) + goto out; + err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); + if (err) + goto out; + + sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5)); + err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), + checksumdata); + if (err) + goto out; + + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + cksumout->len = kctx->gk5e->cksumlength; +out: + crypto_free_hash(md5); + crypto_free_hash(hmac_md5); + return err ? 
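/*
 * Shape of make_checksum_hmac_md5() above, as a hedged sketch: an
 * unkeyed MD5 is run over salt + token header + message body, and the
 * resulting digest is then HMAC-MD5'd with the checksum key. md5()
 * and hmac_md5() are hypothetical helpers standing in for the hunk's
 * crypto_hash_init()/update()/final() and setkey()/digest() sequences;
 * this compiles as a sketch but is not a linkable implementation.
 */
#include <stddef.h>
#include <stdint.h>

void md5(const uint8_t *msg, size_t len, uint8_t out[16]);
void hmac_md5(const uint8_t *key, size_t klen,
	      const uint8_t *msg, size_t mlen, uint8_t out[16]);

static void rc4_checksum(const uint8_t key[16],
			 const uint8_t *salted_msg, size_t len,
			 uint8_t out[16])
{
	uint8_t digest[16];

	md5(salted_msg, len, digest);		/* inner, unkeyed hash */
	hmac_md5(key, 16, digest, 16, out);	/* outer, keyed hash */
}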
GSS_S_FAILURE : 0; +} + +/* + * checksum the plaintext data and hdrlen bytes of the token header + * The checksum is performed over the first 8 bytes of the + * gss token header and then over the data body + */ +u32 +make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout) +{ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + unsigned int checksumlen; + + if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) + return make_checksum_hmac_md5(kctx, header, hdrlen, + body, body_offset, + cksumkey, usage, cksumout); + + if (cksumout->len < kctx->gk5e->cksumlength) { + dprintk("%s: checksum buffer length, %u, too small for %s\n", + __func__, cksumout->len, kctx->gk5e->name); + return GSS_S_FAILURE; + } - desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); + desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) return GSS_S_FAILURE; - cksum->len = crypto_hash_digestsize(desc.tfm); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + checksumlen = crypto_hash_digestsize(desc.tfm); + + if (cksumkey != NULL) { + err = crypto_hash_setkey(desc.tfm, cksumkey, + kctx->gk5e->keylength); + if (err) + goto out; + } + err = crypto_hash_init(&desc); if (err) goto out; @@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, checksummer, &desc); if (err) goto out; - err = crypto_hash_final(&desc, cksum->data); + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + switch (kctx->gk5e->ctype) { + case CKSUMTYPE_RSA_MD5: + err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata, + checksumdata, checksumlen); + if (err) + goto out; + memcpy(cksumout->data, + checksumdata + checksumlen - kctx->gk5e->cksumlength, + kctx->gk5e->cksumlength); + break; + case CKSUMTYPE_HMAC_SHA1_DES3: + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + break; + default: + BUG(); + break; + } + cksumout->len = kctx->gk5e->cksumlength; +out: + crypto_free_hash(desc.tfm); + return err ? GSS_S_FAILURE : 0; +} + +/* + * checksum the plaintext data and hdrlen bytes of the token header + * Per rfc4121, sec. 4.2.4, the checksum is performed over the data + * body then over the first 16 octets of the MIC token + * Inclusion of the header data in the calculation of the + * checksum is optional. 
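/*
 * Toy of the per-type truncation in the make_checksum() switch above:
 * CKSUMTYPE_RSA_MD5 keeps the trailing cksumlength bytes of the
 * encrypted digest, while the SHA-1 based types copy from the front
 * (all 20 bytes for des3; make_checksum_v2() later truncates the AES
 * variants to 12).
 */
#include <assert.h>
#include <string.h>

int main(void)
{
	char digest[17] = "0123456789abcdef";	/* 16-byte stand-in digest */
	char out[8];

	memcpy(out, digest + 16 - 8, 8);	/* RSA_MD5 style: trailing bytes */
	assert(memcmp(out, "89abcdef", 8) == 0);

	memcpy(out, digest, 8);			/* SHA-1 style: leading bytes */
	assert(memcmp(out, "01234567", 8) == 0);
	return 0;
}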
+ */ +u32 +make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout) +{ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + unsigned int checksumlen; + + if (kctx->gk5e->keyed_cksum == 0) { + dprintk("%s: expected keyed hash for %s\n", + __func__, kctx->gk5e->name); + return GSS_S_FAILURE; + } + if (cksumkey == NULL) { + dprintk("%s: no key supplied for %s\n", + __func__, kctx->gk5e->name); + return GSS_S_FAILURE; + } + + desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(desc.tfm)) + return GSS_S_FAILURE; + checksumlen = crypto_hash_digestsize(desc.tfm); + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); + if (err) + goto out; + + err = crypto_hash_init(&desc); + if (err) + goto out; + err = xdr_process_buf(body, body_offset, body->len - body_offset, + checksummer, &desc); + if (err) + goto out; + if (header != NULL) { + sg_init_one(sg, header, hdrlen); + err = crypto_hash_update(&desc, sg, hdrlen); + if (err) + goto out; + } + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + + cksumout->len = kctx->gk5e->cksumlength; + + switch (kctx->gk5e->ctype) { + case CKSUMTYPE_HMAC_SHA1_96_AES128: + case CKSUMTYPE_HMAC_SHA1_96_AES256: + /* note that this truncates the hash */ + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + break; + default: + BUG(); + break; + } out: crypto_free_hash(desc.tfm); return err ? GSS_S_FAILURE : 0; } struct encryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ + u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; struct blkcipher_desc desc; int pos; struct xdr_buf *outbuf; @@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data) desc->fraglen += sg->length; desc->pos += sg->length; - fraglen = thislen & 7; /* XXX hardcoded blocksize */ + fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); thislen -= fraglen; if (thislen == 0) @@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, } struct decryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ + u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; struct blkcipher_desc desc; struct scatterlist frags[4]; int fragno; @@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data) desc->fragno++; desc->fraglen += sg->length; - fraglen = thislen & 7; /* XXX hardcoded blocksize */ + fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); thislen -= fraglen; if (thislen == 0) @@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); } + +/* + * This function makes the assumption that it was ultimately called + * from gss_wrap(). + * + * The client auth_gss code moves any existing tail data into a + * separate page before calling gss_wrap. + * The server svcauth_gss code ensures that both the head and the + * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap. + * + * Even with that guarantee, this function may be called more than + * once in the processing of gss_wrap(). The best we can do is + * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the + * largest expected shift will fit within RPC_MAX_AUTH_SIZE. + * At run-time we can verify that a single invocation of this + * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE. 
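/*
 * Userspace toy of the fraglen change in the encryptor()/decryptor()
 * hunks above: the hard-coded "& 7" becomes a mask built from the
 * cipher blocksize. That only works because every supported blocksize
 * in the new enctype table (1, 8 and 16) is a power of two, so
 * "len & (bs - 1)" equals "len % bs".
 */
#include <assert.h>

int main(void)
{
	unsigned int len = 61;
	unsigned int bs;

	for (bs = 1; bs <= 16; bs <<= 1)
		assert((len & (bs - 1)) == len % bs);
	return 0;
}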
+ */ + +int +xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen) +{ + u8 *p; + + if (shiftlen == 0) + return 0; + + BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE); + BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE); + + p = buf->head[0].iov_base + base; + + memmove(p + shiftlen, p, buf->head[0].iov_len - base); + + buf->head[0].iov_len += shiftlen; + buf->len += shiftlen; + + return 0; +} + +static u32 +gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf, + u32 offset, u8 *iv, struct page **pages, int encrypt) +{ + u32 ret; + struct scatterlist sg[1]; + struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; + u8 data[crypto_blkcipher_blocksize(cipher) * 2]; + struct page **save_pages; + u32 len = buf->len - offset; + + BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2); + + /* + * For encryption, we want to read from the cleartext + * page cache pages, and write the encrypted data to + * the supplied xdr_buf pages. + */ + save_pages = buf->pages; + if (encrypt) + buf->pages = pages; + + ret = read_bytes_from_xdr_buf(buf, offset, data, len); + buf->pages = save_pages; + if (ret) + goto out; + + sg_init_one(sg, data, len); + + if (encrypt) + ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); + else + ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); + + if (ret) + goto out; + + ret = write_bytes_to_xdr_buf(buf, offset, data, len); + +out: + return ret; +} + +u32 +gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, int ec, struct page **pages) +{ + u32 err; + struct xdr_netobj hmac; + u8 *cksumkey; + u8 *ecptr; + struct crypto_blkcipher *cipher, *aux_cipher; + int blocksize; + struct page **save_pages; + int nblocks, nbytes; + struct encryptor_desc desc; + u32 cbcbytes; + unsigned int usage; + + if (kctx->initiate) { + cipher = kctx->initiator_enc; + aux_cipher = kctx->initiator_enc_aux; + cksumkey = kctx->initiator_integ; + usage = KG_USAGE_INITIATOR_SEAL; + } else { + cipher = kctx->acceptor_enc; + aux_cipher = kctx->acceptor_enc_aux; + cksumkey = kctx->acceptor_integ; + usage = KG_USAGE_ACCEPTOR_SEAL; + } + blocksize = crypto_blkcipher_blocksize(cipher); + + /* hide the gss token header and insert the confounder */ + offset += GSS_KRB5_TOK_HDR_LEN; + if (xdr_extend_head(buf, offset, kctx->gk5e->conflen)) + return GSS_S_FAILURE; + gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); + offset -= GSS_KRB5_TOK_HDR_LEN; + + if (buf->tail[0].iov_base != NULL) { + ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; + } else { + buf->tail[0].iov_base = buf->head[0].iov_base + + buf->head[0].iov_len; + buf->tail[0].iov_len = 0; + ecptr = buf->tail[0].iov_base; + } + + memset(ecptr, 'X', ec); + buf->tail[0].iov_len += ec; + buf->len += ec; + + /* copy plaintext gss token header after filler (if any) */ + memcpy(ecptr + ec, buf->head[0].iov_base + offset, + GSS_KRB5_TOK_HDR_LEN); + buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; + buf->len += GSS_KRB5_TOK_HDR_LEN; + + /* Do the HMAC */ + hmac.len = GSS_KRB5_MAX_CKSUM_LEN; + hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; + + /* + * When we are called, pages points to the real page cache + * data -- which we can't go and encrypt! buf->pages points + * to scratch pages which we are going to send off to the + * client/server. Swap in the plaintext pages to calculate + * the hmac. 
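/*
 * Userspace toy of the head shift in xdr_extend_head() above: making
 * room for shiftlen bytes at base slides the rest of the head up with
 * one overlapping-safe memmove; the real code then fills the opened
 * gap with a random confounder (placeholder 'X' bytes here).
 */
#include <assert.h>
#include <string.h>

int main(void)
{
	char head[16] = "abcdef";	/* head buffer, 6 bytes used */
	size_t used = 6, base = 2, shift = 3;

	memmove(head + base + shift, head + base, used - base);
	memset(head + base, 'X', shift);	/* confounder placeholder */
	assert(memcmp(head, "abXXXcdef", 9) == 0);
	return 0;
}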
+ */ + save_pages = buf->pages; + buf->pages = pages; + + err = make_checksum_v2(kctx, NULL, 0, buf, + offset + GSS_KRB5_TOK_HDR_LEN, + cksumkey, usage, &hmac); + buf->pages = save_pages; + if (err) + return GSS_S_FAILURE; + + nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN; + nblocks = (nbytes + blocksize - 1) / blocksize; + cbcbytes = 0; + if (nblocks > 2) + cbcbytes = (nblocks - 2) * blocksize; + + memset(desc.iv, 0, sizeof(desc.iv)); + + if (cbcbytes) { + desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; + desc.fragno = 0; + desc.fraglen = 0; + desc.pages = pages; + desc.outbuf = buf; + desc.desc.info = desc.iv; + desc.desc.flags = 0; + desc.desc.tfm = aux_cipher; + + sg_init_table(desc.infrags, 4); + sg_init_table(desc.outfrags, 4); + + err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, + cbcbytes, encryptor, &desc); + if (err) + goto out_err; + } + + /* Make sure IV carries forward from any CBC results. */ + err = gss_krb5_cts_crypt(cipher, buf, + offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes, + desc.iv, pages, 1); + if (err) { + err = GSS_S_FAILURE; + goto out_err; + } + + /* Now update buf to account for HMAC */ + buf->tail[0].iov_len += kctx->gk5e->cksumlength; + buf->len += kctx->gk5e->cksumlength; + +out_err: + if (err) + err = GSS_S_FAILURE; + return err; +} + +u32 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, + u32 *headskip, u32 *tailskip) +{ + struct xdr_buf subbuf; + u32 ret = 0; + u8 *cksum_key; + struct crypto_blkcipher *cipher, *aux_cipher; + struct xdr_netobj our_hmac_obj; + u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; + u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; + int nblocks, blocksize, cbcbytes; + struct decryptor_desc desc; + unsigned int usage; + + if (kctx->initiate) { + cipher = kctx->acceptor_enc; + aux_cipher = kctx->acceptor_enc_aux; + cksum_key = kctx->acceptor_integ; + usage = KG_USAGE_ACCEPTOR_SEAL; + } else { + cipher = kctx->initiator_enc; + aux_cipher = kctx->initiator_enc_aux; + cksum_key = kctx->initiator_integ; + usage = KG_USAGE_INITIATOR_SEAL; + } + blocksize = crypto_blkcipher_blocksize(cipher); + + + /* create a segment skipping the header and leaving out the checksum */ + xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, + (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - + kctx->gk5e->cksumlength)); + + nblocks = (subbuf.len + blocksize - 1) / blocksize; + + cbcbytes = 0; + if (nblocks > 2) + cbcbytes = (nblocks - 2) * blocksize; + + memset(desc.iv, 0, sizeof(desc.iv)); + + if (cbcbytes) { + desc.fragno = 0; + desc.fraglen = 0; + desc.desc.info = desc.iv; + desc.desc.flags = 0; + desc.desc.tfm = aux_cipher; + + sg_init_table(desc.frags, 4); + + ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); + if (ret) + goto out_err; + } + + /* Make sure IV carries forward from any CBC results. 
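/*
 * Toy of the block split in gss_krb5_aes_encrypt()/_decrypt() above:
 * everything except the final two cipher blocks goes through plain
 * CBC (the "aux" cipher), and the last block or two are handled by
 * ciphertext stealing, so a non-multiple-of-blocksize payload never
 * needs padding.
 */
#include <assert.h>

static unsigned int cbc_bytes(unsigned int nbytes, unsigned int blocksize)
{
	unsigned int nblocks = (nbytes + blocksize - 1) / blocksize;

	return nblocks > 2 ? (nblocks - 2) * blocksize : 0;
}

int main(void)
{
	assert(cbc_bytes(16, 16) == 0);		/* one block: CTS only */
	assert(cbc_bytes(33, 16) == 16);	/* three blocks: 1 CBC + 2 CTS */
	assert(cbc_bytes(48, 16) == 16);
	return 0;
}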
*/ + ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); + if (ret) + goto out_err; + + + /* Calculate our hmac over the plaintext data */ + our_hmac_obj.len = sizeof(our_hmac); + our_hmac_obj.data = our_hmac; + + ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0, + cksum_key, usage, &our_hmac_obj); + if (ret) + goto out_err; + + /* Get the packet's hmac value */ + ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, + pkt_hmac, kctx->gk5e->cksumlength); + if (ret) + goto out_err; + + if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { + ret = GSS_S_BAD_SIG; + goto out_err; + } + *headskip = kctx->gk5e->conflen; + *tailskip = kctx->gk5e->cksumlength; +out_err: + if (ret && ret != GSS_S_BAD_SIG) + ret = GSS_S_FAILURE; + return ret; +} + +/* + * Compute Kseq given the initial session key and the checksum. + * Set the key of the given cipher. + */ +int +krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, + unsigned char *cksum) +{ + struct crypto_hash *hmac; + struct hash_desc desc; + struct scatterlist sg[1]; + u8 Kseq[GSS_KRB5_MAX_KEYLEN]; + u32 zeroconstant = 0; + int err; + + dprintk("%s: entered\n", __func__); + + hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld, allocating hash '%s'\n", + __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); + return PTR_ERR(hmac); + } + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err; + + /* Compute intermediate Kseq from session key */ + err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_init_table(sg, 1); + sg_set_buf(sg, &zeroconstant, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kseq); + if (err) + goto out_err; + + /* Compute final Kseq from the checksum and intermediate Kseq */ + err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_set_buf(sg, cksum, 8); + + err = crypto_hash_digest(&desc, sg, 8, Kseq); + if (err) + goto out_err; + + err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); + if (err) + goto out_err; + + err = 0; + +out_err: + crypto_free_hash(hmac); + dprintk("%s: returning %d\n", __func__, err); + return err; +} + +/* + * Compute Kcrypt given the initial session key and the plaintext seqnum. + * Set the key of cipher kctx->enc. 
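/*
 * Sketch of the two-stage Kseq derivation in krb5_rc4_setup_seq_key()
 * above (the RC4-HMAC scheme of RFC 4757): HMAC-MD5 over a 4-byte zero
 * constant keyed with the session key yields an intermediate key, and
 * HMAC-MD5 over the first 8 checksum bytes keyed with that yields the
 * final Kseq. hmac_md5() is a hypothetical helper standing in for the
 * hunk's crypto_hash_setkey()/crypto_hash_digest() calls.
 */
#include <stddef.h>
#include <stdint.h>

void hmac_md5(const uint8_t *key, size_t klen,
	      const uint8_t *msg, size_t mlen, uint8_t out[16]);

static void derive_kseq(const uint8_t session_key[16],
			const uint8_t cksum[8], uint8_t kseq[16])
{
	const uint8_t zero[4] = { 0, 0, 0, 0 };
	uint8_t tmp[16];

	hmac_md5(session_key, 16, zero, 4, tmp);	/* intermediate Kseq */
	hmac_md5(tmp, 16, cksum, 8, kseq);		/* final Kseq */
}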
+ */ +int +krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, + s32 seqnum) +{ + struct crypto_hash *hmac; + struct hash_desc desc; + struct scatterlist sg[1]; + u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; + u8 zeroconstant[4] = {0}; + u8 seqnumarray[4]; + int err, i; + + dprintk("%s: entered, seqnum %u\n", __func__, seqnum); + + hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld, allocating hash '%s'\n", + __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); + return PTR_ERR(hmac); + } + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err; + + /* Compute intermediate Kcrypt from session key */ + for (i = 0; i < kctx->gk5e->keylength; i++) + Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; + + err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_init_table(sg, 1); + sg_set_buf(sg, zeroconstant, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kcrypt); + if (err) + goto out_err; + + /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ + err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff); + seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff); + seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); + seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); + + sg_set_buf(sg, seqnumarray, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kcrypt); + if (err) + goto out_err; + + err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + err = 0; + +out_err: + crypto_free_hash(hmac); + dprintk("%s: returning %d\n", __func__, err); + return err; +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c new file mode 100644 index 0000000..76e42e6 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -0,0 +1,336 @@ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. 
+ * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include <linux/err.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <linux/sunrpc/gss_krb5.h> +#include <linux/sunrpc/xdr.h> + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +/* + * This is the n-fold function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. + */ + +static void krb5_nfold(u32 inbits, const u8 *in, + u32 outbits, u8 *out) +{ + int a, b, c, lcm; + int byte, i, msbit; + + /* the code below is more readable if I make these bytes + instead of bits */ + + inbits >>= 3; + outbits >>= 3; + + /* first compute lcm(n,k) */ + + a = outbits; + b = inbits; + + while (b != 0) { + c = b; + b = a%b; + a = c; + } + + lcm = outbits*inbits/a; + + /* now do the real work */ + + memset(out, 0, outbits); + byte = 0; + + /* this will end up cycling through k lcm(k,n)/k times, which + is correct */ + for (i = lcm-1; i >= 0; i--) { + /* compute the msbit in k which gets added into this byte */ + msbit = ( + /* first, start with the msbit in the first, + * unrotated byte */ + ((inbits << 3) - 1) + /* then, for each byte, shift to the right + * for each repetition */ + + (((inbits << 3) + 13) * (i/inbits)) + /* last, pick out the correct byte within + * that shifted repetition */ + + ((inbits - (i % inbits)) << 3) + ) % (inbits << 3); + + /* pull out the byte value itself */ + byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)| + (in[((inbits) - (msbit >> 3)) % inbits])) + >> ((msbit & 7) + 1)) & 0xff; + + /* do the addition */ + byte += out[i % outbits]; + out[i % outbits] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + + } + + /* if there's a carry bit left over, add it back in */ + if (byte) { + for (i = outbits - 1; i >= 0; i--) { + /* do the addition */ + byte += out[i]; + out[i] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + } +} + +/* + * This is the DK (derive_key) function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. 
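/*
 * The first step of krb5_nfold() above computes lcm(inbytes, outbytes)
 * with Euclid's gcd before cycling through the rotated input.
 * Userspace toy of that step, with the lengths the new code actually
 * folds: the 5-byte derivation constant folded to the cipher
 * blocksize (8 for des3, 16 for aes).
 */
#include <assert.h>

static unsigned int lcm(unsigned int a, unsigned int b)
{
	unsigned int x = a, y = b, t;

	while (y != 0) {	/* Euclid: x ends up as gcd(a, b) */
		t = y;
		y = x % y;
		x = t;
	}
	return a / x * b;
}

int main(void)
{
	assert(lcm(5, 8) == 40);	/* constant -> des3 blocksize */
	assert(lcm(5, 16) == 80);	/* constant -> aes blocksize */
	return 0;
}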
+ */ + +u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, + const struct xdr_netobj *inkey, + struct xdr_netobj *outkey, + const struct xdr_netobj *in_constant, + gfp_t gfp_mask) +{ + size_t blocksize, keybytes, keylength, n; + unsigned char *inblockdata, *outblockdata, *rawkey; + struct xdr_netobj inblock, outblock; + struct crypto_blkcipher *cipher; + u32 ret = EINVAL; + + blocksize = gk5e->blocksize; + keybytes = gk5e->keybytes; + keylength = gk5e->keylength; + + if ((inkey->len != keylength) || (outkey->len != keylength)) + goto err_return; + + cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + goto err_return; + if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len)) + goto err_return; + + /* allocate and set up buffers */ + + ret = ENOMEM; + inblockdata = kmalloc(blocksize, gfp_mask); + if (inblockdata == NULL) + goto err_free_cipher; + + outblockdata = kmalloc(blocksize, gfp_mask); + if (outblockdata == NULL) + goto err_free_in; + + rawkey = kmalloc(keybytes, gfp_mask); + if (rawkey == NULL) + goto err_free_out; + + inblock.data = (char *) inblockdata; + inblock.len = blocksize; + + outblock.data = (char *) outblockdata; + outblock.len = blocksize; + + /* initialize the input block */ + + if (in_constant->len == inblock.len) { + memcpy(inblock.data, in_constant->data, inblock.len); + } else { + krb5_nfold(in_constant->len * 8, in_constant->data, + inblock.len * 8, inblock.data); + } + + /* loop encrypting the blocks until enough key bytes are generated */ + + n = 0; + while (n < keybytes) { + (*(gk5e->encrypt))(cipher, NULL, inblock.data, + outblock.data, inblock.len); + + if ((keybytes - n) <= outblock.len) { + memcpy(rawkey + n, outblock.data, (keybytes - n)); + break; + } + + memcpy(rawkey + n, outblock.data, outblock.len); + memcpy(inblock.data, outblock.data, outblock.len); + n += outblock.len; + } + + /* postprocess the key */ + + inblock.data = (char *) rawkey; + inblock.len = keybytes; + + BUG_ON(gk5e->mk_key == NULL); + ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey); + if (ret) { + dprintk("%s: got %d from mk_key function for '%s'\n", + __func__, ret, gk5e->encrypt_name); + goto err_free_raw; + } + + /* clean memory, free resources and exit */ + + ret = 0; + +err_free_raw: + memset(rawkey, 0, keybytes); + kfree(rawkey); +err_free_out: + memset(outblockdata, 0, blocksize); + kfree(outblockdata); +err_free_in: + memset(inblockdata, 0, blocksize); + kfree(inblockdata); +err_free_cipher: + crypto_free_blkcipher(cipher); +err_return: + return ret; +} + +#define smask(step) ((1<<step)-1) +#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step))) +#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1) + +static void mit_des_fixup_key_parity(u8 key[8]) +{ + int i; + for (i = 0; i < 8; i++) { + key[i] &= 0xfe; + key[i] |= 1^parity_char(key[i]); + } +} + +/* + * This is the des3 key derivation postprocess function + */ +u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key) +{ + int i; + u32 ret = EINVAL; + + if (key->len != 24) { + dprintk("%s: key->len is %d\n", __func__, key->len); + goto err_out; + } + if (randombits->len != 21) { + dprintk("%s: randombits->len is %d\n", + __func__, randombits->len); + goto err_out; + } + + /* take the seven bytes, move them around into the top 7 bits of the + 8 key bytes, then compute the parity bits. Do this three times. 
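/*
 * Toy of the expansion loop inside krb5_derive_key() above: the folded
 * constant is encrypted repeatedly, each output block both chained
 * into the next round and appended to the raw key until keybytes have
 * accumulated (e.g. 21 bytes out of 8-byte des3 blocks). toy_encrypt()
 * is a placeholder permutation, not a cipher; blocksize <= 64 assumed.
 */
#include <string.h>

static void toy_encrypt(unsigned char *out, const unsigned char *in, int n)
{
	int i;

	for (i = 0; i < n; i++)
		out[i] = (unsigned char)(in[i] ^ 0x5a);	/* NOT a real cipher */
}

static void dk_expand(unsigned char *block, int blocksize,
		      unsigned char *rawkey, int keybytes)
{
	unsigned char out[64];
	int n = 0, take;

	while (n < keybytes) {
		toy_encrypt(out, block, blocksize);
		take = keybytes - n < blocksize ? keybytes - n : blocksize;
		memcpy(rawkey + n, out, take);	/* append key material */
		memcpy(block, out, blocksize);	/* chain into next round */
		n += take;
	}
}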
*/ + + for (i = 0; i < 3; i++) { + memcpy(key->data + i*8, randombits->data + i*7, 7); + key->data[i*8+7] = (((key->data[i*8]&1)<<1) | + ((key->data[i*8+1]&1)<<2) | + ((key->data[i*8+2]&1)<<3) | + ((key->data[i*8+3]&1)<<4) | + ((key->data[i*8+4]&1)<<5) | + ((key->data[i*8+5]&1)<<6) | + ((key->data[i*8+6]&1)<<7)); + + mit_des_fixup_key_parity(key->data + i*8); + } + ret = 0; +err_out: + return ret; +} + +/* + * This is the aes key derivation postprocess function + */ +u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key) +{ + u32 ret = EINVAL; + + if (key->len != 16 && key->len != 32) { + dprintk("%s: key->len is %d\n", __func__, key->len); + goto err_out; + } + if (randombits->len != 16 && randombits->len != 32) { + dprintk("%s: randombits->len is %d\n", + __func__, randombits->len); + goto err_out; + } + if (randombits->len != key->len) { + dprintk("%s: randombits->len is %d, key->len is %d\n", + __func__, randombits->len, key->len); + goto err_out; + } + memcpy(key->data, randombits->data, key->len); + ret = 0; +err_out: + return ret; +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 2deb0ed..0326446 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -1,7 +1,7 @@ /* * linux/net/sunrpc/gss_krb5_mech.c * - * Copyright (c) 2001 The Regents of the University of Michigan. + * Copyright (c) 2001-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> @@ -48,6 +48,143 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static struct gss_api_mech gss_kerberos_mech; /* forward declaration */ + +static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { + /* + * DES (All DES enctypes are mapped to the same gss functionality) + */ + { + .etype = ENCTYPE_DES_CBC_RAW, + .ctype = CKSUMTYPE_RSA_MD5, + .name = "des-cbc-crc", + .encrypt_name = "cbc(des)", + .cksum_name = "md5", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = NULL, + .signalg = SGN_ALG_DES_MAC_MD5, + .sealalg = SEAL_ALG_DES, + .keybytes = 7, + .keylength = 8, + .blocksize = 8, + .conflen = 8, + .cksumlength = 8, + .keyed_cksum = 0, + }, + /* + * RC4-HMAC + */ + { + .etype = ENCTYPE_ARCFOUR_HMAC, + .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR, + .name = "rc4-hmac", + .encrypt_name = "ecb(arc4)", + .cksum_name = "hmac(md5)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = NULL, + .signalg = SGN_ALG_HMAC_MD5, + .sealalg = SEAL_ALG_MICROSOFT_RC4, + .keybytes = 16, + .keylength = 16, + .blocksize = 1, + .conflen = 8, + .cksumlength = 8, + .keyed_cksum = 1, + }, + /* + * 3DES + */ + { + .etype = ENCTYPE_DES3_CBC_RAW, + .ctype = CKSUMTYPE_HMAC_SHA1_DES3, + .name = "des3-hmac-sha1", + .encrypt_name = "cbc(des3_ede)", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_des3_make_key, + .signalg = SGN_ALG_HMAC_SHA1_DES3_KD, + .sealalg = SEAL_ALG_DES3KD, + .keybytes = 21, + .keylength = 24, + .blocksize = 8, + .conflen = 8, + .cksumlength = 20, + .keyed_cksum = 1, + }, + /* + * AES128 + */ + { + .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96, + .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128, + .name = "aes128-cts", + .encrypt_name = "cts(cbc(aes))", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_aes_make_key, + .encrypt_v2 = gss_krb5_aes_encrypt, + .decrypt_v2 = gss_krb5_aes_decrypt, + .signalg = -1, + .sealalg = 
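/*
 * Userspace toy of the parity rule enforced by
 * mit_des_fixup_key_parity() above: every DES key byte must end up
 * with odd parity, i.e. an odd number of set bits, with the low bit
 * used as the parity bit.
 */
#include <assert.h>

static int popcount8(unsigned char x)
{
	int n = 0;

	while (x) {
		n += x & 1;
		x >>= 1;
	}
	return n;
}

static unsigned char fix_parity(unsigned char b)
{
	unsigned char p = b & 0xfe;	/* clear the parity bit */

	return p | (popcount8(p) % 2 == 0 ? 1 : 0);	/* force odd parity */
}

int main(void)
{
	int i;

	for (i = 0; i < 256; i++)
		assert(popcount8(fix_parity((unsigned char)i)) % 2 == 1);
	return 0;
}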
-1, + .keybytes = 16, + .keylength = 16, + .blocksize = 16, + .conflen = 16, + .cksumlength = 12, + .keyed_cksum = 1, + }, + /* + * AES256 + */ + { + .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96, + .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256, + .name = "aes256-cts", + .encrypt_name = "cts(cbc(aes))", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_aes_make_key, + .encrypt_v2 = gss_krb5_aes_encrypt, + .decrypt_v2 = gss_krb5_aes_decrypt, + .signalg = -1, + .sealalg = -1, + .keybytes = 32, + .keylength = 32, + .blocksize = 16, + .conflen = 16, + .cksumlength = 12, + .keyed_cksum = 1, + }, +}; + +static const int num_supported_enctypes = + ARRAY_SIZE(supported_gss_krb5_enctypes); + +static int +supported_gss_krb5_enctype(int etype) +{ + int i; + for (i = 0; i < num_supported_enctypes; i++) + if (supported_gss_krb5_enctypes[i].etype == etype) + return 1; + return 0; +} + +static const struct gss_krb5_enctype * +get_gss_krb5_enctype(int etype) +{ + int i; + for (i = 0; i < num_supported_enctypes; i++) + if (supported_gss_krb5_enctypes[i].etype == etype) + return &supported_gss_krb5_enctypes[i]; + return NULL; +} + static const void * simple_get_bytes(const void *p, const void *end, void *res, int len) { @@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) } static inline const void * -get_key(const void *p, const void *end, struct crypto_blkcipher **res) +get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_blkcipher **res) { struct xdr_netobj key; int alg; - char *alg_name; p = simple_get_bytes(p, end, &alg, sizeof(alg)); if (IS_ERR(p)) goto out_err; + + switch (alg) { + case ENCTYPE_DES_CBC_CRC: + case ENCTYPE_DES_CBC_MD4: + case ENCTYPE_DES_CBC_MD5: + /* Map all these key types to ENCTYPE_DES_CBC_RAW */ + alg = ENCTYPE_DES_CBC_RAW; + break; + } + + if (!supported_gss_krb5_enctype(alg)) { + printk(KERN_WARNING "gss_kerberos_mech: unsupported " + "encryption key algorithm %d\n", alg); + goto out_err; + } p = simple_get_netobj(p, end, &key); if (IS_ERR(p)) goto out_err; - switch (alg) { - case ENCTYPE_DES_CBC_RAW: - alg_name = "cbc(des)"; - break; - default: - printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); - goto out_err_free_key; - } - *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); + *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); if (IS_ERR(*res)) { - printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); + printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); *res = NULL; goto out_err_free_key; } if (crypto_blkcipher_setkey(*res, key.data, key.len)) { - printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); + printk(KERN_WARNING "gss_kerberos_mech: error setting key for " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); goto out_err_free_tfm; } @@ -123,56 +270,55 @@ out_err: } static int -gss_import_sec_context_kerberos(const void *p, - size_t len, - struct gss_ctx *ctx_id) +gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) { - const void *end = (const void *)((const char *)p + len); - struct krb5_ctx *ctx; int tmp; - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { - p = ERR_PTR(-ENOMEM); - goto out_err; - } - p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; + + /* Old format supports only DES! 
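/*
 * Toy of the supported_gss_krb5_enctype()/get_gss_krb5_enctype()
 * lookups above: a plain linear scan over the small static table,
 * returning the matching entry or NULL. Entry names come from the
 * table in the hunk; the numeric etypes here are illustrative only.
 */
#include <stddef.h>

struct toy_enctype { int etype; const char *name; };

static const struct toy_enctype table[] = {
	{ 4, "des-cbc-crc" },		/* illustrative etype numbers */
	{ 6, "des3-hmac-sha1" },
	{ 17, "aes128-cts" },
	{ 18, "aes256-cts" },
	{ 23, "rc4-hmac" },
};

static const struct toy_enctype *toy_lookup(int etype)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].etype == etype)
			return &table[i];
	return NULL;
}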
Any other enctype uses new format */ + ctx->enctype = ENCTYPE_DES_CBC_RAW; + + ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); + if (ctx->gk5e == NULL) + goto out_err; + /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore * completely (like the next twenty bytes): */ if (unlikely(p + 20 > end || p + 20 < p)) - goto out_err_free_ctx; + goto out_err; p += 20; p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; if (tmp != SGN_ALG_DES_MAC_MD5) { p = ERR_PTR(-ENOSYS); - goto out_err_free_ctx; + goto out_err; } p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; if (tmp != SEAL_ALG_DES) { p = ERR_PTR(-ENOSYS); - goto out_err_free_ctx; + goto out_err; } p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) - goto out_err_free_ctx; - p = get_key(p, end, &ctx->enc); + goto out_err; + p = get_key(p, end, ctx, &ctx->enc); if (IS_ERR(p)) goto out_err_free_mech; - p = get_key(p, end, &ctx->seq); + p = get_key(p, end, ctx, &ctx->seq); if (IS_ERR(p)) goto out_err_free_key1; if (p != end) { @@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p, goto out_err_free_key2; } - ctx_id->internal_ctx_id = ctx; - - dprintk("RPC: Successfully imported new context.\n"); return 0; out_err_free_key2: @@ -191,18 +334,378 @@ out_err_free_key1: crypto_free_blkcipher(ctx->enc); out_err_free_mech: kfree(ctx->mech_used.data); -out_err_free_ctx: - kfree(ctx); out_err: return PTR_ERR(p); } +struct crypto_blkcipher * +context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) +{ + struct crypto_blkcipher *cp; + + cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(cp)) { + dprintk("gss_kerberos_mech: unable to initialize " + "crypto algorithm %s\n", cname); + return NULL; + } + if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { + dprintk("gss_kerberos_mech: error setting key for " + "crypto algorithm %s\n", cname); + crypto_free_blkcipher(cp); + return NULL; + } + return cp; +} + +static inline void +set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) +{ + cdata[0] = (usage>>24)&0xff; + cdata[1] = (usage>>16)&0xff; + cdata[2] = (usage>>8)&0xff; + cdata[3] = usage&0xff; + cdata[4] = seed; +} + +static int +context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) +{ + struct xdr_netobj c, keyin, keyout; + u8 cdata[GSS_KRB5_K5CLENGTH]; + u32 err; + + c.len = GSS_KRB5_K5CLENGTH; + c.data = cdata; + + keyin.data = ctx->Ksess; + keyin.len = ctx->gk5e->keylength; + keyout.len = ctx->gk5e->keylength; + + /* seq uses the raw key */ + ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, + ctx->Ksess); + if (ctx->seq == NULL) + goto out_err; + + ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, + ctx->Ksess); + if (ctx->enc == NULL) + goto out_free_seq; + + /* derive cksum */ + set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->cksum; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving cksum key\n", + __func__, err); + goto out_free_enc; + } + + return 0; + 
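/*
 * Userspace toy of set_cdata() above: the 5-byte RFC 3961 derivation
 * constant is the 32-bit key-usage number packed big-endian plus one
 * trailing seed byte that selects the encryption, checksum or
 * integrity key.
 */
#include <assert.h>
#include <stdint.h>

static void toy_set_cdata(uint8_t cdata[5], uint32_t usage, uint8_t seed)
{
	cdata[0] = (usage >> 24) & 0xff;	/* big-endian usage */
	cdata[1] = (usage >> 16) & 0xff;
	cdata[2] = (usage >> 8) & 0xff;
	cdata[3] = usage & 0xff;
	cdata[4] = seed;			/* key-type seed byte */
}

int main(void)
{
	uint8_t c[5];

	toy_set_cdata(c, 0x01020304, 0xaa);
	assert(c[0] == 1 && c[1] == 2 && c[2] == 3 && c[3] == 4 && c[4] == 0xaa);
	return 0;
}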
+out_free_enc: + crypto_free_blkcipher(ctx->enc); +out_free_seq: + crypto_free_blkcipher(ctx->seq); +out_err: + return -EINVAL; +} + +/* + * Note that RC4 depends on deriving keys using the sequence + * number or the checksum of a token. Therefore, the final keys + * cannot be calculated until the token is being constructed! + */ +static int +context_derive_keys_rc4(struct krb5_ctx *ctx) +{ + struct crypto_hash *hmac; + char sigkeyconstant[] = "signaturekey"; + int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + + dprintk("RPC: %s: entered\n", __func__); + /* + * derive cksum (aka Ksign) key + */ + hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld allocating hash '%s'\n", + __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); + err = PTR_ERR(hmac); + goto out_err; + } + + err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); + if (err) + goto out_err_free_hmac; + + sg_init_table(sg, 1); + sg_set_buf(sg, sigkeyconstant, slen); + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err_free_hmac; + + err = crypto_hash_digest(&desc, sg, slen, ctx->cksum); + if (err) + goto out_err_free_hmac; + /* + * allocate hash, and blkciphers for data and seqnum encryption + */ + ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->enc)) { + err = PTR_ERR(ctx->enc); + goto out_err_free_hmac; + } + + ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->seq)) { + crypto_free_blkcipher(ctx->enc); + err = PTR_ERR(ctx->seq); + goto out_err_free_hmac; + } + + dprintk("RPC: %s: returning success\n", __func__); + + err = 0; + +out_err_free_hmac: + crypto_free_hash(hmac); +out_err: + dprintk("RPC: %s: returning %d\n", __func__, err); + return err; +} + +static int +context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) +{ + struct xdr_netobj c, keyin, keyout; + u8 cdata[GSS_KRB5_K5CLENGTH]; + u32 err; + + c.len = GSS_KRB5_K5CLENGTH; + c.data = cdata; + + keyin.data = ctx->Ksess; + keyin.len = ctx->gk5e->keylength; + keyout.len = ctx->gk5e->keylength; + + /* initiator seal encryption */ + set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); + keyout.data = ctx->initiator_seal; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving initiator_seal key\n", + __func__, err); + goto out_err; + } + ctx->initiator_enc = context_v2_alloc_cipher(ctx, + ctx->gk5e->encrypt_name, + ctx->initiator_seal); + if (ctx->initiator_enc == NULL) + goto out_err; + + /* acceptor seal encryption */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); + keyout.data = ctx->acceptor_seal; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving acceptor_seal key\n", + __func__, err); + goto out_free_initiator_enc; + } + ctx->acceptor_enc = context_v2_alloc_cipher(ctx, + ctx->gk5e->encrypt_name, + ctx->acceptor_seal); + if (ctx->acceptor_enc == NULL) + goto out_free_initiator_enc; + + /* initiator sign checksum */ + set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->initiator_sign; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving initiator_sign key\n", + __func__, err); + goto out_free_acceptor_enc; 
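/*
 * Detail worth noticing in context_derive_keys_rc4() above: the
 * signing key is HMAC(Ksess, "signaturekey") with the string's
 * trailing NUL deliberately included (slen = strlen() + 1), as the
 * RC4-HMAC scheme requires. Userspace toy of that length math:
 */
#include <assert.h>
#include <string.h>

int main(void)
{
	const char sigkeyconstant[] = "signaturekey";

	assert(strlen(sigkeyconstant) + 1 == sizeof(sigkeyconstant));
	assert(sigkeyconstant[sizeof(sigkeyconstant) - 1] == '\0');
	return 0;
}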
+ } + + /* acceptor sign checksum */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->acceptor_sign; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving acceptor_sign key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + /* initiator seal integrity */ + set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY); + keyout.data = ctx->initiator_integ; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving initiator_integ key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + /* acceptor seal integrity */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY); + keyout.data = ctx->acceptor_integ; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); + if (err) { + dprintk("%s: Error %d deriving acceptor_integ key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + switch (ctx->enctype) { + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + ctx->initiator_enc_aux = + context_v2_alloc_cipher(ctx, "cbc(aes)", + ctx->initiator_seal); + if (ctx->initiator_enc_aux == NULL) + goto out_free_acceptor_enc; + ctx->acceptor_enc_aux = + context_v2_alloc_cipher(ctx, "cbc(aes)", + ctx->acceptor_seal); + if (ctx->acceptor_enc_aux == NULL) { + crypto_free_blkcipher(ctx->initiator_enc_aux); + goto out_free_acceptor_enc; + } + } + + return 0; + +out_free_acceptor_enc: + crypto_free_blkcipher(ctx->acceptor_enc); +out_free_initiator_enc: + crypto_free_blkcipher(ctx->initiator_enc); +out_err: + return -EINVAL; +} + +static int +gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, + gfp_t gfp_mask) +{ + int keylen; + + p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); + if (IS_ERR(p)) + goto out_err; + ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; + + p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); + if (IS_ERR(p)) + goto out_err; + p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); + if (IS_ERR(p)) + goto out_err; + /* set seq_send for use by "older" enctypes */ + ctx->seq_send = ctx->seq_send64; + if (ctx->seq_send64 != ctx->seq_send) { + dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, + (long unsigned)ctx->seq_send64, ctx->seq_send); + goto out_err; + } + p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); + if (IS_ERR(p)) + goto out_err; + /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */ + if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) + ctx->enctype = ENCTYPE_DES3_CBC_RAW; + ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); + if (ctx->gk5e == NULL) { + dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", + ctx->enctype); + p = ERR_PTR(-EINVAL); + goto out_err; + } + keylen = ctx->gk5e->keylength; + + p = simple_get_bytes(p, end, ctx->Ksess, keylen); + if (IS_ERR(p)) + goto out_err; + + if (p != end) { + p = ERR_PTR(-EINVAL); + goto out_err; + } + + ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, + gss_kerberos_mech.gm_oid.len, gfp_mask); + if (unlikely(ctx->mech_used.data == NULL)) { + p = ERR_PTR(-ENOMEM); + goto out_err; + } + ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; + + switch (ctx->enctype) { + case ENCTYPE_DES3_CBC_RAW: + return context_derive_keys_des3(ctx, gfp_mask); + case ENCTYPE_ARCFOUR_HMAC: + return context_derive_keys_rc4(ctx); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case 
ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return context_derive_keys_new(ctx, gfp_mask); + default: + return -EINVAL; + } + +out_err: + return PTR_ERR(p); +} + +static int +gss_import_sec_context_kerberos(const void *p, size_t len, + struct gss_ctx *ctx_id, + gfp_t gfp_mask) +{ + const void *end = (const void *)((const char *)p + len); + struct krb5_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), gfp_mask); + if (ctx == NULL) + return -ENOMEM; + + if (len == 85) + ret = gss_import_v1_context(p, end, ctx); + else + ret = gss_import_v2_context(p, end, ctx, gfp_mask); + + if (ret == 0) + ctx_id->internal_ctx_id = ctx; + else + kfree(ctx); + + dprintk("RPC: %s: returning %d\n", __func__, ret); + return ret; +} + static void gss_delete_sec_context_kerberos(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; crypto_free_blkcipher(kctx->seq); crypto_free_blkcipher(kctx->enc); + crypto_free_blkcipher(kctx->acceptor_enc); + crypto_free_blkcipher(kctx->initiator_enc); + crypto_free_blkcipher(kctx->acceptor_enc_aux); + crypto_free_blkcipher(kctx->initiator_enc_aux); kfree(kctx->mech_used.data); kfree(kctx); } @@ -241,6 +744,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, + .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ", }; static int __init init_kerberos_module(void) diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 88fe6e7..d7941ea 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -3,7 +3,7 @@ * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. 
* * Andy Adamson <andros@umich.edu> @@ -70,53 +70,154 @@ DEFINE_SPINLOCK(krb5_seq_lock); -u32 -gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, +static char * +setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) +{ + __be16 *ptr, *krb5_hdr; + int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; + + token->len = g_token_size(&ctx->mech_used, body_size); + + ptr = (__be16 *)token->data; + g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); + + /* ptr now at start of header described in rfc 1964, section 1.2.1: */ + krb5_hdr = ptr; + *ptr++ = KG_TOK_MIC_MSG; + *ptr++ = cpu_to_le16(ctx->gk5e->signalg); + *ptr++ = SEAL_ALG_NONE; + *ptr++ = 0xffff; + + return (char *)krb5_hdr; +} + +static void * +setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) +{ + __be16 *ptr, *krb5_hdr; + u8 *p, flags = 0x00; + + if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) + flags |= 0x01; + if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) + flags |= 0x04; + + /* Per rfc 4121, sec 4.2.6.1, there is no header, + * just start the token */ + krb5_hdr = ptr = (__be16 *)token->data; + + *ptr++ = KG2_TOK_MIC; + p = (u8 *)ptr; + *p++ = flags; + *p++ = 0xff; + ptr = (__be16 *)p; + *ptr++ = 0xffff; + *ptr++ = 0xffff; + + token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; + return krb5_hdr; +} + +static u32 +gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; - unsigned char *ptr, *msg_start; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; + void *ptr; s32 now; u32 seq_send; + u8 *cksumkey; - dprintk("RPC: gss_krb5_seal\n"); + dprintk("RPC: %s\n", __func__); BUG_ON(ctx == NULL); now = get_seconds(); - token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8); + ptr = setup_token(ctx, token); - ptr = token->data; - g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr); + if (ctx->gk5e->keyed_cksum) + cksumkey = ctx->cksum; + else + cksumkey = NULL; - /* ptr now at header described in rfc 1964, section 1.2.1: */ - ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff); - ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff); + if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, + KG_USAGE_SIGN, &md5cksum)) + return GSS_S_FAILURE; - msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8; + memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); - *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); - memset(ptr + 4, 0xff, 4); + spin_lock(&krb5_seq_lock); + seq_send = ctx->seq_send++; + spin_unlock(&krb5_seq_lock); - if (make_checksum("md5", ptr, 8, text, 0, &md5cksum)) + if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, + seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) return GSS_S_FAILURE; - if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) - return GSS_S_FAILURE; + return (ctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; +} + +u32 +gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, + struct xdr_netobj *token) +{ + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj cksumobj = { .len = sizeof(cksumdata), + .data = cksumdata}; + void *krb5_hdr; + s32 now; + u64 seq_send; + u8 *cksumkey; + unsigned int cksum_usage; - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); + dprintk("RPC: %s\n", __func__); + krb5_hdr = setup_token_v2(ctx, token); + + /* Set up the sequence number. Now 64-bits in clear + * text and w/o direction indicator */ spin_lock(&krb5_seq_lock); - seq_send = ctx->seq_send++; + seq_send = ctx->seq_send64++; spin_unlock(&krb5_seq_lock); - - if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, - seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, - ptr + 8)) + *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send); + + if (ctx->initiate) { + cksumkey = ctx->initiator_sign; + cksum_usage = KG_USAGE_INITIATOR_SIGN; + } else { + cksumkey = ctx->acceptor_sign; + cksum_usage = KG_USAGE_ACCEPTOR_SIGN; + } + + if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, + text, 0, cksumkey, cksum_usage, &cksumobj)) return GSS_S_FAILURE; + memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len); + + now = get_seconds(); + return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } + +u32 +gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, + struct xdr_netobj *token) +{ + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; + + switch (ctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: + return gss_get_mic_v1(ctx, text, token); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_get_mic_v2(ctx, text, token); + } +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 6331cd6..415c013 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -39,14 +39,51 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static s32 +krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, + unsigned char *cksum, unsigned char *buf) +{ + struct crypto_blkcipher *cipher; + unsigned char plain[8]; + s32 code; + + dprintk("RPC: %s:\n", __func__); + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); + plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); + plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); + plain[3] = (unsigned char) ((seqnum >> 0) & 0xff); + plain[4] = direction; + plain[5] = direction; + plain[6] = direction; + plain[7] = direction; + + code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); + if (code) + goto out; + + code = krb5_encrypt(cipher, cksum, plain, buf, 8); +out: + crypto_free_blkcipher(cipher); + return code; +} s32 -krb5_make_seq_num(struct crypto_blkcipher *key, +krb5_make_seq_num(struct krb5_ctx *kctx, + struct crypto_blkcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf) { unsigned char plain[8]; + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) + return krb5_make_rc4_seq_num(kctx, direction, seqnum, + cksum, buf); + plain[0] = (unsigned char) (seqnum & 0xff); plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); @@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key, return 
krb5_encrypt(key, cksum, plain, buf, 8); } +static s32 +krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, + unsigned char *buf, int *direction, s32 *seqnum) +{ + struct crypto_blkcipher *cipher; + unsigned char plain[8]; + s32 code; + + dprintk("RPC: %s:\n", __func__); + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); + if (code) + goto out; + + code = krb5_decrypt(cipher, cksum, buf, plain, 8); + if (code) + goto out; + + if ((plain[4] != plain[5]) || (plain[4] != plain[6]) + || (plain[4] != plain[7])) { + code = (s32)KG_BAD_SEQ; + goto out; + } + + *direction = plain[4]; + + *seqnum = ((plain[0] << 24) | (plain[1] << 16) | + (plain[2] << 8) | (plain[3])); +out: + crypto_free_blkcipher(cipher); + return code; +} + s32 -krb5_get_seq_num(struct crypto_blkcipher *key, +krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, unsigned char *buf, int *direction, u32 *seqnum) { s32 code; unsigned char plain[8]; + struct crypto_blkcipher *key = kctx->seq; dprintk("RPC: krb5_get_seq_num:\n"); + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) + return krb5_get_rc4_seq_num(kctx, cksum, buf, + direction, seqnum); + if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) return code; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index ce6c247..6cd930f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -3,7 +3,7 @@ * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> @@ -70,20 +70,21 @@ /* read_token is a mic token, and message_buffer is the data that the mic was * supposedly taken over. */ -u32 -gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, +static u32 +gss_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, struct xdr_netobj *read_token) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; int signalg; int sealalg; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; s32 now; int direction; u32 seqnum; unsigned char *ptr = (unsigned char *)read_token->data; int bodysize; + u8 *cksumkey; dprintk("RPC: krb5_read_token\n"); @@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, /* XXX sanity-check bodysize?? */ signalg = ptr[2] + (ptr[3] << 8); - if (signalg != SGN_ALG_DES_MAC_MD5) + if (signalg != ctx->gk5e->signalg) return GSS_S_DEFECTIVE_TOKEN; sealalg = ptr[4] + (ptr[5] << 8); @@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; - if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum)) - return GSS_S_FAILURE; + if (ctx->gk5e->keyed_cksum) + cksumkey = ctx->cksum; + else + cksumkey = NULL; - if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16)) + if (make_checksum(ctx, ptr, 8, message_buffer, 0, + cksumkey, KG_USAGE_SIGN, &md5cksum)) return GSS_S_FAILURE; - if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) + if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, + ctx->gk5e->cksumlength)) return GSS_S_BAD_SIG; /* it got through unscathed. 
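The checksum compared just above covers the eight-byte token header as well as the message body, so a match authenticates the header fields too.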
Make sure the context is unexpired */ @@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, /* do sequencing checks */ - if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum)) + if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, + &direction, &seqnum)) return GSS_S_FAILURE; if ((ctx->initiate && direction != 0xff) || @@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, return GSS_S_COMPLETE; } + +static u32 +gss_verify_mic_v2(struct krb5_ctx *ctx, + struct xdr_buf *message_buffer, struct xdr_netobj *read_token) +{ + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj cksumobj = {.len = sizeof(cksumdata), + .data = cksumdata}; + s32 now; + u64 seqnum; + u8 *ptr = read_token->data; + u8 *cksumkey; + u8 flags; + int i; + unsigned int cksum_usage; + + dprintk("RPC: %s\n", __func__); + + if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC) + return GSS_S_DEFECTIVE_TOKEN; + + flags = ptr[2]; + if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || + (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) + return GSS_S_BAD_SIG; + + if (flags & KG2_TOKEN_FLAG_SEALED) { + dprintk("%s: token has unexpected sealed flag\n", __func__); + return GSS_S_FAILURE; + } + + for (i = 3; i < 8; i++) + if (ptr[i] != 0xff) + return GSS_S_DEFECTIVE_TOKEN; + + if (ctx->initiate) { + cksumkey = ctx->acceptor_sign; + cksum_usage = KG_USAGE_ACCEPTOR_SIGN; + } else { + cksumkey = ctx->initiator_sign; + cksum_usage = KG_USAGE_INITIATOR_SIGN; + } + + if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0, + cksumkey, cksum_usage, &cksumobj)) + return GSS_S_FAILURE; + + if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN, + ctx->gk5e->cksumlength)) + return GSS_S_BAD_SIG; + + /* it got through unscathed. Make sure the context is unexpired */ + now = get_seconds(); + if (now > ctx->endtime) + return GSS_S_CONTEXT_EXPIRED; + + /* do sequencing checks */ + + seqnum = be64_to_cpup((__be64 *)ptr + 8); + + return GSS_S_COMPLETE; +} + +u32 +gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, + struct xdr_buf *message_buffer, + struct xdr_netobj *read_token) +{ + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; + + switch (ctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: + return gss_verify_mic_v1(ctx, message_buffer, read_token); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_verify_mic_v2(ctx, message_buffer, read_token); + } +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index a6e9056..2763e3e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -1,3 +1,33 @@ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. 
+ * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + #include <linux/types.h> #include <linux/jiffies.h> #include <linux/sunrpc/gss_krb5.h> @@ -12,10 +42,7 @@ static inline int gss_krb5_padding(int blocksize, int length) { - /* Most of the code is block-size independent but currently we - * use only 8: */ - BUG_ON(blocksize != 8); - return 8 - (length & 7); + return blocksize - (length % blocksize); } static inline void @@ -86,8 +113,8 @@ out: return 0; } -static void -make_confounder(char *p, u32 conflen) +void +gss_krb5_make_confounder(char *p, u32 conflen) { static u64 i = 0; u64 *q = (u64 *)p; @@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen) /* XXX factor out common code with seal/unseal. */ -u32 -gss_wrap_kerberos(struct gss_ctx *ctx, int offset, +static u32 +gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf, struct page **pages) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; int blocksize = 0, plainlen; unsigned char *ptr, *msg_start; s32 now; int headlen; struct page **tmp_pages; u32 seq_send; + u8 *cksumkey; + u32 conflen = kctx->gk5e->conflen; - dprintk("RPC: gss_wrap_kerberos\n"); + dprintk("RPC: %s\n", __func__); now = get_seconds(); blocksize = crypto_blkcipher_blocksize(kctx->enc); gss_krb5_add_padding(buf, offset, blocksize); BUG_ON((buf->len - offset) % blocksize); - plainlen = blocksize + buf->len - offset; + plainlen = conflen + buf->len - offset; - headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - - (buf->len - offset); + headlen = g_token_size(&kctx->mech_used, + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) - + (buf->len - offset); ptr = buf->head[0].iov_base + offset; /* shift data to make room for header. */ + xdr_extend_head(buf, offset, headlen); + /* XXX Would be cleverer to encrypt while copying. */ - /* XXX bounds checking, slack, etc. 
*/ - memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); - buf->head[0].iov_len += headlen; - buf->len += headlen; BUG_ON((buf->len - offset - headlen) % blocksize); g_make_token_header(&kctx->mech_used, - GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); + GSS_KRB5_TOK_HDR_LEN + + kctx->gk5e->cksumlength + plainlen, &ptr); /* ptr now at header described in rfc 1964, section 1.2.1: */ ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); - msg_start = ptr + 24; + msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength; - *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); + *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg); memset(ptr + 4, 0xff, 4); - *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); + *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); - make_confounder(msg_start, blocksize); + gss_krb5_make_confounder(msg_start, conflen); + + if (kctx->gk5e->keyed_cksum) + cksumkey = kctx->cksum; + else + cksumkey = NULL; /* XXXJBF: UGH!: */ tmp_pages = buf->pages; buf->pages = pages; - if (make_checksum("md5", ptr, 8, buf, - offset + headlen - blocksize, &md5cksum)) + if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen, + cksumkey, KG_USAGE_SEAL, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; - if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) - return GSS_S_FAILURE; - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); + memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); spin_lock(&krb5_seq_lock); seq_send = kctx->seq_send++; @@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, /* XXX would probably be more efficient to compute checksum * and encrypt at the same time: */ - if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, + if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; - if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, - pages)) - return GSS_S_FAILURE; + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { + struct crypto_blkcipher *cipher; + int err; + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return GSS_S_FAILURE; + + krb5_rc4_setup_enc_key(kctx, cipher, seq_send); + + err = gss_encrypt_xdr_buf(cipher, buf, + offset + headlen - conflen, pages); + crypto_free_blkcipher(cipher); + if (err) + return GSS_S_FAILURE; + } else { + if (gss_encrypt_xdr_buf(kctx->enc, buf, + offset + headlen - conflen, pages)) + return GSS_S_FAILURE; + } return (kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } -u32 -gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) +static u32 +gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; int signalg; int sealalg; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; s32 now; int direction; s32 seqnum; @@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) void *data_start, *orig_start; int data_len; int blocksize; + u32 conflen = kctx->gk5e->conflen; + int crypt_offset; + u8 *cksumkey; dprintk("RPC: gss_unwrap_kerberos\n"); @@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) /* get the sign and seal algorithms */ signalg = ptr[2] + (ptr[3] << 8); - if (signalg != SGN_ALG_DES_MAC_MD5) + if (signalg != kctx->gk5e->signalg) return GSS_S_DEFECTIVE_TOKEN; sealalg = ptr[4] + (ptr[5] << 8); - if (sealalg != SEAL_ALG_DES) + if (sealalg != kctx->gk5e->sealalg) return GSS_S_DEFECTIVE_TOKEN; if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; - if (gss_decrypt_xdr_buf(kctx->enc, buf, - ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) - return GSS_S_DEFECTIVE_TOKEN; + /* + * Data starts after token header and checksum. ptr points + * to the beginning of the token header + */ + crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - + (unsigned char *)buf->head[0].iov_base; + + /* + * Need plaintext seqnum to derive encryption key for arcfour-hmac + */ + if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, + ptr + 8, &direction, &seqnum)) + return GSS_S_BAD_SIG; - if (make_checksum("md5", ptr, 8, buf, - ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) - return GSS_S_FAILURE; + if ((kctx->initiate && direction != 0xff) || + (!kctx->initiate && direction != 0)) + return GSS_S_BAD_SIG; + + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { + struct crypto_blkcipher *cipher; + int err; + + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return GSS_S_FAILURE; + + krb5_rc4_setup_enc_key(kctx, cipher, seqnum); - if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) + err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); + crypto_free_blkcipher(cipher); + if (err) + return GSS_S_DEFECTIVE_TOKEN; + } else { + if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) + return GSS_S_DEFECTIVE_TOKEN; + } + + if (kctx->gk5e->keyed_cksum) + cksumkey = kctx->cksum; + else + cksumkey = NULL; + + if (make_checksum(kctx, ptr, 8, buf, crypt_offset, + cksumkey, KG_USAGE_SEAL, &md5cksum)) return GSS_S_FAILURE; - if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) + if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, + kctx->gk5e->cksumlength)) return GSS_S_BAD_SIG; /* it got through unscathed. Make sure the context is unexpired */ @@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) /* do sequencing checks */ - if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, - &direction, &seqnum)) - return GSS_S_BAD_SIG; - - if ((kctx->initiate && direction != 0xff) || - (!kctx->initiate && direction != 0)) - return GSS_S_BAD_SIG; - /* Copy the data back to the right position. 
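After the token header and checksum the decrypted plaintext still begins with conflen bytes of confounder, so the payload proper starts at data_start below and is moved down over all of that to restore the caller's original layout.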
XXX: Would probably be * better to copy and encrypt at the same time. */ blocksize = crypto_blkcipher_blocksize(kctx->enc); - data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; + data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + + conflen; orig_start = buf->head[0].iov_base + offset; data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; memmove(orig_start, data_start, data_len); @@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) return GSS_S_COMPLETE; } + +/* + * We cannot currently handle tokens with rotated data. We need a + * generalized routine to rotate the data in place. It is anticipated + * that we won't encounter rotated data in the general case. + */ +static u32 +rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) +{ + unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); + + if (realrrc == 0) + return 0; + + dprintk("%s: cannot process token with rotated data: " + "rrc %u, realrrc %u\n", __func__, rrc, realrrc); + return 1; +} + +static u32 +gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, struct page **pages) +{ + int blocksize; + u8 *ptr, *plainhdr; + s32 now; + u8 flags = 0x00; + __be16 *be16ptr, ec = 0; + __be64 *be64ptr; + u32 err; + + dprintk("RPC: %s\n", __func__); + + if (kctx->gk5e->encrypt_v2 == NULL) + return GSS_S_FAILURE; + + /* make room for gss token header */ + if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN)) + return GSS_S_FAILURE; + + /* construct gss token header */ + ptr = plainhdr = buf->head[0].iov_base + offset; + *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff); + *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff); + + if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) + flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR; + if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0) + flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY; + /* We always do confidentiality in wrap tokens */ + flags |= KG2_TOKEN_FLAG_SEALED; + + *ptr++ = flags; + *ptr++ = 0xff; + be16ptr = (__be16 *)ptr; + + blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc); + *be16ptr++ = cpu_to_be16(ec); + /* "inner" token header always uses 0 for RRC */ + *be16ptr++ = cpu_to_be16(0); + + be64ptr = (__be64 *)be16ptr; + spin_lock(&krb5_seq_lock); + *be64ptr = cpu_to_be64(kctx->seq_send64++); + spin_unlock(&krb5_seq_lock); + + err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages); + if (err) + return err; + + now = get_seconds(); + return (kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; +} + +static u32 +gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) +{ + s32 now; + u64 seqnum; + u8 *ptr; + u8 flags = 0x00; + u16 ec, rrc; + int err; + u32 headskip, tailskip; + u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN]; + unsigned int movelen; + + + dprintk("RPC: %s\n", __func__); + + if (kctx->gk5e->decrypt_v2 == NULL) + return GSS_S_FAILURE; + + ptr = buf->head[0].iov_base + offset; + + if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP) + return GSS_S_DEFECTIVE_TOKEN; + + flags = ptr[2]; + if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || + (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) + return GSS_S_BAD_SIG; + + if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) { + dprintk("%s: token missing expected sealed flag\n", __func__); + return GSS_S_DEFECTIVE_TOKEN; + } + + if (ptr[3] != 0xff) + return GSS_S_DEFECTIVE_TOKEN; + + ec = be16_to_cpup((__be16 *)(ptr + 4)); + rrc = be16_to_cpup((__be16 *)(ptr + 6)); + + seqnum = be64_to_cpup((__be64 *)(ptr + 8)); + + if (rrc != 0) { + err = rotate_left(kctx, offset, buf, rrc); + if (err) + return GSS_S_FAILURE; + } + + err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, + &headskip, &tailskip); + if (err) + return GSS_S_FAILURE; + + /* + * Retrieve the decrypted gss token header and verify + * it against the original + */ + err = read_bytes_from_xdr_buf(buf, + buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip, + decrypted_hdr, GSS_KRB5_TOK_HDR_LEN); + if (err) { + dprintk("%s: error %u getting decrypted_hdr\n", __func__, err); + return GSS_S_FAILURE; + } + if (memcmp(ptr, decrypted_hdr, 6) + || memcmp(ptr + 8, decrypted_hdr + 8, 8)) { + dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__); + return GSS_S_FAILURE; + } + + /* do sequencing checks */ + + /* it got through unscathed. Make sure the context is unexpired */ + now = get_seconds(); + if (now > kctx->endtime) + return GSS_S_CONTEXT_EXPIRED; + + /* + * Move the head data back to the right position in xdr_buf. + * We ignore any "ec" data since it might be in the head or + * the tail, and we really don't need to deal with it. + * Note that buf->head[0].iov_len may indicate the available + * head buffer space rather than that actually occupied. 
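Hence movelen is first clamped to the smaller of head[0].iov_len and buf->len, and only then reduced by the offset, token header, and headskip bytes that precede the payload.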
+ */ + movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); + movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip; + BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > + buf->head[0].iov_len); + memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); + buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; + buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; + + return GSS_S_COMPLETE; +} + +u32 +gss_wrap_kerberos(struct gss_ctx *gctx, int offset, + struct xdr_buf *buf, struct page **pages) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + + switch (kctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: + return gss_wrap_kerberos_v1(kctx, offset, buf, pages); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_wrap_kerberos_v2(kctx, offset, buf, pages); + } +} + +u32 +gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + + switch (kctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: + return gss_unwrap_kerberos_v1(kctx, offset, buf); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_unwrap_kerberos_v2(kctx, offset, buf); + } +} + diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 76e4c6f..2689de3 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put); int gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_api_mech *mech, - struct gss_ctx **ctx_id) + struct gss_ctx **ctx_id, + gfp_t gfp_mask) { - if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) + if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) return -ENOMEM; (*ctx_id)->mech_type = gss_mech_get(mech); return mech->gm_ops - ->gss_import_sec_context(input_token, bufsize, *ctx_id); + ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask); } /* gss_get_mic: compute a mic over message and return mic_token. */ @@ -285,6 +286,20 @@ gss_verify_mic(struct gss_ctx *context_handle, mic_token); } +/* + * This function is called from both the client and server code. + * Each makes guarantees about how much "slack" space is available + * for the underlying function in "buf"'s head and tail while + * performing the wrap. + * + * The client and server code allocate RPC_MAX_AUTH_SIZE extra + * space in both the head and tail which is available for use by + * the wrap function. + * + * Underlying functions should verify they do not use more than + * RPC_MAX_AUTH_SIZE of extra space in either the head or tail + * when performing the wrap. 
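For the krb5 mechanisms above that slack is ample: even a v2 wrap token adds only GSS_KRB5_TOK_HDR_LEN (16 bytes) of header plus per-enctype checksum and confounder overhead, far below RPC_MAX_AUTH_SIZE.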
+ */ u32 gss_wrap(struct gss_ctx *ctx_id, int offset, diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 035e1dd..dc3f1f5 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) static int gss_import_sec_context_spkm3(const void *p, size_t len, - struct gss_ctx *ctx_id) + struct gss_ctx *ctx_id, + gfp_t gfp_mask) { const void *end = (const void *)((const char *)p + len); struct spkm3_ctx *ctx; int version; - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) + if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask))) goto out_err; p = simple_get_bytes(p, end, &version, sizeof(version)); diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index b81e790..cc385b3 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd, len = qword_get(&mesg, buf, mlen); if (len < 0) goto out; - status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); + status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL); if (status) goto out; @@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) inpages = resbuf->pages; /* XXX: Would be better to write some xdr helper functions for * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ + + /* + * If there is currently tail data, make sure there is + * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in + * the page, and move the current tail data such that + * there is RPC_MAX_AUTH_SIZE slack space available in + * both the head and tail. + */ if (resbuf->tail[0].iov_base) { BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base + PAGE_SIZE); @@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) resbuf->tail[0].iov_len); resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; } + /* + * If there is no current tail data, make sure there is + * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the + * allotted page, and set up tail information such that there + * is RPC_MAX_AUTH_SIZE slack space available in both the + * head and tail. + */ if (resbuf->tail[0].iov_base == NULL) { if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) return -ENOMEM; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 39bddba..c2173eb 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -28,6 +28,7 @@ #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/pagemap.h> +#include <linux/smp_lock.h> #include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/cache.h> @@ -49,11 +50,17 @@ static void cache_init(struct cache_head *h) h->last_refresh = now; } +static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) +{ + return (h->expiry_time < get_seconds()) || + (detail->flush_time > h->last_refresh); +} + struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, struct cache_head *key, int hash) { struct cache_head **head, **hp; - struct cache_head *new = NULL; + struct cache_head *new = NULL, *freeme = NULL; head = &detail->hash_table[hash]; @@ -62,6 +69,9 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, for (hp=head; *hp != NULL ; hp = &(*hp)->next) { struct cache_head *tmp = *hp; if (detail->match(tmp, key)) { + if (cache_is_expired(detail, tmp)) + /* This entry is expired, we will discard it. 
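Breaking out here guarantees an expired entry is never returned: the read-locked pass falls through and allocates a fresh entry, while the write-locked pass below unhashes the stale one and releases it with cache_put() once the lock is dropped.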
*/ + break; cache_get(tmp); read_unlock(&detail->hash_lock); return tmp; @@ -86,6 +96,13 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, for (hp=head; *hp != NULL ; hp = &(*hp)->next) { struct cache_head *tmp = *hp; if (detail->match(tmp, key)) { + if (cache_is_expired(detail, tmp)) { + *hp = tmp->next; + tmp->next = NULL; + detail->entries --; + freeme = tmp; + break; + } cache_get(tmp); write_unlock(&detail->hash_lock); cache_put(new, detail); @@ -98,6 +115,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, cache_get(new); write_unlock(&detail->hash_lock); + if (freeme) + cache_put(freeme, detail); return new; } EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); @@ -183,10 +202,7 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) { - if (!test_bit(CACHE_VALID, &h->flags) || - h->expiry_time < get_seconds()) - return -EAGAIN; - else if (detail->flush_time > h->last_refresh) + if (!test_bit(CACHE_VALID, &h->flags)) return -EAGAIN; else { /* entry is valid */ @@ -397,31 +413,27 @@ static int cache_clean(void) /* Ok, now to clean this strand */ cp = & current_detail->hash_table[current_index]; - ch = *cp; - for (; ch; cp= & ch->next, ch= *cp) { + for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { if (current_detail->nextcheck > ch->expiry_time) current_detail->nextcheck = ch->expiry_time+1; - if (ch->expiry_time >= get_seconds() && - ch->last_refresh >= current_detail->flush_time) + if (!cache_is_expired(current_detail, ch)) continue; - if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) - cache_dequeue(current_detail, ch); - if (atomic_read(&ch->ref.refcount) == 1) - break; - } - if (ch) { *cp = ch->next; ch->next = NULL; current_detail->entries--; rv = 1; + break; } + write_unlock(¤t_detail->hash_lock); d = current_detail; if (!ch) current_index ++; spin_unlock(&cache_list_lock); if (ch) { + if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) + cache_dequeue(current_detail, ch); cache_revisit_request(ch); cache_put(ch, d); } @@ -1233,8 +1245,10 @@ static int content_open(struct inode *inode, struct file *file, if (!cd || !try_module_get(cd->owner)) return -EACCES; han = __seq_open_private(file, &cache_content_op, sizeof(*han)); - if (han == NULL) + if (han == NULL) { + module_put(cd->owner); return -ENOMEM; + } han->cd = cd; return 0; @@ -1331,12 +1345,18 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) return cache_poll(filp, wait, cd); } -static int cache_ioctl_procfs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static long cache_ioctl_procfs(struct file *filp, + unsigned int cmd, unsigned long arg) { + long ret; + struct inode *inode = filp->f_path.dentry->d_inode; struct cache_detail *cd = PDE(inode)->data; - return cache_ioctl(inode, filp, cmd, arg, cd); + lock_kernel(); + ret = cache_ioctl(inode, filp, cmd, arg, cd); + unlock_kernel(); + + return ret; } static int cache_open_procfs(struct inode *inode, struct file *filp) @@ -1359,7 +1379,7 @@ static const struct file_operations cache_file_operations_procfs = { .read = cache_read_procfs, .write = cache_write_procfs, .poll = cache_poll_procfs, - .ioctl = cache_ioctl_procfs, /* for FIONREAD */ + .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */ .open = cache_open_procfs, .release = cache_release_procfs, }; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 19c9983..756fc32 100644 --- a/net/sunrpc/clnt.c +++ 
b/net/sunrpc/clnt.c @@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = { */ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) { - struct rpc_task *task, *ret; + struct rpc_task *task; task = rpc_new_task(task_setup_data); - if (task == NULL) { - rpc_release_calldata(task_setup_data->callback_ops, - task_setup_data->callback_data); - ret = ERR_PTR(-ENOMEM); + if (IS_ERR(task)) goto out; - } - if (task->tk_status != 0) { - ret = ERR_PTR(task->tk_status); - rpc_put_task(task); - goto out; - } atomic_inc(&task->tk_count); rpc_execute(task); - ret = task; out: - return ret; + return task; } EXPORT_SYMBOL_GPL(rpc_run_task); @@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, * Create an rpc_task to send the data */ task = rpc_new_task(&task_setup_data); - if (!task) { + if (IS_ERR(task)) { xprt_free_bc_request(req); - task = ERR_PTR(-ENOMEM); goto out; } task->tk_rqstp = req; @@ -1518,7 +1507,6 @@ call_refreshresult(struct rpc_task *task) task->tk_action = call_refresh; if (status != -ETIMEDOUT) rpc_delay(task, 3*HZ); - return; } static __be32 * diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index aae6907..4a843b8 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -25,7 +25,6 @@ #ifdef RPC_DEBUG #define RPCDBG_FACILITY RPCDBG_SCHED -#define RPC_TASK_MAGIC_ID 0xf00baa #endif /* @@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task) { static atomic_t rpc_pid; - task->tk_magic = RPC_TASK_MAGIC_ID; task->tk_pid = atomic_inc_return(&rpc_pid); } #else @@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", task->tk_pid, jiffies); -#ifdef RPC_DEBUG - BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); -#endif /* Has the task been executed yet? If not, we cannot wake it up! 
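A task is only marked active once it reaches rpc_execute(), so an inactive task has no queue state to act on; the code below just logs the bogus wakeup and bails out.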
*/ if (!RPC_IS_ACTIVATED(task)) { printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); @@ -834,7 +829,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta } /* starting timestamp */ - task->tk_start = jiffies; + task->tk_start = ktime_get(); dprintk("RPC: new task initialized, procpid %u\n", task_pid_nr(current)); @@ -856,16 +851,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) if (task == NULL) { task = rpc_alloc_task(); - if (task == NULL) - goto out; + if (task == NULL) { + rpc_release_calldata(setup_data->callback_ops, + setup_data->callback_data); + return ERR_PTR(-ENOMEM); + } flags = RPC_TASK_DYNAMIC; } rpc_init_task(task, setup_data); + if (task->tk_status < 0) { + int err = task->tk_status; + rpc_put_task(task); + return ERR_PTR(err); + } task->tk_flags |= flags; dprintk("RPC: allocated task %p\n", task); -out: return task; } @@ -909,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task); static void rpc_release_task(struct rpc_task *task) { -#ifdef RPC_DEBUG - BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); -#endif dprintk("RPC: %5u release task\n", task->tk_pid); if (!list_empty(&task->tk_task)) { @@ -923,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task) } BUG_ON (RPC_IS_QUEUED(task)); -#ifdef RPC_DEBUG - task->tk_magic = 0; -#endif /* Wake up anyone who is waiting for task completion */ rpc_mark_complete_task(task); diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 5785d20..ea1046f 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_iostats *stats; struct rpc_iostats *op_metrics; - long rtt, execute, queue; + ktime_t delta; if (!task->tk_client || !task->tk_client->cl_metrics || !req) return; @@ -156,23 +156,16 @@ void rpc_count_iostats(struct rpc_task *task) op_metrics->om_ntrans += req->rq_ntrans; op_metrics->om_timeouts += task->tk_timeouts; - op_metrics->om_bytes_sent += task->tk_bytes_sent; + op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent; op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; - queue = (long)req->rq_xtime - task->tk_start; - if (queue < 0) - queue = -queue; - op_metrics->om_queue += queue; + delta = ktime_sub(req->rq_xtime, task->tk_start); + op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta); - rtt = task->tk_rtt; - if (rtt < 0) - rtt = -rtt; - op_metrics->om_rtt += rtt; + op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt); - execute = (long)jiffies - task->tk_start; - if (execute < 0) - execute = -execute; - op_metrics->om_execute += execute; + delta = ktime_sub(ktime_get(), task->tk_start); + op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); } static void _print_name(struct seq_file *seq, unsigned int op, @@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op, seq_printf(seq, "\t%12u: ", op); } -#define MILLISECS_PER_JIFFY (1000 / HZ) - void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) { struct rpc_iostats *stats = clnt->cl_metrics; @@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) metrics->om_timeouts, metrics->om_bytes_sent, metrics->om_bytes_recv, - metrics->om_queue * MILLISECS_PER_JIFFY, - metrics->om_rtt * MILLISECS_PER_JIFFY, - metrics->om_execute * MILLISECS_PER_JIFFY); + ktime_to_ms(metrics->om_queue), + ktime_to_ms(metrics->om_rtt), + ktime_to_ms(metrics->om_execute)); } } EXPORT_SYMBOL_GPL(rpc_print_iostats); 
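The stats.c conversion just above replaces signed jiffies arithmetic, which had to take absolute values to paper over wraparound, with monotonic ktime_t deltas that keep sub-jiffy resolution. A condensed sketch of the resulting accounting pattern; struct op_timing is an illustrative stand-in for the timing fields of the real struct rpc_iostats, while the ktime helpers are the real <linux/ktime.h> API:

    #include <linux/ktime.h>

    /* illustrative stand-in for the timing fields of struct rpc_iostats */
    struct op_timing {
            ktime_t om_queue;       /* time spent waiting to transmit */
            ktime_t om_rtt;         /* network round-trip time */
            ktime_t om_execute;     /* total time from task creation */
    };

    static void count_op_timing(struct op_timing *m, ktime_t tk_start,
                                ktime_t rq_xtime, ktime_t rq_rtt)
    {
            ktime_t delta;

            /* queued: from task creation until the request hit the wire */
            delta = ktime_sub(rq_xtime, tk_start);
            m->om_queue = ktime_add(m->om_queue, delta);

            /* round trip, as measured in xprt_complete_rqst() */
            m->om_rtt = ktime_add(m->om_rtt, rq_rtt);

            /* execute: from task creation until completion (i.e. now) */
            delta = ktime_sub(ktime_get(), tk_start);
            m->om_execute = ktime_add(m->om_execute, delta);
    }

Only the reporting side converts to human units, via ktime_to_ms() in rpc_print_iostats(), which is what lets the MILLISECS_PER_JIFFY macro disappear.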
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 061b2e0..cbc0849 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -744,8 +744,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (rqstp->rq_deferred) { svc_xprt_received(xprt); len = svc_deferred_recv(rqstp); - } else + } else { len = xprt->xpt_ops->xpo_recvfrom(rqstp); + svc_xprt_received(xprt); + } dprintk("svc: got len=%d\n", len); } @@ -893,12 +895,12 @@ void svc_delete_xprt(struct svc_xprt *xprt) */ if (test_bit(XPT_TEMP, &xprt->xpt_flags)) serv->sv_tmpcnt--; + spin_unlock_bh(&serv->sv_lock); while ((dr = svc_deferred_dequeue(xprt)) != NULL) kfree(dr); svc_xprt_put(xprt); - spin_unlock_bh(&serv->sv_lock); } void svc_close_xprt(struct svc_xprt *xprt) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ce0d5b3..7e534dd 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -150,7 +150,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) } break; } - return; } /* @@ -547,7 +546,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) dprintk("svc: recvfrom returned error %d\n", -err); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); } - svc_xprt_received(&svsk->sk_xprt); return -EAGAIN; } len = svc_addr_len(svc_addr(rqstp)); @@ -562,11 +560,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) svsk->sk_sk->sk_stamp = skb->tstamp; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ - /* - * Maybe more packets - kick another thread ASAP. - */ - svc_xprt_received(&svsk->sk_xprt); - len = skb->len - sizeof(struct udphdr); rqstp->rq_arg.len = len; @@ -917,7 +910,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) if (len < want) { dprintk("svc: short recvfrom while reading record " "length (%d of %d)\n", len, want); - svc_xprt_received(&svsk->sk_xprt); goto err_again; /* record header not complete */ } @@ -953,7 +945,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) if (len < svsk->sk_reclen) { dprintk("svc: incomplete TCP record (%d of %d)\n", len, svsk->sk_reclen); - svc_xprt_received(&svsk->sk_xprt); goto err_again; /* record not complete */ } len = svsk->sk_reclen; @@ -961,14 +952,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) return len; error: - if (len == -EAGAIN) { + if (len == -EAGAIN) dprintk("RPC: TCP recv_record got EAGAIN\n"); - svc_xprt_received(&svsk->sk_xprt); - } return len; err_delete: set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); - svc_xprt_received(&svsk->sk_xprt); err_again: return -EAGAIN; } @@ -1110,7 +1098,6 @@ out: svsk->sk_tcplen = 0; svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); - svc_xprt_received(&svsk->sk_xprt); if (serv->sv_stats) serv->sv_stats->nettcpcnt++; @@ -1119,7 +1106,6 @@ out: err_again: if (len == -EAGAIN) { dprintk("RPC: TCP recvfrom got EAGAIN\n"); - svc_xprt_received(&svsk->sk_xprt); return len; } error: diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 2763fde..a1f82a8 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un __write_bytes_to_xdr_buf(&subbuf, obj, len); return 0; } +EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); int xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 699ade6..3fc3253 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -43,6 +43,7 @@ #include <linux/interrupt.h> #include 
<linux/workqueue.h> #include <linux/net.h> +#include <linux/ktime.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/metrics.h> @@ -62,7 +63,6 @@ * Local functions */ static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); -static inline void do_xprt_reserve(struct rpc_task *); static void xprt_connect_status(struct rpc_task *task); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); @@ -711,12 +711,16 @@ void xprt_connect(struct rpc_task *task) if (task->tk_rqstp) task->tk_rqstp->rq_bytes_sent = 0; - task->tk_timeout = xprt->connect_timeout; + task->tk_timeout = task->tk_rqstp->rq_timeout; rpc_sleep_on(&xprt->pending, task, xprt_connect_status); + + if (test_bit(XPRT_CLOSING, &xprt->state)) + return; + if (xprt_test_and_set_connecting(xprt)) + return; xprt->stat.connect_start = jiffies; xprt->ops->connect(task); } - return; } static void xprt_connect_status(struct rpc_task *task) @@ -771,25 +775,19 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) } EXPORT_SYMBOL_GPL(xprt_lookup_rqst); -/** - * xprt_update_rtt - update an RPC client's RTT state after receiving a reply - * @task: RPC request that recently completed - * - */ -void xprt_update_rtt(struct rpc_task *task) +static void xprt_update_rtt(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; unsigned timer = task->tk_msg.rpc_proc->p_timer; + long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); if (timer) { if (req->rq_ntrans == 1) - rpc_update_rtt(rtt, timer, - (long)jiffies - req->rq_xtime); + rpc_update_rtt(rtt, timer, m); rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); } } -EXPORT_SYMBOL_GPL(xprt_update_rtt); /** * xprt_complete_rqst - called when reply processing is complete @@ -807,7 +805,9 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) task->tk_pid, ntohl(req->rq_xid), copied); xprt->stat.recvs++; - task->tk_rtt = (long)jiffies - req->rq_xtime; + req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); + if (xprt->ops->timer != NULL) + xprt_update_rtt(task); list_del_init(&req->rq_list); req->rq_private_buf.len = copied; @@ -906,7 +906,7 @@ void xprt_transmit(struct rpc_task *task) return; req->rq_connect_cookie = xprt->connect_cookie; - req->rq_xtime = jiffies; + req->rq_xtime = ktime_get(); status = xprt->ops->send_request(task); if (status != 0) { task->tk_status = status; @@ -935,7 +935,7 @@ void xprt_transmit(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); } -static inline void do_xprt_reserve(struct rpc_task *task) +static void xprt_alloc_slot(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -955,6 +955,16 @@ static inline void do_xprt_reserve(struct rpc_task *task) rpc_sleep_on(&xprt->backlog, task, NULL); } +static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) +{ + memset(req, 0, sizeof(*req)); /* mark unused */ + + spin_lock(&xprt->reserve_lock); + list_add(&req->rq_list, &xprt->free); + rpc_wake_up_next(&xprt->backlog); + spin_unlock(&xprt->reserve_lock); +} + /** * xprt_reserve - allocate an RPC request slot * @task: RPC task requesting a slot allocation @@ -968,7 +978,7 @@ void xprt_reserve(struct rpc_task *task) task->tk_status = -EIO; spin_lock(&xprt->reserve_lock); - do_xprt_reserve(task); + xprt_alloc_slot(task); spin_unlock(&xprt->reserve_lock); } @@ -1006,14 +1016,10 @@ void xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt; struct rpc_rqst *req; - int is_bc_request; if (!(req = task->tk_rqstp)) return; - /* Preallocated 
backchannel request? */ - is_bc_request = bc_prealloc(req); - xprt = req->rq_xprt; rpc_count_iostats(task); spin_lock_bh(&xprt->transport_lock); @@ -1027,21 +1033,16 @@ void xprt_release(struct rpc_task *task) mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); - if (!bc_prealloc(req)) + if (req->rq_buffer) xprt->ops->buf_free(req->rq_buffer); task->tk_rqstp = NULL; if (req->rq_release_snd_buf) req->rq_release_snd_buf(req); dprintk("RPC: %5u release request %p\n", task->tk_pid, req); - if (likely(!is_bc_request)) { - memset(req, 0, sizeof(*req)); /* mark unused */ - - spin_lock(&xprt->reserve_lock); - list_add(&req->rq_list, &xprt->free); - rpc_wake_up_next(&xprt->backlog); - spin_unlock(&xprt->reserve_lock); - } else + if (likely(!bc_prealloc(req))) + xprt_free_slot(xprt, req); + else xprt_free_bc_request(req); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index f92e37e..0194de8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp, ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, rqstp->rq_arg.head[0].iov_len); - svc_xprt_received(rqstp->rq_xprt); return ret; } @@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) rqstp->rq_arg.head[0].iov_len); rqstp->rq_prot = IPPROTO_MAX; svc_xprt_copy_addrs(rqstp, xprt); - svc_xprt_received(xprt); return ret; close_out: @@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) */ set_bit(XPT_CLOSE, &xprt->xpt_flags); defer: - svc_xprt_received(xprt); return 0; } diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 187257b..a85e866 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args) /* 60 second timeout, no retries */ xprt->timeout = &xprt_rdma_default_timeout; xprt->bind_timeout = (60U * HZ); - xprt->connect_timeout = (60U * HZ); xprt->reestablish_timeout = (5U * HZ); xprt->idle_timeout = (5U * 60 * HZ); @@ -449,21 +448,19 @@ xprt_rdma_connect(struct rpc_task *task) struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); - if (!xprt_test_and_set_connecting(xprt)) { - if (r_xprt->rx_ep.rep_connected != 0) { - /* Reconnect */ - schedule_delayed_work(&r_xprt->rdma_connect, - xprt->reestablish_timeout); - xprt->reestablish_timeout <<= 1; - if (xprt->reestablish_timeout > (30 * HZ)) - xprt->reestablish_timeout = (30 * HZ); - else if (xprt->reestablish_timeout < (5 * HZ)) - xprt->reestablish_timeout = (5 * HZ); - } else { - schedule_delayed_work(&r_xprt->rdma_connect, 0); - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } + if (r_xprt->rx_ep.rep_connected != 0) { + /* Reconnect */ + schedule_delayed_work(&r_xprt->rdma_connect, + xprt->reestablish_timeout); + xprt->reestablish_timeout <<= 1; + if (xprt->reestablish_timeout > (30 * HZ)) + xprt->reestablish_timeout = (30 * HZ); + else if (xprt->reestablish_timeout < (5 * HZ)) + xprt->reestablish_timeout = (5 * HZ); + } else { + schedule_delayed_work(&r_xprt->rdma_connect, 0); + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); } } @@ -677,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task) if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) goto drop_connection; - task->tk_bytes_sent += rqst->rq_snd_buf.len; + rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; 
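	/*
	 * rq_xmit_bytes_sent replaces the old task->tk_bytes_sent counter
	 * (the same substitution appears in the xprtsock.c hunks below):
	 * transmitted bytes are now accumulated on the rpc_rqst itself
	 * and folded into the per-op metrics by rpc_count_iostats() in
	 * the stats.c hunk above.
	 */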
rqst->rq_bytes_sent = 0; return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9847c30..b7cd8cc 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = { #endif /* - * Time out for an RPC UDP socket connect. UDP socket connects are - * synchronous, but we set a timeout anyway in case of resource - * exhaustion on the local host. - */ -#define XS_UDP_CONN_TO (5U * HZ) - -/* - * Wait duration for an RPC TCP connection to be established. Solaris - * NFS over TCP uses 60 seconds, for example, which is in line with how - * long a server takes to reboot. - */ -#define XS_TCP_CONN_TO (60U * HZ) - -/* * Wait duration for a reply from the RPC portmapper. */ #define XS_BIND_TO (60U * HZ) @@ -542,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task) xdr->len - req->rq_bytes_sent, status); if (status >= 0) { - task->tk_bytes_sent += status; + req->rq_xmit_bytes_sent += status; if (status >= req->rq_slen) return 0; /* Still some bytes left; set up for a retry later. */ @@ -638,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task) /* If we've sent the entire packet, immediately * reset the count of bytes sent. */ req->rq_bytes_sent += status; - task->tk_bytes_sent += status; + req->rq_xmit_bytes_sent += status; if (likely(req->rq_bytes_sent >= req->rq_slen)) { req->rq_bytes_sent = 0; return 0; @@ -858,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) dst_confirm(skb_dst(skb)); xprt_adjust_cwnd(task, copied); - xprt_update_rtt(task); xprt_complete_rqst(task, copied); out_unlock: @@ -1050,8 +1035,6 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, if (transport->tcp_flags & TCP_RCV_LAST_FRAG) transport->tcp_flags &= ~TCP_RCV_COPY_DATA; } - - return; } /* @@ -2016,9 +1999,6 @@ static void xs_connect(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_xprt; struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); - if (xprt_test_and_set_connecting(xprt)) - return; - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", @@ -2038,16 +2018,6 @@ static void xs_connect(struct rpc_task *task) } } -static void xs_tcp_connect(struct rpc_task *task) -{ - struct rpc_xprt *xprt = task->tk_xprt; - - /* Exit if we need to wait for socket shutdown to complete */ - if (test_bit(XPRT_CLOSING, &xprt->state)) - return; - xs_connect(task); -} - /** * xs_udp_print_stats - display UDP socket-specifc stats * @xprt: rpc_xprt struct containing statistics @@ -2210,7 +2180,6 @@ static int bc_send_request(struct rpc_task *task) static void bc_close(struct rpc_xprt *xprt) { - return; } /* @@ -2220,7 +2189,6 @@ static void bc_close(struct rpc_xprt *xprt) static void bc_destroy(struct rpc_xprt *xprt) { - return; } static struct rpc_xprt_ops xs_udp_ops = { @@ -2246,7 +2214,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { .release_xprt = xs_tcp_release_xprt, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, - .connect = xs_tcp_connect, + .connect = xs_connect, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .send_request = xs_tcp_send_request, @@ -2337,7 +2305,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_UDP_CONN_TO; xprt->reestablish_timeout = XS_UDP_REEST_TO; xprt->idle_timeout = XS_IDLE_DISC_TO; @@ -2412,7 +2379,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create 
*args) xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_TCP_CONN_TO; xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; xprt->idle_timeout = XS_IDLE_DISC_TO; @@ -2472,9 +2438,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) struct sock_xprt *transport; struct svc_sock *bc_sock; - if (!args->bc_xprt) - ERR_PTR(-EINVAL); - xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); if (IS_ERR(xprt)) return xprt; @@ -2488,7 +2451,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) /* backchannel */ xprt_set_bound(xprt); xprt->bind_timeout = 0; - xprt->connect_timeout = 0; xprt->reestablish_timeout = 0; xprt->idle_timeout = 0; diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 5319600..ca84212 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -82,7 +82,6 @@ static int __net_init sysctl_net_init(struct net *net) static void __net_exit sysctl_net_exit(struct net *net) { WARN_ON(!list_empty(&net->sysctls.list)); - return; } static struct pernet_operations sysctl_pernet_ops = { diff --git a/net/tipc/addr.c b/net/tipc/addr.c index e5207a1..c048543 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -92,3 +92,35 @@ int tipc_addr_node_valid(u32 addr) return (tipc_addr_domain_valid(addr) && tipc_node(addr)); } +int tipc_in_scope(u32 domain, u32 addr) +{ + if (!domain || (domain == addr)) + return 1; + if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */ + return 1; + if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */ + return 1; + return 0; +} + +/** + * tipc_addr_scope - convert message lookup domain to a 2-bit scope value + */ + +int tipc_addr_scope(u32 domain) +{ + if (likely(!domain)) + return TIPC_ZONE_SCOPE; + if (tipc_node(domain)) + return TIPC_NODE_SCOPE; + if (tipc_cluster(domain)) + return TIPC_CLUSTER_SCOPE; + return TIPC_ZONE_SCOPE; +} + +char *tipc_addr_string_fill(char *string, u32 addr) +{ + snprintf(string, 16, "<%u.%u.%u>", + tipc_zone(addr), tipc_cluster(addr), tipc_node(addr)); + return string; +} diff --git a/net/tipc/addr.h b/net/tipc/addr.h index 3ba67e6..c1cc572 100644 --- a/net/tipc/addr.h +++ b/net/tipc/addr.h @@ -67,32 +67,6 @@ static inline int may_route(u32 addr) return(addr ^ tipc_own_addr) >> 11; } -static inline int in_scope(u32 domain, u32 addr) -{ - if (!domain || (domain == addr)) - return 1; - if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */ - return 1; - if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */ - return 1; - return 0; -} - -/** - * addr_scope - convert message lookup domain to equivalent 2-bit scope value - */ - -static inline int addr_scope(u32 domain) -{ - if (likely(!domain)) - return TIPC_ZONE_SCOPE; - if (tipc_node(domain)) - return TIPC_NODE_SCOPE; - if (tipc_cluster(domain)) - return TIPC_CLUSTER_SCOPE; - return TIPC_ZONE_SCOPE; -} - /** * addr_domain - convert 2-bit scope value to equivalent message lookup domain * @@ -110,14 +84,9 @@ static inline int addr_domain(int sc) return tipc_addr(tipc_zone(tipc_own_addr), 0, 0); } -static inline char *addr_string_fill(char *string, u32 addr) -{ - snprintf(string, 16, "<%u.%u.%u>", - tipc_zone(addr), tipc_cluster(addr), tipc_node(addr)); - return string; -} - int tipc_addr_domain_valid(u32); int tipc_addr_node_valid(u32 addr); - +int tipc_in_scope(u32 domain, u32 addr); +int tipc_addr_scope(u32 domain); +char *tipc_addr_string_fill(char *string, u32 addr); #endif diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 90a0519..a008c66 100644 --- a/net/tipc/bcast.c +++ 
b/net/tipc/bcast.c @@ -119,7 +119,7 @@ static struct bclink *bclink = NULL; static struct link *bcl = NULL; static DEFINE_SPINLOCK(bc_lock); -const char tipc_bclink_name[] = "multicast-link"; +const char tipc_bclink_name[] = "broadcast-link"; static u32 buf_seqno(struct sk_buff *buf) @@ -275,7 +275,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr) buf = buf_acquire(INT_H_SIZE); if (buf) { msg = buf_msg(buf); - msg_init(msg, BCAST_PROTOCOL, STATE_MSG, + tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, n_ptr->addr); msg_set_mc_netid(msg, tipc_net_id); msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); @@ -822,3 +822,113 @@ void tipc_bclink_stop(void) spin_unlock_bh(&bc_lock); } + +/** + * tipc_nmap_add - add a node to a node map + */ + +void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) +{ + int n = tipc_node(node); + int w = n / WSIZE; + u32 mask = (1 << (n % WSIZE)); + + if ((nm_ptr->map[w] & mask) == 0) { + nm_ptr->count++; + nm_ptr->map[w] |= mask; + } +} + +/** + * tipc_nmap_remove - remove a node from a node map + */ + +void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) +{ + int n = tipc_node(node); + int w = n / WSIZE; + u32 mask = (1 << (n % WSIZE)); + + if ((nm_ptr->map[w] & mask) != 0) { + nm_ptr->map[w] &= ~mask; + nm_ptr->count--; + } +} + +/** + * tipc_nmap_diff - find differences between node maps + * @nm_a: input node map A + * @nm_b: input node map B + * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) + */ + +void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, + struct tipc_node_map *nm_diff) +{ + int stop = ARRAY_SIZE(nm_a->map); + int w; + int b; + u32 map; + + memset(nm_diff, 0, sizeof(*nm_diff)); + for (w = 0; w < stop; w++) { + map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]); + nm_diff->map[w] = map; + if (map != 0) { + for (b = 0 ; b < WSIZE; b++) { + if (map & (1 << b)) + nm_diff->count++; + } + } + } +} + +/** + * tipc_port_list_add - add a port to a port list, ensuring no duplicates + */ + +void tipc_port_list_add(struct port_list *pl_ptr, u32 port) +{ + struct port_list *item = pl_ptr; + int i; + int item_sz = PLSIZE; + int cnt = pl_ptr->count; + + for (; ; cnt -= item_sz, item = item->next) { + if (cnt < PLSIZE) + item_sz = cnt; + for (i = 0; i < item_sz; i++) + if (item->ports[i] == port) + return; + if (i < PLSIZE) { + item->ports[i] = port; + pl_ptr->count++; + return; + } + if (!item->next) { + item->next = kmalloc(sizeof(*item), GFP_ATOMIC); + if (!item->next) { + warn("Incomplete multicast delivery, no memory\n"); + return; + } + item->next->next = NULL; + } + } +} + +/** + * tipc_port_list_free - free dynamically created entries in port_list chain + * + */ + +void tipc_port_list_free(struct port_list *pl_ptr) +{ + struct port_list *item; + struct port_list *next; + + for (item = pl_ptr->next; item; item = next) { + next = item->next; + kfree(item); + } +} + diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 4c1771e..e8c2b81 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -72,41 +72,11 @@ struct tipc_node; extern const char tipc_bclink_name[]; +void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); +void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); /** - * nmap_add - add a node to a node map - */ - -static inline void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) -{ - int n = tipc_node(node); - int w = n / WSIZE; - u32 mask = (1 << (n % WSIZE)); - - if ((nm_ptr->map[w] & mask) == 0) { - nm_ptr->count++; - nm_ptr->map[w] |= mask; - } -} - 
-/** - * nmap_remove - remove a node from a node map - */ - -static inline void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) -{ - int n = tipc_node(node); - int w = n / WSIZE; - u32 mask = (1 << (n % WSIZE)); - - if ((nm_ptr->map[w] & mask) != 0) { - nm_ptr->map[w] &= ~mask; - nm_ptr->count--; - } -} - -/** - * nmap_equal - test for equality of node maps + * tipc_nmap_equal - test for equality of node maps */ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) @@ -114,84 +84,11 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m return !memcmp(nm_a, nm_b, sizeof(*nm_a)); } -/** - * nmap_diff - find differences between node maps - * @nm_a: input node map A - * @nm_b: input node map B - * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) - */ - -static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, - struct tipc_node_map *nm_diff) -{ - int stop = ARRAY_SIZE(nm_a->map); - int w; - int b; - u32 map; - - memset(nm_diff, 0, sizeof(*nm_diff)); - for (w = 0; w < stop; w++) { - map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]); - nm_diff->map[w] = map; - if (map != 0) { - for (b = 0 ; b < WSIZE; b++) { - if (map & (1 << b)) - nm_diff->count++; - } - } - } -} - -/** - * port_list_add - add a port to a port list, ensuring no duplicates - */ - -static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port) -{ - struct port_list *item = pl_ptr; - int i; - int item_sz = PLSIZE; - int cnt = pl_ptr->count; - - for (; ; cnt -= item_sz, item = item->next) { - if (cnt < PLSIZE) - item_sz = cnt; - for (i = 0; i < item_sz; i++) - if (item->ports[i] == port) - return; - if (i < PLSIZE) { - item->ports[i] = port; - pl_ptr->count++; - return; - } - if (!item->next) { - item->next = kmalloc(sizeof(*item), GFP_ATOMIC); - if (!item->next) { - warn("Incomplete multicast delivery, no memory\n"); - return; - } - item->next->next = NULL; - } - } -} - -/** - * port_list_free - free dynamically created entries in port_list chain - * - * Note: First item is on stack, so it doesn't need to be released - */ - -static inline void tipc_port_list_free(struct port_list *pl_ptr) -{ - struct port_list *item; - struct port_list *next; - - for (item = pl_ptr->next; item; item = next) { - next = item->next; - kfree(item); - } -} +void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, + struct tipc_node_map *nm_diff); +void tipc_port_list_add(struct port_list *pl_ptr, u32 port); +void tipc_port_list_free(struct port_list *pl_ptr); int tipc_bclink_init(void); void tipc_bclink_stop(void); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 7809137..52ae17b 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -467,6 +467,18 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr) return res; } +/** + * tipc_bearer_congested - determines if bearer is currently congested + */ + +int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr) +{ + if (unlikely(b_ptr->publ.blocked)) + return 1; + if (likely(list_empty(&b_ptr->cong_links))) + return 0; + return !tipc_bearer_resolve_congestion(b_ptr, l_ptr); +} /** * tipc_enable_bearer - enable bearer with the given name @@ -493,7 +505,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority) return -EINVAL; } if (!tipc_addr_domain_valid(bcast_scope) || - !in_scope(bcast_scope, tipc_own_addr)) { + !tipc_in_scope(bcast_scope, tipc_own_addr)) { warn("Bearer <%s> rejected, 
illegal broadcast scope\n", name); return -EINVAL; } @@ -571,7 +583,7 @@ restart: spin_lock_init(&b_ptr->publ.lock); write_unlock_bh(&tipc_net_lock); info("Enabled bearer <%s>, discovery domain %s, priority %u\n", - name, addr_string_fill(addr_string, bcast_scope), priority); + name, tipc_addr_string_fill(addr_string, bcast_scope), priority); return 0; failed: write_unlock_bh(&tipc_net_lock); diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 000228e..a850b38 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -125,6 +125,7 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest); void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr); struct bearer *tipc_bearer_find_interface(const char *if_name); int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); +int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr); int tipc_bearer_init(void); void tipc_bearer_stop(void); void tipc_bearer_lock_push(struct bearer *b_ptr); @@ -154,17 +155,4 @@ static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf, return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest); } -/** - * tipc_bearer_congested - determines if bearer is currently congested - */ - -static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr) -{ - if (unlikely(b_ptr->publ.blocked)) - return 1; - if (likely(list_empty(&b_ptr->cong_links))) - return 0; - return !tipc_bearer_resolve_congestion(b_ptr, l_ptr); -} - -#endif +#endif /* _TIPC_BEARER_H */ diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index a7eac00..e68f705 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c @@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest) if (buf) { msg = buf_msg(buf); memset((char *)msg, 0, size); - msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest); + tipc_msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest); } return buf; } diff --git a/net/tipc/config.c b/net/tipc/config.c index ca3544d..961d1b0 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -56,9 +56,6 @@ struct subscr_data { struct manager { u32 user_ref; u32 port_ref; - u32 subscr_ref; - u32 link_subscriptions; - struct list_head link_subscribers; }; static struct manager mng = { 0}; @@ -70,12 +67,6 @@ static int req_tlv_space; /* request message TLV area size */ static int rep_headroom; /* reply message headroom to use */ -void tipc_cfg_link_event(u32 addr, char *name, int up) -{ - /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ -} - - struct sk_buff *tipc_cfg_reply_alloc(int payload_size) { struct sk_buff *buf; @@ -130,12 +121,24 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string) } - - #if 0 /* Now obsolete code for handling commands not yet implemented the new way */ +/* + * Some of this code assumed that the manager structure contains two added + * fields: + * u32 link_subscriptions; + * struct list_head link_subscribers; + * which are currently not present. These fields may need to be re-introduced + * if and when support for link subscriptions is added. 
+ */ + +void tipc_cfg_link_event(u32 addr, char *name, int up) +{ + /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ +} + int tipc_cfg_cmd(const struct tipc_cmd_msg * msg, char *data, u32 sz, @@ -243,13 +246,48 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg, default: rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig); } - exit: +exit: rmsg.result_len = htonl(msg_sect[1].iov_len); rmsg.retval = htonl(rv); tipc_cfg_respond(msg_sect, 2u, orig); } #endif +#define MAX_STATS_INFO 2000 + +static struct sk_buff *tipc_show_stats(void) +{ + struct sk_buff *buf; + struct tlv_desc *rep_tlv; + struct print_buf pb; + int str_len; + u32 value; + + if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) + return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); + + value = ntohl(*(u32 *)TLV_DATA(req_tlv_area)); + if (value != 0) + return tipc_cfg_reply_error_string("unsupported argument"); + + buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO)); + if (buf == NULL) + return NULL; + + rep_tlv = (struct tlv_desc *)buf->data; + tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO); + + tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); + + /* Use additional tipc_printf()'s to return more info ... */ + + str_len = tipc_printbuf_validate(&pb); + skb_put(buf, TLV_SPACE(str_len)); + TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); + + return buf; +} + static struct sk_buff *cfg_enable_bearer(void) { struct tipc_bearer_config *args; @@ -533,6 +571,9 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area case TIPC_CMD_DUMP_LOG: rep_tlv_buf = tipc_log_dump(); break; + case TIPC_CMD_SHOW_STATS: + rep_tlv_buf = tipc_show_stats(); + break; case TIPC_CMD_SET_LINK_TOL: case TIPC_CMD_SET_LINK_PRI: case TIPC_CMD_SET_LINK_WINDOW: @@ -667,9 +708,6 @@ int tipc_cfg_init(void) struct tipc_name_seq seq; int res; - memset(&mng, 0, sizeof(mng)); - INIT_LIST_HEAD(&mng.link_subscribers); - res = tipc_attach(&mng.user_ref, NULL, NULL); if (res) goto failed; diff --git a/net/tipc/core.c b/net/tipc/core.c index 4e84c84..6964681 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -49,8 +49,6 @@ #include "config.h" -#define TIPC_MOD_VER "2.0.0" - #ifndef CONFIG_TIPC_ZONES #define CONFIG_TIPC_ZONES 3 #endif @@ -104,6 +102,30 @@ int tipc_get_mode(void) } /** + * buf_acquire - creates a TIPC message buffer + * @size: message size (including TIPC header) + * + * Returns a new buffer with data pointers set to the specified size. + * + * NOTE: Headroom is reserved to allow prepending of a data link header. + * There may also be unrequested tailroom present at the buffer's end. 
+ */ + +struct sk_buff *buf_acquire(u32 size) +{ + struct sk_buff *skb; + unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; + + skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); + if (skb) { + skb_reserve(skb, BUF_HEADROOM); + skb_put(skb, size); + skb->next = NULL; + } + return skb; +} + +/** * tipc_core_stop_net - shut down TIPC networking sub-systems */ diff --git a/net/tipc/core.h b/net/tipc/core.h index c58a1d1..1887990 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -59,6 +59,9 @@ #include <linux/slab.h> #include <linux/vmalloc.h> + +#define TIPC_MOD_VER "2.0.0" + /* * TIPC sanity test macros */ @@ -325,29 +328,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb) return (struct tipc_msg *)skb->data; } -/** - * buf_acquire - creates a TIPC message buffer - * @size: message size (including TIPC header) - * - * Returns a new buffer with data pointers set to the specified size. - * - * NOTE: Headroom is reserved to allow prepending of a data link header. - * There may also be unrequested tailroom present at the buffer's end. - */ - -static inline struct sk_buff *buf_acquire(u32 size) -{ - struct sk_buff *skb; - unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; - - skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); - if (skb) { - skb_reserve(skb, BUF_HEADROOM); - skb_put(skb, size); - skb->next = NULL; - } - return skb; -} +extern struct sk_buff *buf_acquire(u32 size); /** * buf_discard - frees a TIPC message buffer diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 74b7d1e..fc1fcf5 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -120,7 +120,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, if (buf) { msg = buf_msg(buf); - msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain); + tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain); msg_set_non_seq(msg, 1); msg_set_req_links(msg, req_links); msg_set_dest_domain(msg, dest_domain); @@ -144,7 +144,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr, char media_addr_str[64]; struct print_buf pb; - addr_string_fill(node_addr_str, node_addr); + tipc_addr_string_fill(node_addr_str, node_addr); tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str)); tipc_media_addr_printf(&pb, media_addr); tipc_printbuf_validate(&pb); @@ -183,7 +183,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); return; } - if (!in_scope(dest, tipc_own_addr)) + if (!tipc_in_scope(dest, tipc_own_addr)) return; if (is_slave(tipc_own_addr) && is_slave(orig)) return; @@ -224,7 +224,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) memcpy(addr, &media_addr, sizeof(*addr)); tipc_link_reset(link); } - link_fully_up = (link->state == WORKING_WORKING); + link_fully_up = link_working_working(link); spin_unlock_bh(&n_ptr->lock); if ((type == DSC_RESP_MSG) || link_fully_up) return; diff --git a/net/tipc/link.c b/net/tipc/link.c index c76e82e..a3616b9 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -202,41 +202,6 @@ static unsigned int align(unsigned int i) return (i + 3) & ~3u; } -static int link_working_working(struct link *l_ptr) -{ - return (l_ptr->state == WORKING_WORKING); -} - -static int link_working_unknown(struct link *l_ptr) -{ - return (l_ptr->state == WORKING_UNKNOWN); -} - -static int link_reset_unknown(struct link *l_ptr) -{ - return (l_ptr->state == RESET_UNKNOWN); -} - -static int link_reset_reset(struct link *l_ptr) -{ - return (l_ptr->state == RESET_RESET); -} - -static int 
link_blocked(struct link *l_ptr) -{ - return (l_ptr->exp_msg_count || l_ptr->blocked); -} - -static int link_congested(struct link *l_ptr) -{ - return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]); -} - -static u32 link_max_pkt(struct link *l_ptr) -{ - return l_ptr->max_pkt; -} - static void link_init_max_pkt(struct link *l_ptr) { u32 max_pkt; @@ -468,7 +433,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer, l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; msg = l_ptr->pmsg; - msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); + tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); msg_set_size(msg, sizeof(l_ptr->proto_msg)); msg_set_session(msg, (tipc_random & 0xffff)); msg_set_bearer_id(msg, b_ptr->identity); @@ -561,9 +526,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz) goto exit; if (!list_empty(&p_ptr->wait_list)) goto exit; - p_ptr->congested_link = l_ptr; p_ptr->publ.congested = 1; - p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr)); + p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); l_ptr->stats.link_congs++; exit: @@ -592,7 +556,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all) if (win <= 0) break; list_del_init(&p_ptr->wait_list); - p_ptr->congested_link = NULL; spin_lock_bh(p_ptr->publ.lock); p_ptr->publ.congested = 0; p_ptr->wakeup(&p_ptr->publ); @@ -1017,7 +980,7 @@ static int link_bundle_buf(struct link *l_ptr, return 0; if (skb_tailroom(bundler) < (pad + size)) return 0; - if (link_max_pkt(l_ptr) < (to_pos + size)) + if (l_ptr->max_pkt < (to_pos + size)) return 0; skb_put(bundler, pad + size); @@ -1062,9 +1025,9 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) u32 size = msg_size(msg); u32 dsz = msg_data_sz(msg); u32 queue_size = l_ptr->out_queue_size; - u32 imp = msg_tot_importance(msg); + u32 imp = tipc_msg_tot_importance(msg); u32 queue_limit = l_ptr->queue_limit[imp]; - u32 max_packet = link_max_pkt(l_ptr); + u32 max_packet = l_ptr->max_pkt; msg_set_prevnode(msg, tipc_own_addr); /* If routed message */ @@ -1127,7 +1090,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) struct tipc_msg bundler_hdr; if (bundler) { - msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, + tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, INT_H_SIZE, l_ptr->addr); skb_copy_to_linear_data(bundler, &bundler_hdr, INT_H_SIZE); @@ -1195,7 +1158,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, int res = msg_data_sz(msg); if (likely(!link_congested(l_ptr))) { - if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) { + if (likely(msg_size(msg) <= l_ptr->max_pkt)) { if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { link_add_to_outqueue(l_ptr, buf, msg); if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, @@ -1212,7 +1175,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, } } else - *used_max_pkt = link_max_pkt(l_ptr); + *used_max_pkt = l_ptr->max_pkt; } return tipc_link_send_buf(l_ptr, buf); /* All other cases */ } @@ -1280,7 +1243,7 @@ again: * (Must not hold any locks while building message.) 
*/ - res = msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt, + res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt, !sender->user_port, &buf); read_lock_bh(&tipc_net_lock); @@ -1319,7 +1282,7 @@ exit: * then re-try fast path or fragment the message */ - sender->publ.max_pkt = link_max_pkt(l_ptr); + sender->publ.max_pkt = l_ptr->max_pkt; tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); @@ -1391,7 +1354,7 @@ again: /* Prepare reusable fragment header: */ msg_dbg(hdr, ">FRAGMENTING>"); - msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, + tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(hdr)); msg_set_link_selector(&fragm_hdr, sender->publ.ref); msg_set_size(&fragm_hdr, max_pkt); @@ -1482,8 +1445,8 @@ error: tipc_node_unlock(node); goto reject; } - if (link_max_pkt(l_ptr) < max_pkt) { - sender->publ.max_pkt = link_max_pkt(l_ptr); + if (l_ptr->max_pkt < max_pkt) { + sender->publ.max_pkt = l_ptr->max_pkt; tipc_node_unlock(node); for (; buf_chain; buf_chain = buf) { buf = buf_chain->next; @@ -1650,7 +1613,7 @@ static void link_reset_all(unsigned long addr) tipc_node_lock(n_ptr); warn("Resetting all links to %s\n", - addr_string_fill(addr_string, n_ptr->addr)); + tipc_addr_string_fill(addr_string, n_ptr->addr)); for (i = 0; i < MAX_BEARERS; i++) { if (n_ptr->links[i]) { @@ -1692,7 +1655,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf) n_ptr = l_ptr->owner->next; tipc_node_lock(n_ptr); - addr_string_fill(addr_string, n_ptr->addr); + tipc_addr_string_fill(addr_string, n_ptr->addr); tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string); tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported); tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked); @@ -2435,7 +2398,7 @@ void tipc_link_changeover(struct link *l_ptr) return; } - msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, + tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); msg_set_msgcnt(&tunnel_hdr, msgcount); @@ -2490,7 +2453,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel) struct sk_buff *iter; struct tipc_msg tunnel_hdr; - msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, + tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); @@ -2681,7 +2644,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) u32 dsz = msg_data_sz(inmsg); unchar *crs = buf->data; u32 rest = insize; - u32 pack_sz = link_max_pkt(l_ptr); + u32 pack_sz = l_ptr->max_pkt; u32 fragm_sz = pack_sz - INT_H_SIZE; u32 fragm_no = 1; u32 destaddr; @@ -2696,7 +2659,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) /* Prepare reusable fragment header: */ - msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, + tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, destaddr); msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); @@ -3127,7 +3090,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) tipc_printf(&pb, "Link <%s>\n" " %s MTU:%u Priority:%u Tolerance:%u ms" " Window:%u packets\n", - l_ptr->name, status, link_max_pkt(l_ptr), + l_ptr->name, status, l_ptr->max_pkt, l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); tipc_printf(&pb, " RX 
packets:%u fragments:%u/%u bundles:%u/%u\n", l_ptr->next_in_no - l_ptr->stats.recv_info, @@ -3272,7 +3235,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector) tipc_node_lock(n_ptr); l_ptr = n_ptr->active_links[selector & 1]; if (l_ptr) - res = link_max_pkt(l_ptr); + res = l_ptr->max_pkt; tipc_node_unlock(n_ptr); } read_unlock_bh(&tipc_net_lock); @@ -3330,9 +3293,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf, if (l_ptr->next_out) tipc_printf(buf, "%u..", msg_seqno(buf_msg(l_ptr->next_out))); - tipc_printf(buf, "%u]", - msg_seqno(buf_msg - (l_ptr->last_out)), l_ptr->out_queue_size); + tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out))); if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - msg_seqno(buf_msg(l_ptr->first_out))) != (l_ptr->out_queue_size - 1)) || diff --git a/net/tipc/link.h b/net/tipc/link.h index 6a51e38..2e5385c 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -292,4 +292,39 @@ static inline u32 lesser(u32 left, u32 right) return less_eq(left, right) ? left : right; } + +/* + * Link status checking routines + */ + +static inline int link_working_working(struct link *l_ptr) +{ + return (l_ptr->state == WORKING_WORKING); +} + +static inline int link_working_unknown(struct link *l_ptr) +{ + return (l_ptr->state == WORKING_UNKNOWN); +} + +static inline int link_reset_unknown(struct link *l_ptr) +{ + return (l_ptr->state == RESET_UNKNOWN); +} + +static inline int link_reset_reset(struct link *l_ptr) +{ + return (l_ptr->state == RESET_RESET); +} + +static inline int link_blocked(struct link *l_ptr) +{ + return (l_ptr->exp_msg_count || l_ptr->blocked); +} + +static inline int link_congested(struct link *l_ptr) +{ + return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]); +} + #endif diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 73dcd00..3810638 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -40,6 +40,100 @@ #include "msg.h" #include "bearer.h" +u32 tipc_msg_tot_importance(struct tipc_msg *m) +{ + if (likely(msg_isdata(m))) { + if (likely(msg_orignode(m) == tipc_own_addr)) + return msg_importance(m); + return msg_importance(m) + 4; + } + if ((msg_user(m) == MSG_FRAGMENTER) && + (msg_type(m) == FIRST_FRAGMENT)) + return msg_importance(msg_get_wrapped(m)); + return msg_importance(m); +} + + +void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, + u32 hsize, u32 destnode) +{ + memset(m, 0, hsize); + msg_set_version(m); + msg_set_user(m, user); + msg_set_hdr_sz(m, hsize); + msg_set_size(m, hsize); + msg_set_prevnode(m, tipc_own_addr); + msg_set_type(m, type); + if (!msg_short(m)) { + msg_set_orignode(m, tipc_own_addr); + msg_set_destnode(m, destnode); + } +} + +/** + * tipc_msg_calc_data_size - determine total data size for message + */ + +int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect) +{ + int dsz = 0; + int i; + + for (i = 0; i < num_sect; i++) + dsz += msg_sect[i].iov_len; + return dsz; +} + +/** + * tipc_msg_build - create message using specified header and data + * + * Note: Caller must not hold any locks in case copy_from_user() is interrupted! 
+ * + * Returns message data size or errno + */ + +int tipc_msg_build(struct tipc_msg *hdr, + struct iovec const *msg_sect, u32 num_sect, + int max_size, int usrmem, struct sk_buff** buf) +{ + int dsz, sz, hsz, pos, res, cnt; + + dsz = tipc_msg_calc_data_size(msg_sect, num_sect); + if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) { + *buf = NULL; + return -EINVAL; + } + + pos = hsz = msg_hdr_sz(hdr); + sz = hsz + dsz; + msg_set_size(hdr, sz); + if (unlikely(sz > max_size)) { + *buf = NULL; + return dsz; + } + + *buf = buf_acquire(sz); + if (!(*buf)) + return -ENOMEM; + skb_copy_to_linear_data(*buf, hdr, hsz); + for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { + if (likely(usrmem)) + res = !copy_from_user((*buf)->data + pos, + msg_sect[cnt].iov_base, + msg_sect[cnt].iov_len); + else + skb_copy_to_linear_data_offset(*buf, pos, + msg_sect[cnt].iov_base, + msg_sect[cnt].iov_len); + pos += msg_sect[cnt].iov_len; + } + if (likely(res)) + return dsz; + + buf_discard(*buf); + *buf = NULL; + return -EFAULT; +} #ifdef CONFIG_TIPC_DEBUG diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 7ee6ae2..995d2da 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -708,100 +708,13 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos) #define DSC_REQ_MSG 0 #define DSC_RESP_MSG 1 -static inline u32 msg_tot_importance(struct tipc_msg *m) -{ - if (likely(msg_isdata(m))) { - if (likely(msg_orignode(m) == tipc_own_addr)) - return msg_importance(m); - return msg_importance(m) + 4; - } - if ((msg_user(m) == MSG_FRAGMENTER) && - (msg_type(m) == FIRST_FRAGMENT)) - return msg_importance(msg_get_wrapped(m)); - return msg_importance(m); -} - - -static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, - u32 hsize, u32 destnode) -{ - memset(m, 0, hsize); - msg_set_version(m); - msg_set_user(m, user); - msg_set_hdr_sz(m, hsize); - msg_set_size(m, hsize); - msg_set_prevnode(m, tipc_own_addr); - msg_set_type(m, type); - if (!msg_short(m)) { - msg_set_orignode(m, tipc_own_addr); - msg_set_destnode(m, destnode); - } -} - -/** - * msg_calc_data_size - determine total data size for message - */ - -static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect) -{ - int dsz = 0; - int i; - - for (i = 0; i < num_sect; i++) - dsz += msg_sect[i].iov_len; - return dsz; -} - -/** - * msg_build - create message using specified header and data - * - * Note: Caller must not hold any locks in case copy_from_user() is interrupted! 
- * - * Returns message data size or errno - */ - -static inline int msg_build(struct tipc_msg *hdr, +u32 tipc_msg_tot_importance(struct tipc_msg *m); +void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, + u32 hsize, u32 destnode); +int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect); +int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, - int max_size, int usrmem, struct sk_buff** buf) -{ - int dsz, sz, hsz, pos, res, cnt; - - dsz = msg_calc_data_size(msg_sect, num_sect); - if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) { - *buf = NULL; - return -EINVAL; - } - - pos = hsz = msg_hdr_sz(hdr); - sz = hsz + dsz; - msg_set_size(hdr, sz); - if (unlikely(sz > max_size)) { - *buf = NULL; - return dsz; - } - - *buf = buf_acquire(sz); - if (!(*buf)) - return -ENOMEM; - skb_copy_to_linear_data(*buf, hdr, hsz); - for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { - if (likely(usrmem)) - res = !copy_from_user((*buf)->data + pos, - msg_sect[cnt].iov_base, - msg_sect[cnt].iov_len); - else - skb_copy_to_linear_data_offset(*buf, pos, - msg_sect[cnt].iov_base, - msg_sect[cnt].iov_len); - pos += msg_sect[cnt].iov_len; - } - if (likely(res)) - return dsz; - - buf_discard(*buf); - *buf = NULL; - return -EFAULT; -} + int max_size, int usrmem, struct sk_buff** buf); static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) { diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 10a6989..6ac3c54 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -103,7 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) if (buf != NULL) { msg = buf_msg(buf); - msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); + tipc_msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); msg_set_size(msg, LONG_H_SIZE + size); } return buf; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index acab41a..8ba7962 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -627,7 +627,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) struct name_seq *seq; u32 ref; - if (!in_scope(*destnode, tipc_own_addr)) + if (!tipc_in_scope(*destnode, tipc_own_addr)) return 0; read_lock_bh(&tipc_nametbl_lock); diff --git a/net/tipc/net.c b/net/tipc/net.c index d7cd1e0..f61b769 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -219,7 +219,7 @@ void tipc_net_route_msg(struct sk_buff *buf) /* Handle message for this node */ dnode = msg_short(msg) ? 
tipc_own_addr : msg_destnode(msg); - if (in_scope(dnode, tipc_own_addr)) { + if (tipc_in_scope(dnode, tipc_own_addr)) { if (msg_isdata(msg)) { if (msg_mcast(msg)) tipc_port_recv_mcast(buf, NULL); @@ -277,7 +277,7 @@ int tipc_net_start(u32 addr) info("Started in network mode\n"); info("Own node address %s, network identity %u\n", - addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); + tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); return 0; } diff --git a/net/tipc/node.c b/net/tipc/node.c index 17cc394..b634942 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -268,7 +268,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) if (n_ptr->link_cnt >= 2) { err("Attempt to create third link to %s\n", - addr_string_fill(addr_string, n_ptr->addr)); + tipc_addr_string_fill(addr_string, n_ptr->addr)); return NULL; } @@ -280,7 +280,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) } err("Attempt to establish second link on <%s> to %s\n", l_ptr->b_ptr->publ.name, - addr_string_fill(addr_string, l_ptr->addr)); + tipc_addr_string_fill(addr_string, l_ptr->addr)); } return NULL; } @@ -439,7 +439,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) return; info("Lost contact with %s\n", - addr_string_fill(addr_string, n_ptr->addr)); + tipc_addr_string_fill(addr_string, n_ptr->addr)); /* Abort link changeover */ for (i = 0; i < MAX_BEARERS; i++) { @@ -602,7 +602,7 @@ u32 tipc_available_nodes(const u32 domain) read_lock_bh(&tipc_net_lock); for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { - if (!in_scope(domain, n_ptr->addr)) + if (!tipc_in_scope(domain, n_ptr->addr)) continue; if (tipc_node_is_up(n_ptr)) cnt++; @@ -651,7 +651,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) /* Add TLVs for all nodes in scope */ for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { - if (!in_scope(domain, n_ptr->addr)) + if (!tipc_in_scope(domain, n_ptr->addr)) continue; node_info.addr = htonl(n_ptr->addr); node_info.up = htonl(tipc_node_is_up(n_ptr)); @@ -711,7 +711,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { u32 i; - if (!in_scope(domain, n_ptr->addr)) + if (!tipc_in_scope(domain, n_ptr->addr)) continue; tipc_node_lock(n_ptr); for (i = 0; i < MAX_BEARERS; i++) { diff --git a/net/tipc/port.c b/net/tipc/port.c index e70d27e..0737680 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -116,7 +116,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain, msg_set_namelower(hdr, seq->lower); msg_set_nameupper(hdr, seq->upper); msg_set_hdr_sz(hdr, MCAST_H_SIZE); - res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, + res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, !oport->user_port, &buf); if (unlikely(!buf)) return res; @@ -241,13 +241,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle, p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; p_ptr->publ.ref = ref; msg = &p_ptr->publ.phdr; - msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); + tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); msg_set_origport(msg, ref); p_ptr->last_in_seqno = 41; p_ptr->sent = 1; INIT_LIST_HEAD(&p_ptr->wait_list); INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); - p_ptr->congested_link = NULL; p_ptr->dispatcher = dispatcher; p_ptr->wakeup = wakeup; p_ptr->user_port = NULL; @@ -396,7 +395,7 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, buf = buf_acquire(LONG_H_SIZE); 
if (buf) { msg = buf_msg(buf); - msg_init(msg, usr, type, LONG_H_SIZE, destnode); + tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode); msg_set_errcode(msg, err); msg_set_destport(msg, destport); msg_set_origport(msg, origport); @@ -440,7 +439,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) return data_sz; } rmsg = buf_msg(rbuf); - msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); + tipc_msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); msg_set_errcode(rmsg, err); msg_set_destport(rmsg, msg_origport(msg)); msg_set_origport(rmsg, msg_destport(msg)); @@ -481,7 +480,7 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, struct sk_buff *buf; int res; - res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, + res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, !p_ptr->user_port, &buf); if (!buf) return res; @@ -1344,7 +1343,7 @@ int tipc_port_recv_sections(struct port *sender, unsigned int num_sect, struct sk_buff *buf; int res; - res = msg_build(&sender->publ.phdr, msg_sect, num_sect, + res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect, MAX_MSG_SIZE, !sender->user_port, &buf); if (likely(buf)) tipc_port_recv_msg(buf); @@ -1384,7 +1383,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect) if (port_unreliable(p_ptr)) { p_ptr->publ.congested = 0; /* Just calculate msg length and return */ - return msg_calc_data_size(msg_sect, num_sect); + return tipc_msg_calc_data_size(msg_sect, num_sect); } return -ELINKCONG; } @@ -1453,7 +1452,7 @@ int tipc_forward2name(u32 ref, struct port *p_ptr; struct tipc_msg *msg; u32 destnode = domain; - u32 destport = 0; + u32 destport; int res; p_ptr = tipc_port_deref(ref); @@ -1467,7 +1466,7 @@ int tipc_forward2name(u32 ref, msg_set_hdr_sz(msg, LONG_H_SIZE); msg_set_nametype(msg, name->type); msg_set_nameinst(msg, name->instance); - msg_set_lookup_scope(msg, addr_scope(domain)); + msg_set_lookup_scope(msg, tipc_addr_scope(domain)); if (importance <= TIPC_CRITICAL_IMPORTANCE) msg_set_importance(msg,importance); destport = tipc_nametbl_translate(name->type, name->instance, &destnode); @@ -1484,7 +1483,7 @@ int tipc_forward2name(u32 ref, return res; if (port_unreliable(p_ptr)) { /* Just calculate msg length and return */ - return msg_calc_data_size(msg_sect, num_sect); + return tipc_msg_calc_data_size(msg_sect, num_sect); } return -ELINKCONG; } @@ -1525,7 +1524,7 @@ int tipc_forward_buf2name(u32 ref, struct port *p_ptr; struct tipc_msg *msg; u32 destnode = domain; - u32 destport = 0; + u32 destport; int res; p_ptr = (struct port *)tipc_ref_deref(ref); @@ -1540,7 +1539,7 @@ int tipc_forward_buf2name(u32 ref, msg_set_origport(msg, orig->ref); msg_set_nametype(msg, name->type); msg_set_nameinst(msg, name->instance); - msg_set_lookup_scope(msg, addr_scope(domain)); + msg_set_lookup_scope(msg, tipc_addr_scope(domain)); msg_set_hdr_sz(msg, LONG_H_SIZE); msg_set_size(msg, LONG_H_SIZE + dsz); destport = tipc_nametbl_translate(name->type, name->instance, &destnode); @@ -1620,7 +1619,7 @@ int tipc_forward2port(u32 ref, return res; if (port_unreliable(p_ptr)) { /* Just calculate msg length and return */ - return msg_calc_data_size(msg_sect, num_sect); + return tipc_msg_calc_data_size(msg_sect, num_sect); } return -ELINKCONG; } diff --git a/net/tipc/port.h b/net/tipc/port.h index ff31ee4..8d1652a 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -75,7 +75,6 @@ struct user_port { * @wakeup: ptr to routine to call when port is no longer congested * @user_port: ptr to user port 
associated with port (if any) * @wait_list: adjacent ports in list of ports waiting on link congestion - * @congested_link: ptr to congested link port is waiting on * @waiting_pkts: * @sent: * @acked: @@ -95,7 +94,6 @@ struct port { void (*wakeup)(struct tipc_port *); struct user_port *user_port; struct list_head wait_list; - struct link *congested_link; u32 waiting_pkts; u32 sent; u32 acked; diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index e978c71..2609e44 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c @@ -43,7 +43,7 @@ * wimax_rfkill() Kernel calling wimax_rfkill() * __wimax_rf_toggle_radio() * - * wimax_rfkill_set_radio_block() RF-Kill subsytem calling + * wimax_rfkill_set_radio_block() RF-Kill subsystem calling * __wimax_rf_toggle_radio() * * __wimax_rf_toggle_radio() diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 1ed65db..ee99e7d 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -315,12 +315,11 @@ void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) BUG(); } __wimax_state_set(wimax_dev, new_state); - if (stch_skb) + if (!IS_ERR(stch_skb)) wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header); out: d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", wimax_dev, new_state, old_state); - return; } @@ -362,7 +361,6 @@ void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) if (wimax_dev->state > __WIMAX_ST_NULL) __wimax_state_change(wimax_dev, new_state); mutex_unlock(&wimax_dev->mutex); - return; } EXPORT_SYMBOL_GPL(wimax_state_change); diff --git a/net/wireless/chan.c b/net/wireless/chan.c index bf1737f..d92d088 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -10,38 +10,6 @@ #include "core.h" struct ieee80211_channel * -rdev_fixed_channel(struct cfg80211_registered_device *rdev, - struct wireless_dev *for_wdev) -{ - struct wireless_dev *wdev; - struct ieee80211_channel *result = NULL; - - WARN_ON(!mutex_is_locked(&rdev->devlist_mtx)); - - list_for_each_entry(wdev, &rdev->netdev_list, list) { - if (wdev == for_wdev) - continue; - - /* - * Lock manually to tell lockdep about allowed - * nesting here if for_wdev->mtx is held already. - * This is ok as it's all under the rdev devlist - * mutex and as such can only be done once at any - * given time. 
- */ - mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING); - if (wdev->current_bss) - result = wdev->current_bss->pub.channel; - wdev_unlock(wdev); - - if (result) - break; - } - - return result; -} - -struct ieee80211_channel * rdev_freq_to_chan(struct cfg80211_registered_device *rdev, int freq, enum nl80211_channel_type channel_type) { @@ -75,15 +43,22 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, return chan; } -int rdev_set_freq(struct cfg80211_registered_device *rdev, - struct wireless_dev *for_wdev, - int freq, enum nl80211_channel_type channel_type) +int cfg80211_set_freq(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, int freq, + enum nl80211_channel_type channel_type) { struct ieee80211_channel *chan; int result; - if (rdev_fixed_channel(rdev, for_wdev)) - return -EBUSY; + if (wdev->iftype == NL80211_IFTYPE_MONITOR) + wdev = NULL; + + if (wdev) { + ASSERT_WDEV_LOCK(wdev); + + if (!netif_running(wdev->netdev)) + return -ENETDOWN; + } if (!rdev->ops->set_channel) return -EOPNOTSUPP; @@ -92,11 +67,14 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev, if (!chan) return -EINVAL; - result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); + result = rdev->ops->set_channel(&rdev->wiphy, + wdev ? wdev->netdev : NULL, + chan, channel_type); if (result) return result; - rdev->channel = chan; + if (wdev) + wdev->channel = chan; return 0; } diff --git a/net/wireless/core.h b/net/wireless/core.h index b2234b4..ae930ac 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -70,9 +70,6 @@ struct cfg80211_registered_device { struct work_struct conn_work; struct work_struct event_work; - /* current channel */ - struct ieee80211_channel *channel; - /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); @@ -388,14 +385,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); struct ieee80211_channel * -rdev_fixed_channel(struct cfg80211_registered_device *rdev, - struct wireless_dev *for_wdev); -struct ieee80211_channel * rdev_freq_to_chan(struct cfg80211_registered_device *rdev, int freq, enum nl80211_channel_type channel_type); -int rdev_set_freq(struct cfg80211_registered_device *rdev, - struct wireless_dev *for_wdev, - int freq, enum nl80211_channel_type channel_type); +int cfg80211_set_freq(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, int freq, + enum nl80211_channel_type channel_type); u16 cfg80211_calculate_bitrate(struct rate_info *rate); diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 6a5acf7..adcabba 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -81,15 +81,10 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct cfg80211_cached_keys *connkeys) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct ieee80211_channel *chan; int err; ASSERT_WDEV_LOCK(wdev); - chan = rdev_fixed_channel(rdev, wdev); - if (chan && chan != params->channel) - return -EBUSY; - if (wdev->ssid_len) return -EALREADY; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 01da83dd..aaa1aad 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -589,6 +589,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, i++; NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); } + CMD(set_channel, SET_CHANNEL); #undef CMD @@ 
-689,10 +690,90 @@ static int parse_txq_params(struct nlattr *tb[], return 0; } +static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) +{ + /* + * You can only set the channel explicitly for AP, mesh + * and WDS type interfaces; all others have their channel + * managed via their respective "establish a connection" + * command (connect, join, ...) + * + * Monitors are special as they are normally slaved to + * whatever else is going on, so they behave as though + * you tried setting the wiphy channel itself. + */ + return !wdev || + wdev->iftype == NL80211_IFTYPE_AP || + wdev->iftype == NL80211_IFTYPE_WDS || + wdev->iftype == NL80211_IFTYPE_MESH_POINT || + wdev->iftype == NL80211_IFTYPE_MONITOR; +} + +static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + struct genl_info *info) +{ + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + u32 freq; + int result; + + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) + return -EINVAL; + + if (!nl80211_can_set_dev_channel(wdev)) + return -EOPNOTSUPP; + + if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { + channel_type = nla_get_u32(info->attrs[ + NL80211_ATTR_WIPHY_CHANNEL_TYPE]); + if (channel_type != NL80211_CHAN_NO_HT && + channel_type != NL80211_CHAN_HT20 && + channel_type != NL80211_CHAN_HT40PLUS && + channel_type != NL80211_CHAN_HT40MINUS) + return -EINVAL; + } + + freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); + + mutex_lock(&rdev->devlist_mtx); + if (wdev) { + wdev_lock(wdev); + result = cfg80211_set_freq(rdev, wdev, freq, channel_type); + wdev_unlock(wdev); + } else { + result = cfg80211_set_freq(rdev, NULL, freq, channel_type); + } + mutex_unlock(&rdev->devlist_mtx); + + return result; +} + +static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *netdev; + int result; + + rtnl_lock(); + + result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev); + if (result) + goto unlock; + + result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info); + + unlock: + rtnl_unlock(); + + return result; +} + static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; - int result = 0, rem_txq_params = 0; + struct net_device *netdev = NULL; + struct wireless_dev *wdev; + int result, rem_txq_params = 0; struct nlattr *nl_txq_params; u32 changed; u8 retry_short = 0, retry_long = 0; @@ -701,16 +782,50 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) rtnl_lock(); + /* + * Try to find the wiphy and netdev. Normally this + * function shouldn't need the netdev, but this is + * done for backward compatibility -- previously + * setting the channel was done per wiphy, but now + * it is per netdev. Previous userland like hostapd + * also passed a netdev to set_wiphy, so that it is + * possible to let that go to the right netdev! 
+ */ mutex_lock(&cfg80211_mutex); - rdev = __cfg80211_rdev_from_info(info); - if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); - result = PTR_ERR(rdev); - goto unlock; + if (info->attrs[NL80211_ATTR_IFINDEX]) { + int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); + + netdev = dev_get_by_index(genl_info_net(info), ifindex); + if (netdev && netdev->ieee80211_ptr) { + rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); + mutex_lock(&rdev->mtx); + } else + netdev = NULL; } - mutex_lock(&rdev->mtx); + if (!netdev) { + rdev = __cfg80211_rdev_from_info(info); + if (IS_ERR(rdev)) { + mutex_unlock(&cfg80211_mutex); + result = PTR_ERR(rdev); + goto unlock; + } + wdev = NULL; + netdev = NULL; + result = 0; + + mutex_lock(&rdev->mtx); + } else if (netif_running(netdev) && + nl80211_can_set_dev_channel(netdev->ieee80211_ptr)) + wdev = netdev->ieee80211_ptr; + else + wdev = NULL; + + /* + * end workaround code, by now the rdev is available + * and locked, and wdev may or may not be NULL. + */ if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( @@ -749,26 +864,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { - enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; - u32 freq; - - result = -EINVAL; - - if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { - channel_type = nla_get_u32(info->attrs[ - NL80211_ATTR_WIPHY_CHANNEL_TYPE]); - if (channel_type != NL80211_CHAN_NO_HT && - channel_type != NL80211_CHAN_HT20 && - channel_type != NL80211_CHAN_HT40PLUS && - channel_type != NL80211_CHAN_HT40MINUS) - goto bad_res; - } - - freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); - - mutex_lock(&rdev->devlist_mtx); - result = rdev_set_freq(rdev, NULL, freq, channel_type); - mutex_unlock(&rdev->devlist_mtx); + result = __nl80211_set_channel(rdev, wdev, info); if (result) goto bad_res; } @@ -865,6 +961,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) bad_res: mutex_unlock(&rdev->mtx); + if (netdev) + dev_put(netdev); unlock: rtnl_unlock(); return result; @@ -3562,9 +3660,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *dev; - struct wireless_dev *wdev; struct cfg80211_crypto_settings crypto; - struct ieee80211_channel *chan, *fixedchan; + struct ieee80211_channel *chan; const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; int err, ssid_len, ie_len = 0; bool use_mfp = false; @@ -3607,16 +3704,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto out; } - mutex_lock(&rdev->devlist_mtx); - wdev = dev->ieee80211_ptr; - fixedchan = rdev_fixed_channel(rdev, wdev); - if (fixedchan && chan != fixedchan) { - err = -EBUSY; - mutex_unlock(&rdev->devlist_mtx); - goto out; - } - mutex_unlock(&rdev->devlist_mtx); - ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); @@ -5186,6 +5273,12 @@ static struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, + { + .cmd = NL80211_CMD_SET_CHANNEL, + .doit = nl80211_set_channel, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_multicast_group nl80211_mlme_mcgrp = { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8ddf5ae..72222f0 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -741,7 +741,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, const u8 *prev_bssid) { 
 struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
 struct cfg80211_bss *bss = NULL;
 int err;
 
@@ -750,10 +749,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
 if (wdev->sme_state != CFG80211_SME_IDLE)
 return -EALREADY;
 
- chan = rdev_fixed_channel(rdev, wdev);
- if (chan && chan != connect->channel)
- return -EBUSY;
-
 if (WARN_ON(wdev->connect_keys)) {
 kfree(wdev->connect_keys);
 wdev->connect_keys = NULL;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a60a277..9634299 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -782,16 +782,22 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
 return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
 case NL80211_IFTYPE_ADHOC:
 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
- default:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
 if (freq < 0)
 return freq;
 if (freq == 0)
 return -EINVAL;
+ wdev_lock(wdev);
 mutex_lock(&rdev->devlist_mtx);
- err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
 mutex_unlock(&rdev->devlist_mtx);
+ wdev_unlock(wdev);
 return err;
+ default:
+ return -EOPNOTSUPP;
 }
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
@@ -801,7 +807,6 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
 struct iw_freq *freq, char *extra)
 {
 struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 
 switch (wdev->iftype) {
 case NL80211_IFTYPE_STATION:
@@ -809,9 +814,9 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
 case NL80211_IFTYPE_ADHOC:
 return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
 default:
- if (!rdev->channel)
+ if (!wdev->channel)
 return -EINVAL;
- freq->m = rdev->channel->center_freq;
+ freq->m = wdev->channel->center_freq;
 freq->e = 6;
 return 0;
 }
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index d5c6140..9818198 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -108,7 +108,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 
 /* SSID is not set, we just want to switch channel */
 if (chan && !wdev->wext.connect.ssid_len) {
- err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
 goto out;
 }
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 296e65e..5e86d4e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -453,7 +453,6 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
 struct sock *sk = sock->sk;
 int rc = -ENOPROTOOPT;
 
- lock_kernel();
 if (level != SOL_X25 || optname != X25_QBITINCL)
 goto out;
 
@@ -465,10 +464,12 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
 if (get_user(opt, (int __user *)optval))
 goto out;
 
- x25_sk(sk)->qbitincl = !!opt;
+ if (opt)
+ set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
+ else
+ clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
 rc = 0;
 out:
- unlock_kernel();
 return rc;
 }
 
@@ -478,7 +479,6 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
 struct sock *sk = sock->sk;
 int val, len, rc = -ENOPROTOOPT;
 
- lock_kernel();
 if (level != SOL_X25 || optname != X25_QBITINCL)
 goto out;
 
@@ -496,10 +496,9 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
 if (put_user(len, optlen))
 goto out;
 
- val = x25_sk(sk)->qbitincl;
+ val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
 rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
 out:
- unlock_kernel();
 return rc;
 }
 
@@ -583,7 +582,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
 x25->t2 = sysctl_x25_ack_holdback_timeout;
 x25->state = X25_STATE_0;
 x25->cudmatchlength = 0;
- x25->accptapprv = X25_DENY_ACCPT_APPRV; /* normally no cud */
+ set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */
 /* on call accept */
 
 x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
@@ -632,12 +631,12 @@ static struct sock *x25_make_new(struct sock *osk)
 x25->t22 = ox25->t22;
 x25->t23 = ox25->t23;
 x25->t2 = ox25->t2;
+ x25->flags = ox25->flags;
 x25->facilities = ox25->facilities;
- x25->qbitincl = ox25->qbitincl;
 x25->dte_facilities = ox25->dte_facilities;
 x25->cudmatchlength = ox25->cudmatchlength;
- x25->accptapprv = ox25->accptapprv;
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
 x25_init_timers(sk);
 out:
 return sk;
@@ -1053,8 +1052,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
 makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
 
- /* Normally all calls are accepted immediatly */
- if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
+ /* Normally all calls are accepted immediately */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
 x25_write_internal(make, X25_CALL_ACCEPTED);
 makex25->state = X25_STATE_3;
 }
@@ -1186,7 +1185,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
 * If the Q BIT Include socket option is in force, the first
 * byte of the user data is the logical value of the Q Bit.
 */
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
 qbit = skb->data[0];
 skb_pull(skb, 1);
 }
@@ -1242,7 +1241,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
 len = rc;
 if (rc < 0)
 kfree_skb(skb);
- else if (x25->qbitincl)
+ else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
 len++;
 }
 
@@ -1307,7 +1306,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 /*
 * No Q bit information on Interrupt data.
 */
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
 asmptr = skb_push(skb, 1);
 *asmptr = 0x00;
 }
@@ -1325,7 +1324,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 skb_pull(skb, x25->neighbour->extended ?
 X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
 
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
 asmptr = skb_push(skb, 1);
 *asmptr = qbit;
 }
@@ -1576,7 +1575,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 rc = -EINVAL;
 if (sk->sk_state != TCP_CLOSE)
 break;
- x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
+ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
 rc = 0;
 break;
 }
@@ -1585,7 +1584,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 rc = -EINVAL;
 if (sk->sk_state != TCP_ESTABLISHED)
 break;
- if (x25->accptapprv) /* must call accptapprv above */
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
 break;
 x25_write_internal(sk, X25_CALL_ACCEPTED);
 x25->state = X25_STATE_3;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 372ac22..6317896 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -273,7 +273,7 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 break;
 
 case X25_INTERRUPT_CONFIRMATION:
- x25->intflag = 0;
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
 break;
 
 case X25_INTERRUPT:
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index 52351a2..d00649f 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -148,8 +148,9 @@ void x25_kick(struct sock *sk)
 /*
 * Transmit interrupt data.
 */
- if (!x25->intflag && skb_peek(&x25->interrupt_out_queue) != NULL) {
- x25->intflag = 1;
+ if (skb_peek(&x25->interrupt_out_queue) != NULL &&
+ !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {
+
 skb = skb_dequeue(&x25->interrupt_out_queue);
 x25_transmit_link(skb, x25->neighbour);
 }
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 1396572..8e69533 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -55,7 +55,7 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
 case AF_INET6:
 h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
 break;
- };
+ }
 return (h ^ (h >> 16)) & hmask;
 }
 
@@ -102,7 +102,7 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short
 h = __xfrm6_daddr_saddr_hash(daddr, saddr);
 break;
- };
+ }
 h ^= (h >> 16);
 return h & hmask;
 }
@@ -119,7 +119,7 @@ static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *sa
 case AF_INET6:
 h = __xfrm6_daddr_saddr_hash(daddr, saddr);
 break;
- };
+ }
 h ^= (h >> 16);
 return h & hmask;
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 31f4ba4..d965a2b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1805,7 +1805,7 @@ restart:
 /* EREMOTE tells the caller to generate
 * a one-shot blackhole route. */
 dst_release(dst);
- xfrm_pols_put(pols, num_pols);
+ xfrm_pols_put(pols, drop_pols);
 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
 return -EREMOTE;
 }
@@ -2209,7 +2209,6 @@ EXPORT_SYMBOL(xfrm_dst_ifdown);
 static void xfrm_link_failure(struct sk_buff *skb)
 {
 /* Impossible. Such dst must be popped before reaches point of failure. */
- return;
 }
 
 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a267fbd..ba59983 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1783,7 +1783,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 } else {
 // reset the timers here?
- printk("Dont know what to do with soft policy expire\n"); + WARN(1, "Dont know what to do with soft policy expire\n"); } km_policy_expired(xp, p->dir, up->hard, current->pid); @@ -1883,7 +1883,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; bad_policy: - printk("BAD policy passed\n"); + WARN(1, "BAD policy passed\n"); free_state: kfree(x); nomem: @@ -2385,8 +2385,9 @@ static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c) case XFRM_MSG_FLUSHSA: return xfrm_notify_sa_flush(c); default: - printk("xfrm_user: Unknown SA event %d\n", c->event); - break; + printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", + c->event); + break; } return 0; @@ -2676,7 +2677,8 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_ev case XFRM_MSG_POLEXPIRE: return xfrm_exp_policy_notify(xp, dir, c); default: - printk("xfrm_user: Unknown Policy event %d\n", c->event); + printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", + c->event); } return 0; |