author     KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 21:09:35 +0200
committer  KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 21:09:35 +0200
commit     123dc2ce092ee26ac57c2b3962aeb8ed9b367fb6 (patch)
tree       a2b8a9260352029ee735b96f62b90435f4cb7bfb
parent     70268992c575948147c0e9c71d1e1ee0517a16ed (diff)
parent     29715fe22f6e7ea5d84c2872fd5dd2d407ed5083 (diff)
Merge commit '29715fe' into HEAD
-rw-r--r--  drivers/net/r8169.c        |  64
-rw-r--r--  include/linux/mempolicy.h  |   2
-rw-r--r--  mm/mempolicy.c             | 125
3 files changed, 125 insertions(+), 66 deletions(-)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0335ab0..f7a56f4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -58,8 +58,12 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_BUFFS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+ (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
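The new TX_SLOTS_AVAIL counts free descriptors directly (the old TX_BUFFS_AVAIL kept one slot in reserve), and TX_FRAGS_READY_FOR encodes the "nr_frags + 1 entries" rule from the comment. A standalone sketch of the counter arithmetic, with hypothetical values, showing why free-running unsigned counters need no explicit wraparound handling:

#include <stdio.h>

#define NUM_TX_DESC 64U

/* dirty_tx and cur_tx are free-running counters; their difference is
 * the number of in-flight descriptors, so unsigned modular arithmetic
 * yields the free-slot count even across 32-bit wraparound. */
static unsigned int slots_avail(unsigned int dirty_tx, unsigned int cur_tx)
{
	return dirty_tx + NUM_TX_DESC - cur_tx;
}

int main(void)
{
	printf("%u\n", slots_avail(10, 60));                    /* 14 free */
	printf("%u\n", slots_avail(0xfffffff0U, 0xfffffff8U));  /* 56 free */
	return 0;
}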
@@ -299,6 +303,8 @@ enum rtl_registers {
Config0 = 0x51,
Config1 = 0x52,
Config2 = 0x53,
+#define PME_SIGNAL (1 << 5) /* 8168c and later */
+
Config3 = 0x54,
Config4 = 0x55,
Config5 = 0x56,
@@ -448,7 +454,6 @@ enum rtl_register_content {
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
@@ -456,6 +461,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -1223,7 +1229,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
u16 reg;
u8 mask;
} cfg[] = {
- { WAKE_ANY, Config1, PMEnable },
{ WAKE_PHY, Config3, LinkUp },
{ WAKE_MAGIC, Config3, MagicPacket },
{ WAKE_UCAST, Config5, UWF },
@@ -1231,16 +1236,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{ WAKE_MCAST, Config5, MWF },
{ WAKE_ANY, Config5, LanWake }
};
+ u8 options;
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
- u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+ options = RTL_R8(Config1) & ~PMEnable;
+ if (wolopts)
+ options |= PMEnable;
+ RTL_W8(Config1, options);
+ break;
+ default:
+ options = RTL_R8(Config2) & ~PME_SIGNAL;
+ if (wolopts)
+ options |= PME_SIGNAL;
+ RTL_W8(Config2, options);
+ break;
+ }
+
RTL_W8(Cfg9346, Cfg9346_Lock);
}
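The switch above uses GCC's case-range extension to split chips by generation: pre-8168c parts raise PME via Config1.PMEnable, while later parts use Config2 bit 5 (PME_SIGNAL). A minimal standalone sketch of the dispatch shape (enum values hypothetical):

enum mac_ver { VER_01 = 1, VER_17 = 17, VER_34 = 34 };

static int pme_in_config1(enum mac_ver v)
{
	switch (v) {
	case VER_01 ... VER_17:	/* GNU C case range: older parts */
		return 1;
	default:		/* 8168c and later */
		return 0;
	}
}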
@@ -3003,22 +3024,24 @@ static const struct rtl_cfg_info {
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
@@ -3588,7 +3611,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
@@ -3683,6 +3706,7 @@ out:
return rc;
err_out_msi_4:
+ netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
err_out_free_res_3:
@@ -3708,6 +3732,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&tp->task);
+ netif_napi_del(&tp->napi);
+
unregister_netdev(dev);
rtl_release_firmware(tp);
@@ -4905,7 +4931,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts[2];
int frags;
- if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -4953,10 +4979,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
RTL_W8(TxPoll, NPQ);
- if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
netif_stop_queue(dev);
- smp_rmb();
- if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ smp_mb();
+ if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
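Replacing smp_rmb()/smp_wmb() with smp_mb() on both sides fixes the classic stop-queue/wake race: each side performs a store followed by a load of the other side's state, and only a full barrier orders a store against a later load. A C11-atomics sketch of the pairing, with hypothetical state standing in for netif_stop_queue()/netif_wake_queue() (NEEDED is a stand-in for MAX_SKB_FRAGS + 1):

#include <stdatomic.h>
#include <stdbool.h>

#define NUM_DESC 64u
#define NEEDED   18u	/* hypothetical MAX_SKB_FRAGS + 1 */

static atomic_uint cur_tx, dirty_tx;
static atomic_bool queue_stopped;

static bool slots_free(void)
{
	return atomic_load(&dirty_tx) + NUM_DESC - atomic_load(&cur_tx) >= NEEDED;
}

/* Producer (xmit path): stop the queue, full barrier, re-check. */
static void producer_check(void)
{
	if (!slots_free()) {
		atomic_store(&queue_stopped, true);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (slots_free())
			atomic_store(&queue_stopped, false);	/* wake */
	}
}

/* Consumer (tx completion): publish reclaimed slots, full barrier, check. */
static void consumer_check(unsigned int new_dirty)
{
	atomic_store(&dirty_tx, new_dirty);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	if (atomic_load(&queue_stopped) && slots_free())
		atomic_store(&queue_stopped, false);		/* wake */
}

With a full barrier crossed on each side, at least one of the two re-checks is guaranteed to observe the other side's store, so the queue can never be left stopped while slots are free.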
@@ -5056,9 +5082,9 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
- smp_wmb();
+ smp_mb();
if (netif_queue_stopped(dev) &&
- (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
@@ -5067,7 +5093,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
* of start_xmit activity is detected (if it is not detected,
* it is slow enough). -- FR
*/
- smp_rmb();
if (tp->cur_tx != dirty_tx)
RTL_W8(TxPoll, NPQ);
}
@@ -5569,6 +5594,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &pdev->dev;
+
+ pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -5597,6 +5625,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
+
+ pm_runtime_put_noidle(d);
}
static struct pci_driver rtl8169_pci_driver = {
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7978eec..3e8f2f7 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -188,7 +188,7 @@ struct sp_node {
struct shared_policy {
struct rb_root root;
- spinlock_t lock;
+ struct mutex mutex;
};
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
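This pairs with the mm/mempolicy.c changes below: shared_policy_replace() needs to allocate sp_nodes while the tree is protected, and a GFP_KERNEL allocation may sleep, which is forbidden under a spinlock but legal under a mutex. A minimal sketch of the resulting pattern (kernel context assumed):

	mutex_lock(&sp->mutex);
	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);	/* may sleep: legal under a mutex */
	if (n) {
		/* ... initialize n ... */
		sp_insert(sp, n);			/* tree mutated in one critical section */
	}
	mutex_unlock(&sp->mutex);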
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3f3cc56..6a569cc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -606,6 +606,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return first;
}
+/*
+ * Apply policy to a single VMA
+ * This must be called with the mmap_sem held for writing.
+ */
+static int vma_replace_policy(struct vm_area_struct *vma,
+ struct mempolicy *pol)
+{
+ int err;
+ struct mempolicy *old;
+ struct mempolicy *new;
+
+ pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_ops, vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+ new = mpol_dup(pol);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ if (vma->vm_ops && vma->vm_ops->set_policy) {
+ err = vma->vm_ops->set_policy(vma, new);
+ if (err)
+ goto err_out;
+ }
+
+ old = vma->vm_policy;
+ vma->vm_policy = new; /* protected by mmap_sem */
+ mpol_put(old);
+
+ return 0;
+ err_out:
+ mpol_put(new);
+ return err;
+}
+
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
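vma_replace_policy() above follows the usual "prepare a private copy, attempt the fallible step, only then publish and drop the old reference" shape, so any failure leaves vma->vm_policy untouched. The generic form of the pattern (obj, obj_dup, obj_put and validate are hypothetical):

static int replace_obj(struct obj **slot, struct obj *templ)
{
	struct obj *old;
	struct obj *new = obj_dup(templ);	/* private copy first */
	int err;

	if (IS_ERR(new))
		return PTR_ERR(new);
	err = validate(new);			/* the step that can fail */
	if (err) {
		obj_put(new);
		return err;
	}
	old = *slot;
	*slot = new;				/* publish under the caller's lock */
	obj_put(old);
	return 0;
}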
@@ -645,23 +681,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
-
- /*
- * Apply policy to a single VMA. The reference counting of
- * policy for vma_policy linkages has already been handled by
- * vma_merge and split_vma as necessary. If this is a shared
- * policy then ->set_policy will increment the reference count
- * for an sp node.
- */
- pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff,
- vma->vm_ops, vma->vm_file,
- vma->vm_ops ? vma->vm_ops->set_policy : NULL);
- if (vma->vm_ops && vma->vm_ops->set_policy) {
- err = vma->vm_ops->set_policy(vma, new_pol);
- if (err)
- goto out;
- }
+ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
}
out:
@@ -1999,7 +2021,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
*/
/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
@@ -2063,36 +2085,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
if (!sp->root.rb_node)
return NULL;
- spin_lock(&sp->lock);
+ mutex_lock(&sp->mutex);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
- spin_unlock(&sp->lock);
+ mutex_unlock(&sp->mutex);
return pol;
}
+static void sp_free(struct sp_node *n)
+{
+ mpol_put(n->policy);
+ kmem_cache_free(sn_cache, n);
+}
+
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
- mpol_put(n->policy);
- kmem_cache_free(sn_cache, n);
+ sp_free(n);
}
static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
- struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ struct sp_node *n;
+ struct mempolicy *newpol;
+ n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n)
return NULL;
+
+ newpol = mpol_dup(pol);
+ if (IS_ERR(newpol)) {
+ kmem_cache_free(sn_cache, n);
+ return NULL;
+ }
+ newpol->flags |= MPOL_F_SHARED;
+
n->start = start;
n->end = end;
- mpol_get(pol);
- pol->flags |= MPOL_F_SHARED; /* for unref */
- n->policy = pol;
+ n->policy = newpol;
+
return n;
}
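By duplicating the policy for each sp_node instead of taking another reference on the caller's object, every node owns an independent MPOL_F_SHARED copy: freeing a node drops exactly one reference, and the caller's original policy is no longer mutated behind its back (the old code set MPOL_F_SHARED on pol itself).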
@@ -2100,10 +2136,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
- struct sp_node *n, *new2 = NULL;
+ struct sp_node *n;
+ int ret = 0;
-restart:
- spin_lock(&sp->lock);
+ mutex_lock(&sp->mutex);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
@@ -2116,16 +2152,14 @@ restart:
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
+ struct sp_node *new2;
+ new2 = sp_alloc(end, n->end, n->policy);
if (!new2) {
- spin_unlock(&sp->lock);
- new2 = sp_alloc(end, n->end, n->policy);
- if (!new2)
- return -ENOMEM;
- goto restart;
+ ret = -ENOMEM;
+ goto out;
}
n->end = start;
sp_insert(sp, new2);
- new2 = NULL;
break;
} else
n->end = start;
@@ -2136,12 +2170,9 @@ restart:
}
if (new)
sp_insert(sp, new);
- spin_unlock(&sp->lock);
- if (new2) {
- mpol_put(new2->policy);
- kmem_cache_free(sn_cache, new2);
- }
- return 0;
+out:
+ mutex_unlock(&sp->mutex);
+ return ret;
}
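This also closes a race in the old restart loop: sp_alloc() could not run under the spinlock, so the code unlocked, allocated new2, and jumped back to restart, leaving a window in which another writer could reshape the tree. With the mutex held across the whole replacement, new2 is allocated exactly where it is needed, and both the restart loop and the leftover-new2 cleanup disappear.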
/**
@@ -2159,7 +2190,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
- spin_lock_init(&sp->lock);
+ mutex_init(&sp->mutex);
if (mpol) {
struct vm_area_struct pvma;
@@ -2213,7 +2244,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
}
err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
if (err && new)
- kmem_cache_free(sn_cache, new);
+ sp_free(new);
return err;
}
@@ -2225,16 +2256,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
if (!p->root.rb_node)
return;
- spin_lock(&p->lock);
+ mutex_lock(&p->mutex);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
- rb_erase(&n->nd, &p->root);
- mpol_put(n->policy);
- kmem_cache_free(sn_cache, n);
+ sp_delete(p, n);
}
- spin_unlock(&p->lock);
+ mutex_unlock(&p->mutex);
}
/* assumes fs == KERNEL_DS */