author    : Pawit Pornkitprasan <p.pawit@gmail.com>  2013-04-29 17:41:09 +0700
committer : Pawit Pornkitprasan <p.pawit@gmail.com>  2013-04-29 17:41:45 +0700
commit    : 8966a0af43e954331e92bb53b9c09111d01117b8 (patch)
tree      : 1090f15bcb4b9c8d2a0bad9d90222728f184ccd6 /mm
parent    : b429ec3fd774ebb511a76377caebbbafe5e1ae9f (diff)
parent    : b47833937231eebab2fe46502426ea8158fae8d9 (diff)
Merge 3.0.75
Change-Id: Iefad0afd76773f4a9556775cd0fa8dcacb135cdf
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c        | 20
-rw-r--r-- | mm/memory.c         | 47
-rw-r--r-- | mm/memory_hotplug.c | 15
-rw-r--r-- | mm/mmap.c           |  2
-rw-r--r-- | mm/nommu.c          |  2

5 files changed, 74 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 037f077..14420dd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2006,8 +2006,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
@@ -2796,7 +2800,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			break;
 		}
 
-		if (absent ||
+		/*
+		 * We need call hugetlb_fault for both hugepages under migration
+		 * (in which case hugetlb_fault waits for the migration,) and
+		 * hwpoisoned hugepages (in which case we need to prevent the
+		 * caller from accessing to them.) In order to do this, we use
+		 * here is_swap_pte instead of is_hugetlb_entry_migration and
+		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
+		 * both cases, and because we can't follow correct pages
+		 * directly from any kind of swap entries.
+		 */
+		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
diff --git a/mm/memory.c b/mm/memory.c
index 4da0f8a..ac8d3a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2317,6 +2317,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * whatever write-combining details or similar.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+	unsigned long vm_len, pfn, pages;
+
+	/* Check that the physical memory area passed in looks valid */
+	if (start + len < start)
+		return -EINVAL;
+	/*
+	 * You *really* shouldn't map things that aren't page-aligned,
+	 * but we've historically allowed it because IO memory might
+	 * just have smaller alignment.
+	 */
+	len += start & ~PAGE_MASK;
+	pfn = start >> PAGE_SHIFT;
+	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+	if (pfn + pages < pfn)
+		return -EINVAL;
+
+	/* We start the mapping 'vm_pgoff' pages into the area */
+	if (vma->vm_pgoff > pages)
+		return -EINVAL;
+	pfn += vma->vm_pgoff;
+	pages -= vma->vm_pgoff;
+
+	/* Can we fit all of the mapping? */
+	vm_len = vma->vm_end - vma->vm_start;
+	if (vm_len >> PAGE_SHIFT > pages)
+		return -EINVAL;
+
+	/* Ok, let it rip */
+	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				     unsigned long addr, unsigned long end,
 				     pte_fn_t fn, void *data)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e0a3e51..a739dd1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -453,19 +453,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
 
-	if (need_zonelists_rebuild)
-		build_all_zonelists(zone);
-	else
-		zone_pcp_update(zone);
+	if (onlined_pages) {
+		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+		if (need_zonelists_rebuild)
+			build_all_zonelists(zone);
+		else
+			zone_pcp_update(zone);
+	}
 
 	mutex_unlock(&zonelists_mutex);
 
 	init_per_zone_wmark_min();
 
-	if (onlined_pages) {
+	if (onlined_pages)
 		kswapd_run(zone_to_nid(zone));
-		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
-	}
 
 	vm_total_pages = nr_free_pagecache_pages();
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1581,7 +1581,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	if (mm) {
 		/* Check the cache first. */
 		/* (Cache hit rate is typically around 35%.) */
-		vma = mm->mmap_cache;
+		vma = ACCESS_ONCE(mm->mmap_cache);
 		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
 			struct rb_node * rb_node;
diff --git a/mm/nommu.c b/mm/nommu.c
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -808,7 +808,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	/* check the cache first */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 		return vma;
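A note on the hugetlb_total_pages() change above: the old code counted only the default huge page size, so on a system with, say, both 2 MB and 1 GB huge pages configured, the overcommit accounting undercounted the reserved memory. for_each_hstate() walks every registered huge page size. As a rough sketch of the iterator relied on here, 3.0-era mm/hugetlb.c defines it along these lines (exact identifier names may differ by kernel version):

/* Sketch of the hstate iterator; identifiers are from memory and
 * may differ slightly in this exact tree. */
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)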
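The vm_iomap_memory() helper added to mm/memory.c above is meant to replace open-coded io_remap_pfn_range() call sites in driver mmap handlers. A minimal sketch of such a caller follows; the driver, its struct, and the phys_base/region_len fields are invented for illustration and are not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state; both fields are illustrative only. */
struct mydrv_device {
	phys_addr_t phys_base;    /* physical start of the MMIO region */
	unsigned long region_len; /* size of the region in bytes */
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_device *dev = file->private_data;

	/*
	 * vm_iomap_memory() validates vma->vm_pgoff and the requested
	 * length against the physical range, so the driver no longer
	 * has to open-code those bounds checks before calling
	 * io_remap_pfn_range().
	 */
	return vm_iomap_memory(vma, dev->phys_base, dev->region_len);
}

Per the NOTE in the kerneldoc, a driver that wants write-combining would adjust vma->vm_page_prot (e.g. with pgprot_writecombine()) before making the call.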
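The find_vma() changes in mm/mmap.c and mm/nommu.c address a lockless read: mmap_cache can be updated by another thread while a reader is validating it, and without ACCESS_ONCE() the compiler may legally re-load mm->mmap_cache between the bounds check and the use, so the checked pointer and the returned pointer could differ. ACCESS_ONCE() forces exactly one load so both operate on the same snapshot. For reference, kernels of this vintage define it in include/linux/compiler.h as a volatile cast, roughly:

/* From include/linux/compiler.h (3.0-era): the volatile access
 * forces the compiler to emit a single load or store for x. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))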