mm/swapfile.c: use list_{next,first}_entry helpers; adapt to THP and memcg API changes
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 58877312cf6b94b74da00b8913bf8db64d56ab26..2bb30aa3a4123a547bd29e1585a1234644a92152 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -165,8 +165,6 @@ static void discard_swap_cluster(struct swap_info_struct *si,
        int found_extent = 0;
 
        while (nr_pages) {
-               struct list_head *lh;
-
                if (se->start_page <= start_page &&
                    start_page < se->start_page + se->nr_pages) {
                        pgoff_t offset = start_page - se->start_page;
@@ -188,8 +186,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                                break;
                }
 
-               lh = se->list.next;
-               se = list_entry(lh, struct swap_extent, list);
+               se = list_next_entry(se, list);
        }
 }
 
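Note: the two hunks above replace the open-coded pattern of taking se->list.next and casting it back with list_entry() by the dedicated helper, dropping the temporary struct list_head *lh. For reference, list_next_entry() is defined in include/linux/list.h roughly as:

    /* Return the next entry in a list, given the current entry and the
     * name of the list_head member embedded in the entry's struct. */
    #define list_next_entry(pos, member) \
            list_entry((pos)->member.next, typeof(*(pos)), member)

The same conversion is applied to the swap-count continuation walk and to map_swap_entry() below.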
@@ -903,7 +900,7 @@ int swp_swapcount(swp_entry_t entry)
        VM_BUG_ON(page_private(page) != SWP_CONTINUED);
 
        do {
-               page = list_entry(page->lru.next, struct page, lru);
+               page = list_next_entry(page, lru);
                map = kmap_atomic(page);
                tmp_count = map[offset];
                kunmap_atomic(map);
@@ -929,6 +926,9 @@ int reuse_swap_page(struct page *page)
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (unlikely(PageKsm(page)))
                return 0;
+       /* The page is part of THP and cannot be reused */
+       if (PageTransCompound(page))
+               return 0;
        count = page_mapcount(page);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
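The new check bails out early when the page is part of a transparent huge page, matching the added comment: such a page cannot simply be reused here. As a sketch (not the verbatim header), PageTransCompound() amounts to:

    /* True when the page belongs to a compound page; compiles away to 0
     * when CONFIG_TRANSPARENT_HUGEPAGE is disabled. */
    static inline int PageTransCompound(struct page *page)
    {
            return PageCompound(page);
    }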
@@ -1111,19 +1111,9 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
-static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 {
-#ifdef CONFIG_MEM_SOFT_DIRTY
-       /*
-        * When pte keeps soft dirty bit the pte generated
-        * from swap entry does not has it, still it's same
-        * pte from logical point of view.
-        */
-       pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
-       return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
-#else
-       return pte_same(pte, swp_pte);
-#endif
+       return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
 }
 
 /*
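The rewrite above collapses the old #ifdef CONFIG_MEM_SOFT_DIRTY branches into a single expression: pte_swp_clear_soft_dirty() strips the soft-dirty bit (if any) from the pte before comparing, so the comparison succeeds whether or not that bit happens to be set. On configurations without soft-dirty support the helper falls back to a no-op, roughly:

    /* Sketch of the generic fallback: with no soft-dirty tracking there
     * is no bit to clear, so the pte is returned unchanged. */
    static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
    {
            return pte;
    }

which makes the new one-liner cover both branches of the removed #ifdef.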
@@ -1145,14 +1135,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        if (unlikely(!page))
                return -ENOMEM;
 
-       if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) {
+       if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
+                               &memcg, false)) {
                ret = -ENOMEM;
                goto out_nolock;
        }
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
-               mem_cgroup_cancel_charge(page, memcg);
+       if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
+               mem_cgroup_cancel_charge(page, memcg, false);
                ret = 0;
                goto out;
        }
@@ -1163,11 +1154,11 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        if (page == swapcache) {
-               page_add_anon_rmap(page, vma, addr);
-               mem_cgroup_commit_charge(page, memcg, true);
+               page_add_anon_rmap(page, vma, addr, false);
+               mem_cgroup_commit_charge(page, memcg, true, false);
        } else { /* ksm created a completely new copy */
-               page_add_new_anon_rmap(page, vma, addr);
-               mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, addr, false);
+               mem_cgroup_commit_charge(page, memcg, false, false);
                lru_cache_add_active_or_unevictable(page, vma);
        }
        swap_free(entry);
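The extra false arguments in the two hunks above track an API change from the THP refcounting series: the memcg charge functions and the anon rmap helpers gained a bool compound parameter saying whether the operation covers a whole huge page or a single base page. unuse_pte() always operates on base pages, hence false everywhere. Assuming the 4.5-era prototypes, the touched functions look roughly like:

    int  mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                               gfp_t gfp_mask, struct mem_cgroup **memcgp,
                               bool compound);
    void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                                  bool lrucare, bool compound);
    void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
                                  bool compound);
    void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                            unsigned long address, bool compound);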
@@ -1209,7 +1200,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 * swapoff spends a _lot_ of time in this loop!
                 * Test inline before going to call unuse_pte.
                 */
-               if (unlikely(maybe_same_pte(*pte, swp_pte))) {
+               if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
                        pte_unmap(pte);
                        ret = unuse_pte(vma, pmd, addr, entry, page);
                        if (ret)
@@ -1633,14 +1624,11 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
        se = start_se;
 
        for ( ; ; ) {
-               struct list_head *lh;
-
                if (se->start_page <= offset &&
                                offset < (se->start_page + se->nr_pages)) {
                        return se->start_block + (offset - se->start_page);
                }
-               lh = se->list.next;
-               se = list_entry(lh, struct swap_extent, list);
+               se = list_next_entry(se, list);
                sis->curr_swap_extent = se;
                BUG_ON(se == start_se);         /* It *must* be present */
        }
@@ -1664,7 +1652,7 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
        while (!list_empty(&sis->first_swap_extent.list)) {
                struct swap_extent *se;
 
-               se = list_entry(sis->first_swap_extent.list.next,
+               se = list_first_entry(&sis->first_swap_extent.list,
                                struct swap_extent, list);
                list_del(&se->list);
                kfree(se);
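list_first_entry() is the companion helper for the head of a list; per include/linux/list.h it is essentially:

    /* Fetch the first real entry hanging off a list head. */
    #define list_first_entry(ptr, type, member) \
            list_entry((ptr)->next, type, member)

Note that the call now takes the address of the list head itself rather than its ->next pointer, which is why the argument changed from sis->first_swap_extent.list.next to &sis->first_swap_extent.list.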
@@ -2959,11 +2947,10 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
                struct page *head;
                head = vmalloc_to_page(si->swap_map + offset);
                if (page_private(head)) {
-                       struct list_head *this, *next;
-                       list_for_each_safe(this, next, &head->lru) {
-                               struct page *page;
-                               page = list_entry(this, struct page, lru);
-                               list_del(this);
+                       struct page *page, *next;
+
+                       list_for_each_entry_safe(page, next, &head->lru, lru) {
+                               list_del(&page->lru);
                                __free_page(page);
                        }
                }
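list_for_each_entry_safe() keeps a lookahead cursor (next) so the loop body may unlink and free the current entry, which is exactly what this loop does. For reference, the helper expands roughly to:

    /* Safe iteration: 'n' caches the next entry before the body runs,
     * so 'pos' may be deleted and freed inside the loop. */
    #define list_for_each_entry_safe(pos, n, head, member)                 \
            for (pos = list_first_entry(head, typeof(*pos), member),       \
                    n = list_next_entry(pos, member);                      \
                 &pos->member != (head);                                   \
                 pos = n, n = list_next_entry(n, member))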