immortalwrt-mt798x/target/linux/generic/backport-5.4/020-13-BACKPORT-mm-swapcache-support-to-handle-the-shadow-e.patch

From 1b950746b18526adb4534e656651850f73a9d1c1 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Date: Tue, 11 Aug 2020 18:30:47 -0700
Subject: [PATCH] BACKPORT: mm/swapcache: support to handle the shadow entries

Workingset detection for anonymous pages will be implemented in the
following patch, and it requires storing the shadow entries in the
swap cache. This patch implements the infrastructure to store shadow
entries in the swap cache.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/1595490560-15117-5-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 3852f6768ede542ed48b9077bedf482c7ecb6327)
Signed-off-by: Yu Zhao <yuzhao@google.com>
BUG=b:123039911
TEST=Built
Change-Id: I913ad8bfa0bbb744e35a0da4b684cdb5c557f394
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2951281
Reviewed-by: Yu Zhao <yuzhao@chromium.org>
Commit-Queue: Yu Zhao <yuzhao@chromium.org>
Tested-by: Yu Zhao <yuzhao@chromium.org>
---
 include/linux/swap.h | 17 +++++++++----
 mm/shmem.c           |  2 +-
 mm/swap_state.c      | 57 +++++++++++++++++++++++++++++++++++++++-----
 mm/swapfile.c        |  2 ++
 mm/vmscan.c          |  2 +-
 5 files changed, 68 insertions(+), 12 deletions(-)
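
Background for the diff below: a "shadow" is not a struct page pointer but
an XArray value entry, a small integer tagged in the low bit so it can share
a slot with (always word-aligned) page pointers; the kernel tests for it
with xa_is_value(). A minimal userspace model of that tagging trick, purely
illustrative and not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Model of the kernel's xa_mk_value()/xa_is_value()/xa_to_value():
 * the payload is shifted left by one and bit 0 is set, so a value
 * entry can never collide with an aligned struct page pointer. */
static void *mk_value(unsigned long v) { return (void *)((v << 1) | 1); }
static bool is_value(const void *p) { return ((uintptr_t)p & 1) != 0; }
static unsigned long to_value(const void *p) { return (uintptr_t)p >> 1; }

int main(void)
{
	void *slot = mk_value(42);	/* shadow entry carrying eviction info */

	if (is_value(slot))
		printf("shadow entry, payload %lu\n", to_value(slot));
	return 0;
}
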
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -408,10 +408,14 @@ extern struct address_space *swapper_spa
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
+extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp, void **shadowp);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
+extern void __delete_from_swap_cache(struct page *page,
+ swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
+extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -566,13 +570,13 @@ static inline int add_to_swap(struct pag
}
static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, void **shadowp)
{
return -1;
}
static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry)
+ swp_entry_t entry, void *shadow)
{
}
@@ -580,6 +584,11 @@ static inline void delete_from_swap_cach
{
}
+static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end)
+{
+}
+
static inline int page_swapcount(struct page *page)
{
return 0;
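
The new shadowp out-parameter reports whether the slot being filled held a
shadow entry. A sketch of the intended call pattern; the real consumer,
anonymous-page refault detection, only lands in the follow-up patch, so the
workingset_refault() call here is an assumption about that patch:

	void *shadow = NULL;
	int err;

	/* If a shadow occupied the slot, it is handed back through
	 * 'shadow' instead of being silently overwritten. */
	err = add_to_swap_cache(page, entry, GFP_KERNEL, &shadow);
	if (!err && shadow)
		workingset_refault(page, shadow);
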
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1374,7 +1374,7 @@ static int shmem_writepage(struct page *
if (list_empty(&info->swaplist))
list_add(&info->swaplist, &shmem_swaplist);
- if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+ if (add_to_swap_cache(page, swap, GFP_ATOMIC, NULL) == 0) {
spin_lock_irq(&info->lock);
shmem_recalc_inode(inode);
info->swapped++;
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -112,12 +112,14 @@ void show_swap_cache_info(void)
* add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
+int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp, void **shadowp)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
unsigned long i, nr = compound_nr(page);
+ void *old;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -127,16 +129,25 @@ int add_to_swap_cache(struct page *page,
SetPageSwapCache(page);
do {
+ unsigned long nr_shadows = 0;
+
xas_lock_irq(&xas);
xas_create_range(&xas);
if (xas_error(&xas))
goto unlock;
for (i = 0; i < nr; i++) {
VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+ old = xas_load(&xas);
+ if (xa_is_value(old)) {
+ nr_shadows++;
+ if (shadowp)
+ *shadowp = old;
+ }
set_page_private(page + i, entry.val + i);
xas_store(&xas, page);
xas_next(&xas);
}
+ address_space->nrexceptional -= nr_shadows;
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr);
@@ -156,7 +167,8 @@ unlock:
* This must be called only on pages that have
* been verified to be in the swap cache.
*/
-void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
+void __delete_from_swap_cache(struct page *page,
+ swp_entry_t entry, void *shadow)
{
struct address_space *address_space = swap_address_space(entry);
int i, nr = hpage_nr_pages(page);
@@ -168,12 +180,14 @@ void __delete_from_swap_cache(struct pag
VM_BUG_ON_PAGE(PageWriteback(page), page);
for (i = 0; i < nr; i++) {
- void *entry = xas_store(&xas, NULL);
+ void *entry = xas_store(&xas, shadow);
VM_BUG_ON_PAGE(entry != page, entry);
set_page_private(page + i, 0);
xas_next(&xas);
}
ClearPageSwapCache(page);
+ if (shadow)
+ address_space->nrexceptional += nr;
address_space->nrpages -= nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
ADD_CACHE_INFO(del_total, nr);
@@ -210,7 +224,7 @@ int add_to_swap(struct page *page)
* Add it to the swap cache.
*/
err = add_to_swap_cache(page, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -248,13 +262,44 @@ void delete_from_swap_cache(struct page
struct address_space *address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(page, entry);
+ __delete_from_swap_cache(page, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
put_swap_page(page, entry);
page_ref_sub(page, hpage_nr_pages(page));
}
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long curr = begin;
+ void *old;
+
+ for (;;) {
+ unsigned long nr_shadows = 0;
+ swp_entry_t entry = swp_entry(type, curr);
+ struct address_space *address_space = swap_address_space(entry);
+ XA_STATE(xas, &address_space->i_pages, curr);
+
+ xa_lock_irq(&address_space->i_pages);
+ xas_for_each(&xas, old, end) {
+ if (!xa_is_value(old))
+ continue;
+ xas_store(&xas, NULL);
+ nr_shadows++;
+ }
+ address_space->nrexceptional -= nr_shadows;
+ xa_unlock_irq(&address_space->i_pages);
+
+ /* search the next swapcache until we meet end */
+ curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+ curr++;
+ curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+ if (curr > end)
+ break;
+ }
+}
+
/*
* If we are the only user, then try to free up the swap cache.
*
@@ -420,7 +465,7 @@ struct page *__read_swap_cache_async(swp
__SetPageLocked(new_page);
__SetPageSwapBacked(new_page);
err = add_to_swap_cache(new_page, entry,
- gfp_mask & GFP_RECLAIM_MASK);
+ gfp_mask & GFP_RECLAIM_MASK, NULL);
if (likely(!err)) {
/* Initiate read into locked page */
SetPageWorkingset(new_page);
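
The curr >>= SHIFT; curr++; curr <<= SHIFT step in
clear_shadow_from_swap_cache() above rounds curr up to the first offset of
the next per-swapfile address space (each space covers
2^SWAP_ADDRESS_SPACE_SHIFT slots; the value 14 below matches mainline but is
an assumption here). A standalone sketch of that arithmetic:

#include <stdio.h>

#define SWAP_ADDRESS_SPACE_SHIFT 14	/* mainline value, assumed */

static unsigned long next_space(unsigned long curr)
{
	/* Round up to the start of the next swap address space. */
	curr >>= SWAP_ADDRESS_SPACE_SHIFT;
	curr++;
	curr <<= SWAP_ADDRESS_SPACE_SHIFT;
	return curr;
}

int main(void)
{
	printf("%lu -> %lu\n", 5000UL, next_space(5000));	/* 16384 */
	printf("%lu -> %lu\n", 16384UL, next_space(16384));	/* 32768 */
	return 0;
}
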
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -716,6 +716,7 @@ static void add_to_avail_list(struct swa
static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned int nr_entries)
{
+ unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
@@ -741,6 +742,7 @@ static void swap_range_free(struct swap_
swap_slot_free_notify(si->bdev, offset);
offset++;
}
+ clear_shadow_from_swap_cache(si->type, begin, end);
}
static int scan_swap_map_slots(struct swap_info_struct *si,
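
swap_range_free() records the first freed offset so that, once the slots are
released, any shadows covering [begin, end] (both bounds inclusive) are
dropped; stale shadows would otherwise keep nrexceptional inflated and be
misread when a slot is recycled. Sketch of the call for a single freed slot,
under those same inclusive-range semantics:

	/* One slot at 'offset' on device 'si': begin == end. */
	clear_shadow_from_swap_cache(si->type, offset, offset);
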
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -948,7 +948,7 @@ static int __remove_mapping(struct addre
if (PageSwapCache(page)) {
swp_entry_t swap = { .val = page_private(page) };
mem_cgroup_swapout(page, swap);
- __delete_from_swap_cache(page, swap);
+ __delete_from_swap_cache(page, swap, NULL);
xa_unlock_irqrestore(&mapping->i_pages, flags);
put_swap_page(page, swap);
} else {
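
__remove_mapping() still passes a NULL shadow here; the follow-up workingset
patch is expected to substitute a real eviction shadow. A hedged sketch of
that substitution (workingset_eviction()'s exact signature varies across
kernel versions, so treat this as an assumption):

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		void *shadow;

		mem_cgroup_swapout(page, swap);
		/* Assumed follow-up: keep eviction information in a
		 * shadow entry instead of discarding it. */
		shadow = workingset_eviction(page);
		__delete_from_swap_cache(page, swap, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	}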