
Commit 1a10a44

Matthew Wilcox (Oracle) authored and akpm00 committed
sparc64: implement the new page table range API
Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Convert the PG_dcache_dirty flag from being
per-page to per-folio.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Mike Rapoport (IBM) <[email protected]>
Cc: "David S. Miller" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 665f640 commit 1a10a44
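
For context, a minimal sketch of how a caller is expected to drive the range API that the hunks below implement for sparc64. The helper is hypothetical and not part of this commit (the real callers live in generic mm code); it only shows the intended calling sequence for mapping every page of a folio at once:

/* Hypothetical caller, for illustration only (not from this commit). */
static void map_folio_example(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, struct folio *folio, pgprot_t prot)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(&folio->page, prot);

	/* No-op on sparc64, per the cacheflush_64.h change below. */
	flush_icache_pages(vma, &folio->page, nr);
	/* Write nr consecutive PTEs for the folio's nr consecutive pages. */
	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
	/* Let the arch update its MMU/cache state for the whole range. */
	update_mmu_cache_range(NULL, vma, addr, ptep, nr);
}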

File tree: 5 files changed, +119 −67 lines


arch/sparc/include/asm/cacheflush_64.h

+12-6
@@ -35,20 +35,26 @@ void flush_icache_range(unsigned long start, unsigned long end);
 void __flush_icache_page(unsigned long);
 
 void __flush_dcache_page(void *addr, int flush_icache);
-void flush_dcache_page_impl(struct page *page);
+void flush_dcache_folio_impl(struct folio *folio);
 #ifdef CONFIG_SMP
-void smp_flush_dcache_page_impl(struct page *page, int cpu);
-void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+void smp_flush_dcache_folio_impl(struct folio *folio, int cpu);
+void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio);
 #else
-#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
-#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
+#define smp_flush_dcache_folio_impl(folio, cpu) flush_dcache_folio_impl(folio)
+#define flush_dcache_folio_all(mm, folio) flush_dcache_folio_impl(folio)
 #endif
 
 void __flush_dcache_range(unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 #define flush_icache_page(vma, pg)	do { } while(0)
+#define flush_icache_pages(vma, pg, nr)	do { } while(0)
 
 void flush_ptrace_access(struct vm_area_struct *, struct page *,
 			 unsigned long uaddr, void *kaddr,

arch/sparc/include/asm/pgtable_64.h

+22-7
@@ -86,6 +86,7 @@ extern unsigned long VMALLOC_END;
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 #include <linux/sched.h>
+#include <asm/tlbflush.h>
 
 bool kern_addr_valid(unsigned long addr);
 
@@ -927,8 +928,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 }
 
-#define set_pte_at(mm,addr,ptep,pte)	\
-	__set_pte_at((mm), (addr), (ptep), (pte), 0)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	arch_enter_lazy_mmu_mode();
+	for (;;) {
+		__set_pte_at(mm, addr, ptep, pte, 0);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += PAGE_SIZE;
+		addr += PAGE_SIZE;
+	}
+	arch_leave_lazy_mmu_mode();
+}
+#define set_ptes set_ptes
 
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
@@ -947,8 +961,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 									\
 	if (pfn_valid(this_pfn) &&					\
 	    (((old_addr) ^ (new_addr)) & (1 << 13)))			\
-		flush_dcache_page_all(current->mm,			\
-				      pfn_to_page(this_pfn));		\
+		flush_dcache_folio_all(current->mm,			\
+			page_folio(pfn_to_page(this_pfn)));		\
 	}								\
 	newpte;								\
 })
@@ -963,7 +977,10 @@ struct seq_file;
 void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
-void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
+		unsigned long addr, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 			  pmd_t *pmd);
@@ -1121,8 +1138,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
 }
 #define pte_access_permitted pte_access_permitted
 
-#include <asm/tlbflush.h>
-
 /* We provide our own get_unmapped_area to cope with VA holes and
  * SHM area cache aliasing for userland.
  */
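
A note on the set_ptes() hunk above: the loop advances the PTE by adding PAGE_SIZE to pte_val(), which works because the sparc64 PTE encodes the page's physical address, so each addition points at the next page. Dropping the arch-private set_pte_at() definition is also safe: once an architecture supplies set_ptes(), the generic header is expected to provide set_pte_at() in terms of it, which is why the pte_clear() macro above can keep calling set_pte_at(). A minimal sketch of that assumed generic fallback (see include/linux/pgtable.h for the authoritative definition):

/* Assumed generic fallback: setting one PTE is a range of length 1. */
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)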

arch/sparc/kernel/smp_64.c

+36-20
@@ -921,20 +921,26 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
 #endif
 extern unsigned long xcall_flush_dcache_page_spitfire;
 
-static inline void __local_flush_dcache_page(struct page *page)
+static inline void __local_flush_dcache_folio(struct folio *folio)
 {
+	unsigned int i, nr = folio_nr_pages(folio);
+
 #ifdef DCACHE_ALIASING_POSSIBLE
-	__flush_dcache_page(page_address(page),
+	for (i = 0; i < nr; i++)
+		__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
 			    ((tlb_type == spitfire) &&
-			     page_mapping_file(page) != NULL));
+			     folio_flush_mapping(folio) != NULL));
 #else
-	if (page_mapping_file(page) != NULL &&
-	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page_address(page)));
+	if (folio_flush_mapping(folio) != NULL &&
+	    tlb_type == spitfire) {
+		unsigned long pfn = folio_pfn(folio)
+		for (i = 0; i < nr; i++)
+			__flush_icache_page((pfn + i) * PAGE_SIZE);
+	}
 #endif
 }
 
-void smp_flush_dcache_page_impl(struct page *page, int cpu)
+void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
 {
 	int this_cpu;
 
@@ -948,33 +954,38 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	this_cpu = get_cpu();
 
 	if (cpu == this_cpu) {
-		__local_flush_dcache_page(page);
+		__local_flush_dcache_folio(folio);
 	} else if (cpu_online(cpu)) {
-		void *pg_addr = page_address(page);
+		void *pg_addr = folio_address(folio);
 		u64 data0 = 0;
 
 		if (tlb_type == spitfire) {
 			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-			if (page_mapping_file(page) != NULL)
+			if (folio_flush_mapping(folio) != NULL)
 				data0 |= ((u64)1 << 32);
 		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
 			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 #endif
 		}
 		if (data0) {
-			xcall_deliver(data0, __pa(pg_addr),
-				      (u64) pg_addr, cpumask_of(cpu));
+			unsigned int i, nr = folio_nr_pages(folio);
+
+			for (i = 0; i < nr; i++) {
+				xcall_deliver(data0, __pa(pg_addr),
+					      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
-			atomic_inc(&dcpage_flushes_xcall);
+				atomic_inc(&dcpage_flushes_xcall);
 #endif
+				pg_addr += PAGE_SIZE;
+			}
 		}
 	}
 
 	put_cpu();
 }
 
-void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
 {
 	void *pg_addr;
 	u64 data0;
@@ -988,24 +999,29 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	atomic_inc(&dcpage_flushes);
 #endif
 	data0 = 0;
-	pg_addr = page_address(page);
+	pg_addr = folio_address(folio);
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page_mapping_file(page) != NULL)
+		if (folio_flush_mapping(folio) != NULL)
 			data0 |= ((u64)1 << 32);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 #endif
 	}
 	if (data0) {
-		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, cpu_online_mask);
+		unsigned int i, nr = folio_nr_pages(folio);
+
+		for (i = 0; i < nr; i++) {
+			xcall_deliver(data0, __pa(pg_addr),
+				      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
-		atomic_inc(&dcpage_flushes_xcall);
+			atomic_inc(&dcpage_flushes_xcall);
#endif
+			pg_addr += PAGE_SIZE;
+		}
 	}
-	__local_flush_dcache_page(page);
+	__local_flush_dcache_folio(folio);
 
 	preempt_enable();
 }
