author     pswbuild <pswbuild@oppo.com>            2023-03-06 17:38:41 +0800
committer  Michael Bestas <mkbestas@lineageos.org> 2023-05-23 12:58:25 +0300
commit     4255c98bb03e087ff9281a3959b71b78e9efd30e (patch)
tree       c0ba4e36894aa03123a4c2d0276994b7299573e9
parent     e17e8f270d54024bd46e036ab56e5f9ffefad5d1 (diff)
Synchronize code for OnePlus 8 IN2013_11_F.17/OnePlus 8 Pro IN2023_11_F.17 and OnePlus 8T KB2003_11_F.17
Change-Id: I3ada00d21a01779fd6df7d1c8f21d71af03ae72c
-rw-r--r--  include/linux/damon.h          522
-rw-r--r--  include/trace/events/damon.h    82
-rw-r--r--  mm/Kconfig                       2
-rw-r--r--  mm/Makefile                      1
-rw-r--r--  mm/damon/Kconfig                20
-rw-r--r--  mm/damon/Makefile                5
-rw-r--r--  mm/damon/core.c               1177
-rw-r--r--  mm/damon/ops-common.c          135
-rw-r--r--  mm/damon/ops-common.h           16
-rw-r--r--  mm/damon/paddr.c               317
-rw-r--r--  mm/damon/reclaim.c             452
11 files changed, 0 insertions, 2729 deletions
diff --git a/include/linux/damon.h b/include/linux/damon.h
deleted file mode 100644
index 96af9586f00d..000000000000
--- a/include/linux/damon.h
+++ /dev/null
@@ -1,522 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DAMON API
- *
- * Author: SeongJae Park <sjpark@amazon.de>
- */
-
-#ifndef _DAMON_H_
-#define _DAMON_H_
-
-#include <linux/mutex.h>
-#include <linux/time64.h>
-#include <linux/types.h>
-#include <linux/random.h>
-
-/* Minimal region size. Every damon_region is aligned by this. */
-#define DAMON_MIN_REGION PAGE_SIZE
-/* Max priority score for DAMON-based operation schemes */
-#define DAMOS_MAX_SCORE (99)
-
-/* Get a random number in [l, r) */
-static inline unsigned long damon_rand(unsigned long l, unsigned long r)
-{
- return l + prandom_u32_max(r - l);
-}
-
-/**
- * struct damon_addr_range - Represents an address region of [@start, @end).
- * @start: Start address of the region (inclusive).
- * @end: End address of the region (exclusive).
- */
-struct damon_addr_range {
- unsigned long start;
- unsigned long end;
-};
-
-/**
- * struct damon_region - Represents a monitoring target region.
- * @ar: The address range of the region.
- * @sampling_addr: Address of the sample for the next access check.
- * @nr_accesses: Access frequency of this region.
- * @list: List head for siblings.
- * @age: Age of this region.
- *
- * @age is initially zero, increased for each aggregation interval, and reset
- * to zero again if the access frequency is significantly changed. If two
- * regions are merged into a new region, both @nr_accesses and @age of the new
- * region are set as region size-weighted average of those of the two regions.
- */
-struct damon_region {
- struct damon_addr_range ar;
- unsigned long sampling_addr;
- unsigned int nr_accesses;
- struct list_head list;
-
- unsigned int age;
-/* private: Internal value for age calculation. */
- unsigned int last_nr_accesses;
-};
-
-/**
- * struct damon_target - Represents a monitoring target.
- * @pid: The PID of the virtual address space to monitor.
- * @nr_regions: Number of monitoring target regions of this target.
- * @regions_list: Head of the monitoring target regions of this target.
- * @list: List head for siblings.
- *
- * Each monitoring context could have multiple targets. For example, a context
- * for virtual memory address spaces could have multiple target processes. The
- * @pid should be set for appropriate &struct damon_operations including the
- * virtual address spaces monitoring operations.
- */
-struct damon_target {
- struct pid *pid;
- unsigned int nr_regions;
- struct list_head regions_list;
- struct list_head list;
-};
-
-/**
- * enum damos_action - Represents an action of a Data Access Monitoring-based
- * Operation Scheme.
- *
- * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
- * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
- * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
- * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
- * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
- * @DAMOS_STAT: Do nothing but count the stat.
- * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
- */
-enum damos_action {
- DAMOS_WILLNEED,
- DAMOS_COLD,
- DAMOS_PAGEOUT,
- DAMOS_HUGEPAGE,
- DAMOS_NOHUGEPAGE,
- DAMOS_STAT, /* Do nothing but only record the stat */
- NR_DAMOS_ACTIONS,
-};
-
-/**
- * struct damos_quota - Controls the aggressiveness of the given scheme.
- * @ms: Maximum milliseconds that the scheme can use.
- * @sz: Maximum bytes of memory that the action can be applied to.
- * @reset_interval: Charge reset interval in milliseconds.
- *
- * @weight_sz: Weight of the region's size for prioritization.
- * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
- * @weight_age: Weight of the region's age for prioritization.
- *
- * To avoid consuming too much CPU time or IO resources for applying the
- * &struct damos->action to large memory, DAMON allows users to set time and/or
- * size quotas. The quotas can be set by writing non-zero values to &ms and
- * &sz, respectively. If the time quota is set, DAMON tries to use only up to
- * &ms milliseconds within &reset_interval for applying the action. If the
- * size quota is set, DAMON tries to apply the action only up to &sz bytes
- * within &reset_interval.
- *
- * Internally, the time quota is transformed to a size quota using estimated
- * throughput of the scheme's action. DAMON then compares it against &sz and
- * uses the smaller one as the effective quota.
- *
- * For selecting regions within the quota, DAMON prioritizes the current scheme's
- * target memory regions using the &struct damon_operations->get_scheme_score.
- * You could customize the prioritization logic by setting &weight_sz,
- * &weight_nr_accesses, and &weight_age, because monitoring operations are
- * encouraged to respect those.
- */
-struct damos_quota {
- unsigned long ms;
- unsigned long sz;
- unsigned long reset_interval;
-
- unsigned int weight_sz;
- unsigned int weight_nr_accesses;
- unsigned int weight_age;
-
-/* private: */
- /* For throughput estimation */
- unsigned long total_charged_sz;
- unsigned long total_charged_ns;
-
- unsigned long esz; /* Effective size quota in bytes */
-
- /* For charging the quota */
- unsigned long charged_sz;
- unsigned long charged_from;
- struct damon_target *charge_target_from;
- unsigned long charge_addr_from;
-
- /* For prioritization */
- unsigned long histogram[DAMOS_MAX_SCORE + 1];
- unsigned int min_score;
-};
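
As a rough illustration of the time-to-size conversion described above (a hedged sketch with hypothetical numbers; the real logic is damos_set_effective_quota() in mm/damon/core.c further down this patch):

/*
 * Suppose the scheme has charged total_charged_sz = 400 MiB over
 * total_charged_ns = 2 s of action time, i.e. an estimated throughput
 * of 0.2 MiB per millisecond.  With user-set ms = 10 and sz = 1 MiB,
 * the time quota converts to 0.2 MiB/ms * 10 ms = 2 MiB, and the
 * effective quota per reset_interval is esz = min(2 MiB, 1 MiB) = 1 MiB.
 */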
-
-/**
- * enum damos_wmark_metric - Represents the watermark metric.
- *
- * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
- * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
- * @DAMOS_WMARK_OPLUS: Wake up short-term reclamation when free memory drops.
- * @DAMOS_WMARK_SLEEP: Sleep until a call from the upper layer.
- * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics.
- */
-enum damos_wmark_metric {
- DAMOS_WMARK_NONE,
- DAMOS_WMARK_FREE_MEM_RATE,
- DAMOS_WMARK_OPLUS,
- DAMOS_WMARK_SLEEP,
- NR_DAMOS_WMARK_METRICS,
-};
-
-/**
- * struct damos_watermarks - Controls when a given scheme should be activated.
- * @metric: Metric for the watermarks.
- * @interval: Watermarks check time interval in microseconds.
- * @high: High watermark.
- * @mid: Middle watermark.
- * @low: Low watermark.
- *
- * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
- * means DAMON does the monitoring and applies the action of the scheme to
- * appropriate memory regions. Otherwise, DAMON checks &metric of the system
- * at least every &interval microseconds and works as below.
- *
- * If &metric is higher than &high, the scheme is inactivated. If &metric is
- * between &mid and &low, the scheme is activated. If &metric is lower than
- * &low, the scheme is inactivated.
- */
-struct damos_watermarks {
- enum damos_wmark_metric metric;
- unsigned long interval;
- unsigned long high;
- unsigned long mid;
- unsigned long low;
-
-/* private: */
- bool activated;
-};
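
For the free-memory-rate metric documented above, a hedged configuration sketch (hypothetical values; the metric is in permil of total memory, so 1000 means all memory is free):

struct damos_watermarks wmarks = {
	.metric   = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5000000,	/* check every 5 s (value in microseconds) */
	.high     = 500,	/* deactivate while more than 50.0% is free */
	.mid      = 400,	/* activate once free memory falls to 40.0% */
	.low      = 50,		/* deactivate again below 5.0% free */
};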
-
-/**
- * struct damos_stat - Statistics on a given scheme.
- * @nr_tried: Total number of regions that the scheme was tried to be applied to.
- * @sz_tried: Total size of regions that the scheme was tried to be applied to.
- * @nr_applied: Total number of regions that the scheme was applied to.
- * @sz_applied: Total size of regions that the scheme was applied to.
- * @qt_exceeds: Total number of times the quota of the scheme has been exceeded.
- */
-struct damos_stat {
- unsigned long nr_tried;
- unsigned long sz_tried;
- unsigned long nr_applied;
- unsigned long sz_applied;
- unsigned long qt_exceeds;
-};
-
-/**
- * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
- * @min_sz_region: Minimum size of target regions.
- * @max_sz_region: Maximum size of target regions.
- * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
- * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
- * @min_age_region: Minimum age of target regions.
- * @max_age_region: Maximum age of target regions.
- * @action: &damos_action to be applied to the target regions.
- * @quota: Control the aggressiveness of this scheme.
- * @wmarks: Watermarks for automated (in)activation of this scheme.
- * @stat: Statistics of this scheme.
- * @list: List head for siblings.
- *
- * For each aggregation interval, DAMON finds regions which fit in the
- * condition (&min_sz_region, &max_sz_region, &min_nr_accesses,
- * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to
- * those. To avoid consuming too much CPU time or IO resources for the
- * &action, &quota is used.
- *
- * To do the work only when needed, schemes can be activated for specific
- * system situations using &wmarks. If all schemes that are registered to a
- * &struct damon_ctx are inactive, DAMON stops the monitoring as well, and
- * just repeatedly checks the watermarks.
- *
- * After applying the &action to each region, &stat is updated to reflect the
- * number of regions and the total size of regions that the &action was
- * applied to.
- */
-struct damos {
- unsigned long min_sz_region;
- unsigned long max_sz_region;
- unsigned int min_nr_accesses;
- unsigned int max_nr_accesses;
- unsigned int min_age_region;
- unsigned int max_age_region;
- enum damos_action action;
- struct damos_quota quota;
- struct damos_watermarks wmarks;
- struct damos_stat stat;
- struct list_head list;
-};
-
-/**
- * enum damon_ops_id - Identifier for each monitoring operations implementation
- *
- * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
- * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
- */
-enum damon_ops_id {
- DAMON_OPS_VADDR,
- DAMON_OPS_PADDR,
- NR_DAMON_OPS,
-};
-
-struct damon_ctx;
-
-/**
- * struct damon_operations - Monitoring operations for given use cases.
- *
- * @id: Identifier of this operations set.
- * @init: Initialize operations-related data structures.
- * @update: Update operations-related data structures.
- * @prepare_access_checks: Prepare next access check of target regions.
- * @check_accesses: Check the accesses to target regions.
- * @reset_aggregated: Reset aggregated accesses monitoring results.
- * @get_scheme_score: Get the score of a region for a scheme.
- * @apply_scheme: Apply a DAMON-based operation scheme.
- * @target_valid: Determine if the target is valid.
- * @cleanup: Clean up the context.
- *
- * DAMON can be extended for various address spaces and usages. For this,
- * users should register the low level operations for their target address
- * space and use case via the &damon_ctx.ops. Then, the monitoring thread
- * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
- * the monitoring, @update after each &damon_ctx.ops_update_interval, and
- * @check_accesses, @target_valid and @prepare_access_checks after each
- * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
- * &damon_ctx.aggr_interval.
- *
- * Each &struct damon_operations instance having valid @id can be registered
- * via damon_register_ops() and selected by damon_select_ops() later.
- * @init should initialize operations-related data structures. For example,
- * this could be used to construct proper monitoring target regions and link
- * those to @damon_ctx.adaptive_targets.
- * @update should update the operations-related data structures. For example,
- * this could be used to update monitoring target regions for current status.
- * @prepare_access_checks should manipulate the monitoring regions to be
- * prepared for the next access check.
- * @check_accesses should check the accesses to each region that were made
- * after the last preparation and update the number of observed accesses of
- * each region. It should also return the max number of observed accesses that
- * were made as a result of its update. The value will be used as a threshold
- * for the regions adjustment.
- * @reset_aggregated should reset the access monitoring results that aggregated
- * by @check_accesses.
- * @get_scheme_score should return the priority score of a region for a scheme
- * as an integer in [0, &DAMOS_MAX_SCORE].
- * @apply_scheme is called from @kdamond when a region for a user-provided
- * DAMON-based operation scheme is found. It should apply the scheme's action
- * to the region and return the number of bytes of the region that the action
- * was successfully applied to.
- * @target_valid should check whether the target is still valid for the
- * monitoring.
- * @cleanup is called from @kdamond just before its termination.
- */
-struct damon_operations {
- enum damon_ops_id id;
- void (*init)(struct damon_ctx *context);
- void (*update)(struct damon_ctx *context);
- void (*prepare_access_checks)(struct damon_ctx *context);
- unsigned int (*check_accesses)(struct damon_ctx *context);
- void (*reset_aggregated)(struct damon_ctx *context);
- int (*get_scheme_score)(struct damon_ctx *context,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme);
- unsigned long (*apply_scheme)(struct damon_ctx *context,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme);
- bool (*target_valid)(void *target);
- void (*cleanup)(struct damon_ctx *context);
-};
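
A skeletal operations set for the hook sequence described above (a hedged sketch; all example_* functions are hypothetical, and hooks a use case does not need may stay NULL):

static struct damon_operations example_ops = {
	.id = DAMON_OPS_PADDR,
	.init = example_init,			/* build initial target regions */
	.update = example_update,		/* refresh regions per ops_update_interval */
	.prepare_access_checks = example_prepare,
	.check_accesses = example_check,	/* returns the max nr_accesses seen */
	.get_scheme_score = example_score,	/* in [0, DAMOS_MAX_SCORE] */
	.apply_scheme = example_apply,
	.target_valid = example_target_valid,
	.cleanup = example_cleanup,
};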
-
-/**
- * struct damon_callback - Monitoring events notification callbacks.
- *
- * @before_start: Called before starting the monitoring.
- * @after_sampling: Called after each sampling.
- * @after_aggregation: Called after each aggregation.
- * @before_terminate: Called before terminating the monitoring.
- * @private: User private data.
- *
- * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
- * @before_terminate just before starting and finishing the monitoring,
- * respectively. Therefore, those are good places for installing and cleaning
- * @private.
- *
- * The monitoring thread calls @after_sampling and @after_aggregation after
- * each sampling interval and aggregation interval, respectively. Since those
- * callbacks run in the monitoring thread, users can safely access the
- * monitoring results from them without additional protection, so these
- * callbacks are the recommended places for accessing the results.
- *
- * If any callback returns non-zero, monitoring stops.
- */
-struct damon_callback {
- void *private;
-
- int (*before_start)(struct damon_ctx *context);
- int (*after_sampling)(struct damon_ctx *context);
- int (*after_aggregation)(struct damon_ctx *context);
- void (*before_terminate)(struct damon_ctx *context);
-};
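
Following the recommendation above, a hedged sketch that reads the aggregated results from an @after_aggregation callback (consume_region() is a hypothetical consumer):

static int example_after_aggregation(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			consume_region(r->ar.start, r->ar.end,
				       r->nr_accesses, r->age);
	}
	return 0;	/* a non-zero return would stop the monitoring */
}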
-
-/**
- * struct damon_ctx - Represents a context for each monitoring. This is the
- * main interface that allows users to set the attributes and get the results
- * of the monitoring.
- *
- * @sample_interval: The time between access samplings.
- * @aggr_interval: The time between monitor results aggregations.
- * @ops_update_interval: The time between monitoring operations updates.
- *
- * For each @sample_interval, DAMON checks whether each region is accessed or
- * not. It aggregates and keeps the access information (number of accesses to
- * each region) for @aggr_interval time. DAMON also checks whether the target
- * memory regions need update (e.g., by ``mmap()`` calls from the application,
- * in case of virtual memory monitoring) and applies the changes for each
- * @ops_update_interval. All time intervals are in micro-seconds.
- * Please refer to &struct damon_operations and &struct damon_callback for more
- * detail.
- *
- * @kdamond: Kernel thread that does the monitoring.
- * @kdamond_stop: Notifies whether kdamond should stop.
- * @kdamond_lock: Mutex for the synchronizations with @kdamond.
- *
- * For each monitoring context, one kernel thread for the monitoring is
- * created. The pointer to the thread is stored in @kdamond.
- *
- * Once started, the monitoring thread runs until explicitly required to be
- * terminated or every monitoring target is invalid. The validity of the
- * targets is checked via the &damon_operations.target_valid of @ops. The
- * termination can also be explicitly requested by writing non-zero to
- * @kdamond_stop. The thread sets @kdamond to NULL when it terminates.
- * Therefore, users can know whether the monitoring is ongoing or terminated by
- * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from
- * outside of the monitoring thread must be protected by @kdamond_lock.
- *
- * Note that the monitoring thread protects only @kdamond and @kdamond_stop via
- * @kdamond_lock. Accesses to other fields must be protected by the callers
- * themselves.
- *
- * @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
- *
- * @min_nr_regions: The minimum number of adaptive monitoring regions.
- * @max_nr_regions: The maximum number of adaptive monitoring regions.
- * @adaptive_targets: Head of monitoring targets (&damon_target) list.
- * @schemes: Head of schemes (&damos) list.
- */
-struct damon_ctx {
- unsigned long sample_interval;
- unsigned long aggr_interval;
- unsigned long ops_update_interval;
-
-/* private: internal use only */
- unsigned long punish_interval;
- struct timespec64 last_aggregation;
- struct timespec64 last_ops_update;
-
-/* public: */
- struct task_struct *kdamond;
- struct mutex kdamond_lock;
-
- struct damon_operations ops;
- struct damon_callback callback;
-
- unsigned long min_nr_regions;
- unsigned long max_nr_regions;
- struct list_head adaptive_targets;
- struct list_head schemes;
-};
-
-static inline struct damon_region *damon_next_region(struct damon_region *r)
-{
- return container_of(r->list.next, struct damon_region, list);
-}
-
-static inline struct damon_region *damon_prev_region(struct damon_region *r)
-{
- return container_of(r->list.prev, struct damon_region, list);
-}
-
-static inline struct damon_region *damon_last_region(struct damon_target *t)
-{
- return list_last_entry(&t->regions_list, struct damon_region, list);
-}
-
-#define damon_for_each_region(r, t) \
- list_for_each_entry(r, &t->regions_list, list)
-
-#define damon_for_each_region_safe(r, next, t) \
- list_for_each_entry_safe(r, next, &t->regions_list, list)
-
-#define damon_for_each_target(t, ctx) \
- list_for_each_entry(t, &(ctx)->adaptive_targets, list)
-
-#define damon_for_each_target_safe(t, next, ctx) \
- list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
-
-#define damon_for_each_scheme(s, ctx) \
- list_for_each_entry(s, &(ctx)->schemes, list)
-
-#define damon_for_each_scheme_safe(s, next, ctx) \
- list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
-
-#ifdef CONFIG_DAMON
-
-struct damon_region *damon_new_region(unsigned long start, unsigned long end);
-/*
- * Add a region between two other regions
- */
-static inline void damon_insert_region(struct damon_region *r,
- struct damon_region *prev, struct damon_region *next,
- struct damon_target *t)
-{
- __list_add(&r->list, &prev->list, &next->list);
- t->nr_regions++;
-}
-void damon_add_region(struct damon_region *r, struct damon_target *t);
-void damon_destroy_region(struct damon_region *r, struct damon_target *t);
-
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks);
-void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
-void damon_destroy_scheme(struct damos *s);
-
-struct damon_target *damon_new_target(void);
-void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
-bool damon_targets_empty(struct damon_ctx *ctx);
-void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
-unsigned int damon_nr_regions(struct damon_target *t);
-
-struct damon_ctx *damon_new_ctx(void);
-void damon_destroy_ctx(struct damon_ctx *ctx);
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg);
-int damon_set_schemes(struct damon_ctx *ctx,
- struct damos **schemes, ssize_t nr_schemes);
-int damon_nr_running_ctxs(void);
-int damon_register_ops(struct damon_operations *ops);
-int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
-
-int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
-int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
-
-#endif /* CONFIG_DAMON */
-
-#endif /* _DAMON_H_ */
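
For reference, a minimal usage sketch of the API this header declared, using the default attribute values that damon_new_ctx() (removed below in mm/damon/core.c) installs; error handling is abbreviated:

struct damon_ctx *ctx = damon_new_ctx();

if (ctx) {
	/* 5 ms sampling, 100 ms aggregation, 60 s ops update,
	 * and 10..1000 regions -- the damon_new_ctx() defaults */
	damon_set_attrs(ctx, 5000, 100000, 60000000, 10, 1000);
	damon_select_ops(ctx, DAMON_OPS_PADDR);
	damon_start(&ctx, 1, true);	/* one exclusive context */
}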
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
deleted file mode 100644
index 0bc02e571267..000000000000
--- a/include/trace/events/damon.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM damon
-
-#if !defined(_TRACE_DAMON_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_DAMON_H
-
-#include <linux/damon.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(damon_aggregated,
-
- TP_PROTO(struct damon_target *t, unsigned int target_id,
- struct damon_region *r, unsigned int nr_regions),
- TP_ARGS(t, target_id, r, nr_regions),
-
- TP_STRUCT__entry(
- __field(unsigned long, target_id)
- __field(unsigned int, nr_regions)
- __field(unsigned long, start)
- __field(unsigned long, end)
- __field(unsigned int, nr_accesses)
- __field(unsigned int, age)
- ),
-
- TP_fast_assign(
- __entry->target_id = target_id;
- __entry->nr_regions = nr_regions;
- __entry->start = r->ar.start;
- __entry->end = r->ar.end;
- __entry->nr_accesses = r->nr_accesses;
- __entry->age = r->age;
- ),
-
- TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u %u",
- __entry->target_id, __entry->nr_regions,
- __entry->start, __entry->end,
- __entry->nr_accesses, __entry->age)
-);
-
-TRACE_EVENT(damon_reclaim_statistics,
-
- TP_PROTO(unsigned long nr_reclaim_tried_regions,
- unsigned long bytes_reclaim_tried_regions,
- unsigned long nr_reclaimed_regions,
- unsigned long bytes_reclaimed_regions,
- unsigned long nr_quota_exceeds),
- TP_ARGS(nr_reclaim_tried_regions,
- bytes_reclaim_tried_regions,
- nr_reclaimed_regions,
- bytes_reclaimed_regions,
- nr_quota_exceeds),
-
- TP_STRUCT__entry(
- __field(unsigned long, nr_reclaim_tried_regions)
- __field(unsigned long, bytes_reclaim_tried_regions)
- __field(unsigned long, nr_reclaimed_regions)
- __field(unsigned long, bytes_reclaimed_regions)
- __field(unsigned long, nr_quota_exceeds)
- ),
-
- TP_fast_assign(
- __entry->nr_reclaim_tried_regions = nr_reclaim_tried_regions;
- __entry->bytes_reclaim_tried_regions = bytes_reclaim_tried_regions;
- __entry->nr_reclaimed_regions = nr_reclaimed_regions;
- __entry->bytes_reclaimed_regions = bytes_reclaimed_regions;
- __entry->nr_quota_exceeds = nr_quota_exceeds;
- ),
-
- TP_printk("nr_reclaim_tried_regions=%lu bytes_reclaim_tried_regions=%lu nr_reclaimed_regions=%lu, bytes_reclaimed_regions=%lu, nr_quota_exceeds=%lu",
- __entry->nr_reclaim_tried_regions,
- __entry->bytes_reclaim_tried_regions,
- __entry->nr_reclaimed_regions,
- __entry->bytes_reclaimed_regions,
- __entry->nr_quota_exceeds)
-);
-
-#endif /* _TRACE_DAMON_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/mm/Kconfig b/mm/Kconfig
index b4ed9d5381a2..a96d610c2f85 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -867,8 +867,6 @@ config GUP_BENCHMARK
config ARCH_HAS_PTE_SPECIAL
bool
-source "mm/damon/Kconfig"
-
# multi-gen LRU {
config LRU_GEN
bool "Multi-Gen LRU"
diff --git a/mm/Makefile b/mm/Makefile
index e84f1c3ab108..c8be64543758 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -109,7 +109,6 @@ obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
obj-$(CONFIG_HMM) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
obj-$(CONFIG_PROCESS_RECLAIM) += process_reclaim.o
-obj-$(CONFIG_DAMON) += damon/
#ifdef OPLUS_FEATURE_HEALTHINFO
obj-y += healthinfo/
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
deleted file mode 100644
index b70a4b753f5f..000000000000
--- a/mm/damon/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-menu "Data Access Monitoring"
-
-config DAMON
- bool "DAMON: Data Access Monitoring Framework"
- default y
-
-config DAMON_PADDR
- bool "Data access monitoring operations for the physical address space"
- depends on DAMON && MMU
- select PAGE_IDLE_FLAG
- default y
-
-config DAMON_RECLAIM
- bool "Build DAMON-based reclaim (DAMON_RECLAIM)"
- depends on DAMON_PADDR
- default y
-
-endmenu
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
deleted file mode 100644
index a38d1accc286..000000000000
--- a/mm/damon/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_DAMON) := core.o
-obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
-obj-$(CONFIG_DAMON_RECLAIM) += reclaim.o
diff --git a/mm/damon/core.c b/mm/damon/core.c
deleted file mode 100644
index 7f28d9c433c6..000000000000
--- a/mm/damon/core.c
+++ /dev/null
@@ -1,1177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Data Access Monitor
- *
- * Author: SeongJae Park <sjpark@amazon.de>
- */
-
-#define pr_fmt(fmt) "damon: " fmt
-
-#include <linux/damon.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/damon.h>
-
-static DEFINE_MUTEX(damon_lock);
-static int nr_running_ctxs;
-static bool running_exclusive_ctxs;
-
-static DEFINE_MUTEX(damon_ops_lock);
-static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
-
-static unsigned long last_metric;
-static unsigned long active_interval;
-static unsigned long active_cnt;
-
-extern int wmarks_metric;
-extern unsigned long nr_reclaim_time;
-extern unsigned long nr_reclaim_page;
-extern unsigned long nr_damon_region;
-
-/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
-static bool damon_registered_ops_id(enum damon_ops_id id)
-{
- struct damon_operations empty_ops = {};
-
- if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
- return false;
- return true;
-}
-
-/**
- * damon_register_ops() - Register a monitoring operations set to DAMON.
- * @ops: monitoring operations set to register.
- *
- * This function registers a monitoring operations set of valid &struct
- * damon_operations->id so that others can find and use it later.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int damon_register_ops(struct damon_operations *ops)
-{
- int err = 0;
-
- if (ops->id >= NR_DAMON_OPS)
- return -EINVAL;
- mutex_lock(&damon_ops_lock);
- /* Fail for already registered ops */
- if (damon_registered_ops_id(ops->id)) {
- err = -EINVAL;
- goto out;
- }
- damon_registered_ops[ops->id] = *ops;
-out:
- mutex_unlock(&damon_ops_lock);
- return err;
-}
-
-/**
- * damon_select_ops() - Select a monitoring operations to use with the context.
- * @ctx: monitoring context to use the operations.
- * @id: id of the registered monitoring operations to select.
- *
- * This function finds the registered monitoring operations set of @id and
- * makes @ctx use it.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
-{
- int err = 0;
-
- if (id >= NR_DAMON_OPS)
- return -EINVAL;
-
- mutex_lock(&damon_ops_lock);
- if (!damon_registered_ops_id(id))
- err = -EINVAL;
- else
- ctx->ops = damon_registered_ops[id];
- mutex_unlock(&damon_ops_lock);
- return err;
-}
-
-/*
- * Construct a damon_region struct
- *
- * Returns the pointer to the new struct on success, or NULL otherwise
- */
-struct damon_region *damon_new_region(unsigned long start, unsigned long end)
-{
- struct damon_region *region;
-
- region = kmalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return NULL;
-
- region->ar.start = start;
- region->ar.end = end;
- region->nr_accesses = 0;
- INIT_LIST_HEAD(&region->list);
-
- region->age = 0;
- region->last_nr_accesses = 0;
-
- return region;
-}
-
-void damon_add_region(struct damon_region *r, struct damon_target *t)
-{
- list_add_tail(&r->list, &t->regions_list);
- t->nr_regions++;
-}
-
-static void damon_del_region(struct damon_region *r, struct damon_target *t)
-{
- list_del(&r->list);
- t->nr_regions--;
-}
-
-static void damon_free_region(struct damon_region *r)
-{
- kfree(r);
-}
-
-void damon_destroy_region(struct damon_region *r, struct damon_target *t)
-{
- damon_del_region(r, t);
- damon_free_region(r);
-}
-
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks)
-{
- struct damos *scheme;
-
- scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
- if (!scheme)
- return NULL;
- scheme->min_sz_region = min_sz_region;
- scheme->max_sz_region = max_sz_region;
- scheme->min_nr_accesses = min_nr_accesses;
- scheme->max_nr_accesses = max_nr_accesses;
- scheme->min_age_region = min_age_region;
- scheme->max_age_region = max_age_region;
- scheme->action = action;
- scheme->stat = (struct damos_stat){};
- INIT_LIST_HEAD(&scheme->list);
-
- scheme->quota.ms = quota->ms;
- scheme->quota.sz = quota->sz;
- scheme->quota.reset_interval = quota->reset_interval;
- scheme->quota.weight_sz = quota->weight_sz;
- scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
- scheme->quota.weight_age = quota->weight_age;
- scheme->quota.total_charged_sz = 0;
- scheme->quota.total_charged_ns = 0;
- scheme->quota.esz = 0;
- scheme->quota.charged_sz = 0;
- scheme->quota.charged_from = 0;
- scheme->quota.charge_target_from = NULL;
- scheme->quota.charge_addr_from = 0;
-
- scheme->wmarks.metric = wmarks->metric;
- scheme->wmarks.interval = wmarks->interval;
- scheme->wmarks.high = wmarks->high;
- scheme->wmarks.mid = wmarks->mid;
- scheme->wmarks.low = wmarks->low;
- scheme->wmarks.activated = true;
-
- return scheme;
-}
-
-void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
-{
- list_add_tail(&s->list, &ctx->schemes);
-}
-
-static void damon_del_scheme(struct damos *s)
-{
- list_del(&s->list);
-}
-
-static void damon_free_scheme(struct damos *s)
-{
- kfree(s);
-}
-
-void damon_destroy_scheme(struct damos *s)
-{
- damon_del_scheme(s);
- damon_free_scheme(s);
-}
-
-/*
- * Construct a damon_target struct
- *
- * Returns the pointer to the new struct on success, or NULL otherwise
- */
-struct damon_target *damon_new_target(void)
-{
- struct damon_target *t;
-
- t = kmalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- return NULL;
-
- t->pid = NULL;
- t->nr_regions = 0;
- INIT_LIST_HEAD(&t->regions_list);
-
- return t;
-}
-
-void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
-{
- list_add_tail(&t->list, &ctx->adaptive_targets);
-}
-
-bool damon_targets_empty(struct damon_ctx *ctx)
-{
- return list_empty(&ctx->adaptive_targets);
-}
-
-static void damon_del_target(struct damon_target *t)
-{
- list_del(&t->list);
-}
-
-void damon_free_target(struct damon_target *t)
-{
- struct damon_region *r, *next;
-
- damon_for_each_region_safe(r, next, t)
- damon_free_region(r);
- kfree(t);
-}
-
-void damon_destroy_target(struct damon_target *t)
-{
- damon_del_target(t);
- damon_free_target(t);
-}
-
-unsigned int damon_nr_regions(struct damon_target *t)
-{
- return t->nr_regions;
-}
-
-struct damon_ctx *damon_new_ctx(void)
-{
- struct damon_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- ctx->sample_interval = 5 * 1000;
- ctx->aggr_interval = 100 * 1000;
- ctx->ops_update_interval = 60 * 1000 * 1000;
-
- ktime_get_coarse_ts64(&ctx->last_aggregation);
- ctx->last_ops_update = ctx->last_aggregation;
-
- mutex_init(&ctx->kdamond_lock);
-
- ctx->min_nr_regions = 10;
- ctx->max_nr_regions = 1000;
-
- INIT_LIST_HEAD(&ctx->adaptive_targets);
- INIT_LIST_HEAD(&ctx->schemes);
-
- return ctx;
-}
-
-static void damon_destroy_targets(struct damon_ctx *ctx)
-{
- struct damon_target *t, *next_t;
-
- if (ctx->ops.cleanup) {
- ctx->ops.cleanup(ctx);
- return;
- }
-
- damon_for_each_target_safe(t, next_t, ctx)
- damon_destroy_target(t);
-}
-
-void damon_destroy_ctx(struct damon_ctx *ctx)
-{
- struct damos *s, *next_s;
-
- damon_destroy_targets(ctx);
-
- damon_for_each_scheme_safe(s, next_s, ctx)
- damon_destroy_scheme(s);
-
- kfree(ctx);
-}
-
-/**
- * damon_set_attrs() - Set attributes for the monitoring.
- * @ctx: monitoring context
- * @sample_int: time interval between samplings
- * @aggr_int: time interval between aggregations
- * @ops_upd_int: time interval between monitoring operations updates
- * @min_nr_reg: minimal number of regions
- * @max_nr_reg: maximum number of regions
- *
- * This function should not be called while the kdamond is running.
- * Every time interval is in micro-seconds.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg)
-{
- if (min_nr_reg < 3)
- return -EINVAL;
-
- if (min_nr_reg > max_nr_reg)
- return -EINVAL;
-
- ctx->sample_interval = sample_int;
- ctx->aggr_interval = aggr_int;
- ctx->ops_update_interval = ops_upd_int;
- ctx->min_nr_regions = min_nr_reg;
- ctx->max_nr_regions = max_nr_reg;
-
- return 0;
-}
-
-/**
- * damon_set_schemes() - Set data access monitoring based operation schemes.
- * @ctx: monitoring context
- * @schemes: array of the schemes
- * @nr_schemes: number of entries in @schemes
- *
- * This function should not be called while the kdamond of the context is
- * running.
- *
- * Return: 0 if success, or negative error code otherwise.
- */
-int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
- ssize_t nr_schemes)
-{
- struct damos *s, *next;
- ssize_t i;
-
- damon_for_each_scheme_safe(s, next, ctx)
- damon_destroy_scheme(s);
- for (i = 0; i < nr_schemes; i++)
- damon_add_scheme(ctx, schemes[i]);
- return 0;
-}
-
-/**
- * damon_nr_running_ctxs() - Return number of currently running contexts.
- */
-int damon_nr_running_ctxs(void)
-{
- int nr_ctxs;
-
- mutex_lock(&damon_lock);
- nr_ctxs = nr_running_ctxs;
- mutex_unlock(&damon_lock);
-
- return nr_ctxs;
-}
-
-/* Returns the size upper limit for each monitoring region */
-static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- struct damon_region *r;
- unsigned long sz = 0;
-
- damon_for_each_target(t, ctx) {
- damon_for_each_region(r, t)
- sz += r->ar.end - r->ar.start;
- }
-
- if (ctx->min_nr_regions)
- sz /= ctx->min_nr_regions;
- if (sz < DAMON_MIN_REGION)
- sz = DAMON_MIN_REGION;
-
- return sz;
-}
-
-static int kdamond_fn(void *data);
-
-/*
- * __damon_start() - Starts monitoring with given context.
- * @ctx: monitoring context
- *
- * This function should be called while damon_lock is held.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int __damon_start(struct damon_ctx *ctx)
-{
- int err = -EBUSY;
-
- mutex_lock(&ctx->kdamond_lock);
- if (!ctx->kdamond) {
- err = 0;
- ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
- nr_running_ctxs);
- if (IS_ERR(ctx->kdamond)) {
- err = PTR_ERR(ctx->kdamond);
- ctx->kdamond = NULL;
- }
- }
- mutex_unlock(&ctx->kdamond_lock);
-
- return err;
-}
-
-/**
- * damon_start() - Starts monitoring for a given group of contexts.
- * @ctxs: an array of the pointers for contexts to start monitoring
- * @nr_ctxs: size of @ctxs
- * @exclusive: exclusiveness of this contexts group
- *
- * This function starts a group of monitoring threads for a group of
- * monitoring contexts. One thread per context is created and runs in
- * parallel. The caller should handle synchronization between the threads by
- * itself. If @exclusive is true and a group of threads created by another
- * 'damon_start()' call is currently running, this function does nothing but
- * returns -EBUSY.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
-{
- int i;
- int err = 0;
-
- mutex_lock(&damon_lock);
- if ((exclusive && nr_running_ctxs) ||
- (!exclusive && running_exclusive_ctxs)) {
- mutex_unlock(&damon_lock);
- return -EBUSY;
- }
-
- for (i = 0; i < nr_ctxs; i++) {
- err = __damon_start(ctxs[i]);
- if (err)
- break;
- nr_running_ctxs++;
- }
- if (exclusive && nr_running_ctxs)
- running_exclusive_ctxs = true;
- mutex_unlock(&damon_lock);
-
- return err;
-}
-
-/*
- * __damon_stop() - Stops monitoring of a given context.
- * @ctx: monitoring context
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int __damon_stop(struct damon_ctx *ctx)
-{
- struct task_struct *tsk;
-
- mutex_lock(&ctx->kdamond_lock);
- tsk = ctx->kdamond;
- if (tsk) {
- get_task_struct(tsk);
- mutex_unlock(&ctx->kdamond_lock);
- kthread_stop(tsk);
- put_task_struct(tsk);
- return 0;
- }
- mutex_unlock(&ctx->kdamond_lock);
-
- return -EPERM;
-}
-
-/**
- * damon_stop() - Stops monitoring for a given group of contexts.
- * @ctxs: an array of the pointers for contexts to stop monitoring
- * @nr_ctxs: size of @ctxs
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
-{
- int i, err = 0;
-
- for (i = 0; i < nr_ctxs; i++) {
- /* nr_running_ctxs is decremented in kdamond_fn */
- err = __damon_stop(ctxs[i]);
- if (err)
- break;
- }
-
- return err;
-}
-
-/*
- * damon_check_reset_time_interval() - Check if a time interval has elapsed.
- * @baseline: the time to check whether the interval has elapsed since
- * @interval: the time interval (microseconds)
- *
- * See whether the given time interval has passed since the given baseline
- * time. If so, it also updates the baseline to current time for next check.
- *
- * Return: true if the time interval has passed, or false otherwise.
- */
-static bool damon_check_reset_time_interval(struct timespec64 *baseline,
- unsigned long interval)
-{
- struct timespec64 now;
-
- ktime_get_coarse_ts64(&now);
- if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
- interval * 1000)
- return false;
- *baseline = now;
- return true;
-}
-
-/*
- * Check whether it is time to flush the aggregated information
- */
-static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
-{
- return damon_check_reset_time_interval(&ctx->last_aggregation,
- ctx->aggr_interval);
-}
-
-/*
- * Reset the aggregated monitoring results ('nr_accesses' of each region).
- */
-static void kdamond_reset_aggregated(struct damon_ctx *c)
-{
- struct damon_target *t;
- unsigned int ti = 0; /* target's index */
-
- damon_for_each_target(t, c) {
- struct damon_region *r;
-
- damon_for_each_region(r, t) {
- trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
- r->last_nr_accesses = r->nr_accesses;
- r->nr_accesses = 0;
- }
- ti++;
- }
-}
-
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r);
-
-static bool __damos_valid_target(struct damon_region *r, struct damos *s)
-{
- unsigned long sz;
-
- sz = r->ar.end - r->ar.start;
- return s->min_sz_region <= sz && sz <= s->max_sz_region &&
- s->min_nr_accesses <= r->nr_accesses &&
- r->nr_accesses <= s->max_nr_accesses &&
- s->min_age_region <= r->age && r->age <= s->max_age_region;
-}
-
-static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
- struct damon_region *r, struct damos *s)
-{
- bool ret = __damos_valid_target(r, s);
-
- if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
- return ret;
-
- return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
-}
-
-static void damon_do_apply_schemes(struct damon_ctx *c,
- struct damon_target *t,
- struct damon_region *r)
-{
- struct damos *s;
-
- damon_for_each_scheme(s, c) {
- struct damos_quota *quota = &s->quota;
- unsigned long sz = r->ar.end - r->ar.start;
- struct timespec64 begin, end;
- unsigned long sz_applied = 0;
-
- if (!s->wmarks.activated)
- continue;
-
- /* Check the quota */
- if (quota->esz && quota->charged_sz >= quota->esz)
- continue;
-
- /* Skip previously charged regions */
- if (quota->charge_target_from) {
- if (t != quota->charge_target_from)
- continue;
- if (r == damon_last_region(t)) {
- quota->charge_target_from = NULL;
- quota->charge_addr_from = 0;
- continue;
- }
- if (quota->charge_addr_from &&
- r->ar.end <= quota->charge_addr_from)
- continue;
-
- if (quota->charge_addr_from && r->ar.start <
- quota->charge_addr_from) {
- sz = ALIGN_DOWN(quota->charge_addr_from -
- r->ar.start, DAMON_MIN_REGION);
- if (!sz) {
- if (r->ar.end - r->ar.start <=
- DAMON_MIN_REGION)
- continue;
- sz = DAMON_MIN_REGION;
- }
- damon_split_region_at(c, t, r, sz);
- r = damon_next_region(r);
- sz = r->ar.end - r->ar.start;
- }
- quota->charge_target_from = NULL;
- quota->charge_addr_from = 0;
- }
-
- if (!damos_valid_target(c, t, r, s))
- continue;
-
- /* Apply the scheme */
- if (c->ops.apply_scheme) {
- if (quota->esz &&
- quota->charged_sz + sz > quota->esz) {
- sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
- DAMON_MIN_REGION);
- if (!sz)
- goto update_stat;
- damon_split_region_at(c, t, r, sz);
- }
- ktime_get_coarse_ts64(&begin);
- sz_applied = c->ops.apply_scheme(c, t, r, s);
- ktime_get_coarse_ts64(&end);
- quota->total_charged_ns += timespec64_to_ns(&end) -
- timespec64_to_ns(&begin);
- quota->charged_sz += sz;
- if (quota->esz && quota->charged_sz >= quota->esz) {
- quota->charge_target_from = t;
- quota->charge_addr_from = r->ar.end + 1;
- }
- }
- if (s->action != DAMOS_STAT)
- r->age = 0;
-
-update_stat:
- s->stat.nr_tried++;
- s->stat.sz_tried += sz;
- if (sz_applied)
- s->stat.nr_applied++;
- s->stat.sz_applied += sz_applied;
- }
-}
-
-/* Shouldn't be called if quota->ms and quota->sz are zero */
-static void damos_set_effective_quota(struct damos_quota *quota)
-{
- unsigned long throughput;
- unsigned long esz;
-
- if (!quota->ms) {
- quota->esz = quota->sz;
- return;
- }
-
- if (quota->total_charged_ns)
- throughput = quota->total_charged_sz * 1000000 /
- quota->total_charged_ns;
- else
- throughput = PAGE_SIZE * 1024;
- esz = throughput * quota->ms;
-
- if (quota->sz && quota->sz < esz)
- esz = quota->sz;
- quota->esz = esz;
-}
-
-static void kdamond_apply_schemes(struct damon_ctx *c)
-{
- struct damon_target *t;
- struct damon_region *r, *next_r;
- struct damos *s;
-
- damon_for_each_scheme(s, c) {
- struct damos_quota *quota = &s->quota;
- unsigned long cumulated_sz;
- unsigned int score, max_score = 0;
-
- if (!s->wmarks.activated)
- continue;
-
- if (!quota->ms && !quota->sz)
- continue;
-
- /* New charge window starts */
- if (time_after_eq(jiffies, quota->charged_from +
- msecs_to_jiffies(
- quota->reset_interval))) {
- if (quota->esz && quota->charged_sz >= quota->esz)
- s->stat.qt_exceeds++;
- quota->total_charged_sz += quota->charged_sz;
- quota->charged_from = jiffies;
- quota->charged_sz = 0;
- damos_set_effective_quota(quota);
- }
-
- if (!c->ops.get_scheme_score)
- continue;
-
- /* Fill up the score histogram */
- memset(quota->histogram, 0, sizeof(quota->histogram));
- damon_for_each_target(t, c) {
- damon_for_each_region(r, t) {
- if (!__damos_valid_target(r, s))
- continue;
- score = c->ops.get_scheme_score(
- c, t, r, s);
- quota->histogram[score] +=
- r->ar.end - r->ar.start;
- if (score > max_score)
- max_score = score;
- }
- }
-
- /* Set the min score limit */
- for (cumulated_sz = 0, score = max_score; ; score--) {
- cumulated_sz += quota->histogram[score];
- if (cumulated_sz >= quota->esz || !score)
- break;
- }
- quota->min_score = score;
- }
-
- damon_for_each_target(t, c) {
- damon_for_each_region_safe(r, next_r, t)
- damon_do_apply_schemes(c, t, r);
- }
-}
-
-static inline unsigned long sz_damon_region(struct damon_region *r)
-{
- return r->ar.end - r->ar.start;
-}
-
-/*
- * Merge two adjacent regions into one region
- */
-static void damon_merge_two_regions(struct damon_target *t,
- struct damon_region *l, struct damon_region *r)
-{
- unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
-
- l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
- (sz_l + sz_r);
- l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
- l->ar.end = r->ar.end;
- damon_destroy_region(r, t);
-}
-
-/*
- * Merge adjacent regions having similar access frequencies
- *
- * t target affected by this merge operation
- * thres '->nr_accesses' diff threshold for the merge
- * sz_limit size upper limit of each region
- */
-static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
- unsigned long sz_limit)
-{
- struct damon_region *r, *prev = NULL, *next;
-
- damon_for_each_region_safe(r, next, t) {
- if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
- r->age = 0;
- else
- r->age++;
-
- if (prev && prev->ar.end == r->ar.start &&
- abs(prev->nr_accesses - r->nr_accesses) <= thres &&
- sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
- damon_merge_two_regions(t, prev, r);
- else
- prev = r;
- }
-}
-
-/*
- * Merge adjacent regions having similar access frequencies
- *
- * threshold '->nr_accesses' diff threshold for the merge
- * sz_limit size upper limit of each region
- *
- * This function merges monitoring target regions which are adjacent and their
- * access frequencies are similar. This is for minimizing the monitoring
- * overhead under the dynamically changeable access pattern. If a merge was
- * unnecessarily made, later 'kdamond_split_regions()' will revert it.
- */
-static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
- unsigned long sz_limit)
-{
- struct damon_target *t;
-
- damon_for_each_target(t, c)
- damon_merge_regions_of(t, threshold, sz_limit);
-}
-
-/*
- * Split a region in two
- *
- * r the region to be split
- * sz_r size of the first sub-region that will be made
- */
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r)
-{
- struct damon_region *new;
-
- new = damon_new_region(r->ar.start + sz_r, r->ar.end);
- if (!new)
- return;
-
- r->ar.end = new->ar.start;
-
- new->age = r->age;
- new->last_nr_accesses = r->last_nr_accesses;
-
- damon_insert_region(new, r, damon_next_region(r), t);
-}
-
-/* Split every region in the given target into 'nr_subs' regions */
-static void damon_split_regions_of(struct damon_ctx *ctx,
- struct damon_target *t, int nr_subs)
-{
- struct damon_region *r, *next;
- unsigned long sz_region, sz_sub = 0;
- int i;
-
- damon_for_each_region_safe(r, next, t) {
- sz_region = r->ar.end - r->ar.start;
-
- for (i = 0; i < nr_subs - 1 &&
- sz_region > 2 * DAMON_MIN_REGION; i++) {
- /*
- * Randomly select the size of the left sub-region to be
- * at least 10% and at most 90% of the original region
- */
- sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
- sz_region / 10, DAMON_MIN_REGION);
- /* Do not allow blank region */
- if (sz_sub == 0 || sz_sub >= sz_region)
- continue;
-
- damon_split_region_at(ctx, t, r, sz_sub);
- sz_region = sz_sub;
- }
- }
-}
-
-/*
- * Split every target region into randomly-sized small regions
- *
- * This function splits every target region into random-sized small regions if
- * current total number of the regions is equal to or smaller than half of the
- * user-specified maximum number of regions. This is for maximizing the
- * monitoring accuracy under the dynamically changeable access patterns. If a
- * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
- * it.
- */
-static void kdamond_split_regions(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- unsigned int nr_regions = 0;
- static unsigned int last_nr_regions;
- int nr_subregions = 2;
-
- damon_for_each_target(t, ctx)
- nr_regions += damon_nr_regions(t);
-
- if (nr_regions > ctx->max_nr_regions / 2)
- return;
-
- /* Maybe the middle of the region has different access frequency */
- if (last_nr_regions == nr_regions &&
- nr_regions < ctx->max_nr_regions / 3)
- nr_subregions = 3;
-
- damon_for_each_target(t, ctx)
- damon_split_regions_of(ctx, t, nr_subregions);
-
- last_nr_regions = nr_regions;
- nr_damon_region = nr_regions;
-}
-
-/*
- * Check whether current monitoring should be stopped
- *
- * The monitoring is stopped when either the user requested to stop, or all
- * monitoring targets are invalid.
- *
- * Returns true if need to stop current monitoring.
- */
-static bool kdamond_need_stop(struct damon_ctx *ctx)
-{
- struct damon_target *t;
-
- if (kthread_should_stop())
- return true;
-
- if (!ctx->ops.target_valid)
- return false;
-
- damon_for_each_target(t, ctx) {
- if (ctx->ops.target_valid(t))
- return false;
- }
-
- return true;
-}
-
-static void kdamond_usleep(unsigned long usecs)
-{
- /* See Documentation/timers/timers-howto.rst for the thresholds */
- if (usecs > 20 * USEC_PER_MSEC)
- schedule_timeout_idle(usecs_to_jiffies(usecs));
- else
- usleep_idle_range(usecs, usecs + 1);
-}
-
-/*
- * Returns zero if the scheme is active. Else, returns time to wait for next
- * watermark check in micro-seconds.
- */
-static unsigned long damos_wmark_wait_us(struct damos *scheme)
-{
- unsigned long metric;
- unsigned long diff;
- struct sysinfo i;
-
- switch (wmarks_metric) {
- case DAMOS_WMARK_NONE:
- break;
-
- case DAMOS_WMARK_FREE_MEM_RATE:
- si_meminfo(&i);
- metric = i.freeram * 1000 / i.totalram;
- /* higher than high watermark or lower than low watermark */
- if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
- if (scheme->wmarks.activated)
- pr_debug("deactivate a scheme (%d) for %s wmark\n",
- scheme->action,
- metric > scheme->wmarks.high ?
- "high" : "low");
- scheme->wmarks.activated = false;
- return scheme->wmarks.interval;
- }
-
- /* inactive and higher than middle watermark */
- if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
- !scheme->wmarks.activated)
- return scheme->wmarks.interval;
-
- if (!scheme->wmarks.activated)
- pr_debug("activate a scheme (%d)\n", scheme->action);
- scheme->wmarks.activated = true;
- return 0;
-
- case DAMOS_WMARK_OPLUS:
- si_meminfo(&i);
- metric = i.freeram * 1000 / i.totalram;
- diff = abs(last_metric - metric);
-
- if (scheme->wmarks.activated) {
- if (metric < 10) {
- scheme->wmarks.activated = false;
- nr_reclaim_time = 0;
- nr_reclaim_page = 0;
- printk("[damon_reclaim] sleep. need kswapd.\n");
- return scheme->wmarks.interval;
- }
- nr_reclaim_time = active_interval * active_cnt; // 10ms(0.01s) * cnt;
- }
-
- // printk("[damon_reclaim] last: %d now: %d diff: %d activated: %d nr_reclaim_time: %d pages: %d \n",
- // last_metric, metric, diff, scheme->wmarks.activated, nr_reclaim_time, nr_reclaim_page);
-
- last_metric = (2*last_metric + 8*metric)/10; // smoothing filter (weighted moving average)
- /*
- Total RAM Activate FreeMem
- -------------------------------
- 4 GB 80 MB
- 6 GB 120 MB
- 8 GB 160 MB
- 12 GB 240 MB
- 16 GB 320 MB
- */
- if (diff <= 20 || last_metric < metric) {
-
- if (scheme->wmarks.activated) {
- if (nr_reclaim_time >= 300000 || // time threshold: 5min = 5*60*1000 ms
- nr_reclaim_page >= i.totalram * 20 / 1000 )
- {
- scheme->wmarks.activated = false;
- nr_reclaim_time = 0;
- nr_reclaim_page = 0;
- printk("[damon_reclaim] sleep. need to control quota.\n");
- return scheme->wmarks.interval;
- } else {
- ++active_cnt;
- return 0;
- }
- } else {
- // nothing
- return scheme->wmarks.interval;
- }
- }
- else {
- if (scheme->wmarks.activated) {
- //nothing
- return 0;
- } else {
- active_cnt = 0;
- scheme->wmarks.activated = true;
- nr_reclaim_time = 0;
- nr_reclaim_page = 0;
- printk("[damon_reclaim] active. \n");
- kdamond_usleep(scheme->wmarks.interval);
- return 0;
- }
- }
-
- case DAMOS_WMARK_SLEEP:
- scheme->wmarks.activated = false;
- nr_reclaim_time = 0;
- nr_reclaim_page = 0;
- printk("[damon_reclaim] sleep. call from upper layer.\n");
- return scheme->wmarks.interval;
-
- default:
- break;
- }
- return 0;
-}
-
-/*
- * Returns zero once a scheme gets activated, or a negative error code if
- * kdamond should stop while waiting for activation.
- */
-static int kdamond_wait_activation(struct damon_ctx *ctx)
-{
- struct damos *s;
- unsigned long wait_time;
- unsigned long min_wait_time = 0;
-
- while (!kdamond_need_stop(ctx)) {
- damon_for_each_scheme(s, ctx) {
- wait_time = damos_wmark_wait_us(s);
- if (!min_wait_time || wait_time < min_wait_time)
- min_wait_time = wait_time;
- }
- if (!min_wait_time)
- return 0;
-
- kdamond_usleep(min_wait_time);
- }
- return -EBUSY;
-}
-
-/*
- * The monitoring daemon that runs as a kernel thread
- */
-static int kdamond_fn(void *data)
-{
- struct damon_ctx *ctx = (struct damon_ctx *)data;
- struct damon_target *t;
- struct damon_region *r, *next;
- unsigned int max_nr_accesses = 0;
- unsigned long sz_limit = 0;
- bool done = false;
-
- pr_debug("kdamond (%d) starts\n", current->pid);
-
- if (ctx->ops.init)
- ctx->ops.init(ctx);
- if (ctx->callback.before_start && ctx->callback.before_start(ctx))
- done = true;
-
- sz_limit = damon_region_sz_limit(ctx);
-
- last_metric = 0;
- active_interval = ctx->sample_interval / 1000;
- active_cnt = 0;
- nr_reclaim_page = 0;
-
- while (!kdamond_need_stop(ctx) && !done) {
- if (kdamond_wait_activation(ctx))
- continue;
-
- if (ctx->ops.prepare_access_checks)
- ctx->ops.prepare_access_checks(ctx);
- if (ctx->callback.after_sampling &&
- ctx->callback.after_sampling(ctx))
- done = true;
-
- kdamond_usleep(ctx->sample_interval);
-
- if (ctx->ops.check_accesses)
- max_nr_accesses = ctx->ops.check_accesses(ctx);
-
- if (kdamond_aggregate_interval_passed(ctx)) {
- kdamond_merge_regions(ctx,
- max_nr_accesses / 10,
- sz_limit);
- if (ctx->callback.after_aggregation &&
- ctx->callback.after_aggregation(ctx))
- done = true;
- kdamond_apply_schemes(ctx);
- kdamond_reset_aggregated(ctx);
- kdamond_split_regions(ctx);
- if (ctx->ops.reset_aggregated)
- ctx->ops.reset_aggregated(ctx);
- if (ctx->ops.update)
- ctx->ops.update(ctx);
- sz_limit = damon_region_sz_limit(ctx);
- }
- }
- damon_for_each_target(t, ctx) {
- damon_for_each_region_safe(r, next, t)
- damon_destroy_region(r, t);
- }
-
- if (ctx->callback.before_terminate)
- ctx->callback.before_terminate(ctx);
- if (ctx->ops.cleanup)
- ctx->ops.cleanup(ctx);
-
- pr_debug("kdamond (%d) finishes\n", current->pid);
- mutex_lock(&ctx->kdamond_lock);
- ctx->kdamond = NULL;
- mutex_unlock(&ctx->kdamond_lock);
-
- mutex_lock(&damon_lock);
- nr_running_ctxs--;
- if (!nr_running_ctxs && running_exclusive_ctxs)
- running_exclusive_ctxs = false;
- mutex_unlock(&damon_lock);
-
- return 0;
-}
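
As a companion to damon_register_ops()/damon_select_ops() above, a hedged module-init sketch (my_ops is a hypothetical &struct damon_operations with a not-yet-registered id):

static int __init my_damon_ops_init(void)
{
	int err = damon_register_ops(&my_ops);

	if (err)
		pr_err("failed to register the ops: %d\n", err);
	return err;
}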
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
deleted file mode 100644
index 345e47dcd970..000000000000
--- a/mm/damon/ops-common.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common Operations for Data Access Monitoring
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#include <linux/mmu_notifier.h>
-#include <linux/page_idle.h>
-#include <linux/pagemap.h>
-#include <linux/rmap.h>
-
-#include "ops-common.h"
-
-/*
- * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
- * NULL.
- *
- * The body of this function is stolen from the 'page_idle_get_page()'. We
- * steal rather than reuse it because the code is quite simple.
- */
-struct page *damon_get_page(unsigned long pfn)
-{
- struct page *page = pfn_to_online_page(pfn);
-
- if (!page || !PageLRU(page) || !get_page_unless_zero(page))
- return NULL;
-
- if (unlikely(!PageLRU(page))) {
- put_page(page);
- page = NULL;
- }
- return page;
-}
-
-void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
-{
- bool referenced = false;
- struct page *page = damon_get_page(pte_pfn(*pte));
-
- if (!page)
- return;
-
- if (pte_young(*pte)) {
- referenced = true;
- *pte = pte_mkold(*pte);
- }
-
-#ifdef CONFIG_MMU_NOTIFIER
- if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
- referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
- if (referenced)
- set_page_young(page);
-
- set_page_idle(page);
- put_page(page);
-}
-
-void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- bool referenced = false;
- struct page *page = damon_get_page(pmd_pfn(*pmd));
-
- if (!page)
- return;
-
- if (pmd_young(*pmd)) {
- referenced = true;
- *pmd = pmd_mkold(*pmd);
- }
-
-#ifdef CONFIG_MMU_NOTIFIER
- if (mmu_notifier_clear_young(mm, addr,
- addr + ((1UL) << HPAGE_PMD_SHIFT)))
- referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
- if (referenced)
- set_page_young(page);
-
- set_page_idle(page);
- put_page(page);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-}
-
-#define DAMON_MAX_SUBSCORE (100)
-#define DAMON_MAX_AGE_IN_LOG (32)
-
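-/*
- * Worked example (illustrative only, assuming the damon_reclaim defaults
- * found later in this patch: sample_interval = 500000us, aggr_interval =
- * 5000000us, weight_nr_accesses = 0, weight_age = 1): max_nr_accesses is
- * 10.  A region with nr_accesses = 0 that has been idle for 120 seconds
- * gets age_in_log = 7, negated to -7 since its frequency subscore is 0,
- * shifted to 25, and scaled to an age subscore of 39; the hotness is then
- * 39, rescaled to 38 in [0, DAMOS_MAX_SCORE (99)], so the returned
- * coldness is 99 - 38 = 61.  The same region idle for a single aggregation
- * interval (5 seconds) would score 55, i.e. regions that have been idle
- * longer score as colder and are paged out first.
- */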
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
- struct damos *s)
-{
- unsigned int max_nr_accesses;
- int freq_subscore;
- unsigned int age_in_sec;
- int age_in_log, age_subscore;
- unsigned int freq_weight = s->quota.weight_nr_accesses;
- unsigned int age_weight = s->quota.weight_age;
- int hotness;
-
- max_nr_accesses = c->aggr_interval / c->sample_interval;
- if (max_nr_accesses == 0)
- max_nr_accesses = 1;
- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
-
- age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
- for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
- age_in_log++, age_in_sec >>= 1)
- ;
-
- /* If frequency is 0, higher age means it's colder */
- if (freq_subscore == 0)
- age_in_log *= -1;
-
- /*
- * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
- * Scale it to be in [0, 100] and set it as age subscore.
- */
- age_in_log += DAMON_MAX_AGE_IN_LOG;
- age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
- DAMON_MAX_AGE_IN_LOG / 2;
-
- hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
- if (freq_weight + age_weight)
- hotness /= freq_weight + age_weight;
- /*
- * Transform it to fit in [0, DAMOS_MAX_SCORE]
- */
- hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
-
- /* Return coldness of the region */
- return DAMOS_MAX_SCORE - hotness;
-} \ No newline at end of file
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
deleted file mode 100644
index 8d562105fc10..000000000000
--- a/mm/damon/ops-common.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common Operations for Data Access Monitoring
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#include <linux/damon.h>
-
-struct page *damon_get_page(unsigned long pfn);
-
-void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
-void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
-
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
- struct damos *s); \ No newline at end of file
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
deleted file mode 100644
index 1615fbd0b366..000000000000
--- a/mm/damon/paddr.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DAMON Operations for The Physical Address Space
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#define pr_fmt(fmt) "damon-pa: " fmt
-
-#include <linux/mmu_notifier.h>
-#include <linux/page_idle.h>
-#include <linux/pagemap.h>
-#include <linux/rmap.h>
-
-#include "../internal.h"
-#include "ops-common.h"
-
-extern unsigned long min_age;
-
-extern unsigned long quota_ms;
-extern unsigned long quota_sz;
-extern unsigned long quota_reset_interval_ms;
-
-extern unsigned long wmarks_interval;
-extern unsigned long wmarks_high;
-extern unsigned long wmarks_mid;
-extern unsigned long wmarks_low;
-
-extern unsigned long sample_interval;
-extern unsigned long aggr_interval;
-extern unsigned long min_nr_regions;
-extern unsigned long max_nr_regions;
-
-extern unsigned long nr_reclaim_page;
-
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
-{
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = addr,
- };
-
- while (page_vma_mapped_walk(&pvmw)) {
- addr = pvmw.address;
- if (pvmw.pte)
- damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
- else
- damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
- }
- return true;
-}
-
-static void damon_pa_mkold(unsigned long paddr)
-{
- struct page *page = damon_get_page(PHYS_PFN(paddr));
- struct rmap_walk_control rwc = {
- .rmap_one = __damon_pa_mkold,
- .anon_lock = page_lock_anon_vma_read,
- };
- bool need_lock;
-
- if (!page)
- return;
-
- if (!page_mapped(page) || !page_rmapping(page)) {
- set_page_idle(page);
- goto out;
- }
-
- need_lock = !PageAnon(page) || PageKsm(page);
- if (need_lock && !trylock_page(page))
- goto out;
-
- rmap_walk(page, &rwc);
-
- if (need_lock)
- unlock_page(page);
-
-out:
- put_page(page);
-}
-
-static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
- struct damon_region *r)
-{
- r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
-
- damon_pa_mkold(r->sampling_addr);
-}
-
-static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- struct damon_region *r;
-
- damon_for_each_target(t, ctx) {
- damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(ctx, r);
- }
-}
-
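-/*
- * Access-check sketch: __damon_pa_prepare_access_check() above picks a
- * random address in each region and clears the backing page's young/idle
- * state via an rmap walk (damon_pa_mkold).  After the kdamond sleeps for
- * one sample_interval, the checks below test whether any mapping touched
- * the page in between and bump the region's nr_accesses if so.
- */
-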
-struct damon_pa_access_chk_result {
- unsigned long page_sz;
- bool accessed;
-};
-
-static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
-{
- struct damon_pa_access_chk_result *result = arg;
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = addr,
- };
-
- result->accessed = false;
- result->page_sz = PAGE_SIZE;
- while (page_vma_mapped_walk(&pvmw)) {
- addr = pvmw.address;
- if (pvmw.pte) {
- result->accessed = pte_young(*pvmw.pte) ||
- !page_is_idle(page) ||
- mmu_notifier_test_young(vma->vm_mm, addr);
- } else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- result->accessed = pmd_young(*pvmw.pmd) ||
- !page_is_idle(page) ||
- mmu_notifier_test_young(vma->vm_mm, addr);
- result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
-#else
- WARN_ON_ONCE(1);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
- }
- if (result->accessed) {
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
- }
-
- /* If accessed, stop walking */
- return !result->accessed;
-}
-
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
-{
- struct page *page = damon_get_page(PHYS_PFN(paddr));
- struct damon_pa_access_chk_result result = {
- .page_sz = PAGE_SIZE,
- .accessed = false,
- };
- struct rmap_walk_control rwc = {
- .arg = &result,
- .rmap_one = __damon_pa_young,
- .anon_lock = page_lock_anon_vma_read,
- };
- bool need_lock;
-
- if (!page)
- return false;
-
- if (!page_mapped(page) || !page_rmapping(page)) {
- if (page_is_idle(page))
- result.accessed = false;
- else
- result.accessed = true;
- put_page(page);
- goto out;
- }
-
- need_lock = !PageAnon(page) || PageKsm(page);
- if (need_lock && !trylock_page(page)) {
- put_page(page);
- return false;
- }
-
- rmap_walk(page, &rwc);
-
- if (need_lock)
- unlock_page(page);
- put_page(page);
-
-out:
- *page_sz = result.page_sz;
- return result.accessed;
-}
-
-static void __damon_pa_check_access(struct damon_ctx *ctx,
- struct damon_region *r)
-{
- static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
- static bool last_accessed;
-
- /* If the region is in the last checked page, reuse the result */
- if (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
- if (last_accessed)
- r->nr_accesses++;
- return;
- }
-
- last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
- if (last_accessed)
- r->nr_accesses++;
-
- last_addr = r->sampling_addr;
-}
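-
-/*
- * Note that the one-entry result cache above is function-static, so it is
- * shared by every context using these operations.  With the single
- * physical-address context created by damon_reclaim below, that is
- * harmless, but results could leak between contexts if more were added.
- */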
-
-static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- struct damon_region *r;
- unsigned int max_nr_accesses = 0;
-
- damon_for_each_target(t, ctx) {
- damon_for_each_region(r, t) {
- __damon_pa_check_access(ctx, r);
- max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
- }
- }
-
- return max_nr_accesses;
-}
-
-static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
-{
- unsigned long addr, applied;
- LIST_HEAD(page_list);
-
- if (scheme->action != DAMOS_PAGEOUT)
- return 0;
-
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct page *page = damon_get_page(PHYS_PFN(addr));
-
- if (!page)
- continue;
-
- ClearPageReferenced(page);
- test_and_clear_page_young(page);
- if (isolate_lru_page(page)) {
- put_page(page);
- continue;
- }
- if (PageUnevictable(page)) {
- putback_lru_page(page);
- } else {
- list_add(&page->lru, &page_list);
- put_page(page);
- }
- }
- applied = reclaim_pages(&page_list);
- cond_resched();
- printk("[damon_reclaim] reclaimed %lu pages\n", applied);
- nr_reclaim_page += applied;
- return applied * PAGE_SIZE;
-}
-
-static int damon_pa_scheme_score(struct damon_ctx *context,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
-{
- switch (scheme->action) {
- case DAMOS_PAGEOUT:
- return damon_pageout_score(context, r, scheme);
- default:
- break;
- }
-
- return DAMOS_MAX_SCORE;
-}
-
-static void damon_update_param(struct damon_ctx *ctx)
-{
- struct damos *s;
- damon_for_each_scheme(s, ctx) {
- s->min_age_region = min_age / aggr_interval;
-
- s->quota.ms = quota_ms;
- s->quota.sz = quota_sz;
- s->quota.reset_interval = quota_reset_interval_ms;
-
- s->wmarks.interval = wmarks_interval;
- s->wmarks.high = wmarks_high;
- s->wmarks.mid = wmarks_mid;
- s->wmarks.low = wmarks_low;
- }
- ctx->sample_interval = sample_interval;
- ctx->aggr_interval = aggr_interval;
- ctx->min_nr_regions = min_nr_regions;
- ctx->max_nr_regions = max_nr_regions;
-}
-
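-/*
- * Registered at subsys_initcall level so that, in the built-in case, the
- * physical address space operations are available before
- * damon_reclaim_init() (a module_init, i.e. device-level, initcall) calls
- * damon_select_ops() with DAMON_OPS_PADDR.
- */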
-static int __init damon_pa_initcall(void)
-{
- struct damon_operations ops = {
- .id = DAMON_OPS_PADDR,
- .init = NULL,
- .update = damon_update_param,
- .prepare_access_checks = damon_pa_prepare_access_checks,
- .check_accesses = damon_pa_check_accesses,
- .reset_aggregated = NULL,
- .target_valid = NULL,
- .cleanup = NULL,
- .apply_scheme = damon_pa_apply_scheme,
- .get_scheme_score = damon_pa_scheme_score,
- };
-
- return damon_register_ops(&ops);
-}
-
-subsys_initcall(damon_pa_initcall);
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
deleted file mode 100644
index b277f8d1f49e..000000000000
--- a/mm/damon/reclaim.c
+++ /dev/null
@@ -1,452 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DAMON-based page reclamation
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#define pr_fmt(fmt) "damon-reclaim: " fmt
-
-#include <linux/damon.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-
-#include <trace/events/damon.h>
-
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "damon_reclaim."
-
-/*
- * Enable or disable DAMON_RECLAIM.
- *
- * You can enable DAMON_RECLAIM by setting the value of this parameter as
- * ``Y``, and disable it by setting ``N``. Note that, due to the
- * watermarks-based activation condition, DAMON_RECLAIM could do no real
- * monitoring and reclamation even while enabled. Refer to the descriptions
- * of the watermarks parameters below for this.
- */
-static bool enabled __read_mostly = false;
-
-/*
- * Time threshold for cold memory regions identification in microseconds.
- *
- * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
- * identifies the region as cold and reclaims it. 120 seconds by default.
- */
-unsigned long min_age __read_mostly = 120000000;
-module_param(min_age, ulong, 0664);
-
-/*
- * Limit of time for trying the reclamation in milliseconds.
- *
- * DAMON_RECLAIM tries to use only up to this time within a time window
- * (quota_reset_interval_ms) for trying reclamation of cold pages. This can be
- * used for limiting CPU consumption of DAMON_RECLAIM. If the value is zero,
- * the limit is disabled.
- *
- * 10 ms by default.
- */
-unsigned long quota_ms __read_mostly = 10;
-module_param(quota_ms, ulong, 0664);
-
-/*
- * Limit of size of memory for the reclamation in bytes.
- *
- * DAMON_RECLAIM charges the amount of memory it has tried to reclaim within
- * a time window (quota_reset_interval_ms) and ensures that no more than
- * this limit is tried. This can be used for limiting consumption of CPU
- * and IO. If this value is zero, the limit is disabled.
- *
- * 128 MiB by default.
- */
-unsigned long quota_sz __read_mostly = 128 * 1024 * 1024;
-module_param(quota_sz, ulong, 0664);
-
-/*
- * The time/size quota charge reset interval in milliseconds.
- *
- * The charge reset interval for the quota of time (quota_ms) and size
- * (quota_sz). That is, DAMON_RECLAIM does not try reclamation for more than
- * quota_ms milliseconds or quota_sz bytes within quota_reset_interval_ms
- * milliseconds.
- *
- * 1 second by default.
- */
-unsigned long quota_reset_interval_ms __read_mostly = 1000;
-module_param(quota_reset_interval_ms, ulong, 0664);
-
-/*
- * The watermarks metric
- * 0 = DAMOS_WMARK_NONE // always
- * 1 = DAMOS_WMARK_FREE_MEM_RATE // usually
- * 2 = DAMOS_WMARK_OPLUS // often
- * 3 = DAMOS_WMARK_SLEEP // never
- */
-int wmarks_metric __read_mostly = 2;
-module_param(wmarks_metric, int, 0664);
-
-/*
- * The watermarks check time interval in microseconds.
- *
- * Minimal time to wait before checking the watermarks, when DAMON_RECLAIM is
- * enabled but inactive due to its watermarks rule. 5 seconds by default.
- */
-unsigned long wmarks_interval __read_mostly = 5000000;
-module_param(wmarks_interval, ulong, 0664);
-
-/*
- * Memory rate (per thousand) for the high watermark.
- *
- * If free memory of the system in bytes per thousand bytes is higher than
- * this, DAMON_RECLAIM becomes inactive, so it does nothing but periodically
- * checks the watermarks. 500 (50%) by default.
- */
-unsigned long wmarks_high __read_mostly = 500;
-module_param(wmarks_high, ulong, 0664);
-
-/*
- * Memory rate (per thousand) for the middle watermark.
- *
- * If free memory of the system in bytes per thousand bytes is between this and
- * the low watermark, DAMON_RECLAIM becomes active, so starts the monitoring
- * and the reclaiming. 400 (40%) by default.
- */
-unsigned long wmarks_mid __read_mostly = 400;
-module_param(wmarks_mid, ulong, 0664);
-
-/*
- * Memory rate (per thousand) for the low watermark.
- *
- * If free memory of the system in bytes per thousand bytes is lower than this,
- * DAMON_RECLAIM becomes inactive, so it does nothing but periodically checks
- * the watermarks. In that case, the system falls back to the LRU-based page
- * granularity reclamation logic. 40 (4%) by default.
- */
-unsigned long wmarks_low __read_mostly = 40;
-module_param(wmarks_low, ulong, 0664);
-
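-/*
- * Putting the three watermarks together with the values above and the
- * hysteresis implemented by damos_wmark_wait_us(): reclamation activates
- * once free memory falls to the mid mark (40%) and stays active until free
- * memory either recovers above the high mark (50%) or drops below the low
- * mark (4%), where the regular LRU-based reclaim is the better fit.
- */
-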
-/*
- * Sampling interval for the monitoring in microseconds.
- *
- * The sampling interval of DAMON for the cold memory monitoring. Please refer
- * to the DAMON documentation for more detail. 500 ms by default.
- */
-unsigned long sample_interval __read_mostly = 500000; // 500ms
-module_param(sample_interval, ulong, 0664);
-
-/*
- * Aggregation interval for the monitoring in microseconds.
- *
- * The aggregation interval of DAMON for the cold memory monitoring. Please
- * refer to the DAMON documentation for more detail. 5 seconds by default.
- */
-unsigned long aggr_interval __read_mostly = 5000000; // 5 sec
-module_param(aggr_interval, ulong, 0664);
-
-/*
- * Minimum number of monitoring regions.
- *
- * The minimal number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set lower-bound of the monitoring quality.
- * But, setting this too high could result in increased monitoring overhead.
- * Please refer to the DAMON documentation for more detail. 10 by default.
- */
-unsigned long min_nr_regions __read_mostly = 10;
-module_param(min_nr_regions, ulong, 0664);
-
-/*
- * Maximum number of monitoring regions.
- *
- * The maximum number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set upper-bound of the monitoring overhead.
- * However, setting this too low could result in bad monitoring quality.
- * Please refer to the DAMON documentation for more detail. 1000 by default.
- */
-unsigned long max_nr_regions __read_mostly = 1000;
-module_param(max_nr_regions, ulong, 0664);
-
-/*
- * Start of the target memory region in physical address.
- *
- * The start physical address of memory region that DAMON_RECLAIM will do work
- * against. By default, the biggest 'System RAM' resource is used as the region.
- */
-static unsigned long monitor_region_start __read_mostly;
-module_param(monitor_region_start, ulong, 0664);
-
-/*
- * End of the target memory region in physical address.
- *
- * The end physical address of memory region that DAMON_RECLAIM will do work
- * against. By default, the biggest 'System RAM' resource is used as the region.
- */
-static unsigned long monitor_region_end __read_mostly;
-module_param(monitor_region_end, ulong, 0664);
-
-/*
- * PID of the DAMON thread
- *
- * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
- * Else, -1.
- */
-static int kdamond_pid __read_mostly = -1;
-module_param(kdamond_pid, int, 0400);
-
-/*
- * Number of memory regions that DAMON_RECLAIM has tried to reclaim.
- */
-static unsigned long nr_reclaim_tried_regions __read_mostly;
-module_param(nr_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Total bytes of the memory regions that DAMON_RECLAIM has tried to reclaim.
- */
-static unsigned long bytes_reclaim_tried_regions __read_mostly;
-module_param(bytes_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Number of memory regions that were successfully reclaimed.
- */
-static unsigned long nr_reclaimed_regions __read_mostly;
-module_param(nr_reclaimed_regions, ulong, 0400);
-
-/*
- * Total bytes of the memory regions that were successfully reclaimed.
- */
-static unsigned long bytes_reclaimed_regions __read_mostly;
-module_param(bytes_reclaimed_regions, ulong, 0400);
-
-/*
- * Number of times that the time/space quota limits have been exceeded.
- */
-static unsigned long nr_quota_exceeds __read_mostly;
-module_param(nr_quota_exceeds, ulong, 0400);
-
-static struct damon_ctx *ctx;
-static struct damon_target *target;
-
-struct damon_reclaim_ram_walk_arg {
- unsigned long start;
- unsigned long end;
-};
-
-unsigned long nr_reclaim_time __read_mostly;
-module_param(nr_reclaim_time, ulong, 0400);
-
-/* Total number of pages reclaimed by the DAMOS_PAGEOUT scheme */
-unsigned long nr_reclaim_page __read_mostly;
-module_param(nr_reclaim_page, ulong, 0400);
-
-unsigned long nr_damon_region __read_mostly;
-module_param(nr_damon_region, ulong, 0400);
-
-static int walk_system_ram(struct resource *res, void *arg)
-{
- struct damon_reclaim_ram_walk_arg *a = arg;
-
- if (a->end - a->start < res->end - res->start) {
- a->start = res->start;
- a->end = res->end;
- }
- return 0;
-}
-
-/*
- * Find the biggest 'System RAM' resource and store its start and end address in
- * @start and @end, respectively. If no System RAM is found, returns false.
- */
-static bool get_monitoring_region(unsigned long *start, unsigned long *end)
-{
- struct damon_reclaim_ram_walk_arg arg = {};
-
- walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
- if (arg.end <= arg.start)
- return false;
-
- *start = arg.start;
- *end = arg.end;
- return true;
-}
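-
-/*
- * For instance (hypothetical layout): on a device whose largest 'System
- * RAM' resource spans 0x80000000-0x27fffffff, that whole 8GiB range would
- * become the default monitoring target.
- */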
-
-static struct damos *damon_reclaim_new_scheme(void)
-{
- struct damos_watermarks wmarks = {
- .metric = wmarks_metric,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try reclamation for more than quota_ms milliseconds
- * or quota_sz bytes within quota_reset_interval_ms.
- */
- .ms = quota_ms,
- .sz = quota_sz,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, page out older regions first. */
- .weight_sz = 0,
- .weight_nr_accesses = 0,
- .weight_age = 1
- };
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and not accessed at all */
- 0, 0,
- /* for min_age or more micro-seconds, and */
- min_age / aggr_interval, UINT_MAX,
- /* page out those, as soon as found */
- DAMOS_PAGEOUT,
- /* under the quota. */
- &quota,
- /* (De)activate this according to the watermarks. */
- &wmarks);
-
- return scheme;
-}
-
-static int damon_reclaim_turn(bool on)
-{
- struct damon_region *region;
- struct damos *scheme;
- int err;
-
- if (!on) {
- err = damon_stop(&ctx, 1);
- if (!err)
- kdamond_pid = -1;
- return err;
- }
-
- err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
- min_nr_regions, max_nr_regions);
- if (err)
- return err;
-
- if (monitor_region_start > monitor_region_end)
- return -EINVAL;
- if (!monitor_region_start && !monitor_region_end &&
- !get_monitoring_region(&monitor_region_start,
- &monitor_region_end))
- return -EINVAL;
- /* DAMON will free this on its own when it finishes monitoring */
- region = damon_new_region(monitor_region_start, monitor_region_end);
- if (!region)
- return -ENOMEM;
- damon_add_region(region, target);
-
- /* Will be freed by 'damon_set_schemes()' below */
- scheme = damon_reclaim_new_scheme();
- if (!scheme) {
- err = -ENOMEM;
- goto free_region_out;
- }
- err = damon_set_schemes(ctx, &scheme, 1);
- if (err)
- goto free_scheme_out;
-
- err = damon_start(&ctx, 1, true);
- if (!err) {
- kdamond_pid = ctx->kdamond->pid;
- return 0;
- }
-
-free_scheme_out:
- damon_destroy_scheme(scheme);
-free_region_out:
- damon_destroy_region(region, target);
- return err;
-}
-
-#define ENABLE_CHECK_INTERVAL_MS 1000
-static struct delayed_work damon_reclaim_timer;
-static void damon_reclaim_timer_fn(struct work_struct *work)
-{
- static bool last_enabled;
- bool now_enabled;
-
- now_enabled = enabled;
- if (last_enabled != now_enabled) {
- if (!damon_reclaim_turn(now_enabled))
- last_enabled = now_enabled;
- else
- enabled = last_enabled;
- }
-
- if (enabled)
- schedule_delayed_work(&damon_reclaim_timer,
- msecs_to_jiffies(ENABLE_CHECK_INTERVAL_MS));
-}
-static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
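-
-/*
- * enabled_store() below only schedules the work when turning the feature
- * on; turning it off is noticed by the periodic re-check above, which runs
- * every ENABLE_CHECK_INTERVAL_MS while DAMON_RECLAIM is active.  On a
- * failed transition, damon_reclaim_timer_fn() rolls 'enabled' back so the
- * parameter always reflects the actual state.
- */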
-
-static int enabled_store(const char *val,
- const struct kernel_param *kp)
-{
- int rc = param_set_bool(val, kp);
-
- if (rc < 0)
- return rc;
-
- if (enabled)
- schedule_delayed_work(&damon_reclaim_timer, 0);
-
- return 0;
-}
-
-static const struct kernel_param_ops enabled_param_ops = {
- .set = enabled_store,
- .get = param_get_bool,
-};
-
-module_param_cb(enabled, &enabled_param_ops, &enabled, 0664);
-MODULE_PARM_DESC(enabled,
- "Enable or disable DAMON_RECLAIM (default: disabled)");
-
-static int damon_reclaim_after_aggregation(struct damon_ctx *c)
-{
- struct damos *s;
-
- /* update the stats parameter */
- damon_for_each_scheme(s, c) {
- nr_reclaim_tried_regions = s->stat.nr_tried;
- bytes_reclaim_tried_regions = s->stat.sz_tried;
- nr_reclaimed_regions = s->stat.nr_applied;
- bytes_reclaimed_regions = s->stat.sz_applied;
- nr_quota_exceeds = s->stat.qt_exceeds;
- trace_damon_reclaim_statistics(nr_reclaim_tried_regions,
- bytes_reclaim_tried_regions,
- nr_reclaimed_regions,
- bytes_reclaimed_regions,
- nr_quota_exceeds);
- }
- return 0;
-}
-
-static int __init damon_reclaim_init(void)
-{
- ctx = damon_new_ctx();
- if (!ctx)
- return -ENOMEM;
-
- if (damon_select_ops(ctx, DAMON_OPS_PADDR))
- return -EINVAL;
-
- ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
-
- target = damon_new_target();
- if (!target) {
- damon_destroy_ctx(ctx);
- return -ENOMEM;
- }
- damon_add_target(ctx, target);
-
- schedule_delayed_work(&damon_reclaim_timer, 0);
- return 0;
-}
-
-module_init(damon_reclaim_init);